repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
fkie-cad/FACT_core | src/storage/db_interface_compare.py | 1 | 4915 | import logging
from contextlib import suppress
from time import time
from typing import List, Optional
from pymongo.errors import PyMongoError
from helperFunctions.data_conversion import (
convert_compare_id_to_list, convert_uid_list_to_compare_id, normalize_compare_id
)
from storage.db_interface_common import MongoInterfaceCommon
class FactCompareException(Exception):
def get_message(self):
if self.args: # pylint: disable=using-constant-test
return self.args[0] # pylint: disable=unsubscriptable-object
return ''
class CompareDbInterface(MongoInterfaceCommon):
def _setup_database_mapping(self):
super()._setup_database_mapping()
self.compare_results = self.main.compare_results
def add_compare_result(self, compare_result):
compare_result['_id'] = self._calculate_compare_result_id(compare_result)
compare_result['submission_date'] = time()
with suppress(PyMongoError):
self.compare_results.delete_one({'_id': compare_result['_id']})
self.compare_results.insert_one(compare_result)
logging.info('compare result added to db: {}'.format(compare_result['_id']))
def get_compare_result(self, compare_id: str) -> Optional[dict]:
compare_id = normalize_compare_id(compare_id)
self.check_objects_exist(compare_id)
compare_result = self.compare_results.find_one(compare_id)
if compare_result:
logging.debug('got compare result from db: {}'.format(compare_id))
return compare_result
logging.debug('compare result not found in db: {}'.format(compare_id))
return None
def check_objects_exist(self, compare_id, raise_exc=True):
for uid in convert_compare_id_to_list(compare_id):
if not self.exists(uid):
if raise_exc:
raise FactCompareException('{} not found in database'.format(uid))
return True
return False
def compare_result_is_in_db(self, compare_id):
compare_result = self.compare_results.find_one(normalize_compare_id(compare_id))
return bool(compare_result)
def delete_old_compare_result(self, compare_id):
try:
self.compare_results.remove({'_id': normalize_compare_id(compare_id)})
logging.debug('old compare result deleted: {}'.format(compare_id))
except Exception as exception:
logging.warning('Could not delete old compare result: {} {}'.format(type(exception).__name__, exception))
@staticmethod
def _calculate_compare_result_id(compare_result):
general_dict = compare_result['general']
uid_set = set()
for key in general_dict:
uid_set.update(list(general_dict[key].keys()))
comp_id = convert_uid_list_to_compare_id(list(uid_set))
return comp_id
def page_compare_results(self, skip=0, limit=0):
db_entries = self.compare_results.find({'submission_date': {'$gt': 1}}, {'general.hid': 1, 'submission_date': 1}, skip=skip, limit=limit, sort=[('submission_date', -1)])
all_previous_results = [(item['_id'], item['general']['hid'], item['submission_date']) for item in db_entries]
return [
compare
for compare in all_previous_results
if self._all_objects_are_in_db(compare[0])
]
def _all_objects_are_in_db(self, compare_id):
try:
self.check_objects_exist(compare_id)
return True
except FactCompareException:
return False
def get_total_number_of_results(self):
db_entries = self.compare_results.find({'submission_date': {'$gt': 1}}, {'_id': 1})
return len([1 for entry in db_entries if not self.check_objects_exist(entry['_id'], raise_exc=False)])
def get_ssdeep_hash(self, uid):
file_object_entry = self.file_objects.find_one({'_id': uid}, {'processed_analysis.file_hashes.ssdeep': 1})
return file_object_entry['processed_analysis']['file_hashes']['ssdeep'] if 'file_hashes' in file_object_entry['processed_analysis'] else None
def get_entropy(self, uid):
file_object_entry = self.file_objects.find_one({'_id': uid}, {'processed_analysis.unpacker.entropy': 1})
return file_object_entry['processed_analysis']['unpacker']['entropy'] if 'unpacker' in file_object_entry['processed_analysis'] and 'entropy' in file_object_entry['processed_analysis']['unpacker'] else 0.0
def get_exclusive_files(self, compare_id: str, root_uid: str) -> List[str]:
if compare_id is None or root_uid is None:
return []
try:
result = self.get_compare_result(compare_id)
exclusive_files = result['plugins']['File_Coverage']['exclusive_files'][root_uid]
except (KeyError, FactCompareException):
exclusive_files = []
return exclusive_files
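    # Illustrative usage sketch (hypothetical: the `config` object and the uid
    # string below are assumptions, not part of this module):
    #
    #   db = CompareDbInterface(config=config)
    #   compare_id = normalize_compare_id('uid_a;uid_b')
    #   if db.compare_result_is_in_db(compare_id):
    #       result = db.get_compare_result(compare_id)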
| gpl-3.0 | 2,370,626,291,600,637,000 | 44.091743 | 212 | 0.647202 | false |
GeoCat/QGIS | tests/src/python/test_qgsvectorfilewriter.py | 1 | 30961 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsVectorFileWriter.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import next
from builtins import str
__author__ = 'Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsField,
QgsGeometry,
QgsPointXY,
QgsCoordinateReferenceSystem,
QgsVectorFileWriter,
QgsFeatureRequest,
QgsWkbTypes,
QgsRectangle,
QgsCoordinateTransform
)
from qgis.PyQt.QtCore import QDate, QTime, QDateTime, QVariant, QDir
import os
import osgeo.gdal # NOQA
from osgeo import gdal, ogr
from qgis.testing import start_app, unittest
from utilities import writeShape, compareWkt, unitTestDataPath
TEST_DATA_DIR = unitTestDataPath()
start_app()
def GDAL_COMPUTE_VERSION(maj, min, rev):
return ((maj) * 1000000 + (min) * 10000 + (rev) * 100)
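# Worked example: GDAL_COMPUTE_VERSION(2, 2, 0) == 2020000, the packed integer
# form used when comparing GDAL version numbers.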
class TestFieldValueConverter(QgsVectorFileWriter.FieldValueConverter):
def __init__(self, layer):
QgsVectorFileWriter.FieldValueConverter.__init__(self)
self.layer = layer
def fieldDefinition(self, field):
idx = self.layer.fields().indexFromName(field.name())
if idx == 0:
return self.layer.fields()[idx]
elif idx == 2:
return QgsField('conv_attr', QVariant.String)
return QgsField('unexpected_idx')
def convert(self, idx, value):
if idx == 0:
return value
elif idx == 2:
if value == 3:
return 'converted_val'
else:
return 'unexpected_val!'
return 'unexpected_idx'
class TestQgsVectorFileWriter(unittest.TestCase):
mMemoryLayer = None
def testWrite(self):
"""Check we can write a vector file."""
self.mMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&field=name:string(20)&'
'field=age:integer&field=size:double&index=yes'),
'test',
'memory')
self.assertIsNotNone(self.mMemoryLayer, 'Provider not initialized')
myProvider = self.mMemoryLayer.dataProvider()
self.assertIsNotNone(myProvider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPoint(QgsPointXY(10, 10)))
ft.setAttributes(['Johny', 20, 0.3])
myResult, myFeatures = myProvider.addFeatures([ft])
self.assertTrue(myResult)
self.assertTrue(myFeatures)
writeShape(self.mMemoryLayer, 'writetest.shp')
def testDateTimeWriteShapefile(self):
"""Check writing date and time fields to an ESRI shapefile."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&'
'field=date_f:date&field=time_f:time&field=dt_f:datetime'),
'test',
'memory')
self.assertTrue(ml.isValid())
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPoint(QgsPointXY(10, 10)))
ft.setAttributes([1, QDate(2014, 3, 5), QTime(13, 45, 22), QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22))])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'datetime.shp')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('date_f')).type(), QVariant.Date)
# shapefiles do not support time types, result should be string
self.assertEqual(fields.at(fields.indexFromName('time_f')).type(), QVariant.String)
# shapefiles do not support datetime types, result should be string
self.assertEqual(fields.at(fields.indexFromName('dt_f')).type(), QVariant.String)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
date_idx = created_layer.fields().lookupField('date_f')
self.assertIsInstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2014, 3, 5))
time_idx = created_layer.fields().lookupField('time_f')
# shapefiles do not support time types
self.assertIsInstance(f.attributes()[time_idx], str)
self.assertEqual(f.attributes()[time_idx], '13:45:22')
# shapefiles do not support datetime types
datetime_idx = created_layer.fields().lookupField('dt_f')
self.assertIsInstance(f.attributes()[datetime_idx], str)
self.assertEqual(f.attributes()[datetime_idx],
QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22)).toString("yyyy/MM/dd hh:mm:ss.zzz"))
def testWriterWithExtent(self):
"""Check writing using extent filter."""
source_file = os.path.join(TEST_DATA_DIR, 'points.shp')
source_layer = QgsVectorLayer(source_file, 'Points', 'ogr')
self.assertTrue(source_layer.isValid())
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'ESRI Shapefile'
options.filterExtent = QgsRectangle(-111, 26, -96, 38)
dest_file_name = os.path.join(str(QDir.tempPath()), 'extent_no_transform.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
source_layer,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
features = [f for f in created_layer.getFeatures()]
self.assertEqual(len(features), 5)
for f in features:
self.assertTrue(f.geometry().intersects(options.filterExtent))
def testWriterWithExtentAndReprojection(self):
"""Check writing using extent filter with reprojection."""
source_file = os.path.join(TEST_DATA_DIR, 'points.shp')
source_layer = QgsVectorLayer(source_file, 'Points', 'ogr')
self.assertTrue(source_layer.isValid())
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'ESRI Shapefile'
options.filterExtent = QgsRectangle(-12511460, 3045157, -10646621, 4683497)
options.ct = QgsCoordinateTransform(source_layer.crs(), QgsCoordinateReferenceSystem.fromEpsgId(3785))
dest_file_name = os.path.join(str(QDir.tempPath()), 'extent_transform.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
source_layer,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
features = [f for f in created_layer.getFeatures()]
self.assertEqual(len(features), 5)
for f in features:
self.assertTrue(f.geometry().intersects(options.filterExtent))
def testDateTimeWriteTabfile(self):
"""Check writing date and time fields to an MapInfo tabfile."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&'
'field=date_f:date&field=time_f:time&field=dt_f:datetime'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPoint(QgsPointXY(10, 10)))
ft.setAttributes([1, QDate(2014, 3, 5), QTime(13, 45, 22), QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22))])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'datetime.tab')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'MapInfo File')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('date_f')).type(), QVariant.Date)
self.assertEqual(fields.at(fields.indexFromName('time_f')).type(), QVariant.Time)
self.assertEqual(fields.at(fields.indexFromName('dt_f')).type(), QVariant.DateTime)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
date_idx = created_layer.fields().lookupField('date_f')
self.assertIsInstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2014, 3, 5))
time_idx = created_layer.fields().lookupField('time_f')
self.assertIsInstance(f.attributes()[time_idx], QTime)
self.assertEqual(f.attributes()[time_idx], QTime(13, 45, 22))
datetime_idx = created_layer.fields().lookupField('dt_f')
self.assertIsInstance(f.attributes()[datetime_idx], QDateTime)
self.assertEqual(f.attributes()[datetime_idx], QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22)))
def testWriteShapefileWithZ(self):
"""Check writing geometries with Z dimension to an ESRI shapefile."""
# start by saving a memory layer and forcing z
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('PointZ (1 2 3)'))
ft.setAttributes([1])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
# check with both a standard PointZ and 25d style Point25D type
for t in [QgsWkbTypes.PointZ, QgsWkbTypes.Point25D]:
dest_file_name = os.path.join(str(QDir.tempPath()), 'point_{}.shp'.format(QgsWkbTypes.displayString(t)))
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
overrideGeometryType=t)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.exportToWkt()
expWkt = 'PointZ (1 2 3)'
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with Z failed: mismatch Expected:\n%s\nGot:\n%s\n" % (expWkt, wkt))
# also try saving out the shapefile version again, as an extra test
# this tests that saving a layer with z WITHOUT explicitly telling the writer to keep z values,
            # will still retain the z values
dest_file_name = os.path.join(str(QDir.tempPath()),
'point_{}_copy.shp'.format(QgsWkbTypes.displayString(t)))
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
created_layer,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer_from_shp = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer_from_shp.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.exportToWkt()
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with Z failed: mismatch Expected:\n%s\nGot:\n%s\n" % (expWkt, wkt))
def testWriteShapefileWithMultiConversion(self):
"""Check writing geometries to an ESRI shapefile with conversion to multi."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('Point (1 2)'))
ft.setAttributes([1])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'to_multi.shp')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
forceMulti=True)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.exportToWkt()
expWkt = 'MultiPoint ((1 2))'
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with multi conversion failed: mismatch Expected:\n%s\nGot:\n%s\n" % (
expWkt, wkt))
def testWriteShapefileWithAttributeSubsets(self):
"""Tests writing subsets of attributes to files."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&field=field1:int&field=field2:int&field=field3:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('Point (1 2)'))
ft.setAttributes([1, 11, 12, 13])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
# first write out with all attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'all_attributes.shp')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
attributes=[])
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 4)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['id'], 1)
self.assertEqual(f['field1'], 11)
self.assertEqual(f['field2'], 12)
self.assertEqual(f['field3'], 13)
# now test writing out only a subset of attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'subset_attributes.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
attributes=[1, 3])
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 2)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['field1'], 11)
self.assertEqual(f['field3'], 13)
# finally test writing no attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'no_attributes.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
skipAttributeCreation=True)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
# expect only a default 'FID' field for shapefiles
self.assertEqual(created_layer.fields().count(), 1)
self.assertEqual(created_layer.fields()[0].name(), 'FID')
# in this case we also check that the geometry exists, to make sure feature has been correctly written
# even without attributes
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.exportToWkt()
expWkt = 'Point (1 2)'
self.assertTrue(compareWkt(expWkt, wkt),
"geometry not saved correctly when saving without attributes : mismatch Expected:\n%s\nGot:\n%s\n" % (
expWkt, wkt))
self.assertEqual(f['FID'], 0)
def testValueConverter(self):
"""Tests writing a layer with a field value converter."""
ml = QgsVectorLayer(
('Point?field=nonconv:int&field=ignored:string&field=converted:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
self.assertEqual(ml.fields().count(), 3)
ft = QgsFeature()
ft.setAttributes([1, 'ignored', 3])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'value_converter.shp')
converter = TestFieldValueConverter(ml)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
QgsCoordinateReferenceSystem(),
'ESRI Shapefile',
attributes=[0, 2],
fieldValueConverter=converter)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 2)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['nonconv'], 1)
self.assertEqual(f['conv_attr'], 'converted_val')
def testInteger64WriteTabfile(self):
"""Check writing Integer64 fields to an MapInfo tabfile (which does not support that type)."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=int8:int8'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setAttributes([2123456789])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'integer64.tab')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'MapInfo File')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('int8')).type(), QVariant.Double)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
int8_idx = created_layer.fields().lookupField('int8')
self.assertEqual(f.attributes()[int8_idx], 2123456789)
def testDefaultDatasetOptions(self):
""" Test retrieving default dataset options for a format """
# NOTE - feel free to adapt these if the defaults change!
options = QgsVectorFileWriter.defaultDatasetOptions('not a format')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultDatasetOptions('ESRI Shapefile')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultDatasetOptions('GML')
# just test a few
self.assertTrue('GML3_LONGSRS=YES' in options)
self.assertTrue('STRIP_PREFIX=NO' in options)
def testDefaultLayerOptions(self):
""" Test retrieving default layer options for a format """
# NOTE - feel free to adapt these if the defaults change!
options = QgsVectorFileWriter.defaultLayerOptions('not a format')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultLayerOptions('ESRI Shapefile')
self.assertEqual(options, ['RESIZE=NO'])
options = QgsVectorFileWriter.defaultLayerOptions('GML')
self.assertEqual(options, [])
def testOverwriteLayer(self):
"""Tests writing a layer with a field value converter."""
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([1])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename, update=1)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 1)
ds.CreateLayer('another_layer')
del f
del lyr
del ds
caps = QgsVectorFileWriter.editionCapabilities(filename)
self.assertTrue((caps & QgsVectorFileWriter.CanAddNewLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanAppendToExistingLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanAddNewFieldsToExistingLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanDeleteLayer))
self.assertTrue(QgsVectorFileWriter.targetLayerExists(filename, 'test'))
self.assertFalse(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0]))
# Test CreateOrOverwriteLayer
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([2])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.CreateOrOverwriteLayer
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 2)
# another_layer should still exist
self.assertIsNotNone(ds.GetLayerByName('another_layer'))
del f
del lyr
del ds
# Test CreateOrOverwriteFile
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([3])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
# another_layer should no longer exist
self.assertIsNone(ds.GetLayerByName('another_layer'))
del f
del lyr
del ds
# Test AppendToLayerNoNewFields
ml = QgsVectorLayer('Point?field=firstfield:int&field=secondfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([4, -10])
provider.addFeatures([ft])
self.assertTrue(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0, 1]))
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.AppendToLayerNoNewFields
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertEqual(lyr.GetLayerDefn().GetFieldCount(), 1)
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 4)
del f
del lyr
del ds
# Test AppendToLayerAddFields
ml = QgsVectorLayer('Point?field=firstfield:int&field=secondfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([5, -1])
provider.addFeatures([ft])
self.assertTrue(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0, 1]))
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.AppendToLayerAddFields
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertEqual(lyr.GetLayerDefn().GetFieldCount(), 2)
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
if hasattr(f, "IsFieldSetAndNotNull"):
# GDAL >= 2.2
self.assertFalse(f.IsFieldSetAndNotNull('secondfield'))
else:
self.assertFalse(f.IsFieldSet('secondfield'))
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 4)
if hasattr(f, "IsFieldSetAndNotNull"):
self.assertFalse(f.IsFieldSetAndNotNull('secondfield'))
else:
self.assertFalse(f.IsFieldSet('secondfield'))
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 5)
self.assertEqual(f['secondfield'], -1)
del f
del lyr
del ds
gdal.Unlink(filename)
def testSupportedFormatExtensions(self):
formats = QgsVectorFileWriter.supportedFormatExtensions()
self.assertTrue('gpkg' in formats)
self.assertFalse('exe' in formats)
self.assertEqual(formats[0], 'shp')
def testDriverForExtension(self):
self.assertEqual(QgsVectorFileWriter.driverForExtension('shp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('SHP'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('sHp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('.shp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('tab'), 'MapInfo File')
self.assertEqual(QgsVectorFileWriter.driverForExtension('.GML'), 'GML')
self.assertEqual(QgsVectorFileWriter.driverForExtension('not a format'), '')
self.assertEqual(QgsVectorFileWriter.driverForExtension(''), '')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -844,033,346,833,076,500 | 40.614247 | 126 | 0.627467 | false |
d2chau/git-demo-fizzbuzz | python-fizz-buzz/fizzbuzz.py | 1 | 1059 | def basicfizzbuzz(n):
if n % 3 == 0 and n % 5 == 0:
return 'FizzBuzz'
elif n % 3 == 0:
return 'Fizz'
elif n % 5 == 0:
return 'Buzz'
else:
return str(n)
print "\n".join(basicfizzbuzz(n) for n in xrange(1, 100))
print "\n"
print "****************************************************************************"
print "****************************************************************************"
print "****************************************************************************"
print "****************************************************************************"
print "****************************************************************************"
print "****************************************************************************"
print "\n"
def fizzbuzz(n):
    # Apply the divisor rules in a fixed order so that multiples of 15 always
    # yield 'FizzBuzz' (plain dict iteration order is not guaranteed here).
    rules = [('Fizz', 3), ('Buzz', 5)]
    tempStr = ""
    for word, divisor in rules:
        if n % divisor == 0:
            tempStr = tempStr + word
    if not tempStr:
        tempStr = str(n)
    return tempStr
print "\n".join(fizzbuzz(n) for n in xrange(1, 100))
| apache-2.0 | -1,047,292,258,414,213,900 | 28.416667 | 84 | 0.313503 | false |
kawashiro/dewyatochka2 | src/dewyatochka/plugins/cool_story/parser/_base.py | 1 | 4448 | # -*- coding: UTF-8
""" Common parsers logic
Classes
=======
AbstractParser -- Abstract parser
Functions
=========
parse_multiline_html -- Join html paragraphs collection into one multi line string
Attributes
==========
RawPost -- Raw post immutable structure
"""
import re
from collections import namedtuple
from functools import reduce
from abc import ABCMeta, abstractmethod, abstractproperty
from lxml.html import HtmlElement, tostring
from pyquery import PyQuery
from dewyatochka.core.utils.http import WebClient
__all__ = ['AbstractParser', 'RawPost', 'parse_multiline_html']
# Raw post immutable structure (id: int, title: str, text: str, tags: frozenset)
RawPost = namedtuple('RawPost', ('id', 'source', 'title', 'text', 'tags'))
# Regexp to extract text from raw post html code
__post_new_line_regexp = re.compile(r'<br\s*/?>', re.I)
__post_sanitize_regexp = re.compile(r'<.*?>')
def parse_multiline_html(paragraphs) -> str:
""" Join html paragraphs collection into one multi line string
:param iterable paragraphs: Paragraphs HTML nodes list
    :return str:
"""
return '\n'.join(
filter(
None,
map(
lambda line: __post_sanitize_regexp.sub(r'', line).strip(),
reduce(
lambda msg_lines, lines: msg_lines + lines,
[__post_new_line_regexp.split(tostring(line, encoding='unicode'))
for line in paragraphs]
)
)
)
)
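# Illustrative example (the markup below is an assumed input, not a real post):
#
#   from lxml.html import fragment_fromstring
#   paras = [fragment_fromstring('<p>line one<br/>line two</p>'),
#            fragment_fromstring('<p>line three</p>')]
#   parse_multiline_html(paras)  # -> 'line one\nline two\nline three'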
class AbstractParser(metaclass=ABCMeta):
""" Parser implementation
Each parser is an iterable object that yields posts
beginning from the last and ending on the first post
"""
def __init__(self):
""" Init parser object, define mandatory attributes """
self.__client = None
@abstractmethod
def _parse_post(self, html_element: HtmlElement) -> RawPost:
""" Parse post html element
:param HTMLElement html_element:
:return RawPost:
"""
pass
@abstractmethod
def _parse_posts_collection(self, html: PyQuery) -> list:
""" Get posts HTMLElement[] collection
:param PyQuery html: Page PyQuery object
:return list:
"""
pass
@abstractmethod
def _parse_pages_collection(self, html: PyQuery) -> list:
""" Get pages urls for indexation
:param PyQuery html: Page PyQuery object
:return list:
"""
pass
def parse_page_html(self, html) -> list:
""" Parse page's html code and get stories list
:param str|PyQuery html: Page html code or PyQuery object
:return list:
"""
html_doc = html if isinstance(html, PyQuery) else PyQuery(html)
return [self._parse_post(post) for post in self._parse_posts_collection(html_doc)]
def parse_page_url(self, page: str) -> list:
""" Get stories from page by page url
:param str page: Page url
:return list:
"""
return self.parse_page_html(self._client.get(page))
@property
def _web_host(self) -> str:
""" Remote server hostname, normally same as hostname
:return str:
"""
return self.name
@abstractproperty
def name(self) -> str:
""" Get unique name
:return str:
"""
pass
@property
def _client(self) -> WebClient:
""" Get web client instance
:return WebClient:
"""
if self.__client is None:
# noinspection PyTypeChecker
self.__client = WebClient(self._web_host)
return self.__client
def __iter__(self, start_page='') -> RawPost:
""" Yields all the posts found beginning from the page specified
:param str start_page: Page url (e.g. "/20131117") or empty to start from beginning
:return RawPost:
"""
posts = []
pages_links = [start_page or '/']
while True:
while pages_links:
current_page = pages_links.pop(0)
html_doc = self._client.get(current_page)
posts = self.parse_page_html(html_doc)
if posts:
pages_links = self._parse_pages_collection(html_doc)
break
if not posts:
raise StopIteration()
while posts:
yield posts.pop(0)
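# A minimal, hypothetical subclass sketch (the site name and CSS selectors are
# assumptions, not a real parser from this package) showing what the abstract
# hooks above are expected to return:
#
#   class ExampleParser(AbstractParser):
#       @property
#       def name(self) -> str:
#           return 'example.org'
#
#       def _parse_posts_collection(self, html: PyQuery) -> list:
#           return html('div.post')
#
#       def _parse_pages_collection(self, html: PyQuery) -> list:
#           return [a.attrib['href'] for a in html('a.page-link')]
#
#       def _parse_post(self, html_element: HtmlElement) -> RawPost:
#           doc = PyQuery(html_element)
#           return RawPost(id=int(doc.attr('data-id')), source=self.name,
#                          title=doc('h3').text(),
#                          text=parse_multiline_html(doc('p')),
#                          tags=frozenset())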
| gpl-3.0 | -971,577,892,149,413,400 | 26.288344 | 91 | 0.582284 | false |
Luxoft/SDLP2 | SDL_Core/tools/InterfaceGenerator/generator/parsers/RPCBase.py | 1 | 26596 | """RPC XML base parser.
Contains base parser for SDLRPC v1/v2 and JSON RPC XML format.
"""
import collections
import xml.etree.ElementTree
from generator import Model
class ParseError(Exception):
"""Parse error.
This exception is raised when XML contains errors and can't be parsed.
"""
pass
class Parser(object):
"""RPC XML Parser base.
This class must not be used directly. One of its subclasses must be used
instead.
"""
def __init__(self):
"""Constructor."""
self._types = {}
self._enums = collections.OrderedDict()
self._structs = collections.OrderedDict()
self._functions = collections.OrderedDict()
self._params = {}
def parse(self, filename):
"""Parse XML.
Returns an instance of generator.Model.Interface containing parsed
interface or raises ParseError if input XML contains errors
and can't be parsed.
Keyword arguments:
filename -- name of input XML file.
"""
tree = xml.etree.ElementTree.parse(filename)
root = tree.getroot()
self._enums = self._initialize_enums()
self._structs = collections.OrderedDict()
self._functions = collections.OrderedDict()
self._params = {}
self._types = dict(self._enums.items())
self._parse_root(root)
return Model.Interface(enums=self._enums, structs=self._structs,
functions=self._functions, params=self._params)
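    # Illustrative sketch (the file name and XML content below are assumptions,
    # and a concrete subclass of Parser must be used): given an interface
    # definition such as
    #
    #   <interface name="Test">
    #       <enum name="Result"><element name="SUCCESS"/></enum>
    #   </interface>
    #
    # a parse call would look like
    #
    #   interface = ConcreteParser().parse('interface.xml')
    #   interface.enums['Result'].elements['SUCCESS']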
def _initialize_enums(self):
"""Initialize enums.
The default implementation returns an OrderedDict with two empty
enums: "FunctionID" and "messageType". Required for formats where
        these enums must be generated automatically according to the functions
        declared in the XML.
These enums are filled during the parsing of the functions.
"""
return collections.OrderedDict(
[("FunctionID", Model.Enum(name="FunctionID")),
("messageType", Model.Enum(name="messageType"))])
def _check_enum_name(self, enum):
"""Check enum name.
This method is called to check whether the newly parsed enum's name
conflicts with some predefined enum.
This implementation raises an error if enum name is one of the
predefined enums "FunctionID" or "messageType" which must not be
declared explicitly in the XML.
"""
if enum.name in ["FunctionID", "messageType"]:
raise ParseError(
"Enum '" + enum.name +
"' is generated automatically in SDLRPCV1 and"
" must not be declared in xml file")
def _check_function_param_name(self, function_param_name):
"""Check function param name.
This method is called to check whether the newly parsed function
parameter name conflicts with some predefined name.
        This implementation doesn't check anything because there are no
        predefined names in base RPC XML.
"""
pass
def _parse_root(self, root):
"""Parse root XML element.
Default implementation parses root as interface element without a
prefix.
Keyword arguments:
root -- root element.
"""
self._parse_interface(root, "")
def _parse_interface(self, interface, prefix):
"""Parse interface element.
Keyword arguments:
interface -- interface element.
prefix -- string prefix for all types of the interface.
"""
if interface.tag != "interface":
raise ParseError("Invalid interface tag: " + interface.tag)
params, subelements, attrib = self._parse_base_item(interface, "")
for param in ["description", "design_description", "todos"]:
if 0 != len(params[param]):
attrib[param] = "\n".join(params[param])
if 0 != len(params["issues"]):
attrib["issues"] = "\n".join(i.value for i in params["issues"])
self._params = dict(
self._params.items() +
[(prefix + p[0], p[1]) for p in attrib.items()])
for element in subelements:
if element.tag == "enum":
enum = self._parse_enum(element, prefix)
self._check_enum_name(enum)
self._add_item(self._enums, enum)
self._add_type(enum)
elif element.tag == "struct":
struct = self._parse_struct(element, prefix)
self._add_item(self._structs, struct)
self._add_type(struct)
elif element.tag == "function":
function = self._parse_function(element, prefix)
self._add_item(self._functions, function,
(function.function_id, function.message_type))
else:
raise ParseError("Unexpected element: " + element.tag)
@staticmethod
def _add_item(items, item, key=None):
"""Add new item in the items dictionary with given key.
        Performs an additional check for presence in the dictionary and throws
        a ParseError exception if the key already exists.
"""
if key is None:
key = item.name
if key in items:
raise ParseError(type(item).__name__ + " '" + str(key) +
"' is declared more than once")
items[key] = item
def _add_type(self, _type):
"""Add new type in the internal types dictionary.
        Performs an additional check for presence of a type with the same name
        in the dictionary and throws a ParseError exception if the key already
        exists.
"""
if _type.name in self._types:
raise ParseError("Type '" + _type.name +
"' is declared as both struct and enum")
self._types[_type.name] = _type
def _parse_enum(self, element, prefix):
"""Parse element as enumeration.
Returns an instance of generator.Model.Enum
"""
params, subelements, attributes = \
self._parse_base_item(element, prefix)
internal_scope = None
scope = None
for attribute in attributes:
if attribute == "internal_scope":
internal_scope = attributes[attribute]
elif attribute == "scope":
scope = attributes[attribute]
else:
raise ParseError("Unexpected attribute '" + attribute +
"' in enum '" + params["name"] + "'")
params["internal_scope"] = internal_scope
params["scope"] = scope
elements = collections.OrderedDict()
for subelement in subelements:
if subelement.tag == "element":
self._add_item(elements, self._parse_enum_element(subelement))
else:
raise ParseError("Unexpected element '" + subelement.tag +
"' in enum '" + params["name"] + "'")
params["elements"] = elements
# Magic usage is correct
# pylint: disable=W0142
return Model.Enum(**params)
def _parse_struct(self, element, prefix):
"""Parse element as structure.
Returns an instance of generator.Model.Struct
"""
params, subelements, attrib = self._parse_base_item(element, prefix)
scope = None
for attribute in attrib:
if attribute == "scope":
scope = attrib[attribute]
else:
raise ParseError("Unexpected attribute '" + attribute +
"' in struct '" + params["name"] + "'")
params["scope"] = scope
members = collections.OrderedDict()
for subelement in subelements:
if subelement.tag == "param":
self._add_item(members, self._parse_param(subelement, prefix))
else:
raise ParseError("Unexpected subelement '" + subelement.name +
"' in struct '" + params["name"] + "'")
params["members"] = members
# Magic usage is correct
# pylint: disable=W0142
return Model.Struct(**params)
def _parse_function(self, element, prefix):
"""Parse element as function.
Returns an instance of generator.Model.Function
"""
params, subelements, attributes = \
self._parse_base_item(element, prefix)
function_id, message_type = self._parse_function_id_type(
params["name"],
attributes)
scope = None
for attribute in attributes:
if attribute == "scope":
scope = attributes[attribute]
else:
raise ParseError("Unexpected attribute '" + attribute +
"' in function '" + params["name"] + "'")
params["function_id"] = function_id
params["message_type"] = message_type
params["scope"] = scope
function_params = collections.OrderedDict()
for subelement in subelements:
if subelement.tag == "param":
function_param = self._parse_function_param(subelement,
prefix)
self._check_function_param_name(function_param.name)
if function_param.name in function_params:
raise ParseError("Parameter '" + function_param.name +
"' is specified more than once" +
" for function '" + params["name"] + "'")
function_params[function_param.name] = function_param
else:
raise ParseError("Unexpected subelement '" + subelement.tag +
"' in function '" + params["name"] + "'")
params["params"] = function_params
# Magic usage is correct
# pylint: disable=W0142
return Model.Function(**params)
def _parse_function_id_type(self, function_name, attrib):
"""Parse function id and message type according to XML format.
This implementation takes function name as function id and extracts
attribute "messagetype" as message type and searches them in enums
"FunctionID" and "messageType" adding the missing elements if
necessary.
Returns function id and message type as an instances of EnumElement.
"""
if "messagetype" not in attrib:
raise ParseError("No messagetype specified for function '" +
function_name + "'")
function_id = self._provide_enum_element_for_function(
"FunctionID",
function_name)
message_type = self._provide_enum_element_for_function(
"messageType",
self._extract_attrib(attrib, "messagetype"))
return function_id, message_type
def _provide_enum_element_for_function(self, enum_name, element_name):
"""Provide enum element for functions.
Search an element in an enum and add it if it is missing.
Returns EnumElement.
"""
if enum_name not in self._types:
raise ParseError("Enum '" + enum_name +
"' is not initialized")
enum = self._types[enum_name]
if not isinstance(enum, Model.Enum):
raise ParseError("'" + enum_name + "' is not an enum")
if element_name not in enum.elements:
enum.elements[element_name] = Model.EnumElement(name=element_name)
return enum.elements[element_name]
def _parse_base_item(self, element, prefix):
"""Parse element as base item.
        Returns params, sub-elements and attributes of the element
"""
params = {}
description = []
design_description = []
issues = []
todos = []
subelements = []
if "name" not in element.attrib:
raise ParseError("Name is not specified for " + element.tag)
params["name"] = prefix + element.attrib["name"]
attrib = dict(element.attrib.items())
del attrib["name"]
params["platform"] = self._extract_attrib(attrib, "platform")
for subelement in element:
if subelement.tag == "description":
description.append(self._parse_simple_element(subelement))
elif subelement.tag == "designdescription":
design_description.append(
self._parse_simple_element(subelement))
elif subelement.tag == "todo":
todos.append(self._parse_simple_element(subelement))
elif subelement.tag == "issue":
issues.append(self._parse_issue(subelement))
else:
subelements.append(subelement)
params["description"] = description
params["design_description"] = design_description
params["issues"] = issues
params["todos"] = todos
return params, subelements, attrib
@staticmethod
def _parse_simple_element(element):
"""Parse element as simple element and returns it's text.
Element is simple when it contains no subelements and attributes.
Returns element text if present or empty string if not
"""
if len(element) != 0:
raise ParseError("Unexpected subelements in '" +
element.tag + "'")
if len(element.attrib) != 0:
raise ParseError("Unexpected attributes in '" +
element.tag + "'")
return element.text if element.text is not None else ""
@staticmethod
def _parse_issue(element):
"""Parse element as issue.
Issue must not contain subelements and attributes.
Returns an instance of generator.Model.Issue
"""
if len(element) != 0:
raise ParseError("Unexpected subelements in issue")
if "creator" not in element.attrib:
raise ParseError("No creator in issue")
if len(element.attrib) != 1:
raise ParseError("Unexpected attributes in issue")
return Model.Issue(
creator=element.attrib["creator"],
value=element.text if element.text is not None else "")
def _parse_enum_element(self, element):
"""Parse element as element of enumeration.
Returns an instance of generator.Model.EnumElement
"""
params, subelements, attributes = self._parse_base_item(element, "")
if len(subelements) != 0:
raise ParseError("Unexpected subelements in enum element")
self._ignore_attribute(attributes, "hexvalue")
internal_name = None
value = None
for attribute in attributes:
if attribute == "internal_name":
internal_name = attributes[attribute]
elif attribute == "value":
try:
value = int(attributes[attribute])
except:
raise ParseError("Invalid value for enum element: '" +
attributes[attribute] + "'")
else:
raise ParseError("Unexpected attribute '" +
attribute + "' in enum element")
params["internal_name"] = internal_name
params["value"] = value
# Magic usage is correct
# pylint: disable=W0142
return Model.EnumElement(**params)
def _parse_param(self, element, prefix):
"""Parse element as structure parameter.
Returns an instance of generator.Model.Param
"""
params, subelements, attrib = \
self._parse_param_base_item(element, prefix)
if len(attrib) != 0:
raise ParseError("""Unknown attribute(s) {0} in param {1}
""".format(attrib, params["name"]))
if len(subelements) != 0:
raise ParseError("Unknown subelements in param '" +
params["name"] + "'")
# Magic usage is correct
# pylint: disable=W0142
return Model.Param(**params)
def _parse_function_param(self, element, prefix):
"""Parse element as function parameter.
Returns an instance of generator.Model.FunctionParam
"""
params, subelements, attrib = \
self._parse_param_base_item(element, prefix)
default_value = None
default_value_string = self._extract_attrib(attrib, "defvalue")
if default_value_string is not None:
param_type = params["param_type"]
if type(param_type) is Model.Boolean:
default_value = \
self._get_bool_from_string(default_value_string)
elif type(param_type) is Model.Integer:
try:
default_value = int(default_value_string)
except:
raise ParseError("Invalid value for integer: '" +
default_value_string + "'")
elif type(param_type) is Model.Double:
try:
default_value = float(default_value_string)
except:
raise ParseError("Invalid value for float: '" +
default_value_string + "'")
elif type(param_type) is Model.String:
default_value = default_value_string
elif type(param_type) is Model.Enum or \
type(param_type) is Model.EnumSubset:
if type(param_type) is Model.EnumSubset:
allowed_elements = param_type.allowed_elements
else:
allowed_elements = param_type.elements
if default_value_string not in allowed_elements:
raise ParseError("Default value '" + default_value_string +
"' for parameter '" + params["name"] +
"' is not a member of " +
type(param_type).__name__ +
"'" + params["name"] + "'")
default_value = allowed_elements[default_value_string]
else:
raise ParseError("Default value specified for " +
type(param_type).__name__)
params["default_value"] = default_value
if len(attrib) != 0:
raise ParseError("Unexpected attributes in parameter '" +
params["name"] + "'")
if len(subelements) != 0:
raise ParseError("Unexpected subelements in parameter '" +
params["name"] + "'")
# Magic usage is correct
# pylint: disable=W0142
return Model.FunctionParam(**params)
def _parse_param_base_item(self, element, prefix):
"""Parse base param items.
Returns params, other subelements and attributes.
"""
params, subelements, attrib = self._parse_base_item(element, "")
params["is_mandatory"] = self._extract_optional_bool_attrib(
attrib, "mandatory", True)
scope = self._extract_attrib(attrib, "scope")
if scope is not None:
params["scope"] = scope
self._ignore_attribute(attrib, "defvalue")
param_type = None
type_name = self._extract_attrib(attrib, "type")
if type_name is None:
raise ParseError("Type is not specified for parameter '" +
params["name"] + "'")
if type_name == "Boolean":
param_type = Model.Boolean()
elif type_name == "Integer" or \
type_name == "Float":
min_value = self._extract_optional_number_attrib(
attrib, "minvalue", int if type_name == "Integer" else float)
max_value = self._extract_optional_number_attrib(
attrib, "maxvalue", int if type_name == "Integer" else float)
param_type = \
(Model.Integer if type_name == "Integer" else Model.Double)(
min_value=min_value,
max_value=max_value)
elif type_name == "String":
min_length = self._extract_optional_number_attrib(
attrib, "minlength")
# if minlength is not defined default value is 1
if min_length is None:
min_length = 1
max_length = self._extract_optional_number_attrib(
attrib, "maxlength")
param_type = Model.String(min_length=min_length, max_length=max_length)
else:
if 1 == type_name.count("."):
custom_type_name = type_name.replace(".", "_")
else:
custom_type_name = prefix + type_name
if custom_type_name in self._types:
param_type = self._types[custom_type_name]
else:
raise ParseError("Unknown type '" + type_name + "'")
if self._extract_optional_bool_attrib(attrib, "array", False):
min_size = self._extract_optional_number_attrib(attrib,
"minsize")
max_size = self._extract_optional_number_attrib(attrib,
"maxsize")
param_type = Model.Array(element_type=param_type,
min_size=min_size,
max_size=max_size)
base_type = \
param_type.element_type if isinstance(param_type, Model.Array) \
else param_type
other_subelements = []
for subelement in subelements:
if subelement.tag == "element":
if type(base_type) is not Model.Enum and \
type(base_type) is not Model.EnumSubset:
raise ParseError("Elements specified for parameter '" +
params["name"] + "' of type " +
type(base_type).__name__)
if type(base_type) is Model.Enum:
base_type = Model.EnumSubset(
name=params["name"],
enum=base_type,
description=params["description"],
design_description=params["design_description"],
issues=params["issues"],
todos=params["todos"],
allowed_elements={})
if "name" not in subelement.attrib:
raise ParseError(
"Element name is not specified for parameter '" +
params["name"] + "'")
element_name = subelement.attrib["name"]
if len(subelement.attrib) != 1:
raise ParseError("Unexpected attributes for element '" +
element_name + "' of parameter '" +
params["name"])
if len(subelement.getchildren()) != 0:
raise ParseError("Unexpected subelements for element '" +
element_name + "' of parameter '" +
params["name"])
if element_name in base_type.allowed_elements:
raise ParseError("Element '" + element_name +
"' is specified more than once for" +
" parameter '" + params["name"] + "'")
if element_name not in base_type.enum.elements:
raise ParseError("Element '" + element_name +
"' is not a member of enum '" +
base_type.enum.name + "'")
base_type.allowed_elements[element_name] = \
base_type.enum.elements[element_name]
else:
other_subelements.append(subelement)
if isinstance(param_type, Model.Array):
param_type.element_type = base_type
else:
param_type = base_type
params["param_type"] = param_type
return params, other_subelements, attrib
def _extract_optional_bool_attrib(self, attrib, name, default):
"""Extract boolean attribute with given name.
Returns value of the attribute.
"""
value = self._extract_attrib(attrib, name)
if value is None:
value = default
else:
value = self._get_bool_from_string(value)
return value
def _extract_optional_number_attrib(self, attrib, name, _type=int):
"""Extract number attribute with given name.
Returns value of the attribute.
"""
value = self._extract_attrib(attrib, name)
if value is not None:
try:
value = _type(value)
except:
raise ParseError("Invlaid value for " + _type.__name__ +
": '" + value + "'")
return value
@staticmethod
def _extract_attrib(attrib, name):
"""Extract attribute with given name.
Returns value of the attribute.
"""
value = None
if name in attrib:
value = attrib[name]
del attrib[name]
return value
@staticmethod
def _get_bool_from_string(bool_string):
"""Convert string representation of boolean to real bool value.
Returns converted value.
"""
value = None
if bool_string in ['0', 'false']:
value = False
elif bool_string in ['1', 'true']:
value = True
else:
raise ParseError("Invalid value for bool: '" +
bool_string + "'")
return value
def _ignore_attribute(self, attrib, name):
"""To be called when attribute is meaningless in terms
        of code generation but its presence is not an issue.
Removes this attribute from attribute list.
"""
if name in attrib:
del attrib[name]
print ("Ignoring attribute '" +
name + "'")
return True
| lgpl-2.1 | -8,461,146,256,486,704,000 | 34.795424 | 83 | 0.537487 | false |
valeros/platformio | platformio/builder/scripts/frameworks/energia.py | 1 | 2038 | # Copyright 2014-2016 Ivan Kravets <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Energia
Energia Wiring-based framework enables pretty much anyone to start easily
creating microcontroller-based projects and applications. Its easy-to-use
libraries and functions provide developers of all experience levels to start
blinking LEDs, buzzing buzzers and sensing sensors more quickly than ever
before.
http://energia.nu/reference/
"""
from os.path import join
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
env.Replace(
PLATFORMFW_DIR=join("$PIOPACKAGES_DIR", "framework-energia${PLATFORM[2:]}")
)
ENERGIA_VERSION = int(
open(join(env.subst("$PLATFORMFW_DIR"),
"version.txt")).read().replace(".", "").strip())
# include board variant
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkEnergiaVariant"),
join("$PLATFORMFW_DIR", "variants", "${BOARD_OPTIONS['build']['variant']}")
)
env.Append(
CPPDEFINES=[
"ARDUINO=101",
"ENERGIA=%d" % ENERGIA_VERSION
],
CPPPATH=[
join("$BUILD_DIR", "FrameworkEnergia"),
join("$BUILD_DIR", "FrameworkEnergiaVariant")
]
)
if env.get("BOARD_OPTIONS", {}).get("build", {}).get("core") == "lm4f":
env.Append(
LINKFLAGS=["-Wl,--entry=ResetISR"]
)
#
# Target: Build Core Library
#
libs = []
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "FrameworkEnergia"),
join("$PLATFORMFW_DIR", "cores", "${BOARD_OPTIONS['build']['core']}")
))
env.Append(LIBS=libs)
| apache-2.0 | 4,873,976,666,905,393,000 | 26.540541 | 79 | 0.694799 | false |
mdaif/olympia | apps/compat/views.py | 1 | 7432 | import json
import re
from django import http
from django.db.models import Count
from django.shortcuts import redirect, render
from django.views.decorators.csrf import csrf_exempt
from tower import ugettext as _
import amo
import amo.utils
from addons.decorators import owner_or_unlisted_reviewer
from amo.decorators import post_required
from amo.utils import urlparams
from amo.urlresolvers import reverse
from addons.models import Addon
from search.utils import floor_version
from versions.compare import version_dict as vdict, version_int as vint
from .models import CompatReport, AppCompat, CompatTotals
from .forms import AppVerForm, CompatForm
def index(request, version=None):
template = 'compat/index.html'
COMPAT = [v for v in amo.COMPAT if v['app'] == request.APP.id]
compat_dict = dict((v['main'], v) for v in COMPAT)
if not COMPAT:
return render(request, template, {'results': False})
if version not in compat_dict:
return http.HttpResponseRedirect(reverse('compat.index',
args=[COMPAT[0]['main']]))
qs = AppCompat.search()
binary = None
initial = {'appver': '%s-%s' % (request.APP.id, version), 'type': 'all'}
initial.update(request.GET.items())
form = CompatForm(initial)
if request.GET and form.is_valid():
if form.cleaned_data['appver']:
app, ver = form.cleaned_data['appver'].split('-')
if int(app) != request.APP.id or ver != version:
new = reverse('compat.index', args=[ver], add_prefix=False)
url = '/%s%s' % (amo.APP_IDS[int(app)].short, new)
type_ = form.cleaned_data['type'] or None
return http.HttpResponseRedirect(urlparams(url, type=type_))
if form.cleaned_data['type'] != 'all':
binary = form.cleaned_data['type'] == 'binary'
compat, app = compat_dict[version], str(request.APP.id)
compat_queries = (
('prev', qs.query(**{
'top_95.%s.%s' % (app, vint(compat['previous'])): True,
'support.%s.max__gte' % app: vint(compat['previous'])})),
('top_95', qs.query(**{'top_95_all.%s' % app: True})),
('all', qs),
)
compat_levels = [(key, version_compat(queryset, compat, app, binary))
for key, queryset in compat_queries]
usage_addons, usage_total = usage_stats(request, compat, app, binary)
return render(request, template,
{'version': version, 'usage_addons': usage_addons,
'usage_total': usage_total, 'compat_levels': compat_levels,
'form': form, 'results': True,
'show_previous': request.GET.get('previous')})
def version_compat(qs, compat, app, binary):
facets = []
for v, prev in zip(compat['versions'], (None,) + compat['versions']):
d = {'from': vint(v)}
if prev:
d['to'] = vint(prev)
facets.append(d)
# Pick up everything else for an Other count.
facets.append({'to': vint(compat['versions'][-1])})
facet = {'range': {'support.%s.max' % app: facets}}
if binary is not None:
qs = qs.query(binary=binary)
qs = qs.facet(by_status=facet)
result = qs[:0].raw()
total_addons = result['hits']['total']
ranges = result['facets']['by_status']['ranges']
titles = compat['versions'] + (_('Other'),)
faceted = [(v, r['count']) for v, r in zip(titles, ranges)]
return total_addons, faceted
def usage_stats(request, compat, app, binary=None):
# Get the list of add-ons for usage stats.
qs = AppCompat.search().order_by('-usage.%s' % app).values_dict()
if request.GET.get('previous'):
qs = qs.filter(**{
'support.%s.max__gte' % app: vint(compat['previous'])})
else:
qs = qs.filter(**{'support.%s.max__gte' % app: 0})
if binary is not None:
qs = qs.filter(binary=binary)
addons = amo.utils.paginate(request, qs)
for obj in addons.object_list:
obj['usage'] = obj['usage'][app]
obj['max_version'] = obj['max_version'][app]
return addons, CompatTotals.objects.get(app=app).total
@csrf_exempt
@post_required
def incoming(request):
# Turn camelCase into snake_case.
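    # e.g. 'worksProperly' -> 'works_properly'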
def snake_case(s):
        return re.sub('[A-Z]+', r'_\g<0>', s).lower()
try:
data = [(snake_case(k), v)
for k, v in json.loads(request.body).items()]
except Exception:
return http.HttpResponseBadRequest()
# Build up a new report.
report = CompatReport(client_ip=request.META.get('REMOTE_ADDR', ''))
fields = CompatReport._meta.get_all_field_names()
for key, value in data:
if key in fields:
setattr(report, key, value)
else:
return http.HttpResponseBadRequest()
report.save()
return http.HttpResponse(status=204)
def reporter(request):
query = request.GET.get('guid')
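    # Resolve the query to an add-on by pk, then slug, then guid, and finally
    # fall back to matching existing compat reports by guid prefix.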
if query:
qs = None
if query.isdigit():
qs = Addon.with_unlisted.filter(id=query)
if not qs:
qs = Addon.with_unlisted.filter(slug=query)
if not qs:
qs = Addon.with_unlisted.filter(guid=query)
if not qs and len(query) > 4:
qs = CompatReport.objects.filter(guid__startswith=query)
if qs:
guid = qs[0].guid
addon = Addon.with_unlisted.get(guid=guid)
if addon.is_listed or owner_or_unlisted_reviewer(request, addon):
return redirect('compat.reporter_detail', guid)
addons = (Addon.with_unlisted.filter(authors=request.user)
if request.user.is_authenticated() else [])
return render(request, 'compat/reporter.html',
dict(query=query, addons=addons))
def reporter_detail(request, guid):
try:
addon = Addon.with_unlisted.get(guid=guid)
except Addon.DoesNotExist:
addon = None
name = addon.name if addon else guid
qs = CompatReport.objects.filter(guid=guid)
if (addon and not addon.is_listed and
not owner_or_unlisted_reviewer(request, addon)):
# Not authorized? Let's pretend this addon simply doesn't exist.
name = guid
qs = CompatReport.objects.none()
form = AppVerForm(request.GET)
if request.GET and form.is_valid() and form.cleaned_data['appver']:
# Apply filters only if we have a good app/version combination.
app, ver = form.cleaned_data['appver'].split('-')
app = amo.APP_IDS[int(app)]
ver = vdict(floor_version(ver))['major'] # 3.6 => 3
# Ideally we'd have a `version_int` column to do strict version
# comparing, but that's overkill for basic version filtering here.
qs = qs.filter(app_guid=app.guid,
app_version__startswith=str(ver) + '.')
works_ = dict(qs.values_list('works_properly').annotate(Count('id')))
works = {'success': works_.get(True, 0), 'failure': works_.get(False, 0)}
works_properly = request.GET.get('works_properly')
if works_properly:
qs = qs.filter(works_properly=works_properly)
reports = amo.utils.paginate(request, qs.order_by('-created'), 100)
return render(request, 'compat/reporter_detail.html',
dict(reports=reports, works=works,
works_properly=works_properly,
name=name, guid=guid, form=form))
| bsd-3-clause | 887,170,371,341,290,900 | 37.507772 | 78 | 0.606432 | false |
jonnybazookatone/ADSDeploy_priv | ADSDeploy/config.py | 1 | 2953 | # Connection to the database where we save orcid-claims (this database
# serves as a running log of claims and storage of author-related
# information). It is not consumed by others (ie. we 'push' results)
# SQLALCHEMY_URL = 'postgres://docker:docker@localhost:6432/docker'
SQLALCHEMY_URL = 'sqlite:///'
SQLALCHEMY_ECHO = False
# Configuration of the pipeline; if you start 'vagrant up rabbitmq'
# container, the port is localhost:8072 - but for production, you
# want to point to the ADSImport pipeline
RABBITMQ_URL = 'amqp://guest:[email protected]:6672/?' \
'socket_timeout=10&backpressure_detection=t'
# possible values: WARN, INFO, DEBUG
LOGGING_LEVEL = 'DEBUG'
POLL_INTERVAL = 15 # per-worker poll interval (to check health) in seconds.
# All work we do is concentrated into one exchange (the queues are marked
# by topics, e.g. ads.worker.claims); The queues will be created automatically
# based on the workers' definition. If 'durable' = True, it means that the
# queue is created as permanent *AND* the worker will publish 'permanent'
# messages. Ie. if rabbitmq goes down/restarted, the uncomsumed messages will
# still be there. For an example of a config, see:
# https://github.com/adsabs/ADSOrcid/blob/master/ADSOrcid/config.py#L53
EXCHANGE = 'ADSDeploy'
WORKERS = {
'errors.ErrorHandler': {
'subscribe': None,
'exchange': None,
'publish': None,
'durable': False
}
}
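# A durable worker entry would look like this (illustrative names only):
# 'deploy.Deployer': {
#     'subscribe': 'ads.deploy.requests',
#     'publish': 'ads.deploy.status',
#     'exchange': 'ADSDeploy',
#     'durable': True
# }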
# Web Application configuration parameters
WEBAPP_URL = '172.17.0.1:9000'
GITHUB_SIGNATURE_HEADER = 'X-Hub-Signature'
GITHUB_SECRET = 'redacted'
GITHUB_COMMIT_API = 'https://api.github.com/repos/adsabs/{repo}/git/commits/{hash}'
GITHUB_TAG_FIND_API = 'https://api.github.com/repos/adsabs/{repo}/git/refs/tags/{tag}'
GITHUB_TAG_GET_API = 'https://api.github.com/repos/adsabs/{repo}/git/tags/{hash}'
AWS_REGION = 'us-east-1'
AWS_ACCESS_KEY = 'redacted'
AWS_SECRET_KEY = 'redacted'
WATCHED_REPOS = [
'adsws',
'solr-service',
'export_service',
'graphics_service',
'recommender_service',
'citation_helper_service',
'metrics_service',
'vis-services',
'biblib-service',
'orcid-service',
'myads',
'object_service',
'harbour-service'
]
DEPLOY_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '%(levelname)s\t%(process)d '
'[%(asctime)s]:\t%(message)s',
'datefmt': '%m/%d/%Y %H:%M:%S',
}
},
'handlers': {
'console': {
'formatter': 'default',
'level': 'DEBUG',
'class': 'logging.StreamHandler'
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
# Include here any other configuration options. These will be made available
# to workers via app.config | gpl-3.0 | 3,990,024,058,038,431,000 | 30.425532 | 86 | 0.633932 | false |
ajhager/copycat | copycat/workspace/bond.py | 1 | 13667 | # Copyright (c) 2007-2017 Joseph Hager.
#
# Copycat is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License,
# as published by the Free Software Foundation.
#
# Copycat is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Copycat; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Bond"""
import math
import copycat.toolbox as toolbox
from copycat.workspace import Structure, Mapping
class Bond(Structure):
"""Bond
Attributes:
bond_category:
direction_category:
from_object:
to_object:
bond_facet: Which facet is being related.
from_object_descriptor:
to_object_descriptor:"""
def __init__(self, workspace, from_object, to_object, bond_category,
bond_facet, from_object_descriptor, to_object_descriptor):
"""Initialize Bond."""
super(Bond, self).__init__()
self.workspace = workspace
self.slipnet = self.workspace.slipnet
if from_object.left_string_position < to_object.left_string_position:
self.direction_category = self.slipnet.plato_right
self.left_object = from_object
self.right_object = to_object
else:
self.direction_category = self.slipnet.plato_left
self.left_object = to_object
self.right_object = from_object
if bond_category == self.slipnet.plato_sameness:
self.direction_category = None
self.left_string_position = min(from_object.left_string_position,
to_object.left_string_position)
self.right_string_position = max(from_object.right_string_position,
to_object.right_string_position)
self.proposal_level = None
self.string = from_object.string
self.structure_category = Bond
self.bond_category = bond_category
self.bond_facet = bond_facet
self.from_object = from_object
self.to_object = to_object
self.from_object_descriptor = from_object_descriptor
self.to_object_descriptor = to_object_descriptor
def __eq__(self, other):
"""Return True if this and the given bond represent the same bond."""
if other is None or not isinstance(other, Bond):
return False
return all([self.from_object == other.from_object,
self.to_object == other.to_object,
self.bond_category == other.bond_category,
self.direction_category == other.direction_category])
def __hash__(self):
return hash((self.from_object, self.to_object,
self.bond_category, self.direction_category))
def calculate_external_strength(self):
"""Return the bond's external strength."""
return self.local_support()
def calculate_internal_strength(self):
"""Bonds between objects of the same type are stronger than bonds
between different types. Letter category bonds are stronger than other
types of bonds. A more general mechanism is needed."""
if type(self.from_object) is type(self.to_object):
member_compatibility_factor = 1.0
else:
member_compatibility_factor = .7
if self.bond_facet == self.slipnet.plato_letter_category:
bond_facet_factor = 1.0
else:
bond_facet_factor = .7
degree_of_association = self.bond_category.bond_degree_of_association()
return min(100, round(member_compatibility_factor * \
bond_facet_factor * degree_of_association))
def choose_left_neighbor(self):
"""Return one of the left neighbors of the bond chosen by salience."""
if self.is_leftmost_in_string():
return None
left_neighbors = []
for left_neighbor_object in self.left_object.all_left_neighbors():
left1 = left_neighbor_object.string_number
left2 = self.left_object.string_number
possible_left_neighbor = self.string.left_right_bonds.get((left1, left2))
if possible_left_neighbor != None:
left_neighbors.append(possible_left_neighbor)
saliences = [neighbor.salience() for neighbor in left_neighbors]
return toolbox.weighted_select(saliences, left_neighbors)
def choose_right_neighbor(self):
"""Return one of the right neighbors of the bond chosen by salience."""
if self.is_rightmost_in_string():
return None
right_neighbors = []
for right_neighbor_object in self.right_object.all_right_neighbors():
right1 = self.right_object.string_number
right2 = right_neighbor_object.string_number
possible_right_neighbor = self.string.left_right_bonds.get((right1, right2))
if possible_right_neighbor != None:
right_neighbors.append(possible_right_neighbor)
saliences = [neighbor.salience() for neighbor in right_neighbors]
return toolbox.weighted_select(saliences, right_neighbors)
def happiness(self):
"""Return the happiness of the bond."""
if self.group != None:
return self.group.total_strength
return 0
def unhappiness(self):
"""Return the unhappiness of the bond."""
return 100 - self.happiness()
def salience(self):
"""Return the salience of the bond."""
return round(toolbox.average(self.importance(), self.unhappiness()))
def has_members(self, object1, object2):
        """Return True if the two objects are the objects in this bond."""
objects = [self.from_object, self.to_object]
return object1 in objects and object2 in objects
def importance(self):
"""Sameness bonds are more important than other bonds of other
categories."""
if self.bond_category == self.slipnet.plato_sameness:
return 100
return 50
def incompatible_bonds(self):
"""Return the bonds that are incompatible with the bond."""
return list(set([self.left_object.right_bond,
self.right_object.left_bond]) - set([None]))
def incompatible_correspondences(self):
"""Return the correspondences that are incompatible with this bond. This
only applies to directed bonds and to correspondences between objects
        at the edges of strings. E.g., in "abc -> abd, pqrs -> ?", if there is
        a correspondence between the "a" and the "p" (with concept mapping
        "leftmost -> leftmost"), and a right-going successor bond from the "a"
        to the "b" in "abc", then the correspondence will be incompatible with
        a left-going predecessor bond from the "q" to the "p" in "pqrs",
        because the correspondence would then imply both "leftmost -> leftmost"
        (the letters) and "right -> left" (the bonds)."""
incompatible_correspondences = []
if self.is_leftmost_in_string():
correspondence = self.left_object.correspondence
if not correspondence:
return []
other_object = correspondence.other_object(self.left_object)
elif self.is_rightmost_in_string():
correspondence = self.right_object.correspondence
if not correspondence:
return []
other_object = correspondence.other_object(self.right_object)
else:
return []
plato_string_position_category = self.slipnet.plato_string_position_category
string_position_category_mapping = None
for mapping in correspondence.get_concept_mappings():
if mapping.description_type1 == plato_string_position_category:
string_position_category_mapping = mapping
if string_position_category_mapping is None:
return []
if other_object.is_leftmost_in_string():
other_bond = other_object.right_bond
elif other_object.is_rightmost_in_string():
other_bond = other_object.left_bond
else:
return []
if not other_bond:
return []
if other_bond.direction_category is None:
return []
mapping = Mapping(self.workspace,
self.slipnet.plato_direction_category,
self.slipnet.plato_direction_category,
self.direction_category,
other_bond.direction_category,
None, None)
if mapping.is_incompatible_concept_mapping(string_position_category_mapping):
incompatible_correspondences.append(correspondence)
return incompatible_correspondences
def is_in_group(self, group):
"""Return True if the bond is in the given group."""
objects = group.objects
return self.from_object in objects and self.to_object in objects
def is_proposed(self):
"""Return True if proposal level is less than the level for built
structures."""
return self.proposal_level < self.workspace.built
def is_leftmost_in_string(self):
"""Return True if the bond is on the left edge of the string."""
return self.left_string_position == 0
def is_rightmost_in_string(self):
"""Return True if the bond is on the right edge of the string."""
return self.right_string_position == self.string.length - 1
def flipped_version(self):
"""Return the flipped version of this bond.
For example, if the bond is a successor bond going to the right,
returns a predecessor bond going to the left using the same two
objects."""
category = self.slipnet.get_related_node(self.bond_category,
self.slipnet.plato_opposite)
flipped_bond = Bond(self.workspace, self.to_object, self.from_object,
category, self.bond_facet, self.to_object_descriptor,
self.from_object_descriptor)
flipped_bond.proposal_level = self.proposal_level
return flipped_bond
def letter_span(self):
"""Return the number of letters spanned by the bond. This is 2 if the
objects are not groups; otherwise it is the sum of the lengths of the
groups."""
return self.from_object.letter_span() + self.to_object.letter_span()
def local_density(self):
"""Return a rough measure of the density in the string of bonds of the
same bond category and direction category as the given bond. This method
is used in calculating the external strength of a bond."""
def calc(direction):
"""Inner calculation."""
slot_sum = 0
support_sum = 0
method_name = 'choose_%s_neighbor' % direction
last_object = {'left': self.left_object,
'right': self.right_object}[direction]
next_object = getattr(last_object, method_name)()
while next_object:
slot_sum += 1
first = next_object.string_number
last = last_object.string_number
bond = self.string.left_right_bonds.get((first, last))
if bond:
if bond.bond_category == self.bond_category and \
bond.direction_category == self.direction_category:
support_sum += 1
last_object = next_object
next_object = getattr(next_object, method_name)()
return slot_sum, support_sum
slot_sum, support_sum = map(sum, zip(calc('left'), calc('right')))
if slot_sum == 0:
return 100
return round(100 * (support_sum / float(slot_sum)))
def local_support(self):
"""Return this bond's local support in the string."""
number = self.number_of_local_supporting_bonds()
if number == 0:
return 0
density = self.local_density()
adjusted_density = 100 * (math.sqrt(density / 100.0))
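        # number_factor approaches 1 as the count of supporting bonds grows
        # (e.g. 0.6 for one supporting bond, ~0.94 for two, ~0.98 for three).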
number_factor = min(1, .6 ** (1.0 / number ** 3))
return round(adjusted_density * number_factor)
def number_of_local_supporting_bonds(self):
"""Return the number of supporting bonds in the given bond's string.
Looks at all the other bonds in the string, counting bonds of the same
bond category and direction category. Does not take distance into
account; all qualifying bonds in the string are counted the same."""
number_of_supporting_bonds = 0
letter_distance = self.workspace.letter_distance
bonds = self.string.get_bonds()
if self in bonds:
bonds.remove(self)
        for bond in bonds:
if all([letter_distance(self.left_object, bond.left_object) != 0,
letter_distance(self.right_object, bond.right_object) != 0,
bond.bond_category == self.bond_category,
bond.direction_category == self.direction_category]):
number_of_supporting_bonds += 1
return number_of_supporting_bonds
| gpl-2.0 | -5,235,821,656,455,039,000 | 42.113565 | 88 | 0.615497 | false |
bopen/mariobros | mariobros/mario.py | 1 | 11792 | # -*- coding: utf-8 -*-
# python 2 support via python-future
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import bytes, dict, int, str
import atexit
import collections
import distutils.spawn
import importlib
import logging
import re
import shlex
import subprocess
import sys
import uuid
import future.utils
import luigi
from luigi.contrib.s3 import S3Target
import mako.template
LOGGER = logging.getLogger('luigi-interface')
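# Mako template used by print_namespaces() to dump the parsed configuration
# back out in MarioFile format: DEFAULT values first, then one block per task.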
TEMPLATE = """% for var_def, val_def in default_namespace.items():
% if var_def not in ['action_template', 'sources_repls', 'target_pattern']:
${var_def} = ${val_def}
%endif
% endfor
% if default_namespace['target_pattern']:
${default_namespace['target_pattern']}: ${default_namespace['sources_repls']}
${default_namespace['action_template']}
% endif
% for task_name, section_namespace in section_namespaces.items():
% if task_name != 'DEFAULT':
[${task_name}]
%for var, val in section_namespaces[task_name].items():
% if var not in ['action_template', 'sources_repls', 'target_pattern']:
${var} = ${val}
% endif
% endfor
${section_namespace['target_pattern']}: ${section_namespace['sources_repls']}
${section_namespace['action_template']}
% endif
% endfor
"""
def pretty_unicode(obj):
"""Filter to pretty print iterables."""
if not isinstance(obj, (str, bytes)):
try:
return ' '.join(str(item) for item in obj)
except TypeError:
pass
return str(obj)
class ExistingFile(luigi.ExternalTask):
"""Define Luigi External Task class for existing files requires."""
target = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.target)
class ReRuleTask(luigi.Task):
"""Define Luigi task class through regular expression.
"""
# target_pattern = ''
# sources_repls = []
# action_namespace = {}
# action_template = ''
    # SHELL = '/bin/bash'
@staticmethod
def factory(
name, target_pattern, sources_repls=(), action_template='', action_namespace={},
priority=0, worker_timeout=None, resources={}, disabled=False, dry_run_suffix='',
SHELL='/bin/bash'):
"""Create Luigi task class.
:param str name: Task name.
:param str target_pattern: Target pattern.
:param list sources_repls: List of source replacements.
:param str action_template: Action template.
:param dict action_namespace: Action namespace.
:param int priority: Priority Luigi task metadata.
:param int worker_timeout: Worker timeout Luigi task metadata.
:param dict resources: Resources Luigi task metadata.
:param bool disabled: Disabled Luigi task metadata.
:param unicode dry_run_suffix: Suffix to be added to file created during dry run.
:rtype: subclass_of_ReRuleTask
"""
# FIXME: move class init code to init method?
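        # '(?<![^/])' anchors the match at the start of a path component,
        # i.e. at the beginning of the target or right after a '/'.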
if not target_pattern.startswith('(?<![^/])'):
target_pattern = '(?<![^/])' + target_pattern
if not target_pattern.endswith('$'):
target_pattern += '$'
if action_template.strip() == '':
action_template = 'echo "${SOURCES} -> ${TARGET}"'
_target_pattern = re.compile(target_pattern)
return type(future.utils.native_str(name), (ReRuleTask,), locals())
@classmethod
def match(cls, target):
"""Perform target matching.
:rtype: bool
"""
return bool(cls._target_pattern.search(target))
target = luigi.Parameter()
def render_sources(self):
"""Perform rendering of the sources.
        :rtype: tuple
"""
return tuple(self._target_pattern.sub(repl, self.target) for repl in self.sources_repls)
def render_action(self):
"""Perform rendering of the action.
:rtype: str
"""
sources = self.render_sources()
match = self._target_pattern.match(self.target)
target_namespace = dict(TARGET=self.target, SOURCES=sources, MATCH=match)
return render_template(
self.action_template, target_namespace, default_namespace=self.action_namespace
)
def render_shell(self):
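        """Render the shell command template for this target.
        :rtype: str
        """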
sources = self.render_sources()
match = self._target_pattern.match(self.target)
target_namespace = dict(TARGET=self.target, SOURCES=sources, MATCH=match)
return render_template(
self.SHELL, target_namespace, default_namespace=self.action_namespace
)
def output(self):
"""
The output that this Task produces.
See :ref:`Task.output`
:rtype: luigi.LocalTarget
"""
if self.target.startswith('s3://'):
return S3Target(self.target)
else:
return luigi.LocalTarget(self.target + self.dry_run_suffix)
def requires(self):
"""
The Tasks that this Task depends on.
See :ref:`Task.requires`
:rtype: list
"""
required = []
for source in self.render_sources():
for task_rule in ReRuleTask.__subclasses__():
if task_rule.match(source):
required.append(task_rule(target=source))
break
else:
required.append(ExistingFile(source))
return required
def run(self):
"""
The task run method, to be overridden in a subclass.
See :ref:`Task.run`
"""
action = self.render_action()
if self.dry_run_suffix:
# log intended command line but touch the dry_run target instead
LOGGER.info(action)
action = 'touch ' + self.target + self.dry_run_suffix
# register the dry_run target removal at program exit
atexit.register(self.remove_dry_run_file)
args = ['/bin/bash', '-c', action]
else:
shell = self.render_shell()
args = shlex.split(shell) + ['-c', action]
# be sure to use the abspath of the executable based on the PATH environment variable
args[0] = distutils.spawn.find_executable(args[0])
LOGGER.info('COMMAND: {}'.format(args))
subprocess.check_call(args)
def remove_dry_run_file(self):
"""Remove files generated by dry run process."""
subprocess.call('rm -f ' + self.target + self.dry_run_suffix, shell=True)
def render_template(template, local_namespace, default_namespace={}):
"""Return the rendered template merging local and default namespaces.
:param unicode template: Template.
:param dict local_namespace: Local namespace.
:param dict default_namespace: Default namespace.
:rtype: str
"""
namespace = default_namespace.copy()
namespace.update(local_namespace)
if 'IMPORT_MODULES' in namespace:
import_modules = namespace['IMPORT_MODULES'].split()
namespace.update({name: importlib.import_module(name) for name in import_modules})
template_object = mako.template.Template(
template,
strict_undefined=True,
imports=['from mariobros.mario import pretty_unicode'], # enable the filter
default_filters=['pretty_unicode'],
)
return template_object.render(**namespace)
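# Example (illustrative): render_template('${TARGET}.gz', {'TARGET': 'file.txt'})
# returns 'file.txt.gz'.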
def render_namespace(namespace, default_namespace={}, skip_names=('action_template', 'SHELL')):
"""Return Render section namespaces with default section namespaces also.
:param dict namespace: Section namespace.
:param dict default_namespace: default section namespace.
:param list skip_names: Namespace names to skip in the render process.
:rtype: dict
"""
torender_namespace = {k: v for k, v in namespace.items() if k not in skip_names}
rendered_namespace = {k: v for k, v in namespace.items() if k in skip_names}
while len(torender_namespace):
loop = True
for key, value_template in list(torender_namespace.items()):
try:
value = render_template(value_template, rendered_namespace, default_namespace)
torender_namespace.pop(key)
rendered_namespace[key] = value
loop = False
except NameError:
pass
if loop:
raise NameError("Can't render: {!r}".format(torender_namespace))
return collections.OrderedDict((k, rendered_namespace[k]) for k in namespace)
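# Example (illustrative): render_namespace({'a': '1', 'b': '${a}2'}) keeps
# retrying unresolved values until every cross-reference renders, and returns
# an OrderedDict equivalent to {'a': '1', 'b': '12'}.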
def register_tasks(namespaces, default_namespace={}, dry_run_suffix=''):
"""Return a Luigi task class after parsed Luigi task metadata.
:param dict namespaces: Task namespaces.
:param dict default_namespace: Default namespaces.
:param unicode dry_run_suffix: Suffix to be added to file created during dry run.
:rtype: iterable
"""
for task_name, namespace in namespaces.items():
action_namespace = default_namespace.copy()
action_namespace.update(namespace)
task_keys = ['target_pattern', 'sources_repls', 'action_template', 'SHELL']
task_namespace = {k: action_namespace[k] for k in task_keys if k in action_namespace}
task_namespace['sources_repls'] = task_namespace['sources_repls'].split()
# luigi attributes
task_namespace['resources'] = {k.partition('_')[2]: int(v) for k, v in namespace.items()
if k.startswith('RESOURCES_')}
task_namespace.update(
{k: int(namespace[k]) for k in ['priority', 'disabled', 'worker_timeout']
if k in namespace})
yield ReRuleTask.factory(
task_name, dry_run_suffix=dry_run_suffix, action_namespace=action_namespace,
**task_namespace
)
def print_namespaces(default_namespace, section_namespaces):
"""Print namespaces with the MarioFile format.
:param dict default_namespace: Default namespace dictionary.
:param dict section_namespaces: Section namespaces dictionary.
:return: str
"""
template = mako.template.Template(TEMPLATE)
namespaces = template.render(
default_namespace=default_namespace, section_namespaces=section_namespaces
)
return namespaces
def render_config(section_namespaces):
"""Parse and render a MarioFile.
:param dict section_namespaces: Section namespaces dictionary.
    :return: (dict, dict)
"""
default_namespace = render_namespace(section_namespaces['DEFAULT'])
rendered_namespaces = collections.OrderedDict(
(k, render_namespace(v, default_namespace)) for k, v in section_namespaces.items()
)
return default_namespace, rendered_namespaces
def mario(rendered_namespaces, default_namespace, targets=('DEFAULT',), dry_run=False):
"""Generate Luigi tasks' file from MarioFile and Luigi template file
:param dict rendered_namespaces: Rendered namespaces dictionary.
:param dict default_namespace: Default namespace dictionary.
:param iterable targets: List of targets.
:param bool dry_run: Dry run flag.
:rtype : iterable
"""
# ensure '.' is present in sys.path so 'IMPORT_MODULES = local_module' works
if '.' not in sys.path:
sys.path.append('.')
dry_run_suffix = '-dry_run-' + str(uuid.uuid4()) if dry_run else ''
rendered_namespaces = collections.OrderedDict(reversed(list(rendered_namespaces.items())))
tasks = list(register_tasks(
rendered_namespaces, default_namespace=default_namespace, dry_run_suffix=dry_run_suffix
))
target_tasks = []
for target in targets:
for task_rule in tasks:
if task_rule.match(target):
target_tasks.append(task_rule(target=target))
break
return target_tasks
| apache-2.0 | -5,749,971,859,850,435,000 | 34.841945 | 99 | 0.6356 | false |
anbangleo/NlsdeWeb | Python-3.6.0/Lib/test/test_mailbox.py | 2 | 92803 | import os
import sys
import time
import stat
import socket
import email
import email.message
import re
import io
import tempfile
from test import support
import unittest
import textwrap
import mailbox
import glob
class TestBase:
all_mailbox_types = (mailbox.Message, mailbox.MaildirMessage,
mailbox.mboxMessage, mailbox.MHMessage,
mailbox.BabylMessage, mailbox.MMDFMessage)
def _check_sample(self, msg):
# Inspect a mailbox.Message representation of the sample message
self.assertIsInstance(msg, email.message.Message)
self.assertIsInstance(msg, mailbox.Message)
for key, value in _sample_headers.items():
self.assertIn(value, msg.get_all(key))
self.assertTrue(msg.is_multipart())
self.assertEqual(len(msg.get_payload()), len(_sample_payloads))
for i, payload in enumerate(_sample_payloads):
part = msg.get_payload(i)
self.assertIsInstance(part, email.message.Message)
self.assertNotIsInstance(part, mailbox.Message)
self.assertEqual(part.get_payload(), payload)
def _delete_recursively(self, target):
# Delete a file or delete a directory recursively
if os.path.isdir(target):
support.rmtree(target)
elif os.path.exists(target):
support.unlink(target)
class TestMailbox(TestBase):
maxDiff = None
_factory = None # Overridden by subclasses to reuse tests
_template = 'From: foo\n\n%s\n'
def setUp(self):
self._path = support.TESTFN
self._delete_recursively(self._path)
self._box = self._factory(self._path)
def tearDown(self):
self._box.close()
self._delete_recursively(self._path)
def test_add(self):
# Add copies of a sample message
keys = []
keys.append(self._box.add(self._template % 0))
self.assertEqual(len(self._box), 1)
keys.append(self._box.add(mailbox.Message(_sample_message)))
self.assertEqual(len(self._box), 2)
keys.append(self._box.add(email.message_from_string(_sample_message)))
self.assertEqual(len(self._box), 3)
keys.append(self._box.add(io.BytesIO(_bytes_sample_message)))
self.assertEqual(len(self._box), 4)
keys.append(self._box.add(_sample_message))
self.assertEqual(len(self._box), 5)
keys.append(self._box.add(_bytes_sample_message))
self.assertEqual(len(self._box), 6)
with self.assertWarns(DeprecationWarning):
keys.append(self._box.add(
io.TextIOWrapper(io.BytesIO(_bytes_sample_message))))
self.assertEqual(len(self._box), 7)
self.assertEqual(self._box.get_string(keys[0]), self._template % 0)
for i in (1, 2, 3, 4, 5, 6):
self._check_sample(self._box[keys[i]])
_nonascii_msg = textwrap.dedent("""\
From: foo
Subject: Falinaptár házhozszállítással. Már rendeltél?
0
""")
def test_add_invalid_8bit_bytes_header(self):
key = self._box.add(self._nonascii_msg.encode('latin-1'))
self.assertEqual(len(self._box), 1)
self.assertEqual(self._box.get_bytes(key),
self._nonascii_msg.encode('latin-1'))
def test_invalid_nonascii_header_as_string(self):
subj = self._nonascii_msg.splitlines()[1]
key = self._box.add(subj.encode('latin-1'))
self.assertEqual(self._box.get_string(key),
'Subject: =?unknown-8bit?b?RmFsaW5hcHThciBo4Xpob3pzeuFsbO104XNz'
'YWwuIE3hciByZW5kZWx06Ww/?=\n\n')
def test_add_nonascii_string_header_raises(self):
with self.assertRaisesRegex(ValueError, "ASCII-only"):
self._box.add(self._nonascii_msg)
self._box.flush()
self.assertEqual(len(self._box), 0)
self.assertMailboxEmpty()
def test_add_that_raises_leaves_mailbox_empty(self):
def raiser(*args, **kw):
raise Exception("a fake error")
support.patch(self, email.generator.BytesGenerator, 'flatten', raiser)
with self.assertRaises(Exception):
self._box.add(email.message_from_string("From: Alphöso"))
self.assertEqual(len(self._box), 0)
self._box.close()
self.assertMailboxEmpty()
_non_latin_bin_msg = textwrap.dedent("""\
From: [email protected]
To: báz
Subject: Maintenant je vous présente mon collègue, le pouf célèbre
\tJean de Baddie
Mime-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
Да, они летят.
""").encode('utf-8')
def test_add_8bit_body(self):
key = self._box.add(self._non_latin_bin_msg)
self.assertEqual(self._box.get_bytes(key),
self._non_latin_bin_msg)
with self._box.get_file(key) as f:
self.assertEqual(f.read(),
self._non_latin_bin_msg.replace(b'\n',
os.linesep.encode()))
self.assertEqual(self._box[key].get_payload(),
"Да, они летят.\n")
def test_add_binary_file(self):
with tempfile.TemporaryFile('wb+') as f:
f.write(_bytes_sample_message)
f.seek(0)
key = self._box.add(f)
self.assertEqual(self._box.get_bytes(key).split(b'\n'),
_bytes_sample_message.split(b'\n'))
def test_add_binary_nonascii_file(self):
with tempfile.TemporaryFile('wb+') as f:
f.write(self._non_latin_bin_msg)
f.seek(0)
key = self._box.add(f)
self.assertEqual(self._box.get_bytes(key).split(b'\n'),
self._non_latin_bin_msg.split(b'\n'))
def test_add_text_file_warns(self):
with tempfile.TemporaryFile('w+') as f:
f.write(_sample_message)
f.seek(0)
with self.assertWarns(DeprecationWarning):
key = self._box.add(f)
self.assertEqual(self._box.get_bytes(key).split(b'\n'),
_bytes_sample_message.split(b'\n'))
def test_add_StringIO_warns(self):
with self.assertWarns(DeprecationWarning):
key = self._box.add(io.StringIO(self._template % "0"))
self.assertEqual(self._box.get_string(key), self._template % "0")
def test_add_nonascii_StringIO_raises(self):
with self.assertWarns(DeprecationWarning):
with self.assertRaisesRegex(ValueError, "ASCII-only"):
self._box.add(io.StringIO(self._nonascii_msg))
self.assertEqual(len(self._box), 0)
self._box.close()
self.assertMailboxEmpty()
def test_remove(self):
# Remove messages using remove()
self._test_remove_or_delitem(self._box.remove)
def test_delitem(self):
# Remove messages using __delitem__()
self._test_remove_or_delitem(self._box.__delitem__)
def _test_remove_or_delitem(self, method):
# (Used by test_remove() and test_delitem().)
key0 = self._box.add(self._template % 0)
key1 = self._box.add(self._template % 1)
self.assertEqual(len(self._box), 2)
method(key0)
self.assertEqual(len(self._box), 1)
self.assertRaises(KeyError, lambda: self._box[key0])
self.assertRaises(KeyError, lambda: method(key0))
self.assertEqual(self._box.get_string(key1), self._template % 1)
key2 = self._box.add(self._template % 2)
self.assertEqual(len(self._box), 2)
method(key2)
self.assertEqual(len(self._box), 1)
self.assertRaises(KeyError, lambda: self._box[key2])
self.assertRaises(KeyError, lambda: method(key2))
self.assertEqual(self._box.get_string(key1), self._template % 1)
method(key1)
self.assertEqual(len(self._box), 0)
self.assertRaises(KeyError, lambda: self._box[key1])
self.assertRaises(KeyError, lambda: method(key1))
def test_discard(self, repetitions=10):
# Discard messages
key0 = self._box.add(self._template % 0)
key1 = self._box.add(self._template % 1)
self.assertEqual(len(self._box), 2)
self._box.discard(key0)
self.assertEqual(len(self._box), 1)
self.assertRaises(KeyError, lambda: self._box[key0])
self._box.discard(key0)
self.assertEqual(len(self._box), 1)
self.assertRaises(KeyError, lambda: self._box[key0])
def test_get(self):
# Retrieve messages using get()
key0 = self._box.add(self._template % 0)
msg = self._box.get(key0)
self.assertEqual(msg['from'], 'foo')
self.assertEqual(msg.get_payload(), '0\n')
self.assertIsNone(self._box.get('foo'))
self.assertIs(self._box.get('foo', False), False)
self._box.close()
self._box = self._factory(self._path)
key1 = self._box.add(self._template % 1)
msg = self._box.get(key1)
self.assertEqual(msg['from'], 'foo')
self.assertEqual(msg.get_payload(), '1\n')
def test_getitem(self):
# Retrieve message using __getitem__()
key0 = self._box.add(self._template % 0)
msg = self._box[key0]
self.assertEqual(msg['from'], 'foo')
self.assertEqual(msg.get_payload(), '0\n')
self.assertRaises(KeyError, lambda: self._box['foo'])
self._box.discard(key0)
self.assertRaises(KeyError, lambda: self._box[key0])
def test_get_message(self):
# Get Message representations of messages
key0 = self._box.add(self._template % 0)
key1 = self._box.add(_sample_message)
msg0 = self._box.get_message(key0)
self.assertIsInstance(msg0, mailbox.Message)
self.assertEqual(msg0['from'], 'foo')
self.assertEqual(msg0.get_payload(), '0\n')
self._check_sample(self._box.get_message(key1))
def test_get_bytes(self):
# Get bytes representations of messages
key0 = self._box.add(self._template % 0)
key1 = self._box.add(_sample_message)
self.assertEqual(self._box.get_bytes(key0),
(self._template % 0).encode('ascii'))
self.assertEqual(self._box.get_bytes(key1), _bytes_sample_message)
def test_get_string(self):
# Get string representations of messages
key0 = self._box.add(self._template % 0)
key1 = self._box.add(_sample_message)
self.assertEqual(self._box.get_string(key0), self._template % 0)
self.assertEqual(self._box.get_string(key1).split('\n'),
_sample_message.split('\n'))
def test_get_file(self):
# Get file representations of messages
key0 = self._box.add(self._template % 0)
key1 = self._box.add(_sample_message)
with self._box.get_file(key0) as file:
data0 = file.read()
with self._box.get_file(key1) as file:
data1 = file.read()
self.assertEqual(data0.decode('ascii').replace(os.linesep, '\n'),
self._template % 0)
self.assertEqual(data1.decode('ascii').replace(os.linesep, '\n'),
_sample_message)
def test_get_file_can_be_closed_twice(self):
# Issue 11700
key = self._box.add(_sample_message)
f = self._box.get_file(key)
f.close()
f.close()
def test_iterkeys(self):
# Get keys using iterkeys()
self._check_iteration(self._box.iterkeys, do_keys=True, do_values=False)
def test_keys(self):
# Get keys using keys()
self._check_iteration(self._box.keys, do_keys=True, do_values=False)
def test_itervalues(self):
# Get values using itervalues()
self._check_iteration(self._box.itervalues, do_keys=False,
do_values=True)
def test_iter(self):
# Get values using __iter__()
self._check_iteration(self._box.__iter__, do_keys=False,
do_values=True)
def test_values(self):
# Get values using values()
self._check_iteration(self._box.values, do_keys=False, do_values=True)
def test_iteritems(self):
# Get keys and values using iteritems()
self._check_iteration(self._box.iteritems, do_keys=True,
do_values=True)
def test_items(self):
# Get keys and values using items()
self._check_iteration(self._box.items, do_keys=True, do_values=True)
def _check_iteration(self, method, do_keys, do_values, repetitions=10):
for value in method():
self.fail("Not empty")
keys, values = [], []
for i in range(repetitions):
keys.append(self._box.add(self._template % i))
values.append(self._template % i)
if do_keys and not do_values:
returned_keys = list(method())
elif do_values and not do_keys:
returned_values = list(method())
else:
returned_keys, returned_values = [], []
for key, value in method():
returned_keys.append(key)
returned_values.append(value)
if do_keys:
self.assertEqual(len(keys), len(returned_keys))
self.assertEqual(set(keys), set(returned_keys))
if do_values:
count = 0
for value in returned_values:
self.assertEqual(value['from'], 'foo')
self.assertLess(int(value.get_payload()), repetitions)
count += 1
self.assertEqual(len(values), count)
def test_contains(self):
# Check existence of keys using __contains__()
self.assertNotIn('foo', self._box)
key0 = self._box.add(self._template % 0)
self.assertIn(key0, self._box)
self.assertNotIn('foo', self._box)
key1 = self._box.add(self._template % 1)
self.assertIn(key1, self._box)
self.assertIn(key0, self._box)
self.assertNotIn('foo', self._box)
self._box.remove(key0)
self.assertNotIn(key0, self._box)
self.assertIn(key1, self._box)
self.assertNotIn('foo', self._box)
self._box.remove(key1)
self.assertNotIn(key1, self._box)
self.assertNotIn(key0, self._box)
self.assertNotIn('foo', self._box)
def test_len(self, repetitions=10):
# Get message count
keys = []
for i in range(repetitions):
self.assertEqual(len(self._box), i)
keys.append(self._box.add(self._template % i))
self.assertEqual(len(self._box), i + 1)
for i in range(repetitions):
self.assertEqual(len(self._box), repetitions - i)
self._box.remove(keys[i])
self.assertEqual(len(self._box), repetitions - i - 1)
def test_set_item(self):
# Modify messages using __setitem__()
key0 = self._box.add(self._template % 'original 0')
self.assertEqual(self._box.get_string(key0),
self._template % 'original 0')
key1 = self._box.add(self._template % 'original 1')
self.assertEqual(self._box.get_string(key1),
self._template % 'original 1')
self._box[key0] = self._template % 'changed 0'
self.assertEqual(self._box.get_string(key0),
self._template % 'changed 0')
self._box[key1] = self._template % 'changed 1'
self.assertEqual(self._box.get_string(key1),
self._template % 'changed 1')
self._box[key0] = _sample_message
self._check_sample(self._box[key0])
self._box[key1] = self._box[key0]
self._check_sample(self._box[key1])
self._box[key0] = self._template % 'original 0'
self.assertEqual(self._box.get_string(key0),
self._template % 'original 0')
self._check_sample(self._box[key1])
self.assertRaises(KeyError,
lambda: self._box.__setitem__('foo', 'bar'))
self.assertRaises(KeyError, lambda: self._box['foo'])
self.assertEqual(len(self._box), 2)
def test_clear(self, iterations=10):
# Remove all messages using clear()
keys = []
for i in range(iterations):
            keys.append(self._box.add(self._template % i))
for i, key in enumerate(keys):
self.assertEqual(self._box.get_string(key), self._template % i)
self._box.clear()
self.assertEqual(len(self._box), 0)
for i, key in enumerate(keys):
self.assertRaises(KeyError, lambda: self._box.get_string(key))
def test_pop(self):
# Get and remove a message using pop()
key0 = self._box.add(self._template % 0)
self.assertIn(key0, self._box)
key1 = self._box.add(self._template % 1)
self.assertIn(key1, self._box)
self.assertEqual(self._box.pop(key0).get_payload(), '0\n')
self.assertNotIn(key0, self._box)
self.assertIn(key1, self._box)
key2 = self._box.add(self._template % 2)
self.assertIn(key2, self._box)
self.assertEqual(self._box.pop(key2).get_payload(), '2\n')
self.assertNotIn(key2, self._box)
self.assertIn(key1, self._box)
self.assertEqual(self._box.pop(key1).get_payload(), '1\n')
self.assertNotIn(key1, self._box)
self.assertEqual(len(self._box), 0)
def test_popitem(self, iterations=10):
# Get and remove an arbitrary (key, message) using popitem()
keys = []
for i in range(10):
keys.append(self._box.add(self._template % i))
seen = []
for i in range(10):
key, msg = self._box.popitem()
self.assertIn(key, keys)
self.assertNotIn(key, seen)
seen.append(key)
self.assertEqual(int(msg.get_payload()), keys.index(key))
self.assertEqual(len(self._box), 0)
for key in keys:
self.assertRaises(KeyError, lambda: self._box[key])
def test_update(self):
# Modify multiple messages using update()
key0 = self._box.add(self._template % 'original 0')
key1 = self._box.add(self._template % 'original 1')
key2 = self._box.add(self._template % 'original 2')
self._box.update({key0: self._template % 'changed 0',
key2: _sample_message})
self.assertEqual(len(self._box), 3)
self.assertEqual(self._box.get_string(key0),
self._template % 'changed 0')
self.assertEqual(self._box.get_string(key1),
self._template % 'original 1')
self._check_sample(self._box[key2])
self._box.update([(key2, self._template % 'changed 2'),
(key1, self._template % 'changed 1'),
(key0, self._template % 'original 0')])
self.assertEqual(len(self._box), 3)
self.assertEqual(self._box.get_string(key0),
self._template % 'original 0')
self.assertEqual(self._box.get_string(key1),
self._template % 'changed 1')
self.assertEqual(self._box.get_string(key2),
self._template % 'changed 2')
self.assertRaises(KeyError,
lambda: self._box.update({'foo': 'bar',
key0: self._template % "changed 0"}))
self.assertEqual(len(self._box), 3)
self.assertEqual(self._box.get_string(key0),
self._template % "changed 0")
self.assertEqual(self._box.get_string(key1),
self._template % "changed 1")
self.assertEqual(self._box.get_string(key2),
self._template % "changed 2")
def test_flush(self):
# Write changes to disk
self._test_flush_or_close(self._box.flush, True)
def test_popitem_and_flush_twice(self):
# See #15036.
self._box.add(self._template % 0)
self._box.add(self._template % 1)
self._box.flush()
self._box.popitem()
self._box.flush()
self._box.popitem()
self._box.flush()
def test_lock_unlock(self):
# Lock and unlock the mailbox
self.assertFalse(os.path.exists(self._get_lock_path()))
self._box.lock()
self.assertTrue(os.path.exists(self._get_lock_path()))
self._box.unlock()
self.assertFalse(os.path.exists(self._get_lock_path()))
def test_close(self):
# Close mailbox and flush changes to disk
self._test_flush_or_close(self._box.close, False)
def _test_flush_or_close(self, method, should_call_close):
contents = [self._template % i for i in range(3)]
self._box.add(contents[0])
self._box.add(contents[1])
self._box.add(contents[2])
oldbox = self._box
method()
if should_call_close:
self._box.close()
self._box = self._factory(self._path)
keys = self._box.keys()
self.assertEqual(len(keys), 3)
for key in keys:
self.assertIn(self._box.get_string(key), contents)
oldbox.close()
def test_dump_message(self):
# Write message representations to disk
for input in (email.message_from_string(_sample_message),
_sample_message, io.BytesIO(_bytes_sample_message)):
output = io.BytesIO()
self._box._dump_message(input, output)
self.assertEqual(output.getvalue(),
_bytes_sample_message.replace(b'\n', os.linesep.encode()))
output = io.BytesIO()
self.assertRaises(TypeError,
lambda: self._box._dump_message(None, output))
def _get_lock_path(self):
# Return the path of the dot lock file. May be overridden.
return self._path + '.lock'
class TestMailboxSuperclass(TestBase, unittest.TestCase):
def test_notimplemented(self):
# Test that all Mailbox methods raise NotImplementedException.
box = mailbox.Mailbox('path')
self.assertRaises(NotImplementedError, lambda: box.add(''))
self.assertRaises(NotImplementedError, lambda: box.remove(''))
self.assertRaises(NotImplementedError, lambda: box.__delitem__(''))
self.assertRaises(NotImplementedError, lambda: box.discard(''))
self.assertRaises(NotImplementedError, lambda: box.__setitem__('', ''))
self.assertRaises(NotImplementedError, lambda: box.iterkeys())
self.assertRaises(NotImplementedError, lambda: box.keys())
self.assertRaises(NotImplementedError, lambda: box.itervalues().__next__())
self.assertRaises(NotImplementedError, lambda: box.__iter__().__next__())
self.assertRaises(NotImplementedError, lambda: box.values())
self.assertRaises(NotImplementedError, lambda: box.iteritems().__next__())
self.assertRaises(NotImplementedError, lambda: box.items())
self.assertRaises(NotImplementedError, lambda: box.get(''))
self.assertRaises(NotImplementedError, lambda: box.__getitem__(''))
self.assertRaises(NotImplementedError, lambda: box.get_message(''))
self.assertRaises(NotImplementedError, lambda: box.get_string(''))
self.assertRaises(NotImplementedError, lambda: box.get_bytes(''))
self.assertRaises(NotImplementedError, lambda: box.get_file(''))
self.assertRaises(NotImplementedError, lambda: '' in box)
self.assertRaises(NotImplementedError, lambda: box.__contains__(''))
self.assertRaises(NotImplementedError, lambda: box.__len__())
self.assertRaises(NotImplementedError, lambda: box.clear())
self.assertRaises(NotImplementedError, lambda: box.pop(''))
self.assertRaises(NotImplementedError, lambda: box.popitem())
self.assertRaises(NotImplementedError, lambda: box.update((('', ''),)))
self.assertRaises(NotImplementedError, lambda: box.flush())
self.assertRaises(NotImplementedError, lambda: box.lock())
self.assertRaises(NotImplementedError, lambda: box.unlock())
self.assertRaises(NotImplementedError, lambda: box.close())
class TestMaildir(TestMailbox, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.Maildir(path, factory)
def setUp(self):
TestMailbox.setUp(self)
if (os.name == 'nt') or (sys.platform == 'cygwin'):
self._box.colon = '!'
def assertMailboxEmpty(self):
self.assertEqual(os.listdir(os.path.join(self._path, 'tmp')), [])
def test_add_MM(self):
# Add a MaildirMessage instance
msg = mailbox.MaildirMessage(self._template % 0)
msg.set_subdir('cur')
msg.set_info('foo')
key = self._box.add(msg)
self.assertTrue(os.path.exists(os.path.join(self._path, 'cur', '%s%sfoo' %
(key, self._box.colon))))
def test_get_MM(self):
# Get a MaildirMessage instance
msg = mailbox.MaildirMessage(self._template % 0)
msg.set_subdir('cur')
msg.set_flags('RF')
key = self._box.add(msg)
msg_returned = self._box.get_message(key)
self.assertIsInstance(msg_returned, mailbox.MaildirMessage)
self.assertEqual(msg_returned.get_subdir(), 'cur')
self.assertEqual(msg_returned.get_flags(), 'FR')
def test_set_MM(self):
# Set with a MaildirMessage instance
msg0 = mailbox.MaildirMessage(self._template % 0)
msg0.set_flags('TP')
key = self._box.add(msg0)
msg_returned = self._box.get_message(key)
self.assertEqual(msg_returned.get_subdir(), 'new')
self.assertEqual(msg_returned.get_flags(), 'PT')
msg1 = mailbox.MaildirMessage(self._template % 1)
self._box[key] = msg1
msg_returned = self._box.get_message(key)
self.assertEqual(msg_returned.get_subdir(), 'new')
self.assertEqual(msg_returned.get_flags(), '')
self.assertEqual(msg_returned.get_payload(), '1\n')
msg2 = mailbox.MaildirMessage(self._template % 2)
msg2.set_info('2,S')
self._box[key] = msg2
self._box[key] = self._template % 3
msg_returned = self._box.get_message(key)
self.assertEqual(msg_returned.get_subdir(), 'new')
self.assertEqual(msg_returned.get_flags(), 'S')
self.assertEqual(msg_returned.get_payload(), '3\n')
def test_consistent_factory(self):
# Add a message.
msg = mailbox.MaildirMessage(self._template % 0)
msg.set_subdir('cur')
msg.set_flags('RF')
key = self._box.add(msg)
# Create new mailbox with
class FakeMessage(mailbox.MaildirMessage):
pass
box = mailbox.Maildir(self._path, factory=FakeMessage)
box.colon = self._box.colon
msg2 = box.get_message(key)
self.assertIsInstance(msg2, FakeMessage)
def test_initialize_new(self):
# Initialize a non-existent mailbox
self.tearDown()
self._box = mailbox.Maildir(self._path)
self._check_basics()
self._delete_recursively(self._path)
self._box = self._factory(self._path, factory=None)
self._check_basics()
def test_initialize_existing(self):
# Initialize an existing mailbox
self.tearDown()
for subdir in '', 'tmp', 'new', 'cur':
os.mkdir(os.path.normpath(os.path.join(self._path, subdir)))
self._box = mailbox.Maildir(self._path)
self._check_basics()
def _check_basics(self, factory=None):
# (Used by test_open_new() and test_open_existing().)
self.assertEqual(self._box._path, os.path.abspath(self._path))
self.assertEqual(self._box._factory, factory)
for subdir in '', 'tmp', 'new', 'cur':
path = os.path.join(self._path, subdir)
mode = os.stat(path)[stat.ST_MODE]
self.assertTrue(stat.S_ISDIR(mode), "Not a directory: '%s'" % path)
def test_list_folders(self):
# List folders
self._box.add_folder('one')
self._box.add_folder('two')
self._box.add_folder('three')
self.assertEqual(len(self._box.list_folders()), 3)
self.assertEqual(set(self._box.list_folders()),
set(('one', 'two', 'three')))
def test_get_folder(self):
# Open folders
self._box.add_folder('foo.bar')
folder0 = self._box.get_folder('foo.bar')
folder0.add(self._template % 'bar')
self.assertTrue(os.path.isdir(os.path.join(self._path, '.foo.bar')))
folder1 = self._box.get_folder('foo.bar')
self.assertEqual(folder1.get_string(folder1.keys()[0]),
self._template % 'bar')
def test_add_and_remove_folders(self):
# Delete folders
self._box.add_folder('one')
self._box.add_folder('two')
self.assertEqual(len(self._box.list_folders()), 2)
self.assertEqual(set(self._box.list_folders()), set(('one', 'two')))
self._box.remove_folder('one')
self.assertEqual(len(self._box.list_folders()), 1)
self.assertEqual(set(self._box.list_folders()), set(('two',)))
self._box.add_folder('three')
self.assertEqual(len(self._box.list_folders()), 2)
self.assertEqual(set(self._box.list_folders()), set(('two', 'three')))
self._box.remove_folder('three')
self.assertEqual(len(self._box.list_folders()), 1)
self.assertEqual(set(self._box.list_folders()), set(('two',)))
self._box.remove_folder('two')
self.assertEqual(len(self._box.list_folders()), 0)
self.assertEqual(self._box.list_folders(), [])
def test_clean(self):
# Remove old files from 'tmp'
foo_path = os.path.join(self._path, 'tmp', 'foo')
bar_path = os.path.join(self._path, 'tmp', 'bar')
with open(foo_path, 'w') as f:
f.write("@")
with open(bar_path, 'w') as f:
f.write("@")
self._box.clean()
self.assertTrue(os.path.exists(foo_path))
self.assertTrue(os.path.exists(bar_path))
foo_stat = os.stat(foo_path)
os.utime(foo_path, (time.time() - 129600 - 2,
foo_stat.st_mtime))
self._box.clean()
self.assertFalse(os.path.exists(foo_path))
self.assertTrue(os.path.exists(bar_path))
def test_create_tmp(self, repetitions=10):
# Create files in tmp directory
hostname = socket.gethostname()
if '/' in hostname:
hostname = hostname.replace('/', r'\057')
if ':' in hostname:
hostname = hostname.replace(':', r'\072')
pid = os.getpid()
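        # Maildir tmp file names have the form
        # "<seconds>.M<microseconds>P<pid>Q<sequence>.<hostname>".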
pattern = re.compile(r"(?P<time>\d+)\.M(?P<M>\d{1,6})P(?P<P>\d+)"
r"Q(?P<Q>\d+)\.(?P<host>[^:/]+)")
previous_groups = None
for x in range(repetitions):
tmp_file = self._box._create_tmp()
head, tail = os.path.split(tmp_file.name)
self.assertEqual(head, os.path.abspath(os.path.join(self._path,
"tmp")),
"File in wrong location: '%s'" % head)
match = pattern.match(tail)
self.assertIsNotNone(match, "Invalid file name: '%s'" % tail)
groups = match.groups()
if previous_groups is not None:
self.assertGreaterEqual(int(groups[0]), int(previous_groups[0]),
"Non-monotonic seconds: '%s' before '%s'" %
(previous_groups[0], groups[0]))
if int(groups[0]) == int(previous_groups[0]):
self.assertGreaterEqual(int(groups[1]), int(previous_groups[1]),
"Non-monotonic milliseconds: '%s' before '%s'" %
(previous_groups[1], groups[1]))
self.assertEqual(int(groups[2]), pid,
"Process ID mismatch: '%s' should be '%s'" %
(groups[2], pid))
self.assertEqual(int(groups[3]), int(previous_groups[3]) + 1,
"Non-sequential counter: '%s' before '%s'" %
(previous_groups[3], groups[3]))
self.assertEqual(groups[4], hostname,
"Host name mismatch: '%s' should be '%s'" %
(groups[4], hostname))
previous_groups = groups
tmp_file.write(_bytes_sample_message)
tmp_file.seek(0)
self.assertEqual(tmp_file.read(), _bytes_sample_message)
tmp_file.close()
file_count = len(os.listdir(os.path.join(self._path, "tmp")))
self.assertEqual(file_count, repetitions,
"Wrong file count: '%s' should be '%s'" %
(file_count, repetitions))
def test_refresh(self):
# Update the table of contents
self.assertEqual(self._box._toc, {})
key0 = self._box.add(self._template % 0)
key1 = self._box.add(self._template % 1)
self.assertEqual(self._box._toc, {})
self._box._refresh()
self.assertEqual(self._box._toc, {key0: os.path.join('new', key0),
key1: os.path.join('new', key1)})
key2 = self._box.add(self._template % 2)
self.assertEqual(self._box._toc, {key0: os.path.join('new', key0),
key1: os.path.join('new', key1)})
self._box._refresh()
self.assertEqual(self._box._toc, {key0: os.path.join('new', key0),
key1: os.path.join('new', key1),
key2: os.path.join('new', key2)})
def test_refresh_after_safety_period(self):
# Issue #13254: Call _refresh after the "file system safety
# period" of 2 seconds has passed; _toc should still be
# updated because this is the first call to _refresh.
key0 = self._box.add(self._template % 0)
key1 = self._box.add(self._template % 1)
self._box = self._factory(self._path)
self.assertEqual(self._box._toc, {})
# Emulate sleeping. Instead of sleeping for 2 seconds, use the
# skew factor to make _refresh think that the filesystem
# safety period has passed and re-reading the _toc is only
# required if mtimes differ.
self._box._skewfactor = -3
self._box._refresh()
self.assertEqual(sorted(self._box._toc.keys()), sorted([key0, key1]))
def test_lookup(self):
# Look up message subpaths in the TOC
self.assertRaises(KeyError, lambda: self._box._lookup('foo'))
key0 = self._box.add(self._template % 0)
self.assertEqual(self._box._lookup(key0), os.path.join('new', key0))
os.remove(os.path.join(self._path, 'new', key0))
self.assertEqual(self._box._toc, {key0: os.path.join('new', key0)})
# Be sure that the TOC is read back from disk (see issue #6896
# about bad mtime behaviour on some systems).
self._box.flush()
self.assertRaises(KeyError, lambda: self._box._lookup(key0))
self.assertEqual(self._box._toc, {})
def test_lock_unlock(self):
# Lock and unlock the mailbox. For Maildir, this does nothing.
self._box.lock()
self._box.unlock()
    def test_folder(self):
        # Test for bug #1569790: verify that folders returned by .get_folder()
        # use the same factory function.
        def dummy_factory(s):
return None
box = self._factory(self._path, factory=dummy_factory)
folder = box.add_folder('folder1')
self.assertIs(folder._factory, dummy_factory)
folder1_alias = box.get_folder('folder1')
self.assertIs(folder1_alias._factory, dummy_factory)
    def test_directory_in_folder(self):
# Test that mailboxes still work if there's a stray extra directory
# in a folder.
for i in range(10):
self._box.add(mailbox.Message(_sample_message))
# Create a stray directory
os.mkdir(os.path.join(self._path, 'cur', 'stray-dir'))
# Check that looping still works with the directory present.
for msg in self._box:
pass
@unittest.skipUnless(hasattr(os, 'umask'), 'test needs os.umask()')
@unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
def test_file_permissions(self):
# Verify that message files are created without execute permissions
msg = mailbox.MaildirMessage(self._template % 0)
orig_umask = os.umask(0)
try:
key = self._box.add(msg)
finally:
os.umask(orig_umask)
path = os.path.join(self._path, self._box._lookup(key))
mode = os.stat(path).st_mode
self.assertFalse(mode & 0o111)
@unittest.skipUnless(hasattr(os, 'umask'), 'test needs os.umask()')
@unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
def test_folder_file_perms(self):
# From bug #3228, we want to verify that the file created inside a Maildir
# subfolder isn't marked as executable.
orig_umask = os.umask(0)
try:
subfolder = self._box.add_folder('subfolder')
finally:
os.umask(orig_umask)
path = os.path.join(subfolder._path, 'maildirfolder')
st = os.stat(path)
perms = st.st_mode
self.assertFalse((perms & 0o111)) # Execute bits should all be off.
def test_reread(self):
# Do an initial unconditional refresh
self._box._refresh()
# Put the last modified times more than two seconds into the past
# (because mtime may have a two second granularity)
for subdir in ('cur', 'new'):
os.utime(os.path.join(self._box._path, subdir),
(time.time()-5,)*2)
# Because mtime has a two second granularity in worst case (FAT), a
# refresh is done unconditionally if called for within
# two-second-plus-a-bit of the last one, just in case the mbox has
# changed; so now we have to wait for that interval to expire.
#
# Because this is a test, emulate sleeping. Instead of
# sleeping for 2 seconds, use the skew factor to make _refresh
# think that 2 seconds have passed and re-reading the _toc is
# only required if mtimes differ.
self._box._skewfactor = -3
# Re-reading causes the ._toc attribute to be assigned a new dictionary
# object, so we'll check that the ._toc attribute isn't a different
# object.
orig_toc = self._box._toc
def refreshed():
return self._box._toc is not orig_toc
self._box._refresh()
self.assertFalse(refreshed())
# Now, write something into cur and remove it. This changes
# the mtime and should cause a re-read. Note that "sleep
# emulation" is still in effect, as skewfactor is -3.
filename = os.path.join(self._path, 'cur', 'stray-file')
support.create_empty_file(filename)
os.unlink(filename)
self._box._refresh()
self.assertTrue(refreshed())
class _TestSingleFile(TestMailbox):
'''Common tests for single-file mailboxes'''
def test_add_doesnt_rewrite(self):
# When only adding messages, flush() should not rewrite the
# mailbox file. See issue #9559.
# Inode number changes if the contents are written to another
# file which is then renamed over the original file. So we
# must check that the inode number doesn't change.
inode_before = os.stat(self._path).st_ino
self._box.add(self._template % 0)
self._box.flush()
inode_after = os.stat(self._path).st_ino
self.assertEqual(inode_before, inode_after)
# Make sure the message was really added
self._box.close()
self._box = self._factory(self._path)
self.assertEqual(len(self._box), 1)
def test_permissions_after_flush(self):
# See issue #5346
# Make the mailbox world writable. It's unlikely that the new
# mailbox file would have these permissions after flush(),
# because umask usually prevents it.
mode = os.stat(self._path).st_mode | 0o666
os.chmod(self._path, mode)
self._box.add(self._template % 0)
i = self._box.add(self._template % 1)
# Need to remove one message to make flush() create a new file
self._box.remove(i)
self._box.flush()
self.assertEqual(os.stat(self._path).st_mode, mode)
class _TestMboxMMDF(_TestSingleFile):
def tearDown(self):
super().tearDown()
self._box.close()
self._delete_recursively(self._path)
for lock_remnant in glob.glob(self._path + '.*'):
support.unlink(lock_remnant)
def assertMailboxEmpty(self):
with open(self._path) as f:
self.assertEqual(f.readlines(), [])
def test_add_from_string(self):
# Add a string starting with 'From ' to the mailbox
key = self._box.add('From foo@bar blah\nFrom: foo\n\n0\n')
self.assertEqual(self._box[key].get_from(), 'foo@bar blah')
self.assertEqual(self._box[key].get_payload(), '0\n')
def test_add_from_bytes(self):
# Add a byte string starting with 'From ' to the mailbox
key = self._box.add(b'From foo@bar blah\nFrom: foo\n\n0\n')
self.assertEqual(self._box[key].get_from(), 'foo@bar blah')
self.assertEqual(self._box[key].get_payload(), '0\n')
def test_add_mbox_or_mmdf_message(self):
# Add an mboxMessage or MMDFMessage
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg = class_('From foo@bar blah\nFrom: foo\n\n0\n')
key = self._box.add(msg)
def test_open_close_open(self):
# Open and inspect previously-created mailbox
values = [self._template % i for i in range(3)]
for value in values:
self._box.add(value)
self._box.close()
mtime = os.path.getmtime(self._path)
self._box = self._factory(self._path)
self.assertEqual(len(self._box), 3)
for key in self._box.iterkeys():
self.assertIn(self._box.get_string(key), values)
self._box.close()
self.assertEqual(mtime, os.path.getmtime(self._path))
def test_add_and_close(self):
# Verifying that closing a mailbox doesn't change added items
self._box.add(_sample_message)
for i in range(3):
self._box.add(self._template % i)
self._box.add(_sample_message)
self._box._file.flush()
self._box._file.seek(0)
contents = self._box._file.read()
self._box.close()
with open(self._path, 'rb') as f:
self.assertEqual(contents, f.read())
self._box = self._factory(self._path)
@unittest.skipUnless(hasattr(os, 'fork'), "Test needs fork().")
@unittest.skipUnless(hasattr(socket, 'socketpair'), "Test needs socketpair().")
def test_lock_conflict(self):
# Fork off a child process that will lock the mailbox temporarily,
# unlock it and exit.
c, p = socket.socketpair()
self.addCleanup(c.close)
self.addCleanup(p.close)
pid = os.fork()
if pid == 0:
# child
try:
# lock the mailbox, and signal the parent it can proceed
self._box.lock()
c.send(b'c')
# wait until the parent is done, and unlock the mailbox
c.recv(1)
self._box.unlock()
finally:
os._exit(0)
# In the parent, wait until the child signals it locked the mailbox.
p.recv(1)
try:
self.assertRaises(mailbox.ExternalClashError,
self._box.lock)
finally:
# Signal the child it can now release the lock and exit.
p.send(b'p')
# Wait for child to exit. Locking should now succeed.
exited_pid, status = os.waitpid(pid, 0)
self._box.lock()
self._box.unlock()
def test_relock(self):
# Test case for bug #1575506: the mailbox class was locking the
# wrong file object in its flush() method.
msg = "Subject: sub\n\nbody\n"
key1 = self._box.add(msg)
self._box.flush()
self._box.close()
self._box = self._factory(self._path)
self._box.lock()
key2 = self._box.add(msg)
self._box.flush()
self.assertTrue(self._box._locked)
self._box.close()
class TestMbox(_TestMboxMMDF, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.mbox(path, factory)
@unittest.skipUnless(hasattr(os, 'umask'), 'test needs os.umask()')
@unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
def test_file_perms(self):
# From bug #3228, we want to verify that the mailbox file isn't executable,
# even if the umask is set to something that would leave executable bits set.
# We only run this test on platforms that support umask.
try:
old_umask = os.umask(0o077)
self._box.close()
os.unlink(self._path)
self._box = mailbox.mbox(self._path, create=True)
self._box.add('')
self._box.close()
finally:
os.umask(old_umask)
st = os.stat(self._path)
perms = st.st_mode
self.assertFalse((perms & 0o111)) # Execute bits should all be off.
def test_terminating_newline(self):
message = email.message.Message()
message['From'] = '[email protected]'
message.set_payload('No newline at the end')
i = self._box.add(message)
# A newline should have been appended to the payload
message = self._box.get(i)
self.assertEqual(message.get_payload(), 'No newline at the end\n')
def test_message_separator(self):
# Check there's always a single blank line after each message
self._box.add('From: foo\n\n0') # No newline at the end
with open(self._path) as f:
data = f.read()
self.assertEqual(data[-3:], '0\n\n')
self._box.add('From: foo\n\n0\n') # Newline at the end
with open(self._path) as f:
data = f.read()
self.assertEqual(data[-3:], '0\n\n')
class TestMMDF(_TestMboxMMDF, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.MMDF(path, factory)
class TestMH(TestMailbox, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.MH(path, factory)
def assertMailboxEmpty(self):
self.assertEqual(os.listdir(self._path), ['.mh_sequences'])
def test_list_folders(self):
# List folders
self._box.add_folder('one')
self._box.add_folder('two')
self._box.add_folder('three')
self.assertEqual(len(self._box.list_folders()), 3)
self.assertEqual(set(self._box.list_folders()),
set(('one', 'two', 'three')))
def test_get_folder(self):
# Open folders
        def dummy_factory(s):
return None
self._box = self._factory(self._path, dummy_factory)
new_folder = self._box.add_folder('foo.bar')
folder0 = self._box.get_folder('foo.bar')
folder0.add(self._template % 'bar')
self.assertTrue(os.path.isdir(os.path.join(self._path, 'foo.bar')))
folder1 = self._box.get_folder('foo.bar')
self.assertEqual(folder1.get_string(folder1.keys()[0]),
self._template % 'bar')
# Test for bug #1569790: verify that folders returned by .get_folder()
# use the same factory function.
self.assertIs(new_folder._factory, self._box._factory)
self.assertIs(folder0._factory, self._box._factory)
def test_add_and_remove_folders(self):
# Delete folders
self._box.add_folder('one')
self._box.add_folder('two')
self.assertEqual(len(self._box.list_folders()), 2)
self.assertEqual(set(self._box.list_folders()), set(('one', 'two')))
self._box.remove_folder('one')
self.assertEqual(len(self._box.list_folders()), 1)
self.assertEqual(set(self._box.list_folders()), set(('two',)))
self._box.add_folder('three')
self.assertEqual(len(self._box.list_folders()), 2)
self.assertEqual(set(self._box.list_folders()), set(('two', 'three')))
self._box.remove_folder('three')
self.assertEqual(len(self._box.list_folders()), 1)
self.assertEqual(set(self._box.list_folders()), set(('two',)))
self._box.remove_folder('two')
self.assertEqual(len(self._box.list_folders()), 0)
self.assertEqual(self._box.list_folders(), [])
def test_sequences(self):
# Get and set sequences
self.assertEqual(self._box.get_sequences(), {})
msg0 = mailbox.MHMessage(self._template % 0)
msg0.add_sequence('foo')
key0 = self._box.add(msg0)
self.assertEqual(self._box.get_sequences(), {'foo':[key0]})
msg1 = mailbox.MHMessage(self._template % 1)
msg1.set_sequences(['bar', 'replied', 'foo'])
key1 = self._box.add(msg1)
self.assertEqual(self._box.get_sequences(),
{'foo':[key0, key1], 'bar':[key1], 'replied':[key1]})
msg0.set_sequences(['flagged'])
self._box[key0] = msg0
self.assertEqual(self._box.get_sequences(),
{'foo':[key1], 'bar':[key1], 'replied':[key1],
'flagged':[key0]})
self._box.remove(key1)
self.assertEqual(self._box.get_sequences(), {'flagged':[key0]})
def test_issue2625(self):
msg0 = mailbox.MHMessage(self._template % 0)
msg0.add_sequence('foo')
key0 = self._box.add(msg0)
refmsg0 = self._box.get_message(key0)
def test_issue7627(self):
msg0 = mailbox.MHMessage(self._template % 0)
key0 = self._box.add(msg0)
self._box.lock()
self._box.remove(key0)
self._box.unlock()
def test_pack(self):
# Pack the contents of the mailbox
msg0 = mailbox.MHMessage(self._template % 0)
msg1 = mailbox.MHMessage(self._template % 1)
msg2 = mailbox.MHMessage(self._template % 2)
msg3 = mailbox.MHMessage(self._template % 3)
msg0.set_sequences(['foo', 'unseen'])
msg1.set_sequences(['foo'])
msg2.set_sequences(['foo', 'flagged'])
msg3.set_sequences(['foo', 'bar', 'replied'])
key0 = self._box.add(msg0)
key1 = self._box.add(msg1)
key2 = self._box.add(msg2)
key3 = self._box.add(msg3)
self.assertEqual(self._box.get_sequences(),
{'foo':[key0,key1,key2,key3], 'unseen':[key0],
'flagged':[key2], 'bar':[key3], 'replied':[key3]})
self._box.remove(key2)
self.assertEqual(self._box.get_sequences(),
{'foo':[key0,key1,key3], 'unseen':[key0], 'bar':[key3],
'replied':[key3]})
self._box.pack()
self.assertEqual(self._box.keys(), [1, 2, 3])
key0 = key0
key1 = key0 + 1
key2 = key1 + 1
self.assertEqual(self._box.get_sequences(),
{'foo':[1, 2, 3], 'unseen':[1], 'bar':[3], 'replied':[3]})
# Test case for packing while holding the mailbox locked.
key0 = self._box.add(msg1)
key1 = self._box.add(msg1)
key2 = self._box.add(msg1)
key3 = self._box.add(msg1)
self._box.remove(key0)
self._box.remove(key2)
self._box.lock()
self._box.pack()
self._box.unlock()
self.assertEqual(self._box.get_sequences(),
{'foo':[1, 2, 3, 4, 5],
'unseen':[1], 'bar':[3], 'replied':[3]})
def _get_lock_path(self):
return os.path.join(self._path, '.mh_sequences.lock')
class TestBabyl(_TestSingleFile, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.Babyl(path, factory)
def assertMailboxEmpty(self):
with open(self._path) as f:
self.assertEqual(f.readlines(), [])
def tearDown(self):
super().tearDown()
self._box.close()
self._delete_recursively(self._path)
for lock_remnant in glob.glob(self._path + '.*'):
support.unlink(lock_remnant)
def test_labels(self):
# Get labels from the mailbox
self.assertEqual(self._box.get_labels(), [])
msg0 = mailbox.BabylMessage(self._template % 0)
msg0.add_label('foo')
key0 = self._box.add(msg0)
self.assertEqual(self._box.get_labels(), ['foo'])
msg1 = mailbox.BabylMessage(self._template % 1)
msg1.set_labels(['bar', 'answered', 'foo'])
key1 = self._box.add(msg1)
self.assertEqual(set(self._box.get_labels()), set(['foo', 'bar']))
msg0.set_labels(['blah', 'filed'])
self._box[key0] = msg0
self.assertEqual(set(self._box.get_labels()),
set(['foo', 'bar', 'blah']))
self._box.remove(key1)
self.assertEqual(set(self._box.get_labels()), set(['blah']))
class FakeFileLikeObject:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
class FakeMailBox(mailbox.Mailbox):
def __init__(self):
mailbox.Mailbox.__init__(self, '', lambda file: None)
self.files = [FakeFileLikeObject() for i in range(10)]
def get_file(self, key):
return self.files[key]
class TestFakeMailBox(unittest.TestCase):
def test_closing_fd(self):
box = FakeMailBox()
for i in range(10):
self.assertFalse(box.files[i].closed)
for i in range(10):
box[i]
for i in range(10):
self.assertTrue(box.files[i].closed)
class TestMessage(TestBase, unittest.TestCase):
_factory = mailbox.Message # Overridden by subclasses to reuse tests
def setUp(self):
self._path = support.TESTFN
def tearDown(self):
self._delete_recursively(self._path)
def test_initialize_with_eMM(self):
# Initialize based on email.message.Message instance
eMM = email.message_from_string(_sample_message)
msg = self._factory(eMM)
self._post_initialize_hook(msg)
self._check_sample(msg)
def test_initialize_with_string(self):
# Initialize based on string
msg = self._factory(_sample_message)
self._post_initialize_hook(msg)
self._check_sample(msg)
def test_initialize_with_file(self):
# Initialize based on contents of file
with open(self._path, 'w+') as f:
f.write(_sample_message)
f.seek(0)
msg = self._factory(f)
self._post_initialize_hook(msg)
self._check_sample(msg)
def test_initialize_with_binary_file(self):
# Initialize based on contents of binary file
with open(self._path, 'wb+') as f:
f.write(_bytes_sample_message)
f.seek(0)
msg = self._factory(f)
self._post_initialize_hook(msg)
self._check_sample(msg)
def test_initialize_with_nothing(self):
# Initialize without arguments
msg = self._factory()
self._post_initialize_hook(msg)
self.assertIsInstance(msg, email.message.Message)
self.assertIsInstance(msg, mailbox.Message)
self.assertIsInstance(msg, self._factory)
self.assertEqual(msg.keys(), [])
self.assertFalse(msg.is_multipart())
self.assertIsNone(msg.get_payload())
def test_initialize_incorrectly(self):
# Initialize with invalid argument
self.assertRaises(TypeError, lambda: self._factory(object()))
    def test_all_eMM_attributes_exist(self):
# Issue 12537
eMM = email.message_from_string(_sample_message)
msg = self._factory(_sample_message)
for attr in eMM.__dict__:
self.assertIn(attr, msg.__dict__,
'{} attribute does not exist'.format(attr))
def test_become_message(self):
# Take on the state of another message
eMM = email.message_from_string(_sample_message)
msg = self._factory()
msg._become_message(eMM)
self._check_sample(msg)
def test_explain_to(self):
# Copy self's format-specific data to other message formats.
# This test is superficial; better ones are in TestMessageConversion.
msg = self._factory()
for class_ in self.all_mailbox_types:
other_msg = class_()
msg._explain_to(other_msg)
other_msg = email.message.Message()
self.assertRaises(TypeError, lambda: msg._explain_to(other_msg))
def _post_initialize_hook(self, msg):
# Overridden by subclasses to check extra things after initialization
pass
class TestMaildirMessage(TestMessage, unittest.TestCase):
_factory = mailbox.MaildirMessage
def _post_initialize_hook(self, msg):
self.assertEqual(msg._subdir, 'new')
self.assertEqual(msg._info, '')
def test_subdir(self):
# Use get_subdir() and set_subdir()
msg = mailbox.MaildirMessage(_sample_message)
self.assertEqual(msg.get_subdir(), 'new')
msg.set_subdir('cur')
self.assertEqual(msg.get_subdir(), 'cur')
msg.set_subdir('new')
self.assertEqual(msg.get_subdir(), 'new')
self.assertRaises(ValueError, lambda: msg.set_subdir('tmp'))
self.assertEqual(msg.get_subdir(), 'new')
msg.set_subdir('new')
self.assertEqual(msg.get_subdir(), 'new')
self._check_sample(msg)
def test_flags(self):
# Use get_flags(), set_flags(), add_flag(), remove_flag()
msg = mailbox.MaildirMessage(_sample_message)
self.assertEqual(msg.get_flags(), '')
self.assertEqual(msg.get_subdir(), 'new')
msg.set_flags('F')
self.assertEqual(msg.get_subdir(), 'new')
self.assertEqual(msg.get_flags(), 'F')
msg.set_flags('SDTP')
self.assertEqual(msg.get_flags(), 'DPST')
msg.add_flag('FT')
self.assertEqual(msg.get_flags(), 'DFPST')
msg.remove_flag('TDRP')
self.assertEqual(msg.get_flags(), 'FS')
self.assertEqual(msg.get_subdir(), 'new')
self._check_sample(msg)
def test_date(self):
# Use get_date() and set_date()
msg = mailbox.MaildirMessage(_sample_message)
self.assertLess(abs(msg.get_date() - time.time()), 60)
msg.set_date(0.0)
self.assertEqual(msg.get_date(), 0.0)
def test_info(self):
# Use get_info() and set_info()
msg = mailbox.MaildirMessage(_sample_message)
self.assertEqual(msg.get_info(), '')
msg.set_info('1,foo=bar')
self.assertEqual(msg.get_info(), '1,foo=bar')
self.assertRaises(TypeError, lambda: msg.set_info(None))
self._check_sample(msg)
def test_info_and_flags(self):
# Test interaction of info and flag methods
msg = mailbox.MaildirMessage(_sample_message)
self.assertEqual(msg.get_info(), '')
msg.set_flags('SF')
self.assertEqual(msg.get_flags(), 'FS')
self.assertEqual(msg.get_info(), '2,FS')
msg.set_info('1,')
self.assertEqual(msg.get_flags(), '')
self.assertEqual(msg.get_info(), '1,')
msg.remove_flag('RPT')
self.assertEqual(msg.get_flags(), '')
self.assertEqual(msg.get_info(), '1,')
msg.add_flag('D')
self.assertEqual(msg.get_flags(), 'D')
self.assertEqual(msg.get_info(), '2,D')
self._check_sample(msg)
class _TestMboxMMDFMessage:
_factory = mailbox._mboxMMDFMessage
def _post_initialize_hook(self, msg):
self._check_from(msg)
def test_initialize_with_unixfrom(self):
# Initialize with a message that already has a _unixfrom attribute
msg = mailbox.Message(_sample_message)
msg.set_unixfrom('From foo@bar blah')
msg = mailbox.mboxMessage(msg)
self.assertEqual(msg.get_from(), 'foo@bar blah', msg.get_from())
def test_from(self):
# Get and set "From " line
msg = mailbox.mboxMessage(_sample_message)
self._check_from(msg)
msg.set_from('foo bar')
self.assertEqual(msg.get_from(), 'foo bar')
msg.set_from('foo@bar', True)
self._check_from(msg, 'foo@bar')
msg.set_from('blah@temp', time.localtime())
self._check_from(msg, 'blah@temp')
def test_flags(self):
# Use get_flags(), set_flags(), add_flag(), remove_flag()
msg = mailbox.mboxMessage(_sample_message)
self.assertEqual(msg.get_flags(), '')
msg.set_flags('F')
self.assertEqual(msg.get_flags(), 'F')
msg.set_flags('XODR')
self.assertEqual(msg.get_flags(), 'RODX')
msg.add_flag('FA')
self.assertEqual(msg.get_flags(), 'RODFAX')
msg.remove_flag('FDXA')
self.assertEqual(msg.get_flags(), 'RO')
self._check_sample(msg)
def _check_from(self, msg, sender=None):
# Check contents of "From " line
if sender is None:
sender = "MAILER-DAEMON"
self.assertIsNotNone(re.match(
sender + r" \w{3} \w{3} [\d ]\d [\d ]\d:\d{2}:\d{2} \d{4}",
msg.get_from()))
class TestMboxMessage(_TestMboxMMDFMessage, TestMessage):
_factory = mailbox.mboxMessage
class TestMHMessage(TestMessage, unittest.TestCase):
_factory = mailbox.MHMessage
def _post_initialize_hook(self, msg):
self.assertEqual(msg._sequences, [])
def test_sequences(self):
# Get, set, join, and leave sequences
msg = mailbox.MHMessage(_sample_message)
self.assertEqual(msg.get_sequences(), [])
msg.set_sequences(['foobar'])
self.assertEqual(msg.get_sequences(), ['foobar'])
msg.set_sequences([])
self.assertEqual(msg.get_sequences(), [])
msg.add_sequence('unseen')
self.assertEqual(msg.get_sequences(), ['unseen'])
msg.add_sequence('flagged')
self.assertEqual(msg.get_sequences(), ['unseen', 'flagged'])
msg.add_sequence('flagged')
self.assertEqual(msg.get_sequences(), ['unseen', 'flagged'])
msg.remove_sequence('unseen')
self.assertEqual(msg.get_sequences(), ['flagged'])
msg.add_sequence('foobar')
self.assertEqual(msg.get_sequences(), ['flagged', 'foobar'])
msg.remove_sequence('replied')
self.assertEqual(msg.get_sequences(), ['flagged', 'foobar'])
msg.set_sequences(['foobar', 'replied'])
self.assertEqual(msg.get_sequences(), ['foobar', 'replied'])
class TestBabylMessage(TestMessage, unittest.TestCase):
_factory = mailbox.BabylMessage
def _post_initialize_hook(self, msg):
self.assertEqual(msg._labels, [])
def test_labels(self):
# Get, set, join, and leave labels
msg = mailbox.BabylMessage(_sample_message)
self.assertEqual(msg.get_labels(), [])
msg.set_labels(['foobar'])
self.assertEqual(msg.get_labels(), ['foobar'])
msg.set_labels([])
self.assertEqual(msg.get_labels(), [])
msg.add_label('filed')
self.assertEqual(msg.get_labels(), ['filed'])
msg.add_label('resent')
self.assertEqual(msg.get_labels(), ['filed', 'resent'])
msg.add_label('resent')
self.assertEqual(msg.get_labels(), ['filed', 'resent'])
msg.remove_label('filed')
self.assertEqual(msg.get_labels(), ['resent'])
msg.add_label('foobar')
self.assertEqual(msg.get_labels(), ['resent', 'foobar'])
msg.remove_label('unseen')
self.assertEqual(msg.get_labels(), ['resent', 'foobar'])
msg.set_labels(['foobar', 'answered'])
self.assertEqual(msg.get_labels(), ['foobar', 'answered'])
def test_visible(self):
# Get, set, and update visible headers
msg = mailbox.BabylMessage(_sample_message)
visible = msg.get_visible()
self.assertEqual(visible.keys(), [])
self.assertIsNone(visible.get_payload())
visible['User-Agent'] = 'FooBar 1.0'
visible['X-Whatever'] = 'Blah'
self.assertEqual(msg.get_visible().keys(), [])
msg.set_visible(visible)
visible = msg.get_visible()
self.assertEqual(visible.keys(), ['User-Agent', 'X-Whatever'])
self.assertEqual(visible['User-Agent'], 'FooBar 1.0')
self.assertEqual(visible['X-Whatever'], 'Blah')
self.assertIsNone(visible.get_payload())
msg.update_visible()
self.assertEqual(visible.keys(), ['User-Agent', 'X-Whatever'])
self.assertIsNone(visible.get_payload())
visible = msg.get_visible()
self.assertEqual(visible.keys(), ['User-Agent', 'Date', 'From', 'To',
'Subject'])
for header in ('User-Agent', 'Date', 'From', 'To', 'Subject'):
self.assertEqual(visible[header], msg[header])
class TestMMDFMessage(_TestMboxMMDFMessage, TestMessage):
_factory = mailbox.MMDFMessage
class TestMessageConversion(TestBase, unittest.TestCase):
def test_plain_to_x(self):
# Convert Message to all formats
for class_ in self.all_mailbox_types:
msg_plain = mailbox.Message(_sample_message)
msg = class_(msg_plain)
self._check_sample(msg)
def test_x_to_plain(self):
# Convert all formats to Message
for class_ in self.all_mailbox_types:
msg = class_(_sample_message)
msg_plain = mailbox.Message(msg)
self._check_sample(msg_plain)
def test_x_from_bytes(self):
# Convert all formats to Message
for class_ in self.all_mailbox_types:
msg = class_(_bytes_sample_message)
self._check_sample(msg)
def test_x_to_invalid(self):
# Convert all formats to an invalid format
for class_ in self.all_mailbox_types:
self.assertRaises(TypeError, lambda: class_(False))
def test_type_specific_attributes_removed_on_conversion(self):
reference = {class_: class_(_sample_message).__dict__
for class_ in self.all_mailbox_types}
for class1 in self.all_mailbox_types:
for class2 in self.all_mailbox_types:
if class1 is class2:
continue
source = class1(_sample_message)
target = class2(source)
type_specific = [a for a in reference[class1]
if a not in reference[class2]]
for attr in type_specific:
self.assertNotIn(attr, target.__dict__,
"while converting {} to {}".format(class1, class2))
def test_maildir_to_maildir(self):
# Convert MaildirMessage to MaildirMessage
msg_maildir = mailbox.MaildirMessage(_sample_message)
msg_maildir.set_flags('DFPRST')
msg_maildir.set_subdir('cur')
date = msg_maildir.get_date()
msg = mailbox.MaildirMessage(msg_maildir)
self._check_sample(msg)
self.assertEqual(msg.get_flags(), 'DFPRST')
self.assertEqual(msg.get_subdir(), 'cur')
self.assertEqual(msg.get_date(), date)
def test_maildir_to_mboxmmdf(self):
# Convert MaildirMessage to mboxmessage and MMDFMessage
pairs = (('D', ''), ('F', 'F'), ('P', ''), ('R', 'A'), ('S', 'R'),
('T', 'D'), ('DFPRST', 'RDFA'))
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg_maildir = mailbox.MaildirMessage(_sample_message)
msg_maildir.set_date(0.0)
for setting, result in pairs:
msg_maildir.set_flags(setting)
msg = class_(msg_maildir)
self.assertEqual(msg.get_flags(), result)
self.assertEqual(msg.get_from(), 'MAILER-DAEMON %s' %
time.asctime(time.gmtime(0.0)))
msg_maildir.set_subdir('cur')
self.assertEqual(class_(msg_maildir).get_flags(), 'RODFA')
def test_maildir_to_mh(self):
# Convert MaildirMessage to MHMessage
msg_maildir = mailbox.MaildirMessage(_sample_message)
pairs = (('D', ['unseen']), ('F', ['unseen', 'flagged']),
('P', ['unseen']), ('R', ['unseen', 'replied']), ('S', []),
('T', ['unseen']), ('DFPRST', ['replied', 'flagged']))
for setting, result in pairs:
msg_maildir.set_flags(setting)
self.assertEqual(mailbox.MHMessage(msg_maildir).get_sequences(),
result)
def test_maildir_to_babyl(self):
# Convert MaildirMessage to Babyl
msg_maildir = mailbox.MaildirMessage(_sample_message)
pairs = (('D', ['unseen']), ('F', ['unseen']),
('P', ['unseen', 'forwarded']), ('R', ['unseen', 'answered']),
('S', []), ('T', ['unseen', 'deleted']),
('DFPRST', ['deleted', 'answered', 'forwarded']))
for setting, result in pairs:
msg_maildir.set_flags(setting)
self.assertEqual(mailbox.BabylMessage(msg_maildir).get_labels(),
result)
def test_mboxmmdf_to_maildir(self):
# Convert mboxMessage and MMDFMessage to MaildirMessage
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg_mboxMMDF = class_(_sample_message)
msg_mboxMMDF.set_from('foo@bar', time.gmtime(0.0))
pairs = (('R', 'S'), ('O', ''), ('D', 'T'), ('F', 'F'), ('A', 'R'),
('RODFA', 'FRST'))
for setting, result in pairs:
msg_mboxMMDF.set_flags(setting)
msg = mailbox.MaildirMessage(msg_mboxMMDF)
self.assertEqual(msg.get_flags(), result)
self.assertEqual(msg.get_date(), 0.0)
msg_mboxMMDF.set_flags('O')
self.assertEqual(mailbox.MaildirMessage(msg_mboxMMDF).get_subdir(),
'cur')
def test_mboxmmdf_to_mboxmmdf(self):
# Convert mboxMessage and MMDFMessage to mboxMessage and MMDFMessage
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg_mboxMMDF = class_(_sample_message)
msg_mboxMMDF.set_flags('RODFA')
msg_mboxMMDF.set_from('foo@bar')
for class2_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg2 = class2_(msg_mboxMMDF)
self.assertEqual(msg2.get_flags(), 'RODFA')
self.assertEqual(msg2.get_from(), 'foo@bar')
def test_mboxmmdf_to_mh(self):
# Convert mboxMessage and MMDFMessage to MHMessage
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg_mboxMMDF = class_(_sample_message)
pairs = (('R', []), ('O', ['unseen']), ('D', ['unseen']),
('F', ['unseen', 'flagged']),
('A', ['unseen', 'replied']),
('RODFA', ['replied', 'flagged']))
for setting, result in pairs:
msg_mboxMMDF.set_flags(setting)
self.assertEqual(mailbox.MHMessage(msg_mboxMMDF).get_sequences(),
result)
def test_mboxmmdf_to_babyl(self):
# Convert mboxMessage and MMDFMessage to BabylMessage
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg = class_(_sample_message)
pairs = (('R', []), ('O', ['unseen']),
('D', ['unseen', 'deleted']), ('F', ['unseen']),
('A', ['unseen', 'answered']),
('RODFA', ['deleted', 'answered']))
for setting, result in pairs:
msg.set_flags(setting)
self.assertEqual(mailbox.BabylMessage(msg).get_labels(), result)
def test_mh_to_maildir(self):
# Convert MHMessage to MaildirMessage
pairs = (('unseen', ''), ('replied', 'RS'), ('flagged', 'FS'))
for setting, result in pairs:
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence(setting)
self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), result)
self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur')
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence('unseen')
msg.add_sequence('replied')
msg.add_sequence('flagged')
self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), 'FR')
self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur')
def test_mh_to_mboxmmdf(self):
# Convert MHMessage to mboxMessage and MMDFMessage
pairs = (('unseen', 'O'), ('replied', 'ROA'), ('flagged', 'ROF'))
for setting, result in pairs:
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence(setting)
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
self.assertEqual(class_(msg).get_flags(), result)
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence('unseen')
msg.add_sequence('replied')
msg.add_sequence('flagged')
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
self.assertEqual(class_(msg).get_flags(), 'OFA')
def test_mh_to_mh(self):
# Convert MHMessage to MHMessage
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence('unseen')
msg.add_sequence('replied')
msg.add_sequence('flagged')
self.assertEqual(mailbox.MHMessage(msg).get_sequences(),
['unseen', 'replied', 'flagged'])
def test_mh_to_babyl(self):
# Convert MHMessage to BabylMessage
pairs = (('unseen', ['unseen']), ('replied', ['answered']),
('flagged', []))
for setting, result in pairs:
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence(setting)
self.assertEqual(mailbox.BabylMessage(msg).get_labels(), result)
msg = mailbox.MHMessage(_sample_message)
msg.add_sequence('unseen')
msg.add_sequence('replied')
msg.add_sequence('flagged')
self.assertEqual(mailbox.BabylMessage(msg).get_labels(),
['unseen', 'answered'])
def test_babyl_to_maildir(self):
# Convert BabylMessage to MaildirMessage
pairs = (('unseen', ''), ('deleted', 'ST'), ('filed', 'S'),
('answered', 'RS'), ('forwarded', 'PS'), ('edited', 'S'),
('resent', 'PS'))
for setting, result in pairs:
msg = mailbox.BabylMessage(_sample_message)
msg.add_label(setting)
self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), result)
self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur')
msg = mailbox.BabylMessage(_sample_message)
for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
'edited', 'resent'):
msg.add_label(label)
self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), 'PRT')
self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur')
def test_babyl_to_mboxmmdf(self):
# Convert BabylMessage to mboxMessage and MMDFMessage
pairs = (('unseen', 'O'), ('deleted', 'ROD'), ('filed', 'RO'),
('answered', 'ROA'), ('forwarded', 'RO'), ('edited', 'RO'),
('resent', 'RO'))
for setting, result in pairs:
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
msg = mailbox.BabylMessage(_sample_message)
msg.add_label(setting)
self.assertEqual(class_(msg).get_flags(), result)
msg = mailbox.BabylMessage(_sample_message)
for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
'edited', 'resent'):
msg.add_label(label)
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
self.assertEqual(class_(msg).get_flags(), 'ODA')
def test_babyl_to_mh(self):
# Convert BabylMessage to MHMessage
pairs = (('unseen', ['unseen']), ('deleted', []), ('filed', []),
('answered', ['replied']), ('forwarded', []), ('edited', []),
('resent', []))
for setting, result in pairs:
msg = mailbox.BabylMessage(_sample_message)
msg.add_label(setting)
self.assertEqual(mailbox.MHMessage(msg).get_sequences(), result)
msg = mailbox.BabylMessage(_sample_message)
for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
'edited', 'resent'):
msg.add_label(label)
self.assertEqual(mailbox.MHMessage(msg).get_sequences(),
['unseen', 'replied'])
def test_babyl_to_babyl(self):
# Convert BabylMessage to BabylMessage
msg = mailbox.BabylMessage(_sample_message)
msg.update_visible()
for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
'edited', 'resent'):
msg.add_label(label)
msg2 = mailbox.BabylMessage(msg)
self.assertEqual(msg2.get_labels(), ['unseen', 'deleted', 'filed',
'answered', 'forwarded', 'edited',
'resent'])
self.assertEqual(msg.get_visible().keys(), msg2.get_visible().keys())
for key in msg.get_visible().keys():
self.assertEqual(msg.get_visible()[key], msg2.get_visible()[key])
class TestProxyFileBase(TestBase):
def _test_read(self, proxy):
# Read by byte
proxy.seek(0)
self.assertEqual(proxy.read(), b'bar')
proxy.seek(1)
self.assertEqual(proxy.read(), b'ar')
proxy.seek(0)
self.assertEqual(proxy.read(2), b'ba')
proxy.seek(1)
self.assertEqual(proxy.read(-1), b'ar')
proxy.seek(2)
self.assertEqual(proxy.read(1000), b'r')
def _test_readline(self, proxy):
# Read by line
linesep = os.linesep.encode()
proxy.seek(0)
self.assertEqual(proxy.readline(), b'foo' + linesep)
self.assertEqual(proxy.readline(), b'bar' + linesep)
self.assertEqual(proxy.readline(), b'fred' + linesep)
self.assertEqual(proxy.readline(), b'bob')
proxy.seek(2)
self.assertEqual(proxy.readline(), b'o' + linesep)
proxy.seek(6 + 2 * len(os.linesep))
self.assertEqual(proxy.readline(), b'fred' + linesep)
proxy.seek(6 + 2 * len(os.linesep))
self.assertEqual(proxy.readline(2), b'fr')
self.assertEqual(proxy.readline(-10), b'ed' + linesep)
def _test_readlines(self, proxy):
# Read multiple lines
linesep = os.linesep.encode()
proxy.seek(0)
self.assertEqual(proxy.readlines(), [b'foo' + linesep,
b'bar' + linesep,
b'fred' + linesep, b'bob'])
proxy.seek(0)
self.assertEqual(proxy.readlines(2), [b'foo' + linesep])
proxy.seek(3 + len(linesep))
self.assertEqual(proxy.readlines(4 + len(linesep)),
[b'bar' + linesep, b'fred' + linesep])
proxy.seek(3)
self.assertEqual(proxy.readlines(1000), [linesep, b'bar' + linesep,
b'fred' + linesep, b'bob'])
def _test_iteration(self, proxy):
# Iterate by line
linesep = os.linesep.encode()
proxy.seek(0)
iterator = iter(proxy)
self.assertEqual(next(iterator), b'foo' + linesep)
self.assertEqual(next(iterator), b'bar' + linesep)
self.assertEqual(next(iterator), b'fred' + linesep)
self.assertEqual(next(iterator), b'bob')
self.assertRaises(StopIteration, next, iterator)
def _test_seek_and_tell(self, proxy):
# Seek and use tell to check position
linesep = os.linesep.encode()
proxy.seek(3)
self.assertEqual(proxy.tell(), 3)
self.assertEqual(proxy.read(len(linesep)), linesep)
proxy.seek(2, 1)
self.assertEqual(proxy.read(1 + len(linesep)), b'r' + linesep)
proxy.seek(-3 - len(linesep), 2)
self.assertEqual(proxy.read(3), b'bar')
proxy.seek(2, 0)
self.assertEqual(proxy.read(), b'o' + linesep + b'bar' + linesep)
proxy.seek(100)
self.assertFalse(proxy.read())
def _test_close(self, proxy):
# Close a file
self.assertFalse(proxy.closed)
proxy.close()
self.assertTrue(proxy.closed)
# Issue 11700 subsequent closes should be a no-op.
proxy.close()
self.assertTrue(proxy.closed)
class TestProxyFile(TestProxyFileBase, unittest.TestCase):
def setUp(self):
self._path = support.TESTFN
self._file = open(self._path, 'wb+')
def tearDown(self):
self._file.close()
self._delete_recursively(self._path)
def test_initialize(self):
# Initialize and check position
self._file.write(b'foo')
pos = self._file.tell()
proxy0 = mailbox._ProxyFile(self._file)
self.assertEqual(proxy0.tell(), pos)
self.assertEqual(self._file.tell(), pos)
proxy1 = mailbox._ProxyFile(self._file, 0)
self.assertEqual(proxy1.tell(), 0)
self.assertEqual(self._file.tell(), pos)
def test_read(self):
self._file.write(b'bar')
self._test_read(mailbox._ProxyFile(self._file))
def test_readline(self):
self._file.write(bytes('foo%sbar%sfred%sbob' % (os.linesep, os.linesep,
os.linesep), 'ascii'))
self._test_readline(mailbox._ProxyFile(self._file))
def test_readlines(self):
self._file.write(bytes('foo%sbar%sfred%sbob' % (os.linesep, os.linesep,
os.linesep), 'ascii'))
self._test_readlines(mailbox._ProxyFile(self._file))
def test_iteration(self):
self._file.write(bytes('foo%sbar%sfred%sbob' % (os.linesep, os.linesep,
os.linesep), 'ascii'))
self._test_iteration(mailbox._ProxyFile(self._file))
def test_seek_and_tell(self):
self._file.write(bytes('foo%sbar%s' % (os.linesep, os.linesep), 'ascii'))
self._test_seek_and_tell(mailbox._ProxyFile(self._file))
def test_close(self):
self._file.write(bytes('foo%sbar%s' % (os.linesep, os.linesep), 'ascii'))
self._test_close(mailbox._ProxyFile(self._file))
class TestPartialFile(TestProxyFileBase, unittest.TestCase):
def setUp(self):
self._path = support.TESTFN
self._file = open(self._path, 'wb+')
def tearDown(self):
self._file.close()
self._delete_recursively(self._path)
def test_initialize(self):
# Initialize and check position
self._file.write(bytes('foo' + os.linesep + 'bar', 'ascii'))
pos = self._file.tell()
proxy = mailbox._PartialFile(self._file, 2, 5)
self.assertEqual(proxy.tell(), 0)
self.assertEqual(self._file.tell(), pos)
def test_read(self):
self._file.write(bytes('***bar***', 'ascii'))
self._test_read(mailbox._PartialFile(self._file, 3, 6))
def test_readline(self):
self._file.write(bytes('!!!!!foo%sbar%sfred%sbob!!!!!' %
(os.linesep, os.linesep, os.linesep), 'ascii'))
self._test_readline(mailbox._PartialFile(self._file, 5,
18 + 3 * len(os.linesep)))
def test_readlines(self):
self._file.write(bytes('foo%sbar%sfred%sbob?????' %
(os.linesep, os.linesep, os.linesep), 'ascii'))
self._test_readlines(mailbox._PartialFile(self._file, 0,
13 + 3 * len(os.linesep)))
def test_iteration(self):
self._file.write(bytes('____foo%sbar%sfred%sbob####' %
(os.linesep, os.linesep, os.linesep), 'ascii'))
self._test_iteration(mailbox._PartialFile(self._file, 4,
17 + 3 * len(os.linesep)))
def test_seek_and_tell(self):
self._file.write(bytes('(((foo%sbar%s$$$' % (os.linesep, os.linesep), 'ascii'))
self._test_seek_and_tell(mailbox._PartialFile(self._file, 3,
9 + 2 * len(os.linesep)))
def test_close(self):
self._file.write(bytes('&foo%sbar%s^' % (os.linesep, os.linesep), 'ascii'))
self._test_close(mailbox._PartialFile(self._file, 1,
6 + 3 * len(os.linesep)))
## Start: tests from the original module (for backward compatibility).
FROM_ = "From [email protected] Sat Jul 24 13:43:35 2004\n"
DUMMY_MESSAGE = """\
From: [email protected]
To: [email protected]
Subject: Simple Test
This is a dummy message.
"""
class MaildirTestCase(unittest.TestCase):
def setUp(self):
# create a new maildir mailbox to work with:
self._dir = support.TESTFN
if os.path.isdir(self._dir):
support.rmtree(self._dir)
elif os.path.isfile(self._dir):
support.unlink(self._dir)
os.mkdir(self._dir)
os.mkdir(os.path.join(self._dir, "cur"))
os.mkdir(os.path.join(self._dir, "tmp"))
os.mkdir(os.path.join(self._dir, "new"))
self._counter = 1
self._msgfiles = []
def tearDown(self):
list(map(os.unlink, self._msgfiles))
support.rmdir(os.path.join(self._dir, "cur"))
support.rmdir(os.path.join(self._dir, "tmp"))
support.rmdir(os.path.join(self._dir, "new"))
support.rmdir(self._dir)
def createMessage(self, dir, mbox=False):
t = int(time.time() % 1000000)
pid = self._counter
self._counter += 1
filename = ".".join((str(t), str(pid), "myhostname", "mydomain"))
tmpname = os.path.join(self._dir, "tmp", filename)
newname = os.path.join(self._dir, dir, filename)
with open(tmpname, "w") as fp:
self._msgfiles.append(tmpname)
if mbox:
fp.write(FROM_)
fp.write(DUMMY_MESSAGE)
if hasattr(os, "link"):
os.link(tmpname, newname)
else:
with open(newname, "w") as fp:
fp.write(DUMMY_MESSAGE)
self._msgfiles.append(newname)
return tmpname
def test_empty_maildir(self):
"""Test an empty maildir mailbox"""
# Test for regression on bug #117490:
# Make sure the boxes attribute actually gets set.
self.mbox = mailbox.Maildir(support.TESTFN)
#self.assertTrue(hasattr(self.mbox, "boxes"))
#self.assertEqual(len(self.mbox.boxes), 0)
self.assertIsNone(self.mbox.next())
self.assertIsNone(self.mbox.next())
def test_nonempty_maildir_cur(self):
self.createMessage("cur")
self.mbox = mailbox.Maildir(support.TESTFN)
#self.assertEqual(len(self.mbox.boxes), 1)
self.assertIsNotNone(self.mbox.next())
self.assertIsNone(self.mbox.next())
self.assertIsNone(self.mbox.next())
def test_nonempty_maildir_new(self):
self.createMessage("new")
self.mbox = mailbox.Maildir(support.TESTFN)
#self.assertEqual(len(self.mbox.boxes), 1)
self.assertIsNotNone(self.mbox.next())
self.assertIsNone(self.mbox.next())
self.assertIsNone(self.mbox.next())
def test_nonempty_maildir_both(self):
self.createMessage("cur")
self.createMessage("new")
self.mbox = mailbox.Maildir(support.TESTFN)
#self.assertEqual(len(self.mbox.boxes), 2)
self.assertIsNotNone(self.mbox.next())
self.assertIsNotNone(self.mbox.next())
self.assertIsNone(self.mbox.next())
self.assertIsNone(self.mbox.next())
## End: tests from the original module (for backward compatibility).
_sample_message = """\
Return-Path: <[email protected]>
X-Original-To: gkj+person@localhost
Delivered-To: gkj+person@localhost
Received: from localhost (localhost [127.0.0.1])
by andy.gregorykjohnson.com (Postfix) with ESMTP id 356ED9DD17
for <gkj+person@localhost>; Wed, 13 Jul 2005 17:23:16 -0400 (EDT)
Delivered-To: [email protected]
Received: from localhost [127.0.0.1]
by localhost with POP3 (fetchmail-6.2.5)
for gkj+person@localhost (single-drop); Wed, 13 Jul 2005 17:23:16 -0400 (EDT)
Received: from andy.gregorykjohnson.com (andy.gregorykjohnson.com [64.32.235.228])
by sundance.gregorykjohnson.com (Postfix) with ESMTP id 5B056316746
for <[email protected]>; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)
Received: by andy.gregorykjohnson.com (Postfix, from userid 1000)
id 490CD9DD17; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)
Date: Wed, 13 Jul 2005 17:23:11 -0400
From: "Gregory K. Johnson" <[email protected]>
To: [email protected]
Subject: Sample message
Message-ID: <[email protected]>
Mime-Version: 1.0
Content-Type: multipart/mixed; boundary="NMuMz9nt05w80d4+"
Content-Disposition: inline
User-Agent: Mutt/1.5.9i
--NMuMz9nt05w80d4+
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
This is a sample message.
--
Gregory K. Johnson
--NMuMz9nt05w80d4+
Content-Type: application/octet-stream
Content-Disposition: attachment; filename="text.gz"
Content-Transfer-Encoding: base64
H4sICM2D1UIAA3RleHQAC8nILFYAokSFktSKEoW0zJxUPa7wzJIMhZLyfIWczLzUYj0uAHTs
3FYlAAAA
--NMuMz9nt05w80d4+--
"""
_bytes_sample_message = _sample_message.encode('ascii')
_sample_headers = {
"Return-Path":"<[email protected]>",
"X-Original-To":"gkj+person@localhost",
"Delivered-To":"gkj+person@localhost",
"Received":"""from localhost (localhost [127.0.0.1])
by andy.gregorykjohnson.com (Postfix) with ESMTP id 356ED9DD17
for <gkj+person@localhost>; Wed, 13 Jul 2005 17:23:16 -0400 (EDT)""",
"Delivered-To":"[email protected]",
"Received":"""from localhost [127.0.0.1]
by localhost with POP3 (fetchmail-6.2.5)
for gkj+person@localhost (single-drop); Wed, 13 Jul 2005 17:23:16 -0400 (EDT)""",
"Received":"""from andy.gregorykjohnson.com (andy.gregorykjohnson.com [64.32.235.228])
by sundance.gregorykjohnson.com (Postfix) with ESMTP id 5B056316746
for <[email protected]>; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)""",
"Received":"""by andy.gregorykjohnson.com (Postfix, from userid 1000)
id 490CD9DD17; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)""",
"Date":"Wed, 13 Jul 2005 17:23:11 -0400",
"From":""""Gregory K. Johnson" <[email protected]>""",
"To":"[email protected]",
"Subject":"Sample message",
"Mime-Version":"1.0",
"Content-Type":"""multipart/mixed; boundary="NMuMz9nt05w80d4+\"""",
"Content-Disposition":"inline",
"User-Agent": "Mutt/1.5.9i" }
_sample_payloads = ("""This is a sample message.
--
Gregory K. Johnson
""",
"""H4sICM2D1UIAA3RleHQAC8nILFYAokSFktSKEoW0zJxUPa7wzJIMhZLyfIWczLzUYj0uAHTs
3FYlAAAA
""")
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {"linesep", "fcntl"}
support.check__all__(self, mailbox, blacklist=blacklist)
def test_main():
tests = (TestMailboxSuperclass, TestMaildir, TestMbox, TestMMDF, TestMH,
TestBabyl, TestMessage, TestMaildirMessage, TestMboxMessage,
TestMHMessage, TestBabylMessage, TestMMDFMessage,
TestMessageConversion, TestProxyFile, TestPartialFile,
MaildirTestCase, TestFakeMailBox, MiscTestCase)
support.run_unittest(*tests)
support.reap_children()
if __name__ == '__main__':
test_main()
| mit | 4,246,049,689,262,483,500 | 39.546329 | 90 | 0.582829 | false |
iulian787/spack | lib/spack/spack/test/cmd/versions.py | 1 | 1677 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
from spack.main import SpackCommand
versions = SpackCommand('versions')
def test_safe_only_versions():
"""Only test the safe versions of a package.
(Using the deprecated command line argument)
"""
versions('--safe-only', 'zlib')
def test_safe_versions():
"""Only test the safe versions of a package."""
versions('--safe', 'zlib')
@pytest.mark.network
def test_remote_versions():
"""Test a package for which remote versions should be available."""
versions('zlib')
@pytest.mark.network
def test_remote_versions_only():
"""Test a package for which remote versions should be available."""
versions('--remote', 'zlib')
@pytest.mark.network
@pytest.mark.usefixtures('mock_packages')
def test_new_versions_only():
"""Test a package for which new versions should be available."""
versions('--new', 'brillig')
@pytest.mark.network
def test_no_versions():
"""Test a package for which no remote versions are available."""
versions('converge')
@pytest.mark.network
def test_no_unchecksummed_versions():
"""Test a package for which no unchecksummed versions are available."""
versions('bzip2')
@pytest.mark.network
def test_versions_no_url():
"""Test a package with versions but without a ``url`` attribute."""
versions('graphviz')
@pytest.mark.network
def test_no_versions_no_url():
"""Test a package without versions or a ``url`` attribute."""
versions('opengl')
| lgpl-2.1 | 8,678,284,814,418,964,000 | 21.662162 | 75 | 0.694097 | false |
michaelhidalgo/7WCSQ | Tools/SQLMap/sqlmap/extra/shutils/regressiontest.py | 1 | 5523 | #!/usr/bin/env python
# Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
# See the file 'doc/COPYING' for copying permission
import codecs
import inspect
import os
import re
import smtplib
import subprocess
import sys
import time
import traceback
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
sys.path.append(os.path.normpath("%s/../../" % os.path.dirname(inspect.getfile(inspect.currentframe()))))
from lib.core.revision import getRevisionNumber
START_TIME = time.strftime("%H:%M:%S %d-%m-%Y", time.gmtime())
SQLMAP_HOME = "/opt/sqlmap"
SMTP_SERVER = "127.0.0.1"
SMTP_PORT = 25
SMTP_TIMEOUT = 30
FROM = "[email protected]"
#TO = "[email protected]"
TO = ["[email protected]", "[email protected]"]
SUBJECT = "regression test started on %s using revision %s" % (START_TIME, getRevisionNumber())
TARGET = "debian"
def prepare_email(content):
global FROM
global TO
global SUBJECT
msg = MIMEMultipart()
msg["Subject"] = SUBJECT
msg["From"] = FROM
msg["To"] = TO if isinstance(TO, basestring) else ",".join(TO)
msg.attach(MIMEText(content))
return msg
def send_email(msg):
global SMTP_SERVER
global SMTP_PORT
global SMTP_TIMEOUT
try:
s = smtplib.SMTP(host=SMTP_SERVER, port=SMTP_PORT, timeout=SMTP_TIMEOUT)
s.sendmail(FROM, TO, msg.as_string())
s.quit()
# Catch all for SMTP exceptions
except smtplib.SMTPException, e:
print "Failure to send email: %s" % str(e)
def failure_email(msg):
msg = prepare_email(msg)
send_email(msg)
sys.exit(1)
def main():
global SUBJECT
content = ""
test_counts = []
attachments = {}
updateproc = subprocess.Popen("cd /opt/sqlmap/ ; python /opt/sqlmap/sqlmap.py --update", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = updateproc.communicate()
if stderr:
failure_email("Update of sqlmap failed with error:\n\n%s" % stderr)
regressionproc = subprocess.Popen("python /opt/sqlmap/sqlmap.py --live-test", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
stdout, stderr = regressionproc.communicate()
if stderr:
failure_email("Execution of regression test failed with error:\n\n%s" % stderr)
failed_tests = re.findall("running live test case: (.+?) \((\d+)\/\d+\)[\r]*\n.+test failed (at parsing items: (.+))?\s*\- scan folder: (\/.+) \- traceback: (.*?)( - SQL injection not detected)?[\r]*\n", stdout, re.M)
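    # Illustrative only (shape inferred from the pattern above, not from captured
    # sqlmap output): a failure this regex is meant to pick up would look roughly like
    #   running live test case: MySQL blind (5/42)
    #   live test final result: test failed - scan folder: /tmp/sqlmaptest-xyz - traceback: False
    # yielding the test title, its number, an optional parsing error, the scan
    # output folder, the traceback flag and an optional "not detected" marker.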
for failed_test in failed_tests:
title = failed_test[0]
test_count = int(failed_test[1])
parse = failed_test[3] if failed_test[3] else None
output_folder = failed_test[4]
traceback = False if failed_test[5] == "False" else bool(failed_test[5])
detected = False if failed_test[6] else True
test_counts.append(test_count)
console_output_file = os.path.join(output_folder, "console_output")
log_file = os.path.join(output_folder, TARGET, "log")
traceback_file = os.path.join(output_folder, "traceback")
if os.path.exists(console_output_file):
console_output_fd = codecs.open(console_output_file, "rb", "utf8")
console_output = console_output_fd.read()
console_output_fd.close()
attachments[test_count] = str(console_output)
if os.path.exists(log_file):
log_fd = codecs.open(log_file, "rb", "utf8")
log = log_fd.read()
log_fd.close()
if os.path.exists(traceback_file):
traceback_fd = codecs.open(traceback_file, "rb", "utf8")
traceback = traceback_fd.read()
traceback_fd.close()
content += "Failed test case '%s' (#%d)" % (title, test_count)
if parse:
content += " at parsing: %s:\n\n" % parse
content += "### Log file:\n\n"
content += "%s\n\n" % log
elif not detected:
content += " - SQL injection not detected\n\n"
else:
content += "\n\n"
if traceback:
content += "### Traceback:\n\n"
content += "%s\n\n" % str(traceback)
content += "#######################################################################\n\n"
end_string = "Regression test finished at %s" % time.strftime("%H:%M:%S %d-%m-%Y", time.gmtime())
if content:
content += end_string
SUBJECT = "Failed %s (%s)" % (SUBJECT, ", ".join("#%d" % count for count in test_counts))
msg = prepare_email(content)
for test_count, attachment in attachments.items():
attachment = MIMEText(attachment)
attachment.add_header("Content-Disposition", "attachment", filename="test_case_%d_console_output.txt" % test_count)
msg.attach(attachment)
send_email(msg)
else:
SUBJECT = "Successful %s" % SUBJECT
msg = prepare_email("All test cases were successful\n\n%s" % end_string)
send_email(msg)
if __name__ == "__main__":
log_fd = open("/tmp/sqlmapregressiontest.log", "wb")
log_fd.write("Regression test started at %s\n" % START_TIME)
try:
main()
except Exception, e:
log_fd.write("An exception has occurred:\n%s" % str(traceback.format_exc()))
log_fd.write("Regression test finished at %s\n\n" % time.strftime("%H:%M:%S %d-%m-%Y", time.gmtime()))
log_fd.close()
| apache-2.0 | -358,738,374,266,894,200 | 32.676829 | 221 | 0.611443 | false |
bitcraze/crazyflie-lib-python | cflib/__init__.py | 1 | 2100 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
The Crazyflie Micro Quadcopter library API used to communicate with the
Crazyflie Micro Quadcopter via a communication link.
The API takes care of scanning, opening and closing the communication link
as well as sending/receiving data from the Crazyflie.
A link is described using an URI of the following format:
<interface>://<interface defined data>.
See each link for the data that can be included in the URI for that interface.
The two main uses-cases are scanning for Crazyflies available on a
communication link and opening a communication link to a Crazyflie.
Example of scanning for available Crazyflies on all communication links:
cflib.crtp.init_drivers()
available = cflib.crtp.scan_interfaces()
for i in available:
print "Found Crazyflie on URI [%s] with comment [%s]"
% (available[0], available[1])
Example of connecting to a Crazyflie with know URI (radio dongle 0 and
radio channel 125):
cf = Crazyflie()
cf.open_link("radio://0/125")
...
cf.close_link()
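A minimal combined sketch (illustrative only; indexing the scan result for its
URI and guarding against an empty result are assumptions, not documented API):
cflib.crtp.init_drivers()
available = cflib.crtp.scan_interfaces()
if available:
    cf = Crazyflie()
    cf.open_link(available[0][0])
    ...
    cf.close_link()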
"""
| gpl-2.0 | -1,534,993,565,534,744,300 | 37.888889 | 78 | 0.661429 | false |
fgaTactics/tecprog2017.1 | gameEngine/Mouse.py | 1 | 1363 | import pygame
# This class wraps pygame's mouse state (cursor position and button clicks);
# a usage sketch follows after the class definition.
class Mouse:
def __init__(self):
        # Tuple with the current mouse x and y positions
self.position = pygame.mouse.get_pos()
assert (self.position is not None), "Invalid pygame mouse position return"
        # Tuple that indicates which mouse buttons are currently pressed
self.click = pygame.mouse.get_pressed()
assert (self.click is not None), "Invalid pygame mouse click return"
# Verify if mouse cursor is over a certain object
def is_mouse_over(self, element):
# Compare object coordinates with mouse position
if((element.get_x() + element.get_width() > self.position[0] >
element.get_x()) and (element.get_y() + element.get_height() >
self.position[1] > element.get_y())):
return True
else:
return False
# Verify if the mouse cursor was over a certain object during a click event
def is_mouse_click(self, element, event):
# Verify coincidence between mouse cursor and object position
if(self.is_mouse_over(element)):
# Watch the mouse button release after a click
if(event.type == pygame.MOUSEBUTTONUP):
return True
else:
return False
else:
return False
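# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal event loop showing how Mouse is meant to be queried. `element` is
# an assumption for the example: any object exposing get_x/get_y/get_width/
# get_height (such as the game's own button wrappers) would work. Note that
# Mouse samples the cursor state in __init__, so a fresh instance is created
# for every event.
def _example_event_loop(element):
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif Mouse().is_mouse_click(element, event):
                print("element was clicked")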
| gpl-3.0 | -432,616,678,001,625,500 | 33.075 | 82 | 0.605282 | false |
PIVX-Project/PIVX | test/functional/sapling_wallet_nullifiers.py | 1 | 7387 | #!/usr/bin/env python3
# Copyright (c) 2016 The Zcash developers
# Copyright (c) 2020 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.test_framework import PivxTestFramework
from test_framework.util import *
from decimal import Decimal
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
class WalletNullifiersTest (PivxTestFramework):
def set_test_params(self):
self.num_nodes = 4
saplingUpgrade = ['-nuparams=v5_shield:201']
self.extra_args = [saplingUpgrade, saplingUpgrade, saplingUpgrade, saplingUpgrade]
def run_test (self):
self.nodes[0].generate(1) # activate Sapling
# add shield addr to node 0
myzaddr0 = self.nodes[0].getnewshieldaddress()
# send node 0 taddr to shield addr to get out of coinbase
# Tests using the default cached chain have one address per coinbase output
mytaddr = get_coinstake_address(self.nodes[0])
recipients = []
recipients.append({"address":myzaddr0, "amount":Decimal('10.0') - Decimal('1')}) # utxo amount less fee
txid = self.nodes[0].shieldsendmany(mytaddr, recipients)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# add shield addr to node 2
myzaddr = self.nodes[2].getnewshieldaddress()
# import node 2 shield addr into node 1
myzkey = self.nodes[2].exportsaplingkey(myzaddr)
self.nodes[1].importsaplingkey(myzkey)
# encrypt node 1 wallet and wait to terminate
self.nodes[1].node_encrypt_wallet("test")
# restart node 1
self.start_node(1, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 1)
connect_nodes_bi(self.nodes, 3, 1)
self.sync_all()
        # send node 0 shield addr to node 2 zaddr
recipients = []
recipients.append({"address":myzaddr, "amount":7.0})
txid = self.nodes[0].shieldsendmany(myzaddr0, recipients)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# check shield addr balance
zsendmanynotevalue = Decimal('7.0')
assert_equal(self.nodes[2].getshieldbalance(myzaddr), zsendmanynotevalue)
assert_equal(self.nodes[1].getshieldbalance(myzaddr), zsendmanynotevalue)
# add shield addr to node 3
myzaddr3 = self.nodes[3].getnewshieldaddress()
        # send node 2 shield addr to node 3 shield addr
recipients = []
recipients.append({"address":myzaddr3, "amount":2.0})
txid = self.nodes[2].shieldsendmany(myzaddr, recipients)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
# check shield addr balance
zsendmany2notevalue = Decimal('2.0')
zsendmanyfee = Decimal(self.nodes[2].viewshieldtransaction(txid)['fee'])
zaddrremaining = zsendmanynotevalue - zsendmany2notevalue - zsendmanyfee
assert_equal(self.nodes[3].getshieldbalance(myzaddr3), zsendmany2notevalue)
assert_equal(self.nodes[2].getshieldbalance(myzaddr), zaddrremaining)
assert_equal(self.nodes[1].getshieldbalance(myzaddr), zaddrremaining)
# send node 2 shield addr on node 1 to taddr
# This requires that node 1 be unlocked, which triggers caching of
# uncached nullifiers.
self.nodes[1].walletpassphrase("test", 600)
mytaddr1 = self.nodes[1].getnewaddress()
recipients = []
recipients.append({"address":mytaddr1, "amount":1.0})
txid = self.nodes[1].shieldsendmany(myzaddr, recipients)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# check shield addr balance
# Now that the encrypted wallet has been unlocked, the note nullifiers
# have been cached and spent notes can be detected. Thus the two wallets
# are in agreement once more.
zsendmany3notevalue = Decimal('1.0')
zsendmanyfee = Decimal(self.nodes[1].viewshieldtransaction(txid)['fee'])
zaddrremaining2 = zaddrremaining - zsendmany3notevalue - zsendmanyfee
assert_equal(self.nodes[1].getshieldbalance(myzaddr), zaddrremaining2)
assert_equal(self.nodes[2].getshieldbalance(myzaddr), zaddrremaining2)
# Test viewing keys
node3mined = Decimal('6250.0')
assert_equal(self.nodes[3].getshieldbalance(), zsendmany2notevalue)
assert_equal(self.nodes[3].getbalance(1, False, True, False), node3mined)
# Add node 1 address and node 2 viewing key to node 3
myzvkey = self.nodes[2].exportsaplingviewingkey(myzaddr)
self.nodes[3].importaddress(mytaddr1)
importvk_result = self.nodes[3].importsaplingviewingkey(myzvkey, 'whenkeyisnew', 1)
# Check results of importsaplingviewingkey
assert_equal(importvk_result["address"], myzaddr)
# Check the address has been imported
assert_equal(myzaddr in self.nodes[3].listshieldaddresses(), False)
assert_equal(myzaddr in self.nodes[3].listshieldaddresses(True), True)
# Node 3 should see the same received notes as node 2; however, there are 2 things:
# - Some of the notes were change for node 2 but not for node 3.
# - Each node wallet store transaction time as received. As
# `wait_and_assert_operationid_status` is called node 2 and 3 are off by a few seconds.
# Aside from that the received notes should be the same. So,
# group by txid and then check that all properties aside from
# change are equal.
node2Received = dict([r['txid'], r] for r in self.nodes[2].listreceivedbyshieldaddress(myzaddr))
node3Received = dict([r['txid'], r] for r in self.nodes[3].listreceivedbyshieldaddress(myzaddr))
        assert_equal(len(node2Received), len(node3Received))
for txid in node2Received:
received2 = node2Received[txid]
received3 = node3Received[txid]
# the change field will be omitted for received3, but all other fields should be shared
assert_true(len(received2) >= len(received3))
for key in received2:
# check all the properties except for change and blocktime
if key != 'change' and key != 'blocktime':
assert_equal(received2[key], received3[key])
# Node 3's balances should be unchanged without explicitly requesting
# to include watch-only balances
assert_equal(self.nodes[3].getshieldbalance(), zsendmany2notevalue)
assert_equal(self.nodes[3].getbalance(1, False, True, False), node3mined)
assert_equal(self.nodes[3].getshieldbalance("*", 1, True), zsendmany2notevalue + zaddrremaining2)
assert_equal(self.nodes[3].getbalance(1, True, True, False), node3mined + Decimal('1.0'))
# Check individual balances reflect the above
assert_equal(self.nodes[3].getreceivedbyaddress(mytaddr1), Decimal('1.0'))
assert_equal(self.nodes[3].getshieldbalance(myzaddr), Decimal('0.0'))
assert_equal(self.nodes[3].getshieldbalance(myzaddr, 1, True), zaddrremaining2)
if __name__ == '__main__':
WalletNullifiersTest().main ()
| mit | 1,046,206,625,469,703,000 | 42.710059 | 111 | 0.662651 | false |
AmatanHead/collective-blog | s_markdown/widgets.py | 1 | 3381 | """Markdown widgets"""
from django import forms
from django.utils.safestring import mark_safe
from django.utils.deconstruct import deconstructible
from json import dumps
@deconstructible
class MarkdownTextarea(forms.Textarea):
"""Basic textarea widget for rendering Markdown objects"""
pass
@deconstructible
class CodeMirror(MarkdownTextarea):
def __init__(self, *args, **kwargs):
"""Widget that uses the `CodeMirror` editor
:param mode: Syntax mode name.
:param addons: List of addons (each element is a relative path
to the addon, without `.js` extension. Example: `mode/overlay`)
:param theme: Theme name.
:param theme_path: Path to the theme file.
Default is `s_markdown/codemirror/theme/<theme>.css`
:param keymap: A keymap name.
:param options: A dict of options that will be passed
to the codemirror editor.
:param additional_modes: Load additional modes for `overlay` extension.
:param js_var_format: A name of the js variable in which
the codemirror instance is saved.
"""
self.mode = kwargs.pop('mode', 'markdown')
self.addons = kwargs.pop('addons', [])
self.theme = kwargs.pop('theme', 'default')
self.theme_path = kwargs.pop('theme_path', 's_markdown/codemirror/theme/%s.css' % self.theme)
self.keymap = kwargs.pop('keymap', None)
self.options = kwargs.pop('options', {})
self.additional_modes = kwargs.pop('additional_modes', [])
self.js_var_format = kwargs.pop('js_var_format', None)
self.options.update(dict(mode=self.mode, theme=self.theme))
self.option_json = dumps(self.options)
super(CodeMirror, self).__init__(*args, **kwargs)
@property
def media(self):
"""Construct a list of mediafiles required for this widget
:return: `forms.Media` instance.
"""
css = ['s_markdown/codemirror/lib/codemirror.css']
if self.theme:
css.append(self.theme_path)
js = ['s_markdown/codemirror/lib/codemirror.js']
js.extend('s_markdown/codemirror/addon/%s.js' % a for a in self.addons)
if self.keymap:
js.append('s_markdown/codemirror/keymap/%s.js' % self.keymap)
if self.mode:
js.append('s_markdown/codemirror/mode/%s/%s.js' % (self.mode, self.mode))
for mode in self.additional_modes:
js.append('s_markdown/codemirror/mode/%s/%s.js' % (mode, mode))
return forms.Media(
css=dict(all=css),
js=js,
)
def render(self, name, value, attrs=None):
"""Render this widget
        :param name: Name of the widget.
        :param value: Current field value.
        :param attrs: Attributes of the widget.
:return: Rendered html.
"""
if self.js_var_format is not None:
js_var_bit = 'var %s = ' % (self.js_var_format % name)
else:
js_var_bit = ''
output = [super(CodeMirror, self).render(name, value, attrs),
'<script type="text/javascript">'
'%sCodeMirror.fromTextArea('
'document.getElementById(%s), %s);'
'</script>' %
(js_var_bit, '"id_%s"' % name, self.option_json)]
return mark_safe('\n'.join(output))
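# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal Django form wiring the CodeMirror widget to a text field. The form
# and field names are invented for the example; the keyword arguments mirror
# the constructor parameters documented above.
class _ExamplePostForm(forms.Form):
    body = forms.CharField(
        widget=CodeMirror(
            mode='markdown',
            theme='default',
            addons=['mode/overlay'],
            options={'lineNumbers': True},
        )
    )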
| mit | 1,420,852,129,473,943,600 | 36.153846 | 101 | 0.599823 | false |
tmfoltz/worldengine | worldengine/generation.py | 1 | 7471 | from noise import snoise2
from worldengine.world import Step
from worldengine.simulations.basic import find_threshold_f
from worldengine.simulations.hydrology import WatermapSimulation
from worldengine.simulations.irrigation import IrrigationSimulation
from worldengine.simulations.humidity import HumiditySimulation
from worldengine.simulations.temperature import TemperatureSimulation
from worldengine.simulations.permeability import PermeabilitySimulation
from worldengine.simulations.erosion import ErosionSimulation
from worldengine.simulations.precipitation import PrecipitationSimulation
from worldengine.simulations.biome import BiomeSimulation
from worldengine.common import anti_alias, get_verbose, matrix_min_and_max, rescale_value
# ------------------
# Initial generation
# ------------------
def center_land(world):
"""Translate the map horizontally and vertically to put as much ocean as
possible at the borders. It operates on elevation and plates map"""
min_sum_on_y = None
y_with_min_sum = None
latshift = 0
for y in range(world.height):
sum_on_y = 0
for x in range(world.width):
sum_on_y += world.elevation['data'][y][x]
if min_sum_on_y is None or sum_on_y < min_sum_on_y:
min_sum_on_y = sum_on_y
y_with_min_sum = y
if get_verbose():
print("geo.center_land: height complete")
min_sum_on_x = None
x_with_min_sum = None
for x in range(world.width):
sum_on_x = 0
for y in range(world.height):
sum_on_x += world.elevation['data'][y][x]
if min_sum_on_x is None or sum_on_x < min_sum_on_x:
min_sum_on_x = sum_on_x
x_with_min_sum = x
if get_verbose():
print("geo.center_land: width complete")
new_elevation_data = []
new_plates = []
for y in range(world.height):
new_elevation_data.append([])
new_plates.append([])
src_y = (y_with_min_sum + y - latshift) % world.height
for x in range(world.width):
src_x = (x_with_min_sum + x) % world.width
new_elevation_data[y].append(world.elevation['data'][src_y][src_x])
new_plates[y].append(world.plates[src_y][src_x])
world.elevation['data'] = new_elevation_data
world.plates = new_plates
if get_verbose():
print("geo.center_land: width complete")
def place_oceans_at_map_borders(world):
"""
Lower the elevation near the border of the map
"""
ocean_border = int(min(30, max(world.width / 5, world.height / 5)))
def place_ocean(x, y, i):
world.elevation['data'][y][x] = \
(world.elevation['data'][y][x] * i) / ocean_border
for x in range(world.width):
for i in range(ocean_border):
place_ocean(x, i, i)
place_ocean(x, world.height - i - 1, i)
for y in range(world.height):
for i in range(ocean_border):
place_ocean(i, y, i)
place_ocean(world.width - i - 1, y, i)
def add_noise_to_elevation(world, seed):
octaves = 8
freq = 16.0 * octaves
for y in range(world.height):
for x in range(world.width):
n = snoise2(x / freq * 2, y / freq * 2, octaves, base=seed)
world.elevation['data'][y][x] += n
def fill_ocean(elevation, sea_level):
width = len(elevation[0])
height = len(elevation)
ocean = [[False for x in range(width)] for y in range(height)] # TODO: use numpy
to_expand = []
for x in range(width):
if elevation[0][x] <= sea_level:
to_expand.append((x, 0))
if elevation[height - 1][x] <= sea_level:
to_expand.append((x, height - 1))
for y in range(height):
if elevation[y][0] <= sea_level:
to_expand.append((0, y))
if elevation[y][width - 1] <= sea_level:
to_expand.append((width - 1, y))
for t in to_expand:
tx, ty = t
if not ocean[ty][tx]:
ocean[ty][tx] = True
for px, py in _around(tx, ty, width, height):
if not ocean[py][px] and elevation[py][px] <= sea_level:
to_expand.append((px, py))
return ocean
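# Worked example (illustrative): for the 3x3 elevation map
#   [[0, 5, 0],
#    [5, 5, 5],
#    [0, 5, 0]]
# and sea_level=1, only the four corner border cells are at or below sea level
# and none of their neighbours are, so fill_ocean returns
#   [[True, False, True],
#    [False, False, False],
#    [True, False, True]]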
def initialize_ocean_and_thresholds(world, ocean_level=1.0):
"""
Calculate the ocean, the sea depth and the elevation thresholds
:param world: a world having elevation but not thresholds
:param ocean_level: the elevation representing the ocean level
:return: nothing, the world will be changed
"""
e = world.elevation['data']
ocean = fill_ocean(e, ocean_level)
hl = find_threshold_f(e, 0.10)
ml = find_threshold_f(e, 0.03)
e_th = [('sea', ocean_level),
('plain', hl),
('hill', ml),
('mountain', None)]
world.set_ocean(ocean)
world.set_elevation(e, e_th)
world.sea_depth = sea_depth(world, ocean_level)
# ----
# Misc
# ----
def sea_depth(world, sea_level):
sea_depth = [[sea_level - world.elevation['data'][y][x]
for x in range(world.width)] for y in range(world.height)]
for y in range(world.height):
for x in range(world.width):
if world.tiles_around((x, y), radius=1, predicate=world.is_land):
sea_depth[y][x] = 0
elif world.tiles_around((x, y), radius=2, predicate=world.is_land):
sea_depth[y][x] *= 0.3
elif world.tiles_around((x, y), radius=3, predicate=world.is_land):
sea_depth[y][x] *= 0.5
elif world.tiles_around((x, y), radius=4, predicate=world.is_land):
sea_depth[y][x] *= 0.7
elif world.tiles_around((x, y), radius=5, predicate=world.is_land):
sea_depth[y][x] *= 0.9
sea_depth = anti_alias(sea_depth, 10)
min_depth, max_depth = matrix_min_and_max(sea_depth)
sea_depth = [[rescale_value(sea_depth[y][x], min_depth,
max_depth, 0.0, 1.0)
for x in range(world.width)] for y in
range(world.height)]
return sea_depth
def _around(x, y, width, height):
ps = []
for dx in range(-1, 2):
nx = x + dx
if 0 <= nx < width:
for dy in range(-1, 2):
ny = y + dy
if 0 <= ny < height and (dx != 0 or dy != 0):
ps.append((nx, ny))
return ps
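# Worked example (illustrative): _around(0, 0, 10, 10) returns the three
# in-bounds neighbours of the top-left cell, [(0, 1), (1, 0), (1, 1)];
# coordinates are (x, y) tuples and the cell itself is excluded.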
def generate_world(w, step):
if isinstance(step, str):
step = Step.get_by_name(step)
seed = w.seed
if not step.include_precipitations:
return w
# Precipitation with thresholds
PrecipitationSimulation().execute(w, seed)
if not step.include_erosion:
return w
ErosionSimulation().execute(w, seed)
if get_verbose():
print("...erosion calculated")
WatermapSimulation().execute(w, seed)
# FIXME: create setters
IrrigationSimulation().execute(w, seed)
TemperatureSimulation().execute(w, seed)
HumiditySimulation().execute(w, seed)
PermeabilitySimulation().execute(w, seed)
cm, biome_cm = BiomeSimulation().execute(w, seed)
for cl in cm.keys():
count = cm[cl]
if get_verbose():
print("%s = %i" % (str(cl), count))
if get_verbose():
print('') # empty line
print('Biome obtained:')
for cl in biome_cm.keys():
count = biome_cm[cl]
if get_verbose():
print(" %30s = %7i" % (str(cl), count))
return w
| mit | 62,308,796,191,271,064 | 32.352679 | 89 | 0.584125 | false |
jfterpstra/bluebottle | bluebottle/wallposts/models.py | 1 | 8042 | from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import fields
from django_extensions.db.fields import (ModificationDateTimeField,
CreationDateTimeField)
from django.utils.text import Truncator
from django.utils.translation import ugettext_lazy as _
from polymorphic.models import PolymorphicModel
from .managers import ReactionManager, WallpostManager
WALLPOST_TEXT_MAX_LENGTH = getattr(settings, 'WALLPOST_TEXT_MAX_LENGTH', 300)
WALLPOST_REACTION_MAX_LENGTH = getattr(settings, 'WALLPOST_REACTION_MAX_LENGTH',
300)
GROUP_PERMS = {
'Staff': {
'perms': (
'add_reaction', 'change_reaction', 'delete_reaction',
'add_wallpost', 'change_wallpost', 'delete_wallpost',
'add_mediawallpost', 'change_mediawallpost', 'delete_mediawallpost',
'add_textwallpost', 'change_textwallpost', 'delete_textwallpost',
'add_systemwallpost', 'change_systemwallpost',
'delete_systemwallpost',
'add_mediawallpostphoto', 'change_mediawallpostphoto',
'delete_mediawallpostphoto',
)
}
}
class Wallpost(PolymorphicModel):
"""
The Wallpost base class. This class will never be used directly because the
content of a Wallpost is always defined
in the child classes.
Implementation Note: Normally this would be an abstract class but it's not
possible to make this an abstract class
and have the polymorphic behaviour of sorting on the common fields.
"""
@property
def wallpost_type(self):
return 'unknown'
# The user who wrote the wall post. This can be empty to support wallposts
# without users (e.g. anonymous
# TextWallposts, system Wallposts for donations etc.)
author = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_('author'),
related_name="%(class)s_wallpost", blank=True,
null=True)
editor = models.ForeignKey(
settings.AUTH_USER_MODEL, verbose_name=_('editor'), blank=True,
null=True, help_text=_("The last user to edit this wallpost."))
# The metadata for the wall post.
created = CreationDateTimeField(_('created'))
updated = ModificationDateTimeField(_('updated'))
deleted = models.DateTimeField(_('deleted'), blank=True, null=True)
ip_address = models.GenericIPAddressField(_('IP address'), blank=True, null=True,
default=None)
# Generic foreign key so we can connect it to any object.
content_type = models.ForeignKey(
ContentType, verbose_name=_('content type'),
related_name="content_type_set_for_%(class)s")
object_id = models.PositiveIntegerField(_('object ID'))
content_object = fields.GenericForeignKey('content_type', 'object_id')
share_with_facebook = models.BooleanField(default=False)
share_with_twitter = models.BooleanField(default=False)
share_with_linkedin = models.BooleanField(default=False)
email_followers = models.BooleanField(default=True)
donation = models.ForeignKey('donations.Donation',
verbose_name=_("Donation"),
related_name='donation',
null=True, blank=True)
# Manager
objects = WallpostManager()
class Meta:
ordering = ('created',)
def __unicode__(self):
return str(self.id)
class MediaWallpost(Wallpost):
# The content of the wall post.
@property
def wallpost_type(self):
return 'media'
title = models.CharField(max_length=60)
text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH, blank=True,
default='')
video_url = models.URLField(max_length=100, blank=True, default='')
def __unicode__(self):
return Truncator(self.text).words(10)
# FIXME: See how we can re-enable this
# def save(self, *args, **kwargs):
# super(MediaWallpost, self).save(*args, **kwargs)
#
# # Mark the photos as deleted when the MediaWallpost is deleted.
# if self.deleted:
# for photo in self.photos.all():
# if not photo.deleted:
# photo.deleted = self.deleted
# photo.save()
class MediaWallpostPhoto(models.Model):
mediawallpost = models.ForeignKey(MediaWallpost, related_name='photos',
null=True, blank=True)
photo = models.ImageField(upload_to='mediawallpostphotos')
deleted = models.DateTimeField(_('deleted'), blank=True, null=True)
ip_address = models.GenericIPAddressField(_('IP address'), blank=True, null=True,
default=None)
author = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_('author'),
related_name="%(class)s_wallpost_photo",
blank=True, null=True)
editor = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_('editor'), blank=True, null=True,
help_text=_(
"The last user to edit this wallpost photo."))
class TextWallpost(Wallpost):
# The content of the wall post.
@property
def wallpost_type(self):
return 'text'
text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH)
def __unicode__(self):
return Truncator(self.text).words(10)
class SystemWallpost(Wallpost):
# The content of the wall post.
@property
def wallpost_type(self):
return 'system'
text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH, blank=True)
# Generic foreign key so we can connect any object to it.
related_type = models.ForeignKey(ContentType,
verbose_name=_('related type'))
related_id = models.PositiveIntegerField(_('related ID'))
related_object = fields.GenericForeignKey('related_type', 'related_id')
def __unicode__(self):
return Truncator(self.text).words(10)
class Reaction(models.Model):
"""
A user reaction or comment to a Wallpost. This model is based on
the Comments model from django.contrib.comments.
"""
# Who posted this reaction. User will need to be logged in to
# make a reaction.
author = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_('author'),
related_name='wallpost_reactions')
editor = models.ForeignKey(
settings.AUTH_USER_MODEL, verbose_name=_('editor'), blank=True,
null=True, related_name='+',
help_text=_("The last user to edit this reaction."))
# The reaction text and the wallpost it's a reaction to.
text = models.TextField(_('reaction text'),
max_length=WALLPOST_REACTION_MAX_LENGTH)
wallpost = models.ForeignKey(Wallpost, related_name='reactions')
# Metadata for the reaction.
created = CreationDateTimeField(_('created'))
updated = ModificationDateTimeField(_('updated'))
deleted = models.DateTimeField(_('deleted'), blank=True, null=True)
ip_address = models.GenericIPAddressField(_('IP address'), blank=True, null=True,
default=None)
# Manager
objects = ReactionManager()
objects_with_deleted = models.Manager()
class Meta:
ordering = ('created',)
verbose_name = _('Reaction')
verbose_name_plural = _('Reactions')
def __unicode__(self):
s = "{0}: {1}".format(self.author.get_full_name(), self.text)
return Truncator(s).words(10)
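# --- Illustrative usage sketch (added; not part of the original module) ---
# Wallposts attach to arbitrary objects through the generic foreign key, so a
# post on some saved `project` instance written by `user` (both assumptions
# for the example) could be created roughly like this; kept as a comment
# because it writes to the database:
#
#   post = TextWallpost.objects.create(
#       content_object=project,   # fills content_type and object_id
#       author=user,
#       text='First post!',
#   )
#   Reaction.objects.create(wallpost=post, author=user, text='Welcome!')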
import mails
import bluebottle.wallposts.signals
| bsd-3-clause | -1,037,258,378,431,632,900 | 37.295238 | 85 | 0.615145 | false |
rootofevil/watercounter | watercounter/app/sqlmodel.py | 1 | 4586 | # -*- coding: utf-8 -*-
from app import db, mail
from sqlalchemy.orm import backref, relationship
from datetime import datetime
from sqlalchemy_utils import PasswordType
from sqlalchemy.sql.schema import ForeignKey
from flask_mail import Message
from config import HOSTNAME
class Waterhistory(db.Model):
id = db.Column(db.Integer, primary_key = True)
hw_counter = db.Column(db.Integer)
cw_counter = db.Column(db.Integer)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', backref=backref('history', lazy='dynamic'))
month = db.Column(db.Integer)
def __init__(self, hw_counter, cw_counter, user):
self.hw_counter = hw_counter
self.cw_counter = cw_counter
self.user = user
self.month = datetime.utcnow().month
class User(db.Model):
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(80))
email = db.Column(db.String(120), unique=True)
phone = db.Column(db.String(12), unique=True)
is_active = db.Column(db.Boolean, default = True)
role = db.Column(db.String(20), default = 'user')
flat_number = db.Column(db.Integer)
password = db.Column(PasswordType(schemes=['pbkdf2_sha512', 'md5_crypt'], deprecated=['md5_crypt']))
email_verified = db.Column(db.Boolean, default = False)
phone_verified = db.Column(db.Boolean, default = False)
def __init__(self, name, email, phone, flat_number, password):
self.name = name
self.email = email
self.phone = phone
self.flat_number = flat_number
self.password = password
def is_authenticated(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def _gen_act_code(self):
import binascii, os
act_code = binascii.hexlify(os.urandom(48))
raw = Act_code(self, act_code)
db.session.add(raw)
db.session.commit()
return act_code
def verify_email(self, act_code = None):
if act_code is None:
act_code = self._gen_act_code()
link = 'http://' + HOSTNAME + '/activate/' + act_code
msg = Message('Hello!', recipients=[self.email], sender=('WC Project', '[email protected]'))
msg.html = '<a href="' + link + '">Link</a>'
mail.send(msg)
return True
saved_codes = Act_code.query.filter_by(user = self)
for saved_code in saved_codes:
if saved_code.code == act_code and (datetime.now() - saved_code.created).seconds <= 43200:
self.email_verified = True
db.session.commit()
return True
def verify_phone(self, act_code):
pass
return False
def __repr__(self):
return 'User %r' % self.name
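# --- Illustrative usage sketch (added; not part of the original module) ---
# A typical registration flow with this model; all values are invented and the
# snippet is kept as a comment because it writes to the database and sends mail:
#
#   user = User(name='Alice', email='[email protected]', phone='79990000000',
#               flat_number=12, password='s3cret')
#   db.session.add(user)
#   db.session.commit()
#   user.verify_email()   # generates an activation code and mails the link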
class Application(db.Model):
id = db.Column(db.Integer, primary_key = True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', backref = backref('apps', lazy = 'dynamic'))
link = db.Column(db.String(100))
created = db.Column(db.DateTime)
is_active = db.Column(db.Boolean())
def __init__(self, user, link, created = None, is_active = True):
self.user = user
self.link = link
if created is None:
created = datetime.utcnow()
self.created = created
self.is_active = is_active
class Act_code(db.Model):
id = db.Column(db.Integer, primary_key = True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', backref = backref('codes', lazy = 'dynamic'))
code = db.Column(db.String(50))
created = db.Column(db.DateTime)
def __init__(self, user, code):
self.user = user
self.code = code
self.created = datetime.now()
class Providers_oAuth(db.Model):
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(20))
consumer_key = db.Column(db.String(120))
consumer_secret = db.Column(db.String(120))
request_token_params = db.Column(db.String(120))
base_url = db.Column(db.String(120))
request_token_url = db.Column(db.String(120))
access_token_method = db.Column(db.String(10))
access_token_url = db.Column(db.String(120))
authorize_url = db.Column(db.String(120))
| gpl-3.0 | -8,005,757,017,711,927,000 | 34.983871 | 104 | 0.589184 | false |
jshaffstall/PyPhysicsSandbox | py2d/Math/Transform.py | 1 | 2380 | import math

# Vector and Polygon are referenced below; they are assumed to be supplied by
# the surrounding py2d.Math package when this module is imported.

class Transform(object):
"""Class for representing affine transformations"""
def __init__(self, data):
self.data = data
@staticmethod
def unit():
"""Get a new unit tranformation"""
return Transform([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
@staticmethod
def move(dx, dy):
"""Get a transformation that moves by dx, dy"""
return Transform([[1, 0, dx],
[0, 1, dy],
[0, 0, 1]])
@staticmethod
def rotate(phi):
"""Get a transformation that rotates by phi"""
return Transform([[math.cos(phi), -math.sin(phi), 0],
[math.sin(phi), math.cos(phi), 0],
[0, 0, 1]])
@staticmethod
def rotate_around(cx, cy, phi):
"""Get a transformation that rotates around (cx, cy) by phi"""
return Transform.move(cx, cy) * Transform.rotate(phi) * Transform.move(-cx, -cy)
@staticmethod
def scale(sx, sy):
"""Get a transformation that scales by sx, sy"""
return Transform([[sx, 0, 0],
[0, sy, 0],
[0, 0, 1]])
@staticmethod
def mirror_x():
"""Get a transformation that mirrors along the x axis"""
return Transform([[-1, 0, 0],
[ 0, 1, 0],
[ 0, 0, 1]])
@staticmethod
def mirror_y():
"""Get a transformation that mirrors along the y axis"""
return Transform([[ 1, 0, 0],
[ 0,-1, 0],
[ 0, 0, 1]])
    def __add__(self, b):
        data = [[self.data[x][y] + b.data[x][y] for y in range(3)] for x in range(3)]
        return Transform(data)

    def __sub__(self, b):
        data = [[self.data[x][y] - b.data[x][y] for y in range(3)] for x in range(3)]
        return Transform(data)
def __mul__(self, val):
if isinstance(val, Vector):
x = val.x * self.data[0][0] + val.y * self.data[0][1] + self.data[0][2]
y = val.x * self.data[1][0] + val.y * self.data[1][1] + self.data[1][2]
return Vector(x,y)
elif isinstance(val, Transform):
data = [[0 for y in range(3)] for x in range(3)]
for i in range(3):
for j in range(3):
for k in range(3):
data[i][j] += self.data[i][k] * val.data[k][j]
return Transform(data)
elif isinstance(val, Polygon):
p_transform = [ self * v for v in val.points ]
return Polygon.from_pointlist(p_transform)
else:
raise ValueError("Unknown multiplier: %s" % val)
| mit | -787,264,642,545,114,100 | 26.045455 | 82 | 0.542017 | false |
President3D/Quality-SPC | src/Ui/InMainWindow.py | 1 | 108855 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'InMainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_myMainWindow(object):
def setupUi(self, myMainWindow):
myMainWindow.setObjectName("myMainWindow")
myMainWindow.resize(1024, 768)
myMainWindow.setWindowTitle("Qualiy SPC")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/Icons/Images/Micrometer-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
myMainWindow.setWindowIcon(icon)
myMainWindow.setAccessibleName("myMainWindow")
myMainWindow.setStyleSheet("")
self.myCentralwidget = QtWidgets.QWidget(myMainWindow)
self.myCentralwidget.setAccessibleName("myCentralwidget")
self.myCentralwidget.setStyleSheet("background-color: rgb(240, 240, 240);")
self.myCentralwidget.setObjectName("myCentralwidget")
self.gridLayoutCentralwidget = QtWidgets.QGridLayout(self.myCentralwidget)
self.gridLayoutCentralwidget.setContentsMargins(6, 2, 6, 2)
self.gridLayoutCentralwidget.setHorizontalSpacing(6)
self.gridLayoutCentralwidget.setVerticalSpacing(0)
self.gridLayoutCentralwidget.setObjectName("gridLayoutCentralwidget")
self.myStackedWidget = QtWidgets.QStackedWidget(self.myCentralwidget)
self.myStackedWidget.setAccessibleName("myStackedWidget")
self.myStackedWidget.setObjectName("myStackedWidget")
self.myPageTestInstruction = QtWidgets.QWidget()
self.myPageTestInstruction.setAccessibleName("myPageTestInstruction")
self.myPageTestInstruction.setObjectName("myPageTestInstruction")
self.myGridLayoutPageTestInstruction = QtWidgets.QGridLayout(self.myPageTestInstruction)
self.myGridLayoutPageTestInstruction.setContentsMargins(0, 0, 0, 0)
self.myGridLayoutPageTestInstruction.setSpacing(1)
self.myGridLayoutPageTestInstruction.setObjectName("myGridLayoutPageTestInstruction")
self.myGroupBoxSpc = QtWidgets.QGroupBox(self.myPageTestInstruction)
self.myGroupBoxSpc.setAccessibleName("myGroupBoxSpc")
self.myGroupBoxSpc.setStyleSheet("#myGroupBoxSpc {background-color: rgb(255, 255, 255); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxSpc::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxSpc.setObjectName("myGroupBoxSpc")
self.gridLayoutGroupBoxSpc = QtWidgets.QGridLayout(self.myGroupBoxSpc)
self.gridLayoutGroupBoxSpc.setContentsMargins(4, 15, 4, 4)
self.gridLayoutGroupBoxSpc.setSpacing(4)
self.gridLayoutGroupBoxSpc.setObjectName("gridLayoutGroupBoxSpc")
self.myFrameSpc = QtWidgets.QFrame(self.myGroupBoxSpc)
self.myFrameSpc.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myFrameSpc.setFrameShadow(QtWidgets.QFrame.Plain)
self.myFrameSpc.setLineWidth(0)
self.myFrameSpc.setObjectName("myFrameSpc")
self.myGridLayoutSpc = QtWidgets.QGridLayout(self.myFrameSpc)
self.myGridLayoutSpc.setContentsMargins(0, 0, 0, 0)
self.myGridLayoutSpc.setSpacing(0)
self.myGridLayoutSpc.setObjectName("myGridLayoutSpc")
self.gridLayoutGroupBoxSpc.addWidget(self.myFrameSpc, 0, 0, 1, 1)
self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxSpc, 0, 0, 1, 1)
self.myGroupBoxSetpoint = QtWidgets.QGroupBox(self.myPageTestInstruction)
self.myGroupBoxSetpoint.setAccessibleName("myGroupBoxSetpoint")
self.myGroupBoxSetpoint.setStyleSheet("#myGroupBoxSetpoint {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxSetpoint::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxSetpoint.setObjectName("myGroupBoxSetpoint")
self.gridLayoutGroupBoxSetpoint = QtWidgets.QGridLayout(self.myGroupBoxSetpoint)
self.gridLayoutGroupBoxSetpoint.setContentsMargins(4, 15, 4, 4)
self.gridLayoutGroupBoxSetpoint.setSpacing(4)
self.gridLayoutGroupBoxSetpoint.setObjectName("gridLayoutGroupBoxSetpoint")
self.myScrollAreaSetpoint = QtWidgets.QScrollArea(self.myGroupBoxSetpoint)
self.myScrollAreaSetpoint.setAccessibleName("myScrollAreaSetpoint")
self.myScrollAreaSetpoint.setStyleSheet("background-color: transparent;")
self.myScrollAreaSetpoint.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myScrollAreaSetpoint.setFrameShadow(QtWidgets.QFrame.Plain)
self.myScrollAreaSetpoint.setWidgetResizable(True)
self.myScrollAreaSetpoint.setObjectName("myScrollAreaSetpoint")
self.myScrollAreaSetpointContents = QtWidgets.QWidget()
self.myScrollAreaSetpointContents.setGeometry(QtCore.QRect(0, 0, 293, 195))
self.myScrollAreaSetpointContents.setAccessibleName("myScrollAreaSetpointContents")
self.myScrollAreaSetpointContents.setStyleSheet("background-color: transparent;")
self.myScrollAreaSetpointContents.setObjectName("myScrollAreaSetpointContents")
self.gridLayoutScrollAreaSetpoint = QtWidgets.QGridLayout(self.myScrollAreaSetpointContents)
self.gridLayoutScrollAreaSetpoint.setContentsMargins(1, 1, 1, 1)
self.gridLayoutScrollAreaSetpoint.setSpacing(1)
self.gridLayoutScrollAreaSetpoint.setObjectName("gridLayoutScrollAreaSetpoint")
self.myFrameSetpointHline = QtWidgets.QFrame(self.myScrollAreaSetpointContents)
self.myFrameSetpointHline.setAccessibleName("myFrameSetpointHline")
self.myFrameSetpointHline.setStyleSheet("color: rgb(154, 154, 154);")
self.myFrameSetpointHline.setFrameShape(QtWidgets.QFrame.HLine)
self.myFrameSetpointHline.setFrameShadow(QtWidgets.QFrame.Plain)
self.myFrameSetpointHline.setObjectName("myFrameSetpointHline")
self.gridLayoutScrollAreaSetpoint.addWidget(self.myFrameSetpointHline, 3, 0, 1, 1)
self.myLabelType = QtWidgets.QLabel(self.myScrollAreaSetpointContents)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelType.setFont(font)
self.myLabelType.setAccessibleName("myLabelType")
self.myLabelType.setStyleSheet("background-color: transparent;")
self.myLabelType.setText("")
self.myLabelType.setObjectName("myLabelType")
self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelType, 0, 0, 1, 1)
self.myLabelValue = QtWidgets.QLabel(self.myScrollAreaSetpointContents)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelValue.setFont(font)
self.myLabelValue.setAccessibleName("myLabelValue")
self.myLabelValue.setStyleSheet("background-color: transparent;")
self.myLabelValue.setObjectName("myLabelValue")
self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelValue, 4, 0, 1, 1)
self.myLabelReference = QtWidgets.QLabel(self.myScrollAreaSetpointContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelReference.setFont(font)
self.myLabelReference.setAccessibleName("myLabelReference")
self.myLabelReference.setStyleSheet("background-color: transparent;")
self.myLabelReference.setObjectName("myLabelReference")
self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelReference, 1, 0, 1, 1)
self.myLabelEquipment = QtWidgets.QLabel(self.myScrollAreaSetpointContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelEquipment.setFont(font)
self.myLabelEquipment.setAccessibleName("myLabelEquipment")
self.myLabelEquipment.setStyleSheet("background-color: transparent;")
self.myLabelEquipment.setObjectName("myLabelEquipment")
self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelEquipment, 2, 0, 1, 1)
self.myLabelTolerance = QtWidgets.QLabel(self.myScrollAreaSetpointContents)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelTolerance.setFont(font)
self.myLabelTolerance.setAccessibleName("myLabelTolerance")
self.myLabelTolerance.setStyleSheet("background-color: transparent;")
self.myLabelTolerance.setObjectName("myLabelTolerance")
self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelTolerance, 5, 0, 1, 1)
self.myLabelInterference = QtWidgets.QLabel(self.myScrollAreaSetpointContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelInterference.setFont(font)
self.myLabelInterference.setAccessibleName("myLabelInterference")
self.myLabelInterference.setStyleSheet("background-color: transparent;")
self.myLabelInterference.setObjectName("myLabelInterference")
self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelInterference, 6, 0, 1, 1)
self.myScrollAreaSetpoint.setWidget(self.myScrollAreaSetpointContents)
self.gridLayoutGroupBoxSetpoint.addWidget(self.myScrollAreaSetpoint, 0, 0, 1, 1)
self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxSetpoint, 0, 1, 1, 1)
self.myGroupBoxCharacteristics = QtWidgets.QGroupBox(self.myPageTestInstruction)
self.myGroupBoxCharacteristics.setAccessibleName("myGroupBoxCharacteristics")
self.myGroupBoxCharacteristics.setStyleSheet("#myGroupBoxCharacteristics {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxCharacteristics::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxCharacteristics.setObjectName("myGroupBoxCharacteristics")
self.gridLayoutGroupBoxCharacteristics = QtWidgets.QGridLayout(self.myGroupBoxCharacteristics)
self.gridLayoutGroupBoxCharacteristics.setContentsMargins(1, 15, 1, 1)
self.gridLayoutGroupBoxCharacteristics.setSpacing(4)
self.gridLayoutGroupBoxCharacteristics.setObjectName("gridLayoutGroupBoxCharacteristics")
self.myScrollAreaCharacteristics = QtWidgets.QScrollArea(self.myGroupBoxCharacteristics)
self.myScrollAreaCharacteristics.setAccessibleName("myScrollAreaCharacteristics")
self.myScrollAreaCharacteristics.setStyleSheet("background-color: transparent;")
self.myScrollAreaCharacteristics.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myScrollAreaCharacteristics.setWidgetResizable(True)
self.myScrollAreaCharacteristics.setObjectName("myScrollAreaCharacteristics")
self.myScrollAreaCharacteristicsContents = QtWidgets.QWidget()
self.myScrollAreaCharacteristicsContents.setGeometry(QtCore.QRect(0, 0, 400, 416))
self.myScrollAreaCharacteristicsContents.setAccessibleName("myScrollAreaCharacteristicsContents")
self.myScrollAreaCharacteristicsContents.setStyleSheet("#myScrollAreaCharacteristics {background-color: transparent;}")
self.myScrollAreaCharacteristicsContents.setObjectName("myScrollAreaCharacteristicsContents")
self.gridLayoutScrollAreaCharacteristics = QtWidgets.QGridLayout(self.myScrollAreaCharacteristicsContents)
self.gridLayoutScrollAreaCharacteristics.setContentsMargins(1, 1, 1, 1)
self.gridLayoutScrollAreaCharacteristics.setHorizontalSpacing(6)
self.gridLayoutScrollAreaCharacteristics.setVerticalSpacing(4)
self.gridLayoutScrollAreaCharacteristics.setObjectName("gridLayoutScrollAreaCharacteristics")
self.myLabelTestInstruction = QtWidgets.QLabel(self.myScrollAreaCharacteristicsContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelTestInstruction.setFont(font)
self.myLabelTestInstruction.setAccessibleName("myLabelTestInstruction")
self.myLabelTestInstruction.setText("")
self.myLabelTestInstruction.setObjectName("myLabelTestInstruction")
self.gridLayoutScrollAreaCharacteristics.addWidget(self.myLabelTestInstruction, 0, 0, 1, 1)
self.myTableViewCharacteristics = QtWidgets.QTableView(self.myScrollAreaCharacteristicsContents)
self.myTableViewCharacteristics.setAccessibleName("myTableViewCharacteristics")
self.myTableViewCharacteristics.setStyleSheet("QHeaderView::section {\n"
" background-color: lightgray;\n"
" color: black;\n"
" padding: 4px;\n"
" border: 1px solid black;\n"
"}\n"
"\n"
"QHeaderView::section:checked\n"
"{\n"
" background-color: lightgray;\n"
"}")
self.myTableViewCharacteristics.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myTableViewCharacteristics.setFrameShadow(QtWidgets.QFrame.Plain)
self.myTableViewCharacteristics.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.myTableViewCharacteristics.setAlternatingRowColors(True)
self.myTableViewCharacteristics.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.myTableViewCharacteristics.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.myTableViewCharacteristics.setObjectName("myTableViewCharacteristics")
self.gridLayoutScrollAreaCharacteristics.addWidget(self.myTableViewCharacteristics, 1, 0, 1, 1)
self.myScrollAreaCharacteristics.setWidget(self.myScrollAreaCharacteristicsContents)
self.gridLayoutGroupBoxCharacteristics.addWidget(self.myScrollAreaCharacteristics, 0, 0, 1, 1)
self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxCharacteristics, 0, 2, 2, 1)
self.myGroupBoxDeviation = QtWidgets.QGroupBox(self.myPageTestInstruction)
self.myGroupBoxDeviation.setAccessibleName("myGroupBoxDeviation")
self.myGroupBoxDeviation.setStyleSheet("#myGroupBoxDeviation {background-color: rgb(255, 255, 255); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxDeviation::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxDeviation.setObjectName("myGroupBoxDeviation")
self.gridLayoutGroupBoxDeviation = QtWidgets.QGridLayout(self.myGroupBoxDeviation)
self.gridLayoutGroupBoxDeviation.setContentsMargins(4, 15, 4, 4)
self.gridLayoutGroupBoxDeviation.setSpacing(4)
self.gridLayoutGroupBoxDeviation.setObjectName("gridLayoutGroupBoxDeviation")
self.myFrameDeviation = QtWidgets.QFrame(self.myGroupBoxDeviation)
self.myFrameDeviation.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myFrameDeviation.setFrameShadow(QtWidgets.QFrame.Plain)
self.myFrameDeviation.setLineWidth(0)
self.myFrameDeviation.setObjectName("myFrameDeviation")
self.myGridLayoutDeviation = QtWidgets.QGridLayout(self.myFrameDeviation)
self.myGridLayoutDeviation.setContentsMargins(0, 0, 0, 0)
self.myGridLayoutDeviation.setSpacing(0)
self.myGridLayoutDeviation.setObjectName("myGridLayoutDeviation")
self.gridLayoutGroupBoxDeviation.addWidget(self.myFrameDeviation, 0, 0, 1, 1)
self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxDeviation, 1, 0, 1, 1)
self.myGroupBoxActualValue = QtWidgets.QGroupBox(self.myPageTestInstruction)
self.myGroupBoxActualValue.setAccessibleName("myGroupBoxActualValue")
self.myGroupBoxActualValue.setStyleSheet("#myGroupBoxActualValue {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxActualValue::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxActualValue.setObjectName("myGroupBoxActualValue")
self.gridLayout_2 = QtWidgets.QGridLayout(self.myGroupBoxActualValue)
self.gridLayout_2.setContentsMargins(4, 15, 4, 4)
self.gridLayout_2.setSpacing(4)
self.gridLayout_2.setObjectName("gridLayout_2")
self.myToolButtonOk = QtWidgets.QToolButton(self.myGroupBoxActualValue)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.myToolButtonOk.sizePolicy().hasHeightForWidth())
self.myToolButtonOk.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
self.myToolButtonOk.setFont(font)
self.myToolButtonOk.setAccessibleName("myToolButtonOk")
self.myToolButtonOk.setStyleSheet("#myToolButtonOk {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myToolButtonOk:hover {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 2px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myToolButtonOk:pressed {color: black;\n"
"background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: inset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/Icons/Images/Approval-96.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myToolButtonOk.setIcon(icon1)
self.myToolButtonOk.setIconSize(QtCore.QSize(40, 40))
self.myToolButtonOk.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.myToolButtonOk.setObjectName("myToolButtonOk")
self.gridLayout_2.addWidget(self.myToolButtonOk, 3, 1, 2, 1)
self.myLineEditSerialNo = QtWidgets.QLineEdit(self.myGroupBoxActualValue)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.myLineEditSerialNo.sizePolicy().hasHeightForWidth())
self.myLineEditSerialNo.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(18)
self.myLineEditSerialNo.setFont(font)
self.myLineEditSerialNo.setAccessibleName("myLineEditSerialNo")
self.myLineEditSerialNo.setStyleSheet("#myLineEditSerialNo {\n"
"border: 1px solid gray;\n"
"border-radius: 2px;\n"
"padding: 3 3px;\n"
"background: white;\n"
"selection-background-color: darkgray;\n"
"}")
self.myLineEditSerialNo.setObjectName("myLineEditSerialNo")
self.gridLayout_2.addWidget(self.myLineEditSerialNo, 4, 0, 1, 1)
self.myLabelSerialNo = QtWidgets.QLabel(self.myGroupBoxActualValue)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.myLabelSerialNo.sizePolicy().hasHeightForWidth())
self.myLabelSerialNo.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelSerialNo.setFont(font)
self.myLabelSerialNo.setAccessibleName("myLabelSerialNo")
self.myLabelSerialNo.setStyleSheet("background-color: transparent;")
self.myLabelSerialNo.setObjectName("myLabelSerialNo")
self.gridLayout_2.addWidget(self.myLabelSerialNo, 3, 0, 1, 1)
self.myToolButtonNok = QtWidgets.QToolButton(self.myGroupBoxActualValue)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.myToolButtonNok.sizePolicy().hasHeightForWidth())
self.myToolButtonNok.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
self.myToolButtonNok.setFont(font)
self.myToolButtonNok.setAccessibleName("myToolButtonNok")
self.myToolButtonNok.setStyleSheet("#myToolButtonNok {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myToolButtonNok:hover {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 2px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myToolButtonNok:pressed {color: black;\n"
"background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: inset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/Icons/Images/Cancel-96.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myToolButtonNok.setIcon(icon2)
self.myToolButtonNok.setIconSize(QtCore.QSize(40, 40))
self.myToolButtonNok.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.myToolButtonNok.setObjectName("myToolButtonNok")
self.gridLayout_2.addWidget(self.myToolButtonNok, 3, 2, 2, 1)
self.myLineEditActualValue = QtWidgets.QLineEdit(self.myGroupBoxActualValue)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.myLineEditActualValue.sizePolicy().hasHeightForWidth())
self.myLineEditActualValue.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(18)
self.myLineEditActualValue.setFont(font)
self.myLineEditActualValue.setAccessibleName("myLineEditActualValue")
self.myLineEditActualValue.setStyleSheet("#myLineEditActualValue {\n"
"border: 1px solid gray;\n"
"border-radius: 2px;\n"
"padding: 3 3px;\n"
"background: white;\n"
"selection-background-color: darkgray;\n"
"}")
self.myLineEditActualValue.setObjectName("myLineEditActualValue")
self.gridLayout_2.addWidget(self.myLineEditActualValue, 1, 0, 1, 3)
self.myFrameActualValueHLine = QtWidgets.QFrame(self.myGroupBoxActualValue)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.myFrameActualValueHLine.sizePolicy().hasHeightForWidth())
self.myFrameActualValueHLine.setSizePolicy(sizePolicy)
self.myFrameActualValueHLine.setAccessibleName("myFrameActualValueHLine")
self.myFrameActualValueHLine.setStyleSheet("color: rgb(154, 154, 154);")
self.myFrameActualValueHLine.setFrameShape(QtWidgets.QFrame.HLine)
self.myFrameActualValueHLine.setFrameShadow(QtWidgets.QFrame.Plain)
self.myFrameActualValueHLine.setObjectName("myFrameActualValueHLine")
self.gridLayout_2.addWidget(self.myFrameActualValueHLine, 2, 0, 1, 3)
self.myLabelActualValue = QtWidgets.QLabel(self.myGroupBoxActualValue)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.myLabelActualValue.sizePolicy().hasHeightForWidth())
self.myLabelActualValue.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelActualValue.setFont(font)
self.myLabelActualValue.setAccessibleName("myLabelActualValue")
self.myLabelActualValue.setStyleSheet("background-color: transparent;")
self.myLabelActualValue.setObjectName("myLabelActualValue")
self.gridLayout_2.addWidget(self.myLabelActualValue, 0, 0, 1, 1)
self.myLabelActualValuePreview = QtWidgets.QLabel(self.myGroupBoxActualValue)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelActualValuePreview.setFont(font)
self.myLabelActualValuePreview.setStyleSheet("#myLabelActualValuePreview {\n"
"border: 1px solid gray;\n"
"border-radius: 2px;\n"
"padding: 3 3px;\n"
"background: transparent;\n"
"}")
self.myLabelActualValuePreview.setObjectName("myLabelActualValuePreview")
self.gridLayout_2.addWidget(self.myLabelActualValuePreview, 0, 1, 1, 2)
self.gridLayout_2.setColumnStretch(0, 2)
self.gridLayout_2.setColumnStretch(1, 1)
self.gridLayout_2.setColumnStretch(2, 1)
self.gridLayout_2.setRowStretch(0, 1)
self.gridLayout_2.setRowStretch(1, 1)
self.gridLayout_2.setRowStretch(2, 1)
self.gridLayout_2.setRowStretch(3, 1)
self.gridLayout_2.setRowStretch(4, 1)
self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxActualValue, 1, 1, 1, 1)
self.myGroupBoxImage = QtWidgets.QGroupBox(self.myPageTestInstruction)
self.myGroupBoxImage.setAccessibleName("myGroupBoxImage")
self.myGroupBoxImage.setStyleSheet("#myGroupBoxImage {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxImage::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxImage.setObjectName("myGroupBoxImage")
self.gridLayoutGroupBoxImage = QtWidgets.QGridLayout(self.myGroupBoxImage)
self.gridLayoutGroupBoxImage.setContentsMargins(4, 15, 4, 4)
self.gridLayoutGroupBoxImage.setSpacing(4)
self.gridLayoutGroupBoxImage.setObjectName("gridLayoutGroupBoxImage")
self.myLabelImageAmount = QtWidgets.QLabel(self.myGroupBoxImage)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelImageAmount.setFont(font)
self.myLabelImageAmount.setAccessibleName("myLabelImageAmount")
self.myLabelImageAmount.setStyleSheet("background-color: transparent;")
self.myLabelImageAmount.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing)
self.myLabelImageAmount.setObjectName("myLabelImageAmount")
self.gridLayoutGroupBoxImage.addWidget(self.myLabelImageAmount, 4, 1, 1, 1)
self.myPushButtonBackward = QtWidgets.QPushButton(self.myGroupBoxImage)
font = QtGui.QFont()
font.setPointSize(12)
self.myPushButtonBackward.setFont(font)
self.myPushButtonBackward.setAccessibleName("myPushButtonBackward")
self.myPushButtonBackward.setStyleSheet("#myPushButtonBackward {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myPushButtonBackward:hover {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 2px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myPushButtonBackward:pressed {color: black;\n"
"background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: inset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"")
self.myPushButtonBackward.setObjectName("myPushButtonBackward")
self.gridLayoutGroupBoxImage.addWidget(self.myPushButtonBackward, 1, 1, 1, 1)
self.myPushButtonZoom = QtWidgets.QPushButton(self.myGroupBoxImage)
font = QtGui.QFont()
font.setPointSize(12)
self.myPushButtonZoom.setFont(font)
self.myPushButtonZoom.setAccessibleName("myPushButtonZoom")
self.myPushButtonZoom.setStyleSheet("#myPushButtonZoom {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myPushButtonZoom:hover {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 2px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myPushButtonZoom:pressed {color: black;\n"
"background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: inset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"")
self.myPushButtonZoom.setObjectName("myPushButtonZoom")
self.gridLayoutGroupBoxImage.addWidget(self.myPushButtonZoom, 2, 1, 1, 1)
self.myPushButtonForward = QtWidgets.QPushButton(self.myGroupBoxImage)
font = QtGui.QFont()
font.setPointSize(12)
self.myPushButtonForward.setFont(font)
self.myPushButtonForward.setAccessibleName("myPushButtonForward")
self.myPushButtonForward.setStyleSheet("#myPushButtonForward {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myPushButtonForward:hover {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 2px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myPushButtonForward:pressed {color: black;\n"
"background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: inset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"")
self.myPushButtonForward.setObjectName("myPushButtonForward")
self.gridLayoutGroupBoxImage.addWidget(self.myPushButtonForward, 0, 1, 1, 1)
self.myLabelImage = QtWidgets.QLabel(self.myGroupBoxImage)
self.myLabelImage.setAccessibleName("myLabelImage")
self.myLabelImage.setStyleSheet("background-color: transparent;")
self.myLabelImage.setText("")
self.myLabelImage.setScaledContents(False)
self.myLabelImage.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.myLabelImage.setObjectName("myLabelImage")
self.gridLayoutGroupBoxImage.addWidget(self.myLabelImage, 0, 0, 5, 1)
self.myPushButtonVideo = QtWidgets.QPushButton(self.myGroupBoxImage)
font = QtGui.QFont()
font.setPointSize(12)
self.myPushButtonVideo.setFont(font)
self.myPushButtonVideo.setAccessibleName("myPushButtonVideo")
self.myPushButtonVideo.setStyleSheet("#myPushButtonVideo {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myPushButtonVideo:hover {color: black;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n"
"border-width: 2px;\n"
"border-color: gray;\n"
"border-style: outset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"#myPushButtonVideo:pressed {color: black;\n"
"background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n"
"border-width: 1px;\n"
"border-color: gray;\n"
"border-style: inset;\n"
"border-radius: 2px;\n"
"padding: 3px}\n"
"")
self.myPushButtonVideo.setObjectName("myPushButtonVideo")
self.gridLayoutGroupBoxImage.addWidget(self.myPushButtonVideo, 3, 1, 1, 1)
self.gridLayoutGroupBoxImage.setColumnStretch(0, 6)
self.gridLayoutGroupBoxImage.setColumnStretch(1, 1)
self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxImage, 2, 0, 1, 2)
self.myGroupBoxDescription = QtWidgets.QGroupBox(self.myPageTestInstruction)
self.myGroupBoxDescription.setAccessibleName("myGroupBoxDescription")
self.myGroupBoxDescription.setStyleSheet("#myGroupBoxDescription {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxDescription::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxDescription.setObjectName("myGroupBoxDescription")
self.gridLayoutGroupBoxDescription = QtWidgets.QGridLayout(self.myGroupBoxDescription)
self.gridLayoutGroupBoxDescription.setContentsMargins(1, 15, 1, 1)
self.gridLayoutGroupBoxDescription.setHorizontalSpacing(1)
self.gridLayoutGroupBoxDescription.setVerticalSpacing(6)
self.gridLayoutGroupBoxDescription.setObjectName("gridLayoutGroupBoxDescription")
self.myTextBrowserDescription = QtWidgets.QTextBrowser(self.myGroupBoxDescription)
font = QtGui.QFont()
font.setPointSize(12)
self.myTextBrowserDescription.setFont(font)
self.myTextBrowserDescription.setFocusPolicy(QtCore.Qt.NoFocus)
self.myTextBrowserDescription.setAccessibleName("myTextBrowserDescription")
self.myTextBrowserDescription.setStyleSheet("background-color: transparent;")
self.myTextBrowserDescription.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myTextBrowserDescription.setFrameShadow(QtWidgets.QFrame.Plain)
self.myTextBrowserDescription.setObjectName("myTextBrowserDescription")
self.gridLayoutGroupBoxDescription.addWidget(self.myTextBrowserDescription, 0, 0, 1, 1)
self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxDescription, 2, 2, 1, 1)
self.myGridLayoutPageTestInstruction.setColumnStretch(0, 3)
self.myGridLayoutPageTestInstruction.setColumnStretch(1, 3)
self.myGridLayoutPageTestInstruction.setColumnStretch(2, 4)
self.myGridLayoutPageTestInstruction.setRowStretch(0, 3)
self.myGridLayoutPageTestInstruction.setRowStretch(1, 3)
self.myGridLayoutPageTestInstruction.setRowStretch(2, 4)
self.myStackedWidget.addWidget(self.myPageTestInstruction)
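        # Stacked-widget page myPageSpc: a group box with an empty frame
        # (myFrameSpcFull) for the full-size SPC view, a statistics group box
        # (Cpk, mean, sigma, +/-6 sigma, tolerance/intervention limits, PPM)
        # and a characteristics table with the test instruction name.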
self.myPageSpc = QtWidgets.QWidget()
self.myPageSpc.setAccessibleName("myPageSpc")
self.myPageSpc.setObjectName("myPageSpc")
self.myGridLayoutPageSpc = QtWidgets.QGridLayout(self.myPageSpc)
self.myGridLayoutPageSpc.setContentsMargins(0, 0, 0, 0)
self.myGridLayoutPageSpc.setSpacing(1)
self.myGridLayoutPageSpc.setObjectName("myGridLayoutPageSpc")
self.myGroupBoxSpcFull = QtWidgets.QGroupBox(self.myPageSpc)
self.myGroupBoxSpcFull.setStyleSheet("#myGroupBoxSpcFull {background-color: rgb(255, 255, 255); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxSpcFull::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxSpcFull.setObjectName("myGroupBoxSpcFull")
self.myGridLayoutGroupBoxSpcFull = QtWidgets.QGridLayout(self.myGroupBoxSpcFull)
self.myGridLayoutGroupBoxSpcFull.setContentsMargins(4, 15, 4, 4)
self.myGridLayoutGroupBoxSpcFull.setSpacing(0)
self.myGridLayoutGroupBoxSpcFull.setObjectName("myGridLayoutGroupBoxSpcFull")
self.myFrameSpcFull = QtWidgets.QFrame(self.myGroupBoxSpcFull)
self.myFrameSpcFull.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myFrameSpcFull.setFrameShadow(QtWidgets.QFrame.Plain)
self.myFrameSpcFull.setLineWidth(0)
self.myFrameSpcFull.setObjectName("myFrameSpcFull")
self.myVerticalLayoutFrameSpcFull = QtWidgets.QVBoxLayout(self.myFrameSpcFull)
self.myVerticalLayoutFrameSpcFull.setContentsMargins(0, 0, 0, 0)
self.myVerticalLayoutFrameSpcFull.setSpacing(0)
self.myVerticalLayoutFrameSpcFull.setObjectName("myVerticalLayoutFrameSpcFull")
self.myGridLayoutGroupBoxSpcFull.addWidget(self.myFrameSpcFull, 0, 0, 1, 1)
self.myGridLayoutPageSpc.addWidget(self.myGroupBoxSpcFull, 0, 0, 1, 1)
self.myGroupBoxStatisticSpc = QtWidgets.QGroupBox(self.myPageSpc)
self.myGroupBoxStatisticSpc.setStyleSheet("#myGroupBoxStatisticSpc {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxStatisticSpc::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxStatisticSpc.setObjectName("myGroupBoxStatisticSpc")
self.myGridLayoutStatisticSpc = QtWidgets.QGridLayout(self.myGroupBoxStatisticSpc)
self.myGridLayoutStatisticSpc.setContentsMargins(1, 15, 1, 1)
self.myGridLayoutStatisticSpc.setSpacing(4)
self.myGridLayoutStatisticSpc.setObjectName("myGridLayoutStatisticSpc")
self.myScrollAreaGroupBoxStatisticSpc = QtWidgets.QScrollArea(self.myGroupBoxStatisticSpc)
self.myScrollAreaGroupBoxStatisticSpc.setStyleSheet("background-color: transparent;")
self.myScrollAreaGroupBoxStatisticSpc.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myScrollAreaGroupBoxStatisticSpc.setWidgetResizable(True)
self.myScrollAreaGroupBoxStatisticSpc.setObjectName("myScrollAreaGroupBoxStatisticSpc")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 396, 113))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.myGridLayoutGroupBoxStatisticSpc = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.myGridLayoutGroupBoxStatisticSpc.setContentsMargins(1, 1, 1, 1)
self.myGridLayoutGroupBoxStatisticSpc.setSpacing(4)
self.myGridLayoutGroupBoxStatisticSpc.setObjectName("myGridLayoutGroupBoxStatisticSpc")
self.myLabelStatisticSpcCpk = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelStatisticSpcCpk.setFont(font)
self.myLabelStatisticSpcCpk.setObjectName("myLabelStatisticSpcCpk")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcCpk, 0, 0, 1, 1)
self.myLabelSpcCpkValue = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelSpcCpkValue.setFont(font)
self.myLabelSpcCpkValue.setObjectName("myLabelSpcCpkValue")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcCpkValue, 0, 1, 1, 1)
self.myLabelStatisticSpcAverage = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelStatisticSpcAverage.setFont(font)
self.myLabelStatisticSpcAverage.setObjectName("myLabelStatisticSpcAverage")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcAverage, 1, 0, 1, 1)
self.myLabelSpcDeivationValue = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelSpcDeivationValue.setFont(font)
self.myLabelSpcDeivationValue.setObjectName("myLabelSpcDeivationValue")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcDeivationValue, 2, 1, 1, 1)
self.myLabelStatistcSpcDeviation = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelStatistcSpcDeviation.setFont(font)
self.myLabelStatistcSpcDeviation.setObjectName("myLabelStatistcSpcDeviation")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatistcSpcDeviation, 2, 0, 1, 1)
self.myLabelSpcAverageValue = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelSpcAverageValue.setFont(font)
self.myLabelSpcAverageValue.setObjectName("myLabelSpcAverageValue")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcAverageValue, 1, 1, 1, 1)
self.myLabelStatisticSpcAndSicSigma = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelStatisticSpcAndSicSigma.setFont(font)
self.myLabelStatisticSpcAndSicSigma.setObjectName("myLabelStatisticSpcAndSicSigma")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcAndSicSigma, 3, 0, 1, 1)
self.myLabelStatisticSpcMinusSixSigma = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelStatisticSpcMinusSixSigma.setFont(font)
self.myLabelStatisticSpcMinusSixSigma.setObjectName("myLabelStatisticSpcMinusSixSigma")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcMinusSixSigma, 4, 0, 1, 1)
self.myLabelSpcMinusSixSigmaValue = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelSpcMinusSixSigmaValue.setFont(font)
self.myLabelSpcMinusSixSigmaValue.setObjectName("myLabelSpcMinusSixSigmaValue")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcMinusSixSigmaValue, 4, 1, 1, 1)
self.myLabelSpcAndSixSigmaValue = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelSpcAndSixSigmaValue.setFont(font)
self.myLabelSpcAndSixSigmaValue.setObjectName("myLabelSpcAndSixSigmaValue")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcAndSixSigmaValue, 3, 1, 1, 1)
self.myLabelStatisticSpcUtl = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelStatisticSpcUtl.setFont(font)
self.myLabelStatisticSpcUtl.setObjectName("myLabelStatisticSpcUtl")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcUtl, 1, 3, 1, 1)
self.myLabelStatisticSpcLtl = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelStatisticSpcLtl.setFont(font)
self.myLabelStatisticSpcLtl.setObjectName("myLabelStatisticSpcLtl")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcLtl, 2, 3, 1, 1)
self.myLabelSpcUtlValue = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelSpcUtlValue.setFont(font)
self.myLabelSpcUtlValue.setObjectName("myLabelSpcUtlValue")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcUtlValue, 1, 4, 1, 1)
self.myLabelSpcLtlValue = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelSpcLtlValue.setFont(font)
self.myLabelSpcLtlValue.setObjectName("myLabelSpcLtlValue")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcLtlValue, 2, 4, 1, 1)
self.myLabelStatisticSpcUil = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelStatisticSpcUil.setFont(font)
self.myLabelStatisticSpcUil.setObjectName("myLabelStatisticSpcUil")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcUil, 3, 3, 1, 1)
self.myLabelStatisticSpcLil = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelStatisticSpcLil.setFont(font)
self.myLabelStatisticSpcLil.setObjectName("myLabelStatisticSpcLil")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcLil, 4, 3, 1, 1)
self.myLabelSpcUilValue = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelSpcUilValue.setFont(font)
self.myLabelSpcUilValue.setObjectName("myLabelSpcUilValue")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcUilValue, 3, 4, 1, 1)
self.myLabelSpcLilValue = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelSpcLilValue.setFont(font)
self.myLabelSpcLilValue.setObjectName("myLabelSpcLilValue")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcLilValue, 4, 4, 1, 1)
self.myFrameVlineStatisticSpc = QtWidgets.QFrame(self.scrollAreaWidgetContents)
self.myFrameVlineStatisticSpc.setStyleSheet("color: rgb(154, 154, 154);")
self.myFrameVlineStatisticSpc.setFrameShape(QtWidgets.QFrame.VLine)
self.myFrameVlineStatisticSpc.setFrameShadow(QtWidgets.QFrame.Plain)
self.myFrameVlineStatisticSpc.setObjectName("myFrameVlineStatisticSpc")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myFrameVlineStatisticSpc, 0, 2, 5, 1)
self.myLabelStatisticSpcPpm = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelStatisticSpcPpm.setFont(font)
self.myLabelStatisticSpcPpm.setObjectName("myLabelStatisticSpcPpm")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcPpm, 0, 3, 1, 1)
self.myLabelSpcPpmValue = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelSpcPpmValue.setFont(font)
self.myLabelSpcPpmValue.setObjectName("myLabelSpcPpmValue")
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcPpmValue, 0, 4, 1, 1)
self.myGridLayoutGroupBoxStatisticSpc.setColumnStretch(0, 2)
self.myGridLayoutGroupBoxStatisticSpc.setColumnStretch(1, 1)
self.myGridLayoutGroupBoxStatisticSpc.setColumnStretch(2, 1)
self.myGridLayoutGroupBoxStatisticSpc.setColumnStretch(3, 2)
self.myGridLayoutGroupBoxStatisticSpc.setColumnStretch(4, 1)
self.myGridLayoutGroupBoxStatisticSpc.setRowStretch(0, 1)
self.myGridLayoutGroupBoxStatisticSpc.setRowStretch(1, 1)
self.myGridLayoutGroupBoxStatisticSpc.setRowStretch(2, 1)
self.myGridLayoutGroupBoxStatisticSpc.setRowStretch(3, 1)
self.myGridLayoutGroupBoxStatisticSpc.setRowStretch(4, 1)
self.myScrollAreaGroupBoxStatisticSpc.setWidget(self.scrollAreaWidgetContents)
self.myGridLayoutStatisticSpc.addWidget(self.myScrollAreaGroupBoxStatisticSpc, 0, 0, 1, 1)
self.myGridLayoutPageSpc.addWidget(self.myGroupBoxStatisticSpc, 1, 0, 1, 1)
self.myGroupBoxCharacteristicsSpc = QtWidgets.QGroupBox(self.myPageSpc)
self.myGroupBoxCharacteristicsSpc.setStyleSheet("#myGroupBoxCharacteristicsSpc {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxCharacteristicsSpc::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxCharacteristicsSpc.setObjectName("myGroupBoxCharacteristicsSpc")
self.myGridLayoutGroupBoxCharacteristicsSpc = QtWidgets.QGridLayout(self.myGroupBoxCharacteristicsSpc)
self.myGridLayoutGroupBoxCharacteristicsSpc.setContentsMargins(1, 15, 1, 1)
self.myGridLayoutGroupBoxCharacteristicsSpc.setSpacing(4)
self.myGridLayoutGroupBoxCharacteristicsSpc.setObjectName("myGridLayoutGroupBoxCharacteristicsSpc")
self.myScrollAreaGroupBoyCharacteristicsSpc = QtWidgets.QScrollArea(self.myGroupBoxCharacteristicsSpc)
self.myScrollAreaGroupBoyCharacteristicsSpc.setStyleSheet("background-color: transparent;")
self.myScrollAreaGroupBoyCharacteristicsSpc.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myScrollAreaGroupBoyCharacteristicsSpc.setFrameShadow(QtWidgets.QFrame.Sunken)
self.myScrollAreaGroupBoyCharacteristicsSpc.setWidgetResizable(True)
self.myScrollAreaGroupBoyCharacteristicsSpc.setObjectName("myScrollAreaGroupBoyCharacteristicsSpc")
self.myScrollAreaWidgetContentsSpc = QtWidgets.QWidget()
self.myScrollAreaWidgetContentsSpc.setGeometry(QtCore.QRect(0, 0, 83, 94))
self.myScrollAreaWidgetContentsSpc.setStyleSheet("#myScrollAreaWidgetContentsSpc {background-color: transparent;}")
self.myScrollAreaWidgetContentsSpc.setObjectName("myScrollAreaWidgetContentsSpc")
self.myGridLayoutScrollAreaWidgetContentsSpc = QtWidgets.QGridLayout(self.myScrollAreaWidgetContentsSpc)
self.myGridLayoutScrollAreaWidgetContentsSpc.setContentsMargins(1, 1, 1, 1)
self.myGridLayoutScrollAreaWidgetContentsSpc.setSpacing(4)
self.myGridLayoutScrollAreaWidgetContentsSpc.setObjectName("myGridLayoutScrollAreaWidgetContentsSpc")
self.myTableViewCharacteristicsPageSpc = QtWidgets.QTableView(self.myScrollAreaWidgetContentsSpc)
self.myTableViewCharacteristicsPageSpc.setStyleSheet("QHeaderView::section {\n"
" background-color: lightgray;\n"
" color: black;\n"
" padding: 4px;\n"
" border: 1px solid black;\n"
"}\n"
"\n"
"QHeaderView::section:checked\n"
"{\n"
" background-color: lightgray;\n"
"}")
self.myTableViewCharacteristicsPageSpc.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myTableViewCharacteristicsPageSpc.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.myTableViewCharacteristicsPageSpc.setAlternatingRowColors(True)
self.myTableViewCharacteristicsPageSpc.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.myTableViewCharacteristicsPageSpc.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.myTableViewCharacteristicsPageSpc.setObjectName("myTableViewCharacteristicsPageSpc")
self.myGridLayoutScrollAreaWidgetContentsSpc.addWidget(self.myTableViewCharacteristicsPageSpc, 1, 0, 1, 1)
self.myLabelTestInstructionNamePageSpc = QtWidgets.QLabel(self.myScrollAreaWidgetContentsSpc)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelTestInstructionNamePageSpc.setFont(font)
self.myLabelTestInstructionNamePageSpc.setObjectName("myLabelTestInstructionNamePageSpc")
self.myGridLayoutScrollAreaWidgetContentsSpc.addWidget(self.myLabelTestInstructionNamePageSpc, 0, 0, 1, 1)
self.myScrollAreaGroupBoyCharacteristicsSpc.setWidget(self.myScrollAreaWidgetContentsSpc)
self.myGridLayoutGroupBoxCharacteristicsSpc.addWidget(self.myScrollAreaGroupBoyCharacteristicsSpc, 0, 0, 1, 1)
self.myGridLayoutPageSpc.addWidget(self.myGroupBoxCharacteristicsSpc, 0, 1, 2, 1)
self.myGridLayoutPageSpc.setColumnStretch(0, 6)
self.myGridLayoutPageSpc.setColumnStretch(1, 4)
self.myGridLayoutPageSpc.setRowStretch(0, 6)
self.myGridLayoutPageSpc.setRowStretch(1, 4)
self.myStackedWidget.addWidget(self.myPageSpc)
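        # Stacked-widget page myPageDeviation: a group box with an empty frame
        # (myFrameDeviationFull) for the full-size distribution view, a
        # statistics group box with the same set of labels as the SPC page,
        # and a characteristics table with the test instruction name.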
self.myPageDeviation = QtWidgets.QWidget()
self.myPageDeviation.setAccessibleName("myPageDeviation")
self.myPageDeviation.setObjectName("myPageDeviation")
self.myGridLayoutPageDeviation = QtWidgets.QGridLayout(self.myPageDeviation)
self.myGridLayoutPageDeviation.setContentsMargins(0, 0, 0, 0)
self.myGridLayoutPageDeviation.setSpacing(1)
self.myGridLayoutPageDeviation.setObjectName("myGridLayoutPageDeviation")
self.myGroupBoxDeviationFull = QtWidgets.QGroupBox(self.myPageDeviation)
self.myGroupBoxDeviationFull.setStyleSheet("#myGroupBoxDeviationFull {background-color: rgb(255, 255, 255); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxDeviationFull::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxDeviationFull.setObjectName("myGroupBoxDeviationFull")
self.myGridLayoutGroupBoxDeviationFull = QtWidgets.QGridLayout(self.myGroupBoxDeviationFull)
self.myGridLayoutGroupBoxDeviationFull.setContentsMargins(4, 15, 4, 4)
self.myGridLayoutGroupBoxDeviationFull.setObjectName("myGridLayoutGroupBoxDeviationFull")
self.myFrameDeviationFull = QtWidgets.QFrame(self.myGroupBoxDeviationFull)
self.myFrameDeviationFull.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myFrameDeviationFull.setFrameShadow(QtWidgets.QFrame.Plain)
self.myFrameDeviationFull.setLineWidth(0)
self.myFrameDeviationFull.setObjectName("myFrameDeviationFull")
self.myVerticalLayoutGroupBoxDeviationFull = QtWidgets.QVBoxLayout(self.myFrameDeviationFull)
self.myVerticalLayoutGroupBoxDeviationFull.setContentsMargins(0, 0, 0, 0)
self.myVerticalLayoutGroupBoxDeviationFull.setSpacing(0)
self.myVerticalLayoutGroupBoxDeviationFull.setObjectName("myVerticalLayoutGroupBoxDeviationFull")
self.myGridLayoutGroupBoxDeviationFull.addWidget(self.myFrameDeviationFull, 0, 0, 1, 1)
self.myGridLayoutPageDeviation.addWidget(self.myGroupBoxDeviationFull, 0, 0, 1, 1)
self.myGroupBoxStatisticDeviation = QtWidgets.QGroupBox(self.myPageDeviation)
self.myGroupBoxStatisticDeviation.setStyleSheet("#myGroupBoxStatisticDeviation {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxStatisticDeviation::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxStatisticDeviation.setObjectName("myGroupBoxStatisticDeviation")
self.myGridLayoutGroupBoxStatisticDeviation = QtWidgets.QGridLayout(self.myGroupBoxStatisticDeviation)
self.myGridLayoutGroupBoxStatisticDeviation.setContentsMargins(1, 15, 1, 1)
self.myGridLayoutGroupBoxStatisticDeviation.setSpacing(4)
self.myGridLayoutGroupBoxStatisticDeviation.setObjectName("myGridLayoutGroupBoxStatisticDeviation")
self.myScrollAreaGroupBoxStatisticDeviation = QtWidgets.QScrollArea(self.myGroupBoxStatisticDeviation)
self.myScrollAreaGroupBoxStatisticDeviation.setStyleSheet("background-color: transparent;")
self.myScrollAreaGroupBoxStatisticDeviation.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myScrollAreaGroupBoxStatisticDeviation.setFrameShadow(QtWidgets.QFrame.Sunken)
self.myScrollAreaGroupBoxStatisticDeviation.setWidgetResizable(True)
self.myScrollAreaGroupBoxStatisticDeviation.setObjectName("myScrollAreaGroupBoxStatisticDeviation")
self.myScrollAreaWidgetContentsDeviation_2 = QtWidgets.QWidget()
self.myScrollAreaWidgetContentsDeviation_2.setGeometry(QtCore.QRect(0, 0, 396, 113))
self.myScrollAreaWidgetContentsDeviation_2.setObjectName("myScrollAreaWidgetContentsDeviation_2")
self.myGridLayoutScrollAreaDeviation = QtWidgets.QGridLayout(self.myScrollAreaWidgetContentsDeviation_2)
self.myGridLayoutScrollAreaDeviation.setContentsMargins(1, 1, 1, 1)
self.myGridLayoutScrollAreaDeviation.setSpacing(4)
self.myGridLayoutScrollAreaDeviation.setObjectName("myGridLayoutScrollAreaDeviation")
self.myLabelDeviationLtlValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationLtlValue.setFont(font)
self.myLabelDeviationLtlValue.setObjectName("myLabelDeviationLtlValue")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationLtlValue, 2, 4, 1, 1)
self.myLabelDeviationLilValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationLilValue.setFont(font)
self.myLabelDeviationLilValue.setObjectName("myLabelDeviationLilValue")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationLilValue, 4, 4, 1, 1)
self.myFrameVlineStatisticDeviation = QtWidgets.QFrame(self.myScrollAreaWidgetContentsDeviation_2)
self.myFrameVlineStatisticDeviation.setStyleSheet("color: rgb(154, 154, 154);")
self.myFrameVlineStatisticDeviation.setFrameShape(QtWidgets.QFrame.VLine)
self.myFrameVlineStatisticDeviation.setFrameShadow(QtWidgets.QFrame.Plain)
self.myFrameVlineStatisticDeviation.setObjectName("myFrameVlineStatisticDeviation")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myFrameVlineStatisticDeviation, 0, 2, 5, 1)
self.myLabelDeviationCpkValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelDeviationCpkValue.setFont(font)
self.myLabelDeviationCpkValue.setObjectName("myLabelDeviationCpkValue")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationCpkValue, 0, 1, 1, 1)
self.myLabelDeviationDeivationValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationDeivationValue.setFont(font)
self.myLabelDeviationDeivationValue.setObjectName("myLabelDeviationDeivationValue")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationDeivationValue, 2, 1, 1, 1)
self.myLabelDeviationUtlValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationUtlValue.setFont(font)
self.myLabelDeviationUtlValue.setObjectName("myLabelDeviationUtlValue")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationUtlValue, 1, 4, 1, 1)
self.myLabelDeviationPpmValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelDeviationPpmValue.setFont(font)
self.myLabelDeviationPpmValue.setObjectName("myLabelDeviationPpmValue")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationPpmValue, 0, 4, 1, 1)
self.myLabelDeviationAverageValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationAverageValue.setFont(font)
self.myLabelDeviationAverageValue.setObjectName("myLabelDeviationAverageValue")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationAverageValue, 1, 1, 1, 1)
self.myLabelDeviationAndSixSigmaValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationAndSixSigmaValue.setFont(font)
self.myLabelDeviationAndSixSigmaValue.setObjectName("myLabelDeviationAndSixSigmaValue")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationAndSixSigmaValue, 3, 1, 1, 1)
self.myLabelDeviationMinusSixSigmaValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationMinusSixSigmaValue.setFont(font)
self.myLabelDeviationMinusSixSigmaValue.setObjectName("myLabelDeviationMinusSixSigmaValue")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationMinusSixSigmaValue, 4, 1, 1, 1)
self.myLabelDeviationUilValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationUilValue.setFont(font)
self.myLabelDeviationUilValue.setObjectName("myLabelDeviationUilValue")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationUilValue, 3, 4, 1, 1)
self.myLabelDeviationCpk = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelDeviationCpk.setFont(font)
self.myLabelDeviationCpk.setObjectName("myLabelDeviationCpk")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationCpk, 0, 0, 1, 1)
self.myLabelDeviationAverage = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationAverage.setFont(font)
self.myLabelDeviationAverage.setObjectName("myLabelDeviationAverage")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationAverage, 1, 0, 1, 1)
self.myLabelDeviationDeivation = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationDeivation.setFont(font)
self.myLabelDeviationDeivation.setObjectName("myLabelDeviationDeivation")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationDeivation, 2, 0, 1, 1)
self.myLabelDeviationAndSixSigma = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationAndSixSigma.setFont(font)
self.myLabelDeviationAndSixSigma.setObjectName("myLabelDeviationAndSixSigma")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationAndSixSigma, 3, 0, 1, 1)
self.myLabelDeviationMinusSixSigma = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationMinusSixSigma.setFont(font)
self.myLabelDeviationMinusSixSigma.setObjectName("myLabelDeviationMinusSixSigma")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationMinusSixSigma, 4, 0, 1, 1)
self.myLabelDeviationPpm = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.myLabelDeviationPpm.setFont(font)
self.myLabelDeviationPpm.setObjectName("myLabelDeviationPpm")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationPpm, 0, 3, 1, 1)
self.myLabelDeviationUtl = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationUtl.setFont(font)
self.myLabelDeviationUtl.setObjectName("myLabelDeviationUtl")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationUtl, 1, 3, 1, 1)
self.myLabelDeviationLtl = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationLtl.setFont(font)
self.myLabelDeviationLtl.setObjectName("myLabelDeviationLtl")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationLtl, 2, 3, 1, 1)
self.myLabelDeviationUil = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationUil.setFont(font)
self.myLabelDeviationUil.setObjectName("myLabelDeviationUil")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationUil, 3, 3, 1, 1)
self.myLabelDeviationLil = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelDeviationLil.setFont(font)
self.myLabelDeviationLil.setObjectName("myLabelDeviationLil")
self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationLil, 4, 3, 1, 1)
self.myGridLayoutScrollAreaDeviation.setColumnStretch(0, 2)
self.myGridLayoutScrollAreaDeviation.setColumnStretch(1, 1)
self.myGridLayoutScrollAreaDeviation.setColumnStretch(2, 1)
self.myGridLayoutScrollAreaDeviation.setColumnStretch(3, 2)
self.myGridLayoutScrollAreaDeviation.setColumnStretch(4, 1)
self.myGridLayoutScrollAreaDeviation.setRowStretch(0, 1)
self.myGridLayoutScrollAreaDeviation.setRowStretch(1, 1)
self.myGridLayoutScrollAreaDeviation.setRowStretch(2, 1)
self.myGridLayoutScrollAreaDeviation.setRowStretch(3, 1)
self.myGridLayoutScrollAreaDeviation.setRowStretch(4, 1)
self.myScrollAreaGroupBoxStatisticDeviation.setWidget(self.myScrollAreaWidgetContentsDeviation_2)
self.myGridLayoutGroupBoxStatisticDeviation.addWidget(self.myScrollAreaGroupBoxStatisticDeviation, 0, 0, 1, 1)
self.myGridLayoutPageDeviation.addWidget(self.myGroupBoxStatisticDeviation, 1, 0, 1, 1)
self.myGroupBoxCharacteristicsDeviation = QtWidgets.QGroupBox(self.myPageDeviation)
self.myGroupBoxCharacteristicsDeviation.setStyleSheet("#myGroupBoxCharacteristicsDeviation {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxCharacteristicsDeviation::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxCharacteristicsDeviation.setObjectName("myGroupBoxCharacteristicsDeviation")
self.myGridLayoutGroupBoxCharacteristicsDeviation = QtWidgets.QGridLayout(self.myGroupBoxCharacteristicsDeviation)
self.myGridLayoutGroupBoxCharacteristicsDeviation.setContentsMargins(1, 15, 1, 1)
self.myGridLayoutGroupBoxCharacteristicsDeviation.setSpacing(4)
self.myGridLayoutGroupBoxCharacteristicsDeviation.setObjectName("myGridLayoutGroupBoxCharacteristicsDeviation")
self.myScrollAreaGroupBoxCharacteristicsDeviation = QtWidgets.QScrollArea(self.myGroupBoxCharacteristicsDeviation)
self.myScrollAreaGroupBoxCharacteristicsDeviation.setStyleSheet("background-color: transparent;")
self.myScrollAreaGroupBoxCharacteristicsDeviation.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myScrollAreaGroupBoxCharacteristicsDeviation.setWidgetResizable(True)
self.myScrollAreaGroupBoxCharacteristicsDeviation.setObjectName("myScrollAreaGroupBoxCharacteristicsDeviation")
self.myScrollAreaWidgetContentsDeviation = QtWidgets.QWidget()
self.myScrollAreaWidgetContentsDeviation.setGeometry(QtCore.QRect(0, 0, 83, 94))
self.myScrollAreaWidgetContentsDeviation.setStyleSheet("#myScrollAreaWidgetContentsDeviation {background-color: transparent;}")
self.myScrollAreaWidgetContentsDeviation.setObjectName("myScrollAreaWidgetContentsDeviation")
self.myGridLayoutScrollAreaWidgetsDeviation = QtWidgets.QGridLayout(self.myScrollAreaWidgetContentsDeviation)
self.myGridLayoutScrollAreaWidgetsDeviation.setContentsMargins(1, 1, 1, 1)
self.myGridLayoutScrollAreaWidgetsDeviation.setSpacing(4)
self.myGridLayoutScrollAreaWidgetsDeviation.setObjectName("myGridLayoutScrollAreaWidgetsDeviation")
self.myTableViewCharacteristicsDeviationFull = QtWidgets.QTableView(self.myScrollAreaWidgetContentsDeviation)
self.myTableViewCharacteristicsDeviationFull.setStyleSheet("QHeaderView::section {\n"
" background-color: lightgray;\n"
" color: black;\n"
" padding: 4px;\n"
" border: 1px solid black;\n"
"}\n"
"\n"
"QHeaderView::section:checked\n"
"{\n"
" background-color: lightgray;\n"
"}")
self.myTableViewCharacteristicsDeviationFull.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myTableViewCharacteristicsDeviationFull.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.myTableViewCharacteristicsDeviationFull.setAlternatingRowColors(True)
self.myTableViewCharacteristicsDeviationFull.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.myTableViewCharacteristicsDeviationFull.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.myTableViewCharacteristicsDeviationFull.setObjectName("myTableViewCharacteristicsDeviationFull")
self.myGridLayoutScrollAreaWidgetsDeviation.addWidget(self.myTableViewCharacteristicsDeviationFull, 1, 0, 1, 1)
self.myLabelTestInstructionNamePageDeviation = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelTestInstructionNamePageDeviation.setFont(font)
self.myLabelTestInstructionNamePageDeviation.setObjectName("myLabelTestInstructionNamePageDeviation")
self.myGridLayoutScrollAreaWidgetsDeviation.addWidget(self.myLabelTestInstructionNamePageDeviation, 0, 0, 1, 1)
self.myScrollAreaGroupBoxCharacteristicsDeviation.setWidget(self.myScrollAreaWidgetContentsDeviation)
self.myGridLayoutGroupBoxCharacteristicsDeviation.addWidget(self.myScrollAreaGroupBoxCharacteristicsDeviation, 0, 0, 1, 1)
self.myGridLayoutPageDeviation.addWidget(self.myGroupBoxCharacteristicsDeviation, 0, 1, 2, 1)
self.myGridLayoutPageDeviation.setColumnStretch(0, 6)
self.myGridLayoutPageDeviation.setColumnStretch(1, 4)
self.myGridLayoutPageDeviation.setRowStretch(0, 6)
self.myGridLayoutPageDeviation.setRowStretch(1, 4)
self.myStackedWidget.addWidget(self.myPageDeviation)
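        # Stacked-widget page myPageResult: result table (myTableViewResult,
        # extended row selection) on the left, characteristics table plus test
        # instruction name on the right.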
self.myPageResult = QtWidgets.QWidget()
self.myPageResult.setObjectName("myPageResult")
self.myGridLayoutPageResult = QtWidgets.QGridLayout(self.myPageResult)
self.myGridLayoutPageResult.setContentsMargins(0, 0, 0, 0)
self.myGridLayoutPageResult.setSpacing(1)
self.myGridLayoutPageResult.setObjectName("myGridLayoutPageResult")
self.myGroupBoxCharacteristicsResult = QtWidgets.QGroupBox(self.myPageResult)
self.myGroupBoxCharacteristicsResult.setStyleSheet("#myGroupBoxCharacteristicsResult {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxCharacteristicsResult::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxCharacteristicsResult.setObjectName("myGroupBoxCharacteristicsResult")
self.myGridLayoutGroupBoxCharacteristicsResult = QtWidgets.QGridLayout(self.myGroupBoxCharacteristicsResult)
self.myGridLayoutGroupBoxCharacteristicsResult.setContentsMargins(1, 15, 1, 1)
self.myGridLayoutGroupBoxCharacteristicsResult.setSpacing(4)
self.myGridLayoutGroupBoxCharacteristicsResult.setObjectName("myGridLayoutGroupBoxCharacteristicsResult")
self.myScrollAreaCharacteristicsPageResult = QtWidgets.QScrollArea(self.myGroupBoxCharacteristicsResult)
self.myScrollAreaCharacteristicsPageResult.setStyleSheet("background-color: transparent;")
self.myScrollAreaCharacteristicsPageResult.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myScrollAreaCharacteristicsPageResult.setWidgetResizable(True)
self.myScrollAreaCharacteristicsPageResult.setObjectName("myScrollAreaCharacteristicsPageResult")
self.myScrollAreaCharacteristicsPageResultWidgetContents = QtWidgets.QWidget()
self.myScrollAreaCharacteristicsPageResultWidgetContents.setGeometry(QtCore.QRect(0, 0, 83, 94))
self.myScrollAreaCharacteristicsPageResultWidgetContents.setStyleSheet("#myScrollAreaCharacteristicsPageResultWidgetContents {background-color: transparent;}")
self.myScrollAreaCharacteristicsPageResultWidgetContents.setObjectName("myScrollAreaCharacteristicsPageResultWidgetContents")
self.myGridLayoutScrollAreaCharacteristicsWidgetContents = QtWidgets.QGridLayout(self.myScrollAreaCharacteristicsPageResultWidgetContents)
self.myGridLayoutScrollAreaCharacteristicsWidgetContents.setContentsMargins(1, 1, 1, 1)
self.myGridLayoutScrollAreaCharacteristicsWidgetContents.setSpacing(4)
self.myGridLayoutScrollAreaCharacteristicsWidgetContents.setObjectName("myGridLayoutScrollAreaCharacteristicsWidgetContents")
self.myLabelTestInstructionNamePageResult = QtWidgets.QLabel(self.myScrollAreaCharacteristicsPageResultWidgetContents)
font = QtGui.QFont()
font.setPointSize(12)
self.myLabelTestInstructionNamePageResult.setFont(font)
self.myLabelTestInstructionNamePageResult.setObjectName("myLabelTestInstructionNamePageResult")
self.myGridLayoutScrollAreaCharacteristicsWidgetContents.addWidget(self.myLabelTestInstructionNamePageResult, 0, 0, 1, 1)
self.myTableViewCharacteristicsPageResult = QtWidgets.QTableView(self.myScrollAreaCharacteristicsPageResultWidgetContents)
self.myTableViewCharacteristicsPageResult.setStyleSheet("QHeaderView::section {\n"
" background-color: lightgray;\n"
" color: black;\n"
" padding: 4px;\n"
" border: 1px solid black;\n"
"}\n"
"\n"
"QHeaderView::section:checked\n"
"{\n"
" background-color: lightgray;\n"
"}")
self.myTableViewCharacteristicsPageResult.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myTableViewCharacteristicsPageResult.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.myTableViewCharacteristicsPageResult.setAlternatingRowColors(True)
self.myTableViewCharacteristicsPageResult.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.myTableViewCharacteristicsPageResult.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.myTableViewCharacteristicsPageResult.setObjectName("myTableViewCharacteristicsPageResult")
self.myGridLayoutScrollAreaCharacteristicsWidgetContents.addWidget(self.myTableViewCharacteristicsPageResult, 1, 0, 1, 1)
self.myScrollAreaCharacteristicsPageResult.setWidget(self.myScrollAreaCharacteristicsPageResultWidgetContents)
self.myGridLayoutGroupBoxCharacteristicsResult.addWidget(self.myScrollAreaCharacteristicsPageResult, 0, 0, 1, 1)
self.myGridLayoutPageResult.addWidget(self.myGroupBoxCharacteristicsResult, 0, 1, 1, 1)
self.myGroupBoxResult = QtWidgets.QGroupBox(self.myPageResult)
self.myGroupBoxResult.setStyleSheet("#myGroupBoxResult {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxResult::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
self.myGroupBoxResult.setObjectName("myGroupBoxResult")
self.myGridLayoutGroupBoxResult = QtWidgets.QGridLayout(self.myGroupBoxResult)
self.myGridLayoutGroupBoxResult.setContentsMargins(1, 15, 1, 1)
self.myGridLayoutGroupBoxResult.setSpacing(4)
self.myGridLayoutGroupBoxResult.setObjectName("myGridLayoutGroupBoxResult")
self.myScrollAreaGroupBoxResult = QtWidgets.QScrollArea(self.myGroupBoxResult)
self.myScrollAreaGroupBoxResult.setStyleSheet("background-color: transparent;")
self.myScrollAreaGroupBoxResult.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myScrollAreaGroupBoxResult.setWidgetResizable(True)
self.myScrollAreaGroupBoxResult.setObjectName("myScrollAreaGroupBoxResult")
self.myScrollAreaGroupBoxResultWidgetContents = QtWidgets.QWidget()
self.myScrollAreaGroupBoxResultWidgetContents.setGeometry(QtCore.QRect(0, 0, 83, 71))
self.myScrollAreaGroupBoxResultWidgetContents.setStyleSheet("#myScrollAreaGroupBoxWidgetContents {background-color: transparent;}")
self.myScrollAreaGroupBoxResultWidgetContents.setObjectName("myScrollAreaGroupBoxResultWidgetContents")
self.myGridLayoutSrcollAreaResultWidgetContents = QtWidgets.QGridLayout(self.myScrollAreaGroupBoxResultWidgetContents)
self.myGridLayoutSrcollAreaResultWidgetContents.setContentsMargins(1, 1, 1, 1)
self.myGridLayoutSrcollAreaResultWidgetContents.setSpacing(4)
self.myGridLayoutSrcollAreaResultWidgetContents.setObjectName("myGridLayoutSrcollAreaResultWidgetContents")
self.myTableViewResult = QtWidgets.QTableView(self.myScrollAreaGroupBoxResultWidgetContents)
self.myTableViewResult.setStyleSheet("QHeaderView::section {\n"
" background-color: lightgray;\n"
" color: black;\n"
" padding: 4px;\n"
" border: 1px solid black;\n"
"}\n"
"\n"
"QHeaderView::section:checked\n"
"{\n"
" background-color: lightgray;\n"
"}")
self.myTableViewResult.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myTableViewResult.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.myTableViewResult.setAlternatingRowColors(True)
self.myTableViewResult.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.myTableViewResult.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.myTableViewResult.setObjectName("myTableViewResult")
self.myGridLayoutSrcollAreaResultWidgetContents.addWidget(self.myTableViewResult, 0, 0, 1, 1)
self.myScrollAreaGroupBoxResult.setWidget(self.myScrollAreaGroupBoxResultWidgetContents)
self.myGridLayoutGroupBoxResult.addWidget(self.myScrollAreaGroupBoxResult, 0, 0, 1, 1)
self.myGridLayoutPageResult.addWidget(self.myGroupBoxResult, 0, 0, 1, 1)
self.myGridLayoutPageResult.setColumnStretch(0, 6)
self.myGridLayoutPageResult.setColumnStretch(1, 4)
self.myStackedWidget.addWidget(self.myPageResult)
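        # Stacked-widget page myPageLicense: a single read-only text browser
        # for the license text.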
self.myPageLicense = QtWidgets.QWidget()
self.myPageLicense.setAccessibleName("myPageLicense")
self.myPageLicense.setObjectName("myPageLicense")
self.myGridLayoutPageLicense = QtWidgets.QGridLayout(self.myPageLicense)
self.myGridLayoutPageLicense.setContentsMargins(0, 0, 0, 0)
self.myGridLayoutPageLicense.setSpacing(1)
self.myGridLayoutPageLicense.setObjectName("myGridLayoutPageLicense")
self.myTextBrowserLicense = QtWidgets.QTextBrowser(self.myPageLicense)
font = QtGui.QFont()
font.setPointSize(12)
self.myTextBrowserLicense.setFont(font)
self.myTextBrowserLicense.setFocusPolicy(QtCore.Qt.NoFocus)
self.myTextBrowserLicense.setStyleSheet("#myTextBrowserLicense {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex}")
self.myTextBrowserLicense.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myTextBrowserLicense.setFrameShadow(QtWidgets.QFrame.Plain)
self.myTextBrowserLicense.setObjectName("myTextBrowserLicense")
self.myGridLayoutPageLicense.addWidget(self.myTextBrowserLicense, 0, 0, 1, 1)
self.myStackedWidget.addWidget(self.myPageLicense)
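        # Stacked-widget page myPageContact: a single read-only text browser
        # for the contact information.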
self.myPageContact = QtWidgets.QWidget()
self.myPageContact.setAccessibleName("myPageContact")
self.myPageContact.setObjectName("myPageContact")
self.myGridLayoutPageContact = QtWidgets.QGridLayout(self.myPageContact)
self.myGridLayoutPageContact.setContentsMargins(0, 0, 0, 0)
self.myGridLayoutPageContact.setSpacing(1)
self.myGridLayoutPageContact.setObjectName("myGridLayoutPageContact")
self.myTextBrowserContact = QtWidgets.QTextBrowser(self.myPageContact)
font = QtGui.QFont()
font.setPointSize(12)
self.myTextBrowserContact.setFont(font)
self.myTextBrowserContact.setFocusPolicy(QtCore.Qt.NoFocus)
self.myTextBrowserContact.setStyleSheet("#myTextBrowserContact {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex}")
self.myTextBrowserContact.setFrameShape(QtWidgets.QFrame.NoFrame)
self.myTextBrowserContact.setFrameShadow(QtWidgets.QFrame.Plain)
self.myTextBrowserContact.setObjectName("myTextBrowserContact")
self.myGridLayoutPageContact.addWidget(self.myTextBrowserContact, 0, 0, 1, 1)
self.myStackedWidget.addWidget(self.myPageContact)
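        # Menu bar (File / View / Info), status bar and the QActions that are
        # added to the menus further down.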
self.gridLayoutCentralwidget.addWidget(self.myStackedWidget, 0, 0, 1, 1)
myMainWindow.setCentralWidget(self.myCentralwidget)
self.myMenubar = QtWidgets.QMenuBar(myMainWindow)
self.myMenubar.setGeometry(QtCore.QRect(0, 0, 1024, 21))
self.myMenubar.setAccessibleName("myMenubar")
self.myMenubar.setObjectName("myMenubar")
self.myMenuFile = QtWidgets.QMenu(self.myMenubar)
self.myMenuFile.setAccessibleName("myMenuFile")
self.myMenuFile.setObjectName("myMenuFile")
self.myMenuView = QtWidgets.QMenu(self.myMenubar)
self.myMenuView.setAccessibleName("myMenuView")
self.myMenuView.setObjectName("myMenuView")
self.myMenuInfo = QtWidgets.QMenu(self.myMenubar)
self.myMenuInfo.setAccessibleName("myMenuInfo")
self.myMenuInfo.setObjectName("myMenuInfo")
myMainWindow.setMenuBar(self.myMenubar)
self.myStatusbar = QtWidgets.QStatusBar(myMainWindow)
self.myStatusbar.setAccessibleName("myStatusbar")
self.myStatusbar.setObjectName("myStatusbar")
myMainWindow.setStatusBar(self.myStatusbar)
self.myActionStartTesting = QtWidgets.QAction(myMainWindow)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/Icons/Images/Open Folder-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myActionStartTesting.setIcon(icon3)
self.myActionStartTesting.setObjectName("myActionStartTesting")
self.myActionCreateDocumentation = QtWidgets.QAction(myMainWindow)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/Icons/Images/Document-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myActionCreateDocumentation.setIcon(icon4)
self.myActionCreateDocumentation.setObjectName("myActionCreateDocumentation")
self.myActionNewTestInstruction = QtWidgets.QAction(myMainWindow)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/Icons/Images/Create New-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myActionNewTestInstruction.setIcon(icon5)
self.myActionNewTestInstruction.setObjectName("myActionNewTestInstruction")
self.myActionEditTestInstruction = QtWidgets.QAction(myMainWindow)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(":/Icons/Images/Edit Image-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myActionEditTestInstruction.setIcon(icon6)
self.myActionEditTestInstruction.setObjectName("myActionEditTestInstruction")
self.myActionQuit = QtWidgets.QAction(myMainWindow)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap(":/Icons/Images/Close Window-80.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myActionQuit.setIcon(icon7)
self.myActionQuit.setObjectName("myActionQuit")
self.myActionFullscreenTi = QtWidgets.QAction(myMainWindow)
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap(":/Icons/Images/To Do-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myActionFullscreenTi.setIcon(icon8)
self.myActionFullscreenTi.setObjectName("myActionFullscreenTi")
self.myActionFullscreenSpc = QtWidgets.QAction(myMainWindow)
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap(":/Icons/Images/Line Chart-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myActionFullscreenSpc.setIcon(icon9)
self.myActionFullscreenSpc.setObjectName("myActionFullscreenSpc")
self.myActionFullscreenDeviation = QtWidgets.QAction(myMainWindow)
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap(":/Icons/Images/Normal Distribution Histogram-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myActionFullscreenDeviation.setIcon(icon10)
self.myActionFullscreenDeviation.setObjectName("myActionFullscreenDeviation")
self.myActionLicense = QtWidgets.QAction(myMainWindow)
icon11 = QtGui.QIcon()
icon11.addPixmap(QtGui.QPixmap(":/Icons/Images/Diploma 1-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myActionLicense.setIcon(icon11)
self.myActionLicense.setObjectName("myActionLicense")
self.myActionContact = QtWidgets.QAction(myMainWindow)
icon12 = QtGui.QIcon()
icon12.addPixmap(QtGui.QPixmap(":/Icons/Images/Address Book-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myActionContact.setIcon(icon12)
self.myActionContact.setObjectName("myActionContact")
self.myActionResultlist = QtWidgets.QAction(myMainWindow)
self.myActionResultlist.setIcon(icon4)
self.myActionResultlist.setObjectName("myActionResultlist")
self.myActionStartTestingScanner = QtWidgets.QAction(myMainWindow)
icon13 = QtGui.QIcon()
icon13.addPixmap(QtGui.QPixmap(":/Icons/Images/Barcode-96.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.myActionStartTestingScanner.setIcon(icon13)
self.myActionStartTestingScanner.setObjectName("myActionStartTestingScanner")
self.myMenuFile.addAction(self.myActionStartTesting)
self.myMenuFile.addAction(self.myActionStartTestingScanner)
self.myMenuFile.addSeparator()
self.myMenuFile.addAction(self.myActionNewTestInstruction)
self.myMenuFile.addAction(self.myActionEditTestInstruction)
self.myMenuFile.addSeparator()
self.myMenuFile.addAction(self.myActionQuit)
self.myMenuView.addAction(self.myActionFullscreenTi)
self.myMenuView.addAction(self.myActionFullscreenSpc)
self.myMenuView.addAction(self.myActionFullscreenDeviation)
self.myMenuView.addAction(self.myActionResultlist)
self.myMenuInfo.addAction(self.myActionLicense)
self.myMenuInfo.addAction(self.myActionContact)
self.myMenubar.addAction(self.myMenuFile.menuAction())
self.myMenubar.addAction(self.myMenuView.menuAction())
self.myMenubar.addAction(self.myMenuInfo.menuAction())
self.retranslateUi(myMainWindow)
self.myStackedWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(myMainWindow)
myMainWindow.setTabOrder(self.myLineEditActualValue, self.myLineEditSerialNo)
myMainWindow.setTabOrder(self.myLineEditSerialNo, self.myToolButtonOk)
myMainWindow.setTabOrder(self.myToolButtonOk, self.myToolButtonNok)
myMainWindow.setTabOrder(self.myToolButtonNok, self.myPushButtonForward)
myMainWindow.setTabOrder(self.myPushButtonForward, self.myPushButtonBackward)
myMainWindow.setTabOrder(self.myPushButtonBackward, self.myPushButtonZoom)
myMainWindow.setTabOrder(self.myPushButtonZoom, self.myPushButtonVideo)
myMainWindow.setTabOrder(self.myPushButtonVideo, self.myTableViewCharacteristics)
myMainWindow.setTabOrder(self.myTableViewCharacteristics, self.myScrollAreaCharacteristics)
myMainWindow.setTabOrder(self.myScrollAreaCharacteristics, self.myScrollAreaSetpoint)
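
    # Typical use of this generated class (sketch only; the class name
    # "Ui_myMainWindow" is an assumption derived from the top-level widget
    # name and is not visible in this excerpt):
    #
    #     import sys
    #     from PyQt5 import QtWidgets
    #
    #     app = QtWidgets.QApplication(sys.argv)
    #     main_window = QtWidgets.QMainWindow()
    #     ui = Ui_myMainWindow()      # assumed name of the generated class
    #     ui.setupUi(main_window)     # builds the widget tree defined above
    #     main_window.show()
    #     sys.exit(app.exec_())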
def retranslateUi(self, myMainWindow):
_translate = QtCore.QCoreApplication.translate
self.myGroupBoxSpc.setTitle(_translate("myMainWindow", "SPC"))
self.myFrameSpc.setAccessibleName(_translate("myMainWindow", "myFrameSpc"))
self.myGroupBoxSetpoint.setTitle(_translate("myMainWindow", "Soll"))
self.myGroupBoxCharacteristics.setTitle(_translate("myMainWindow", "Merkmale"))
self.myGroupBoxDeviation.setTitle(_translate("myMainWindow", "Verteilung"))
self.myFrameDeviation.setAccessibleName(_translate("myMainWindow", "myFrameDeviation"))
self.myGroupBoxActualValue.setTitle(_translate("myMainWindow", "Ist"))
self.myToolButtonOk.setText(_translate("myMainWindow", "i.O."))
self.myLineEditSerialNo.setPlaceholderText(_translate("myMainWindow", "..."))
self.myLabelSerialNo.setText(_translate("myMainWindow", "Serien Nr.:"))
self.myToolButtonNok.setText(_translate("myMainWindow", "n.i.O."))
self.myLineEditActualValue.setPlaceholderText(_translate("myMainWindow", "..."))
self.myLabelActualValue.setText(_translate("myMainWindow", "Messwert:"))
self.myLabelActualValuePreview.setAccessibleName(_translate("myMainWindow", "myLabelActualValuePreview"))
self.myGroupBoxImage.setTitle(_translate("myMainWindow", "Bilder"))
self.myPushButtonBackward.setText(_translate("myMainWindow", "Zurück"))
self.myPushButtonZoom.setText(_translate("myMainWindow", "Zoom"))
self.myPushButtonForward.setText(_translate("myMainWindow", "Vor"))
self.myPushButtonVideo.setText(_translate("myMainWindow", "Video"))
self.myGroupBoxDescription.setTitle(_translate("myMainWindow", "Beschreibung"))
self.myGroupBoxSpcFull.setAccessibleName(_translate("myMainWindow", "myGroupBoxSpcFull"))
self.myGroupBoxSpcFull.setTitle(_translate("myMainWindow", "SPC Fullscreen"))
self.myFrameSpcFull.setAccessibleName(_translate("myMainWindow", "myFrameSpcFull"))
self.myGroupBoxStatisticSpc.setAccessibleName(_translate("myMainWindow", "myGroupBoxStatisticSpc"))
self.myGroupBoxStatisticSpc.setTitle(_translate("myMainWindow", "Statistik"))
self.myScrollAreaGroupBoxStatisticSpc.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoxStatisticSpc"))
self.myLabelStatisticSpcCpk.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcCpk"))
self.myLabelStatisticSpcCpk.setText(_translate("myMainWindow", "Cpk:"))
self.myLabelSpcCpkValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcCpkValue"))
self.myLabelSpcCpkValue.setText(_translate("myMainWindow", "..."))
self.myLabelStatisticSpcAverage.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcAverage"))
self.myLabelStatisticSpcAverage.setText(_translate("myMainWindow", "Mittelwert (µ):"))
self.myLabelSpcDeivationValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcDeivationValue"))
self.myLabelSpcDeivationValue.setText(_translate("myMainWindow", "..."))
self.myLabelStatistcSpcDeviation.setAccessibleName(_translate("myMainWindow", "myLabelStatistcSpcDeviation"))
self.myLabelStatistcSpcDeviation.setText(_translate("myMainWindow", "Standardabweichung (σ):"))
self.myLabelSpcAverageValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcAverageValue"))
self.myLabelSpcAverageValue.setText(_translate("myMainWindow", "..."))
self.myLabelStatisticSpcAndSicSigma.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcAndSicSigma"))
self.myLabelStatisticSpcAndSicSigma.setText(_translate("myMainWindow", "µ + 6σ:"))
self.myLabelStatisticSpcMinusSixSigma.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcMinusSixSigma"))
self.myLabelStatisticSpcMinusSixSigma.setText(_translate("myMainWindow", "µ - 6σ:"))
self.myLabelSpcMinusSixSigmaValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcMinusSixSigmaValue"))
self.myLabelSpcMinusSixSigmaValue.setText(_translate("myMainWindow", "..."))
self.myLabelSpcAndSixSigmaValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcAndSixSigmaValue"))
self.myLabelSpcAndSixSigmaValue.setText(_translate("myMainWindow", "..."))
self.myLabelStatisticSpcUtl.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcUtl"))
self.myLabelStatisticSpcUtl.setText(_translate("myMainWindow", "Obere Toleranzgrenze:"))
self.myLabelStatisticSpcLtl.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcLtl"))
self.myLabelStatisticSpcLtl.setText(_translate("myMainWindow", "Untere Toleranzgrenze:"))
self.myLabelSpcUtlValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcUtlValue"))
self.myLabelSpcUtlValue.setText(_translate("myMainWindow", "..."))
self.myLabelSpcLtlValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcLtlValue"))
self.myLabelSpcLtlValue.setText(_translate("myMainWindow", "..."))
self.myLabelStatisticSpcUil.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcUil"))
self.myLabelStatisticSpcUil.setText(_translate("myMainWindow", "Obere Eingriffsgrenze:"))
self.myLabelStatisticSpcLil.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcLil"))
self.myLabelStatisticSpcLil.setText(_translate("myMainWindow", "Untere Eingriffsgrenze:"))
self.myLabelSpcUilValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcUilValue"))
self.myLabelSpcUilValue.setText(_translate("myMainWindow", "..."))
self.myLabelSpcLilValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcLilValue"))
self.myLabelSpcLilValue.setText(_translate("myMainWindow", "..."))
self.myFrameVlineStatisticSpc.setAccessibleName(_translate("myMainWindow", "myFrameVlineStatisticSpc"))
self.myLabelStatisticSpcPpm.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcPpm"))
self.myLabelStatisticSpcPpm.setText(_translate("myMainWindow", "PPM:"))
self.myLabelSpcPpmValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcPpmValue"))
self.myLabelSpcPpmValue.setText(_translate("myMainWindow", "..."))
self.myGroupBoxCharacteristicsSpc.setAccessibleName(_translate("myMainWindow", "myGroupBoxCharacteristicsSpc"))
self.myGroupBoxCharacteristicsSpc.setTitle(_translate("myMainWindow", "Merkmale"))
self.myScrollAreaGroupBoyCharacteristicsSpc.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoyCharacteristicsSpc"))
self.myScrollAreaWidgetContentsSpc.setAccessibleName(_translate("myMainWindow", "myScrollAreaWidgetContentsSpc"))
self.myTableViewCharacteristicsPageSpc.setAccessibleName(_translate("myMainWindow", "myTableViewCharacteristicsPageSpc"))
self.myLabelTestInstructionNamePageSpc.setAccessibleName(_translate("myMainWindow", "myLabelTestInstructionNamePageSpc"))
self.myLabelTestInstructionNamePageSpc.setText(_translate("myMainWindow", "..."))
self.myGroupBoxDeviationFull.setAccessibleName(_translate("myMainWindow", "myGroupBoxDeviationFull"))
self.myGroupBoxDeviationFull.setTitle(_translate("myMainWindow", "Verteilung Fullscreen"))
self.myFrameDeviationFull.setAccessibleName(_translate("myMainWindow", "myFrameDeviationFull"))
self.myGroupBoxStatisticDeviation.setAccessibleName(_translate("myMainWindow", "myGroupBoxStatisticDeviation"))
self.myGroupBoxStatisticDeviation.setTitle(_translate("myMainWindow", "Statistik"))
self.myScrollAreaGroupBoxStatisticDeviation.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoxStatisticDeviation"))
self.myScrollAreaWidgetContentsDeviation_2.setAccessibleName(_translate("myMainWindow", "myScrollAreaWidgetContentsDeviation"))
self.myLabelDeviationLtlValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationLtlValue"))
self.myLabelDeviationLtlValue.setText(_translate("myMainWindow", "..."))
self.myLabelDeviationLilValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationLilValue"))
self.myLabelDeviationLilValue.setText(_translate("myMainWindow", "..."))
self.myFrameVlineStatisticDeviation.setAccessibleName(_translate("myMainWindow", "myFrameVlineStatisticDeviation"))
self.myLabelDeviationCpkValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationCpkValue"))
self.myLabelDeviationCpkValue.setText(_translate("myMainWindow", "..."))
self.myLabelDeviationDeivationValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationDeivationValue"))
self.myLabelDeviationDeivationValue.setText(_translate("myMainWindow", "..."))
self.myLabelDeviationUtlValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationUtlValue"))
self.myLabelDeviationUtlValue.setText(_translate("myMainWindow", "..."))
self.myLabelDeviationPpmValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationPpmValue"))
self.myLabelDeviationPpmValue.setText(_translate("myMainWindow", "..."))
self.myLabelDeviationAverageValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationAverageValue"))
self.myLabelDeviationAverageValue.setText(_translate("myMainWindow", "..."))
self.myLabelDeviationAndSixSigmaValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationAndSixSigmaValue"))
self.myLabelDeviationAndSixSigmaValue.setText(_translate("myMainWindow", "..."))
self.myLabelDeviationMinusSixSigmaValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationMinusSixSigmaValue"))
self.myLabelDeviationMinusSixSigmaValue.setText(_translate("myMainWindow", "..."))
self.myLabelDeviationUilValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationUilValue"))
self.myLabelDeviationUilValue.setText(_translate("myMainWindow", "..."))
self.myLabelDeviationCpk.setAccessibleName(_translate("myMainWindow", "myLabelDeviationCpk"))
self.myLabelDeviationCpk.setText(_translate("myMainWindow", "Cpk:"))
self.myLabelDeviationAverage.setAccessibleName(_translate("myMainWindow", "myLabelDeviationAverage"))
self.myLabelDeviationAverage.setText(_translate("myMainWindow", "Mittelwert (µ):"))
self.myLabelDeviationDeivation.setAccessibleName(_translate("myMainWindow", "myLabelDeviationDeivation"))
self.myLabelDeviationDeivation.setText(_translate("myMainWindow", "Standardabweichung (σ):"))
self.myLabelDeviationAndSixSigma.setAccessibleName(_translate("myMainWindow", "myLabelDeviationAndSixSigma"))
self.myLabelDeviationAndSixSigma.setText(_translate("myMainWindow", "µ + 6σ:"))
self.myLabelDeviationMinusSixSigma.setAccessibleName(_translate("myMainWindow", "myLabelDeviationMinusSixSigma"))
self.myLabelDeviationMinusSixSigma.setText(_translate("myMainWindow", "µ - 6σ:"))
self.myLabelDeviationPpm.setAccessibleName(_translate("myMainWindow", "myLabelDeviationPpm"))
self.myLabelDeviationPpm.setText(_translate("myMainWindow", "PPM:"))
self.myLabelDeviationUtl.setAccessibleName(_translate("myMainWindow", "myLabelDeviationUtl"))
self.myLabelDeviationUtl.setText(_translate("myMainWindow", "Obere Toleranzgrenze:"))
self.myLabelDeviationLtl.setText(_translate("myMainWindow", "Untere Toleranzgrenze:"))
self.myLabelDeviationUil.setAccessibleName(_translate("myMainWindow", "myLabelDeviationUil"))
self.myLabelDeviationUil.setText(_translate("myMainWindow", "Obere Eingriffsgrenze:"))
self.myLabelDeviationLil.setAccessibleName(_translate("myMainWindow", "myLabelDeviationLil"))
self.myLabelDeviationLil.setText(_translate("myMainWindow", "Untere Eingriffsgrenze:"))
self.myGroupBoxCharacteristicsDeviation.setAccessibleName(_translate("myMainWindow", "myGroupBoxCharacteristicsDeviation"))
self.myGroupBoxCharacteristicsDeviation.setTitle(_translate("myMainWindow", "Merkmale"))
self.myScrollAreaGroupBoxCharacteristicsDeviation.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoxCharacteristicsDeviation"))
self.myScrollAreaWidgetContentsDeviation.setAccessibleName(_translate("myMainWindow", "myScrollAreaWidgetContentsDeviation"))
self.myTableViewCharacteristicsDeviationFull.setAccessibleName(_translate("myMainWindow", "myTableViewCharacteristicsDeviationFull"))
self.myLabelTestInstructionNamePageDeviation.setAccessibleName(_translate("myMainWindow", "myLabelTestInstructionNamePageDeviation"))
self.myLabelTestInstructionNamePageDeviation.setText(_translate("myMainWindow", "..."))
self.myPageResult.setAccessibleName(_translate("myMainWindow", "myPageResult"))
self.myGroupBoxCharacteristicsResult.setAccessibleName(_translate("myMainWindow", "myGroupBoxCharacteristicsResult"))
self.myGroupBoxCharacteristicsResult.setTitle(_translate("myMainWindow", "Merkmale"))
self.myScrollAreaCharacteristicsPageResult.setAccessibleName(_translate("myMainWindow", "myScrollAreaCharacteristicsPageResult"))
self.myScrollAreaCharacteristicsPageResultWidgetContents.setAccessibleName(_translate("myMainWindow", "myScrollAreaCharacteristicsPageResultWidgetContents"))
self.myLabelTestInstructionNamePageResult.setAccessibleName(_translate("myMainWindow", "myLabelTestInstructionNamePageResult"))
self.myLabelTestInstructionNamePageResult.setText(_translate("myMainWindow", "..."))
self.myTableViewCharacteristicsPageResult.setAccessibleName(_translate("myMainWindow", "myTableViewCharacteristicsPageResult"))
self.myGroupBoxResult.setAccessibleName(_translate("myMainWindow", "myGroupBoxResult"))
self.myGroupBoxResult.setTitle(_translate("myMainWindow", "Ergebnisliste"))
self.myScrollAreaGroupBoxResult.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoxResult"))
self.myScrollAreaGroupBoxResultWidgetContents.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoxResultWidgetContents"))
self.myTableViewResult.setAccessibleName(_translate("myMainWindow", "myTableViewResult"))
self.myTextBrowserLicense.setAccessibleName(_translate("myMainWindow", "myTextBrowserLicense"))
self.myTextBrowserContact.setAccessibleName(_translate("myMainWindow", "myTextBrowserContact"))
self.myMenuFile.setTitle(_translate("myMainWindow", "Datei"))
self.myMenuView.setTitle(_translate("myMainWindow", "Ansicht"))
self.myMenuInfo.setTitle(_translate("myMainWindow", "Info"))
self.myActionStartTesting.setText(_translate("myMainWindow", "Prüfplan öffnen - Manuell"))
self.myActionStartTesting.setStatusTip(_translate("myMainWindow", "Die Prüfungen mit einem vorhandenen Prüfplan beginnen"))
self.myActionStartTesting.setShortcut(_translate("myMainWindow", "Ctrl+O"))
self.myActionCreateDocumentation.setText(_translate("myMainWindow", "Ergebnis dokumentieren"))
self.myActionCreateDocumentation.setStatusTip(_translate("myMainWindow", "Die Ergebnisse in einem Prüfprotokoll dokumentieren"))
self.myActionCreateDocumentation.setShortcut(_translate("myMainWindow", "Ctrl+I"))
self.myActionNewTestInstruction.setText(_translate("myMainWindow", "Prüfplan erstellen"))
self.myActionNewTestInstruction.setStatusTip(_translate("myMainWindow", "Einen neuen Prüfplan erstellen"))
self.myActionNewTestInstruction.setShortcut(_translate("myMainWindow", "Ctrl+N"))
self.myActionEditTestInstruction.setText(_translate("myMainWindow", "Prüfplan bearbeiten"))
self.myActionEditTestInstruction.setStatusTip(_translate("myMainWindow", "Einen vorhandenen Prüfplan bearbeiten"))
self.myActionEditTestInstruction.setShortcut(_translate("myMainWindow", "Ctrl+E"))
self.myActionQuit.setText(_translate("myMainWindow", "Beenden"))
self.myActionQuit.setStatusTip(_translate("myMainWindow", "Das Programm beenden"))
self.myActionQuit.setShortcut(_translate("myMainWindow", "Ctrl+Q"))
self.myActionFullscreenTi.setText(_translate("myMainWindow", "Prüfplan"))
self.myActionFullscreenTi.setStatusTip(_translate("myMainWindow", "Den Prüfplan im Vollbildmodus anzeigen"))
self.myActionFullscreenTi.setShortcut(_translate("myMainWindow", "Ctrl+T"))
self.myActionFullscreenSpc.setText(_translate("myMainWindow", "SPC Diagramm"))
self.myActionFullscreenSpc.setStatusTip(_translate("myMainWindow", "Das SPC Diagramm im Vollbildmodus anzeigen"))
self.myActionFullscreenSpc.setShortcut(_translate("myMainWindow", "Ctrl+S"))
self.myActionFullscreenDeviation.setText(_translate("myMainWindow", "Verteilungsdiagramm"))
self.myActionFullscreenDeviation.setStatusTip(_translate("myMainWindow", "Das Verteilungsdiagramm im Vollbildmodus anzeigen"))
self.myActionFullscreenDeviation.setShortcut(_translate("myMainWindow", "Ctrl+D"))
self.myActionLicense.setText(_translate("myMainWindow", "Lizenz"))
self.myActionLicense.setStatusTip(_translate("myMainWindow", "Die Lizenzvereinbarung anzeigen"))
self.myActionContact.setText(_translate("myMainWindow", "Kontakt"))
self.myActionContact.setStatusTip(_translate("myMainWindow", "Die Kontaktdaten anzeigen"))
self.myActionResultlist.setText(_translate("myMainWindow", "Ergebnisliste"))
self.myActionResultlist.setStatusTip(_translate("myMainWindow", "Die Ergebnisliste anzeigen"))
self.myActionResultlist.setShortcut(_translate("myMainWindow", "Ctrl+R"))
self.myActionStartTestingScanner.setText(_translate("myMainWindow", "Prüfplan öffnen - Scanner"))
self.myActionStartTestingScanner.setStatusTip(_translate("myMainWindow", "Die Prüfungen mit einem vorhandenen Prüfplan beginnen, mit Scannerunterstützung"))
self.myActionStartTestingScanner.setShortcut(_translate("myMainWindow", "Ctrl+B"))
import InResources_rc
| gpl-3.0 | 1,980,948,443,473,866,500 | 71.64753 | 497 | 0.762483 | false |
itdxer/neupy | examples/competitive/sofm_compare_weight_init.py | 1 | 1754 | from itertools import product
import matplotlib.pyplot as plt
from neupy import algorithms, utils, init
from utils import plot_2d_grid, make_circle, make_elipse, make_square
plt.style.use('ggplot')
utils.reproducible()
if __name__ == '__main__':
GRID_WIDTH = 4
GRID_HEIGHT = 4
datasets = [
make_square(),
make_circle(),
make_elipse(corr=0.7),
]
configurations = [{
'weight_init': init.Uniform(0, 1),
'title': 'Random uniform initialization',
}, {
'weight_init': 'sample_from_data',
'title': 'Sampled from the data',
}, {
'weight_init': 'init_pca',
'title': 'Initialize with PCA',
}]
plt.figure(figsize=(15, 15))
plt.title("Compare weight initialization methods for SOFM")
red, blue = ('#E24A33', '#348ABD')
n_columns = len(configurations)
n_rows = len(datasets)
index = 1
for data, conf in product(datasets, configurations):
sofm = algorithms.SOFM(
n_inputs=2,
features_grid=(GRID_HEIGHT, GRID_WIDTH),
verbose=True,
shuffle_data=True,
weight=conf['weight_init'],
learning_radius=8,
reduce_radius_after=5,
std=2,
reduce_std_after=5,
step=0.3,
reduce_step_after=5,
)
if not sofm.initialized:
sofm.init_weights(data)
plt.subplot(n_rows, n_columns, index)
plt.title(conf['title'])
plt.scatter(*data.T, color=blue, alpha=0.05)
plt.scatter(*sofm.weight, color=red)
weights = sofm.weight.reshape((2, GRID_HEIGHT, GRID_WIDTH))
plot_2d_grid(weights, color=red)
index += 1
plt.show()
| mit | 8,403,345,659,887,349,000 | 22.702703 | 69 | 0.562144 | false |
alphagov/stagecraft | stagecraft/libs/mass_update/copy_dataset_with_new_mapping.py | 1 | 5218 | import reversion
from performanceplatform.client import DataSet as client
from stagecraft.apps.datasets.models import DataGroup, DataSet, DataType
from django.conf import settings
INTERNAL_KEY = [
"_day_start_at",
"_hour_start_at",
"_week_start_at",
"_month_start_at",
"_quarter_start_at",
"_updated_at"]
# should pass in whole mapping?
@reversion.create_revision()
def migrate_data_set(old_attributes, changed_attributes, data_mapping):
print("getting existing dataset")
existing_data_set = get_existing_data_set(old_attributes['data_group'],
old_attributes['data_type'])
if not existing_data_set:
print("no existing dataset found, skipping")
return False
new_data_set_attributes = get_new_attributes(
serialize_for_update(existing_data_set), changed_attributes)
print("got new attributes {}".format(new_data_set_attributes))
print("creating new dataset with attributes")
new_data_set = get_or_create_new_data_set(new_data_set_attributes)
print("getting old data")
old_data = get_old_data(old_attributes['data_group'],
old_attributes['data_type'])
print("converting old data")
new_data = convert_old_data(old_data, data_mapping)
serialized_new_data_set = new_data_set.serialize()
print("posting data {} to dataset {}".format(new_data,
serialized_new_data_set))
post_new_data(serialized_new_data_set['data_group'],
serialized_new_data_set['data_type'],
serialized_new_data_set['bearer_token'],
new_data)
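# Illustrative sketch of the expected ``data_mapping`` argument (an assumption
# inferred from how convert_old_data() and apply_new_key_mappings() below use it):
#
#     data_mapping = {
#         'key_mapping': {'old_key': 'new_key'},
#         'value_mapping': {'old_value': 'new_value'},
#     }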
def serialize_for_update(data_set):
serialized_data_set = data_set.serialize()
serialized_data_set['auto_ids'] = data_set.auto_ids
serialized_data_set['upload_filters'] = data_set.upload_filters
return serialized_data_set
def get_existing_data_set(data_group_name, data_type_name):
data_type = DataType.objects.filter(
name=data_type_name).first()
data_group = DataGroup.objects.filter(
name=data_group_name).first()
if not data_group or not data_type:
return None
return DataSet.objects.filter(data_type=data_type,
data_group=data_group).first()
def get_new_attributes(existing_attributes, changed_attributes):
"""
>>> existing_attributes = {'a': 1, 'b': 2, 'c': 3}
>>> changed_attributes = {'a': 6, 'c': 'x,y'}
>>> get_new_attributes(existing_attributes,changed_attributes) \
== {'b': 2, 'c': 'x,y', 'a': 6}
True
"""
new_attributes = existing_attributes.copy()
new_attributes.update(changed_attributes)
return new_attributes
def get_or_create_new_data_set(new_attributes):
(data_type, new) = DataType.objects.get_or_create(
name=new_attributes.pop('data_type'))
(data_group, new) = DataGroup.objects.get_or_create(
name=new_attributes.pop('data_group'))
(obj, new) = DataSet.objects.get_or_create(
data_type=data_type, data_group=data_group)
new_attributes['data_type'] = data_type
new_attributes['data_group'] = data_group
del new_attributes['schema']
del new_attributes['name']
data_set_to_update_queryset = DataSet.objects.filter(name=obj.name)
data_set_to_update_queryset.update(**new_attributes)
return data_set_to_update_queryset.first()
def get_qualified_backdrop_url():
return settings.BACKDROP_WRITE_URL + '/data'
def get_old_data(data_group_name, data_type_name):
data_set_client = client.from_group_and_type(get_qualified_backdrop_url(),
data_group_name,
data_type_name)
return data_set_client.get().json()['data']
def apply_new_key_mappings(document, key_mapping):
for key, val in document.items():
if key in key_mapping:
document.pop(key)
document[key_mapping[key]] = val
elif key in INTERNAL_KEY:
del document[key]
else:
document[key] = val
return document
def apply_new_values(document, value_mapping):
    # we need to convert counts to int - they are floats currently
for key, val in document.items():
if val in value_mapping:
document[key] = value_mapping[val]
if key == 'count':
document[key] = int(val)
return document
def convert_old_data(old_data, data_mapping):
new_data = []
key_mapping = data_mapping['key_mapping']
value_mapping = data_mapping['value_mapping']
for document in old_data:
doc = apply_new_values(
apply_new_key_mappings(document, key_mapping), value_mapping)
new_data.append(doc)
return new_data
def post_new_data(data_group_name, data_type_name, bearer_token, data):
data_set_client = client.from_group_and_type(get_qualified_backdrop_url(),
data_group_name,
data_type_name,
token=bearer_token)
return data_set_client.post(data)
| mit | 718,580,277,900,791,000 | 36.271429 | 78 | 0.61422 | false |
64studio/pdk | pdk/xml_legacy/sax/writer.py | 1 | 18896 | """SAX document handlers that support output generation of XML, SGML,
and XHTML.
This module provides three different groups of objects: the actual SAX
document handlers that drive the output, DTD information containers,
and syntax descriptors (of limited public use in most cases).
Output Drivers
--------------
The output drivers conform to the SAX C<DocumentHandler> protocol.
They can be used anywhere a C<DocumentHandler> is used. Two drivers
are provided: a `basic' driver which creates a fairly minimal output
without much intelligence, and a `pretty-printing' driver that
performs pretty-printing with nice indentation and the like. Both can
optionally make use of DTD information and syntax objects.
DTD Information Containers
--------------------------
Each DTD information object provides an attribute C<syntax> which
describes the expected output syntax; an alternate can be provided to
the output drivers if desired.
Syntax Descriptors
------------------
Syntax descriptor objects provide several attributes which describe
the various lexical components of XML & SGML markup. The attributes
have names that reflect the shorthand notation from the SGML world,
but the values are strings which give the appropriate characters for
the markup language being described. The one addition is the
C<empty_stagc> attribute which should be used to end the start tag of
elements which have no content. This is needed to properly support
XML and XHTML.
"""
__version__ = '$Revision: 1.9 $'
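# A minimal usage sketch (illustrative only, not part of the original module).
# The drivers follow the SAX DocumentHandler protocol, so output is produced by
# calling the handler methods directly, e.g.:
#
#     import sys
#     writer = XmlWriter(sys.stdout, encoding='utf-8')
#     writer.startDocument()
#     writer.startElement('greeting', {'lang': 'en'})
#     writer.characters('Hello, world', 0, len('Hello, world'))
#     writer.endElement('greeting')
#     writer.endDocument()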
import string
import pdk.xml_legacy.parsers.xmlproc.dtdparser
import pdk.xml_legacy.parsers.xmlproc.xmlapp
from saxutils import escape
DEFAULT_LINELENGTH = 74
class Syntax:
com = "--" # comment start or end
cro = "&#" # character reference open
refc = ";" # reference close
dso = "[" # declaration subset open
dsc = "]" # declaration subset close
ero = "&" # entity reference open
lit = '"' # literal start or end
lit_quoted = '"' # quoted literal
lita = "'" # literal start or end (alternative)
mdo = "<!" # markup declaration open
mdc = ">" # markup declaration close
msc = "]]" # marked section close
    pio = "<?" # processing instruction open
stago = "<" # start tag open
etago = "</" # end tag open
tagc = ">" # tag close
vi = "=" # value indicator
def __init__(self):
if self.__class__ is Syntax:
raise RuntimeError, "Syntax must be subclassed to be used!"
class SGMLSyntax(Syntax):
empty_stagc = ">"
pic = ">" # processing instruction close
net = "/" # null end tag
class XMLSyntax(Syntax):
empty_stagc = "/>"
pic = "?>" # processing instruction close
net = None # null end tag not supported
class XHTMLSyntax(XMLSyntax):
empty_stagc = " />"
class DoctypeInfo:
syntax = XMLSyntax()
fpi = None
sysid = None
def __init__(self):
self.__empties = {}
self.__elements_only = {}
self.__attribs = {}
def is_empty(self, gi):
return self.__empties.has_key(gi)
def get_empties_list(self):
return self.__empties.keys()
def has_element_content(self, gi):
return self.__elements_only.has_key(gi)
def get_element_containers_list(self):
return self.__elements_only.keys()
def get_attributes_list(self, gi):
return self.__attribs.get(gi, {}).keys()
def get_attribute_info(self, gi, attr):
return self.__attribs[gi][attr]
def add_empty(self, gi):
self.__empties[gi] = 1
def add_element_container(self, gi):
self.__elements_only[gi] = gi
def add_attribute_defn(self, gi, attr, type, decl, default):
try:
d = self.__attribs[gi]
except KeyError:
d = self.__attribs[gi] = {}
if not d.has_key(attr):
d[attr] = (type, decl, default)
else:
print "<%s> attribute %s already defined" % (gi, attr)
def load_pubtext(self, pubtext):
        raise NotImplementedError, "subclasses must implement load_pubtext()"
class _XMLDTDLoader(pdk.xml_legacy.parsers.xmlproc.xmlapp.DTDConsumer):
def __init__(self, info, parser):
self.info = info
        pdk.xml_legacy.parsers.xmlproc.xmlapp.DTDConsumer.__init__(self, parser)
self.new_attribute = info.add_attribute_defn
def new_element_type(self, gi, model):
if model[0] == "|" and model[1][0] == ("#PCDATA", ""):
# no action required
pass
elif model == ("", [], ""):
self.info.add_empty(gi)
else:
self.info.add_element_container(gi)
class XMLDoctypeInfo(DoctypeInfo):
def load_pubtext(self, sysid):
parser = pdk.xml_legacy.parsers.xmlproc.dtdparser.DTDParser()
loader = _XMLDTDLoader(self, parser)
parser.set_dtd_consumer(loader)
parser.parse_resource(sysid)
class XHTMLDoctypeInfo(XMLDoctypeInfo):
# Bogus W3C cruft requires the extra space when terminating empty elements.
syntax = XHTMLSyntax()
class SGMLDoctypeInfo(DoctypeInfo):
syntax = SGMLSyntax()
import re
__element_prefix_search = re.compile("<!ELEMENT", re.IGNORECASE).search
__element_prefix_len = len("<!ELEMENT")
del re
def load_pubtext(self, sysid):
#
# Really should build a proper SGML DTD parser!
#
pubtext = open(sysid).read()
m = self.__element_prefix_search(pubtext)
while m:
pubtext = pubtext[m.end():]
if pubtext and pubtext[0] in string.whitespace:
pubtext = string.lstrip(pubtext)
else:
continue
gi, pubtext = string.split(pubtext, None, 1)
pubtext = string.lstrip(pubtext)
            # maybe need to remove/collect tag occurrence specifiers
# ...
raise NotImplementedError, "implementation incomplete"
#
m = self.__element_prefix_search(pubtext)
class XmlWriter:
"""Basic XML output handler."""
def __init__(self, fp, standalone=None, dtdinfo=None,
syntax=None, linelength=None, encoding='iso-8859-1'):
self._offset = 0
self._packing = 1
self._flowing = 1
self._write = fp.write
self._dtdflowing = None
self._prefix = ''
self._encoding = encoding
self.__stack = []
self.__lang = None
self.__pending_content = 0
self.__pending_doctype = 1
self.__standalone = standalone
self.__dtdinfo = dtdinfo
if syntax is None:
if dtdinfo:
syntax = dtdinfo.syntax
else:
syntax = XMLSyntax()
self.__syntax = syntax
self.indentation = 0
self.indentEndTags = 0
if linelength is None:
self.lineLength = DEFAULT_LINELENGTH
else:
self.lineLength = linelength
def setDocumentLocator(self, locator):
self.locator = locator
def startDocument(self):
if self.__syntax.pic == "?>":
lit = self.__syntax.lit
s = '%sxml version=%s1.0%s encoding%s%s%s%s' % (
self.__syntax.pio, lit, lit, self.__syntax.vi, lit,
self._encoding, lit)
if self.__standalone:
s = '%s standalone%s%s%s%s' % (
s, self.__syntax.vi, lit, self.__standalone, lit)
self._write("%s%s\n" % (s, self.__syntax.pic))
def endDocument(self):
if self.__stack:
raise RuntimeError, "open element stack cannot be empty on close"
def startElement(self, tag, attrs={}):
if self.__pending_doctype:
self.handle_doctype(tag)
self._check_pending_content()
self.__pushtag(tag)
self.__check_flowing(tag, attrs)
if attrs.has_key("xml:lang"):
self.__lang = attrs["xml:lang"]
del attrs["xml:lang"]
if self._packing:
prefix = ""
elif self._flowing:
prefix = self._prefix[:-self.indentation]
else:
prefix = ""
stag = "%s%s%s" % (prefix, self.__syntax.stago, tag)
prefix = "%s %s" % (prefix, (len(tag) * " "))
lit = self.__syntax.lit
lita = self.__syntax.lita
vi = self.__syntax.vi
a = ''
if self._flowing != self.__stack[-1][0]:
if self._dtdflowing is not None \
and self._flowing == self._dtdflowing:
pass
else:
a = ' xml:space%s%s%s%s' \
% (vi, lit, ["default", "preserve"][self._flowing], lit)
if self.__lang != self.__stack[-1][1]:
            a = '%s xml:lang%s%s%s%s' % (a, vi, lit, self.__lang, lit)
line = stag + a
self._offset = self._offset + len(line)
a = ''
for k, v in attrs.items():
if v is None:
continue
v = str(v)
if string.find(v, lit) == -1:
a = ' %s%s%s%s%s' % (k, vi, lit, escape(str(v)), lit)
elif string.find(v, lita) == -1:
a = ' %s%s%s%s%s' % (k, vi, lita, escape(str(v)), lita)
else:
a = ' %s%s%s%s%s' % (k, vi, lit,
escape(str(v), {lit:self.__syntax.lit_quoted}),
lita)
if (self._offset + len(a)) > self.lineLength:
self._write(line + "\n")
line = prefix + a
self._offset = len(line)
else:
line = line + a
self._offset = self._offset + len(a)
self._write(line)
self.__pending_content = 1
if ( self.__dtdinfo and not
(self.__dtdinfo.has_element_content(tag)
or self.__dtdinfo.is_empty(tag))):
self._packing = 1
def endElement(self, tag):
if self.__pending_content:
if self._flowing:
self._write(self.__syntax.empty_stagc)
if self._packing:
self._offset = self._offset \
+ len(self.__syntax.empty_stagc)
else:
self._write("\n")
self._offset = 0
else:
self._write(self.__syntax.empty_stagc)
self._offset = self._offset + len(self.__syntax.empty_stagc)
self.__pending_content = 0
self.__poptag(tag)
return
depth = len(self.__stack)
if depth == 1 or self._packing or not self._flowing:
prefix = ''
else:
prefix = self._prefix[:-self.indentation] \
+ (" " * self.indentEndTags)
self.__poptag(tag)
self._write("%s%s%s%s" % (
prefix, self.__syntax.etago, tag, self.__syntax.tagc))
if self._packing:
self._offset = self._offset + len(tag) + 3
else:
self._write("\n")
self._offset = 0
def characters(self, data, start, length):
data = data[start: start+length]
if data:
self._check_pending_content()
data = escape(data)
if "\n" in data:
p = string.find(data, "\n")
self._offset = len(data) - (p + 1)
else:
self._offset = self._offset + len(data)
self._check_pending_content()
self._write(data)
def comment(self, data, start, length):
data = data[start: start+length]
self._check_pending_content()
s = "%s%s%s%s%s" % (self.__syntax.mdo, self.__syntax.com,
data, self.__syntax.com, self.__syntax.mdc)
p = string.rfind(s, "\n")
if self._packing:
if p >= 0:
self._offset = len(s) - (p + 1)
else:
self._offset = self._offset + len(s)
else:
self._write("%s%s\n" % (self._prefix, s))
self._offset = 0
def ignorableWhitespace(self, data, start, length):
pass
def processingInstruction(self, target, data):
self._check_pending_content()
s = "%s%s %s%s" % (self.__syntax.pio, target, data, self.__syntax.pic)
prefix = self._prefix[:-self.indentation] \
+ (" " * self.indentEndTags)
if "\n" in s:
p = string.rfind(s, "\n")
if self._flowing and not self._packing:
self._write(prefix + s + "\n")
self._offset = 0
else:
self._write(s)
self._offset = len(s) - (p + 1)
elif self._flowing and not self._packing:
self._write(prefix + s + "\n")
self._offset = 0
else:
self._write(s)
self._offset = self._offset + len(s)
# This doesn't actually have a SAX equivalent, so we'll use it as
# an internal helper.
def handle_doctype(self, root):
self.__pending_doctype = 0
if self.__dtdinfo:
fpi = self.__dtdinfo.fpi
sysid = self.__dtdinfo.sysid
else:
fpi = sysid = None
lit = self.__syntax.lit
isxml = self.__syntax.pic == "?>"
if isxml and sysid:
s = '%sDOCTYPE %s\n' % (self.__syntax.mdo, root)
if fpi:
s = s + ' PUBLIC %s%s%s\n' % (lit, fpi, lit)
s = s + ' %s%s%s>\n' % (lit, sysid, lit)
else:
s = s + ' SYSTEM %s%s%s>\n' % (lit, sysid, lit)
self._write(s)
self._offset = 0
elif not isxml:
s = "%sDOCTYPE %s" % (self.__syntax.mdo, root)
if fpi:
s = '%s\n PUBLIC %s%s%s' % (s, lit, fpi, lit)
if sysid:
s = '%s\n SYSTEM %s%s%s' % (s, lit, sysid, lit)
self._write("%s%s\n" % (s, self.__syntax.mdc))
self._offset = 0
def handle_cdata(self, data):
self._check_pending_content()
# There should be a better way to generate '[CDATA['
start = self.__syntax.mdo + "[CDATA["
end = self.__syntax.msc + self.__syntax.mdc
s = "%s%s%s" % (start, escape(data), end)
if self._packing:
if "\n" in s:
rpos = string.rfind(s, "\n")
self._offset = len(s) - (rpos + 1) + len(end)
else:
self._offset = self._offset + len(s) + len(start + end)
self._write(s)
else:
self._offset = 0
self._write(s + "\n")
# Internal helper methods.
def __poptag(self, tag):
state = self.__stack.pop()
self._flowing, self.__lang, expected_tag, \
self._packing, self._dtdflowing = state
if tag != expected_tag:
raise RuntimeError, \
"expected </%s>, got </%s>" % (expected_tag, tag)
self._prefix = self._prefix[:-self.indentation]
def __pushtag(self, tag):
self.__stack.append((self._flowing, self.__lang, tag,
self._packing, self._dtdflowing))
self._prefix = self._prefix + " " * self.indentation
def __check_flowing(self, tag, attrs):
"""Check the contents of attrs and the DTD information to determine
whether the following content should be flowed.
tag -- general identifier of the element being opened
attrs -- attributes dictionary as reported by the parser or
application
This sets up both the _flowing and _dtdflowing (object) attributes.
"""
docspec = dtdspec = None
if self.__dtdinfo:
try:
info = self.__dtdinfo.get_attribute_info(tag, "xml:space")
except KeyError:
info = None
if info is not None:
self._flowing = info[2] != "preserve"
self._dtdflowing = self._flowing
if attrs.has_key("xml:space"):
self._flowing = attrs["xml:space"] != "preserve"
del attrs["xml:space"]
def _check_pending_content(self):
if self.__pending_content:
s = self.__syntax.tagc
if self._flowing and not self._packing:
s = s + "\n"
self._offset = 0
else:
self._offset = self._offset + len(s)
self._write(s)
self.__pending_content = 0
class PrettyPrinter(XmlWriter):
"""Pretty-printing XML output handler."""
def __init__(self, fp, standalone=None, dtdinfo=None,
syntax=None, linelength=None,
indentation=2, endtagindentation=None):
XmlWriter.__init__(self, fp, standalone=standalone, dtdinfo=dtdinfo,
syntax=syntax, linelength=linelength)
self.indentation = indentation
if endtagindentation is not None:
self.indentEndTags = endtagindentation
else:
self.indentEndTags = indentation
def characters(self, data, start, length):
data = data[start: start + length]
if not data:
return
self._check_pending_content()
data = escape(data)
if not self._flowing:
self._write(data)
return
words = string.split(data)
begspace = data[0] in string.whitespace
endspace = words and (data[-1] in string.whitespace)
prefix = self._prefix
if len(prefix) > 40:
prefix = " "
offset = self._offset
L = []
append = L.append
if begspace:
append(" ")
offset = offset + 1
ws = ""
ws_len = 0
while words:
w = words[0]
del words[0]
if (offset + ws_len + len(w)) > self.lineLength:
append("\n")
append(prefix)
append(w)
offset = len(prefix) + len(w)
else:
append(ws)
ws, ws_len = " ", 1
append(w)
offset = offset + 1 + len(w)
if endspace:
append(" ")
offset = offset + 1
self._offset = offset
self._write(string.join(L, ""))
| gpl-2.0 | -2,206,020,784,340,318,500 | 33.418944 | 84 | 0.512701 | false |
vkroz/kafka | tests/kafkatest/services/performance/end_to_end_latency.py | 1 | 3028 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.services.performance import PerformanceService
from kafkatest.utils.security_config import SecurityConfig
class EndToEndLatencyService(PerformanceService):
logs = {
"end_to_end_latency_log": {
"path": "/mnt/end-to-end-latency.log",
"collect_default": True},
}
def __init__(self, context, num_nodes, kafka, security_protocol, topic, num_records, consumer_fetch_max_wait=100, acks=1):
super(EndToEndLatencyService, self).__init__(context, num_nodes)
self.kafka = kafka
self.security_config = SecurityConfig(security_protocol)
self.security_protocol = security_protocol
self.args = {
'topic': topic,
'num_records': num_records,
'consumer_fetch_max_wait': consumer_fetch_max_wait,
'acks': acks
}
def _worker(self, idx, node):
args = self.args.copy()
self.security_config.setup_node(node)
if self.security_protocol == SecurityConfig.SSL:
ssl_config_file = SecurityConfig.SSL_DIR + "/security.properties"
node.account.create_file(ssl_config_file, str(self.security_config))
else:
ssl_config_file = ""
args.update({
'zk_connect': self.kafka.zk.connect_setting(),
'bootstrap_servers': self.kafka.bootstrap_servers(),
'ssl_config_file': ssl_config_file
})
cmd = "/opt/kafka/bin/kafka-run-class.sh kafka.tools.EndToEndLatency "\
"%(bootstrap_servers)s %(topic)s %(num_records)d "\
"%(acks)d 20 %(ssl_config_file)s" % args
cmd += " | tee /mnt/end-to-end-latency.log"
self.logger.debug("End-to-end latency %d command: %s", idx, cmd)
results = {}
for line in node.account.ssh_capture(cmd):
if line.startswith("Avg latency:"):
results['latency_avg_ms'] = float(line.split()[2])
if line.startswith("Percentiles"):
results['latency_50th_ms'] = float(line.split()[3][:-1])
results['latency_99th_ms'] = float(line.split()[6][:-1])
results['latency_999th_ms'] = float(line.split()[9])
self.results[idx-1] = results
| apache-2.0 | 6,619,955,687,392,177,000 | 42.884058 | 126 | 0.637715 | false |
benhoff/chrome-stream-chat | CHATIMUSMAXIMUS/youtube_scrapper.py | 1 | 3564 | import sys
import os
#import httplib2
from time import sleep
from threading import Thread
from selenium import webdriver
from apiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
from PyQt5 import QtCore
"""
_YOUTUBE_API_SERVICE_NAME = 'youtube'
_YOUTUBE_API_VERSION = 'v3'
def _youtube_authentication():
client_secrets_file = 'client_secrets.json'
youtube_scope = "https://www.googleapis.com/auth/youtube.readonly"
missing_client_message = "You need to populate the client_secrets.json!"
flow = flow_from_clientsecrets(client_secrets_file,
scope=youtube_scope,
message=missing_client_message)
storage = Storage("{}-oauth2.json".format(sys.argv[0]))
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage, args)
return build(_YOUTUBE_API_SERVICE_NAME,
_YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
def get_current_youtube_link():
youtube_api = _youtube_authentication()
broadcasts_requests = youtube.liveBroadcasts().list(
broadcastStatus=('active',),
part='id',
maxResults=5)
while broadcasts_requests:
response = broadcasts_requests.execute()
youtube_id = response.get('items', [])[0]['id']
return 'http://youtube.com/watch?v={}'.format(youtube_id)
"""
class YoutubeScrapper(QtCore.QObject):
chat_signal = QtCore.pyqtSignal(str, str, str)
def __init__(self, video_url=None, parent=None):
super(YoutubeScrapper, self).__init__(parent)
"""
if video_url is None:
video_url = get_current_youtube_link()
"""
self.video_url = video_url
self._number_of_messages = 0
self._thread = Thread(target=self.run)
self._thread.setDaemon(True)
self._thread.start()
def run(self):
driver = webdriver.PhantomJS()
# TODO: see if this is needed or not
driver.set_window_size(1000, 1000)
driver.get(self.video_url)
# NOTE: need some time for comments to load
sleep(5)
all_comments = driver.find_element_by_id("all-comments")
comments = all_comments.find_elements_by_tag_name('li')
self._number_of_messages = len(comments)
for comment in comments:
author = comment.find_element_by_class_name('author').text
message = comment.find_element_by_class_name('comment-text').text
self.chat_signal.emit(author, message, 'YT')
while True:
comments = all_comments.find_elements_by_tag_name('li')
comments_length = len(comments)
if comments_length > self._number_of_messages:
# NOTE: this number is intentionally NEGATIVE
messages_not_parsed = self._number_of_messages - comments_length
self._number_of_messages = len(comments)
comments = comments[messages_not_parsed:]
for comment in comments:
author = comment.find_element_by_class_name('author').text
message = comment.find_element_by_class_name('comment-text').text
self.chat_signal.emit(author, message, 'YT')
if __name__ == '__main__':
scrapper = YoutubeScrapper('https://www.youtube.com/watch?v=W2DS6wT6_48')
while True:
sleep(1)
| mit | -8,556,869,593,251,939,000 | 32 | 85 | 0.632155 | false |
ecolell/pfamserver | pfamserver/commands/unused_columns.py | 1 | 2440 | unused_columns = {
'pdb': [
'keywords'
],
'pdb_pfamA_reg': [
'auto_pdb_reg',
'pdb_start_icode',
'pdb_end_icode',
'seq_start',
'seq_end',
'hex_color'
],
'pfamA': [
'previous_id',
'author',
'deposited_by',
'seed_source',
'type',
'comment',
'sequence_GA',
'domain_GA',
'sequence_TC',
'domain_TC',
'sequence_NC',
'domain_NC',
'buildMethod',
'model_length',
'searchMethod',
'msv_lambda',
'msv_mu',
'viterbi_lambda',
'viterbi_mu',
'forward_lambda',
'forward_tau',
'num_seed',
'version',
'number_archs',
'number_species',
'number_structures',
'number_ncbi',
'number_meta',
'average_length',
'percentage_id',
'average_coverage',
'change_status',
'seed_consensus',
'full_consensus',
'number_shuffled_hits',
'number_uniprot',
'rp_seed',
'number_rp15',
'number_rp35',
'number_rp55',
'number_rp75'
],
'pfamA_reg_full_significant': [
'auto_pfamA_reg_full',
'ali_start',
'ali_end',
'model_start',
'model_end',
'domain_bits_score',
'domain_evalue_score',
'sequence_bits_score',
'sequence_evalue_score',
'cigar',
'tree_order',
'domain_order'
],
'pfamseq': [
'seq_version',
'crc64',
'md5',
'description',
'evidence',
'length',
'species',
'taxonomy',
'is_fragment',
'sequence',
'ncbi_taxid',
'auto_architecture',
'treefam_acc',
'swissprot'
],
'uniprot': [
'seq_version',
'crc64',
'md5',
'evidence',
'species',
'taxonomy',
'is_fragment',
'sequence',
'ncbi_taxid',
'ref_proteome',
'complete_proteome',
'treefam_acc',
'rp15',
'rp35',
'rp55',
'rp75'
],
'uniprot_reg_full': [
'ali_start',
'ali_end',
'model_start',
'model_end',
'domain_bits_score',
'domain_evalue_score',
'sequence_bits_score',
'sequence_evalue_score'
]
}
| agpl-3.0 | -2,147,323,191,928,900,400 | 20.403509 | 35 | 0.431557 | false |
monodokimes/pythonmon | core/scene.py | 1 | 3014 | import controller.component
from util import jsonmanager, debug, configuration
from view.entity import Entity
class Scene:
def __init__(self, name, entities_data):
self.name = name
self.entities = []
self.started_entities = []
self.event_input = None
self.cont_input = None
for entity_data in entities_data:
position = (entity_data["X"], entity_data["Y"])
entity = Entity(entity_data["Name"], position)
for component_data in entity_data["Components"]:
try:
component_constructor = getattr(controller.component, component_data["Type"])
component = component_constructor()
component.scene = self
data = component_data["ComponentData"]
if not len(data) == 0:
component.load_data(data)
entity.add_component(component)
except AttributeError:
debug.log(component_data["Type"] + " not recognised :/")
self.entities.append(entity)
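    # Illustrative sketch of the ``entities_data`` structure parsed above
    # (the component type name is a hypothetical example):
    #
    #     [{"Name": "player", "X": 0, "Y": 0,
    #       "Components": [{"Type": "MoveComponent", "ComponentData": {}}]}]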
def start(self):
self.event_input = 'none'
self.cont_input = 'none'
while not self.ready_to_start():
debug.log('preparing to start entities...')
entities_to_start = []
for ent in self.entities:
if not ent.is_started():
entities_to_start.append(ent)
debug.log(str(len(entities_to_start)) + ' entities ready to start.')
debug.log('starting...')
for entity in entities_to_start:
try:
entity.start()
except Exception as e:
debug.log('could not start entity. Logging error:')
debug.log(e)
debug.log('started {0} entities :)'.format(len(self.entities)))
def update(self, event_input, cont_input):
self.event_input = event_input
self.cont_input = cont_input
for entity in self.entities:
entity.update()
def find_entity(self, entity_name):
for entity in self.entities:
if entity.name == entity_name:
return entity
return None
def add_entity(self, entity):
self.entities.append(entity)
def ready_to_start(self):
for entity in self.entities:
if not entity.is_started():
return False
return True
class SceneManager:
@staticmethod
def get_path(scene_name):
return configuration.scene_data_folder_path + scene_name + '.json'
@staticmethod
def load_scene(scene_name):
path = SceneManager.get_path(scene_name)
scene_data = jsonmanager.get_data(path)
return Scene(scene_name, scene_data['Entities'])
@staticmethod
def check_if_scene_exists(scene_name):
path = SceneManager.get_path(scene_name)
return jsonmanager.check_for_file(path)
| gpl-3.0 | 2,675,223,090,908,184,000 | 31.06383 | 97 | 0.562044 | false |
by46/coffee | code1.py | 1 | 2408 | # -*- coding: utf-8 -*-
ENCODINGS = ['utf8', 'gbk']
def decode_statement(statement, encodings):
    if isinstance(statement, unicode):
        return statement
for encoding in encodings:
try:
return statement.decode(encoding)
except UnicodeDecodeError:
pass
def get_initial_letters(statement):
statement = decode_statement(statement, ENCODINGS)
if statement is None:
return ''
return ''.join(get_initial_letter(word) for word in statement)
def get_initial_letter(character):
character = character.encode('gbk')
try:
ord(character)
return character.lower()
except Exception:
# ignore exception
asc = ord(character[0]) * 256 + ord(character[1]) - 65536
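        # ``asc`` is the signed GB2312/GBK code point; each contiguous range
        # below corresponds to one pinyin initial letter.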
if -20319 <= asc <= -20284:
return 'a'
if -20283 <= asc <= -19776:
return 'b'
if -19775 <= asc <= -19219:
return 'c'
if -19218 <= asc <= -18711:
return 'd'
if -18710 <= asc <= -18527:
return 'e'
if -18526 <= asc <= -18240:
return 'f'
if -18239 <= asc <= -17923:
return 'g'
if -17922 <= asc <= -17418:
return 'h'
if -17417 <= asc <= -16475:
return 'j'
if -16474 <= asc <= -16213:
return 'k'
if -16212 <= asc <= -15641:
return 'l'
if -15640 <= asc <= -15166:
return 'm'
if -15165 <= asc <= -14923:
return 'n'
if -14922 <= asc <= -14915:
return 'o'
if -14914 <= asc <= -14631:
return 'p'
if -14630 <= asc <= -14150:
return 'q'
if -14149 <= asc <= -14091:
return 'r'
if -14090 <= asc <= -13119:
return 's'
if -13118 <= asc <= -12839:
return 't'
if -12838 <= asc <= -12557:
return 'w'
if -12556 <= asc <= -11848:
return 'x'
if -11847 <= asc <= -11056:
return 'y'
if -11055 <= asc <= -10247:
return 'z'
return ''
def main(str_input):
a = get_initial_letters(str_input)
return ''.join(a)
if __name__ == "__main__":
str_input = u'K珠穆朗玛峰'
print(main(str_input))
| mit | 5,043,973,506,449,608,000 | 25.563218 | 66 | 0.455379 | false |
jonathanslenders/python-vterm | libpymux/utils.py | 1 | 1881 | import array
import asyncio
import fcntl
import signal
import termios
def get_size(stdout):
# Thanks to fabric (fabfile.org), and
# http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/
"""
Get the size of this pseudo terminal.
:returns: A (rows, cols) tuple.
"""
#assert stdout.isatty()
# Buffer for the C call
buf = array.array('h', [0, 0, 0, 0 ])
# Do TIOCGWINSZ (Get)
#fcntl.ioctl(stdout.fileno(), termios.TIOCGWINSZ, buf, True)
fcntl.ioctl(0, termios.TIOCGWINSZ, buf, True)
# Return rows, cols
return buf[0], buf[1]
def set_size(stdout_fileno, rows, cols):
"""
Set terminal size.
(This is also mainly for internal use. Setting the terminal size
automatically happens when the window resizes. However, sometimes the process
that created a pseudo terminal, and the process that's attached to the output window
are not the same, e.g. in case of a telnet connection, or unix domain socket, and then
we have to sync the sizes by hand.)
"""
# Buffer for the C call
buf = array.array('h', [rows, cols, 0, 0 ])
# Do: TIOCSWINSZ (Set)
fcntl.ioctl(stdout_fileno, termios.TIOCSWINSZ, buf)
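# Illustrative example (an assumption; ``pty_master_fd`` is hypothetical):
#
#     rows, cols = get_size(sys.stdout)
#     set_size(pty_master_fd, rows, cols)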
def alternate_screen(write):
class Context:
def __enter__(self):
# Enter alternate screen buffer
write(b'\033[?1049h')
def __exit__(self, *a):
# Exit alternate screen buffer and make cursor visible again.
write(b'\033[?1049l')
write(b'\033[?25h')
return Context()
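# Typical use (illustrative assumption, not from the original module):
#
#     with alternate_screen(sys.stdout.buffer.write):
#         ...  # render the full-screen UI while the alternate buffer is active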
def call_on_sigwinch(callback):
"""
Set a function to be called when the SIGWINCH signal is received.
(Normally, on terminal resize.)
"""
def sigwinch_handler(n, frame):
loop = asyncio.get_event_loop()
loop.call_soon(callback)
signal.signal(signal.SIGWINCH, sigwinch_handler)
| bsd-2-clause | 58,246,880,209,152,530 | 27.074627 | 90 | 0.640085 | false |
openstack/python-troveclient | troveclient/tests/osc/v1/test_database_configurations.py | 1 | 14807 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from osc_lib import utils
from troveclient import common
from troveclient import exceptions
from troveclient.osc.v1 import database_configurations
from troveclient.tests.osc.v1 import fakes
class TestConfigurations(fakes.TestDatabasev1):
fake_configurations = fakes.FakeConfigurations()
fake_configuration_params = fakes.FakeConfigurationParameters()
def setUp(self):
super(TestConfigurations, self).setUp()
self.mock_client = self.app.client_manager.database
self.configuration_client = (self.app.client_manager.database.
configurations)
self.instance_client = self.app.client_manager.database.instances
self.configuration_params_client = (self.app.client_manager.
database.configuration_parameters)
class TestConfigurationList(TestConfigurations):
defaults = {
'limit': None,
'marker': None
}
columns = database_configurations.ListDatabaseConfigurations.columns
values = ('c-123', 'test_config', '', 'mysql', '5.6', "5.7.29")
def setUp(self):
super(TestConfigurationList, self).setUp()
self.cmd = database_configurations.ListDatabaseConfigurations(self.app,
None)
data = [self.fake_configurations.get_configurations_c_123()]
self.configuration_client.list.return_value = common.Paginated(data)
def test_configuration_list_defaults(self):
parsed_args = self.check_parser(self.cmd, [], [])
columns, data = self.cmd.take_action(parsed_args)
self.configuration_client.list.assert_called_once_with(**self.defaults)
self.assertEqual(self.columns, columns)
self.assertEqual([tuple(self.values)], data)
class TestConfigurationShow(TestConfigurations):
values = ('2015-05-16T10:24:28', 'mysql', '5.6', '5.7.29', '', 'c-123',
'test_config', '2015-05-16T10:24:29', '{"max_connections": 5}')
def setUp(self):
super(TestConfigurationShow, self).setUp()
self.cmd = database_configurations.ShowDatabaseConfiguration(self.app,
None)
self.data = self.fake_configurations.get_configurations_c_123()
self.configuration_client.get.return_value = self.data
self.columns = (
'created',
'datastore_name',
'datastore_version_name',
'datastore_version_number',
'description',
'id',
'name',
'updated',
'values',
)
def test_show(self):
args = ['c-123']
parsed_args = self.check_parser(self.cmd, args, [])
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
self.assertEqual(self.values, data)
class TestConfigurationParameterList(TestConfigurations):
columns = database_configurations.\
ListDatabaseConfigurationParameters.columns
values = ('connect_timeout', 'integer', 2, 31536000, 'false')
def setUp(self):
super(TestConfigurationParameterList, self).setUp()
self.cmd = database_configurations.\
ListDatabaseConfigurationParameters(self.app, None)
data = [self.fake_configuration_params.get_params_connect_timeout()]
self.configuration_params_client.parameters.return_value =\
common.Paginated(data)
self.configuration_params_client.parameters_by_version.return_value =\
common.Paginated(data)
def test_configuration_parameters_list_defaults(self):
args = ['d-123', '--datastore', 'mysql']
verifylist = [
('datastore_version', 'd-123'),
('datastore', 'mysql'),
]
parsed_args = self.check_parser(self.cmd, args, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
self.assertEqual([tuple(self.values)], data)
def test_configuration_parameters_list_with_version_id_exception(self):
args = [
'd-123',
]
verifylist = [
('datastore_version', 'd-123'),
]
parsed_args = self.check_parser(self.cmd, args, verifylist)
self.assertRaises(exceptions.NoUniqueMatch,
self.cmd.take_action,
parsed_args)
class TestConfigurationParameterShow(TestConfigurations):
values = ('d-123', 31536000, 2, 'connect_timeout', 'false', 'integer')
def setUp(self):
super(TestConfigurationParameterShow, self).setUp()
self.cmd = database_configurations. \
ShowDatabaseConfigurationParameter(self.app, None)
data = self.fake_configuration_params.get_params_connect_timeout()
self.configuration_params_client.get_parameter.return_value = data
self.configuration_params_client.\
get_parameter_by_version.return_value = data
self.columns = (
'datastore_version_id',
'max',
'min',
'name',
'restart_required',
'type',
)
def test_configuration_parameter_show_defaults(self):
args = ['d-123', 'connect_timeout', '--datastore', 'mysql']
verifylist = [
('datastore_version', 'd-123'),
('parameter', 'connect_timeout'),
('datastore', 'mysql'),
]
parsed_args = self.check_parser(self.cmd, args, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
self.assertEqual(self.values, data)
def test_configuration_parameter_show_with_version_id_exception(self):
args = [
'd-123',
'connect_timeout',
]
verifylist = [
('datastore_version', 'd-123'),
('parameter', 'connect_timeout'),
]
parsed_args = self.check_parser(self.cmd, args, verifylist)
self.assertRaises(exceptions.NoUniqueMatch,
self.cmd.take_action,
parsed_args)
class TestDatabaseConfigurationDelete(TestConfigurations):
def setUp(self):
super(TestDatabaseConfigurationDelete, self).setUp()
self.cmd = database_configurations.\
DeleteDatabaseConfiguration(self.app, None)
@mock.patch.object(utils, 'find_resource')
def test_configuration_delete(self, mock_find):
args = ['config1']
mock_find.return_value = args[0]
parsed_args = self.check_parser(self.cmd, args, [])
result = self.cmd.take_action(parsed_args)
self.configuration_client.delete.assert_called_with('config1')
self.assertIsNone(result)
@mock.patch.object(utils, 'find_resource')
def test_configuration_delete_with_exception(self, mock_find):
args = ['fakeconfig']
parsed_args = self.check_parser(self.cmd, args, [])
mock_find.side_effect = exceptions.CommandError
self.assertRaises(exceptions.CommandError,
self.cmd.take_action,
parsed_args)
class TestConfigurationCreate(TestConfigurations):
values = ('2015-05-16T10:24:28', 'mysql', '5.6', '5.7.29', '', 'c-123',
'test_config', '2015-05-16T10:24:29', '{"max_connections": 5}')
def setUp(self):
super(TestConfigurationCreate, self).setUp()
self.cmd = database_configurations.\
CreateDatabaseConfiguration(self.app, None)
self.data = self.fake_configurations.get_configurations_c_123()
self.configuration_client.create.return_value = self.data
self.columns = (
'created',
'datastore_name',
'datastore_version_name',
'datastore_version_number',
'description',
'id',
'name',
'updated',
'values',
)
def test_configuration_create_return_value(self):
args = ['c-123', '{"max_connections": 5}',
'--description', 'test_config',
'--datastore', 'mysql',
'--datastore-version', '5.6']
parsed_args = self.check_parser(self.cmd, args, [])
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
self.assertEqual(self.values, data)
def test_configuration_create(self):
args = ['cgroup1', '{"param1": 1, "param2": 2}']
parsed_args = self.check_parser(self.cmd, args, [])
self.cmd.take_action(parsed_args)
self.configuration_client.create.assert_called_with(
'cgroup1',
'{"param1": 1, "param2": 2}',
description=None,
datastore=None,
datastore_version=None,
datastore_version_number=None)
def test_configuration_create_with_optional_args(self):
args = ['cgroup2', '{"param3": 3, "param4": 4}',
'--description', 'cgroup 2',
'--datastore', 'mysql',
'--datastore-version', '5.6']
parsed_args = self.check_parser(self.cmd, args, [])
self.cmd.take_action(parsed_args)
self.configuration_client.create.assert_called_with(
'cgroup2',
'{"param3": 3, "param4": 4}',
description='cgroup 2',
datastore='mysql',
datastore_version='5.6',
datastore_version_number=None)
class TestConfigurationAttach(TestConfigurations):
def setUp(self):
super(TestConfigurationAttach, self).setUp()
self.cmd = database_configurations.\
AttachDatabaseConfiguration(self.app, None)
@mock.patch.object(utils, 'find_resource')
def test_configuration_attach(self, mock_find):
args = ['instance1', 'config1']
mock_find.side_effect = ['instance1', 'config1']
parsed_args = self.check_parser(self.cmd, args, [])
result = self.cmd.take_action(parsed_args)
self.instance_client.modify.assert_called_with('instance1', 'config1')
self.assertIsNone(result)
class TestConfigurationDetach(TestConfigurations):
def setUp(self):
super(TestConfigurationDetach, self).setUp()
self.cmd = database_configurations.\
DetachDatabaseConfiguration(self.app, None)
@mock.patch.object(utils, 'find_resource')
def test_configuration_detach(self, mock_find):
args = ['instance2']
mock_find.return_value = args[0]
parsed_args = self.check_parser(self.cmd, args, [])
result = self.cmd.take_action(parsed_args)
self.instance_client.modify.assert_called_with('instance2')
self.assertIsNone(result)
class TestConfigurationInstancesList(TestConfigurations):
defaults = {
'limit': None,
'marker': None
}
columns = (
database_configurations.ListDatabaseConfigurationInstances.columns)
values = [('1', 'instance-1'),
('2', 'instance-2')]
def setUp(self):
super(TestConfigurationInstancesList, self).setUp()
self.cmd = database_configurations.ListDatabaseConfigurationInstances(
self.app, None)
data = (
self.fake_configurations.get_configuration_instances())
self.configuration_client.instances.return_value = common.Paginated(
data)
@mock.patch.object(utils, 'find_resource')
def test_configuration_instances_list(self, mock_find):
args = ['c-123']
mock_find.return_value = args[0]
parsed_args = self.check_parser(self.cmd, args, [])
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
self.assertEqual(self.values, data)
class TestConfigurationDefault(TestConfigurations):
values = ('2', '98', '1', '15M')
def setUp(self):
super(TestConfigurationDefault, self).setUp()
self.cmd = database_configurations.DefaultDatabaseConfiguration(
self.app, None)
self.data = (
self.fake_configurations.get_default_configuration())
self.instance_client.configuration.return_value = self.data
self.columns = (
'innodb_log_files_in_group',
'max_user_connections',
'skip-external-locking',
'tmp_table_size',
)
@mock.patch.object(utils, 'find_resource')
def test_default_database_configuration(self, mock_find):
args = ['1234']
mock_find.return_value = args[0]
parsed_args = self.check_parser(self.cmd, args, [])
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
self.assertEqual(self.values, data)
class TestSetDatabaseConfiguration(TestConfigurations):
def setUp(self):
super(TestSetDatabaseConfiguration, self).setUp()
self.cmd = database_configurations.SetDatabaseConfiguration(
self.app, None)
def test_set_database_configuration_parameter(self):
args = ['config_group_id', '{"param1": 1, "param2": 2}']
parsed_args = self.check_parser(self.cmd, args, [])
self.cmd.take_action(parsed_args)
self.configuration_client.edit.assert_called_once_with(
'config_group_id',
'{"param1": 1, "param2": 2}'
)
class TestUpdateDatabaseConfiguration(TestConfigurations):
def setUp(self):
super(TestUpdateDatabaseConfiguration, self).setUp()
self.cmd = database_configurations.UpdateDatabaseConfiguration(
self.app, None)
def test_set_database_configuration_parameter(self):
args = ['config_group_id', '{"param1": 1, "param2": 2}', '--name',
'new_name']
parsed_args = self.check_parser(self.cmd, args, [])
self.cmd.take_action(parsed_args)
self.configuration_client.update.assert_called_once_with(
'config_group_id',
'{"param1": 1, "param2": 2}',
name='new_name',
description=None
)
| apache-2.0 | -6,860,908,495,404,124,000 | 36.581218 | 79 | 0.61221 | false |
sunlightlabs/read_FEC | fecreader/api/serializers.py | 1 | 6024 |
from fec_alerts.models import new_filing
from summary_data.models import Committee_Overlay, Candidate_Overlay, DistrictWeekly, District
from formdata.models import SkedE
from rest_framework import serializers
class NFSerializer(serializers.HyperlinkedModelSerializer):
form_name = serializers.Field(source='get_form_name')
process_time_formatted = serializers.Field(source='process_time_formatted')
skeda_url = serializers.Field(source='get_skeda_url')
spending_url = serializers.Field(source='get_spending_url')
absolute_url = serializers.Field(source='get_absolute_url')
committee_url = serializers.Field(source='get_committee_url')
class Meta:
model = new_filing
fields = ('fec_id', 'committee_name', 'filing_number', 'form_type', 'filed_date', 'coverage_from_date', 'coverage_to_date', 'is_superpac', 'committee_designation', 'committee_type', 'coh_end', 'new_loans', 'tot_raised', 'tot_spent', 'lines_present', 'form_name', 'skeda_url', 'spending_url', 'absolute_url', 'committee_url', 'process_time_formatted', 'is_superceded', 'cycle')
class COSerializer(serializers.HyperlinkedModelSerializer):
display_type = serializers.Field(source='display_type')
candidate_url = serializers.Field(source='candidate_url')
candidate_office = serializers.Field(source='curated_candidate_office')
candidate_name = serializers.Field(source='curated_candidate_name')
committee_url = serializers.Field(source='get_absolute_url')
class Meta:
model = Committee_Overlay
fields=('fec_id', 'name', 'total_receipts', 'total_disbursements', 'outstanding_loans', 'cash_on_hand', 'cash_on_hand_date', 'ctype', 'candidate_office','candidate_name', 'candidate_url', 'display_type', 'committee_url', 'political_orientation')
#depth = 1
class OSSerializer(serializers.HyperlinkedModelSerializer):
display_type = serializers.Field(source='display_type')
committee_url = serializers.Field(source='get_absolute_url')
get_filtered_ie_url = serializers.Field(source='get_filtered_ie_url')
display_coh_date = serializers.Field(source='display_coh_date')
display_coh = serializers.Field(source='display_coh')
major_activity = serializers.Field(source='major_activity')
class Meta:
model = Committee_Overlay
fields=('fec_id', 'name', 'total_receipts', 'total_disbursements', 'outstanding_loans', 'ctype', 'total_indy_expenditures','ie_support_dems', 'ie_oppose_dems', 'ie_support_reps', 'ie_oppose_reps', 'political_orientation', 'political_orientation_verified', 'display_type', 'committee_url', 'get_filtered_ie_url', 'display_coh', 'display_coh_date', 'major_activity', 'cycle')
#depth = 1
class DistrictSerializer(serializers.ModelSerializer):
district_url = serializers.Field(source='get_absolute_url')
next_election = serializers.Field(source='next_election')
class Meta:
model = District
fields=('id', 'district_url', 'cycle', 'state', 'office', 'office_district', 'term_class', 'incumbent_name', 'incumbent_party', 'next_election_date', 'next_election_code', 'next_election', 'open_seat', 'candidate_raised', 'candidate_spending', 'outside_spending', 'total_spending', 'rothenberg_rating_id', 'rothenberg_rating_text')
class MinimalDistrictSerializer(serializers.ModelSerializer):
race_name = serializers.Field(source='__unicode__')
class Meta:
model = District
fields=('race_name', 'state', 'office', 'office_district', 'term_class', 'id')
class CandidateSerializer(serializers.ModelSerializer):
candidate_url = serializers.Field(source='get_absolute_url')
race_url = serializers.Field(source='get_race_url')
ie_url = serializers.Field(source='get_filtered_ie_url')
status = serializers.Field(source='show_candidate_status')
district = MinimalDistrictSerializer(source='district')
class Meta:
model = Candidate_Overlay
fields=('name', 'fec_id', 'pcc', 'party', 'candidate_url', 'race_url', 'ie_url', 'is_incumbent', 'cycle', 'not_seeking_reelection', 'other_office_sought', 'other_fec_id', 'election_year', 'state', 'office', 'office_district', 'term_class', 'candidate_status', 'total_expenditures', 'expenditures_supporting', 'expenditures_opposing', 'total_receipts', 'total_contributions', 'total_disbursements', 'cash_on_hand', 'cash_on_hand_date', 'district', 'outstanding_loans', 'cand_is_gen_winner', 'status')
class DWSerializer(serializers.HyperlinkedModelSerializer):
district = MinimalDistrictSerializer(source='district')
class Meta:
model = DistrictWeekly
depth = 1
fields=('start_date', 'end_date', 'cycle_week_number', 'outside_spending', 'district')
class SkedESerializer(serializers.ModelSerializer):
payee_name_simplified = serializers.Field(source='payee_name_simplified')
candidate_url = serializers.Field(source='get_candidate_url')
committee_url = serializers.Field(source='get_committee_url')
short_office = serializers.Field(source='short_office')
candidate_name = serializers.Field(source='candidate_name_raw')
race_url = serializers.Field(source='get_race_url')
class Meta:
model = SkedE
fields=('form_type', 'superceded_by_amendment', 'candidate_id_checked', 'candidate_name', 'candidate_party_checked', 'candidate_office_checked', 'candidate_state_checked', 'candidate_district_checked', 'support_oppose_checked', 'committee_name', 'transaction_id', 'payee_organization_name', 'payee_street_1', 'payee_street_2', 'payee_city', 'payee_state', 'payee_zip', 'payee_name_simplified', 'election_code', 'election_other_description', 'expenditure_date_formatted', 'expenditure_amount', 'expenditure_purpose_code', 'expenditure_purpose_descrip', 'date_signed_formatted', 'memo_code', 'memo_text_description', 'filer_committee_id_number', 'district_checked', 'race_url', 'committee_url', 'candidate_url', 'short_office')
| bsd-3-clause | 2,079,631,095,254,151,700 | 59.24 | 733 | 0.708001 | false |
VandroiyLabs/FaroresWind | faroreswind/server/handler_Metadata.py | 1 | 3875 | ## database
import psycopg2
## system libraries
import io, os
import datetime, time
import logging
## web libraries
import tornado
import tornado.auth
import tornado.escape
import tornado.gen
import tornado.httpserver
import urlparse
import threading
import functools
from tornado.ioloop import IOLoop
from tornado.web import asynchronous, RequestHandler, Application
from tornado.httpclient import AsyncHTTPClient
## custom libraries
import faroreDB
rootdir = os.path.dirname(__file__)
class listEnoseConfHandler(tornado.web.RequestHandler):
def initialize(self, database, IPs):
self.db = database
self.IPs = IPs
return
def get(self):
if self.request.remote_ip[:11] in self.IPs :
miolo = '<div class="page-header">' + \
'<table class="table table-striped">' + \
'<thead><tr><th width=500px colspan=2>enose ID</th><th width=150px colspan=3>Location</th><th width=150px colspan=3>Date</th><th width=50px></th><th width=50px></th></tr></thead>'+ \
'<tbody>\n'
# Retrieving data from inductions
db = self.db
listConfs = self.db.getEnoseConfs( )
for conf in listConfs:
miolo += "<tr><td colspan=2>hal" + str(conf[-1]) + "k</td>\n"
miolo += "<td colspan=3>" + str(conf[-2]) + "</td>\n"
miolo += "<td colspan=5>" + str(conf[1]) + "</td>"
miolo += "</tr><tr>"
for j in range(10):
miolo += "<td>" + str(conf[2+j]) + "</td>"
miolo += "</tr>"
miolo += '</tbody></table></div>'
self.render(rootdir+'/pagess/index.html', title="Current list of ENoses", miolo = miolo,
top=file(rootdir+"/pagess/top.html").read(), bottom=file(rootdir+"/pagess/bottom.html").read())
## If in this else, someone tried to access this
else:
logging.warning('Access to list_inductions from outside IP list: ' + str(self.request.remote_ip) )
return
class inputEnoseConfigHandler(tornado.web.RequestHandler):
def initialize(self, database, IPs):
self.IPs = IPs
return
def get(self):
if self.request.remote_ip[:-2] == self.IPs[0] or self.request.remote_ip[:7] == self.IPs[1]:
miolo = file(rootdir+'/pagess/input_enose_config.html').read()
self.render(rootdir+'/pagess/index.html', title="Farore's wind", miolo = miolo,
top=file(rootdir+"/pagess/top.html").read(), bottom=file(rootdir+"/pagess/bottom.html").read())
## If in this else, someone tried to access this
else:
logging.warning('Access to input_metadata from outside IP list: ' + str(self.request.remote_ip) )
return
class actionEnoseConfigHandler(tornado.web.RequestHandler):
def initialize(self, database, IPs):
self.db = database
self.IPs = IPs
return
def post(self):
if self.request.remote_ip[:11] in self.IPs :
self.render(rootdir+'/pagess/metadata_action.html')
date = self.get_argument('date', '')
S = []
for j in range(1,11):
S.append( self.get_argument('S'+str(j), '') )
T = []
for j in range(1,9):
T.append( self.get_argument('T'+str(j), '') )
location = self.get_argument('location', '')
enose = self.get_argument('enose', '')
if len(enose) > 1:
enose = enose[3]
self.db.insertEnoseConf(enose, date, S, T, location)
## If in this else, someone tried to access this
else:
logging.warning('Access to metadata_action from outside IP list: ' + str(self.request.remote_ip) )
return
| gpl-3.0 | -8,024,903,895,091,325,000 | 28.135338 | 202 | 0.571871 | false |
aurex-linux/virt-manager | tests/capabilities.py | 1 | 9927 | # Copyright (C) 2013, 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
import os
import unittest
from tests import utils
from virtinst import CapabilitiesParser as capabilities
def build_host_feature_dict(feature_list):
fdict = {}
for f in feature_list:
fdict[f] = capabilities.FEATURE_ON
return fdict
class TestCapabilities(unittest.TestCase):
def _compareGuest(self, (arch, os_type, domains, features), guest):
self.assertEqual(arch, guest.arch)
self.assertEqual(os_type, guest.os_type)
self.assertEqual(len(domains), len(guest.domains))
for n in range(len(domains)):
self.assertEqual(domains[n][0], guest.domains[n].hypervisor_type)
self.assertEqual(domains[n][1], guest.domains[n].emulator)
self.assertEqual(domains[n][2], guest.domains[n].machines)
for n in features:
self.assertEqual(features[n], guest.features[n])
def _buildCaps(self, filename):
path = os.path.join("tests/capabilities-xml", filename)
xml = file(path).read()
return capabilities.Capabilities(xml)
def _testCapabilities(self, path, (host_arch, host_features), guests,
secmodel=None):
caps = self._buildCaps(path)
if host_arch:
self.assertEqual(host_arch, caps.host.cpu.arch)
for n in host_features:
self.assertEqual(host_features[n], caps.host.cpu.features[n])
if secmodel:
self.assertEqual(secmodel[0], caps.host.secmodel.model)
self.assertEqual(secmodel[1], caps.host.secmodel.doi)
if secmodel[2]:
for k, v in secmodel[2].items():
self.assertEqual(v, caps.host.secmodel.baselabels[k])
for idx in range(len(guests)):
self._compareGuest(guests[idx], caps.guests[idx])
def testCapabilities1(self):
host = ('x86_64', {'vmx': capabilities.FEATURE_ON})
guests = [
('x86_64', 'xen',
[['xen', None, []]], {}),
('i686', 'xen',
[['xen', None, []]], {'pae': capabilities.FEATURE_ON}),
('i686', 'hvm',
[['xen', "/usr/lib64/xen/bin/qemu-dm", ['pc', 'isapc']]], {'pae': capabilities.FEATURE_ON | capabilities.FEATURE_OFF}),
('x86_64', 'hvm',
[['xen', "/usr/lib64/xen/bin/qemu-dm", ['pc', 'isapc']]], {})
]
self._testCapabilities("capabilities-xen.xml", host, guests)
def testCapabilities2(self):
host = ('x86_64', {})
secmodel = ('selinux', '0', None)
guests = [
('x86_64', 'hvm',
[['qemu', '/usr/bin/qemu-system-x86_64', ['pc', 'isapc']]], {}),
('i686', 'hvm',
[['qemu', '/usr/bin/qemu', ['pc', 'isapc']]], {}),
('mips', 'hvm',
[['qemu', '/usr/bin/qemu-system-mips', ['mips']]], {}),
('mipsel', 'hvm',
[['qemu', '/usr/bin/qemu-system-mipsel', ['mips']]], {}),
('sparc', 'hvm',
[['qemu', '/usr/bin/qemu-system-sparc', ['sun4m']]], {}),
('ppc', 'hvm',
[['qemu', '/usr/bin/qemu-system-ppc',
['g3bw', 'mac99', 'prep']]], {}),
]
self._testCapabilities("capabilities-qemu.xml", host, guests, secmodel)
def testCapabilities3(self):
host = ('i686', {})
guests = [
('i686', 'hvm',
[['qemu', '/usr/bin/qemu', ['pc', 'isapc']],
['kvm', '/usr/bin/qemu-kvm', ['pc', 'isapc']]], {}),
('x86_64', 'hvm',
[['qemu', '/usr/bin/qemu-system-x86_64', ['pc', 'isapc']]], {}),
('mips', 'hvm',
[['qemu', '/usr/bin/qemu-system-mips', ['mips']]], {}),
('mipsel', 'hvm',
[['qemu', '/usr/bin/qemu-system-mipsel', ['mips']]], {}),
('sparc', 'hvm',
[['qemu', '/usr/bin/qemu-system-sparc', ['sun4m']]], {}),
('ppc', 'hvm',
[['qemu', '/usr/bin/qemu-system-ppc',
['g3bw', 'mac99', 'prep']]], {}),
]
secmodel = ('dac', '0', {"kvm" : "+0:+0", "qemu" : "+0:+0"})
self._testCapabilities("capabilities-kvm.xml", host, guests, secmodel)
def testCapabilities4(self):
host = ('i686',
{'pae': capabilities.FEATURE_ON | capabilities.FEATURE_OFF})
guests = [
('i686', 'linux',
[['test', None, []]],
{'pae': capabilities.FEATURE_ON | capabilities.FEATURE_OFF}),
]
self._testCapabilities("capabilities-test.xml", host, guests)
def testCapsLXC(self):
guests = [
("x86_64", "exe", [["lxc", "/usr/libexec/libvirt_lxc", []]], {}),
("i686", "exe", [["lxc", "/usr/libexec/libvirt_lxc", []]], {}),
]
self._testCapabilities("capabilities-lxc.xml",
(None, None), guests)
def testCapsTopology(self):
filename = "capabilities-test.xml"
caps = self._buildCaps(filename)
self.assertTrue(bool(caps.host.topology))
self.assertTrue(len(caps.host.topology.cells) == 2)
self.assertTrue(len(caps.host.topology.cells[0].cpus) == 8)
self.assertTrue(len(caps.host.topology.cells[0].cpus) == 8)
def testCapsCPUFeaturesOldSyntax(self):
filename = "rhel5.4-xen-caps-virt-enabled.xml"
host_feature_list = ["vmx"]
feature_dict = build_host_feature_dict(host_feature_list)
caps = self._buildCaps(filename)
for f in feature_dict.keys():
self.assertEquals(caps.host.cpu.features[f], feature_dict[f])
def testCapsCPUFeaturesOldSyntaxSVM(self):
filename = "rhel5.4-xen-caps.xml"
host_feature_list = ["svm"]
feature_dict = build_host_feature_dict(host_feature_list)
caps = self._buildCaps(filename)
for f in feature_dict.keys():
self.assertEquals(caps.host.cpu.features[f], feature_dict[f])
def testCapsCPUFeaturesNewSyntax(self):
filename = "libvirt-0.7.6-qemu-caps.xml"
host_feature_list = ['lahf_lm', 'xtpr', 'cx16', 'tm2', 'est', 'vmx',
'ds_cpl', 'pbe', 'tm', 'ht', 'ss', 'acpi', 'ds']
feature_dict = build_host_feature_dict(host_feature_list)
caps = self._buildCaps(filename)
for f in feature_dict.keys():
self.assertEquals(caps.host.cpu.features[f], feature_dict[f])
self.assertEquals(caps.host.cpu.model, "core2duo")
self.assertEquals(caps.host.cpu.vendor, "Intel")
self.assertEquals(caps.host.cpu.threads, "3")
self.assertEquals(caps.host.cpu.cores, "5")
self.assertEquals(caps.host.cpu.sockets, "7")
def testCapsUtilFuncs(self):
new_caps = self._buildCaps("libvirt-0.7.6-qemu-caps.xml")
new_caps_no_kvm = self._buildCaps(
"libvirt-0.7.6-qemu-no-kvmcaps.xml")
empty_caps = self._buildCaps("empty-caps.xml")
rhel_xen_enable_hvm_caps = self._buildCaps(
"rhel5.4-xen-caps-virt-enabled.xml")
rhel_xen_caps = self._buildCaps("rhel5.4-xen-caps.xml")
rhel_kvm_caps = self._buildCaps("rhel5.4-kvm-caps.xml")
def test_utils(caps, no_guests, is_hvm, is_kvm, is_bios_disable,
is_xenner):
self.assertEquals(caps.no_install_options(), no_guests)
self.assertEquals(caps.hw_virt_supported(), is_hvm)
self.assertEquals(caps.is_kvm_available(), is_kvm)
self.assertEquals(caps.is_bios_virt_disabled(), is_bios_disable)
self.assertEquals(caps.is_xenner_available(), is_xenner)
test_utils(new_caps, False, True, True, False, True)
test_utils(empty_caps, True, False, False, False, False)
test_utils(rhel_xen_enable_hvm_caps, False, True, False, False, False)
test_utils(rhel_xen_caps, False, True, False, True, False)
test_utils(rhel_kvm_caps, False, True, True, False, False)
test_utils(new_caps_no_kvm, False, True, False, False, False)
def testCPUMap(self):
caps = self._buildCaps("libvirt-0.7.6-qemu-caps.xml")
cpu_64 = caps.get_cpu_values(None, "x86_64")
cpu_32 = caps.get_cpu_values(None, "i486")
cpu_random = caps.get_cpu_values(None, "mips")
def test_cpu_map(cpumap, cpus):
cpunames = sorted([c.model for c in cpumap], key=str.lower)
for c in cpus:
self.assertTrue(c in cpunames)
self.assertEquals(cpu_64, cpu_32)
x86_cpunames = [
'486', 'athlon', 'Conroe', 'core2duo', 'coreduo', 'n270',
'Nehalem', 'Opteron_G1', 'Opteron_G2', 'Opteron_G3', 'Penryn',
'pentium', 'pentium2', 'pentium3', 'pentiumpro', 'phenom',
'qemu32', 'qemu64']
test_cpu_map(cpu_64, x86_cpunames)
test_cpu_map(cpu_random, [])
conn = utils.open_testdriver()
cpu_64 = caps.get_cpu_values(conn, "x86_64")
self.assertTrue(len(cpu_64) > 0)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | 8,204,153,660,796,448,000 | 38.392857 | 133 | 0.556966 | false |
pamapa/callblocker | usr/share/callblocker/onlinecheck_phonespamfilter_com.py | 1 | 2486 | #!/usr/bin/env python3
# callblocker - blocking unwanted calls from your home phone
# Copyright (C) 2015-2020 Patrick Ammann <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import sys
from online_base import OnlineBase
class OnlineCheckTellowsDE(OnlineBase):
def supported_country_codes(self):
return ["+1", "+33", "+44", "+61", "+64"]
def handle_number(self, args, number):
# map number to correct URL
if args.number.startswith("+1"): # USA, Canada
site = "www.phonespamfilter.com"
number = number[2:]
elif args.number.startswith("+33"): # France
site = "www.phonespamfilter.fr"
number = number[3:]
elif args.number.startswith("+44"): # United Kingdom
site = "www.phonespamfilter.co.uk"
number = number[3:]
elif args.number.startswith("+61"): # Australia
site = "au.phonespamfilter.com"
number = number[3:]
elif args.number.startswith("+64"): # New Zealand
site = "www.phonespamfilter.co.nz"
number = number[3:]
else:
self.log.error("number '%s' is not supported '%s'" % (args.number, self.supported_country_codes()))
sys.exit(-1)
url = "http://%s/check.php?phone=%s" % (site, number)
content = self.http_get(url)
self.log.debug(content)
score = int(content)
spam = False if score < args.spamscore else True
return self.onlinecheck_2_result(spam, score)
#
# main
#
if __name__ == "__main__":
m = OnlineCheckTellowsDE()
parser = m.get_parser("Online check via phonespamfilter.com")
parser.add_argument("--spamscore", help="score limit to mark as spam [0..100]", default=50)
args = parser.parse_args()
m.run(args)
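# Illustrative invocation (not part of the original script; assumes the base
# parser from online_base adds a --number option, since the code reads args.number):
#
#   onlinecheck_phonespamfilter_com.py --number "+14045551234" --spamscore 30
#
# Numbers must start with one of the supported_country_codes(); any other
# prefix logs an error and the script exits with -1.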
| gpl-2.0 | -8,970,024,645,685,095,000 | 35.558824 | 111 | 0.641593 | false |
miguelgrinberg/python-socketio | src/socketio/kombu_manager.py | 1 | 5298 | import pickle
import uuid
try:
import kombu
except ImportError:
kombu = None
from .pubsub_manager import PubSubManager
class KombuManager(PubSubManager): # pragma: no cover
"""Client manager that uses kombu for inter-process messaging.
This class implements a client manager backend for event sharing across
multiple processes, using RabbitMQ, Redis or any other messaging mechanism
supported by `kombu <http://kombu.readthedocs.org/en/latest/>`_.
To use a kombu backend, initialize the :class:`Server` instance as
follows::
url = 'amqp://user:password@hostname:port//'
server = socketio.Server(client_manager=socketio.KombuManager(url))
:param url: The connection URL for the backend messaging queue. Example
connection URLs are ``'amqp://guest:guest@localhost:5672//'``
and ``'redis://localhost:6379/'`` for RabbitMQ and Redis
respectively. Consult the `kombu documentation
<http://kombu.readthedocs.org/en/latest/userguide\
/connections.html#urls>`_ for more on how to construct
connection URLs.
:param channel: The channel name on which the server sends and receives
notifications. Must be the same in all the servers.
:param write_only: If set to ``True``, only initialize to emit events. The
default of ``False`` initializes the class for emitting
and receiving.
:param connection_options: additional keyword arguments to be passed to
``kombu.Connection()``.
:param exchange_options: additional keyword arguments to be passed to
``kombu.Exchange()``.
:param queue_options: additional keyword arguments to be passed to
``kombu.Queue()``.
:param producer_options: additional keyword arguments to be passed to
``kombu.Producer()``.
"""
name = 'kombu'
def __init__(self, url='amqp://guest:guest@localhost:5672//',
channel='socketio', write_only=False, logger=None,
connection_options=None, exchange_options=None,
queue_options=None, producer_options=None):
if kombu is None:
raise RuntimeError('Kombu package is not installed '
'(Run "pip install kombu" in your '
'virtualenv).')
super(KombuManager, self).__init__(channel=channel,
write_only=write_only,
logger=logger)
self.url = url
self.connection_options = connection_options or {}
self.exchange_options = exchange_options or {}
self.queue_options = queue_options or {}
self.producer_options = producer_options or {}
self.producer = self._producer()
def initialize(self):
super(KombuManager, self).initialize()
monkey_patched = True
if self.server.async_mode == 'eventlet':
from eventlet.patcher import is_monkey_patched
monkey_patched = is_monkey_patched('socket')
elif 'gevent' in self.server.async_mode:
from gevent.monkey import is_module_patched
monkey_patched = is_module_patched('socket')
if not monkey_patched:
raise RuntimeError(
'Kombu requires a monkey patched socket library to work '
'with ' + self.server.async_mode)
def _connection(self):
return kombu.Connection(self.url, **self.connection_options)
def _exchange(self):
options = {'type': 'fanout', 'durable': False}
options.update(self.exchange_options)
return kombu.Exchange(self.channel, **options)
def _queue(self):
queue_name = 'flask-socketio.' + str(uuid.uuid4())
options = {'durable': False, 'queue_arguments': {'x-expires': 300000}}
options.update(self.queue_options)
return kombu.Queue(queue_name, self._exchange(), **options)
def _producer(self):
return self._connection().Producer(exchange=self._exchange(),
**self.producer_options)
def __error_callback(self, exception, interval):
self._get_logger().exception('Sleeping {}s'.format(interval))
def _publish(self, data):
connection = self._connection()
publish = connection.ensure(self.producer, self.producer.publish,
errback=self.__error_callback)
publish(pickle.dumps(data))
def _listen(self):
reader_queue = self._queue()
while True:
connection = self._connection().ensure_connection(
errback=self.__error_callback)
try:
with connection.SimpleQueue(reader_queue) as queue:
while True:
message = queue.get(block=True)
message.ack()
yield message.payload
except connection.connection_errors:
self._get_logger().exception("Connection error "
"while reading from queue")
| mit | 5,484,561,835,408,599,000 | 42.42623 | 78 | 0.58607 | false |
piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/visualization/transform.py | 1 | 1198 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import division, print_function
__all__ = ['BaseTransform', 'CompositeTransform']
class BaseTransform(object):
"""
A transformation object.
This is used to construct transformations such as scaling, stretching, and
so on.
"""
def __add__(self, other):
return CompositeTransform(other, self)
class CompositeTransform(BaseTransform):
"""
A combination of two transforms.
Parameters
----------
transform_1: :class:`astropy.visualization.BaseTransform`
The first transform to apply.
transform_2: :class:`astropy.visualization.BaseTransform`
The second transform to apply.
"""
def __init__(self, transform_1, transform_2):
super(CompositeTransform, self).__init__()
self.transform_1 = transform_1
self.transform_2 = transform_2
def __call__(self, values, clip=True):
return self.transform_2(self.transform_1(values, clip=clip), clip=clip)
@property
def inverse(self):
return CompositeTransform(self.transform_2.inverse,
self.transform_1.inverse)
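# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of composing transforms via __add__; SquareTransform is a
# made-up subclass used only for illustration. Note that a + b builds
# CompositeTransform(b, a), so the right-hand operand is applied first.
#
#   class SquareTransform(BaseTransform):
#       def __call__(self, values, clip=True):
#           return values ** 2
#
#   composite = SquareTransform() + SquareTransform()
#   composite(3, clip=False)  # -> 81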
| mit | -9,155,531,039,135,937,000 | 27.52381 | 79 | 0.647746 | false |
vhaupert/mitmproxy | mitmproxy/proxy/protocol/tls.py | 1 | 20430 | from typing import Optional # noqa
from typing import Union
from mitmproxy import exceptions
from mitmproxy.net import tls as net_tls
from mitmproxy.proxy.protocol import base
# taken from https://testssl.sh/openssl-rfc.mapping.html
CIPHER_ID_NAME_MAP = {
0x00: 'NULL-MD5',
0x01: 'NULL-MD5',
0x02: 'NULL-SHA',
0x03: 'EXP-RC4-MD5',
0x04: 'RC4-MD5',
0x05: 'RC4-SHA',
0x06: 'EXP-RC2-CBC-MD5',
0x07: 'IDEA-CBC-SHA',
0x08: 'EXP-DES-CBC-SHA',
0x09: 'DES-CBC-SHA',
0x0a: 'DES-CBC3-SHA',
0x0b: 'EXP-DH-DSS-DES-CBC-SHA',
0x0c: 'DH-DSS-DES-CBC-SHA',
0x0d: 'DH-DSS-DES-CBC3-SHA',
0x0e: 'EXP-DH-RSA-DES-CBC-SHA',
0x0f: 'DH-RSA-DES-CBC-SHA',
0x10: 'DH-RSA-DES-CBC3-SHA',
0x11: 'EXP-EDH-DSS-DES-CBC-SHA',
0x12: 'EDH-DSS-DES-CBC-SHA',
0x13: 'EDH-DSS-DES-CBC3-SHA',
0x14: 'EXP-EDH-RSA-DES-CBC-SHA',
0x15: 'EDH-RSA-DES-CBC-SHA',
0x16: 'EDH-RSA-DES-CBC3-SHA',
0x17: 'EXP-ADH-RC4-MD5',
0x18: 'ADH-RC4-MD5',
0x19: 'EXP-ADH-DES-CBC-SHA',
0x1a: 'ADH-DES-CBC-SHA',
0x1b: 'ADH-DES-CBC3-SHA',
# 0x1c: ,
# 0x1d: ,
0x1e: 'KRB5-DES-CBC-SHA',
0x1f: 'KRB5-DES-CBC3-SHA',
0x20: 'KRB5-RC4-SHA',
0x21: 'KRB5-IDEA-CBC-SHA',
0x22: 'KRB5-DES-CBC-MD5',
0x23: 'KRB5-DES-CBC3-MD5',
0x24: 'KRB5-RC4-MD5',
0x25: 'KRB5-IDEA-CBC-MD5',
0x26: 'EXP-KRB5-DES-CBC-SHA',
0x27: 'EXP-KRB5-RC2-CBC-SHA',
0x28: 'EXP-KRB5-RC4-SHA',
0x29: 'EXP-KRB5-DES-CBC-MD5',
0x2a: 'EXP-KRB5-RC2-CBC-MD5',
0x2b: 'EXP-KRB5-RC4-MD5',
0x2f: 'AES128-SHA',
0x30: 'DH-DSS-AES128-SHA',
0x31: 'DH-RSA-AES128-SHA',
0x32: 'DHE-DSS-AES128-SHA',
0x33: 'DHE-RSA-AES128-SHA',
0x34: 'ADH-AES128-SHA',
0x35: 'AES256-SHA',
0x36: 'DH-DSS-AES256-SHA',
0x37: 'DH-RSA-AES256-SHA',
0x38: 'DHE-DSS-AES256-SHA',
0x39: 'DHE-RSA-AES256-SHA',
0x3a: 'ADH-AES256-SHA',
0x3b: 'NULL-SHA256',
0x3c: 'AES128-SHA256',
0x3d: 'AES256-SHA256',
0x3e: 'DH-DSS-AES128-SHA256',
0x3f: 'DH-RSA-AES128-SHA256',
0x40: 'DHE-DSS-AES128-SHA256',
0x41: 'CAMELLIA128-SHA',
0x42: 'DH-DSS-CAMELLIA128-SHA',
0x43: 'DH-RSA-CAMELLIA128-SHA',
0x44: 'DHE-DSS-CAMELLIA128-SHA',
0x45: 'DHE-RSA-CAMELLIA128-SHA',
0x46: 'ADH-CAMELLIA128-SHA',
0x62: 'EXP1024-DES-CBC-SHA',
0x63: 'EXP1024-DHE-DSS-DES-CBC-SHA',
0x64: 'EXP1024-RC4-SHA',
0x65: 'EXP1024-DHE-DSS-RC4-SHA',
0x66: 'DHE-DSS-RC4-SHA',
0x67: 'DHE-RSA-AES128-SHA256',
0x68: 'DH-DSS-AES256-SHA256',
0x69: 'DH-RSA-AES256-SHA256',
0x6a: 'DHE-DSS-AES256-SHA256',
0x6b: 'DHE-RSA-AES256-SHA256',
0x6c: 'ADH-AES128-SHA256',
0x6d: 'ADH-AES256-SHA256',
0x80: 'GOST94-GOST89-GOST89',
0x81: 'GOST2001-GOST89-GOST89',
0x82: 'GOST94-NULL-GOST94',
0x83: 'GOST2001-GOST89-GOST89',
0x84: 'CAMELLIA256-SHA',
0x85: 'DH-DSS-CAMELLIA256-SHA',
0x86: 'DH-RSA-CAMELLIA256-SHA',
0x87: 'DHE-DSS-CAMELLIA256-SHA',
0x88: 'DHE-RSA-CAMELLIA256-SHA',
0x89: 'ADH-CAMELLIA256-SHA',
0x8a: 'PSK-RC4-SHA',
0x8b: 'PSK-3DES-EDE-CBC-SHA',
0x8c: 'PSK-AES128-CBC-SHA',
0x8d: 'PSK-AES256-CBC-SHA',
# 0x8e: ,
# 0x8f: ,
# 0x90: ,
# 0x91: ,
# 0x92: ,
# 0x93: ,
# 0x94: ,
# 0x95: ,
0x96: 'SEED-SHA',
0x97: 'DH-DSS-SEED-SHA',
0x98: 'DH-RSA-SEED-SHA',
0x99: 'DHE-DSS-SEED-SHA',
0x9a: 'DHE-RSA-SEED-SHA',
0x9b: 'ADH-SEED-SHA',
0x9c: 'AES128-GCM-SHA256',
0x9d: 'AES256-GCM-SHA384',
0x9e: 'DHE-RSA-AES128-GCM-SHA256',
0x9f: 'DHE-RSA-AES256-GCM-SHA384',
0xa0: 'DH-RSA-AES128-GCM-SHA256',
0xa1: 'DH-RSA-AES256-GCM-SHA384',
0xa2: 'DHE-DSS-AES128-GCM-SHA256',
0xa3: 'DHE-DSS-AES256-GCM-SHA384',
0xa4: 'DH-DSS-AES128-GCM-SHA256',
0xa5: 'DH-DSS-AES256-GCM-SHA384',
0xa6: 'ADH-AES128-GCM-SHA256',
0xa7: 'ADH-AES256-GCM-SHA384',
0x5600: 'TLS_FALLBACK_SCSV',
0xc001: 'ECDH-ECDSA-NULL-SHA',
0xc002: 'ECDH-ECDSA-RC4-SHA',
0xc003: 'ECDH-ECDSA-DES-CBC3-SHA',
0xc004: 'ECDH-ECDSA-AES128-SHA',
0xc005: 'ECDH-ECDSA-AES256-SHA',
0xc006: 'ECDHE-ECDSA-NULL-SHA',
0xc007: 'ECDHE-ECDSA-RC4-SHA',
0xc008: 'ECDHE-ECDSA-DES-CBC3-SHA',
0xc009: 'ECDHE-ECDSA-AES128-SHA',
0xc00a: 'ECDHE-ECDSA-AES256-SHA',
0xc00b: 'ECDH-RSA-NULL-SHA',
0xc00c: 'ECDH-RSA-RC4-SHA',
0xc00d: 'ECDH-RSA-DES-CBC3-SHA',
0xc00e: 'ECDH-RSA-AES128-SHA',
0xc00f: 'ECDH-RSA-AES256-SHA',
0xc010: 'ECDHE-RSA-NULL-SHA',
0xc011: 'ECDHE-RSA-RC4-SHA',
0xc012: 'ECDHE-RSA-DES-CBC3-SHA',
0xc013: 'ECDHE-RSA-AES128-SHA',
0xc014: 'ECDHE-RSA-AES256-SHA',
0xc015: 'AECDH-NULL-SHA',
0xc016: 'AECDH-RC4-SHA',
0xc017: 'AECDH-DES-CBC3-SHA',
0xc018: 'AECDH-AES128-SHA',
0xc019: 'AECDH-AES256-SHA',
0xc01a: 'SRP-3DES-EDE-CBC-SHA',
0xc01b: 'SRP-RSA-3DES-EDE-CBC-SHA',
0xc01c: 'SRP-DSS-3DES-EDE-CBC-SHA',
0xc01d: 'SRP-AES-128-CBC-SHA',
0xc01e: 'SRP-RSA-AES-128-CBC-SHA',
0xc01f: 'SRP-DSS-AES-128-CBC-SHA',
0xc020: 'SRP-AES-256-CBC-SHA',
0xc021: 'SRP-RSA-AES-256-CBC-SHA',
0xc022: 'SRP-DSS-AES-256-CBC-SHA',
0xc023: 'ECDHE-ECDSA-AES128-SHA256',
0xc024: 'ECDHE-ECDSA-AES256-SHA384',
0xc025: 'ECDH-ECDSA-AES128-SHA256',
0xc026: 'ECDH-ECDSA-AES256-SHA384',
0xc027: 'ECDHE-RSA-AES128-SHA256',
0xc028: 'ECDHE-RSA-AES256-SHA384',
0xc029: 'ECDH-RSA-AES128-SHA256',
0xc02a: 'ECDH-RSA-AES256-SHA384',
0xc02b: 'ECDHE-ECDSA-AES128-GCM-SHA256',
0xc02c: 'ECDHE-ECDSA-AES256-GCM-SHA384',
0xc02d: 'ECDH-ECDSA-AES128-GCM-SHA256',
0xc02e: 'ECDH-ECDSA-AES256-GCM-SHA384',
0xc02f: 'ECDHE-RSA-AES128-GCM-SHA256',
0xc030: 'ECDHE-RSA-AES256-GCM-SHA384',
0xc031: 'ECDH-RSA-AES128-GCM-SHA256',
0xc032: 'ECDH-RSA-AES256-GCM-SHA384',
0xcc13: 'ECDHE-RSA-CHACHA20-POLY1305',
0xcc14: 'ECDHE-ECDSA-CHACHA20-POLY1305',
0xcc15: 'DHE-RSA-CHACHA20-POLY1305',
0xff00: 'GOST-MD5',
0xff01: 'GOST-GOST94',
0xff02: 'GOST-GOST89MAC',
0xff03: 'GOST-GOST89STREAM',
0x010080: 'RC4-MD5',
0x020080: 'EXP-RC4-MD5',
0x030080: 'RC2-CBC-MD5',
0x040080: 'EXP-RC2-CBC-MD5',
0x050080: 'IDEA-CBC-MD5',
0x060040: 'DES-CBC-MD5',
0x0700c0: 'DES-CBC3-MD5',
0x080080: 'RC4-64-MD5',
}
# We need to specify this manually, otherwise OpenSSL may select a non-HTTP2 cipher by default.
# https://ssl-config.mozilla.org/#config=old
DEFAULT_CLIENT_CIPHERS = (
"ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:"
"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:"
"DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:"
"ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:"
"ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:"
"AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA"
)
class TlsLayer(base.Layer):
"""
The TLS layer implements transparent TLS connections.
It exposes the following API to child layers:
- :py:meth:`set_server_tls` to modify TLS settings for the server connection.
- :py:attr:`server_tls`, :py:attr:`server_sni` as read-only attributes describing the current TLS settings for
the server connection.
"""
def __init__(self, ctx, client_tls, server_tls, custom_server_sni=None):
super().__init__(ctx)
self._client_tls = client_tls
self._server_tls = server_tls
self._custom_server_sni = custom_server_sni
self._client_hello: Optional[net_tls.ClientHello] = None
def __call__(self):
"""
The strategy for establishing TLS is as follows:
First, we determine whether we need the server cert to establish ssl with the client.
If so, we first connect to the server and then to the client.
If not, we only connect to the client and do the server handshake lazily.
An additional complexity is that we need to mirror SNI and ALPN from the client when connecting to the server.
We manually peek into the connection and parse the ClientHello message to obtain these values.
"""
if self._client_tls:
# Peek into the connection, read the initial client hello and parse it to obtain SNI and ALPN values.
try:
self._client_hello = net_tls.ClientHello.from_file(self.client_conn.rfile)
except exceptions.TlsProtocolException as e:
self.log("Cannot parse Client Hello: %s" % repr(e), "error")
                # Without knowing the ClientHello we cannot proceed with this connection.
return
# Do we need to do a server handshake now?
# There are two reasons why we would want to establish TLS with the server now:
# 1. If we already have an existing server connection and server_tls is True,
# we need to establish TLS now because .connect() will not be called anymore.
# 2. We may need information from the server connection for the client handshake.
#
# A couple of factors influence (2):
# 2.1 There actually is (or will be) a TLS-enabled upstream connection
# 2.2 An upstream connection is not wanted by the user if --no-upstream-cert is passed.
# 2.3 An upstream connection is implied by add_upstream_certs_to_client_chain
# 2.4 The client wants to negotiate an alternative protocol in its handshake, we need to find out
# what is supported by the server
        # 2.5 The client did not send an SNI value, so we don't know the certificate subject.
client_tls_requires_server_connection = (
self._server_tls and
self.config.options.upstream_cert and
(
self.config.options.add_upstream_certs_to_client_chain or
self._client_tls and (
self._client_hello.alpn_protocols or
not self._client_hello.sni
)
)
)
establish_server_tls_now = (
(self.server_conn.connected() and self._server_tls) or
client_tls_requires_server_connection
)
if self._client_tls and establish_server_tls_now:
self._establish_tls_with_client_and_server()
elif self._client_tls:
self._establish_tls_with_client()
elif establish_server_tls_now:
self._establish_tls_with_server()
layer = self.ctx.next_layer(self)
layer()
def __repr__(self): # pragma: no cover
if self._client_tls and self._server_tls:
return "TlsLayer(client and server)"
elif self._client_tls:
return "TlsLayer(client)"
elif self._server_tls:
return "TlsLayer(server)"
else:
return "TlsLayer(inactive)"
def connect(self):
if not self.server_conn.connected():
self.ctx.connect()
if self._server_tls and not self.server_conn.tls_established:
self._establish_tls_with_server()
def set_server_tls(self, server_tls: bool, sni: Union[str, None, bool] = None) -> None:
"""
Set the TLS settings for the next server connection that will be established.
This function will not alter an existing connection.
Args:
server_tls: Shall we establish TLS with the server?
sni: ``str`` for a custom SNI value,
``None`` for the client SNI value,
``False`` if no SNI value should be sent.
"""
self._server_tls = server_tls
self._custom_server_sni = sni
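    # Illustrative calls (assumptions, not part of the original code):
    #   layer.set_server_tls(True)                     # TLS on, mirror the client's SNI
    #   layer.set_server_tls(True, sni="example.com")  # TLS on, custom SNI value
    #   layer.set_server_tls(True, sni=False)          # TLS on, send no SNI at all
    #   layer.set_server_tls(False)                    # next server connection stays plaintext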
@property
def server_tls(self):
"""
``True``, if the next server connection that will be established should be upgraded to TLS.
"""
return self._server_tls
@property
def server_sni(self) -> Optional[str]:
"""
The Server Name Indication we want to send with the next server TLS handshake.
"""
if self._custom_server_sni is False:
return None
elif self._custom_server_sni:
return self._custom_server_sni
elif self._client_hello and self._client_hello.sni:
return self._client_hello.sni.decode("idna")
else:
return None
@property
def alpn_for_client_connection(self):
return self.server_conn.get_alpn_proto_negotiated()
def __alpn_select_callback(self, conn_, options):
# This gets triggered if we haven't established an upstream connection yet.
default_alpn = b'http/1.1'
if self.alpn_for_client_connection in options:
choice = bytes(self.alpn_for_client_connection)
elif default_alpn in options:
choice = bytes(default_alpn)
else:
choice = options[0]
self.log("ALPN for client: %s" % choice, "debug")
return choice
def _establish_tls_with_client_and_server(self):
try:
self.ctx.connect()
self._establish_tls_with_server()
except Exception:
# If establishing TLS with the server fails, we try to establish TLS with the client nonetheless
# to send an error message over TLS.
try:
self._establish_tls_with_client()
except:
pass
raise
self._establish_tls_with_client()
def _establish_tls_with_client(self):
self.log("Establish TLS with client", "debug")
cert, key, chain_file = self._find_cert()
if self.config.options.add_upstream_certs_to_client_chain:
extra_certs = self.server_conn.server_certs
else:
extra_certs = None
try:
tls_method, tls_options = net_tls.VERSION_CHOICES[self.config.options.ssl_version_client]
self.client_conn.convert_to_tls(
cert, key,
method=tls_method,
options=tls_options,
cipher_list=self.config.options.ciphers_client or DEFAULT_CLIENT_CIPHERS,
dhparams=self.config.certstore.dhparams,
chain_file=chain_file,
alpn_select_callback=self.__alpn_select_callback,
extra_chain_certs=extra_certs,
)
# Some TLS clients will not fail the handshake,
# but will immediately throw an "unexpected eof" error on the first read.
# The reason for this might be difficult to find, so we try to peek here to see if it
            # raises an error.
self.client_conn.rfile.peek(1)
except exceptions.TlsException as e:
sni_str = self._client_hello.sni and self._client_hello.sni.decode("idna")
raise exceptions.ClientHandshakeException(
"Cannot establish TLS with client (sni: {sni}): {e}".format(
sni=sni_str, e=repr(e)
),
sni_str or repr(self.server_conn.address)
)
def _establish_tls_with_server(self):
self.log("Establish TLS with server", "debug")
try:
alpn = None
if self._client_tls:
if self._client_hello.alpn_protocols:
# We only support http/1.1 and h2.
# If the server only supports spdy (next to http/1.1), it may select that
# and mitmproxy would enter TCP passthrough mode, which we want to avoid.
alpn = [
x for x in self._client_hello.alpn_protocols if
not (x.startswith(b"h2-") or x.startswith(b"spdy"))
]
if alpn and b"h2" in alpn and not self.config.options.http2:
alpn.remove(b"h2")
if self.client_conn.tls_established and self.client_conn.get_alpn_proto_negotiated():
                    # If the client has already negotiated an ALPN protocol, then force the
# server to use the same. This can only happen if the host gets
# changed after the initial connection was established. E.g.:
# * the client offers http/1.1 and h2,
# * the initial host is only capable of http/1.1,
# * then the first server connection negotiates http/1.1,
# * but after the server_conn change, the new host offers h2
                    # * which results in garbage because the layers don't match.
alpn = [self.client_conn.get_alpn_proto_negotiated()]
            # We pass through the list of ciphers sent by the client, because some HTTP/2 servers
# will select a non-HTTP/2 compatible cipher from our default list and then hang up
# because it's incompatible with h2. :-)
ciphers_server = self.config.options.ciphers_server
if not ciphers_server and self._client_tls:
ciphers_server = []
for id in self._client_hello.cipher_suites:
if id in CIPHER_ID_NAME_MAP.keys():
ciphers_server.append(CIPHER_ID_NAME_MAP[id])
ciphers_server = ':'.join(ciphers_server)
args = net_tls.client_arguments_from_options(self.config.options)
args["cipher_list"] = ciphers_server
self.server_conn.establish_tls(
sni=self.server_sni,
alpn_protos=alpn,
**args
)
tls_cert_err = self.server_conn.ssl_verification_error
if tls_cert_err is not None:
self.log(str(tls_cert_err), "warn")
self.log("Ignoring server verification error, continuing with connection", "warn")
except exceptions.InvalidCertificateException as e:
raise exceptions.InvalidServerCertificate(str(e))
except exceptions.TlsException as e:
raise exceptions.TlsProtocolException(
"Cannot establish TLS with {host}:{port} (sni: {sni}): {e}".format(
host=self.server_conn.address[0],
port=self.server_conn.address[1],
sni=self.server_sni,
e=repr(e)
)
)
proto = self.alpn_for_client_connection.decode() if self.alpn_for_client_connection else '-'
self.log("ALPN selected by server: {}".format(proto), "debug")
def _find_cert(self):
"""
This function determines the Common Name (CN), Subject Alternative Names (SANs) and Organization Name
our certificate should have and then fetches a matching cert from the certstore.
"""
host = None
sans = set()
organization = None
# In normal operation, the server address should always be known at this point.
# However, we may just want to establish TLS so that we can send an error message to the client,
# in which case the address can be None.
if self.server_conn.address:
host = self.server_conn.address[0].encode("idna")
# Should we incorporate information from the server certificate?
use_upstream_cert = (
self.server_conn and
self.server_conn.tls_established and
self.config.options.upstream_cert
)
if use_upstream_cert:
upstream_cert = self.server_conn.cert
sans.update(upstream_cert.altnames)
if upstream_cert.cn:
sans.add(host)
host = upstream_cert.cn.decode("utf8").encode("idna")
if upstream_cert.organization:
organization = upstream_cert.organization
# Also add SNI values.
if self._client_hello.sni:
sans.add(self._client_hello.sni)
if self._custom_server_sni:
sans.add(self._custom_server_sni.encode("idna"))
# RFC 2818: If a subjectAltName extension of type dNSName is present, that MUST be used as the identity.
# In other words, the Common Name is irrelevant then.
if host:
sans.add(host)
return self.config.certstore.get_cert(host, list(sans), organization)
| mit | -2,940,024,410,166,875,000 | 39.216535 | 118 | 0.603279 | false |
fragaria/BorIS | boris/services/migrations/0018_delete_pregnancytest.py | 1 | 1313 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import models, migrations
def reconvert_tests(apps, schema_editor):
pass
def convert_tests(apps, schema_editor):
# convert PregnancyTest to UrineTest.pregnancy_test
PregnancyTest = apps.get_model('services', 'PregnancyTest')
UrineTest = apps.get_model('services', 'UrineTest')
try:
ct = ContentType.objects.get_by_natural_key('services', 'pregnancytest')
for ic in PregnancyTest.objects.filter(content_type_id=ct.id):
new = UrineTest(encounter=ic.encounter, title=UrineTest._meta.verbose_name)
new.created = ic.created
new.modified = ic.modified
new.pregnancy_test = True
ct = ContentType.objects.get_for_model(new)
new.content_type_id = ct.id
new.save()
except ContentType.DoesNotExist:
pass # new installations don't have the ct
class Migration(migrations.Migration):
dependencies = [
('services', '0017_delete_individualcounseling'),
]
operations = [
migrations.RunPython(convert_tests, reverse_code=reconvert_tests),
migrations.DeleteModel(
name='PregnancyTest',
),
]
| mit | 1,142,179,289,645,332,700 | 30.261905 | 87 | 0.657273 | false |
supriyantomaftuh/zget | zget/put.py | 1 | 7124 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, \
unicode_literals
import os
import sys
import time
import socket
try:
import urllib.request as urllib
except ImportError:
import urllib
import hashlib
import argparse
import logging
from zeroconf import ServiceInfo, Zeroconf
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from . import utils
__all__ = ["put"]
def validate_address(address):
""" Validate IP address
"""
try:
socket.inet_aton(address)
return address
except socket.error:
raise argparse.ArgumentTypeError(
"%s is not a valid IP address" % address
)
class StateHTTPServer(HTTPServer):
"""
HTTP Server that knows a certain filename and can be set to remember if
that file has been transferred using :class:`FileHandler`
"""
downloaded = False
filename = ""
basename = ""
reporthook = None
class FileHandler(BaseHTTPRequestHandler):
"""
Custom HTTP upload handler that allows one single filename to be requested.
"""
def do_GET(self):
if self.path == urllib.pathname2url(
os.path.join('/', self.server.basename)
):
utils.logger.info("Peer found. Uploading...")
full_path = os.path.join(os.curdir, self.server.filename)
with open(full_path, 'rb') as fh:
maxsize = os.path.getsize(full_path)
self.send_response(200)
self.send_header('Content-type', 'application/octet-stream')
self.send_header('Content-length', maxsize)
self.end_headers()
i = 0
while True:
data = fh.read(1024 * 8) # chunksize taken from urllib
if not data:
break
self.wfile.write(data)
if self.server.reporthook is not None:
self.server.reporthook(i, 1024 * 8, maxsize)
i += 1
self.server.downloaded = True
else:
self.send_response(404)
self.end_headers()
raise RuntimeError("Invalid request received. Aborting.")
def log_message(self, format, *args):
"""
Suppress log messages by overloading this function
"""
return
def cli(inargs=None):
"""
Commandline interface for sending files
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--port', '-p',
type=int, nargs='?',
help="The port to share the file on"
)
parser.add_argument(
'--address', '-a', nargs='?',
type=validate_address,
help="The address to share the file on"
)
parser.add_argument(
'--interface', '-i', nargs='?',
help="The interface to share the file on"
)
parser.add_argument(
'--verbose', '-v',
action='count', default=0,
help="Verbose mode. Multiple -v options increase the verbosity"
)
parser.add_argument(
'--quiet', '-q',
action='count', default=0,
help="Quiet mode. Hides progess bar"
)
parser.add_argument(
'--timeout', '-t',
type=int, metavar="SECONDS",
help="Set timeout after which program aborts transfer"
)
parser.add_argument(
'--version', '-V',
action='version',
version='%%(prog)s %s' % utils.__version__
)
parser.add_argument(
'input',
help="The file to share on the network"
)
args = parser.parse_args(inargs)
utils.enable_logger(args.verbose)
try:
if not os.path.isfile(args.input):
raise ValueError(
"File %s does not exist" % args.input
)
if args.interface and args.address:
raise ValueError(
"You may only provide one of --address "
"or --interface"
)
with utils.Progresshook(args.input) as progress:
put(
args.input,
interface=args.interface,
address=args.address,
port=args.port,
reporthook=progress if args.quiet == 0 else None,
timeout=args.timeout,
)
except Exception as e:
if args.verbose:
raise
utils.logger.error(e.message)
sys.exit(1)
def put(
filename,
interface=None,
address=None,
port=None,
reporthook=None,
timeout=None,
):
"""Send a file using the zget protocol.
Parameters
----------
filename : string
The filename to be transferred
interface : string
The network interface to use. Optional.
address : string
The network address to use. Optional.
port : int
The network port to use. Optional.
reporthook : callable
A hook that will be called during transfer. Handy for watching the
transfer. See :code:`urllib.urlretrieve` for callback parameters.
Optional.
timeout : int
Seconds to wait until process is aborted. A running transfer is not
aborted even when timeout was hit. Optional.
Raises
-------
TimeoutException
When a timeout occurred.
"""
if port is None:
port = utils.config().getint('DEFAULT', 'port')
if interface is None:
interface = utils.config().get('DEFAULT', 'interface')
if not 0 <= port <= 65535:
raise ValueError("Port %d exceeds allowed range" % port)
basename = os.path.basename(filename)
filehash = hashlib.sha1(basename.encode('utf-8')).hexdigest()
if interface is None:
interface = utils.default_interface()
if address is None:
address = utils.ip_addr(interface)
server = StateHTTPServer((address, port), FileHandler)
server.timeout = timeout
server.filename = filename
server.basename = basename
server.reporthook = reporthook
port = server.server_port
utils.logger.debug(
"Using interface %s" % interface
)
utils.logger.debug(
"Listening on %s:%d \n"
"you may change address using --address and "
"port using --port" % (address, port)
)
utils.logger.debug(
"Broadcasting as %s._zget._http._tcp.local." % filehash
)
info = ServiceInfo(
"_zget._http._tcp.local.",
"%s._zget._http._tcp.local." % filehash,
socket.inet_aton(address), port, 0, 0,
{'path': None}
)
zeroconf = Zeroconf()
try:
zeroconf.register_service(info)
server.handle_request()
except KeyboardInterrupt:
pass
server.socket.close()
zeroconf.unregister_service(info)
zeroconf.close()
if timeout is not None and not server.downloaded:
raise utils.TimeoutException()
else:
utils.logger.info("Done.")
if __name__ == '__main__':
cli(sys.argv[1:])
| mit | -6,137,828,054,823,278,000 | 25.681648 | 79 | 0.580011 | false |
russellhadley/coreclr | src/scripts/genEventPipe.py | 1 | 17048 | from __future__ import print_function
from genXplatEventing import *
from genXplatLttng import *
import os
import xml.dom.minidom as DOM
stdprolog = """// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/******************************************************************
DO NOT MODIFY. AUTOGENERATED FILE.
This file is generated using the logic from <root>/src/scripts/genEventPipe.py
******************************************************************/
"""
stdprolog_cmake = """#
#
#******************************************************************
#DO NOT MODIFY. AUTOGENERATED FILE.
#This file is generated using the logic from <root>/src/scripts/genEventPipe.py
#******************************************************************
"""
def generateClrEventPipeWriteEventsImpl(
providerName, eventNodes, allTemplates, exclusionListFile):
providerPrettyName = providerName.replace("Windows-", '')
providerPrettyName = providerPrettyName.replace("Microsoft-", '')
providerPrettyName = providerPrettyName.replace('-', '_')
WriteEventImpl = []
# EventPipeEvent declaration
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
WriteEventImpl.append(
"EventPipeEvent *EventPipeEvent" +
eventName +
" = nullptr;\n")
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
# generate EventPipeEventEnabled function
eventEnabledImpl = """bool EventPipeEventEnabled%s()
{
return EventPipeEvent%s->IsEnabled();
}
""" % (eventName, eventName)
WriteEventImpl.append(eventEnabledImpl)
# generate EventPipeWriteEvent function
fnptype = []
linefnptype = []
fnptype.append("extern \"C\" ULONG EventPipeWriteEvent")
fnptype.append(eventName)
fnptype.append("(\n")
if templateName:
template = allTemplates[templateName]
else:
template = None
if template:
fnSig = template.signature
for paramName in fnSig.paramlist:
fnparam = fnSig.getParam(paramName)
wintypeName = fnparam.winType
typewName = palDataTypeMapping[wintypeName]
winCount = fnparam.count
countw = palDataTypeMapping[winCount]
if paramName in template.structs:
linefnptype.append(
"%sint %s_ElementSize,\n" %
(lindent, paramName))
linefnptype.append(lindent)
linefnptype.append(typewName)
if countw != " ":
linefnptype.append(countw)
linefnptype.append(" ")
linefnptype.append(fnparam.name)
linefnptype.append(",\n")
if len(linefnptype) > 0:
del linefnptype[-1]
fnptype.extend(linefnptype)
fnptype.append(")\n{\n")
checking = """ if (!EventPipeEventEnabled%s())
return ERROR_SUCCESS;
""" % (eventName)
fnptype.append(checking)
WriteEventImpl.extend(fnptype)
if template:
body = generateWriteEventBody(template, providerName, eventName)
WriteEventImpl.append(body)
else:
WriteEventImpl.append(
" EventPipe::WriteEvent(*EventPipeEvent" +
eventName +
", (BYTE*) nullptr, 0);\n")
WriteEventImpl.append("\n return ERROR_SUCCESS;\n}\n\n")
# EventPipeProvider and EventPipeEvent initialization
WriteEventImpl.append(
"extern \"C\" void Init" +
providerPrettyName +
"()\n{\n")
WriteEventImpl.append(
" EventPipeProvider" +
providerPrettyName +
" = EventPipe::CreateProvider(" +
providerPrettyName +
"GUID);\n")
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
eventKeywords = eventNode.getAttribute('keywords')
eventKeywordsMask = generateEventKeywords(eventKeywords)
eventValue = eventNode.getAttribute('value')
eventVersion = eventNode.getAttribute('version')
eventLevel = eventNode.getAttribute('level')
eventLevel = eventLevel.replace("win:", "EventPipeEventLevel::")
exclusionInfo = parseExclusionList(exclusionListFile)
taskName = eventNode.getAttribute('task')
initEvent = """ EventPipeEvent%s = EventPipeProvider%s->AddEvent(%s,%s,%s,%s);
""" % (eventName, providerPrettyName, eventValue, eventKeywordsMask, eventVersion, eventLevel)
WriteEventImpl.append(initEvent)
WriteEventImpl.append("}")
return ''.join(WriteEventImpl)
def generateWriteEventBody(template, providerName, eventName):
header = """
char stackBuffer[%s];
char *buffer = stackBuffer;
unsigned int offset = 0;
unsigned int size = %s;
bool fixedBuffer = true;
bool success = true;
""" % (template.estimated_size, template.estimated_size)
fnSig = template.signature
pack_list = []
for paramName in fnSig.paramlist:
parameter = fnSig.getParam(paramName)
if paramName in template.structs:
size = "(int)%s_ElementSize * (int)%s" % (
paramName, parameter.prop)
if template.name in specialCaseSizes and paramName in specialCaseSizes[template.name]:
size = "(int)(%s)" % specialCaseSizes[template.name][paramName]
pack_list.append(
" success &= WriteToBuffer((const BYTE *)%s, %s, buffer, offset, size, fixedBuffer);" %
(paramName, size))
elif paramName in template.arrays:
size = "sizeof(%s) * (int)%s" % (
lttngDataTypeMapping[parameter.winType],
parameter.prop)
if template.name in specialCaseSizes and paramName in specialCaseSizes[template.name]:
size = "(int)(%s)" % specialCaseSizes[template.name][paramName]
pack_list.append(
" success &= WriteToBuffer((const BYTE *)%s, %s, buffer, offset, size, fixedBuffer);" %
(paramName, size))
elif parameter.winType == "win:GUID":
pack_list.append(
" success &= WriteToBuffer(*%s, buffer, offset, size, fixedBuffer);" %
(parameter.name,))
else:
pack_list.append(
" success &= WriteToBuffer(%s, buffer, offset, size, fixedBuffer);" %
(parameter.name,))
code = "\n".join(pack_list) + "\n\n"
checking = """ if (!success)
{
if (!fixedBuffer)
delete[] buffer;
return ERROR_WRITE_FAULT;
}\n\n"""
body = " EventPipe::WriteEvent(*EventPipeEvent" + \
eventName + ", (BYTE *)buffer, size);\n"
footer = """
if (!fixedBuffer)
delete[] buffer;
"""
return header + code + checking + body + footer
providerGUIDMap = {}
providerGUIDMap[
"{e13c0d23-ccbc-4e12-931b-d9cc2eee27e4}"] = "{0xe13c0d23,0xccbc,0x4e12,{0x93,0x1b,0xd9,0xcc,0x2e,0xee,0x27,0xe4}}"
providerGUIDMap[
"{A669021C-C450-4609-A035-5AF59AF4DF18}"] = "{0xA669021C,0xC450,0x4609,{0xA0,0x35,0x5A,0xF5,0x9A,0xF4,0xDF,0x18}}"
providerGUIDMap[
"{CC2BCBBA-16B6-4cf3-8990-D74C2E8AF500}"] = "{0xCC2BCBBA,0x16B6,0x4cf3,{0x89,0x90,0xD7,0x4C,0x2E,0x8A,0xF5,0x00}}"
providerGUIDMap[
"{763FD754-7086-4dfe-95EB-C01A46FAF4CA}"] = "{0x763FD754,0x7086,0x4dfe,{0x95,0xEB,0xC0,0x1A,0x46,0xFA,0xF4,0xCA}}"
def generateGUID(tmpGUID):
return providerGUIDMap[tmpGUID]
keywordMap = {}
def generateEventKeywords(eventKeywords):
mask = 0
# split keywords if there are multiple
allKeywords = eventKeywords.split()
for singleKeyword in allKeywords:
mask = mask | keywordMap[singleKeyword]
return mask
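# Illustrative example (hypothetical masks; the real values come from the
# manifest's <keyword> nodes parsed in generateEventPipeFiles below):
# with keywordMap = {'GCKeyword': 0x1, 'LoaderKeyword': 0x8},
# generateEventKeywords('GCKeyword LoaderKeyword') returns 0x9.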
def generateEventPipeCmakeFile(etwmanifest, eventpipe_directory):
tree = DOM.parse(etwmanifest)
with open(eventpipe_directory + "CMakeLists.txt", 'w') as topCmake:
topCmake.write(stdprolog_cmake + "\n")
topCmake.write("""cmake_minimum_required(VERSION 2.8.12.2)
project(eventpipe)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
include_directories(${CLR_DIR}/src/vm)
add_library(eventpipe
STATIC\n""")
for providerNode in tree.getElementsByTagName('provider'):
providerName = providerNode.getAttribute('name')
providerName = providerName.replace("Windows-", '')
providerName = providerName.replace("Microsoft-", '')
providerName_File = providerName.replace('-', '')
providerName_File = providerName_File.lower()
topCmake.write(' "%s.cpp"\n' % (providerName_File))
topCmake.write(' "eventpipehelpers.cpp"\n')
topCmake.write(""" )
add_dependencies(eventpipe GeneratedEventingFiles)
# Install the static eventpipe library
install(TARGETS eventpipe DESTINATION lib)
""")
topCmake.close()
def generateEventPipeHelperFile(etwmanifest, eventpipe_directory):
with open(eventpipe_directory + "eventpipehelpers.cpp", 'w') as helper:
helper.write(stdprolog)
helper.write("""
#include "stdlib.h"
bool ResizeBuffer(char *&buffer, unsigned int& size, unsigned int currLen, unsigned int newSize, bool &fixedBuffer)
{
newSize *= 1.5;
_ASSERTE(newSize > size); // check for overflow
if (newSize < 32)
newSize = 32;
char *newBuffer = new char[newSize];
memcpy(newBuffer, buffer, currLen);
if (!fixedBuffer)
delete[] buffer;
buffer = newBuffer;
size = newSize;
fixedBuffer = false;
return true;
}
bool WriteToBuffer(const BYTE *src, unsigned int len, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer)
{
if(!src) return true;
if (offset + len > size)
{
if (!ResizeBuffer(buffer, size, offset, size + len, fixedBuffer))
return false;
}
memcpy(buffer + offset, src, len);
offset += len;
return true;
}
bool WriteToBuffer(PCWSTR str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer)
{
if(!str) return true;
unsigned int byteCount = (PAL_wcslen(str) + 1) * sizeof(*str);
if (offset + byteCount > size)
{
if (!ResizeBuffer(buffer, size, offset, size + byteCount, fixedBuffer))
return false;
}
memcpy(buffer + offset, str, byteCount);
offset += byteCount;
return true;
}
bool WriteToBuffer(const char *str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer)
{
if(!str) return true;
unsigned int len = strlen(str) + 1;
if (offset + len > size)
{
if (!ResizeBuffer(buffer, size, offset, size + len, fixedBuffer))
return false;
}
memcpy(buffer + offset, str, len);
offset += len;
return true;
}
""")
tree = DOM.parse(etwmanifest)
for providerNode in tree.getElementsByTagName('provider'):
providerName = providerNode.getAttribute('name')
providerPrettyName = providerName.replace("Windows-", '')
providerPrettyName = providerPrettyName.replace("Microsoft-", '')
providerPrettyName = providerPrettyName.replace('-', '_')
helper.write(
"extern \"C\" void Init" +
providerPrettyName +
"();\n\n")
helper.write("extern \"C\" void InitProvidersAndEvents()\n{\n")
for providerNode in tree.getElementsByTagName('provider'):
providerName = providerNode.getAttribute('name')
providerPrettyName = providerName.replace("Windows-", '')
providerPrettyName = providerPrettyName.replace("Microsoft-", '')
providerPrettyName = providerPrettyName.replace('-', '_')
helper.write(" Init" + providerPrettyName + "();\n")
helper.write("}")
helper.close()
def generateEventPipeImplFiles(
etwmanifest, eventpipe_directory, exclusionListFile):
tree = DOM.parse(etwmanifest)
coreclrRoot = os.getcwd()
for providerNode in tree.getElementsByTagName('provider'):
providerGUID = providerNode.getAttribute('guid')
providerGUID = generateGUID(providerGUID)
providerName = providerNode.getAttribute('name')
providerPrettyName = providerName.replace("Windows-", '')
providerPrettyName = providerPrettyName.replace("Microsoft-", '')
providerName_File = providerPrettyName.replace('-', '')
providerName_File = providerName_File.lower()
providerPrettyName = providerPrettyName.replace('-', '_')
eventpipefile = eventpipe_directory + providerName_File + ".cpp"
eventpipeImpl = open(eventpipefile, 'w')
eventpipeImpl.write(stdprolog)
header = """
#include \"%s/src/vm/common.h\"
#include \"%s/src/vm/eventpipeprovider.h\"
#include \"%s/src/vm/eventpipeevent.h\"
#include \"%s/src/vm/eventpipe.h\"
bool ResizeBuffer(char *&buffer, unsigned int& size, unsigned int currLen, unsigned int newSize, bool &fixedBuffer);
bool WriteToBuffer(PCWSTR str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer);
bool WriteToBuffer(const char *str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer);
bool WriteToBuffer(const BYTE *src, unsigned int len, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer);
template <typename T>
bool WriteToBuffer(const T &value, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer)
{
if (sizeof(T) + offset > size)
{
if (!ResizeBuffer(buffer, size, offset, size + sizeof(T), fixedBuffer))
return false;
}
*(T *)(buffer + offset) = value;
offset += sizeof(T);
return true;
}
""" % (coreclrRoot, coreclrRoot, coreclrRoot, coreclrRoot)
eventpipeImpl.write(header)
eventpipeImpl.write(
"GUID const " +
providerPrettyName +
"GUID = " +
providerGUID +
";\n")
eventpipeImpl.write(
"EventPipeProvider *EventPipeProvider" +
providerPrettyName +
" = nullptr;\n")
templateNodes = providerNode.getElementsByTagName('template')
allTemplates = parseTemplateNodes(templateNodes)
eventNodes = providerNode.getElementsByTagName('event')
eventpipeImpl.write(
generateClrEventPipeWriteEventsImpl(
providerName,
eventNodes,
allTemplates,
exclusionListFile) + "\n")
eventpipeImpl.close()
def generateEventPipeFiles(
etwmanifest, eventpipe_directory, exclusionListFile):
eventpipe_directory = eventpipe_directory + "/"
tree = DOM.parse(etwmanifest)
if not os.path.exists(eventpipe_directory):
os.makedirs(eventpipe_directory)
# generate Cmake file
generateEventPipeCmakeFile(etwmanifest, eventpipe_directory)
# generate helper file
generateEventPipeHelperFile(etwmanifest, eventpipe_directory)
# generate all keywords
for keywordNode in tree.getElementsByTagName('keyword'):
keywordName = keywordNode.getAttribute('name')
keywordMask = keywordNode.getAttribute('mask')
keywordMap[keywordName] = int(keywordMask, 0)
# generate .cpp file for each provider
generateEventPipeImplFiles(
etwmanifest,
eventpipe_directory,
exclusionListFile)
import argparse
import sys
def main(argv):
# parse the command line
parser = argparse.ArgumentParser(
        description="Generates the code required to instrument the EventPipe logging mechanism")
required = parser.add_argument_group('required arguments')
required.add_argument('--man', type=str, required=True,
                          help='full path to manifest containing the description of events')
required.add_argument('--intermediate', type=str, required=True,
help='full path to eventprovider intermediate directory')
required.add_argument('--exc', type=str, required=True,
help='full path to exclusion list')
args, unknown = parser.parse_known_args(argv)
if unknown:
print('Unknown argument(s): ', ', '.join(unknown))
return const.UnknownArguments
sClrEtwAllMan = args.man
intermediate = args.intermediate
exclusionListFile = args.exc
generateEventPipeFiles(sClrEtwAllMan, intermediate, exclusionListFile)
if __name__ == '__main__':
return_code = main(sys.argv[1:])
sys.exit(return_code)
| mit | -4,561,604,318,752,009,700 | 33.297787 | 130 | 0.622492 | false |
wasserfeder/lomap | lomap/algorithms/dijkstra.py | 1 | 12332 | #! /usr/bin/python
# Copyright (C) 2012-2015, Alphan Ulusoy ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function
__all__ = ['subset_to_subset_dijkstra_path_value', 'source_to_target_dijkstra',
'dijkstra_to_all']
def subset_to_subset_dijkstra_path_value(source_set, G, target_set,
combine_fn='sum', degen_paths=False, weight_key='weight'):
"""
Compute the shortest path lengths between two sets of nodes in a weighted graph.
Adapted from 'single_source_dijkstra_path_length' in NetworkX, available at
http://networkx.github.io.
Parameters
----------
G: NetworkX graph
source_set: Set of node labels
Starting nodes for paths
target_set: Set of node labels
Ending nodes for paths
    combine_fn: String, optional (default: 'sum')
        Either 'sum' or 'max'. 'sum' computes classical shortest-path sums;
        'max' minimizes the largest edge weight along the path (ties broken
        by the total weight), in which case each returned value is a
        (max edge weight, total weight) tuple.
degen_paths: Boolean, optional (default: False)
Controls whether degenerate paths (paths that do not traverse any edges)
are acceptable.
weight_key: String, optional (default: 'weight')
Edge data key corresponding to the edge weight.
Returns
-------
length : dictionary
Dictionary of dictionaries of shortest lengths keyed by source and
target labels.
Notes
-----
Edge weight attributes must be numerical.
This algorithm is not guaranteed to work if edge weights
are negative or are floating point numbers
(overflows and roundoff errors can cause problems).
Input is assumed to be a MultiDiGraph with singleton edges.
"""
import heapq
all_dist = {} # dictionary of final distances from source_set to target_set
if combine_fn == 'sum':
# Classical dijkstra
for source in source_set:
dist = {} # dictionary of final distances from source
fringe=[] # use heapq with (distance,label) tuples
if degen_paths:
# Allow degenerate paths
# Add zero length path from source to source
seen = {source:0}
heapq.heappush(fringe,(0,source))
else:
# Don't allow degenerate paths
# Add all neighbors of source to start the algorithm
seen = dict()
for _, w, edgedata in G.edges_iter([source], data=True):
vw_dist = edgedata[weight_key]
seen[w] = vw_dist
heapq.heappush(fringe,(vw_dist,w))
while fringe:
(d,v)=heapq.heappop(fringe)
if v in dist:
continue # Already searched this node.
dist[v] = d # Update distance to this node
for _, w, edgedata in G.edges_iter([v], data=True):
vw_dist = dist[v] + edgedata[weight_key]
if w in dist:
if vw_dist < dist[w]:
raise ValueError('Contradictory paths found:',
'negative weights?')
elif w not in seen or vw_dist < seen[w]:
seen[w] = vw_dist
heapq.heappush(fringe,(vw_dist,w))
# Remove the entries that we are not interested in
for key in dist.keys():
if key not in target_set:
dist.pop(key)
# Add inf cost to target nodes not in dist
for t in target_set:
if t not in dist.keys():
dist[t] = float('inf')
# Save the distance info for this source
all_dist[source] = dist
elif combine_fn == 'max':
# Path length is (max edge length, total edge length)
for source in source_set:
dist = {} # dictionary of final distances from source
fringe=[] # use heapq with (bot_dist,dist,label) tuples
if degen_paths:
# Allow degenerate paths
# Add zero length path from source to source
seen = {source:(0,0)}
heapq.heappush(fringe,(0,0,source))
else:
# Don't allow degenerate paths
# Add all neighbors of source to start the algorithm
seen = dict()
for _, w, edgedata in G.edges_iter([source], data=True):
vw_dist = edgedata[weight_key]
seen[w] = (vw_dist,vw_dist)
heapq.heappush(fringe,(vw_dist,vw_dist,w))
while fringe:
(d_bot,d_sum,v)=heapq.heappop(fringe)
if v in dist:
continue # Already searched this node.
dist[v] = (d_bot,d_sum) # Update distance to this node
for _, w, edgedata in G.edges_iter([v], data=True):
vw_dist_bot = max(dist[v][0],edgedata[weight_key])
vw_dist_sum = dist[v][1] + edgedata[weight_key]
if w in dist:
if vw_dist_bot < dist[w][0]:
raise ValueError('Contradictory paths found:',
'negative weights?')
elif w not in seen or vw_dist_bot < seen[w][0] \
or (vw_dist_bot == seen[w][0] \
and vw_dist_sum < seen[w][1]):
seen[w] = (vw_dist_bot, vw_dist_sum)
heapq.heappush(fringe,(vw_dist_bot,vw_dist_sum,w))
# Remove the entries that we are not interested in
for key in dist.keys():
if key not in target_set:
dist.pop(key)
# Add inf cost to target nodes not in dist
for t in target_set:
if t not in dist.keys():
dist[t] = (float('inf'),float('inf'))
# Save the distance info for this source
all_dist[source] = dist
else:
assert(False)
return all_dist
def dijkstra_to_all(G, source, degen_paths = False, weight_key='weight'):
"""
Compute shortest paths and lengths in a weighted graph G.
Adapted from 'single_source_dijkstra_path' in NetworkX, available at
http://networkx.github.io.
Parameters
----------
G : NetworkX graph
source : Node label
Starting node for the path
    degen_paths: Boolean, optional (default: False)
        Controls whether degenerate paths (paths that do not traverse any
        edges) are acceptable.
    weight_key: String, optional (default: 'weight')
        Edge data key corresponding to the edge weight.
Returns
-------
    (dist, paths) : Tuple of dictionaries
        dist maps each reached node to its shortest distance from source,
        and paths maps each reached node to the corresponding shortest path.
Notes
---------
Edge weight attributes must be numerical.
Based on the Python cookbook recipe (119466) at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
This algorithm is not guaranteed to work if edge weights
are negative or are floating point numbers
(overflows and roundoff errors can cause problems).
"""
import heapq
dist = {} # dictionary of final distances
fringe=[] # use heapq with (distance,label) tuples
if degen_paths:
# Allow degenerate paths
# Add zero length path from source to source
seen = {source:0}
heapq.heappush(fringe,(0,source))
paths = {source:[source]}
else:
# Don't allow degenerate paths
# Add all neighbors of source to start the algorithm
paths = dict()
seen = dict()
for _, w, edgedata in G.edges_iter([source], data=True):
vw_dist = edgedata[weight_key]
paths[w] = [source, w]
seen[w] = vw_dist
heapq.heappush(fringe,(vw_dist,w))
while fringe:
(d,v)=heapq.heappop(fringe)
if v in dist:
continue # already searched this node.
dist[v] = d # Update distance to this node
for _, w, edgedata in G.edges_iter([v], data=True):
vw_dist = dist[v] + edgedata[weight_key]
if w in dist:
if vw_dist < dist[w]:
raise ValueError('Contradictory paths found:',
'negative weights?')
elif w not in seen or vw_dist < seen[w]:
seen[w] = vw_dist
paths[w] = paths[v]+[w]
heapq.heappush(fringe,(vw_dist,w))
return (dist, paths)
def source_to_target_dijkstra(G, source, target, combine_fn='sum',
degen_paths=False, cutoff=None, weight_key='weight'):
"""
Compute shortest paths and lengths in a weighted graph G.
Adapted from 'single_source_dijkstra_path' in NetworkX, available at
http://networkx.github.io.
Parameters
----------
G : NetworkX graph
source : Node label
Starting node for the path
target : Node label
Ending node for the path
    combine_fn: String, optional (default: 'sum')
        Either 'sum' or 'max'. 'sum' computes the classical shortest path;
        'max' minimizes the largest edge weight along the path (ties broken
        by the total weight), and the returned length is that largest edge
        weight.
    degen_paths: Boolean, optional (default: False)
        Controls whether degenerate paths (paths that do not traverse any
        edges) are acceptable.
cutoff : integer or float, optional (default: None)
Depth to stop the search. Only paths of length <= cutoff are returned.
weight_key: String, optional (default: 'weight')
Edge data key corresponding to the edge weight.
Returns
-------
distance,path : Tuple
Returns a tuple distance and path from source to target.
Examples
--------
>>> G=networkx.path_graph(5)
>>> length,path=source_to_target_dijkstra(G,0,4)
>>> print(length)
4
>>> path
[0, 1, 2, 3, 4]
Notes
---------
Edge weight attributes must be numerical.
Based on the Python cookbook recipe (119466) at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
This algorithm is not guaranteed to work if edge weights
are negative or are floating point numbers
(overflows and roundoff errors can cause problems).
"""
import heapq
dist = {} # dictionary of final distances
fringe=[] # use heapq with (distance,label) tuples
if combine_fn == 'sum':
if degen_paths:
# Allow degenerate paths
if source==target:
# Terminate immediately if source == target
return (0, [source])
else:
# Add zero length path from source to source
paths = {source:[source]} # dictionary of paths
seen = {source:0}
heapq.heappush(fringe,(0,source))
else:
# Don't allow degenerate paths
# Add all neighbors of source to start the algorithm
paths = dict()
seen = dict()
for _, w, edgedata in G.edges_iter([source], data=True):
vw_dist = edgedata[weight_key]
paths[w] = [source, w]
seen[w] = vw_dist
heapq.heappush(fringe,(vw_dist,w))
while fringe:
(d,v)=heapq.heappop(fringe)
if v in dist:
continue # already searched this node.
dist[v] = d # Update distance to this node
if v == target:
break # Discovered path to target node
for _, w, edgedata in G.edges_iter([v], data=True):
vw_dist = dist[v] + edgedata[weight_key]
if cutoff is not None:
if vw_dist>cutoff:
continue # Longer than cutoff, ignore this path
if w in dist:
if vw_dist < dist[w]:
raise ValueError('Contradictory paths found:',
'negative weights?')
elif w not in seen or vw_dist < seen[w]:
seen[w] = vw_dist
paths[w] = paths[v]+[w]
heapq.heappush(fringe,(vw_dist,w))
# Add inf cost to target if not in dist
if target not in dist.keys():
dist[target] = float('inf')
paths[target] = ['']
return (dist[target],paths[target])
elif combine_fn == 'max':
if degen_paths:
# Allow degenerate paths
if source==target:
# Terminate immediately if source == target
return (0, [source])
else:
# Add zero length path from source to source
paths = {source:[source]} # dictionary of paths
seen = {source:(0,0)}
heapq.heappush(fringe,(0,0,source))
else:
# Don't allow degenerate paths
# Add all neighbors of source to start the algorithm
paths = dict()
seen = dict()
for _, w, edgedata in G.edges_iter([source], data=True):
vw_dist = edgedata[weight_key]
paths[w] = [source, w]
seen[w] = (vw_dist, vw_dist)
heapq.heappush(fringe,(vw_dist,vw_dist,w))
while fringe:
(d_bot,d_sum,v)=heapq.heappop(fringe)
if v in dist:
continue # already searched this node.
dist[v] = (d_bot,d_sum) # Update distance to this node
if v == target:
break # Discovered path to target node
for _, w, edgedata in G.edges_iter([v], data=True):
vw_dist_bot = max(dist[v][0], edgedata[weight_key])
vw_dist_sum = dist[v][1] + edgedata[weight_key]
if cutoff is not None:
if vw_dist_bot>cutoff:
continue # Longer than cutoff, ignore this path
if w in dist:
if vw_dist_bot < dist[w][0]:
raise ValueError('Contradictory paths found:',
'negative weights?')
elif w not in seen or vw_dist_bot < seen[w][0] \
or (vw_dist_bot == seen[w][0] \
and vw_dist_sum < seen[w][1]):
seen[w] = (vw_dist_bot, vw_dist_sum)
paths[w] = paths[v]+[w]
heapq.heappush(fringe,(vw_dist_bot,vw_dist_sum,w))
# Add inf cost to target if not in dist
if target not in dist.keys():
dist[target] = (float('inf'),float('inf'))
paths[target] = ['']
return (dist[target][0],paths[target])
else:
assert(False)
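# Minimal usage sketch (not part of the library API): exercise the two main
# entry points on a tiny graph. It assumes NetworkX 1.x, whose MultiDiGraph
# still exposes the edges_iter API used above.
if __name__ == '__main__':
    import networkx as nx
    G = nx.MultiDiGraph()
    G.add_edge('a', 'b', weight=2)
    G.add_edge('b', 'c', weight=3)
    # Shortest 'sum' cost from the set {'a'} to the set {'c'}: {'a': {'c': 5}}
    print(subset_to_subset_dijkstra_path_value({'a'}, G, {'c'}))
    # Shortest path from 'a' to 'c': (5, ['a', 'b', 'c'])
    print(source_to_target_dijkstra(G, 'a', 'c'))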
| gpl-2.0 | 134,945,866,439,583,540 | 27.948357 | 81 | 0.665504 | false |
jness/MTG-Toolbox | WebMTG/models.py | 1 | 2426 | from django.db import models
from django.contrib.auth.models import User
class MTGSet(models.Model):
label = models.CharField(max_length=75, unique=True)
display_name = models.CharField(max_length=75)
magiccards_info = models.CharField(max_length=10, null=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.display_name
class MTGCard(models.Model):
magiccard_id = models.CharField(max_length=10)
gatherer_id = models.IntegerField()
tcgplayer_id = models.IntegerField()
card_name = models.CharField(max_length=75)
cost = models.CharField(max_length=20, null=True)
rarity = models.CharField(max_length=50)
type = models.CharField(max_length=50)
set = models.ForeignKey(MTGSet)
low = models.DecimalField(decimal_places=2, max_digits=10)
avg = models.DecimalField(decimal_places=2, max_digits=10)
high = models.DecimalField(decimal_places=2, max_digits=10)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.card_name
class MTGPrice(models.Model):
card = models.ForeignKey(MTGCard)
low = models.DecimalField(decimal_places=2, max_digits=10)
avg = models.DecimalField(decimal_places=2, max_digits=10)
high = models.DecimalField(decimal_places=2, max_digits=10)
created = models.DateTimeField()
modified = models.DateTimeField()
def __unicode__(self):
return self.card.card_name
class MTGPriceArchive(models.Model):
card = models.ForeignKey(MTGCard)
datelabel = models.CharField(max_length=12)
avg = models.DecimalField(decimal_places=2, max_digits=10)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.card.card_name
class MTGHash(models.Model):
card = models.ForeignKey(MTGCard)
hash = models.CharField(max_length=30)
created = models.DateTimeField(auto_now=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.card.card_name
class UserWatch(models.Model):
card = models.ForeignKey(MTGCard)
user = models.ForeignKey(User)
def __unicode__(self):
return self.card.card_name
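# Illustrative helper (not part of the original app): fetch the archived price
# history for a card by name, assuming the tables above have been populated.
def get_price_history(card_name):
    card = MTGCard.objects.get(card_name=card_name)
    archive = MTGPriceArchive.objects.filter(card=card).order_by('created')
    return [(entry.datelabel, entry.avg) for entry in archive]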
| gpl-2.0 | -2,379,355,143,322,243,000 | 35.208955 | 64 | 0.694559 | false |
lucasdavid/grapher | grapher/repositories/graph.py | 1 | 6676 | import abc
import py2neo
from py2neo import Graph, Node, Relationship
from . import base
from .. import errors, settings
class GraphRepository(base.Repository, metaclass=abc.ABCMeta):
_g = None
connection_string = settings.effective.DATABASES['neo4j']
@property
def g(self):
self._g = self._g or Graph('http://%s:%s@%s' % (
self.connection_string['username'],
self.connection_string['password'],
self.connection_string['uri'],
))
return self._g
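    # The settings entry read above is expected to look roughly like this
    # (illustrative values only):
    #   DATABASES = {'neo4j': {'username': 'neo4j',
    #                          'password': 'secret',
    #                          'uri': 'localhost:7474/db/data'}}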
def _build(self, identities):
"""Build entities or relationships based on their identities.
        :param identities: list of identities compatible with
            self.schema[self.schema.Meta.identity]['type'].
        :return: a list of nodes or relationships corresponding to
            the identities passed, in order.
"""
raise NotImplementedError
def find(self, identities):
entities = self._build(identities)
try:
self.g.pull(*entities)
except py2neo.GraphError:
raise errors.NotFoundError(('NOT_FOUND', identities))
return self.to_dict_of_dicts(entities)
def create(self, entities, raise_errors=False):
entities = self.from_dict_of_dicts(entities)
entities = self.g.create(*entities)
return self.to_dict_of_dicts(entities), {}
def update(self, entities, raise_errors=False):
entities = self.from_dict_of_dicts(entities)
self.g.push(*entities)
return self.to_dict_of_dicts(entities), {}
def delete(self, identities, raise_errors=False):
entities = self._build(identities)
entities = self.g.delete(*entities)
return self.to_dict_of_dicts(entities), {}
class GraphEntityRepository(GraphRepository, base.EntityRepository):
def _build(self, identities):
return [self.g.node(i) for i in identities]
def from_dict_of_dicts(self, entries):
nodes = []
for i, entry in entries.items():
# Find if the node exists on the database or is a new node.
if self.schema.Meta.identity in entry:
# The entry claims to have an identity,
# bind the node to a database node.
node = self.g.node(entry[self.schema.Meta.identity])
del entry[self.schema.Meta.identity]
else:
# That's a new entry. Create a new node.
node = Node(self.label)
node.properties.update(entry)
nodes.append(node)
return nodes
def to_dict_of_dicts(self, entities, indices=None):
entries, entities = [], list(entities)
for node in entities:
e = node.properties
e[self.schema.Meta.identity] = node._id
entries.append(e)
return super().to_dict_of_dicts(entries)
def all(self, skip=0, limit=None):
if limit is not None:
limit += skip
nodes = self.g.find(self.label, limit=limit)
# Discard :skip elements.
for _ in range(skip):
next(nodes)
return self.to_dict_of_dicts(nodes)
def where(self, skip=0, limit=None, **query):
if len(query) != 1:
raise ValueError('GraphRepository.where does not support '
'multiple parameter filtering yet.')
# TODO: Allow multiple keys when searching. This issue might help:
# http://stackoverflow.com/questions/27795874/py2neo-graph-find-one-with-multiple-key-values
query_item = query.popitem()
if query_item[0] == self.schema.Meta.identity:
return self.find((query_item[1],))
if limit is not None:
limit += skip
nodes = self.g.find(self.label, *query_item, limit=limit)
for _ in range(skip):
next(nodes)
return self.to_dict_of_dicts(nodes)
class GraphRelationshipRepository(GraphRepository, base.RelationshipRepository):
def _build(self, identities):
return [self.g.relationship(i) for i in identities]
def from_dict_of_dicts(self, entries):
entities, indices = super().from_dict_of_dicts(entries)
relationships = []
for r in entities:
if self.schema.Meta.identity in r:
relationship = self.g.relationship(r)
else:
origin = self.g.node(r['_origin'])
target = self.g.node(r['_target'])
relationship = Relationship(origin, self.label.upper(), target)
# Delete meta properties, if present.
if self.schema.Meta.identity in r:
del r[self.schema.Meta.identity]
if '_origin' in r:
del r['_origin']
if '_target' in r:
del r['_target']
relationship.properties.update(r)
relationships.append(relationship)
return relationships, indices
def to_dict_of_dicts(self, entities, indices=None):
relationships = []
for r in entities:
e = r.properties
e[self.schema.Meta.identity] = r._id
e['_origin'] = r.start_node._id
e['_target'] = r.end_node._id
relationships.append(e)
return super().to_dict_of_dicts(relationships, indices)
def all(self, skip=0, limit=None):
"""Match all relationships, as long as they share the same label
with this repository.
:param skip: the number of elements to skip when retrieving.
If None, none element should be skipped.
:param limit: the maximum length of the list retrieved.
If None, returns all elements after :skip.
"""
return self.match(skip=skip, limit=limit)
def match(self, origin=None, target=None, skip=0, limit=None):
if origin:
origin = self.g.node(origin)
if target:
target = self.g.node(target)
if limit is not None:
limit += skip
relationships = self.g.match(origin, self.label.upper(), target,
limit=limit)
for _ in range(skip):
next(relationships)
return self.to_dict_of_dicts(relationships)
def where(self, skip=0, limit=None, **query):
if len(query) != 1:
            raise ValueError('GraphRepository.where does not support '
                             'multiple parameter filtering yet.')
query_item = query.popitem()
if query_item[0] == self.schema.Meta.identity:
return self.find((query_item[1],))
raise NotImplementedError
| mit | -8,926,344,130,431,322,000 | 30.342723 | 100 | 0.587627 | false |
ktnyt/chainer | chainer/training/extensions/variable_statistics_plot.py | 1 | 13261 | from __future__ import division
import os
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.training import extension
from chainer.training import trigger as trigger_module
_available = None
def _try_import_matplotlib():
global matplotlib, _available
global _plot_color, _plot_color_trans, _plot_common_kwargs
try:
import matplotlib
_available = True
except ImportError:
_available = False
if _available:
if hasattr(matplotlib.colors, 'to_rgba'):
_to_rgba = matplotlib.colors.to_rgba
else:
# For matplotlib 1.x
_to_rgba = matplotlib.colors.ColorConverter().to_rgba
_plot_color = _to_rgba('#1f77b4') # C0 color
_plot_color_trans = _plot_color[:3] + (0.2,) # apply alpha
_plot_common_kwargs = {
'alpha': 0.2, 'linewidth': 0, 'color': _plot_color_trans}
def _check_available():
if _available is None:
_try_import_matplotlib()
if not _available:
warnings.warn('matplotlib is not installed on your environment, '
'so nothing will be plotted at this time. '
'Please install matplotlib to plot figures.\n\n'
' $ pip install matplotlib\n')
def _unpack_variables(x, memo=None):
if memo is None:
memo = ()
if isinstance(x, chainer.Variable):
memo += (x,)
elif isinstance(x, chainer.Link):
memo += tuple(x.params(include_uninit=True))
elif isinstance(x, (list, tuple)):
for xi in x:
memo += _unpack_variables(xi)
return memo
class Reservoir(object):
"""Reservoir sample with a fixed sized buffer."""
def __init__(self, size, data_shape, dtype=numpy.float32):
self.size = size
self.data = numpy.zeros((size,) + data_shape, dtype=dtype)
self.idxs = numpy.zeros((size,), dtype=numpy.int32)
self.counter = 0
def add(self, x, idx=None):
if self.counter < self.size:
self.data[self.counter] = x
self.idxs[self.counter] = idx or self.counter
elif self.counter >= self.size and \
numpy.random.random() < self.size / float(self.counter + 1):
i = numpy.random.randint(self.size)
self.data[i] = x
self.idxs[i] = idx or self.counter
self.counter += 1
def get_data(self):
idxs = self.idxs[:min(self.counter, self.size)]
sorted_args = numpy.argsort(idxs)
return idxs[sorted_args], self.data[sorted_args]
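# Minimal illustration (hypothetical values): keep a uniform sample of at most
# three scalar observations out of a stream of one hundred.
#
#   r = Reservoir(3, data_shape=())
#   for i in range(100):
#       r.add(float(i))
#   idxs, data = r.get_data()  # 3 sorted iteration indices and their values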
class Statistician(object):
"""Helper to compute basic NumPy-like statistics."""
def __init__(self, collect_mean, collect_std, percentile_sigmas):
self.collect_mean = collect_mean
self.collect_std = collect_std
self.percentile_sigmas = percentile_sigmas
def __call__(self, x, axis=0, dtype=None, xp=None):
if axis is None:
axis = tuple(range(x.ndim))
elif not isinstance(axis, (tuple, list)):
axis = axis,
return self.collect(x, axis)
def collect(self, x, axis):
out = dict()
if self.collect_mean:
out['mean'] = x.mean(axis=axis)
if self.collect_std:
out['std'] = x.std(axis=axis)
if self.percentile_sigmas:
xp = backend.get_array_module(x)
if xp is numpy:
p = numpy.percentile(x, self.percentile_sigmas, axis=axis)
else:
# TODO(hvy): Use percentile from CuPy once it is supported
p = cuda.to_gpu(
numpy.percentile(
cuda.to_cpu(x), self.percentile_sigmas, axis=axis))
out['percentile'] = p
return out
class VariableStatisticsPlot(extension.Extension):
"""Trainer extension to plot statistics for :class:`Variable`\\s.
This extension collects statistics for a single :class:`Variable`, a list
of :class:`Variable`\\s or similarly a single or a list of
:class:`Link`\\s containing one or more :class:`Variable`\\s. In case
multiple :class:`Variable`\\s are found, the means are computed. The
collected statistics are plotted and saved as an image in the directory
specified by the :class:`Trainer`.
Statistics include mean, standard deviation and percentiles.
This extension uses reservoir sampling to preserve memory, using a fixed
size running sample. This means that collected items in the sample are
discarded uniformly at random when the number of items becomes larger
than the maximum sample size, but each item is expected to occur in the
sample with equal probability.
Args:
targets (:class:`Variable`, :class:`Link` or list of either):
Parameters for which statistics are collected.
max_sample_size (int):
Maximum number of running samples.
report_data (bool):
If ``True``, data (e.g. weights) statistics are plotted. If
``False``, they are neither computed nor plotted.
report_grad (bool):
If ``True``, gradient statistics are plotted. If ``False``, they
are neither computed nor plotted.
plot_mean (bool):
If ``True``, means are plotted. If ``False``, they are
neither computed nor plotted.
plot_std (bool):
If ``True``, standard deviations are plotted. If ``False``, they
are neither computed nor plotted.
percentile_sigmas (float or tuple of floats):
Percentiles to plot in the range :math:`[0, 100]`.
trigger:
Trigger that decides when to save the plots as an image. This is
distinct from the trigger of this extension itself. If it is a
tuple in the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``, it
is passed to :class:`IntervalTrigger`.
file_name (str):
Name of the output image file under the output directory.
        figsize (tuple of int):
            Matplotlib ``figsize`` argument that specifies the size of the
            output image.
        marker (str):
            Matplotlib ``marker`` argument that specifies the marker style of
            the plots.
        grid (bool):
            Matplotlib ``grid`` argument that specifies whether grids are
            rendered in the plots or not.
"""
def __init__(self, targets, max_sample_size=1000,
report_data=True, report_grad=True,
plot_mean=True, plot_std=True,
percentile_sigmas=(
0, 0.13, 2.28, 15.87, 50, 84.13, 97.72, 99.87, 100),
trigger=(1, 'epoch'), file_name='statistics.png',
figsize=None, marker=None, grid=True):
if file_name is None:
            raise ValueError('Missing output file name of statistics plot')
self._vars = _unpack_variables(targets)
if len(self._vars) == 0:
raise ValueError(
                'Need at least one variable for which to collect statistics.'
'\nActual: 0 <= 0')
if not any((plot_mean, plot_std, bool(percentile_sigmas))):
raise ValueError('Nothing to plot')
self._keys = []
if report_data:
self._keys.append('data')
if report_grad:
self._keys.append('grad')
self._report_data = report_data
self._report_grad = report_grad
self._statistician = Statistician(
collect_mean=plot_mean, collect_std=plot_std,
percentile_sigmas=percentile_sigmas)
self._plot_mean = plot_mean
self._plot_std = plot_std
self._plot_percentile = bool(percentile_sigmas)
self._trigger = trigger_module.get_trigger(trigger)
self._file_name = file_name
self._figsize = figsize
self._marker = marker
self._grid = grid
if not self._plot_percentile:
n_percentile = 0
else:
if not isinstance(percentile_sigmas, (list, tuple)):
n_percentile = 1 # scalar, single percentile
else:
n_percentile = len(percentile_sigmas)
self._data_shape = (
len(self._keys), int(plot_mean) + int(plot_std) + n_percentile)
self._samples = Reservoir(max_sample_size, data_shape=self._data_shape)
@staticmethod
def available():
_check_available()
return _available
def __call__(self, trainer):
if self.available():
# Dynamically import pyplot to call matplotlib.use()
# after importing chainer.training.extensions
import matplotlib.pyplot as plt
else:
return
xp = backend.get_array_module(self._vars[0].data)
stats = xp.zeros(self._data_shape, dtype=xp.float32)
for i, k in enumerate(self._keys):
xs = []
for var in self._vars:
x = getattr(var, k, None)
if x is not None:
xs.append(x.ravel())
if len(xs) > 0:
stat_dict = self._statistician(
xp.concatenate(xs, axis=0), axis=0, xp=xp)
stat_list = []
if self._plot_mean:
stat_list.append(xp.atleast_1d(stat_dict['mean']))
if self._plot_std:
stat_list.append(xp.atleast_1d(stat_dict['std']))
if self._plot_percentile:
stat_list.append(xp.atleast_1d(stat_dict['percentile']))
stats[i] = xp.concatenate(stat_list, axis=0)
if xp != numpy:
stats = cuda.to_cpu(stats)
self._samples.add(stats, idx=trainer.updater.iteration)
if self._trigger(trainer):
file_path = os.path.join(trainer.out, self._file_name)
self.save_plot_using_module(file_path, plt)
def save_plot_using_module(self, file_path, plt):
nrows = int(self._plot_mean or self._plot_std) \
+ int(self._plot_percentile)
ncols = len(self._keys)
fig, axes = plt.subplots(
nrows, ncols, figsize=self._figsize, sharex=True)
if not isinstance(axes, numpy.ndarray): # single subplot
axes = numpy.asarray([axes])
if nrows == 1:
axes = axes[None, :]
elif ncols == 1:
axes = axes[:, None]
assert axes.ndim == 2
idxs, data = self._samples.get_data()
# Offset to access percentile data from `data`
offset = int(self._plot_mean) + int(self._plot_std)
n_percentile = data.shape[-1] - offset
n_percentile_mid_floor = n_percentile // 2
n_percentile_odd = n_percentile % 2 == 1
for col in six.moves.range(ncols):
row = 0
ax = axes[row, col]
ax.set_title(self._keys[col]) # `data` or `grad`
if self._plot_mean or self._plot_std:
if self._plot_mean and self._plot_std:
ax.errorbar(
idxs, data[:, col, 0], data[:, col, 1],
color=_plot_color, ecolor=_plot_color_trans,
label='mean, std', marker=self._marker)
else:
if self._plot_mean:
label = 'mean'
elif self._plot_std:
label = 'std'
ax.plot(
idxs, data[:, col, 0], color=_plot_color, label=label,
marker=self._marker)
row += 1
if self._plot_percentile:
ax = axes[row, col]
for i in six.moves.range(n_percentile_mid_floor + 1):
if n_percentile_odd and i == n_percentile_mid_floor:
# Enters at most once per sub-plot, in case there is
# only a single percentile to plot or when this
                        # percentile is the mid percentile and the number of
                        # percentiles is odd
ax.plot(
idxs, data[:, col, offset + i], color=_plot_color,
label='percentile', marker=self._marker)
else:
if i == n_percentile_mid_floor:
                            # Innermost percentile pair when the total
                            # number of percentiles is even
label = 'percentile'
else:
label = '_nolegend_'
ax.fill_between(
idxs,
data[:, col, offset + i],
data[:, col, -i - 1],
label=label,
**_plot_common_kwargs)
ax.set_xlabel('iteration')
for ax in axes.ravel():
ax.legend()
if self._grid:
ax.grid()
ax.set_axisbelow(True)
fig.savefig(file_path)
plt.close()
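# Typical wiring (sketch): given a standard Trainer called `trainer` and a
# Link called `model`, the following writes `statistics.png` to the trainer
# output directory once per epoch:
#
#   trainer.extend(VariableStatisticsPlot(model))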
| mit | -7,535,819,348,694,704,000 | 35.938719 | 79 | 0.544529 | false |
Taywee/texttables | texttables/fixed/_writer.py | 1 | 8641 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2017 Taylor C. Richberger <[email protected]>
# This code is released under the license described in the LICENSE file
from __future__ import division, absolute_import, print_function, unicode_literals
from six.moves import zip
from texttables.dialect import Dialect
class writer(object):
"""Fixed-table document writer, writing tables with predefined column-sizes.
The :class:`texttables.Dialect` class is used to configure how this writes
tables. This works as a context manager, in which case :meth:`writetop` and
:meth:`writebottom` will be called automatically."""
def __init__(self, file, widths, dialect=None, **fmtparams):
"""
:param file: A writable file object with a ``write`` method
:param widths: An iterable of widths, containing the field sizes of the table.
Each width may be prefixed with <, >, =, or ^, for alignment through
the Python format specification.
:param dialect: A dialect class or object used to define aspects of the
table. The stored dialect is always an instance of
:class:`texttables.Dialect`, not necessarily the passed-in object.
All the attributes of Dialect are grabbed from this object using
getattr.
:param fmtparams: parameters to override the parameters in
:obj:`dialect`.
"""
self._file = file
self._widths = tuple(widths)
self.dialect = dialect
for attribute in dir(self.dialect):
if '__' not in attribute:
if attribute in fmtparams:
setattr(self._dialect, attribute, fmtparams[attribute])
self.__wroterow = False
self.__wroteheader = False
def __enter__(self):
if self.dialect.top_border:
self.writetop()
return self
def __exit__(self, type, value, traceback):
if self.dialect.bottom_border:
self.writebottom()
@property
def file(self):
'''The file object that was passed in to the constructor. It is not
safe to change this object until you are finished using the class'''
return self._file
@property
def widths(self):
'''The widths that were passed into the constructor, as a tuple.'''
return self._widths
@property
def dialect(self):
'''The :class:`texttables.Dialect` constructed from the passed-in
dialect. This is always unique, and is not the same object that is
passed in. Assigning to this will also likewise construct a new
:class:`texttables.Dialect`, not simply assign the attribute.'''
return self._dialect
@dialect.setter
def dialect(self, value):
self._dialect = Dialect()
if value:
for attribute in dir(self._dialect):
if '__' not in attribute:
setattr(self._dialect, attribute, getattr(value, attribute))
def _row(self, row):
dialect = self.dialect
contents = list()
for cell, rawwidth in zip(row, self._widths):
swidth = str(rawwidth)
alignment = '<'
try:
width = int(swidth)
except ValueError:
alignment = swidth[0]
width = int(swidth[1:])
contents.append('{content!s:{alignment}{width}.{width}s}'.format(
content=cell,
alignment=alignment,
width=width))
row = ''
if dialect.left_border:
row = dialect.left_border
row += dialect.cell_delimiter.join(contents)
if dialect.right_border:
row += dialect.right_border
return row
def _rowdelim(self, delimiter):
dialect = self.dialect
delimcontents = list()
for rawwidth in self._widths:
swidth = str(rawwidth)
try:
width = int(swidth)
except ValueError:
width = int(swidth[1:])
delimcontents.append(delimiter * width)
delim = ''
if dialect.left_border:
delim = dialect.corner_border
delim += dialect.corner_border.join(delimcontents)
if dialect.right_border:
delim += dialect.corner_border
return delim
def writerow(self, row):
'''Write a single row out to :meth:`file`, respecting any delimiters and
header separators necessary.
:param row: An iterable representing the row to write
'''
dialect = self.dialect
if self.__wroteheader:
if dialect.header_delimiter and dialect.corner_border:
self._file.write(self._rowdelim(dialect.header_delimiter))
self._file.write(dialect.lineterminator)
elif self.__wroterow:
if dialect.row_delimiter and dialect.corner_border:
self._file.write(self._rowdelim(dialect.row_delimiter))
self._file.write(dialect.lineterminator)
self._file.write(self._row(row))
self._file.write(dialect.lineterminator)
self.__wroteheader = False
self.__wroterow = True
def writerows(self, rows):
'''Write a multiple rows out to :meth:`file`, respecting any delimiters
and header separators necessary.
:param rows: An iterable of iterables representing the rows to write
'''
for row in rows:
self.writerow(row)
def writeheader(self, row):
'''Write the header out to :meth:`file`.
:param row: An iterable representing the row to write as a header
'''
self.writerow(row)
self.__wroteheader = True
def writetop(self):
'''Write the top of the table out to :meth:`file`.'''
dialect = self.dialect
self._file.write(self._rowdelim(dialect.top_border))
self._file.write(dialect.lineterminator)
def writebottom(self):
'''Write the bottom of the table out to :meth:`file`.'''
dialect = self.dialect
self._file.write(self._rowdelim(dialect.bottom_border))
self._file.write(dialect.lineterminator)
class DictWriter(object):
"""Fixed-table document writer, writing tables with predefined column-sizes
and names through dictionary rows passed in.
The :class:`texttables.Dialect` class is used to configure how this writes
tables. This is a simple convenience frontend to
:class:`texttables.fixed.writer`.
This works as a context manager, in which case :meth:`writetop` and
:meth:`writebottom` will be called automatically.
"""
def __init__(self, file, fieldnames, widths, dialect=None, **fmtparams):
"""
All the passed in construction parameters are passed to the
:class:`texttables.fixed.writer` constructor literally. All properties
and most methods also align directly as well.
"""
self._writer = writer(file, widths, dialect, **fmtparams)
self._fieldnames = fieldnames
def __enter__(self):
self._writer.__enter__()
return self
def __exit__(self, type, value, traceback):
self._writer.__exit__(type, value, traceback)
return self
@property
def file(self):
return self._writer.file
@property
def widths(self):
return self._writer.widths
@property
def dialect(self):
return self._writer.dialect
@dialect.setter
def dialect(self, value):
self._writer.dialect = value
@property
def fieldnames(self):
return self._fieldnames
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
def writeheader(self):
'''Write the header based on :meth:`fieldnames`.'''
self._writer.writeheader(self._fieldnames)
def writerow(self, row):
'''Write a single row out to :meth:`file`, respecting any delimiters and
header separators necessary.
:param row: A dictionary representing the row to write
'''
self._writer.writerow(row[field] for field in self._fieldnames)
def writerows(self, rows):
'''Write multiple rows out to :meth:`file`, respecting any delimiters and
header separators necessary.
        :param rows: An iterable of dictionaries representing the rows to write
'''
for row in rows:
self.writerow(row)
def writetop(self):
self._writer.writetop()
def writebottom(self):
self._writer.writebottom()
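# Usage sketch (not part of the module): render a small table to stdout with
# the default Dialect; the widths follow the format described in `writer`.
if __name__ == '__main__':
    import sys
    with DictWriter(sys.stdout, ['name', 'size'], ['<12', '>8']) as example:
        example.writeheader()
        example.writerow({'name': 'example.txt', 'size': 1024})
        example.writerow({'name': 'other.bin', 'size': 2048})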
| mit | 2,384,864,735,107,385,000 | 33.422311 | 86 | 0.612731 | false |
p99tunnel/p99tunnel | setup_database/create_tables.py | 1 | 1180 | #!/usr/bin/env python3
import db
# The limits on character fields were determined by looking at a sample of logs
# and figuring out how big things could be.
CREATE_TABLE_STATEMENTS = [
"""CREATE TABLE characters (
id SERIAL PRIMARY KEY,
name varchar(16)
);""",
"""CREATE TABLE items (
id SERIAL PRIMARY KEY,
wiki_link varchar(128),
canonical_name varchar(128)
);""",
"""CREATE TABLE item_names (
id SERIAL PRIMARY KEY,
item_id integer REFERENCES items(id),
name varchar(128)
);""",
"""CREATE TABLE raw_auctions (
id SERIAL PRIMARY KEY,
timestamp timestamp,
character_id integer REFERENCES characters(id),
message varchar(1024)
);""",
"""CREATE TABLE clean_auctions (
id SERIAL PRIMARY KEY,
raw_auction_id integer REFERENCES raw_auctions(id),
character_id integer REFERENCES characters(id),
item_id integer REFERENCES items(id),
timestamp timestamp,
is_selling bool,
price integer
);""",
]
def main():
with db.connect() as conn:
with conn.cursor() as cur:
for statement in CREATE_TABLE_STATEMENTS:
cur.execute(statement)
if __name__ == '__main__':
main()
| apache-2.0 | 2,982,044,504,163,306,500 | 22.6 | 79 | 0.658475 | false |
thorfi/pass-words-py | pass-words.py | 1 | 6672 | #!/usr/bin/env python
#
# The MIT License (MIT)
#
# Copyright (c) 2014-2021 David Goh <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Python script to generate correct horse battery staple passwords on Unix
# http://xkcd.com/936/
from random import SystemRandom
import getopt
import itertools
import math
import os
import os.path
import string
import sys
COPYRIGHT = "Copyright (c) 2014 David Goh <[email protected]>"
AUTHOR = "David Goh <[email protected]> - https://goh.id.au/~david/"
SOURCE = "GIT: https://github.com/thorfi/pass-words-py"
LICENSE = "MIT License - https://github.com/thorfi/pass-words-py/blob/master/LICENSE"
DEFAULT_MAX_WORD_LEN = 8
DEFAULT_MIN_WORD_LEN = 4
DEFAULT_WORD_COUNT = 5
DEFAULT_WORD_SEPARATOR = " "
WORDS_SUB_PATHS = (
"share/dict/words",
"dict/words",
"share/words",
"words",
)
DEFAULT_WORDS_PATHS = set()
for p in os.environ["PATH"].split(":"):
p = os.path.dirname(p.rstrip(os.path.sep))
for w in WORDS_SUB_PATHS:
w_path = os.path.join(p, w)
if os.path.isfile(w_path):
DEFAULT_WORDS_PATHS.add(w_path)
def usage_exit(msg=None):
"""Exit with a potential error message."""
exitcode = 0
f = sys.stderr if msg else sys.stdout
if msg is not None:
print("Error:", msg, file=f)
exitcode = 1
print("Usage:", sys.argv[0], "[...]", file=f)
print(
"""
Python script that generates correct horse battery staple passwords from Unix dictionaries
See https://xkcd.com/936/
-c n: count n words in password (Default: {})
-m N: max length of words to use (Default: {})
-n n: min length of words to use (Default: {})
-s s: word separator to use (Default: {!r})
-p /path/to/words: Add this file to look for words in.
If none specified, file(s) used: {}
-v: verbose print of more common password entropies for comparison
-h: print this help
""".format(
DEFAULT_WORD_COUNT,
DEFAULT_MAX_WORD_LEN,
DEFAULT_MIN_WORD_LEN,
DEFAULT_WORD_SEPARATOR,
":".join(DEFAULT_WORDS_PATHS),
),
file=f,
)
sys.exit(exitcode)
def main():
words_paths = []
word_count = DEFAULT_WORD_COUNT
max_word_len = DEFAULT_MAX_WORD_LEN
min_word_len = DEFAULT_MIN_WORD_LEN
word_separator = DEFAULT_WORD_SEPARATOR
verbose = False
try:
opts, remainder_args = getopt.getopt(
sys.argv[1:],
"p:c:m:n:s:vh",
[
"path=",
"count=",
"max=",
"min=",
"sep=",
"verbose",
"help",
],
)
except getopt.GetoptError as exc:
usage_exit(str(exc))
assert False
for o, a in opts:
if o in ("-c", "--count"):
try:
word_count = int(a)
except ValueError as exc:
usage_exit(f"--count={a!r} {str(exc)}")
elif o in ("-m", "--max"):
try:
max_word_len = int(a)
except ValueError as exc:
usage_exit(f"--max={a!r} {str(exc)}")
elif o in ("-n", "--min"):
try:
min_word_len = int(a)
except ValueError as exc:
usage_exit(f"--min={a!r} {str(exc)}")
elif o in ("-p", "--path"):
if not os.path.isfile(a):
usage_exit(f"--path={a!r} is not a file")
words_paths.append(a)
elif o in ("-s", "--sep"):
word_separator = a
elif o in ("-v", "--verbose"):
verbose = True
elif o in ("-h", "--help"):
usage_exit()
else:
usage_exit(f"unknown option {o} {a!r}")
if max_word_len < min_word_len:
usage_exit(f"--max={max_word_len} < --min={min_word_len}")
min_word_len = DEFAULT_MIN_WORD_LEN
entropies = []
if verbose:
desc_texts = (
("ASCII lowercase letters", string.ascii_lowercase),
("ASCII letters", string.ascii_letters),
("ASCII letters or digits", string.ascii_letters + string.digits),
("ASCII printable non whitespace", "".join(string.printable.split())),
)
counts = (8, 10, 16, 20)
for (desc, text), n in itertools.product(desc_texts, counts):
len_text = len(text)
choices = len_text ** n
choices_desc = f"{n:2d}*[{len_text:d} {desc}]"
entropies.append((choices, choices_desc))
if not words_paths:
words_paths = list(DEFAULT_WORDS_PATHS)
words = set()
for wp in words_paths:
with open(wp) as wf:
for line in (line.strip().lower() for line in wf):
if min_word_len < len(line) < max_word_len:
words.add(line)
def count_choices(len_w, w_count):
if w_count == 1:
return len_w
assert w_count > 1
return len_w * count_choices(len_w - 1, w_count - 1)
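    # Worked example (hypothetical numbers): picking 5 distinct words out of a
    # 10,000-word dictionary gives 10000*9999*9998*9997*9996 orderings, which
    # is roughly 2**66.4, i.e. about 66 bits of entropy.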
len_words = len(words)
choices = count_choices(len_words, word_count)
choices_desc = (
f"{word_count:2d}*[{len_words:d} words ({min_word_len:d}-{max_word_len:d} letters) from {':'.join(words_paths)}]"
)
entropies.append((choices, choices_desc))
if len(entropies) > 1:
print("Bit Entropy comparisons")
entropies.sort()
for n, d in entropies:
print(f"{math.log(n, 2):5.1f} bits - {d}")
random = SystemRandom()
words = random.sample(list(words), word_count)
for word in words:
print(word)
print(word_separator.join(words))
if __name__ == "__main__":
main()
| mit | 7,877,558,534,059,127,000 | 31.546341 | 121 | 0.583633 | false |
kickstandproject/python-ripcordclient | ripcordclient/common/utils.py | 1 | 2525 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Author: Paul Belanger <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
def env(*vars, **kwargs):
"""Search for the first defined of possibly many env vars
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
def add_show_list_common_argument(parser):
parser.add_argument(
'-D', '--show-details',
help='show detailed info',
action='store_true',
default=False, )
parser.add_argument(
'--show_details',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument(
'--fields',
help=argparse.SUPPRESS,
action='append',
default=[])
parser.add_argument(
'-F', '--field',
dest='fields', metavar='FIELD',
help='specify the field(s) to be returned by server,'
' can be repeated',
action='append',
default=[])
def get_item_properties(item, fields, mixed_case_fields=[]):
"""Return a tuple containing the item properties.
:param item: a single item resource (e.g. Server, Tenant, etc)
:param fields: tuple of strings with the desired field names
:param mixed_case_fields: tuple of field names to preserve case
"""
row = []
for field in fields:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
if not hasattr(item, field_name) and isinstance(item, dict):
data = item[field_name]
else:
data = getattr(item, field_name, '')
if data is None:
data = ''
row.append(data)
return tuple(row)
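# Illustrative example: extracting display columns from an API result dict:
#
#   get_item_properties({'id': '42', 'display_name': 'node-1'},
#                       ('ID', 'Display Name'))
#   # -> ('42', 'node-1')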
| apache-2.0 | -3,201,683,492,522,797,600 | 29.421687 | 74 | 0.629703 | false |
carthagecollege/django-djtools | djtools/context_processors.py | 1 | 1233 | from django.conf import settings
def sitevars(request):
context = {}
try:
context['static_root'] = settings.MEDIA_ROOT
context['media_root'] = settings.MEDIA_ROOT
context['media_url'] = settings.MEDIA_URL
context['static_url'] = settings.STATIC_URL
context['server_url'] = settings.SERVER_URL
context['root_url'] = settings.ROOT_URL
context['login_url'] = settings.LOGIN_URL
context['logout_url'] = settings.LOGOUT_URL
context['templates_debug'] = settings.TEMPLATES[0]['OPTIONS']['debug']
context['debug'] = settings.DEBUG
# UI helpers for email
context['dl_dt'] = '''
style="background:#efefef; color:#000; float:left; font-weight:bold; margin-right:10px; padding:5px; width:200px;"
'''
context['dl_dd'] = '''
style="margin:2px 0; padding:5px 0;"
'''
context['dl_detail'] = '''
style="margin-bottom:5px;"
'''
context['dd_desc'] = '''
style="margin-bottom:7px 0;"
'''
context['clear'] = '''
style="clear:both;"
'''
except:
pass
return context
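# Illustrative activation (assuming a standard Django settings layout): add
# 'djtools.context_processors.sitevars' to
# TEMPLATES[0]['OPTIONS']['context_processors'] in settings.py so that every
# template rendered with a RequestContext receives the variables above.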
| unlicense | 4,474,720,833,405,657,600 | 33.228571 | 126 | 0.534469 | false |
anilpai/leetcode | Matrix/MatrixRotate90deg.py | 1 | 1767 | # An Inplace function to rotate a N x N matrix by 90 degrees
# In both clockwise and counter clockwise direction
class Solution(object):
def Rotate90Clock(self, mat):
N = len(mat)
for x in range(int(N/2)):
for y in range(x, N-x-1):
temp = mat[x][y]
'''
Move values from left to top.
Move values from bottom to left.
Move values from right to bottom.
Move values from top to right.
'''
mat[x][y] = mat[N-1-y][x]
mat[N-1-y][x] = mat[N-1-x][N-1-y]
mat[N-1-x][N-1-y] = mat[y][N-1-x]
mat[y][N-1-x] = temp
return mat
def Rotate90CounterClock(self, mat):
N = len(mat)
for x in range(0, int(N/2)):
for y in range(x, N-x-1):
temp = mat[x][y]
'''
Move values from right to top.
Move values from bottom to right.
Move values from left to bottom.
                Move values from top to left.
'''
mat[x][y] = mat[y][N-1-x]
mat[y][N-1-x] = mat[N-1-x][N-1-y]
mat[N-1-x][N-1-y] = mat[N-1-y][x]
mat[N-1-y][x] = temp
return mat
def printMatrix(self, mat):
# Utility Function
print("######")
for row in mat:
print(row)
if __name__=='__main__':
s = Solution()
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
]
print(matrix)
s.printMatrix(matrix)
s.printMatrix(s.Rotate90Clock(matrix))
s.printMatrix(s.Rotate90CounterClock(matrix)) | mit | -1,409,958,039,993,768,700 | 26.625 | 60 | 0.449915 | false |
isra17/nrs | tests/test_nsisfile.py | 1 | 2037 | from nrs import nsisfile
import pytest
import utils
import os
import sys
EMPTY_PATH = os.path.join(utils.SAMPLES_DIR, 'empty')
EXAMPLE1_PATH = os.path.join(utils.SAMPLES_DIR, 'example1.exe')
VOPACKAGE_PATH = os.path.join(utils.SAMPLES_DIR, 'vopackage.exe')
def test_non_nsis():
with pytest.raises(nsisfile.HeaderNotFound):
nsis = nsisfile.NSIS.from_path(os.path.join(utils.SAMPLES_DIR, 'empty'))
def test_get_version():
with open(EXAMPLE1_PATH, 'rb') as fd:
nsis = nsisfile.NSIS(fd)
assert nsis.version_major == '3'
def test_get_string():
with open(EXAMPLE1_PATH, 'rb') as fd:
nsis = nsisfile.NSIS(fd)
assert nsis.get_string(0x4e) == 'Example1'
assert nsis.get_string(0x4a) == '$__SHELL_16_25__\\Example1'
assert nsis.get_string(0x57) == '$INSTALLDIR'
assert nsis.get_string(0x87) == '$(LangString2) Setup'
def test_get_raw_string():
with open(EXAMPLE1_PATH, 'rb') as fd:
nsis = nsisfile.NSIS(fd)
assert nsis.get_raw_string(0x4e) == b'Example1'
assert nsis.get_raw_string(0x4a) == b'\x02\x10\x19\\Example1'
assert nsis.get_raw_string(0x57) == b'\x03\x95\x80'
assert nsis.get_raw_string(0x87) == b'\x01\x82\x80 Setup'
def test_get_all_strings():
with open(EXAMPLE1_PATH, 'rb') as fd:
nsis = nsisfile.NSIS(fd)
strings = nsis.get_all_strings()
assert 'example1.nsi' in strings
assert '$INSTALLDIR' in strings
def test_block():
with open(EXAMPLE1_PATH, 'rb') as fd:
nsis = nsisfile.NSIS(fd)
assert len(nsis.block(nsisfile.NB_PAGES)) == 0xc0
assert len(nsis.block(nsisfile.NB_SECTIONS)) == 0x418
assert len(nsis.block(nsisfile.NB_ENTRIES)) == 0x54
assert len(nsis.block(nsisfile.NB_STRINGS)) == 0x362
assert len(nsis.block(nsisfile.NB_LANGTABLES)) == 0xe6
assert len(nsis.block(nsisfile.NB_CTLCOLORS)) == 0x0
assert len(nsis.block(nsisfile.NB_BGFONT)) == 0x8
assert len(nsis.block(nsisfile.NB_DATA)) == 0x8
| gpl-3.0 | 3,935,376,642,075,163,000 | 37.433962 | 80 | 0.644084 | false |
Guts/DicoGIS | dicogis/georeaders/Infos_Rasters.py | 1 | 14889 | #! python3 # noqa: E265
# ----------------------------------------------------------------------------
# Name: Infos Rasters
# Purpose:      Use the GDAL library to extract information about
#                  geographic raster data. It permits a more friendly use as
#                  a submodule.
#
# Author: Julien Moura (https://github.com/Guts/)
# ----------------------------------------------------------------------------
# ############################################################################
# ######### Libraries #############
# #################################
# Standard library
import logging
from collections import OrderedDict
from os import chdir, path
from time import localtime, strftime
# 3rd party libraries
try:
from osgeo import gdal, osr
from osgeo.gdalconst import GA_ReadOnly
except ImportError:
import gdal
import osr
from gdalconst import GA_ReadOnly
# ############################################################################
# ######### Globals ############
# ##############################
logger = logging.getLogger(__name__)
# ############################################################################
# ######### Classes #############
# #################################
class GdalErrorHandler(object):
def __init__(self):
"""Callable error handler.
see: http://trac.osgeo.org/gdal/wiki/PythonGotchas#Exceptionsraisedincustomerrorhandlersdonotgetcaught
and http://pcjericks.github.io/py-gdalogr-cookbook/gdal_general.html#install-gdal-ogr-error-handler
"""
self.err_level = gdal.CE_None
self.err_type = 0
self.err_msg = ""
def handler(self, err_level, err_type, err_msg):
"""Make errors messages more readable."""
# available types
err_class = {
gdal.CE_None: "None",
gdal.CE_Debug: "Debug",
gdal.CE_Warning: "Warning",
gdal.CE_Failure: "Failure",
gdal.CE_Fatal: "Fatal",
}
# getting type
err_type = err_class.get(err_type, "None")
# cleaning message
err_msg = err_msg.replace("\n", " ")
# disabling GDAL exceptions raising to avoid future troubles
gdal.DontUseExceptions()
# propagating
self.err_level = err_level
self.err_type = err_type
self.err_msg = err_msg
# end of function
return self.err_level, self.err_type, self.err_msg
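    # Hedged usage sketch (mirrors what ReadRasters.__init__ does below):
    #   gdalerr = GdalErrorHandler()
    #   gdal.PushErrorHandler(gdalerr.handler)
    #   ...  # GDAL calls; gdalerr.err_level / err_type / err_msg keep the last error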
class ReadRasters(object):
    def __init__(self, rasterpath, dico_raster, dico_bands, tipo, text=""):
        """Use GDAL functions to extract basic information about a
        geographic raster file (handles ECW, GeoTIFF, JPEG2000)
        and store it into dictionaries.
        rasterpath = path to the geographic file
dico_raster = dictionary for global informations
dico_bands = dictionary for the bands informations
tipo = format
text = dictionary of text in the selected language
"""
# gdal specific
gdal.AllRegister()
# changing working directory to layer folder
chdir(path.dirname(rasterpath))
# handling specific exceptions
gdalerr = GdalErrorHandler()
errhandler = gdalerr.handler
gdal.PushErrorHandler(errhandler)
self.alert = 0
# gdal.SetConfigOption(str("GTIFF_IGNORE_READ_ERRORS"), str("TRUE"))
gdal.UseExceptions()
# opening file
try:
self.rast = gdal.Open(rasterpath, GA_ReadOnly)
except Exception as err:
logger.error(err)
self.alert += 1
self.erratum(dico_raster, rasterpath, "err_incomp")
return
# check if raster is GDAL friendly
if self.rast is None:
self.alert += 1
self.erratum(dico_raster, rasterpath, "err_incomp")
return
else:
pass
# basic informations
dico_raster["format"] = tipo
self.infos_basics(rasterpath, dico_raster, text)
# geometry information
self.infos_geom(dico_raster, text)
# bands information
        for band in range(1, self.rast.RasterCount + 1):  # GDAL band indexes start at 1
self.infos_bands(band, dico_bands)
band = None
# safe close (see: http://pcjericks.github.io/py-gdalogr-cookbook/)
del self.rast
# warnings messages
dico_raster["err_gdal"] = gdalerr.err_type, gdalerr.err_msg
    def infos_basics(self, rasterpath, dico_raster, txt):
        """Get the global information about the raster."""
# files and folder
dico_raster["name"] = path.basename(rasterpath)
dico_raster["folder"] = path.dirname(rasterpath)
dico_raster["title"] = dico_raster["name"][:-4].replace("_", " ").capitalize()
# dependencies
dependencies = [
path.basename(filedepend)
for filedepend in self.rast.GetFileList()
if filedepend != rasterpath
]
dico_raster["dependencies"] = dependencies
# total size
dependencies.append(rasterpath)
total_size = sum([path.getsize(f) for f in dependencies])
dico_raster["total_size"] = self.sizeof(total_size)
dependencies.pop(-1)
# format
rastMD = self.rast.GetMetadata()
dico_raster["compr_rate"] = rastMD.get("COMPRESSION_RATE_TARGET")
dico_raster["color_ref"] = rastMD.get("COLORSPACE")
if rastMD.get("VERSION"):
dico_raster["format_version"] = "(v{})".format(rastMD.get("VERSION"))
else:
dico_raster["format_version"] = ""
# image specifications
dico_raster["num_cols"] = self.rast.RasterXSize
dico_raster["num_rows"] = self.rast.RasterYSize
dico_raster["num_bands"] = self.rast.RasterCount
# data type
dico_raster["data_type"] = gdal.GetDataTypeName(
self.rast.GetRasterBand(1).DataType
)
# basic dates
dico_raster["date_actu"] = strftime(
"%d/%m/%Y", localtime(path.getmtime(rasterpath))
)
dico_raster["date_crea"] = strftime(
"%d/%m/%Y", localtime(path.getctime(rasterpath))
)
# end of function
return dico_raster
    def infos_geom(self, dico_raster, txt):
        """Get the information about the geometry."""
# Spatial extent (bounding box)
geotransform = self.rast.GetGeoTransform()
dico_raster["xOrigin"] = geotransform[0]
dico_raster["yOrigin"] = geotransform[3]
dico_raster["pixelWidth"] = round(geotransform[1], 3)
dico_raster["pixelHeight"] = round(geotransform[5], 3)
dico_raster["orientation"] = geotransform[2]
# -- SRS
# using osr to get the srs
srs = osr.SpatialReference(self.rast.GetProjection())
# srs.ImportFromWkt(self.rast.GetProjection())
srs.AutoIdentifyEPSG()
# srs types
srsmetod = [
(srs.IsCompound(), txt.get("srs_comp")),
(srs.IsGeocentric(), txt.get("srs_geoc")),
(srs.IsGeographic(), txt.get("srs_geog")),
(srs.IsLocal(), txt.get("srs_loca")),
(srs.IsProjected(), txt.get("srs_proj")),
(srs.IsVertical(), txt.get("srs_vert")),
]
# searching for a match with one of srs types
for srsmet in srsmetod:
if srsmet[0] == 1:
typsrs = srsmet[1]
else:
continue
# in case of not match
try:
dico_raster["srs_type"] = typsrs
except UnboundLocalError:
typsrs = txt.get("srs_nr")
dico_raster["srs_type"] = typsrs
# Handling exception in srs names'encoding
if srs.IsProjected():
try:
if srs.GetAttrValue("PROJCS") is not None:
dico_raster["srs"] = srs.GetAttrValue("PROJCS").replace("_", " ")
else:
dico_raster["srs"] = srs.GetAttrValue("PROJECTION").replace(
"_", " "
)
except UnicodeDecodeError:
if srs.GetAttrValue("PROJCS") != "unnamed":
dico_raster["srs"] = (
srs.GetAttrValue("PROJCS").decode("latin1").replace("_", " ")
)
else:
dico_raster["srs"] = (
srs.GetAttrValue("PROJECTION")
.decode("latin1")
.replace("_", " ")
)
else:
try:
if srs.GetAttrValue("GEOGCS") is not None:
dico_raster["srs"] = srs.GetAttrValue("GEOGCS").replace("_", " ")
else:
                    dico_raster["srs"] = srs.GetAttrValue(
                        "PROJECTION"
                    ).replace("_", " ")
except UnicodeDecodeError:
if srs.GetAttrValue("GEOGCS") != "unnamed":
dico_raster["srs"] = (
srs.GetAttrValue("GEOGCS").decode("latin1").replace("_", " ")
)
else:
dico_raster["srs"] = (
srs.GetAttrValue("PROJECTION")
.decode("latin1")
.replace("_", " ")
)
dico_raster["epsg"] = srs.GetAttrValue("AUTHORITY", 1)
# end of function
return dico_raster
    def infos_bands(self, band, dico_bands):
        """Get the information about the band definitions."""
# getting band object
band_info = self.rast.GetRasterBand(band)
# band statistics
try:
stats = band_info.GetStatistics(True, True)
except Exception as err:
logger.error(err)
return
if stats:
# band minimum value
if band_info.GetMinimum() is None:
dico_bands["band{}_Min".format(band)] = stats[0]
else:
dico_bands["band{}_Min".format(band)] = band_info.GetMinimum()
# band maximum value
            if band_info.GetMaximum() is None:
dico_bands["band{}_Max".format(band)] = stats[1]
else:
dico_bands["band{}_Max".format(band)] = band_info.GetMaximum()
# band mean value
dico_bands["band{}_Mean".format(band)] = round(stats[2], 2)
# band standard deviation value
dico_bands["band{}_Sdev".format(band)] = round(stats[3], 2)
else:
pass
# band no data value
dico_bands["band{}_NoData".format(band)] = band_info.GetNoDataValue()
# band scale value
dico_bands["band{}_Scale".format(band)] = band_info.GetScale()
# band unit type value
dico_bands["band{}_UnitType".format(band)] = band_info.GetUnitType()
# color table
coul_table = band_info.GetColorTable()
if coul_table is None:
dico_bands["band{}_CTabCount".format(band)] = 0
else:
dico_bands["band{}_CTabCount".format(band)] = coul_table.GetCount()
# -- COMENTED BECAUSE IT'S TOO MUCH INFORMATIONS
# for ctab_idx in range(0, coul_table.GetCount()):
# entry = coul_table.GetColorEntry(ctab_idx)
# if not entry:
# continue
# else:
# pass
# dico_bands["band{0}_CTab{1}_RGB".format(band, ctab_idx)] = \
# coul_table.GetColorEntryAsRGB(ctab_idx, entry)
# safe close (quite useless but good practice to have)
del stats
del band_info
# end of function
return dico_bands
def sizeof(self, os_size):
"""return size in different units depending on size
see http://stackoverflow.com/a/1094933"""
for size_cat in ["octets", "Ko", "Mo", "Go"]:
if os_size < 1024.0:
return "%3.1f %s" % (os_size, size_cat)
os_size /= 1024.0
return "%3.1f %s" % (os_size, " To")
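    # Hedged examples (added for illustration): sizeof(2048) -> "2.0 Ko",
    # sizeof(5 * 1024 ** 2) -> "5.0 Mo"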
def erratum(self, dico_raster, rasterpath, mess):
"""errors handling"""
# storing minimal informations to give clues to solve later
dico_raster["name"] = path.basename(rasterpath)
dico_raster["folder"] = path.dirname(rasterpath)
dico_raster["error"] = mess
# End of function
return dico_raster
# ############################################################################
# #### Stand alone program ########
# #################################
if __name__ == "__main__":
"""standalone execution for tests. Paths are relative considering a test
within the official repository (https://github.com/Guts/DicoShapes/)"""
# listing test files by formats
li_ecw = [r"..\..\test\datatest\rasters\ECW\0468_6740.ecw"] # ECW
li_gtif = [
r"..\..\test\datatest\rasters\GeoTiff\BDP_07_0621_0049_020_LZ1.tif",
r"..\..\test\datatest\rasters\GeoTiff\TrueMarble_16km_2700x1350.tif",
r"..\..\test\datatest\rasters\GeoTiff\ASTGTM_S17W069_dem.tif",
r"..\..\test\datatest\rasters\GeoTiff\completo1-2.tif",
] # GeoTIFF
li_jpg2 = [r"..\..\test\datatest\rasters\JPEG2000\image_jpg2000.jp2"] # JPEG2000
li_rasters = (
path.abspath(li_ecw[0]),
path.abspath(li_gtif[0]),
path.abspath(li_gtif[1]),
path.abspath(li_gtif[2]),
path.abspath(li_gtif[3]),
path.abspath(li_jpg2[0]),
)
# test text dictionary
textos = OrderedDict()
textos["srs_comp"] = "Compound"
textos["srs_geoc"] = "Geocentric"
textos["srs_geog"] = "Geographic"
textos["srs_loca"] = "Local"
textos["srs_proj"] = "Projected"
textos["srs_vert"] = "Vertical"
textos["geom_point"] = "Point"
textos["geom_ligne"] = "Line"
textos["geom_polyg"] = "Polygon"
# execution
for raster in li_rasters:
"""looping on raster files"""
# recipient datas
dico_raster = OrderedDict() # dictionary where will be stored informations
dico_bands = OrderedDict() # dictionary for fields information
# getting the informations
if not path.isfile(raster):
print("\n\t==> File doesn't exist: " + raster)
continue
else:
pass
print(("\n======================\n\t", path.basename(raster)))
# handling odd warnings
info_raster = ReadRasters(
path.abspath(raster),
dico_raster,
dico_bands,
path.splitext(raster)[1],
textos,
)
print("\n\n{0}\n{1}".format(dico_raster, dico_bands))
# deleting dictionaries
del dico_raster, dico_bands, raster
| gpl-3.0 | -6,192,580,079,353,181,000 | 34.877108 | 110 | 0.525287 | false |
leiferikb/bitpop | src/tools/telemetry/telemetry/core/webpagereplay.py | 1 | 9250 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Start and stop Web Page Replay.
Of the public module names, the following one is key:
ReplayServer: a class to start/stop Web Page Replay.
"""
import logging
import os
import re
import signal
import subprocess
import sys
import time
import urllib
_CHROME_SRC_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))
REPLAY_DIR = os.path.join(
_CHROME_SRC_DIR, 'third_party', 'webpagereplay')
LOG_PATH = os.path.join(
_CHROME_SRC_DIR, 'webpagereplay_logs', 'logs.txt')
# Chrome options to make it work with Web Page Replay.
def GetChromeFlags(replay_host, http_port, https_port):
assert replay_host and http_port and https_port, 'All arguments required'
return [
'--host-resolver-rules=MAP * %s,EXCLUDE localhost' % replay_host,
'--testing-fixed-http-port=%s' % http_port,
'--testing-fixed-https-port=%s' % https_port,
'--ignore-certificate-errors',
]
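# Hedged usage sketch (added; the archive path and host are illustrative):
#   with ReplayServer('/tmp/archive.wpr', '127.0.0.1', None, 0, 0) as server:
#       flags = GetChromeFlags('127.0.0.1', server.http_port, server.https_port)
#       # launch Chrome with `flags` so traffic is served from the recorded archive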
# Signal masks on Linux are inherited from parent processes. If anything
# invoking us accidentally masks SIGINT (e.g. by putting a process in the
# background from a shell script), sending a SIGINT to the child will fail
# to terminate it. Running this signal handler before execing should fix that
# problem.
def ResetInterruptHandler():
signal.signal(signal.SIGINT, signal.SIG_DFL)
class ReplayError(Exception):
"""Catch-all exception for the module."""
pass
class ReplayNotFoundError(ReplayError):
def __init__(self, label, path):
super(ReplayNotFoundError, self).__init__()
self.args = (label, path)
def __str__(self):
label, path = self.args
return 'Path does not exist for %s: %s' % (label, path)
class ReplayNotStartedError(ReplayError):
pass
class ReplayServer(object):
"""Start and Stop Web Page Replay.
Web Page Replay is a proxy that can record and "replay" web pages with
simulated network characteristics -- without having to edit the pages
by hand. With WPR, tests can use "real" web content, and catch
performance issues that may result from introducing network delays and
bandwidth throttling.
Example:
with ReplayServer(archive_path):
self.NavigateToURL(start_url)
self.WaitUntil(...)
Environment Variables (for development):
WPR_ARCHIVE_PATH: path to alternate archive file (e.g. '/tmp/foo.wpr').
WPR_RECORD: if set, puts Web Page Replay in record mode instead of replay.
WPR_REPLAY_DIR: path to alternate Web Page Replay source.
"""
def __init__(self, archive_path, replay_host, dns_port, http_port, https_port,
replay_options=None, replay_dir=None,
log_path=None):
"""Initialize ReplayServer.
Args:
archive_path: a path to a specific WPR archive (required).
replay_host: the hostname to serve traffic.
dns_port: an integer port on which to serve DNS traffic. May be zero
to let the OS choose an available port. If None DNS forwarding is
disabled.
http_port: an integer port on which to serve HTTP traffic. May be zero
to let the OS choose an available port.
https_port: an integer port on which to serve HTTPS traffic. May be zero
to let the OS choose an available port.
replay_options: an iterable of options strings to forward to replay.py.
replay_dir: directory that has replay.py and related modules.
log_path: a path to a log file.
"""
self.archive_path = os.environ.get('WPR_ARCHIVE_PATH', archive_path)
self.replay_options = list(replay_options or ())
self.replay_dir = os.environ.get('WPR_REPLAY_DIR', replay_dir or REPLAY_DIR)
self.log_path = log_path or LOG_PATH
self.dns_port = dns_port
self.http_port = http_port
self.https_port = https_port
self._replay_host = replay_host
if 'WPR_RECORD' in os.environ and '--record' not in self.replay_options:
self.replay_options.append('--record')
self.is_record_mode = '--record' in self.replay_options
self._AddDefaultReplayOptions()
self.replay_py = os.path.join(self.replay_dir, 'replay.py')
if self.is_record_mode:
self._CheckPath('archive directory', os.path.dirname(self.archive_path))
elif not os.path.exists(self.archive_path):
self._CheckPath('archive file', self.archive_path)
self._CheckPath('replay script', self.replay_py)
self.replay_process = None
def _AddDefaultReplayOptions(self):
"""Set WPR command-line options. Can be overridden if needed."""
self.replay_options = [
'--host', str(self._replay_host),
'--port', str(self.http_port),
'--ssl_port', str(self.https_port),
'--use_closest_match',
'--no-dns_forwarding',
'--log_level', 'warning'
] + self.replay_options
if self.dns_port is not None:
self.replay_options.extend(['--dns_port', str(self.dns_port)])
def _CheckPath(self, label, path):
if not os.path.exists(path):
raise ReplayNotFoundError(label, path)
def _OpenLogFile(self):
log_dir = os.path.dirname(self.log_path)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return open(self.log_path, 'w')
def WaitForStart(self, timeout):
"""Checks to see if the server is up and running."""
port_re = re.compile(
        r'.*?(?P<protocol>[A-Z]+) server started on (?P<host>.*):(?P<port>\d+)')
start_time = time.time()
elapsed_time = 0
while elapsed_time < timeout:
if self.replay_process.poll() is not None:
break # The process has exited.
# Read the ports from the WPR log.
if not self.http_port or not self.https_port or not self.dns_port:
with open(self.log_path) as f:
for line in f.readlines():
m = port_re.match(line.strip())
if m:
if not self.http_port and m.group('protocol') == 'HTTP':
self.http_port = int(m.group('port'))
elif not self.https_port and m.group('protocol') == 'HTTPS':
self.https_port = int(m.group('port'))
elif not self.dns_port and m.group('protocol') == 'DNS':
self.dns_port = int(m.group('port'))
# Try to connect to the WPR ports.
if self.http_port and self.https_port:
try:
up_url = '%s://%s:%s/web-page-replay-generate-200'
http_up_url = up_url % ('http', self._replay_host, self.http_port)
https_up_url = up_url % ('https', self._replay_host, self.https_port)
if (200 == urllib.urlopen(http_up_url, None, {}).getcode() and
200 == urllib.urlopen(https_up_url, None, {}).getcode()):
return True
except IOError:
pass
poll_interval = min(max(elapsed_time / 10., .1), 5)
time.sleep(poll_interval)
elapsed_time = time.time() - start_time
return False
def StartServer(self):
"""Start Web Page Replay and verify that it started.
Raises:
ReplayNotStartedError: if Replay start-up fails.
"""
cmd_line = [sys.executable, self.replay_py]
cmd_line.extend(self.replay_options)
cmd_line.append(self.archive_path)
logging.debug('Starting Web-Page-Replay: %s', cmd_line)
with self._OpenLogFile() as log_fh:
kwargs = {'stdout': log_fh, 'stderr': subprocess.STDOUT}
if sys.platform.startswith('linux') or sys.platform == 'darwin':
kwargs['preexec_fn'] = ResetInterruptHandler
self.replay_process = subprocess.Popen(cmd_line, **kwargs)
if not self.WaitForStart(30):
with open(self.log_path) as f:
log = f.read()
raise ReplayNotStartedError(
'Web Page Replay failed to start. Log output:\n%s' % log)
def StopServer(self):
"""Stop Web Page Replay."""
if self.replay_process:
logging.debug('Trying to stop Web-Page-Replay gracefully')
try:
url = 'http://localhost:%s/web-page-replay-command-exit'
urllib.urlopen(url % self.http_port, None, {})
except IOError:
# IOError is possible because the server might exit without response.
pass
start_time = time.time()
while time.time() - start_time < 10: # Timeout after 10 seconds.
if self.replay_process.poll() is not None:
break
time.sleep(1)
else:
try:
# Use a SIGINT so that it can do graceful cleanup.
self.replay_process.send_signal(signal.SIGINT)
except: # pylint: disable=W0702
# On Windows, we are left with no other option than terminate().
if 'no-dns_forwarding' not in self.replay_options:
logging.warning('DNS configuration might not be restored!')
try:
self.replay_process.terminate()
except: # pylint: disable=W0702
pass
self.replay_process.wait()
def __enter__(self):
"""Add support for with-statement."""
self.StartServer()
return self
def __exit__(self, unused_exc_type, unused_exc_val, unused_exc_tb):
"""Add support for with-statement."""
self.StopServer()
| gpl-3.0 | -2,729,612,260,395,174,000 | 35.132813 | 80 | 0.645838 | false |
LibreGameArchive/silvertree | scons/gl.py | 1 | 1408 | # vi: syntax=python:et:ts=4
def CheckOpenGL(context, libs = ["gl"]):
context.Message("Checking for OpenGL... ")
env = context.env
backup = env.Clone().Dictionary()
if env["PLATFORM"] == "win32":
libnames = { "gl" : "opengl32", "glu" : "glu32" }
else:
libnames = { "gl" : "GL", "glu" : "GLU" }
env.AppendUnique(LIBS = map(libnames.get, libs))
test_program = ""
for lib in libs:
test_program += "#include <GL/%s.h>\n" % lib
test_program += "int main()\n{}\n"
if context.TryLink(test_program, ".c"):
context.Result("yes")
return True
else:
env.Replace(**backup)
context.Result("no")
return False
def CheckGLEW(context):
context.Message("Checking for OpenGL Extension Wrangler... ")
env = context.env
backup = env.Clone().Dictionary()
if env["PLATFORM"] == "win32":
env.AppendUnique(LIBS = ["glew32", "glu32", "opengl32"])
else:
env.AppendUnique(LIBS = ["GLEW", "GLU", "GL"])
test_program = """
#include <GL/glew.h>
int main()
{
glewInit();
}
"""
if context.TryLink(test_program, ".c"):
context.Result("yes")
return True
else:
env.Replace(**backup)
context.Result("no")
return False
def get_checks():
return { "CheckOpenGL" : CheckOpenGL, "CheckGLEW" : CheckGLEW }
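# Hedged usage sketch for an SConstruct (added; variable names are illustrative):
#   conf = Configure(env, custom_tests = get_checks())
#   have_gl = conf.CheckOpenGL(["gl", "glu"])
#   have_glew = conf.CheckGLEW()
#   env = conf.Finish()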
| gpl-3.0 | -9,038,046,539,964,435,000 | 28.333333 | 67 | 0.551136 | false |
brunogamacatao/portalsaladeaula | portal/models.py | 1 | 34418 | # -*- coding: utf-8 -*-
import logging
from operator import attrgetter
from django.db import models
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from djangotoolbox.fields import BlobField, SetField
from django.template import Context
from django.template.loader import get_template
from django.core.mail import EmailMultiAlternatives
from google.appengine.api import images
from portal.fields import AutoSlugField
from portal.constants import STATES_CHOICES, REGISTRATION_TYPE_CHOICES, ACCESS_TYPE_CHOICES, MODERATE_REGISTRATION, PUBLIC_ACCESS
from portal.utils import split_string, formata_hora
from portal.updates.models import Update
import datetime
from settings import PERIODO_ATUAL
class Indexable(models.Model):
index = SetField(blank=False)
messages_cache = models.TextField(blank=True, null=True)
updates_cache = models.TextField(blank=True, null=True)
teachers_cache = models.TextField(blank=True, null=True)
students_cache = models.TextField(blank=True, null=True)
def notify_upload(self, user, uploaded_file):
return None
def notify_new_student(self, user, student):
return None
def notify_new_teacher(self, user, teacher):
return None
def notify_comment(self, user, comment):
return None
def get_update_list(self):
return None
class Meta:
abstract = True
class Picture(models.Model):
picture = BlobField(blank=False)
filename = models.CharField(blank=False, max_length=200)
width = models.IntegerField(blank=True)
height = models.IntegerField(blank=True)
format = models.CharField(blank=True, max_length=10)
parent = models.ForeignKey('self', blank=True, null=True, related_name='thumb_set')
'''
    The fields below are only used by thumbnails. They store the requested width and
    height values, since the actual width/height can change according to the image's aspect ratio.
'''
intended_width = models.IntegerField(blank=True, null=True)
intended_height = models.IntegerField(blank=True, null=True)
@classmethod
def create_thumbnail(cls, parent, width, height):
img = images.Image(parent.picture)
img.resize(width=width, height=height)
img.im_feeling_lucky()
thumb = Picture()
thumb.picture = img.execute_transforms(output_encoding=images.JPEG)
thumb.filename = parent.filename.split('.')[0] + '_thumb.jpg'
thumb.parent = parent
thumb.intended_width = width
thumb.intended_height = height
thumb.save()
return thumb
@classmethod
def get_thumbnail(cls, picture, width, height):
if picture.thumb_set.filter(intended_width=width, intended_height=height).exists():
return picture.thumb_set.filter(intended_width=width, intended_height=height)[0]
return cls.create_thumbnail(picture, width, height)
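    # Hedged usage sketch (added; assumes an already saved Picture instance `pic`):
    #   thumb = Picture.get_thumbnail(pic, 120, 120)  # reuses or creates the 120x120 JPEG thumb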
#Here we automatically fill the width, height and format fields of a picture.
def fill_picture_fields(sender, instance, **kw):
image = images.Image(instance.picture)
instance.width = image.width
instance.height = image.height
instance.format = instance.filename.split('.')[-1]
models.signals.pre_save.connect(fill_picture_fields, sender=Picture)
class UserInfo(Indexable):
name = models.CharField(_('Name'), blank=False, max_length=100)
picture = models.ForeignKey(Picture, blank=True, null=True)
city = models.CharField(_('City'), blank=False, max_length=100)
province = models.CharField(_('State or Province'), blank=False, max_length=2, choices=STATES_CHOICES)
email = models.EmailField(_('Email'), blank=False)
user = models.ForeignKey(User, unique=True)
show_help_text = models.NullBooleanField(blank=True, null=True)
is_teacher = models.NullBooleanField(blank=True, null=True)
birth_date = models.DateField(blank=True, null=True)
schedule_cache = models.TextField(blank=True, null=True)
def get_absolute_url(self):
return reverse('portal.accounts.views.user_info', args=[self.user.id],)
def get_disciplines_studies(self):
student_role = UserDisciplineRole.objects.student_role()
queryset = self.reluserdiscipline_set.filter(role=student_role, user=self, period=PERIODO_ATUAL)
if queryset.exists():
disciplines = []
for rel in queryset.all():
disciplines.append(rel.discipline)
return disciplines
return []
def get_disciplines_teaches(self):
teacher_role = UserDisciplineRole.objects.teacher_role()
queryset = self.reluserdiscipline_set.filter(role=teacher_role, user=self, period=PERIODO_ATUAL)
if queryset.exists():
disciplines = []
for rel in queryset.all():
disciplines.append(rel.discipline)
return disciplines
return []
def get_courses_studies(self):
student_role = UserCourseRole.objects.student_role()
queryset = self.relusercourse_set.filter(role=student_role, user=self, period=PERIODO_ATUAL)
if queryset.exists():
courses = []
for rel in queryset.all():
courses.append(rel.course)
return courses
return []
def get_courses_teaches(self):
teacher_role = UserCourseRole.objects.teacher_role()
queryset = self.relusercourse_set.filter(role=teacher_role, user=self, period=PERIODO_ATUAL)
if queryset.exists():
courses = []
for rel in queryset.all():
courses.append(rel.course)
return courses
return []
def get_institutions_studies(self):
student_role = UserInstitutionRole.objects.student_role()
queryset = self.reluserinstitution_set.filter(role=student_role, user=self, period=PERIODO_ATUAL)
if queryset.exists():
institutions = []
for rel in queryset.all():
institutions.append(rel.institution)
return institutions
return []
def get_institutions_teaches(self):
teacher_role = UserInstitutionRole.objects.teacher_role()
queryset = self.reluserinstitution_set.filter(role=teacher_role, user=self, period=PERIODO_ATUAL)
if queryset.exists():
institutions = []
for rel in queryset.all():
institutions.append(rel.institution)
return institutions
return []
def get_update_list(self):
updates = []
if self.is_teacher:
for discipline in self.get_disciplines_teaches():
for update in Update.objects.for_model(discipline).order_by('-date_published')[:5]:
updates.append(update)
for course in self.get_courses_teaches():
for update in Update.objects.for_model(course).order_by('-date_published')[:5]:
updates.append(update)
for institution in self.get_institutions_teaches():
for update in Update.objects.for_model(institution).order_by('-date_published')[:5]:
updates.append(update)
else:
for discipline in self.get_disciplines_studies():
for update in Update.objects.for_model(discipline).order_by('-date_published')[:5]:
updates.append(update)
for course in self.get_courses_studies():
for update in Update.objects.for_model(course).order_by('-date_published')[:5]:
updates.append(update)
for institution in self.get_institutions_studies():
for update in Update.objects.for_model(institution).order_by('-date_published')[:5]:
updates.append(update)
return sorted(updates, key=attrgetter('date_published'), reverse=True)[:5]
class Meta:
verbose_name = _('User Information')
verbose_name_plural = _('User Information')
def __unicode__(self):
return self.name
def fill_user_index(sender, instance, **kw):
index = []
if instance.name:
index += split_string(instance.name)
instance.index = index
models.signals.pre_save.connect(fill_user_index, sender=UserInfo)
class PreInscricao(models.Model):
matricula = models.CharField(blank=False, max_length=50)
nome = models.CharField(blank=False, max_length=100)
cpf = models.CharField(blank=False, max_length=50)
sexo = models.CharField(blank=False, max_length=2)
email = models.EmailField(blank=True, max_length=200)
data_nasc = models.DateField(blank=True)
rua = models.CharField(blank=False, max_length=200)
numero = models.CharField(blank=False, max_length=10)
bairro = models.CharField(blank=False, max_length=100)
cidade = models.CharField(blank=False, max_length=100)
estado = models.CharField(blank=False, max_length=2)
senha = models.CharField(blank=False, max_length=50)
disciplinas = SetField(blank=True, null=True)
user_info = models.ForeignKey(UserInfo, blank=True, null=True)
class Address(models.Model):
address = models.CharField(_('Address'), blank=False, max_length=200)
number = models.CharField(_('Number'), blank=False, max_length=10)
neighborhood = models.CharField(_('Neighborhood'), blank=False, max_length=100)
city = models.CharField(_('City'), blank=False, max_length=100)
province = models.CharField(_('State or Province'), blank=False, max_length=2, choices=STATES_CHOICES)
def get_index(self):
index = []
if self.address:
index += split_string(self.address)
if self.neighborhood:
index += split_string(self.neighborhood)
if self.city:
index += split_string(self.city)
return set(index)
class Meta:
verbose_name = _('Address')
verbose_name_plural = _('Addresses')
class Institution(Indexable):
name = models.CharField(_('Name'), blank=False, max_length=100)
slug = AutoSlugField(prepopulate_from=('acronym',), unique=True, blank=True, max_length=100)
acronym = models.CharField(_('Acronym'), blank=True, null=True, max_length=100)
picture = models.ForeignKey(Picture, blank=True, null=True)
address = models.ForeignKey(Address, blank=True, null=True)
description = models.TextField(_('Description'))
homepage = models.URLField(_('Homepage'), blank=True, null=True)
feed_url = models.CharField(_('News Feed URL'), blank=True, null=True, max_length=512)
twitter_id = models.CharField(_('Twitter ID'), blank=True, null=True, max_length=100)
    '''
    WARNING!!!
    Apparently, Django-nonrel does not support ManyToMany fields.
    '''
    '''
    Brainstorm time:
    Which other fields should go here?
        description?
        address?
        telephone numbers?
        homepage?
    Other information, such as customization, feeds and partner institutions, should be
    added as extra modules.
    Extra modules:
    1. Customization:
       a) Logo;
       b) Background images (header, body, footer);
       c) Color scheme;
    2. Feeds (rss/atom/twitter) - If there is a Twitter account, a "follow us" link would be nice;
    3. Partner institutions (from the portal itself);
    4. Links (with or without an image)
    5. Entrance exam (vestibular)
    6. Events
    7. Library (integration with Pergamum)
    8. IM (Google Talk like)
    9. Photo album
    10. Files (upload/download area)
    11. Comments
    '''
class Meta:
verbose_name = _('Institution')
verbose_name_plural = _('Institutions')
def get_students(self):
student_role = UserInstitutionRole.objects.student_role()
queryset = self.reluserinstitution_set.filter(role=student_role, institution=self)
if queryset.exists():
students = []
for rel in queryset.all():
try:
students.append(rel.user)
except:
logging.error("[get_students] Not able to find an user for RelUserInstitution %s" % rel.id)
return students
return None
def get_student_count(self):
student_role = UserInstitutionRole.objects.student_role()
return self.reluserinstitution_set.filter(role=student_role, institution=self).count()
def get_teachers(self):
teacher_role = UserInstitutionRole.objects.teacher_role()
queryset = self.reluserinstitution_set.filter(role=teacher_role, institution=self)
if queryset.exists():
teachers = []
for rel in queryset.all():
try:
teachers.append(rel.user)
except:
logging.error("[get_teachers] Not able to find an user for RelUserInstitution %s" % rel.id)
return teachers
return None
def get_update_list(self):
updates = []
for update in Update.objects.for_model(self).order_by('-date_published')[:5]:
updates.append(update)
for course in self.course_set.all():
for update in Update.objects.for_model(course).order_by('-date_published')[:5]:
updates.append(update)
for discipline in course.discipline_set.all():
for update in Update.objects.for_model(discipline).order_by('-date_published')[:5]:
updates.append(update)
return sorted(updates, key=attrgetter('date_published'), reverse=True)[:5]
def get_class_name(self):
return 'portal.models.Institution'
def get_absolute_url(self):
return reverse('portal.institutions.views.detail', args=[self.slug,])
def __unicode__(self):
return self.name
#If the acronym field is not filled, it will receive the value from name field.
def fill_acronym(sender, instance, **kw):
if not instance.acronym or instance.acronym == '':
instance.acronym = instance.name
models.signals.pre_save.connect(fill_acronym, sender=Institution)
def fill_institution_index(sender, instance, **kw):
index = []
if instance.name:
index += split_string(instance.name)
if instance.acronym:
index += split_string(instance.acronym)
if instance.description:
index += split_string(instance.description)
if instance.address:
index += instance.address.get_index()
instance.index = index
models.signals.pre_save.connect(fill_institution_index, sender=Institution)
class InstitutionUpdateCache(models.Model):
text = models.CharField(blank=False, max_length=100)
link = models.CharField(blank=False, max_length=512)
date_published = models.DateTimeField(default=datetime.datetime.now)
author = models.ForeignKey(User, blank=False)
institution = models.ForeignKey(Institution, blank=True, null=True)
class RelInstitutionOwner(models.Model):
owner = models.ForeignKey(UserInfo, blank=True, null=True, related_name='owner_set')
institution = models.ForeignKey(Institution, blank=True, null=True, related_name='owner_set')
class PhoneNumber(models.Model):
region_code = models.CharField(_('Region Code'), blank=False, max_length=5)
telephone = models.CharField(_('Telephone'), blank=False, max_length=20)
description = models.CharField(_('Description'), blank=False, max_length=50)
institution = models.ForeignKey(Institution, blank=False)
class Course(Indexable):
name = models.CharField(_('Name'), blank=False, max_length=100)
slug = AutoSlugField(prepopulate_from=('acronym',), parent_name='institution', unique=True, blank=True, max_length=100)
acronym = models.CharField(_('Acronym'), blank=True, null=True, max_length=100)
picture = models.ForeignKey(Picture, blank=True, null=True)
description = models.TextField(_('Description'))
feed_url = models.CharField(_('News Feed URL'), blank=True, null=True, max_length=512)
twitter_id = models.CharField(_('Twitter ID'), blank=True, null=True, max_length=100)
institution = models.ForeignKey(Institution, blank=False)
def get_students(self):
student_role = UserCourseRole.objects.student_role()
queryset = self.relusercourse_set.filter(role=student_role, course=self)
if queryset.exists():
students = []
for rel in queryset.all():
try:
students.append(rel.user)
except:
logging.error("[get_students] Not able to find an user for RelUserInstitution %s" % rel.id)
return students
return None
def get_teachers(self):
teacher_role = UserCourseRole.objects.teacher_role()
queryset = self.relusercourse_set.filter(role=teacher_role, course=self)
if queryset.exists():
teachers = []
for rel in queryset.all():
try:
teachers.append(rel.user)
except:
logging.error("[get_teachers] Not able to find an user for RelUserInstitution %s" % rel.id)
return teachers
return None
class Meta:
verbose_name = _('Course')
verbose_name_plural = _('Courses')
def get_class_name(self):
return 'portal.models.Course'
def get_absolute_url(self):
return reverse('portal.courses.views.detail', args=[self.institution.slug, self.slug,])
def get_update_list(self):
updates = []
for update in Update.objects.for_model(self).order_by('-date_published')[:5]:
updates.append(update)
for discipline in self.discipline_set.all():
for update in Update.objects.for_model(discipline).order_by('-date_published')[:5]:
updates.append(update)
return sorted(updates, key=attrgetter('date_published'), reverse=True)[:5]
def __unicode__(self):
return self.name
models.signals.pre_save.connect(fill_acronym, sender=Course)
def fill_course_index(sender, instance, **kw):
index = []
if instance.name:
index += split_string(instance.name)
if instance.acronym:
index += split_string(instance.acronym)
if instance.description:
index += split_string(instance.description)
if instance.institution:
index += instance.institution.index
instance.index = index
models.signals.pre_save.connect(fill_course_index, sender=Course)
class RelCourseOwner(models.Model):
owner = models.ForeignKey(UserInfo, blank=True, null=True, related_name='course_owner_set')
course = models.ForeignKey(Course, blank=True, null=True, related_name='course_owner_set')
class Discipline(Indexable):
name = models.CharField(_('Name'), blank=False, max_length=100)
slug = AutoSlugField(prepopulate_from=('acronym',), parent_name='course', unique=True, blank=True, max_length=100)
acronym = models.CharField(_('Acronym'), blank=True, null=True, max_length=100)
picture = models.ForeignKey(Picture, blank=True, null=True)
description = models.TextField(_('Description'))
feed_url = models.CharField(_('News Feed URL'), blank=True, null=True, max_length=512)
twitter_id = models.CharField(_('Twitter ID'), blank=True, null=True, max_length=100)
course = models.ForeignKey(Course, blank=False)
registration_type = models.IntegerField(_('Registration type'), blank=False,
default=MODERATE_REGISTRATION,
choices=REGISTRATION_TYPE_CHOICES)
access_type = models.IntegerField(_('Access type'), blank=False,
default=PUBLIC_ACCESS,
choices=ACCESS_TYPE_CHOICES)
period = models.CharField(_('Period'), blank=True, null=True, max_length=5)
class Meta:
verbose_name = _('Discipline')
verbose_name_plural = _('Disciplines')
def get_students(self):
student_role = UserDisciplineRole.objects.student_role()
queryset = self.reluserdiscipline_set.filter(role=student_role, discipline=self)
if queryset.exists():
students = []
for rel in queryset.all():
try:
students.append(rel.user)
except:
logging.error("[get_students] Not able to find an user for RelUserInstitution %s" % rel.id)
return students
return None
def get_teachers(self):
teacher_role = UserDisciplineRole.objects.teacher_role()
queryset = self.reluserdiscipline_set.filter(role=teacher_role, discipline=self)
if queryset.exists():
teachers = []
for rel in queryset.all():
try:
teachers.append(rel.user)
except:
logging.error("[get_teachers] Not able to find an user for RelUserInstitution %s" % rel.id)
return teachers
return None
def get_horario(self):
not_empty = lambda x: (x and len(x.lstrip()) > 0 and x != 'null') or False
if self.disciplinemetadata_set.exists():
m_data = self.disciplinemetadata_set.all()[0]
horario = u''
if not_empty(m_data.segunda):
horario += u'Segunda-feira ' + unicode(formata_hora(m_data.segunda), 'utf-8')
if not_empty(m_data.terca):
horario += u'\nTerça-feira ' + unicode(formata_hora(m_data.terca), 'utf-8')
if not_empty(m_data.quarta):
horario += u'\nQuarta-feira ' + unicode(formata_hora(m_data.quarta), 'utf-8')
if not_empty(m_data.quinta):
horario += u'\nQuinta-feira ' + unicode(formata_hora(m_data.quinta), 'utf-8')
if not_empty(m_data.sexta):
horario += u'\nSexta-feira ' + unicode(formata_hora(m_data.sexta), 'utf-8')
if not_empty(m_data.sabado):
horario += u'\nSábado ' + unicode(formata_hora(m_data.sabado), 'utf-8')
return horario
return None
def get_sala(self):
if self.disciplinemetadata_set.exists():
return self.disciplinemetadata_set.all()[0].sala
return None
def get_class_name(self):
return 'portal.models.Discipline'
def get_absolute_url(self):
return reverse('portal.disciplines.views.detail', args=[self.course.institution.slug, self.course.slug, self.slug,])
def get_update_list(self):
return Update.objects.for_model(self).order_by('-date_published')[:5]
def notify_upload(self, user, uploaded_file):
text = u'%s postou um novo material didático <a href="%s">%s</a>' % (user.get_profile().name, self.get_absolute_url(), uploaded_file.description, )
link = self.get_absolute_url()
update = Update.createUpdate(user, text, link, self)
ctx = {
'mensagem': update.text,
'link': 'http://www.portalsaladeaula.com%s' % update.link,
}
subject = 'Novo Material Didático'
from_email = 'Portal Sala de Aula <[email protected]>'
text_content = get_template('emails/update.txt').render(Context(ctx))
html_content = get_template('emails/update.html').render(Context(ctx))
if self.get_students():
for student in self.get_students():
msg = EmailMultiAlternatives(subject, text_content, from_email, [student.email,])
msg.attach_alternative(html_content, "text/html")
try:
msg.send()
except:
logging.error('Não foi possível enviar o email')
if self.get_teachers():
for teacher in self.get_teachers():
if teacher != uploaded_file.user:
msg = EmailMultiAlternatives(subject, text_content, from_email, [teacher.email,])
msg.attach_alternative(html_content, "text/html")
try:
msg.send()
except:
logging.error('Não foi possível enviar o email')
def __unicode__(self):
return self.name
models.signals.pre_save.connect(fill_acronym, sender=Discipline)
def fill_discipline_index(sender, instance, **kw):
index = []
if instance.name:
index += split_string(instance.name)
if instance.acronym:
index += split_string(instance.acronym)
if instance.description:
index += split_string(instance.description)
if instance.course:
index += instance.course.index
instance.index = index
models.signals.pre_save.connect(fill_discipline_index, sender=Discipline)
class DisciplineMetadata(models.Model):
cod_turma = models.CharField(blank=False, max_length=50)
periodo = models.CharField(blank=False, max_length=50)
senha = models.CharField(blank=False, max_length=50)
discipline = models.ForeignKey(Discipline, blank=True, null=True)
segunda = models.CharField(blank=True, null=True, max_length=5)
terca = models.CharField(blank=True, null=True, max_length=5)
quarta = models.CharField(blank=True, null=True, max_length=5)
quinta = models.CharField(blank=True, null=True, max_length=5)
sexta = models.CharField(blank=True, null=True, max_length=5)
sabado = models.CharField(blank=True, null=True, max_length=5)
sala = models.CharField(blank=True, null=True, max_length=5)
class RelDisciplineOwner(models.Model):
owner = models.ForeignKey(UserInfo, blank=True, null=True, related_name='discipline_owner_set')
discipline = models.ForeignKey(Discipline, blank=True, null=True, related_name='discipline_owner_set')
#To speedup the system, these roles will be queried just once
INSTITUTION_STUDENT_ROLE = None
INSTITUTION_TEACHER_ROLE = None
INSTITUTION_COORDINATOR_ROLE = None
INSTITUTION_MANAGER_ROLE = None
class ManagerUserInstitutionRole(models.Manager):
def student_role(self):
global INSTITUTION_STUDENT_ROLE
if INSTITUTION_STUDENT_ROLE:
return INSTITUTION_STUDENT_ROLE
queryset = self.filter(slug='student')
if queryset.exists():
INSTITUTION_STUDENT_ROLE = queryset.all()[0]
return INSTITUTION_STUDENT_ROLE
INSTITUTION_STUDENT_ROLE = UserInstitutionRole(name='Student', slug='student')
INSTITUTION_STUDENT_ROLE.save()
return INSTITUTION_STUDENT_ROLE
def teacher_role(self):
global INSTITUTION_TEACHER_ROLE
if INSTITUTION_TEACHER_ROLE:
return INSTITUTION_TEACHER_ROLE
queryset = self.filter(slug='teacher')
if queryset.exists():
INSTITUTION_TEACHER_ROLE = queryset.all()[0]
return INSTITUTION_TEACHER_ROLE
INSTITUTION_TEACHER_ROLE = UserInstitutionRole(name='Teacher', slug='teacher')
INSTITUTION_TEACHER_ROLE.save()
return INSTITUTION_TEACHER_ROLE
def coordinator_role(self):
global INSTITUTION_COORDINATOR_ROLE
if INSTITUTION_COORDINATOR_ROLE:
return INSTITUTION_COORDINATOR_ROLE
queryset = self.filter(slug='coordinator')
if queryset.exists():
INSTITUTION_COORDINATOR_ROLE = queryset.all()[0]
return INSTITUTION_COORDINATOR_ROLE
INSTITUTION_COORDINATOR_ROLE = UserInstitutionRole(name='Coordinator', slug='coordinator')
INSTITUTION_COORDINATOR_ROLE.save()
return INSTITUTION_COORDINATOR_ROLE
def manager_role(self):
global INSTITUTION_MANAGER_ROLE
if INSTITUTION_MANAGER_ROLE:
return INSTITUTION_MANAGER_ROLE
queryset = self.filter(slug='manager')
if queryset.exists():
INSTITUTION_MANAGER_ROLE = queryset.all()[0]
return INSTITUTION_MANAGER_ROLE
INSTITUTION_MANAGER_ROLE = UserInstitutionRole(name='Manager', slug='manager')
INSTITUTION_MANAGER_ROLE.save()
return INSTITUTION_MANAGER_ROLE
class UserInstitutionRole(models.Model):
name = models.CharField(_('Role Name'), blank=False, max_length=100)
slug = AutoSlugField(prepopulate_from=('name',), unique=True, blank=True, max_length=100)
objects = ManagerUserInstitutionRole()
class Meta:
verbose_name = _('Role for User/Institution Relationship')
verbose_name_plural = _('Roles for User/Institution Relationship')
def __unicode__(self):
return self.name
class RelUserInstitution(models.Model):
user = models.ForeignKey(UserInfo, blank=False)
institution = models.ForeignKey(Institution, blank=False)
role = models.ForeignKey(UserInstitutionRole, blank=False)
period = models.CharField(_('Period'), blank=True, null=True, max_length=5)
#To speedup the system, these roles will be queried just once
COURSE_STUDENT_ROLE = None
COURSE_TEACHER_ROLE = None
COURSE_COORDINATOR_ROLE = None
COURSE_SECRETARY_ROLE = None
class ManagerUserCourseRole(models.Manager):
def student_role(self):
global COURSE_STUDENT_ROLE
if COURSE_STUDENT_ROLE:
return COURSE_STUDENT_ROLE
queryset = self.filter(slug='student')
if queryset.exists():
COURSE_STUDENT_ROLE = queryset.all()[0]
return COURSE_STUDENT_ROLE
COURSE_STUDENT_ROLE = UserCourseRole(name='Student', slug='student')
COURSE_STUDENT_ROLE.save()
return COURSE_STUDENT_ROLE
def teacher_role(self):
global COURSE_TEACHER_ROLE
if COURSE_TEACHER_ROLE:
return COURSE_TEACHER_ROLE
queryset = self.filter(slug='teacher')
if queryset.exists():
COURSE_TEACHER_ROLE = queryset.all()[0]
return COURSE_TEACHER_ROLE
COURSE_TEACHER_ROLE = UserCourseRole(name='Teacher', slug='teacher')
COURSE_TEACHER_ROLE.save()
return COURSE_TEACHER_ROLE
def coordinator_role(self):
global COURSE_COORDINATOR_ROLE
if COURSE_COORDINATOR_ROLE:
return COURSE_COORDINATOR_ROLE
queryset = self.filter(slug='coordinator')
if queryset.exists():
COURSE_COORDINATOR_ROLE = queryset.all()[0]
return COURSE_COORDINATOR_ROLE
COURSE_COORDINATOR_ROLE = UserCourseRole(name='Coordinator', slug='coordinator')
COURSE_COORDINATOR_ROLE.save()
return COURSE_COORDINATOR_ROLE
def secretary_role(self):
global COURSE_SECRETARY_ROLE
if COURSE_SECRETARY_ROLE:
return COURSE_SECRETARY_ROLE
queryset = self.filter(slug='secretary')
if queryset.exists():
COURSE_SECRETARY_ROLE = queryset.all()[0]
return COURSE_SECRETARY_ROLE
COURSE_SECRETARY_ROLE = UserCourseRole(name='Secretary', slug='secretary')
COURSE_SECRETARY_ROLE.save()
return COURSE_SECRETARY_ROLE
class UserCourseRole(models.Model):
name = models.CharField(_('Role Name'), blank=False, max_length=100)
slug = AutoSlugField(prepopulate_from=('name',), unique=True, blank=True, max_length=100)
objects = ManagerUserCourseRole()
class Meta:
verbose_name = _('Role for User/Course Relationship')
verbose_name_plural = _('Roles for User/Course Relationship')
def __unicode__(self):
return self.name
class RelUserCourse(models.Model):
user = models.ForeignKey(UserInfo, blank=False)
course = models.ForeignKey(Course, blank=False)
role = models.ForeignKey(UserCourseRole, blank=False)
period = models.CharField(_('Period'), blank=True, null=True, max_length=5)
#To speedup the system, these roles will be queried just once
DISCIPLINE_STUDENT_ROLE = None
DISCIPLINE_TEACHER_ROLE = None
class ManagerUserDisciplineRole(models.Manager):
def student_role(self):
global DISCIPLINE_STUDENT_ROLE
if DISCIPLINE_STUDENT_ROLE:
return DISCIPLINE_STUDENT_ROLE
queryset = self.filter(slug='student')
if queryset.exists():
DISCIPLINE_STUDENT_ROLE = queryset.all()[0]
return DISCIPLINE_STUDENT_ROLE
DISCIPLINE_STUDENT_ROLE = UserDisciplineRole(name='Student', slug='student')
DISCIPLINE_STUDENT_ROLE.save()
return DISCIPLINE_STUDENT_ROLE
def teacher_role(self):
global DISCIPLINE_TEACHER_ROLE
if DISCIPLINE_TEACHER_ROLE:
return DISCIPLINE_TEACHER_ROLE
queryset = self.filter(slug='teacher')
if queryset.exists():
DISCIPLINE_TEACHER_ROLE = queryset.all()[0]
return DISCIPLINE_TEACHER_ROLE
DISCIPLINE_TEACHER_ROLE = UserDisciplineRole(name='Teacher', slug='teacher')
DISCIPLINE_TEACHER_ROLE.save()
return DISCIPLINE_TEACHER_ROLE
class UserDisciplineRole(models.Model):
name = models.CharField(_('Role Name'), blank=False, max_length=100)
slug = AutoSlugField(prepopulate_from=('name',), unique=True, blank=True, max_length=100)
objects = ManagerUserDisciplineRole()
class Meta:
verbose_name = _('Role for User/Discipline Relationship')
verbose_name_plural = _('Roles for User/Discipline Relationship')
def __unicode__(self):
return self.name
class RelUserDiscipline(models.Model):
user = models.ForeignKey(UserInfo, blank=False)
discipline = models.ForeignKey(Discipline, blank=False)
role = models.ForeignKey(UserDisciplineRole, blank=False)
period = models.CharField(_('Period'), blank=True, null=True, max_length=5)
def invalidate_reluserdiscipline_cache(sender, instance, **kw):
if instance.user:
instance.user.schedule_cache = None
instance.user.save()
models.signals.pre_save.connect(invalidate_reluserdiscipline_cache, sender=RelUserDiscipline)
| bsd-3-clause | -775,253,353,824,724,200 | 40.678788 | 155 | 0.640686 | false |
j-i-l/cryptsypy | cryptsypy/CryptsyAccount.py | 1 | 7173 | from pyapi import Request,RequestPrivate
#this is going to be it
#from pyapi import AccountStructure
from CryptsyInfo import Info
import time
# <codecell>
# Put the Account class into pyapi and inherit the specific platform account
#from the general class.
class Account():
#class Account(AccountStructure):
#it does not make much sense to have the info in a class...
def __init__(self, PlatformInfo = Info(), public_key = '', private_key = '',):
"""
This class is designed to hold all information specific to
a user account on cryptsy.com.
Be carefull the secret (priv_key)
"""
#AccountStructure.__init__(self,
# PlatfromInfo = PlatformInfo,
# public_key = public_key,
# private_key = private_key,
# )
#
self._init_Requests(PlatformInfo = PlatformInfo)
        self.marketid = {}
        self.Pairs = {}
        self.OpenOrders = {}
        self._init_mid_pairs()
self.CryptoAdresses = {}
self.CryptoAdresses['LTC'] = 'LMGgCFsxJBjkPwAW9bn5MnZG4vyTGv1aJr'
#
self.pub_key = public_key
#
self.priv_key = private_key
#self.Request = private_request(Account = self)
        self.MyTrades = {}
        self.MyOrders = {}
        self.MyOpenOrders = {}
        self.MyTransactions = {}
        self.TradeHistory = {}
        self.Depths = {}
##Those have to adapted to the specific platform
self.command_account_info = 'getinfo'
self.command_market_info = 'getmarkets'
self.command_trades_history = ''
self.command_open_orders = ''
#not used
self.command_my_transactions = ''
self.command_my_trades = ''
self.command_my_orders = 'allmyorders'
self.command_new_order = 'createorder'
self.command_cancel_order = ''
self.command_cancel_all_orders = ''
self.parameter_ordertype = 'ordertype'
self.parameter_market = 'marketid'
self.parameter_quantity = 'quantity'
self.parameter_price = 'price'
self.parameter_order_id = ''
self.parameter_market_id = ''
return None
#
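    # Hedged usage sketch (added; keys and pair are placeholders, never commit real credentials):
    #   acct = Account(public_key='PUB...', private_key='PRIV...')
    #   acct.update_MyTransactions()
    #   acct.CreateOrder(('LTC', 'BTC'), 'Buy', quantity=1, price=0.01)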
def _init_Requests(self, PlatformInfo):
self.Request = RequestPrivate(Account = self, Info = PlatformInfo)
self.pubRequest = Request(Info = PlatformInfo)
return 0
def _init_mid_pairs(self,):
md = self.pubRequest.fetch('marketdatav2')['markets']
for p in md.keys():
pair = tuple(p.split('/'))
mid = md[p]['marketid']
self.Pairs[mid] = pair
self.marketid[pair] = mid
self.OpenOrders[pair] = md[p]['buyorders']
del md
return 0
#
def update_Info(self,):
return self.Request.fetch('getinfo')
#
def update_MarketInfo(self,):
return self.Request.fetch('getmarkets')
def update_MyTransactions(self,):
m_trans = self.Request.fetch('mytransactions')
for trans in m_trans:
self.MyTransactions[trans.pop('timestamp')] = trans
return 0
def update_TradeHistory(self, market):
"""market is a tuple"""
##self.marketid is to do!!!
mid = self.marketid(market)
history = self.Request.fetch('markettrades',params={'marketid':mid})
pair = self.Pairs[mid]
self.TradeHistory[pair] = history
return 0
def update_OpenOrders(self, market):
"""market is a tuple"""
        mid = self.marketid[market]
o_orders = self.Request.fetch('marketorders',params={'marketid':mid})
##check the form of o_orders
print o_orders
#self.OpenOrders[self.Pairs[mid]] =
return 0
def update_MyTrades(self, market = None, limit = 200):
if market:
mid = self.marketid[market]
pair = self.Pairs[mid]
method = 'mytrades'
params = {'marketid':mid, 'limit':limit}
else:
method = 'allmytrades'
params = {}
m_trades = self.Request.fetch(method,params = params)
#check format of m_trades
print m_trades
#self.MyTrades[pair] = m_trades
return 0
def update_MyOrders(self, market = None):
if market:
mid = self.marketid[market]
pair = self.Pairs[mid]
method = 'myorders'
params = {'marketid':mid}
else:
method = 'allmyorders'
params = {}
m_orders = self.Request.fetch(method, params = params)
##check the format.
#self.MyOrders[pair] = ...
print m_orders
return 0
def update_Depths(self, market):
#what is this again?
mid = self.marketid[market]
pair = self.Pairs[mid]
depths = self.Request.fetch('depth',params={'marketid':mid})
##check format
        #self.Depths[pair] = ...
return 0
#
def CreateOrder(self, market, order_type, quantity, price):
mid = self.marketid[market]
pair = self.Pairs[mid]
params = {
'marketid':mid,
'ordertype':order_type,
'quantity':quantity,
'price':price
}
        ##check if funds are sufficient, if the minimal value is exceeded, etc
if self._order_possible(params):
now = time.time()
oid = self.Request.fetch('createorder',params = params)
self.MyOpenOrders[oid] = params
self.MyOpenOrders[oid][u'timestamp'] = now
return 0
def _order_possible(self, params):
##to do
#if ok
# return True
#else:
# return False
return True
def CancelOrder(self, **orders):
if 'orderid' in orders:
c_o = self.Request.fetch('cancelorder',params={'orderid':orders['orderid']})
print c_o
#if successfull:
# if orderid in self.MyOpenOrders:
# self.MyOpenOrders.pop(orderid)
if 'marketid' in orders:
mid = orders['marketid']
c_o = self.Request.fetch('cancelmarketorders',params={'marketid':mid})
print c_o
#if successfull:
# remove them from self.MyOpenOrders (use marketid)
if not len(orders.keys()):
all_c_o = self.Request.fetch('cancelallorders')
##check the output
##update self.MyOpenOrders
print all_c_o
return 0
def get_fees(self, ordertype, quantity, price):
"""does this mean same fees for all markets?"""
params = {
'ordertype': ordertype,
'quantity': quantity,
'price': price
}
ret = self.Request.fetch('calculatefees',params=params)
print ret
return 0
    def _update_Fees(self,):
        """Update self.Fees (not implemented yet)."""
        #update self.Fees, e.g. via self.get_fees(ordertype, quantity, price)
return 0
| mit | -7,307,154,705,212,212,000 | 32.0553 | 88 | 0.538269 | false |
akretion/odoo | odoo/addons/base/tests/test_mimetypes.py | 10 | 3575 | import base64
import unittest
from odoo.tests.common import BaseCase
from odoo.tools.mimetypes import guess_mimetype
PNG = b'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVQI12P4//8/AAX+Av7czFnnAAAAAElFTkSuQmCC'
GIF = b"R0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs="
BMP = b"""Qk1+AAAAAAAAAHoAAABsAAAAAQAAAAEAAAABABgAAAAAAAQAAAATCwAAEwsAAAAAAAAAAAAAQkdScwAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAD///8A"""
JPG = """/9j/4AAQSkZJRgABAQEASABIAAD//gATQ3JlYXRlZCB3aXRoIEdJTVD/2wBDAP
//////////////////////////////////////////////////////////////////////////////////////2wBDAf///////
///////////////////////////////////////////////////////////////////////////////wgARCAABAAEDAREAAhEB
AxEB/8QAFAABAAAAAAAAAAAAAAAAAAAAAv/EABQBAQAAAAAAAAAAAAAAAAAAAAD/2gAMAwEAAhADEAAAAUf/xAAUEAEAAAAAAAA
AAAAAAAAAAAAA/9oACAEBAAEFAn//xAAUEQEAAAAAAAAAAAAAAAAAAAAA/9oACAEDAQE/AX//xAAUEQEAAAAAAAAAAAAAAAAAAA
AA/9oACAECAQE/AX//xAAUEAEAAAAAAAAAAAAAAAAAAAAA/9oACAEBAAY/An//xAAUEAEAAAAAAAAAAAAAAAAAAAAA/9oACAEBA
AE/IX//2gAMAwEAAgADAAAAEB//xAAUEQEAAAAAAAAAAAAAAAAAAAAA/9oACAEDAQE/EH//xAAUEQEAAAAAAAAAAAAAAAAAAAAA
/9oACAECAQE/EH//xAAUEAEAAAAAAAAAAAAAAAAAAAAA/9oACAEBAAE/EH//2Q=="""
SVG = b"""PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iaXNvLTg4NTktMSI/Pgo8IURPQ1RZUEUgc3ZnIFBVQkxJQyAiL
S8vVzNDLy9EVEQgU1ZHIDIwMDAxMTAyLy9FTiIKICJodHRwOi8vd3d3LnczLm9yZy9UUi8yMDAwL0NSLVNWRy0yMDAwMTEwMi9E
VEQvc3ZnLTIwMDAxMTAyLmR0ZCI+Cgo8c3ZnIHdpZHRoPSIxMDAlIiBoZWlnaHQ9IjEwMCUiPgogIDxnIHRyYW5zZm9ybT0idHJ
hbnNsYXRlKDUwLDUwKSI+CiAgICA8cmVjdCB4PSIwIiB5PSIwIiB3aWR0aD0iMTUwIiBoZWlnaHQ9IjUwIiBzdHlsZT0iZmlsbD
pyZWQ7IiAvPgogIDwvZz4KCjwvc3ZnPgo="""
class test_guess_mimetype(BaseCase):
def test_default_mimetype_empty(self):
mimetype = guess_mimetype(b'')
# odoo implementation returns application/octet-stream by default
# if available, python-magic returns application/x-empty
self.assertIn(mimetype, ('application/octet-stream', 'application/x-empty'))
def test_default_mimetype(self):
mimetype = guess_mimetype(b'', default='test')
# if available, python-magic returns application/x-empty
self.assertIn(mimetype, ('test', 'application/x-empty'))
def test_mimetype_octet_stream(self):
mimetype = guess_mimetype(b'\0')
self.assertEqual(mimetype, 'application/octet-stream')
def test_mimetype_png(self):
content = base64.b64decode(PNG)
mimetype = guess_mimetype(content, default='test')
self.assertEqual(mimetype, 'image/png')
def test_mimetype_bmp(self):
content = base64.b64decode(BMP)
mimetype = guess_mimetype(content, default='test')
# mimetype should match image/bmp, image/x-ms-bmp, ...
self.assertRegexpMatches(mimetype, r'image/.*\bbmp')
def test_mimetype_jpg(self):
content = base64.b64decode(JPG)
mimetype = guess_mimetype(content, default='test')
self.assertEqual(mimetype, 'image/jpeg')
def test_mimetype_gif(self):
content = base64.b64decode(GIF)
mimetype = guess_mimetype(content, default='test')
self.assertEqual(mimetype, 'image/gif')
def test_mimetype_svg(self):
content = base64.b64decode(SVG)
mimetype = guess_mimetype(content, default='test')
self.assertTrue(mimetype.startswith('image/svg'))
# Tests that whitespace padded SVG are not detected as SVG
mimetype = guess_mimetype(b" " + content, default='test')
self.assertNotIn("svg", mimetype)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | -6,423,755,072,895,653,000 | 46.666667 | 101 | 0.726434 | false |
gklyne/annalist | src/annalist_root/annalist_manager/tests/test_annalist_site.py | 1 | 3181 | """
Test module for annalist-manager site data management commands
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2018, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import sys
import os
from utils.py3porting import StringIO
from utils.StdoutContext import SwitchStdout, SwitchStderr
import annalist
from annalist.util import replacetree, removetree
from annalist_manager.tests import get_source_root
from annalist_manager.tests import test_annalist_base
from annalist_manager.am_main import runCommand
# -----------------------------------------------------------------------------
#
# Tests
#
# -----------------------------------------------------------------------------
class AnnalistManagerSiteTest(test_annalist_base.AnnalistManagerTestBase):
@classmethod
def setUpTestData(cls):
cls.setup_annalist_manager_test()
return
def setUp(self):
if os.path.isdir(self.sitehome):
removetree(self.sitehome)
return
def tearDown(self):
return
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_createsitedata(self):
stdoutbuf = StringIO()
with SwitchStdout(stdoutbuf):
runCommand(self.userhome, self.userconfig,
["annalist-manager", "createsitedata", "--config=runtests"]
)
stdoutbuf.seek(0)
stdoutlines = stdoutbuf.read().split("\n")
self.assertEqual(stdoutlines[0], "Initializing Annalist site in "+self.sitehome)
siteexists = os.path.isdir(self.sitehome)
collexists = os.path.isfile(os.path.join(self.sitehome, "c/_annalist_site/d/coll_meta.jsonld"))
self.assertTrue(siteexists, "Annalist site directory exists?")
self.assertTrue(collexists, "Annalist site collection metadata exists?")
return
def test_updatesitedata(self):
stdoutbuf = StringIO()
with SwitchStdout(stdoutbuf):
runCommand(self.userhome, self.userconfig,
["annalist-manager", "createsitedata", "--config=runtests"]
)
stdoutbuf.seek(0)
stdoutbuf = StringIO()
with SwitchStdout(stdoutbuf):
runCommand(self.userhome, self.userconfig,
["annalist-manager", "updatesitedata", "--config=runtests"]
)
stdoutbuf.seek(0)
stdoutlines = stdoutbuf.read().split("\n")
self.assertEqual(stdoutlines[0], "Copy Annalist site data")
siteexists = os.path.isdir(self.sitehome)
collexists = os.path.isfile(os.path.join(self.sitehome, "c/_annalist_site/d/coll_meta.jsonld"))
self.assertTrue(siteexists, "Annalist site directory exists?")
self.assertTrue(collexists, "Annalist site collection metadata exists?")
return
# End.
| mit | -5,898,015,406,387,909,000 | 34.741573 | 103 | 0.582207 | false |
Geotab/mygeotab-python | tests/test_dates.py | 1 | 4844 | # -*- coding: utf-8 -*-
from datetime import datetime
import pytz
from mygeotab import dates
class TestGetUtcDate:
def test_naive_datetime_to_utc(self):
date = datetime(2015, 3, 12, 2, 45, 34)
utc_date = dates.localize_datetime(date, pytz.utc)
assert utc_date.tzinfo is not None
assert utc_date.tzinfo is pytz.utc
assert utc_date.year == date.year
assert utc_date.month == date.month
assert utc_date.day == date.day
assert utc_date.hour == date.hour
def test_utc_datetime_to_utc(self):
date = pytz.utc.localize(datetime(2015, 3, 12, 2, 45, 34))
utc_date = dates.localize_datetime(date, pytz.utc)
assert utc_date.tzinfo is not None
assert utc_date.tzinfo is pytz.utc
assert utc_date.year == date.year
assert utc_date.month == date.month
assert utc_date.day == date.day
assert utc_date.hour == date.hour
def test_zoned_datetime_to_utc(self):
tz = pytz.timezone("US/Eastern")
date = tz.localize(datetime(2015, 3, 12, 2, 45, 34))
utc_date = dates.localize_datetime(date, pytz.utc)
check_date = date.astimezone(pytz.utc)
assert utc_date.tzinfo is not None
assert utc_date.tzinfo is pytz.utc
assert utc_date.year == check_date.year
assert utc_date.month == check_date.month
assert utc_date.day == check_date.day
assert utc_date.hour == check_date.hour
def test_zoned_min_datetime(self):
tz_aus = pytz.timezone("Australia/Sydney")
tz_est = pytz.timezone("America/Toronto")
date = datetime(1, 1, 1, tzinfo=tz_aus)
est_date = dates.localize_datetime(date, tz_est)
check_date = dates.MIN_DATE
assert est_date.tzinfo is not None
assert est_date.year == check_date.year
assert est_date.month == check_date.month
assert est_date.day == check_date.day
assert est_date.hour == check_date.hour
def test_zoned_max_datetime(self):
tz_aus = pytz.timezone("Australia/Sydney")
tz_est = pytz.timezone("America/Toronto")
date = datetime(9999, 12, 31, 23, 59, 59, 999, tzinfo=tz_est)
aus_date = dates.localize_datetime(date, tz_aus)
check_date = dates.MAX_DATE
assert aus_date.tzinfo is not None
assert aus_date.year == check_date.year
assert aus_date.month == check_date.month
assert aus_date.day == check_date.day
assert aus_date.hour == check_date.hour
class TestFormatIsoDate:
def test_format_naive_datetime(self):
date = datetime(2015, 3, 12, 2, 45, 34)
check_fmt = "2015-03-12T02:45:34.000Z"
fmt_date = dates.format_iso_datetime(date)
assert fmt_date == check_fmt
def test_format_utc_datetime(self):
date = pytz.utc.localize(datetime(2015, 3, 12, 2, 45, 34))
check_fmt = "2015-03-12T02:45:34.000Z"
fmt_date = dates.format_iso_datetime(date)
assert fmt_date == check_fmt
def test_format_local_datetime(self):
est = pytz.timezone("US/Eastern")
date = est.localize(datetime(2015, 3, 12, 2, 45, 34, 987000))
check_fmt = "2015-03-12T06:45:34.987Z"
fmt_date = dates.format_iso_datetime(date)
assert fmt_date == check_fmt
def test_format_far_past_date(self):
date = datetime(1, 1, 1, 0, 2, 34, 987000)
check_fmt = "0001-01-01T00:02:34.987Z"
fmt_date = dates.format_iso_datetime(date)
assert fmt_date == check_fmt
def test_format_far_past_date_utc(self):
date = datetime(1, 1, 1, 0, 2, 34, 987000, tzinfo=pytz.utc)
check_fmt = "0001-01-01T00:02:34.987Z"
fmt_date = dates.format_iso_datetime(date)
assert fmt_date == check_fmt
def test_format_far_past_date_invalid(self):
date = datetime(1, 1, 1, 0, 2, 34, 987000, tzinfo=pytz.timezone("Asia/Tokyo"))
check_fmt = "0001-01-01T00:00:00.000Z"
fmt_date = dates.format_iso_datetime(date)
assert fmt_date == check_fmt
def test_format_far_future_date(self):
date = datetime(9999, 12, 31, 23, 59, 58, 987000)
check_fmt = "9999-12-31T23:59:58.987Z"
fmt_date = dates.format_iso_datetime(date)
assert fmt_date == check_fmt
def test_format_far_future_date_utc(self):
date = datetime(9999, 12, 31, 23, 59, 58, 987000, tzinfo=pytz.utc)
check_fmt = "9999-12-31T23:59:58.987Z"
fmt_date = dates.format_iso_datetime(date)
assert fmt_date == check_fmt
def test_format_far_future_date_invalid(self):
date = datetime(9999, 12, 31, 23, 59, 58, 987000, tzinfo=pytz.timezone("America/Toronto"))
check_fmt = "9999-12-31T23:59:59.999Z"
fmt_date = dates.format_iso_datetime(date)
assert fmt_date == check_fmt
| apache-2.0 | -4,481,764,768,974,608,000 | 38.704918 | 98 | 0.619942 | false |
Micronaet/micronaet-migration | purchase_extra_field/purchase.py | 1 | 5464 | # -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import urllib
import base64
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class PurchaseOrder(orm.Model):
_inherit = 'purchase.order'
_columns = {
'delivery_note': fields.text('Delivery note'),
'payment_note': fields.text('Payment note'),
}
class PurchaseOrderLine(orm.Model):
''' Add here maybe not used
'''
_inherit = 'purchase.order.line'
_columns = {
'show_note': fields.text('Show note'),
'note': fields.text('Note'),
}
class ProductProductPurchase(orm.Model):
''' Add extra field in product (used in form and report)
'''
_inherit = 'product.product'
def get_quotation_image(self, cr, uid, item, context=None):
''' Get single image for the file
(default path is ~/photo/db_name/quotation
'''
img = ''
try:
extension = "jpg"
image_path = os.path.expanduser(
"~/photo/%s/product/quotation" % cr.dbname)
empty_image= "%s/%s.%s" % (image_path, "empty", extension)
product_browse = self.browse(cr, uid, item, context=context)
# Image compoesed with code format (code.jpg)
if product_browse.default_code:
(filename, header) = urllib.urlretrieve(
"%s/%s.%s" % (
image_path,
product_browse.default_code.replace(" ", "_"),
extension)) # code image
f = open(filename , 'rb')
img = base64.encodestring(f.read())
f.close()
if not img: # empty image:
(filename, header) = urllib.urlretrieve(empty_image)
f = open(filename , 'rb')
img = base64.encodestring(f.read())
f.close()
except:
try:
print (
"Image error", product_browse.default_code, sys.exc_info())
except:
pass
img = ''
return img
# Fields function:
def _get_quotation_image(self, cr, uid, ids, field_name, arg,
context=None):
''' Field function, for every ids test if there's image and return
base64 format according to code value (images are jpg)
'''
res = {}
for item in ids:
res[item] = self.get_quotation_image(
cr, uid, item, context=context)
return res
_columns = {
'colls_number': fields.integer('Colli'),
'colls': fields.char('Colli', size=30),
'colour_code': fields.char(
'Codice colore fornitore', size=64, translate=True),
# TODO moved in Micronaet/micronaet-product product_fist_supplier
'first_supplier_id': fields.many2one('res.partner', 'First supplier'),
'default_supplier': fields.char('Fornitore default', size=64),
'default_supplier_code': fields.char('Codice forn. default', size=40),
# TODO moved in Micronaet/micronaet-product product_fist_supplier
'package_type': fields.char('Package type', size=80),
'pack_l': fields.float('L. Imb.', digits=(16, 2)),
'pack_h': fields.float('H. Imb.', digits=(16, 2)),
'pack_p': fields.float('P. Imb.', digits=(16, 2)),
}
_defaults = {
#'quantity_x_pack': lambda *a: 1,
}
class PurchaseOrderLine(orm.Model):
''' Add extra field in purchase order line
'''
_inherit = 'purchase.order.line'
_columns = {
'q_x_pack': fields.related(
'product_id', 'q_x_pack', type='integer', string='Package'),
'colour': fields.related(
'product_id', 'colour', type='char', size=64, string='Color'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,421,605,581,973,595,600 | 34.947368 | 79 | 0.58071 | false |
LinDA-tools/LindaWorkbench | linda/linda_app/urls.py | 1 | 10160 | from django.contrib import admin
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from django.views.generic import RedirectView
from linda_app.forms import AutocompleteModelSearchForm
from haystack.views import SearchView, search_view_factory
from linda_app.installer import views as installer_views
import linda_app.views as views
import query_designer
admin.autodiscover()
admin.site.site_title = 'LinDA Administration'
admin.site.site_header = 'LinDA Administration Panel'
urlpatterns = patterns('',
# Basic pages
url(r'^$', views.index, name='home'),
url(r'^terms-of-use/$', views.terms, name='terms'),
url(r'^community/$', login_required(views.UserListView.as_view()), name='community'),
url(r'^settings/$', login_required(views.ConfigurationUpdateView.as_view()), name='settings'),
url(r'^get-started/$', views.getstarted, name='getstarted'),
# Endpoints
url(r'^sparql/$', views.sparql, name='sparql'),
url(r'^sparql/queries/(?P<q_id>\w+)/$', views.sparql, name='sparql'),
url(r'^sparql/(?P<dtname>[\w-]+)/$', views.datasource_sparql, name='datasource-sparql'),
# Visualizations
url(r'^visualizations/', include('visualisation.urls')),
# Analytics
url(r'^analytics/', include('analytics.urls', namespace="analytics")),
# Transformation Engine
url(r'^transformation/', include('transformation.urls')),
# Query Designer
url(r'^query-designer/', include('query_designer.urls')),
url(r'^api/query/(?P<q_id>\d+)/execute/$', query_designer.views.execute_query_api),
# Endpoint Monitor
url(r'^endpoint-monitor/', include('endpoint_monitor.urls')),
# Authentication
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'},
name='iamapps.logout'),
url(r'^accounts/', include('allauth.urls')),
url(r'^profile/(?P<pk>\d+)/$', login_required(views.profile), name='profile'),
url(r'^profile/(?P<pk>\w+)/edit$', login_required(views.UserUpdateView.as_view()),
name='profile-edit'),
# Messaging
(r'^messages/', include('messages.urls')),
# Site search
url(r'^find/$', views.site_search, name='site-search'), # Vocabulary search
url(r'^vocabularies/all/$', views.VocabularyListView.as_view()),
url(r'^vocabularies/categories/$', views.VocabularyListView.as_view()),
url(r'^classes/all/$', views.ClassListView.as_view()),
url(r'^properties/all/$', views.PropertyListView.as_view()),
url(r'^vocabularies/categories/all/$', views.categories),
url(r'^vocabularies/$', views.vocabulary_search),
url(r'^autocomplete/', views.autocomplete),
url(r'^search/vocabulary/', search_view_factory(
view_class=SearchView,
template='search/autocomplete.html',
form_class=AutocompleteModelSearchForm
), name='haystack_search'),
# Vocabularies
url(r'^vocabulary/(?P<pk>\d+)/$', views.VocabularyDetailsView.as_view(),
name='vocabulary-detail'),
url(r'^class/(?P<pk>\d+)/$', views.VocabularyClassDetailsView.as_view(),
name='class-detail'),
url(r'^property/(?P<pk>\d+)/$', views.VocabularyPropertyDetailsView.as_view(),
name='property-detail'),
url(r'^vocabulary/create/$', views.VocabularyCreateView.as_view(),
name='vocabulary-create'),
url(r'^vocabulary/(?P<pk>\d+)/edit/$', views.VocabularyUpdateView.as_view(),
name='vocabulary-edit'),
url(r'^vocabulary/(?P<pk>\d+)/delete/$', views.VocabularyDeleteView.as_view(),
name='vocabulary-delete'),
url(r'^vocabulary/(?P<pk>\d+)/(?P<slug>[\w-]+)/visualize/$', views.VocabularyVisualize.as_view(),
name='vocabulary-visualize'),
url(r'^vocabulary/(?P<pk>\d+)/comment/', views.postComment, name='vocabulary-comment'),
url(r'^vocabulary/(?P<pk>\d+)/rate/(?P<vt>\d+)/', views.rateDataset, name='vocabulary-rate'),
url(r'^vocabulary/(?P<pk>\d+)/download/(?P<type>[\w-]+)/$', views.downloadRDF,
name='vocabulary-download'),
url(r'^vocabulary/(?P<pk>\d+)/(?P<slug>[\w-]+)/$', views.VocabularyDetailsView.as_view(),
name='vocabulary-detail'),
# Vocabulary updates
url(r'^api/vocabularies/versions/$', views.get_vocabulary_versions,
name='vocabulary-version'),
url(r'^api/vocabularies/(?P<pk>\d+)/$', views.get_vocabulary_data,
name='vocabulary-get'),
url(r'^api/vocabularies/$', views.post_vocabulary_data,
name='vocabulary-get'),
url(r'^api/vocabularies/(?P<pk>\d+)/update/$', views.update_vocabulary_data,
name='vocabulary-update'),
url(r'^api/vocabularies/(?P<pk>\d+)/delete/$', views.delete_vocabulary_data,
name='vocabulary-delete'),
# Vocabulary repo proxy
url(r'^api/vocabulary-repo/(?P<link>.*)', views.vocabulary_repo_api_call,
name='vocabulary-repo-api-call'),
# Datasources
url(r'^datasources/$', views.datasources, name='datasources'),
url(r'^datasources/suggest/$', views.datasources_suggest, name='datasources'),
url(r'^datasources/suggestions/(?P<pk>\d+)/add/$', views.datasource_suggestion_add, name='datasources'),
url(r'^datasource/create/$', views.datasourceCreate,
name='datasource-create'),
url(r'^datasource/create/rdf/$', views.datasourceCreateRDF,
name='datasource-create-rdf'),
url(r'^datasource/(?P<dtname>[\w-]+)/download/$', views.datasourceDownloadRDF,
name='datasource-download-rdf'),
url(r'^datasource/(?P<name>[\w-]+)/replace/$', views.datasourceReplace,
name='datasource-replace'),
url(r'^datasource/(?P<dtname>[\w-]+)/replace/rdf/$', views.datasourceReplaceRDF,
name='datasource-replace-rdf'),
url(r'^datasource/(?P<dtname>[\w-]+)/delete/$', views.datasourceDelete,
name='datasource-delete'),
# Query Builder
url(r'^query/execute_sparql$', views.execute_sparql),
url(r'^query/(?P<pk>\d+)/visualize/', views.query_visualize),
url(r'^query/(?P<link>.*)', views.get_qbuilder_call, name='query-builder-proxy'),
url(r'^rdf2any/(?P<link>.*)', views.get_rdf2any_call, name='rdf2any-proxy'),
url(r'^query-builder/save/(?P<pk>\d+)/$', views.query_update, name='query-builder-update'),
url(r'^query-builder/clone/(?P<pk>\d+)/$', views.query_clone, name='query-builder-clone'),
url(r'^query-builder/delete/(?P<pk>\d+)/$', views.query_delete, name='query-builder-delete'),
url(r'^query-builder/save/$', views.query_save, name='query-builder-save'),
url(r'^query-builder/$', views.queryBuilder,
name='query-builder'),
url(r'^queries', views.QueryListView.as_view(), name='saved-queries'),
url(r'^assets/jar-loading.gif$', RedirectView.as_view(url='/static/images/jar-loading.gif')),
# Installer
url(r'^api/installer/run/$', installer_views.run_installer, name='run-installer'),
# API calls
url(r'^api/users/', login_required(views.api_users), name='users'),
url(r'^api/queries/default-description', views.default_description, name='users'),
url(r'^api/datasources/', views.api_datasources_list, name='datasources-list'),
url(r'^api/datasource/create/', views.api_datasource_create, name='datasource-create'),
url(r'^api/datasource/(?P<dtname>[\w-]+)/replace/', views.api_datasource_replace,
name='datasource-replace'),
url(r'^api/datasource/(?P<dtname>[\w-]+)/rename/', views.api_datasource_rename,
name='datasource-rename'),
url(r'^api/datasource/(?P<dtname>[\w-]+)/delete/', views.api_datasource_delete,
name='datasource-delete'),
url(r'^api/datasource/(?P<dtname>[\w-]+)/', views.api_datasource_get, name='datasource-get'),
url(r'coreapi/', include('coreapi.urls')),
)
| mit | 8,392,267,029,180,516,000 | 58.415205 | 127 | 0.504528 | false |
drewsonne/cfn-json-to-yaml | cfnjsontoyaml/mixins/__init__.py | 1 | 1048 | from cfnjsontoyaml.yamlobject.base64 import Base64
from cfnjsontoyaml.yamlobject.equals import Equals
from cfnjsontoyaml.yamlobject.findinmap import FindInMap
from cfnjsontoyaml.yamlobject.fnand import And
from cfnjsontoyaml.yamlobject.fnif import If
from cfnjsontoyaml.yamlobject.fnnot import Not
from cfnjsontoyaml.yamlobject.fnor import Or
from cfnjsontoyaml.yamlobject.getatt import GetAtt
from cfnjsontoyaml.yamlobject.getazs import GetAZs
from cfnjsontoyaml.yamlobject.importvalue import ImportValue
from cfnjsontoyaml.yamlobject.join import Join
from cfnjsontoyaml.yamlobject.ref import Ref
from cfnjsontoyaml.yamlobject.select import Select
from cfnjsontoyaml.yamlobject.sub import Sub
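# Maps CloudFormation intrinsic function names to the YAML objects used to emit them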
FUNCTION_MAPPING = {
'Ref': Ref,
'Fn::Base64': Base64,
'Fn::FindInMap': FindInMap,
'Fn::GetAtt': GetAtt,
'Fn::GetAZs':GetAZs,
'Fn::ImportValue': ImportValue,
'Fn::Join': Join,
'Fn::Select': Select,
'Fn::Sub': Sub,
'Fn::And': And,
'Fn::Equals': Equals,
'Fn::If': If,
'Fn::Not': Not,
'Fn::Or': Or
}
| lgpl-3.0 | -9,176,360,153,192,987,000 | 32.806452 | 60 | 0.754771 | false |
DantestyleXD/MVM5B_BOT | plugins/mine.py | 1 | 2025 | # -*- coding: utf-8 -*-
from config import *
print(Color(
'{autored}[{/red}{autoyellow}+{/yellow}{autored}]{/red} {autocyan} mine.py importado.{/cyan}'))
@bot.message_handler(commands=['mine'])
def command_COMANDO(m):
cid = m.chat.id
uid = m.from_user.id
try:
send_udp('mine')
except Exception as e:
bot.send_message(52033876, send_exception(e), parse_mode="Markdown")
if not is_recent(m):
return None
if is_banned(uid):
if not extra['muted']:
bot.reply_to(m, responses['banned'])
return None
if is_user(cid):
if cid in [52033876, 4279004]:
parametro = m.text.split(' ')[1] if len(
m.text.split(' ')) > 1 else None
tmp = int(os.popen('ps aux | grep java | wc -l').read())
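            # 'ps aux | grep java | wc -l' also counts the grep process itself,
            # so 3 lines means the Minecraft server is running and 2 means it is stopped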
if not parametro:
if tmp == 3:
bot.send_message(cid, "Servidor de minecraft encendido.")
elif tmp == 2:
bot.send_message(cid, "Servidor de minecraft apagado.")
else:
bot.send_message(
52033876,
"@Edurolp mira el server del minecraft que algo le pasa. tmp = {}".format(tmp))
else:
if parametro == 'start':
if tmp == 2:
bot.send_message(cid, "Iniciando servidor.")
os.popen('pm2 start 8')
else:
bot.send_message(
cid,
"Se supone que el server ya está encendido, avisa a @Edurolp si no funciona.")
if parametro == 'stop':
if tmp > 2:
bot.send_message(cid, "Apagando servidor.")
os.popen('pm2 stop 8')
else:
                    bot.send_message(cid, "El servidor ya estaba apagado.")
else:
bot.send_message(cid, responses['not_user'])
| gpl-2.0 | 7,239,661,459,171,880,000 | 37.188679 | 106 | 0.473814 | false |
fhqgfss/MoHa | moha/posthf/pt/mp.py | 1 | 2331 | import numpy as np
def spinfock(eorbitals):
"""
"""
if type(eorbitals) is np.ndarray:
dim = 2*len(eorbitals)
fs = np.zeros(dim)
for i in range(0,dim):
fs[i] = eorbitals[i//2]
fs = np.diag(fs) # put MO energies in diagonal array
elif type(eorbitals) is dict:
dim = 2*len(eorbitals['alpha'])
fs = np.zeros(dim)
for i in range(0,dim):
if i%2==0:
fs[i] = eorbitals['alpha'][i//2]
            elif i%2==1:
fs[i] = eorbitals['beta'][i//2]
fs = np.diag(fs) # put MO energies in diagonal array
return fs
class MPNSolver(object):
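    """Moller-Plesset perturbation theory (MPn) energy solvers."""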
def __init__(self,energy):
self.energy = energy
@classmethod
def mp2(cls,hfwavefunction,hamiltonian):
occ = hfwavefunction.occ
C = hfwavefunction.coefficient
eorbitals = hfwavefunction.eorbitals
Emp2 = 0.0
if occ['alpha'] == occ['beta']:
Eri = hamiltonian.operators['electron_repulsion'].basis_transformation(C)
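            # closed-shell MP2 energy: sum over occupied i,j and virtual a,b of
            # (ia|jb) * [2(ia|jb) - (ib|ja)] / (e_i + e_j - e_a - e_b)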
for i in range(occ['alpha']):
for j in range(occ['alpha']):
for a in range(occ['alpha'],hamiltonian.dim):
for b in range(occ['alpha'],hamiltonian.dim):
Emp2 += Eri[i,a,j,b]*(2*Eri[i,a,j,b]-Eri[i,b,j,a])/(eorbitals[i] + eorbitals[j] -eorbitals[a] - eorbitals[b])
elif occ['alpha'] != occ['beta']:
for spin in C:
Eri = hamiltonian.operators['electron_repulsion'].basis_transformation(C[spin])
for i in range(occ[spin]):
for j in range(occ[spin]):
for a in range(occ[spin],hamiltonian.dim):
for b in range(occ[spin],hamiltonian.dim):
Emp2 += Eri[i,a,j,b]*(Eri[i,a,j,b]-0.5*Eri[i,b,j,a])/(eorbitals[spin][i] + eorbitals[spin][j] -eorbitals[spin][a] - eorbitals[spin][b])
print '{0:2s} {1:3f}'.format('Escf', hfwavefunction.Etot)
print '{0:2s} {1:3f}'.format('Emp2', Emp2)
print '{0:2s} {1:3f}'.format('Etot', hfwavefunction.Etot+Emp2)
return Emp2
@classmethod
def mp3(cls,hfwavefunction,hamiltonian):
occ = hfwavefunction.occ
C = hfwavefunction.coefficient
pass
| mit | -7,517,407,249,570,599,000 | 37.213115 | 167 | 0.531532 | false |
ppGodel/ADA2017 | Tareas/tarea5.py | 1 | 2081 | #flujos y arboles de expancion Ford-fulkerson y grafos densos,grandes y normales
#archivos necesarios dentro de la carpeta files
# graph.py, InstanciesGenerator.py
from files import graph
from files import InstanciesGenerator
import random
import time
# Generate a graph with high density and a high vertex count; to observe the algorithm's
# performance on diverse graphs it will be tested with 105, 205 and 1005 nodes
no_vertices = 205
ddn = InstanciesGenerator.Distribution(InstanciesGenerator.DistributionsTypes.uniform, 1, no_vertices-1 )
dw = InstanciesGenerator.Distribution(InstanciesGenerator.DistributionsTypes.normal, 15, 3)
generadorcon = InstanciesGenerator.GraphInstancesGenerator(graphtype = InstanciesGenerator.GraphTypes.connected,distribution_weight = dw,distribution_degree = ddn, directed = False )
# Try different graph densities to see the performance as the graph approaches the density of a complete graph
density = [0.8,0.85,0.90,0.95]
replicas = 5
for d in density:
gc = generadorcon.generateInstance('Test', no_vertices, round((no_vertices-1)*d*no_vertices))
a = random.choice(gc.vertices)
for r in range(replicas):
        # randomly pick a second vertex b (distinct from a) to compute the flow between them
b = random.choice(gc.vertices)
while len(gc.vertices)>2 and b.id == a.id:
b = random.choice(gc.vertices)
        # compute the maximum flow between the two vertices
gc.resetflow()
ti = time.clock()
mf = gc.shortaugmentingmaxflow(a.id,b.id)
tf = time.clock()-ti
ti = time.clock()
mb = gc.breadthfirstsearch(a.id)
tfb = time.clock()-ti
print(no_vertices,round((no_vertices-1)*d*no_vertices),r, mf, tf, tfb)
#se almacenaron los resultados en resultadosTarea5/result.pdf donde se ve que el algoritmo tarda mas cuando calcula mas flujo, (y que calculo mas caminos) o cuando aumenta la densidad del grafo. no se pudo contrastar contra el algoritmo de ford fulkerson que escoge un camino al azar ya que demoraba mucho mas tiempo para los experimentos.
| gpl-3.0 | 9,055,192,903,225,367,000 | 56.75 | 339 | 0.740741 | false |
gschizas/praw | praw/models/reddit/comment.py | 1 | 12884 | """Provide the Comment class."""
from typing import Any, Dict, Optional, TypeVar, Union
from ...const import API_PATH
from ...exceptions import ClientException, InvalidURL
from ...util.cache import cachedproperty
from ..comment_forest import CommentForest
from .base import RedditBase
from .mixins import (
FullnameMixin,
InboxableMixin,
ThingModerationMixin,
UserContentMixin,
)
from .redditor import Redditor
_Comment = TypeVar("_Comment")
_CommentModeration = TypeVar("_CommentModeration")
Reddit = TypeVar("Reddit")
Submission = TypeVar("Submission")
Subreddit = TypeVar("Subreddit")
class Comment(InboxableMixin, UserContentMixin, FullnameMixin, RedditBase):
"""A class that represents a reddit comments.
**Typical Attributes**
This table describes attributes that typically belong to objects of this
class. Since attributes are dynamically provided (see
:ref:`determine-available-attributes-of-an-object`), there is not a
guarantee that these attributes will always be present, nor is this list
comprehensive in any way.
======================= ===================================================
Attribute Description
======================= ===================================================
``author`` Provides an instance of :class:`.Redditor`.
``body`` The body of the comment.
``created_utc`` Time the comment was created, represented in
`Unix Time`_.
``distinguished`` Whether or not the comment is distinguished.
``edited`` Whether or not the comment has been edited.
``id`` The ID of the comment.
``is_submitter`` Whether or not the comment author is also the
author of the submission.
``link_id`` The submission ID that the comment belongs to.
``parent_id`` The ID of the parent comment. If it is a top-level
comment, this returns the submission ID instead
(prefixed with 't3').
``permalink`` A permalink for the comment. Comment objects from
the inbox have a ``context`` attribute instead.
``replies`` Provides an instance of :class:`.CommentForest`.
``score`` The number of upvotes for the comment.
``stickied`` Whether or not the comment is stickied.
``submission`` Provides an instance of :class:`.Submission`. The
submission that the comment belongs to.
``subreddit`` Provides an instance of :class:`.Subreddit`. The
subreddit that the comment belongs to.
``subreddit_id`` The subreddit ID that the comment belongs to.
======================= ===================================================
.. _Unix Time: https://en.wikipedia.org/wiki/Unix_time
"""
MISSING_COMMENT_MESSAGE = (
"This comment does not appear to be in the comment tree"
)
STR_FIELD = "id"
@staticmethod
def id_from_url(url: str) -> str:
"""Get the ID of a comment from the full URL."""
parts = RedditBase._url_parts(url)
try:
comment_index = parts.index("comments")
except ValueError:
raise InvalidURL(url)
if len(parts) - 4 != comment_index:
raise InvalidURL(url)
return parts[-1]
@property
def _kind(self):
"""Return the class's kind."""
return self._reddit.config.kinds["comment"]
@property
def is_root(self) -> bool:
"""Return True when the comment is a top level comment."""
parent_type = self.parent_id.split("_", 1)[0]
return parent_type == self._reddit.config.kinds["submission"]
@cachedproperty
def mod(self) -> _CommentModeration:
"""Provide an instance of :class:`.CommentModeration`.
Example usage:
.. code-block:: python
comment = reddit.comment('dkk4qjd')
comment.mod.approve()
"""
return CommentModeration(self)
@property
def replies(self) -> CommentForest:
"""Provide an instance of :class:`.CommentForest`.
This property may return an empty list if the comment
has not been refreshed with :meth:`.refresh()`
Sort order and reply limit can be set with the ``reply_sort`` and
``reply_limit`` attributes before replies are fetched, including
any call to :meth:`.refresh`:
.. code-block:: python
comment.reply_sort = 'new'
comment.refresh()
replies = comment.replies
.. note:: The appropriate values for ``reply_sort`` include ``best``,
``top``, ``new``, ``controversial``, ``old`` and ``q&a``.
"""
if isinstance(self._replies, list):
self._replies = CommentForest(self.submission, self._replies)
return self._replies
@property
def submission(self) -> Submission:
"""Return the Submission object this comment belongs to."""
if not self._submission: # Comment not from submission
self._submission = self._reddit.submission(
self._extract_submission_id()
)
return self._submission
@submission.setter
def submission(self, submission: Submission):
"""Update the Submission associated with the Comment."""
submission._comments_by_id[self.name] = self
self._submission = submission
# pylint: disable=not-an-iterable
for reply in getattr(self, "replies", []):
reply.submission = submission
def __init__(
self,
reddit: Reddit,
id: Optional[str] = None, # pylint: disable=redefined-builtin
url: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Construct an instance of the Comment object."""
if (id, url, _data).count(None) != 2:
raise TypeError(
"Exactly one of `id`, `url`, or `_data` must be provided."
)
self._replies = []
self._submission = None
super().__init__(reddit, _data=_data)
if id:
self.id = id
elif url:
self.id = self.id_from_url(url)
else:
self._fetched = True
def __setattr__(
self,
attribute: str,
value: Union[str, Redditor, CommentForest, Subreddit],
):
"""Objectify author, replies, and subreddit."""
if attribute == "author":
value = Redditor.from_data(self._reddit, value)
elif attribute == "replies":
if value == "":
value = []
else:
value = self._reddit._objector.objectify(value).children
attribute = "_replies"
elif attribute == "subreddit":
value = self._reddit.subreddit(value)
super().__setattr__(attribute, value)
def _fetch_info(self):
return ("info", {}, {"id": self.fullname})
def _fetch_data(self):
name, fields, params = self._fetch_info()
path = API_PATH[name].format(**fields)
return self._reddit.request("GET", path, params)
def _fetch(self):
data = self._fetch_data()
data = data["data"]
if not data["children"]:
raise ClientException(
"No data returned for comment {}".format(
self.__class__.__name__, self.fullname
)
)
comment_data = data["children"][0]["data"]
other = type(self)(self._reddit, _data=comment_data)
self.__dict__.update(other.__dict__)
self._fetched = True
def _extract_submission_id(self):
if "context" in self.__dict__:
return self.context.rsplit("/", 4)[1]
return self.link_id.split("_", 1)[1]
def parent(self) -> Union[_Comment, Submission]:
"""Return the parent of the comment.
The returned parent will be an instance of either
:class:`.Comment`, or :class:`.Submission`.
If this comment was obtained through a :class:`.Submission`, then its
entire ancestry should be immediately available, requiring no extra
network requests. However, if this comment was obtained through other
means, e.g., ``reddit.comment('COMMENT_ID')``, or
``reddit.inbox.comment_replies``, then the returned parent may be a
lazy instance of either :class:`.Comment`, or :class:`.Submission`.
Lazy comment example:
.. code-block:: python
comment = reddit.comment('cklhv0f')
parent = comment.parent()
# `replies` is empty until the comment is refreshed
print(parent.replies) # Output: []
parent.refresh()
print(parent.replies) # Output is at least: [Comment(id='cklhv0f')]
.. warning:: Successive calls to :meth:`.parent()` may result in a
network request per call when the comment is not obtained through a
:class:`.Submission`. See below for an example of how to minimize
requests.
If you have a deeply nested comment and wish to most efficiently
discover its top-most :class:`.Comment` ancestor you can chain
successive calls to :meth:`.parent()` with calls to :meth:`.refresh()`
at every 9 levels. For example:
.. code-block:: python
comment = reddit.comment('dkk4qjd')
ancestor = comment
refresh_counter = 0
while not ancestor.is_root:
ancestor = ancestor.parent()
if refresh_counter % 9 == 0:
ancestor.refresh()
refresh_counter += 1
print('Top-most Ancestor: {}'.format(ancestor))
The above code should result in 5 network requests to Reddit. Without
the calls to :meth:`.refresh()` it would make at least 31 network
requests.
"""
# pylint: disable=no-member
if self.parent_id == self.submission.fullname:
return self.submission
if self.parent_id in self.submission._comments_by_id:
# The Comment already exists, so simply return it
return self.submission._comments_by_id[self.parent_id]
# pylint: enable=no-member
parent = Comment(self._reddit, self.parent_id.split("_", 1)[1])
parent._submission = self.submission
return parent
def refresh(self):
"""Refresh the comment's attributes.
If using :meth:`.Reddit.comment` this method must be called in order to
obtain the comment's replies.
Example usage:
.. code-block:: python
comment = reddit.comment('dkk4qjd')
comment.refresh()
"""
if "context" in self.__dict__: # Using hasattr triggers a fetch
comment_path = self.context.split("?", 1)[0]
else:
path = API_PATH["submission"].format(id=self.submission.id)
comment_path = "{}_/{}".format(path, self.id)
# The context limit appears to be 8, but let's ask for more anyway.
params = {"context": 100}
if "reply_limit" in self.__dict__:
params["limit"] = self.reply_limit
if "reply_sort" in self.__dict__:
params["sort"] = self.reply_sort
comment_list = self._reddit.get(comment_path, params=params)[
1
].children
if not comment_list:
raise ClientException(self.MISSING_COMMENT_MESSAGE)
# With context, the comment may be nested so we have to find it
comment = None
queue = comment_list[:]
while queue and (comment is None or comment.id != self.id):
comment = queue.pop()
if isinstance(comment, Comment):
queue.extend(comment._replies)
if comment.id != self.id:
raise ClientException(self.MISSING_COMMENT_MESSAGE)
if self._submission is not None:
del comment.__dict__["_submission"] # Don't replace if set
self.__dict__.update(comment.__dict__)
for reply in comment_list:
reply.submission = self.submission
return self
class CommentModeration(ThingModerationMixin):
"""Provide a set of functions pertaining to Comment moderation.
Example usage:
.. code-block:: python
comment = reddit.comment('dkk4qjd')
comment.mod.approve()
"""
REMOVAL_MESSAGE_API = "removal_comment_message"
def __init__(self, comment: Comment):
"""Create a CommentModeration instance.
:param comment: The comment to moderate.
"""
self.thing = comment
| bsd-2-clause | 8,281,210,984,734,887,000 | 34.888579 | 79 | 0.574589 | false |
DIPlib/diplib | pydip/src/__main__.py | 1 | 1301 | # PyDIP 3.0, Python bindings for DIPlib 3.0
# This file contains functionality to download bioformats
#
# (c)2020, Wouter Caarls
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, urllib.request
def progress(blocks, bs, size):
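    # urlretrieve reporthook: blocks transferred so far, block size in bytes, total file size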
barsize = 52
pct = blocks*bs / float(size)
bardone = int(pct*barsize)
print('[{0}{1}] {2: >3}%'.format('=' * bardone, '.'*(barsize-bardone), int(pct*100)), end='\r', flush=True)
if __name__ == '__main__':
if 'download_bioformats' in sys.argv:
url = 'https://downloads.openmicroscopy.org/bio-formats/6.5.0/artifacts/bioformats_package.jar'
filename = os.path.join(os.path.dirname(__file__), 'bioformats_package.jar')
print('Retrieving', url)
urllib.request.urlretrieve(url, filename, progress)
print()
| apache-2.0 | 2,904,086,616,590,238,000 | 39.65625 | 111 | 0.69485 | false |
FCP-INDI/C-PAC | CPAC/pipeline/cpac_runner.py | 1 | 27944 | import os
import glob
import warnings
import yaml
from multiprocessing import Process
from time import strftime
from voluptuous.error import Invalid
from CPAC.utils.configuration import Configuration
from CPAC.utils.ga import track_run
from CPAC.longitudinal_pipeline.longitudinal_workflow import (
anat_longitudinal_wf,
func_preproc_longitudinal_wf,
func_longitudinal_template_wf
)
from CPAC.utils.yaml_template import upgrade_pipeline_to_1_8
# Run condor jobs
def run_condor_jobs(c, config_file, subject_list_file, p_name):
# Import packages
import subprocess
from time import strftime
try:
sublist = yaml.safe_load(open(os.path.realpath(subject_list_file), 'r'))
except:
raise Exception("Subject list is not in proper YAML format. Please check your file")
cluster_files_dir = os.path.join(os.getcwd(), 'cluster_files')
subject_bash_file = os.path.join(cluster_files_dir, 'submit_%s.condor' % str(strftime("%Y_%m_%d_%H_%M_%S")))
f = open(subject_bash_file, 'w')
print("Executable = /usr/bin/python", file=f)
print("Universe = vanilla", file=f)
print("transfer_executable = False", file=f)
print("getenv = True", file=f)
print("log = %s" % os.path.join(cluster_files_dir, 'c-pac_%s.log' % str(strftime("%Y_%m_%d_%H_%M_%S"))), file=f)
for sidx in range(1,len(sublist)+1):
print("error = %s" % os.path.join(cluster_files_dir, 'c-pac_%s.%s.err' % (str(strftime("%Y_%m_%d_%H_%M_%S")), str(sidx))), file=f)
print("output = %s" % os.path.join(cluster_files_dir, 'c-pac_%s.%s.out' % (str(strftime("%Y_%m_%d_%H_%M_%S")), str(sidx))), file=f)
print("arguments = \"-c 'import CPAC; CPAC.pipeline.cpac_pipeline.run( ''%s'',''%s'',''%s'',''%s'',''%s'',''%s'',''%s'')\'\"" % (str(config_file), subject_list_file, str(sidx), c.maskSpecificationFile, c.roiSpecificationFile, c.templateSpecificationFile, p_name), file=f)
print("queue", file=f)
f.close()
#commands.getoutput('chmod +x %s' % subject_bash_file )
print(subprocess.getoutput("condor_submit %s " % (subject_bash_file)))
# Create and run script for CPAC to run on cluster
def run_cpac_on_cluster(config_file, subject_list_file,
cluster_files_dir):
'''
Function to build a SLURM batch job submission script and
submit it to the scheduler via 'sbatch'
'''
# Import packages
import subprocess
import getpass
import re
from time import strftime
from indi_schedulers import cluster_templates
# Load in pipeline config
try:
pipeline_dict = yaml.safe_load(open(os.path.realpath(config_file), 'r'))
pipeline_config = Configuration(pipeline_dict)
except:
raise Exception('Pipeline config is not in proper YAML format. '\
'Please check your file')
# Load in the subject list
try:
sublist = yaml.safe_load(open(os.path.realpath(subject_list_file), 'r'))
except:
raise Exception('Subject list is not in proper YAML format. '\
'Please check your file')
# Init variables
timestamp = str(strftime("%Y_%m_%d_%H_%M_%S"))
job_scheduler = pipeline_config.pipeline_setup['system_config']['on_grid']['resource_manager'].lower()
# For SLURM time limit constraints only, hh:mm:ss
hrs_limit = 8 * len(sublist)
time_limit = '%d:00:00' % hrs_limit
# Batch file variables
shell = subprocess.getoutput('echo $SHELL')
user_account = getpass.getuser()
num_subs = len(sublist)
# Run CPAC via python -c command
python_cpac_str = 'python -c "from CPAC.pipeline.cpac_pipeline import run; '\
'run(\'%(config_file)s\', \'%(subject_list_file)s\', '\
'%(env_arr_idx)s, \'%(pipeline_name)s\', '\
'plugin=\'MultiProc\', plugin_args=%(plugin_args)s)"'
# Init plugin arguments
plugin_args = {'n_procs': pipeline_config.pipeline_setup['system_config']['max_cores_per_participant'],
'memory_gb': pipeline_config.pipeline_setup['system_config']['maximum_memory_per_participant']}
# Set up run command dictionary
run_cmd_dict = {'config_file' : config_file,
'subject_list_file' : subject_list_file,
'pipeline_name' : pipeline_config.pipeline_setup['pipeline_name'],
'plugin_args' : plugin_args}
# Set up config dictionary
config_dict = {'timestamp' : timestamp,
'shell' : shell,
'job_name' : 'CPAC_' + pipeline_config.pipeline_setup['pipeline_name'],
'num_tasks' : num_subs,
'queue' : pipeline_config.pipeline_setup['system_config']['on_grid']['SGE']['queue'],
'par_env' : pipeline_config.pipeline_setup['system_config']['on_grid']['SGE']['parallel_environment'],
'cores_per_task' : pipeline_config.pipeline_setup['system_config']['max_cores_per_participant'],
'user' : user_account,
'work_dir' : cluster_files_dir,
'time_limit' : time_limit}
# Get string template for job scheduler
if job_scheduler == 'pbs':
env_arr_idx = '$PBS_ARRAYID'
batch_file_contents = cluster_templates.pbs_template
confirm_str = '(?<=Your job-array )\d+'
exec_cmd = 'qsub'
elif job_scheduler == 'sge':
env_arr_idx = '$SGE_TASK_ID'
batch_file_contents = cluster_templates.sge_template
confirm_str = '(?<=Your job-array )\d+'
exec_cmd = 'qsub'
elif job_scheduler == 'slurm':
env_arr_idx = '$SLURM_ARRAY_TASK_ID'
batch_file_contents = cluster_templates.slurm_template
confirm_str = '(?<=Submitted batch job )\d+'
exec_cmd = 'sbatch'
# Populate rest of dictionary
config_dict['env_arr_idx'] = env_arr_idx
run_cmd_dict['env_arr_idx'] = env_arr_idx
config_dict['run_cmd'] = python_cpac_str % run_cmd_dict
# Populate string from config dict values
batch_file_contents = batch_file_contents % config_dict
# Write file
batch_filepath = os.path.join(cluster_files_dir, 'cpac_submit_%s.%s' \
% (timestamp, job_scheduler))
with open(batch_filepath, 'w') as f:
f.write(batch_file_contents)
# Get output response from job submission
out = subprocess.getoutput('%s %s' % (exec_cmd, batch_filepath))
# Check for successful qsub submission
if re.search(confirm_str, out) == None:
err_msg = 'Error submitting C-PAC pipeline run to %s queue' \
% job_scheduler
raise Exception(err_msg)
# Get pid and send to pid file
pid = re.search(confirm_str, out).group(0)
pid_file = os.path.join(cluster_files_dir, 'pid.txt')
with open(pid_file, 'w') as f:
f.write(pid)
def run_T1w_longitudinal(sublist, cfg):
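    # group the session dictionaries by subject ID so each subject's sessions are processed together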
subject_id_dict = {}
for sub in sublist:
if sub['subject_id'] in subject_id_dict:
subject_id_dict[sub['subject_id']].append(sub)
else:
subject_id_dict[sub['subject_id']] = [sub]
# subject_id_dict has the subject_id as keys and a list of
# sessions for each participant as value
valid_longitudinal_data = False
for subject_id, sub_list in subject_id_dict.items():
if len(sub_list) > 1:
valid_longitudinal_data = True
anat_longitudinal_wf(subject_id, sub_list, cfg)
elif len(sub_list) == 1:
warnings.warn("\n\nThere is only one anatomical session "
"for sub-%s. Longitudinal preprocessing "
"will be skipped for this subject."
"\n\n" % subject_id)
# Run C-PAC subjects via job queue
def run(subject_list_file, config_file=None, p_name=None, plugin=None,
plugin_args=None, tracking=True, num_subs_at_once=None, debug=False,
test_config=False):
# Import packages
import subprocess
import os
import pickle
import time
from CPAC.pipeline.cpac_pipeline import run_workflow
print('Run called with config file {0}'.format(config_file))
if not config_file:
import pkg_resources as p
config_file = \
p.resource_filename("CPAC",
os.path.join("resources",
"configs",
"pipeline_config_template.yml"))
# Init variables
sublist = None
if '.yaml' in subject_list_file or '.yml' in subject_list_file:
subject_list_file = os.path.realpath(subject_list_file)
else:
from CPAC.utils.bids_utils import collect_bids_files_configs, \
bids_gen_cpac_sublist
(file_paths, config) = collect_bids_files_configs(subject_list_file,
None)
sublist = bids_gen_cpac_sublist(subject_list_file, file_paths,
config, None)
if not sublist:
import sys
print("Did not find data in {0}".format(subject_list_file))
sys.exit(1)
# take date+time stamp for run identification purposes
unique_pipeline_id = strftime("%Y%m%d%H%M%S")
pipeline_start_stamp = strftime("%Y-%m-%d_%H:%M:%S")
# Load in pipeline config file
config_file = os.path.realpath(config_file)
try:
if not os.path.exists(config_file):
raise IOError
else:
try:
c = Configuration(yaml.safe_load(open(config_file, 'r')))
except Invalid:
try:
upgrade_pipeline_to_1_8(config_file)
c = Configuration(yaml.safe_load(open(config_file, 'r')))
except Exception as e:
import sys
print(
'C-PAC could not upgrade pipeline configuration file '
f'{config_file} to v1.8 syntax',
file=sys.stderr
)
raise e
except Exception as e:
raise e
except IOError:
print("config file %s doesn't exist" % config_file)
raise
except yaml.parser.ParserError as e:
error_detail = "\"%s\" at line %d" % (
e.problem,
e.problem_mark.line
)
raise Exception(
"Error parsing config file: {0}\n\n"
"Error details:\n"
" {1}"
"\n\n".format(config_file, error_detail)
)
except Exception as e:
raise Exception(
"Error parsing config file: {0}\n\n"
"Error details:\n"
" {1}"
"\n\n".format(config_file, e)
)
c.pipeline_setup['log_directory']['path'] = os.path.abspath(c.pipeline_setup['log_directory']['path'])
c.pipeline_setup['working_directory']['path'] = os.path.abspath(c.pipeline_setup['working_directory']['path'])
if 's3://' not in c.pipeline_setup['output_directory']['path']:
c.pipeline_setup['output_directory']['path'] = os.path.abspath(c.pipeline_setup['output_directory']['path'])
if debug:
c.pipeline_setup['output_directory']['path']['write_debugging_outputs'] = "[1]"
if num_subs_at_once:
if not str(num_subs_at_once).isdigit():
raise Exception('[!] Value entered for --num_cores not a digit.')
c.pipeline_setup['system_config']['num_participants_at_once'] = int(num_subs_at_once)
# Do some validation
if not c.pipeline_setup['working_directory']['path']:
raise Exception('Working directory not specified')
if len(c.pipeline_setup['working_directory']['path']) > 70:
warnings.warn("We recommend that the working directory full path "
"should have less then 70 characters. "
"Long paths might not work in your operational system.")
warnings.warn("Current working directory: %s" % c.pipeline_setup['working_directory']['path'])
# Get the pipeline name
p_name = p_name or c.pipeline_setup['pipeline_name']
# Load in subject list
try:
if not sublist:
sublist = yaml.safe_load(open(subject_list_file, 'r'))
except:
print("Subject list is not in proper YAML format. Please check " \
"your file")
raise Exception
# Populate subject scan map
sub_scan_map = {}
try:
for sub in sublist:
if sub['unique_id']:
s = sub['subject_id'] + "_" + sub["unique_id"]
else:
s = sub['subject_id']
scan_ids = ['scan_anat']
if 'func' in sub:
for id in sub['func']:
scan_ids.append('scan_'+ str(id))
if 'rest' in sub:
for id in sub['rest']:
scan_ids.append('scan_'+ str(id))
sub_scan_map[s] = scan_ids
except:
print("\n\n" + "ERROR: Subject list file not in proper format - " \
"check if you loaded the correct file?" + "\n" + \
"Error name: cpac_runner_0001" + "\n\n")
raise Exception
pipeline_timing_info = []
pipeline_timing_info.append(unique_pipeline_id)
pipeline_timing_info.append(pipeline_start_stamp)
pipeline_timing_info.append(len(sublist))
if tracking:
try:
track_run(
level='participant' if not test_config else 'test',
participants=len(sublist)
)
except:
print("Usage tracking failed for this run.")
# If we're running on cluster, execute job scheduler
if c.pipeline_setup['system_config']['on_grid']['run']:
# Create cluster log dir
cluster_files_dir = os.path.join(c.pipeline_setup['log_directory']['path'], 'cluster_files')
if not os.path.exists(cluster_files_dir):
os.makedirs(cluster_files_dir)
# Check if its a condor job, and run that
if 'condor' in c.pipeline_setup['system_config']['on_grid']['resource_manager'].lower():
run_condor_jobs(c, config_file, subject_list_file, p_name)
# All other schedulers are supported
else:
run_cpac_on_cluster(config_file, subject_list_file, cluster_files_dir)
# Run on one computer
else:
# Create working dir
if not os.path.exists(c.pipeline_setup['working_directory']['path']):
try:
os.makedirs(c.pipeline_setup['working_directory']['path'])
except:
err = "\n\n[!] CPAC says: Could not create the working " \
"directory: %s\n\nMake sure you have permissions " \
"to write to this directory.\n\n" % c.pipeline_setup['working_directory']['path']
raise Exception(err)
'''
if not os.path.exists(c.pipeline_setup['log_directory']['path']):
try:
os.makedirs(c.pipeline_setup['log_directory']['path'])
except:
err = "\n\n[!] CPAC says: Could not create the log " \
"directory: %s\n\nMake sure you have permissions " \
"to write to this directory.\n\n" % c.pipeline_setup['log_directory']['path']
raise Exception(err)
'''
# BEGIN LONGITUDINAL TEMPLATE PIPELINE
if hasattr(c, 'longitudinal_template_generation') and \
c.longitudinal_template_generation['run']:
run_T1w_longitudinal(sublist, c)
# TODO functional longitudinal pipeline
'''
if valid_longitudinal_data:
rsc_file_list = []
for dirpath, dirnames, filenames in os.walk(c.pipeline_setup[
'output_directory']['path']):
for f in filenames:
# TODO is there a better way to check output folder name?
if f != '.DS_Store' and 'T1w_longitudinal_pipeline' in dirpath:
rsc_file_list.append(os.path.join(dirpath, f))
subject_specific_dict = {subj: [] for subj in subject_id_dict.keys()}
session_specific_dict = {os.path.join(session['subject_id'], session['unique_id']): [] for session in sublist}
for rsc_path in rsc_file_list:
key = [s for s in session_specific_dict.keys() if s in rsc_path]
if key:
session_specific_dict[key[0]].append(rsc_path)
else:
subj = [s for s in subject_specific_dict.keys() if s in rsc_path]
if subj:
subject_specific_dict[subj[0]].append(rsc_path)
# update individual-specific outputs:
# anatomical_brain, anatomical_brain_mask and anatomical_reorient
for key in session_specific_dict.keys():
for f in session_specific_dict[key]:
sub, ses = key.split('/')
ses_list = [subj for subj in sublist if sub in subj['subject_id'] and ses in subj['unique_id']]
if len(ses_list) > 1:
raise Exception("There are several files containing " + f)
if len(ses_list) == 1:
ses = ses_list[0]
subj_id = ses['subject_id']
tmp = f.split(c.pipeline_setup['output_directory']['path'])[-1]
keys = tmp.split(os.sep)
if keys[0] == '':
keys = keys[1:]
if len(keys) > 1:
if ses.get('resource_pool') is None:
ses['resource_pool'] = {
keys[0].split(c.pipeline_setup['pipeline_name'] + '_')[-1]: {
keys[-2]: f
}
}
else:
strat_key = keys[0].split(c.pipeline_setup['pipeline_name'] + '_')[-1]
if ses['resource_pool'].get(strat_key) is None:
ses['resource_pool'].update({
strat_key: {
keys[-2]: f
}
})
else:
ses['resource_pool'][strat_key].update({
keys[-2]: f
})
for key in subject_specific_dict:
for f in subject_specific_dict[key]:
ses_list = [subj for subj in sublist if key in subj['anat']]
for ses in ses_list:
tmp = f.split(c.pipeline_setup['output_directory']['path'])[-1]
keys = tmp.split(os.sep)
if keys[0] == '':
keys = keys[1:]
if len(keys) > 1:
if ses.get('resource_pool') is None:
ses['resource_pool'] = {
keys[0].split(c.pipeline_setup['pipeline_name'] + '_')[-1]: {
keys[-2]: f
}
}
else:
strat_key = keys[0].split(c.pipeline_setup['pipeline_name'] + '_')[-1]
if ses['resource_pool'].get(strat_key) is None:
ses['resource_pool'].update({
strat_key: {
keys[-2]: f
}
})
else:
if keys[-2] == 'anatomical_brain' or keys[-2] == 'anatomical_brain_mask' or keys[-2] == 'anatomical_skull_leaf':
pass
elif 'apply_warp_anat_longitudinal_to_standard' in keys[-2] or 'fsl_apply_xfm_longitudinal' in keys[-2]:
# TODO update!!!
# it assumes session id == last key (ordered by session count instead of session id) + 1
# might cause problem if session id is not continuous
def replace_index(target1, target2, file_path):
index1 = file_path.index(target1)+len(target1)
index2 = file_path.index(target2)+len(target2)
file_str_list = list(file_path)
file_str_list[index1] = "*"
file_str_list[index2] = "*"
file_path_updated = "".join(file_str_list)
file_list = glob.glob(file_path_updated)
file_list.sort()
return file_list
if ses['unique_id'] == str(int(keys[-2][-1])+1):
if keys[-3] == 'seg_probability_maps':
f_list = replace_index('seg_probability_maps_', 'segment_prob_', f)
ses['resource_pool'][strat_key].update({
keys[-3]: f_list
})
elif keys[-3] == 'seg_partial_volume_files':
f_list = replace_index('seg_partial_volume_files_', 'segment_pve_', f)
ses['resource_pool'][strat_key].update({
keys[-3]: f_list
})
else:
ses['resource_pool'][strat_key].update({
keys[-3]: f # keys[-3]: 'anatomical_to_standard'
})
elif keys[-2] != 'warp_list':
ses['resource_pool'][strat_key].update({
keys[-2]: f
})
elif keys[-2] == 'warp_list':
if 'ses-'+ses['unique_id'] in tmp:
ses['resource_pool'][strat_key].update({
keys[-2]: f
})
for key in subject_specific_dict:
ses_list = [subj for subj in sublist if key in subj['anat']]
for ses in ses_list:
for reg_strat in strat_list:
try:
ss_strat_list = list(ses['resource_pool'])
for strat_key in ss_strat_list:
try:
ses['resource_pool'][strat_key].update({
'registration_method': reg_strat['registration_method']
})
except KeyError:
pass
except KeyError:
pass
yaml.dump(sublist, open(os.path.join(c.pipeline_setup['working_directory']['path'],'data_config_longitudinal.yml'), 'w'), default_flow_style=False)
print('\n\n' + 'Longitudinal pipeline completed.' + '\n\n')
# skip main preprocessing
if not c.anatomical_preproc['run'] and not c.functional_preproc['run']:
import sys
sys.exit()
'''
# END LONGITUDINAL TEMPLATE PIPELINE
# If it only allows one, run it linearly
if c.pipeline_setup['system_config']['num_participants_at_once'] == 1:
for sub in sublist:
run_workflow(sub, c, True, pipeline_timing_info,
p_name, plugin, plugin_args, test_config)
return
pid = open(os.path.join(c.pipeline_setup['working_directory']['path'], 'pid.txt'), 'w')
# Init job queue
job_queue = []
# Allocate processes
processes = [
Process(target=run_workflow,
args=(sub, c, True, pipeline_timing_info,
p_name, plugin, plugin_args, test_config))
for sub in sublist
]
# If we're allocating more processes than are subjects, run them all
if len(sublist) <= c.pipeline_setup['system_config']['num_participants_at_once']:
for p in processes:
p.start()
print(p.pid, file=pid)
# Otherwise manage resources to run processes incrementally
else:
idx = 0
while idx < len(sublist):
# If the job queue is empty and we haven't started indexing
if len(job_queue) == 0 and idx == 0:
# Init subject process index
idc = idx
# Launch processes (one for each subject)
for p in processes[idc: idc+c.pipeline_setup['system_config']['num_participants_at_once']]:
p.start()
print(p.pid, file=pid)
job_queue.append(p)
idx += 1
# Otherwise, jobs are running - check them
else:
# Check every job in the queue's status
for job in job_queue:
# If the job is not alive
if not job.is_alive():
# Find job and delete it from queue
print('found dead job ', job)
loc = job_queue.index(job)
del job_queue[loc]
# ...and start the next available process
# (subject)
processes[idx].start()
# Append this to job queue and increment index
job_queue.append(processes[idx])
idx += 1
# Add sleep so while loop isn't consuming 100% of CPU
time.sleep(2)
# Close PID txt file to indicate finish
pid.close() | bsd-3-clause | -3,887,706,195,881,829,400 | 45.266556 | 279 | 0.482179 | false |
Esri/executive-dashboard | ExecutiveReportingScript/executive_dashboard.py | 1 | 13149 | #-------------------------------------------------------------------------------
# Name: executive_dashboard.py
# Purpose:
#
# Author: Local Government
#
# Created: 05/06/2016 AM
# Version: Python 2.7
#-------------------------------------------------------------------------------
import json, urllib, arcrest, re
import arcpy
from arcrest.security import AGOLTokenSecurityHandler
from arcresthelper import securityhandlerhelper
from arcresthelper import common
from arcrest.agol import FeatureLayer
from datetime import datetime as dt
from datetime import timedelta as td
import getpass
import indicator_constants as ic
from os.path import dirname, join
# Messages
m1 = "Can not not create token to access map. Please check username, password, and organization URL."
m2 = "Can not access web map JSON. Please check map ID."
m3 = "Map does not contain the specified data layer"
m4 = "Map does not contain the specified stats layer"
m5 = "Apply a filter to the stats layer so that exactly one record is available in the map."
m6 = "Layer does not contain a filter that uses the provided date field, {0}, and the BETWEEN operator."
m7 = "Stats layer capabilities must include 'Update'."
def get_layer_properties(title, layers):
"""Parse the JSON of a web map and retrieve the URL of a specific layer,
and any filters that have been applied to that layer."""
for layer in layers:
if layer['title'] == title:
url = layer['url']
if 'layerDefinition' in layer:
query = layer['layerDefinition']['definitionExpression']
else:
query = "1=1"
return url, query
return "", ""
def connect_to_layer(url, sh, proxy_port=None, proxy_url=None, initialize=True):
"""Establish a connection to an ArcGIS Online feature layer"""
fl = FeatureLayer(
url=url,
securityHandler=sh,
proxy_port=proxy_port,
proxy_url=proxy_url,
initialize=initialize)
return fl
def count_features(layer, query="1=1"):
"""Count feature in a feature layer, optionally respecting a where clause"""
cnt = layer.query(where=query, returnGeometry=False, returnCountOnly=True)
return cnt['count']
def featureset_to_dict(fs):
"""Returns JSON of a feature set in dictionary format"""
fs_str = fs.toJSON
fs_dict =json.loads(fs_str)
return fs_dict
def get_attributes(layer, query="1=1", fields="*"):
"""Get all attributes for a record in a table"""
vals = layer.query(where=query, out_fields=fields, returnGeometry=False)
valsdict = featureset_to_dict(vals)
return valsdict['features'][0]['attributes']
def update_values(layer, field_info, query="1=1"):
"""Update feature values """
out_fields = ['objectid']
for fld in field_info:
out_fields.append(fld['FieldName'])
resFeats = layer.query(where=query, out_fields=",".join(out_fields))
for feat in resFeats:
for fld in field_info:
feat.set_value(fld["FieldName"],fld['ValueToSet'])
return layer
def trace():
"""
trace finds the line, the filename
and error message and returns it
to the user
"""
import traceback, inspect,sys
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
filename = inspect.getfile(inspect.currentframe())
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
def create_security_handler(security_type='Portal', username="", password="",
org_url="", proxy_url=None, proxy_port=None,
referer_url=None, token_url=None, certificatefile=None,
keyfile=None, client_id=None, secret_id=None):
"""Creates a security handler helper using the specified properties."""
securityinfo = {}
securityinfo['security_type'] = security_type#LDAP, NTLM, OAuth, Portal, PKI, ArcGIS
securityinfo['username'] = username
securityinfo['password'] = password
securityinfo['org_url'] = org_url
securityinfo['proxy_url'] = proxy_url
securityinfo['proxy_port'] = proxy_port
securityinfo['referer_url'] = referer_url
securityinfo['token_url'] = token_url
securityinfo['certificatefile'] = certificatefile
securityinfo['keyfile'] = keyfile
securityinfo['client_id'] = client_id
securityinfo['secret_id'] = secret_id
return securityhandlerhelper.securityhandlerhelper(securityinfo=securityinfo)
def get_epoch_time(date):
epoch = dt.utcfromtimestamp(0)
return (date - epoch).total_seconds() * 1000
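# For example (assuming a naive UTC datetime, as used elsewhere in this script):
#   get_epoch_time(dt(1970, 1, 2)) -> 86400000.0, i.e. one day = 86400 s = 86,400,000 ms since the epoch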
def main():
with open(join(dirname(__file__), 'DashboardLog.log'), 'a') as log_file:
# Get current time for report datetime range
start_time = dt.utcnow()
today_agol = get_epoch_time(start_time)
temp_fc = arcpy.env.scratchGDB + "\\temp_fc"
proj_out = "{}_proj".format(temp_fc)
min_date = None
try:
# Get security handler for organization content
org_shh = create_security_handler(security_type='Portal',
username=ic.org_username,
password=ic.org_password,
org_url=ic.org_url)
if org_shh.valid == False:
raise Exception(org_shh.message)
org_sh = org_shh.securityhandler
# Access map JSON
admin = arcrest.manageorg.Administration(securityHandler=org_sh)
item = admin.content.getItem(ic.map_id)
mapjson = item.itemData()
if 'error' in mapjson:
raise Exception(m2)
# Get security handler for ags services
ags_sh = None
if ic.ags_username is not None and ic.ags_username != "":
ags_sh = arcrest.AGSTokenSecurityHandler(username=ic.ags_username,
password=ic.ags_password,
token_url=ic.ags_token_url,
proxy_url=None,
proxy_port=None)
print "Getting stats layer info..."
# Get attributes of a single row in stats layer
statsurl, statsquery = get_layer_properties(ic.stats_layer_name,
mapjson['operationalLayers'])
if not statsurl:
raise Exception(m4)
if ic.stats_service_type in ['AGOL', 'Portal']:
statslayer = connect_to_layer(statsurl, org_sh)
else:
statslayer = connect_to_layer(statsurl, ags_sh)
if not count_features(statslayer, query=statsquery) == 1:
raise Exception(m5)
stats = get_attributes(statslayer, query=statsquery)
# If requested, update layer query using today as max date
if ic.auto_update_date_query:
print "Updating date filter on layer..."
if ic.report_duration:
# get diff value to min date
if ic.report_time_unit == 'minutes':
                        delta = td(minutes=ic.report_duration)
elif ic.report_time_unit == 'hours':
delta = td(hours=ic.report_duration)
elif ic.report_time_unit == 'days':
delta = td(days=ic.report_duration)
elif ic.report_time_unit == 'weeks':
delta = td(weeks=ic.report_duration)
min_date = start_time - delta
else:
# Use end date of previous report
min_date = stats[ic.end_date]
# update filter on layer
for layer in mapjson['operationalLayers']:
if layer['title'] == ic.data_layer_name:
try:
original_query = layer['layerDefinition']['definitionExpression']
#Find if the expression has a clause using the date field and Between operator
match = re.search(".*?{0} BETWEEN.*?'(.*?)'.*?AND.*?'(.*?)'.*".format(ic.date_field), original_query)
if match is None:
raise ValueError()
#Construct a new query replacing the min and max date values with the new dates
new_query = match.group()[0:match.start(1)] + min_date.strftime("%Y-%m-%d %H:%M:%S") + match.group()[match.end(1):match.start(2)] + start_time.strftime("%Y-%m-%d %H:%M:%S") + match.group()[match.end(2):]
# Update JSON with new query
layer['layerDefinition']['definitionExpression'] = new_query
                        except (ValueError, KeyError):
d = dt.strftime(dt.now(), "%Y-%m-%d %H:%M:%S")
log_file.write("{}:\n".format(d))
log_file.write("{}\n".format(m6.format(ic.date_field)))
print(m6.format(ic.date_field))
continue
# Commit update to AGOL item
useritem = item.userItem
params = arcrest.manageorg.ItemParameter()
useritem.updateItem(itemParameters = params,
text=json.dumps(mapjson))
# Retrieve the url and queries associated with the data and stats layers
print "Getting layer info..."
dataurl, dataquery = get_layer_properties(ic.data_layer_name, mapjson['operationalLayers'])
if not dataurl:
raise Exception(m3)
# Connect to the services
print "Connecting to data layer..."
if ic.data_service_type in ['AGOL', 'Portal']:
datalayer = connect_to_layer(dataurl, org_sh)
else:
datalayer = connect_to_layer(dataurl, ags_sh)
# If necessary, load new points to hosted service
if ic.data_feature_class:
                # only attempt append if there are new features
temp_fc = arcpy.CopyFeatures_management(ic.data_feature_class, temp_fc)
sr_output = datalayer.extent['spatialReference']['wkid']
temp_fc_proj = arcpy.Project_management(temp_fc, proj_out, sr_output)
# Load data from layer to service
datalayer.deleteFeatures(where="1=1")
datalayer.addFeatures(temp_fc_proj)
arcpy.Delete_management(temp_fc)
arcpy.Delete_management(temp_fc_proj)
# Count the data features that meet the map query
print "Counting features"
feature_cnt = count_features(datalayer, query=dataquery)
print "Getting new stats..."
# Current editor
editor = getpass.getuser()
attributes = get_attributes(statslayer, statsquery)
attributes[ic.datecurr] = today_agol
attributes[ic.date1] = stats[ic.datecurr]
attributes[ic.date2] = stats[ic.date1]
attributes[ic.date3] = stats[ic.date2]
attributes[ic.date4] = stats[ic.date3]
attributes[ic.observcurr] = feature_cnt
attributes[ic.observ1] = stats[ic.observcurr]
attributes[ic.observ2] = stats[ic.observ1]
attributes[ic.observ3] = stats[ic.observ2]
attributes[ic.observ4] = stats[ic.observ3]
attributes[ic.last_update] = today_agol
attributes[ic.last_editor] = editor
attributes[ic.end_date] = today_agol
if min_date is None:
attributes[ic.start_date] = stats[ic.end_date]
else:
attributes[ic.start_date] = get_epoch_time(min_date)
edits = [{"attributes" : attributes}]
statslayer.applyEdits(updateFeatures=edits)
print "Done."
        except common.ArcRestHelperError as e:
print "error in function: %s" % e[0]['function']
print "error on line: %s" % e[0]['line']
print "error in file name: %s" % e[0]['filename']
print "with error message: %s" % e[0]['synerror']
if 'arcpyError' in e[0]:
print "with arcpy message: %s" % e[0]['arcpyError']
except Exception as ex:
print("{}\n".format(ex))
d = dt.strftime(dt.now(), "%Y-%m-%d %H:%M:%S")
log_file.write("{}:\n".format(d))
log_file.write("{}\n".format(ex))
finally:
if arcpy.Exists(temp_fc):
arcpy.Delete_management(temp_fc)
if arcpy.Exists(proj_out):
arcpy.Delete_management(proj_out)
# End main function
if __name__ == '__main__':
main()
| apache-2.0 | 8,910,792,757,372,526,000 | 39.962617 | 231 | 0.558369 | false |
m-ober/byceps | byceps/services/attendance/service.py | 1 | 3675 | """
byceps.services.attendance.service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import defaultdict
from typing import Dict, Iterable, List, Optional, Set, Tuple
from ...database import db, paginate, Pagination
from ...typing import PartyID, UserID
from ..ticketing.models.ticket import Category as DbCategory, Ticket as DbTicket
from ..user.models.user import User as DbUser
from .transfer.models import Attendee, AttendeeTicket
def get_attendees_paginated(
party_id: PartyID,
page: int,
per_page: int,
*,
search_term: Optional[str] = None,
) -> Pagination:
"""Return the party's ticket users with tickets and seats."""
users_paginated = _get_users_paginated(
party_id, page, per_page, search_term=search_term
)
users = users_paginated.items
user_ids = {u.id for u in users}
tickets = _get_tickets_for_users(party_id, user_ids)
tickets_by_user_id = _index_tickets_by_user_id(tickets)
attendees = list(_generate_attendees(users, tickets_by_user_id))
users_paginated.items = attendees
return users_paginated
def _get_users_paginated(
party_id: PartyID,
page: int,
per_page: int,
*,
search_term: Optional[str] = None,
) -> Pagination:
# Drop revoked tickets here already to avoid users without tickets
# being included in the list.
query = DbUser.query \
.distinct() \
.options(
db.load_only('id', 'screen_name', 'deleted'),
db.joinedload('avatar_selection').joinedload('avatar'),
) \
.join(DbTicket, DbTicket.used_by_id == DbUser.id) \
.filter(DbTicket.revoked == False) \
.join(DbCategory).filter(DbCategory.party_id == party_id)
if search_term:
query = query \
.filter(DbUser.screen_name.ilike(f'%{search_term}%'))
query = query \
.order_by(db.func.lower(DbUser.screen_name))
return paginate(query, page, per_page)
def _get_tickets_for_users(
party_id: PartyID, user_ids: Set[UserID]
) -> List[DbTicket]:
return DbTicket.query \
.options(
db.joinedload('category'),
db.joinedload('occupied_seat').joinedload('area'),
) \
.for_party(party_id) \
.filter(DbTicket.used_by_id.in_(user_ids)) \
.filter(DbTicket.revoked == False) \
.all()
def _index_tickets_by_user_id(
tickets: Iterable[DbTicket]
) -> Dict[UserID, Set[DbTicket]]:
tickets_by_user_id = defaultdict(set)
for ticket in tickets:
tickets_by_user_id[ticket.used_by_id].add(ticket)
return tickets_by_user_id
def _generate_attendees(
users: Iterable[DbUser], tickets_by_user_id: Dict[UserID, Set[DbTicket]]
) -> Iterable[Attendee]:
for user in users:
tickets = tickets_by_user_id[user.id]
attendee_tickets = _to_attendee_tickets(tickets)
yield Attendee(user, attendee_tickets)
def _to_attendee_tickets(tickets: Iterable[DbTicket]) -> List[AttendeeTicket]:
attendee_tickets = [
AttendeeTicket(t.occupied_seat, t.user_checked_in) for t in tickets
]
attendee_tickets.sort(key=_get_attendee_ticket_sort_key)
return attendee_tickets
def _get_attendee_ticket_sort_key(
attendee_ticket: AttendeeTicket
) -> Tuple[bool, str, bool]:
return (
# List tickets with occupied seat first.
attendee_ticket.seat is None,
# Sort by seat label.
attendee_ticket.seat.label if attendee_ticket.seat else None,
# List checked in tickets first.
not attendee_ticket.checked_in,
)
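# For example, a ticket on seat 'A-1' that is checked in sorts before one on seat 'B-7'
# that is not, and both sort before a ticket without a seat (the keys are roughly
# (False, 'A-1', False) < (False, 'B-7', True) < (True, None, ...)).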
| bsd-3-clause | 7,584,223,255,903,972,000 | 28.4 | 80 | 0.647891 | false |
adamhaney/pykell | tests.py | 1 | 3811 | from unittest import TestCase
from .types import expects_type, returns_type, T
@expects_type(a=T(int), b=T(str))
def example_kw_arg_function(a, b):
return a, b
class ExpectsTests(TestCase):
def test_correct_expectations_kw(self):
self.assertEqual(example_kw_arg_function(a=1, b="baz"), (1, "baz"))
@returns_type(T(int))
def add(x, y):
return x + y
@returns_type(T(str))
def bad_add(x, y):
return x + y
class ReturnTests(TestCase):
def test_returns_type_positive(self):
self.assertEqual(add(x=1, y=2), 3)
def test_returns_type_negative(self):
with self.assertRaises(TypeError):
bad_add(x=1, y=2)
class TypeClassTests(TestCase):
def test_type_enforcement_positive(self):
str_type = T(str)
self.assertTrue(str_type.validate("abc"))
def test_type_enforcement_negative(self):
str_type = T(str)
with self.assertRaises(TypeError):
str_type.validate(27)
def test_data_enforcement_positive(self):
z_string = T(str, lambda d: d.startswith('z'))
self.assertTrue(z_string.validate('zab'))
def test_data_enforcement_negative(self):
z_string = T(str, lambda d: d.startswith('z'))
with self.assertRaises(TypeError):
z_string.validate('abc')
def test_multiple_types_positive(self):
"""
make sure we can add two types to the class and that it then
says an object having one of those types is valid
"""
str_int_type = T(int)
str_int_type.contribute_type(str)
self.assertTrue(str_int_type.validate(2))
self.assertTrue(str_int_type.validate("boo"))
def test_multiple_types_negative(self):
str_int_type = T(int)
str_int_type.contribute_type(str)
with self.assertRaises(TypeError):
str_int_type.validate(2.0)
def test_multiple_validators_positive(self):
a_z_type = T(str, lambda d: d.startswith('a'))
a_z_type.contribute_validator(lambda d: d.endswith('z'))
self.assertTrue("abcdz")
def test_multiple_validators_negative(self):
a_z_type = T(str, lambda d: d.startswith('a'))
a_z_type.contribute_validator(lambda d: d.endswith('z'))
with self.assertRaises(TypeError):
a_z_type.validate("abc")
def test_pipe_multi_type_syntax(self):
str_int_type = T(int) | T(str)
self.assertTrue(str_int_type.validate(2))
self.assertTrue(str_int_type.validate("boo"))
class PykellContributionTests(TestCase):
def setUp(self):
self.positive_even_number = T(int, lambda d: d > 0) | T(float, lambda d: d % 2 == 0)
def test_postive_float_is_valid(self):
self.assertTrue(self.positive_even_number.validate(2.0))
def test_positive_integer_is_valid(self):
self.assertTrue(self.positive_even_number.validate(4))
def test_negative_float_is_invalid(self):
with self.assertRaises(TypeError):
self.positive_even_number.validate(-4.0)
def test_negative_int_is_invalid(self):
with self.assertRaises(TypeError):
self.positive_even_number.validate(-4)
def test_odd_float_is_invalid(self):
with self.assertRaises(TypeError):
self.positive_even_number.validate(3.0)
def test_odd_int_is_invalid(self):
with self.assertRaises(TypeError):
self.positive_even_number.validate(3)
class TypeNotRequiredTests(TestCase):
"""
In some cases we may just care that a validator is true, not
what the underlying type is
"""
def setUp(self):
self.positive_something = T(validator=lambda d: d > 0)
def test_validator_without_type(self):
self.assertTrue(self.positive_something.validate(2))
| mit | 7,787,685,935,964,463,000 | 29.246032 | 92 | 0.635791 | false |
psathyrella/partis | python/treeutils.py | 1 | 183169 | import __builtin__
import operator
import string
import itertools
import copy
import collections
import random
import csv
from cStringIO import StringIO
import subprocess
import tempfile
import os
import numpy
import sys
from distutils.version import StrictVersion
import dendropy
import time
import math
import json
import pickle
import warnings
import traceback
if StrictVersion(dendropy.__version__) < StrictVersion('4.0.0'): # not sure on the exact version I need, but 3.12.0 is missing lots of vital tree fcns
raise RuntimeError("dendropy version 4.0.0 or later is required (found version %s)." % dendropy.__version__)
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
import utils
lb_metrics = collections.OrderedDict(('lb' + let, 'lb ' + lab) for let, lab in (('i', 'index'), ('r', 'ratio')))
selection_metrics = ['lbi', 'lbr', 'cons-dist-aa', 'cons-frac-aa', 'aa-lbi', 'aa-lbr'] # I really thought this was somewhere, but can't find it so adding it here
typical_bcr_seq_len = 400
default_lb_tau = 0.0025
default_lbr_tau_factor = 20
default_min_selection_metric_cluster_size = 10
dummy_str = 'x-dummy-x'
legtexts = {
'metric-for-target-distance' : 'target dist. metric',
'n-sim-seqs-per-generation' : 'N sampled',
'leaf-sampling-scheme' : 'sampling scheme',
'target-count' : 'N target seqs',
'n-target-clusters' : 'N target clust.',
'min-target-distance' : 'min target dist.',
'uniform-random' : 'unif. random',
'affinity-biased' : 'affinity biased',
'high-affinity' : 'perf. affinity',
'cons-dist-aa' : 'aa-cdist',
'cons-frac-aa' : 'aa-cfrac',
'cons-dist-nuc' : 'nuc-cdist',
'shm' : 'n-shm',
'aa-lbi' : 'aa-lbi',
'aa-lbr' : 'aa-lbr',
}
# ----------------------------------------------------------------------------------------
def smetric_fname(fname):
return utils.insert_before_suffix('-selection-metrics', fname)
# ----------------------------------------------------------------------------------------
def add_cons_seqs(line, aa=False):
ckey = 'consensus_seq'
if ckey not in line:
line[ckey] = utils.cons_seq_of_line(line)
if aa:
ckey += '_aa'
if ckey not in line:
line[ckey] = utils.cons_seq_of_line(line, aa=True)
# ----------------------------------------------------------------------------------------
def lb_cons_dist(line, iseq, aa=False, frac=False): # at every point where this can add something to <line> (i.e. consensus seqs and aa seqs) it checks that they're not already there, so it will never do those calculations twice. But the final hamming calculation is *not* cached so will get redone if you call more than once
if aa and 'seqs_aa' not in line:
utils.add_seqs_aa(line)
add_cons_seqs(line, aa=aa)
tstr = '_aa' if aa else ''
hfcn = utils.hamming_fraction if frac else utils.hamming_distance # NOTE it's important to use this if you want the fraction (rather than dividing by sequence length afterward) since you also need to account for ambig bases in the cons seq
return hfcn(line['consensus_seq'+tstr], line['seqs'+tstr][iseq], amino_acid=aa)
# ----------------------------------------------------------------------------------------
def add_cons_dists(line, aa=False, debug=False):
ckey = 'cons_dists_' + ('aa' if aa else 'nuc')
if ckey not in line:
line[ckey] = [lb_cons_dist(line, i, aa=aa) for i, u in enumerate(line['unique_ids'])]
if debug: # it would kind of make more sense to have this in some of the fcns that this fcn is calling, but then I'd have to pass the debug arg through a bunch of tiny fcns that don't really need it
tstr = '_aa' if aa else ''
# don't need this unless we turn the tie resolver stuff back on:
# if aa: # we have to add this by hand since we don't actually use it to calculate the aa cons seq -- we get that by just translating the nuc cons seq
# utils.add_naive_seq_aa(line)
hfkey = ckey.replace('cons_dists_', 'cons_fracs_')
line[hfkey] = [lb_cons_dist(line, i, aa=aa, frac=True) for i, u in enumerate(line['unique_ids'])]
extra_keys = [ckey, hfkey]
if 'cell-types' in line:
extra_keys.append('cell-types')
utils.print_cons_seq_dbg(utils.seqfos_from_line(line, aa=aa, extra_keys=extra_keys), line['consensus_seq'+tstr], align=False, aa=aa) # NOTE you probably don't want to turn the naive tie resolver back on in utils.cons_seq_of_line(), but if you do, this reminds you to also do it here so the dbg is correct, tie_resolver_seq=line['naive_seq'+tstr], tie_resolver_label='naive seq')
# ----------------------------------------------------------------------------------------
def add_cdists_to_lbfo(line, lbfo, cdist, debug=False): # it's kind of dumb to store them both in <line> and in <lbfo> (and thus in <line['tree-info']['lb']>), but I think it's ultimately the most sensible thing, given the inherent contradiction that a) we want to *treat* the cons dists like lbi/lbr tree metrics in almost every way, but b) they're *not* actually tree metrics in the sense that they don't use a tree (also, we want the minus sign in lbfo)
add_cons_dists(line, aa='-aa' in cdist, debug=debug)
tkey = cdist.replace('cons-dist-', 'cons_dists_') # yes, I want the names to be different (although admittedly with a time machine it'd be set up differently)
lbfo[cdist] = {u : -line[tkey][i] for i, u in enumerate(line['unique_ids'])}
# ----------------------------------------------------------------------------------------
def smvals(line, smetric, iseq=None, nullval=None): # retrieve selection metric values from within line['tree-info']['lb'][yadda yadda], i.e. as if they were a normal list-based per-seq quantity
# NOTE this is what you use if the values are already there, in 'tree-info' -- if you want to calculate them, there's other fcns
if 'tree-info' not in line or 'lb' not in line['tree-info'] or smetric not in line['tree-info']['lb']:
return [nullval for _ in line['unique_ids']] if iseq is None else nullval
lbfo = line['tree-info']['lb'][smetric]
if iseq is None:
return [lbfo.get(u, nullval) for u in line['unique_ids']]
else:
return lbfo.get(line['unique_ids'][iseq], nullval)
# ----------------------------------------------------------------------------------------
def lb_cons_seq_shm(line, aa=False):
add_cons_seqs(line, aa=aa)
if aa and 'naive_seq_aa' not in line:
utils.add_naive_seq_aa(line)
tstr = '_aa' if aa else ''
return utils.hamming_distance(line['naive_seq'+tstr], line['consensus_seq'+tstr], amino_acid=aa)
# ----------------------------------------------------------------------------------------
def edge_dist_fcn(dtree, uid): # duplicates fcn in lbplotting.make_lb_scatter_plots()
node = dtree.find_node_with_taxon_label(uid)
return min(node.distance_from_tip(), node.distance_from_root()) # NOTE the tip one gives the *maximum* distance to a leaf, but I think that's ok
# ----------------------------------------------------------------------------------------
cgroups = ['within-families', 'among-families'] # different ways of grouping clusters, i.e. "cluster groupings"
dtr_targets = {'within-families' : ['affinity', 'delta-affinity'], 'among-families' : ['affinity', 'delta-affinity']} # variables that we try to predict, i.e. we train on dtr for each of these
pchoices = ['per-seq', 'per-cluster'] # per-? choice, i.e. is this a per-sequence or per-cluster quantity
dtr_metrics = ['%s-%s-dtr'%(cg, tv) for cg in cgroups for tv in dtr_targets[cg]] # NOTE order of this has to remain the same as in the loops used to generate it
dtr_vars = {'within-families' : {'per-seq' : ['lbi', 'cons-dist-nuc', 'cons-dist-aa', 'edge-dist', 'lbr', 'shm', 'shm-aa'], # NOTE when iterating over this, you have to take the order from <pchoices>, since both pchoices go into the same list of variable values
'per-cluster' : []},
'among-families' : {'per-seq' : ['lbi', 'cons-dist-nuc', 'cons-dist-aa', 'edge-dist', 'lbr', 'shm', 'shm-aa'],
'per-cluster' : ['fay-wu-h', 'cons-seq-shm-nuc', 'cons-seq-shm-aa', 'mean-shm', 'max-lbi', 'max-lbr']},
}
default_dtr_options = {
# 'base-regr' :
'vars' : None, # uses <dtr_vars> for default
'min_samples_leaf' : 5, # only used for grad-boost and bag
'max_depth' : 5, # only used for grad-boost and bag
'ensemble' : 'grad-boost', # ['bag', 'forest', 'ada-boost',
'n_estimators' : 100,
'n_train_per_family' : 1, # for among-families dtr, only train on this many cells per family (to avoid over training). Set to None to use all of 'em
'n_jobs' : None, # default set below (also, this is not used for boosted ensembles)
}
# ----------------------------------------------------------------------------------------
def get_dtr_varnames(cgroup, varlists, with_pc=False): # arg, <with_pc> is fucking ugly
return [(pc, vn) if with_pc else vn for pc in pchoices for vn in varlists[cgroup][pc]]
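# For example, with the default <dtr_vars> defined above:
#   get_dtr_varnames('among-families', dtr_vars) -> ['lbi', 'cons-dist-nuc', 'cons-dist-aa', 'edge-dist', 'lbr', 'shm', 'shm-aa',
#                                                    'fay-wu-h', 'cons-seq-shm-nuc', 'cons-seq-shm-aa', 'mean-shm', 'max-lbi', 'max-lbr']
# (with_pc=True returns the same names paired with their per-seq/per-cluster choice, e.g. ('per-seq', 'lbi'), ('per-cluster', 'fay-wu-h'))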
# ----------------------------------------------------------------------------------------
def get_dtr_vals(cgroup, varlists, line, lbfo, dtree):
# ----------------------------------------------------------------------------------------
def getval(pchoice, var, uid):
if pchoice == 'per-seq':
if var in ['lbi', 'lbr', 'cons-dist-nuc', 'cons-dist-aa']:
return lbfo[var][uid] # NOTE this will fail in (some) cases where the uids in the tree and annotation aren't the same, but I don't care atm since it looks like we won't really be using the dtr
elif var == 'edge-dist':
return edge_dist_fcn(dtree, uid)
elif var == 'shm':
return utils.per_seq_val(line, 'n_mutations', uid)
elif var == 'shm-aa':
return utils.shm_aa(line, line['unique_ids'].index(uid))
else:
assert False
elif pchoice == 'per-cluster':
return per_cluster_vals[var]
else:
assert False
# ----------------------------------------------------------------------------------------
if cgroup == 'among-families':
per_cluster_vals = {
'cons-seq-shm-nuc' : lb_cons_seq_shm(line),
'cons-seq-shm-aa' : lb_cons_seq_shm(line, aa=True),
'fay-wu-h' : -utils.fay_wu_h(line),
'mean-shm' : numpy.mean(line['n_mutations']),
'max-lbi' : max(lbfo['lbi'].values()),
'max-lbr' : max(lbfo['lbr'].values()),
}
vals = []
for uid in line['unique_ids']:
vals.append([getval(pc, var, uid) for pc, var in get_dtr_varnames(cgroup, varlists, with_pc=True)])
return vals
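# (the returned <vals> has one row per uid in line['unique_ids'], with columns in the same order as
#  get_dtr_varnames(cgroup, varlists), i.e. all the per-seq variables followed by the per-cluster ones)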
# ----------------------------------------------------------------------------------------
def dtrfname(dpath, cg, tvar, suffix='pickle'):
return '%s/%s-%s-dtr-model.%s' % (dpath, cg, tvar, suffix)
# ----------------------------------------------------------------------------------------
def tmfname(plotdir, metric, x_axis_label, cg=None, tv=None, use_relative_affy=False): # tree metric fname
assert x_axis_label in ['affinity', 'n-ancestor'] # arg, this is messy
assert tv in [None, 'affinity', 'delta-affinity']
metric_str = metric if metric != 'dtr' else '-'.join([cg, tv, metric])
vs_str = '%s-vs%s-%s' % (metric_str, '-relative' if x_axis_label == 'affinity' and use_relative_affy else '', x_axis_label)
return '%s/true-tree-metrics/%s/%s-ptiles/%s-true-tree-ptiles-all-clusters.yaml' % (plotdir, metric_str, vs_str, vs_str) # NOTE has 'true-tree' in there, which is fine for now but may need to change
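# e.g. tmfname('/path/to/plots', 'lbi', 'affinity') (with a hypothetical plot dir) gives
#   '/path/to/plots/true-tree-metrics/lbi/lbi-vs-affinity-ptiles/lbi-vs-affinity-true-tree-ptiles-all-clusters.yaml'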
# ----------------------------------------------------------------------------------------
def write_pmml(pmmlfname, dmodel, varlist, targetvar):
try: # seems to crash for no @**($ing reason sometimes
if 'sklearn2pmml' not in sys.modules: # just so people don't need to install/import it if they're not training
import sklearn2pmml
pmml_pipeline = sys.modules['sklearn2pmml'].make_pmml_pipeline(dmodel, active_fields=varlist, target_fields=targetvar)
sys.modules['sklearn2pmml'].sklearn2pmml(pmml_pipeline, pmmlfname)
except:
elines = traceback.format_exception(*sys.exc_info())
print utils.pad_lines(''.join(elines))
print ' %s pmml conversion failed (see above), but continuing' % utils.color('red', 'error')
# ----------------------------------------------------------------------------------------
def train_dtr_model(trainfo, outdir, cfgvals, cgroup, tvar):
if os.path.exists(dtrfname(outdir, cgroup, tvar)):
print ' %s dtr model file exists, so skipping training: %s' % (utils.color('yellow', 'warning'), dtrfname(outdir, cgroup, tvar))
return
if 'sklearn.ensemble' not in sys.modules:
with warnings.catch_warnings(): # NOTE not sure this is actually catching the warnings UPDATE oh, I think the warnings are getting thrown by function calls, not imports
warnings.simplefilter('ignore', category=DeprecationWarning) # numpy is complaining about how sklearn is importing something, and I really don't want to *@*($$ing hear about it
from sklearn import tree
from sklearn import ensemble
skens = sys.modules['sklearn.ensemble']
sktree = sys.modules['sklearn.tree']
start = time.time()
base_kwargs, kwargs = {}, {'n_estimators' : cfgvals['n_estimators']}
if cfgvals['ensemble'] == 'bag':
base_kwargs = {'min_samples_leaf' : cfgvals['min_samples_leaf'], 'max_depth' : cfgvals['max_depth']}
kwargs['base_estimator'] = sktree.DecisionTreeRegressor(**base_kwargs) # we can pass this to ada-boost, but I'm not sure if we should (it would override the default max_depth=3, for instance)
if 'grad-boost' in cfgvals['ensemble']:
kwargs['max_depth'] = cfgvals['max_depth']
kwargs['min_samples_leaf'] = cfgvals['min_samples_leaf']
if 'boost' not in cfgvals['ensemble']:
kwargs['n_jobs'] = cfgvals['n_jobs']
if cfgvals['ensemble'] == 'bag':
model = skens.BaggingRegressor(**kwargs)
elif cfgvals['ensemble'] == 'forest':
model = skens.RandomForestRegressor(**kwargs)
elif cfgvals['ensemble'] == 'ada-boost':
model = skens.AdaBoostRegressor(**kwargs)
elif cfgvals['ensemble'] == 'grad-boost':
model = skens.GradientBoostingRegressor(**kwargs) # if too slow, maybe try the new hist gradient boosting stuff
else:
assert False
model.fit(trainfo['in'], trainfo['out']) #, sample_weight=trainfo['weights'])
tmpkeys = [k for k in cfgvals if k != 'vars' and (k in kwargs or k in base_kwargs)] # don't want to print the inapplicable ones
print ' %s-families %s (%d observations in %.1fs): %s' % (utils.color('green', cgroup.split('-')[0]), utils.color('blue', tvar), len(trainfo['in']), time.time() - start, ' '.join('%s %s'%(k, cfgvals[k]) for k in sorted(tmpkeys)))
print ' feature importances:'
print ' mean err'
for iv, vname in enumerate([v for pc in pchoices for v in cfgvals['vars'][cgroup][pc]]):
if cfgvals['ensemble'] == 'grad-boost':
filist = [model.feature_importances_[iv]]
else:
filist = [estm.feature_importances_[iv] for estm in model.estimators_]
wlist = None
if cfgvals['ensemble'] == 'ada-boost':
wlist = [w for w in model.estimator_weights_ if w > 0]
assert len(wlist) == len(model.estimators_) # it terminates early (i.e. before making all the allowed estimators) if it already has perfect performance, but doesn't leave the lists the same length
print ' %17s %5.3f %5.3f' % (vname, numpy.average(filist, weights=wlist), (numpy.std(filist, ddof=1) / math.sqrt(len(filist))) if len(filist) > 1 else 0.) # NOTE not sure if std should also use the weights
if not os.path.exists(outdir):
os.makedirs(outdir)
if 'joblib' not in sys.modules: # just so people don't need to install it unless they're training (also scons seems to break it https://stackoverflow.com/questions/24453387/scons-attributeerror-builtin-function-or-method-object-has-no-attribute-disp)
import joblib
with open(dtrfname(outdir, cgroup, tvar), 'w') as dfile:
sys.modules['joblib'].dump(model, dfile)
write_pmml(dtrfname(outdir, cgroup, tvar, suffix='pmml'), model, get_dtr_varnames(cgroup, cfgvals['vars']), tvar)
# ----------------------------------------------------------------------------------------
# NOTE the min lbi is just tau, but I still like doing it this way
lb_bounds = { # calculated to 17 generations, which is quite close to the asymptote
typical_bcr_seq_len : { # seq_len
0.0030: (0.0030, 0.0331), # if tau is any bigger than this it doesn't really converge
0.0025: (0.0025, 0.0176),
0.0020: (0.0020, 0.0100),
0.0010: (0.0010, 0.0033),
0.0005: (0.0005, 0.0015),
},
# it turns out the aa lb metrics need the above nuc normalization (i.e. if we normalize with the below, the values are huge, like lots are 10ish). I guess maybe this makes sense, since i'm taking the nuc tree topology and scaling it to aa
# int(typical_bcr_seq_len / 3.) : { # amino acid (133)
# 0.0030: (0.0030, 0.0099),
# 0.0025: (0.0025, 0.0079),
# 0.0020: (0.0020, 0.0061),
# 0.0010: (0.0010, 0.0030),
# 0.0005: (0.0005, 0.0015),
# }
}
# ----------------------------------------------------------------------------------------
def normalize_lb_val(metric, lbval, tau, seq_len=typical_bcr_seq_len):
if metric == 'lbr':
return lbval
if seq_len not in lb_bounds:
raise Exception('seq len %d not in cached lb bound values (available: %s)' % (seq_len, lb_bounds.keys()))
if tau not in lb_bounds[seq_len]:
raise Exception('tau value %f not in cached lb bound values (available: %s)' % (tau, lb_bounds[seq_len].keys()))
lbmin, lbmax = lb_bounds[seq_len][tau]
return (lbval - lbmin) / (lbmax - lbmin)
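# For example, using the cached bounds above for the default tau and typical sequence length:
#   normalize_lb_val('lbi', 0.010, 0.0025) -> (0.010 - 0.0025) / (0.0176 - 0.0025), i.e. about 0.50
#   normalize_lb_val('lbr', 3.2, 0.0025)   -> 3.2 (lbr values are returned unchanged)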
# ----------------------------------------------------------------------------------------
def get_treestr_from_file(treefname):
with open(treefname) as treefile:
return '\n'.join(treefile.readlines())
# ----------------------------------------------------------------------------------------
def as_str(dtree): # just a shortand (adding this very late, so could stand to add this to a lot of paces that use dtree.as_string())
return dtree.as_string(schema='newick').strip()
# ----------------------------------------------------------------------------------------
def cycle_through_ascii_conversion(dtree=None, treestr=None, taxon_namespace=None): # run once through the cycle of str -> dtree -> str (or dtree -> str -> dtree)
if dtree is not None:
return get_dendro_tree(treestr=as_str(dtree), taxon_namespace=taxon_namespace)
elif treestr is not None:
return as_str(get_dendro_tree(treestr=treestr))
else:
assert False
# ----------------------------------------------------------------------------------------
def get_dendro_tree(treestr=None, treefname=None, taxon_namespace=None, schema='newick', ignore_existing_internal_node_labels=False, suppress_internal_node_taxa=False, debug=False): # specify either <treestr> or <treefname>
# <ignore_existing_internal_node_labels> is for when you want the internal nodes labeled (which we usually do, since we want to calculate selection metrics for internal nodes), but you also want to ignore the existing internal node labels (e.g. with FastTree output, where they're floats)
# <suppress_internal_node_taxa> on the other hand is for when you don't want to have taxa for any internal nodes (e.g. when calculating the tree difference metrics, the two trees have to have the same taxon namespace, but since they in general have different internal nodes, the internal nodes can't have taxa)
assert treestr is None or treefname is None
if ignore_existing_internal_node_labels and suppress_internal_node_taxa:
raise Exception('doesn\'t make sense to specify both')
if treestr is None:
treestr = get_treestr_from_file(treefname)
if debug:
print ' getting dendro tree from string:\n %s' % treestr
if taxon_namespace is not None:
print ' and taxon namespace: %s' % ' '.join([t.label for t in taxon_namespace])
# dendropy doesn't make taxons for internal nodes by default, so it puts the label for internal nodes in node.label instead of node.taxon.label, but it crashes if it gets duplicate labels, so you can't just always turn off internal node taxon suppression
    dtree = dendropy.Tree.get_from_string(treestr, schema, taxon_namespace=taxon_namespace, suppress_internal_node_taxa=(ignore_existing_internal_node_labels or suppress_internal_node_taxa), preserve_underscores=True, rooting='force-rooted') # make sure the tree is rooted, to avoid nodes disappearing in remove_dummy_branches() (and probably other places as well)
if dtree.seed_node.edge_length > 0:
# this would be easy to fix, but i think it only happens from simulation trees from treegenerator
print ' %s seed/root node has non-zero edge length (i.e. there\'s a branch above it)' % utils.color('red', 'warning')
label_nodes(dtree, ignore_existing_internal_node_labels=ignore_existing_internal_node_labels, suppress_internal_node_taxa=suppress_internal_node_taxa, debug=debug) # set internal node labels to any found in <treestr> (unless <ignore_existing_internal_node_labels> is set), otherwise make some up (e.g. aa, ab, ac)
# # uncomment for more verbosity:
# check_node_labels(dtree, debug=debug) # makes sure that for all nodes, node.taxon is not None, and node.label *is* None (i.e. that label_nodes did what it was supposed to, as long as suppress_internal_node_taxa wasn't set)
# if debug:
# print utils.pad_lines(get_ascii_tree(dendro_tree=dtree))
return dtree
# ----------------------------------------------------------------------------------------
def import_bio_phylo():
if 'Bio.Phylo' not in sys.modules:
from Bio import Phylo # slow af to import
return sys.modules['Bio.Phylo']
# ----------------------------------------------------------------------------------------
def get_bio_tree(treestr=None, treefname=None, schema='newick'): # NOTE don't use this in future (all current uses are commented)
Phylo = import_bio_phylo()
if treestr is not None:
return Phylo.read(StringIO(treestr), schema)
elif treefname is not None:
with open(treefname) as treefile:
return Phylo.read(treefile, schema)
else:
assert False
# ----------------------------------------------------------------------------------------
def get_leaf_depths(tree, treetype='dendropy'): # NOTE structure of dictionary may depend on <treetype>, e.g. whether non-named nodes are included (maybe it doesn't any more? unless you return <clade_keyed_depths> at least)
if treetype == 'dendropy':
depths = {n.taxon.label : n.distance_from_root() for n in tree.leaf_node_iter()}
elif treetype == 'Bio':
clade_keyed_depths = tree.depths() # keyed by clade, not clade name (so unlabelled nodes are accessible)
depths = {n.name : clade_keyed_depths[n] for n in tree.find_clades()}
else:
assert False
return depths
# ----------------------------------------------------------------------------------------
def get_n_leaves(tree):
return len(tree.leaf_nodes())
# ----------------------------------------------------------------------------------------
def get_n_nodes(tree):
return len(list(tree.preorder_node_iter()))
# ----------------------------------------------------------------------------------------
def collapse_nodes(dtree, keep_name, remove_name, keep_name_node=None, remove_name_node=None, debug=False): # collapse edge between <keep_name> and <remove_name>, leaving remaining node with name <keep_name>
# NOTE I wrote this to try to fix the phylip trees from lonr.r, but it ends up they're kind of unfixable... but this fcn may be useful in the future, I guess, and it works UPDATE yep using it now for something else
if debug:
print ' collapsing %s and %s (the former will be the label for the surviving node)' % (keep_name, remove_name)
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree))
if keep_name_node is None:
keep_name_node = dtree.find_node_with_taxon_label(keep_name)
if remove_name_node is None:
assert remove_name is not None # if we *are* passed <remove_name_node>, it's ok for <remove_name> to be None
remove_name_node = dtree.find_node_with_taxon_label(remove_name)
swapped = False
if keep_name_node in remove_name_node.child_nodes():
assert remove_name_node not in keep_name_node.child_nodes()
parent_node = remove_name_node
if parent_node.taxon is None:
parent_node.taxon = dendropy.Taxon()
parent_node.taxon.label = keep_name # have to rename it, since we always actually keep the parent
swapped = True
child_node = keep_name_node
elif remove_name_node in keep_name_node.child_nodes():
assert keep_name_node not in remove_name_node.child_nodes()
parent_node = keep_name_node
child_node = remove_name_node
else:
print ' node names %s and %s don\'t share an edge:' % (keep_name, remove_name)
print ' keep node children: %s' % ' '.join([n.taxon.label for n in keep_name_node.child_nodes()])
print ' remove node children: %s' % ' '.join([n.taxon.label for n in remove_name_node.child_nodes()])
raise Exception('see above')
if child_node.is_leaf():
dtree.prune_taxa([child_node.taxon], suppress_unifurcations=False)
if debug:
print ' pruned leaf node %s' % (('%s (renamed parent to %s)' % (remove_name, keep_name)) if swapped else remove_name)
else:
found = False
for edge in parent_node.child_edge_iter():
if edge.head_node is child_node:
edge.collapse() # removes child node (in dendropy language: inserts all children of the head_node (child) of this edge as children of the edge's tail_node (parent)) Doesn't modify edge lengths by default (i.e. collapsed edge should have zero length).
found = True
break
assert found
if debug:
print ' collapsed edge between %s and %s' % (keep_name, remove_name)
if debug:
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree))
assert dtree.find_node_with_taxon_label(remove_name) is None
# NOTE do i need to add this?
# dtree.purge_taxon_namespace()
# ----------------------------------------------------------------------------------------
def check_node_labels(dtree, debug=False):
if debug:
print 'checking node labels for:'
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=250))
for node in dtree.preorder_node_iter():
if node.taxon is None:
raise Exception('taxon is None')
if debug:
print ' ok: %s' % node.taxon.label
if node.label is not None:
raise Exception('node.label not set to None')
# ----------------------------------------------------------------------------------------
# by default, mostly adds labels to internal nodes (also sometimes the root node) that are missing them
def label_nodes(dendro_tree, ignore_existing_internal_node_labels=False, ignore_existing_internal_taxon_labels=False, suppress_internal_node_taxa=False, initial_length=3, debug=False):
if ignore_existing_internal_node_labels and suppress_internal_node_taxa:
raise Exception('doesn\'t make sense to specify both')
if debug:
print ' labeling nodes'
# print ' before:'
# print utils.pad_lines(get_ascii_tree(dendro_tree))
tns = dendro_tree.taxon_namespace
initial_names = set([t.label for t in tns]) # should all be leaf nodes, except the naive sequence (at least for now)
if debug:
print ' initial taxon labels: %s' % ' '.join(sorted(initial_names))
potential_names, used_names = None, None
new_label, potential_names, used_names = utils.choose_new_uid(potential_names, used_names, initial_length=initial_length, shuffle=True)
skipped_dbg, relabeled_dbg = [], []
for node in dendro_tree.preorder_node_iter():
if node.taxon is not None and not (ignore_existing_internal_taxon_labels and not node.is_leaf()):
skipped_dbg += ['%s' % node.taxon.label]
assert node.label is None # if you want to change this, you have to start setting the node labels in build_lonr_tree(). For now, I like having the label in _one_ freaking place
continue # already properly labeled
current_label = node.label
node.label = None
if suppress_internal_node_taxa and not node.is_leaf():
continue
if current_label is None or ignore_existing_internal_node_labels:
new_label, potential_names, used_names = utils.choose_new_uid(potential_names, used_names)
else:
# turning this off since it's slow, and has been here a while without getting tripped (and I'm pretty sure the tns checks, anyway)
# if tns.has_taxon_label(current_label):
# raise Exception('duplicate node label \'%s\'' % current_label)
new_label = current_label
# turning this off since it's slow, and has been here a while without getting tripped (and I'm pretty sure the tns checks, anyway)
# if tns.has_taxon_label(new_label):
# raise Exception('failed labeling internal nodes (chose name \'%s\' that was already in the taxon namespace)' % new_label)
node.taxon = dendropy.Taxon(new_label)
tns.add_taxon(node.taxon)
relabeled_dbg += ['%s' % new_label]
if debug:
print ' skipped (already labeled): %s' % ' '.join(sorted(skipped_dbg))
print ' (re-)labeled: %s' % ' '.join(sorted(relabeled_dbg))
# print ' after:'
# print utils.pad_lines(get_ascii_tree(dendro_tree))
# ----------------------------------------------------------------------------------------
def translate_labels(dendro_tree, translation_pairs, debug=False):
if debug:
print get_ascii_tree(dendro_tree=dendro_tree)
for old_label, new_label in translation_pairs:
taxon = dendro_tree.taxon_namespace.get_taxon(old_label)
if taxon is None:
raise Exception('requested taxon with old name \'%s\' not present in tree' % old_label)
taxon.label = new_label
if debug:
print '%20s --> %s' % (old_label, new_label)
if debug:
print get_ascii_tree(dendro_tree=dendro_tree)
# ----------------------------------------------------------------------------------------
def get_mean_leaf_height(tree=None, treestr=None):
assert tree is None or treestr is None
if tree is None:
tree = get_dendro_tree(treestr=treestr, schema='newick')
heights = get_leaf_depths(tree).values()
return sum(heights) / len(heights)
# ----------------------------------------------------------------------------------------
def get_ascii_tree(dendro_tree=None, treestr=None, treefname=None, extra_str='', width=200, schema='newick', label_fcn=None):
"""
AsciiTreePlot docs (don't show up in as_ascii_plot()):
plot_metric : str
A string which specifies how branches should be scaled, one of:
'age' (distance from tips), 'depth' (distance from root),
'level' (number of branches from root) or 'length' (edge
length/weights).
show_internal_node_labels : bool
Whether or not to write out internal node labels.
leaf_spacing_factor : int
Positive integer: number of rows between each leaf.
width : int
Force a particular display width, in terms of number of columns.
node_label_compose_fn : function object
A function that takes a Node object as an argument and returns
the string to be used to display it.
"""
if dendro_tree is None:
assert treestr is None or treefname is None
if treestr is None:
treestr = get_treestr_from_file(treefname)
dendro_tree = get_dendro_tree(treestr=treestr, schema=schema)
if get_mean_leaf_height(dendro_tree) == 0.: # we really want the max height, but since we only care whether it's zero or not this is the same
return '%szero height' % extra_str
# elif: get_n_nodes(dendro_tree) > 1: # not sure if I really need this if any more (it used to be for one-leaf trees (and then for one-node trees), but the following code (that used to be indented) seems to be working fine on one-leaf, one-node, and lots-of-node trees a.t.m.)
start_char, end_char = '', ''
def compose_fcn(x):
if x.taxon is not None: # if there's a taxon defined, use its label
lb = x.taxon.label
elif x.label is not None: # use node label
lb = x.label
else:
lb = 'o'
if label_fcn is not None:
lb = label_fcn(lb)
return '%s%s%s' % (start_char, lb, end_char)
dendro_str = dendro_tree.as_ascii_plot(width=width, plot_metric='length', show_internal_node_labels=True, node_label_compose_fn=compose_fcn)
special_chars = [c for c in reversed(string.punctuation) if c not in set(dendro_str)] # find some special characters that we can use to identify the start and end of each label (could also use non-printable special characters, but it shouldn't be necessary)
if len(special_chars) >= 2: # can't color them directly, since dendropy counts the color characters as printable
start_char, end_char = special_chars[:2] # NOTE the colors get screwed up when dendropy overlaps labels (or sometimes just straight up strips stuff), which it does when it runs out of space
        dendro_str = dendro_tree.as_ascii_plot(width=width, plot_metric='length', show_internal_node_labels=True, node_label_compose_fn=compose_fcn) # call again after modifying compose fcn (kind of wasteful to call it twice, but it shouldn't make a difference)
dendro_str = dendro_str.replace(start_char, utils.Colors['blue']).replace(end_char, utils.Colors['end'] + ' ')
else:
print ' %s can\'t color tree, no available special characters in get_ascii_tree()' % utils.color('red', 'note:')
if get_n_nodes(dendro_tree) == 1:
extra_str += ' (one node)'
return_lines = [('%s%s' % (extra_str, line)) for line in dendro_str.split('\n')]
return '\n'.join(return_lines)
# ----------------------------------------------------------------------------------------
def rescale_tree(new_mean_height, dtree=None, treestr=None, debug=False):
# NOTE if you pass in <dtree>, it gets modified, but if you pass in <treestr> you get back a new dtree (which is kind of a dumb way to set this up, but I don't want to change it now. Although I guess it returns None if you pass <dtree>, so you shouldn't get in too much trouble)
# TODO (maybe) switch calls of this to dendro's scale_edges() (but note you'd then have to get the mean depth beforehand, since that just multiplies by factor, whereas this rescales to get a particular new height)
""" rescale the branch lengths in dtree/treestr by a factor such that the new mean height is <new_mean_height> """
if dtree is None:
dtree = get_dendro_tree(treestr=treestr, suppress_internal_node_taxa=True)
mean_height = get_mean_leaf_height(tree=dtree)
if debug:
print ' current mean: %.4f target height: %.4f' % (mean_height, new_mean_height)
for edge in dtree.postorder_edge_iter():
if edge.head_node is dtree.seed_node: # why tf does the root node have an edge where it's the child?
continue
if debug:
print ' %5s %7e --> %7e' % (edge.head_node.taxon.label if edge.head_node.taxon is not None else 'None', edge.length, edge.length * new_mean_height / mean_height)
edge.length *= new_mean_height / mean_height # rescale every branch length in the tree by the ratio of desired to existing height (everybody's heights should be the same... but they never quite were when I was using Bio.Phylo, so, uh. yeah, uh. not sure what to do, but this is fine. It's checked below, anyway)
if not treestr: # i'm really pretty sure there's no point in doing this if we're just going to immediately convert to string (and it just caused huge fucking problems because it was missing the suppress unifurcations arg. I'm so *!$@(($@ing tired of that shit this is like the fourth time I've wasted hours chasing down weirdness that stems from that)
dtree.update_bipartitions(suppress_unifurcations=False) # probably doesn't really need to be done
if debug:
print ' final mean: %.4f' % get_mean_leaf_height(tree=dtree)
if treestr:
return dtree.as_string(schema='newick').strip()
# ----------------------------------------------------------------------------------------
def get_tree_difference_metrics(region, in_treestr, leafseqs, naive_seq):
taxon_namespace = dendropy.TaxonNamespace() # in order to compare two trees with the metrics below, the trees have to have the same taxon namespace
in_dtree = get_dendro_tree(treestr=in_treestr, taxon_namespace=taxon_namespace, suppress_internal_node_taxa=True)
seqfos = [{'name' : 't%d' % (iseq + 1), 'seq' : seq} for iseq, seq in enumerate(leafseqs)]
out_dtree = get_fasttree_tree(seqfos, naive_seq=naive_seq, taxon_namespace=taxon_namespace, suppress_internal_node_taxa=True)
in_height = get_mean_leaf_height(tree=in_dtree)
out_height = get_mean_leaf_height(tree=out_dtree)
base_width = 100
in_ascii_str = get_ascii_tree(dendro_tree=in_dtree, extra_str=' ', width=base_width) # make copies before the following functions mess the trees up
out_ascii_str = get_ascii_tree(dendro_tree=out_dtree, extra_str=' ', width=int(base_width*out_height/in_height))
print ' comparing input and bppseqgen output trees:'
print ' heights: %.3f %.3f' % (in_height, out_height)
print ' symmetric difference: %d' % dendropy.calculate.treecompare.symmetric_difference(in_dtree, out_dtree) # WARNING these functions modify the tree (i think by removing unifurcations) becuase OF COURSE THEY DO, wtf
print ' euclidean distance: %f' % dendropy.calculate.treecompare.euclidean_distance(in_dtree, out_dtree)
print ' r-f distance: %f' % dendropy.calculate.treecompare.robinson_foulds_distance(in_dtree, out_dtree)
print ' %s' % utils.color('blue', 'input:')
print in_ascii_str
print ' %s' % utils.color('blue', 'output:')
print out_ascii_str
# ----------------------------------------------------------------------------------------
# loops over uids in <hline> and <lline> (which, in order, must correspond to each other), chooses a new joint uid and applies it to both h and l trees, then checks to make sure the trees are identical
def merge_heavy_light_trees(hline, lline, use_identical_uids=False, check_trees=True, debug=False):
def ladd(uid, locus):
return '%s-%s' % (uid, locus)
def lrm(uid, locus):
assert '-' in uid and uid.split('-')[-1] == locus
return uid.replace('-%s' % locus, '')
if debug:
print ' before:'
print ' heavy:'
print utils.pad_lines(get_ascii_tree(treestr=hline['tree']))
print ' light:'
print utils.pad_lines(get_ascii_tree(treestr=lline['tree']))
assert len(hline['unique_ids']) == len(lline['unique_ids'])
lpair = [hline, lline]
joint_reco_id = utils.uidhashstr(hline['reco_id'] + lline['reco_id'])
for ltmp in lpair:
ltmp['reco_id'] = joint_reco_id
ltmp['paired-uids'] = []
dtrees = [get_dendro_tree(treestr=l['tree']) for l in lpair]
for iuid, (huid, luid) in enumerate(zip(hline['unique_ids'], lline['unique_ids'])):
joint_uid = utils.uidhashstr(huid + luid)
for ltmp in lpair:
ltmp['unique_ids'][iuid] = joint_uid
if not use_identical_uids:
ltmp['unique_ids'][iuid] = ladd(ltmp['unique_ids'][iuid], ltmp['loci'][iuid])
for l1, l2 in zip(lpair, reversed(lpair)):
l1['paired-uids'].append([l2['unique_ids'][iuid]])
for dt, uid, ltmp in zip(dtrees, [huid, luid], lpair): # NOTE huid and luid here are the *old* ones
dt.find_node_with_taxon_label(uid).taxon = dendropy.Taxon(ltmp['unique_ids'][iuid]) # don't need to update the taxon namespace since we don't use it afterward
hline['tree'], lline['tree'] = [as_str(dt) for dt in dtrees] # have to make a separate tree to actually put in the <line>s, since the symmetric difference function screws up the tree
if check_trees:
if not use_identical_uids: # reset back to the plain <joint_uid> so we can compare
for dt, ltmp in zip(dtrees, lpair):
for uid, locus in zip(ltmp['unique_ids'], ltmp['loci']): # yes, they all have the same locus, but see note in utils
dt.find_node_with_taxon_label(uid).taxon = dendropy.Taxon(lrm(uid, locus)) # don't need to update the taxon namespace since we don't use it afterward
tns = dendropy.TaxonNamespace()
dtrees = [cycle_through_ascii_conversion(dtree=dt, taxon_namespace=tns) for dt in dtrees] # have to recreate from str before calculating symmetric difference to avoid the taxon namespace being screwed up (I tried a bunch to avoid this, I don't know what it's changing, the tns looks fine, but something's wrong)
        sym_diff = dendropy.calculate.treecompare.symmetric_difference(*dtrees)  # WARNING this function modifies the trees (I think by removing unifurcations)
if sym_diff != 0: # i guess in principle we could turn this off after we've run a fair bit, but it seems really dangerous, since if the heavy and light trees get out of sync the whole simulation is ruined
raise Exception('trees differ (symmetric difference %d) for heavy and light chains' % sym_diff)
if debug:
print ' after:'
print ' symmetric difference: %d' % sym_diff
print ' heavy:'
print utils.pad_lines(get_ascii_tree(treestr=hline['tree']))
print ' light:'
print utils.pad_lines(get_ascii_tree(treestr=lline['tree']))
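# ----------------------------------------------------------------------------------------
# editor's illustrative sketch (not called anywhere) of the taxon-renaming idiom used in merge_heavy_light_trees(): replace the node's Taxon object outright rather than editing the label in place, which is fine as long as the old taxon namespace isn't used afterward. The tree string and names are made up.
def _example_rename_leaf_taxon():
    dtree = dendropy.Tree.get(data='((old-name:1,b:1):1,c:1);', schema='newick')
    node = dtree.find_node_with_taxon_label('old-name')
    node.taxon = dendropy.Taxon('new-name')  # the old Taxon is still in dtree.taxon_namespace, so don't rely on the namespace after this
    return dtree.as_string(schema='newick').strip()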
# ----------------------------------------------------------------------------------------
def collapse_zero_length_leaves(dtree, sequence_uids, debug=False): # <sequence_uids> is uids for which we have actual sequences (i.e. not internal nodes inferred by the tree program without sequences)
if debug > 1:
print ' merging trivially-dangling leaves into parent internal nodes'
print ' distance leaf parent'
removed_nodes = []
for leaf in list(dtree.leaf_node_iter()): # subsume super short/zero length leaves into their parent internal nodes
recursed = False
while leaf.edge_length is not None and leaf.edge_length < 1./(2*typical_bcr_seq_len): # if distance corresponds to less than one mutation, it's probably (always?) just fasttree dangling an internal node as a leaf
            if leaf.parent_node is None:  # not sure why the root node can show up here, but it does
break
if leaf.parent_node.taxon is not None and leaf.parent_node.taxon.label in sequence_uids: # only want to do it if the parent node is a (spurious) internal node added by fasttree (this parent's taxon will be None if suppress_internal_node_taxa was set)
break
if debug > 1:
print ' %8.5f %-20s %-20s' % (leaf.edge_length, ' " ' if recursed else leaf.taxon.label, 'none' if leaf.parent_node.taxon is None else leaf.parent_node.taxon.label)
parent_node = leaf.parent_node
removed_nodes.append(parent_node.taxon.label if parent_node.taxon is not None else None)
collapse_nodes(dtree, leaf.taxon.label, None, keep_name_node=leaf, remove_name_node=leaf.parent_node)
leaf = parent_node
recursed = True
dtree.update_bipartitions(suppress_unifurcations=False)
dtree.purge_taxon_namespace()
if debug:
print ' merged %d trivially-dangling leaves into parent internal nodes: %s' % (len(removed_nodes), ' '.join(str(n) for n in removed_nodes))
# print get_ascii_tree(dendro_tree=dtree, extra_str=' ', width=350)
# print dtree.as_string(schema='newick').strip()
# ----------------------------------------------------------------------------------------
def get_fasttree_tree(seqfos, naive_seq=None, naive_seq_name='XnaiveX', taxon_namespace=None, suppress_internal_node_taxa=False, debug=False):
if debug:
print ' running FastTree on %d sequences plus a naive' % len(seqfos)
uid_list = [sfo['name'] for sfo in seqfos]
if any(uid_list.count(u) > 1 for u in uid_list):
raise Exception('duplicate uid(s) in seqfos for FastTree, which\'ll make it crash: %s' % ' '.join(u for u in uid_list if uid_list.count(u) > 1))
with tempfile.NamedTemporaryFile() as tmpfile:
if naive_seq is not None:
tmpfile.write('>%s\n%s\n' % (naive_seq_name, naive_seq))
for sfo in seqfos:
tmpfile.write('>%s\n%s\n' % (sfo['name'], sfo['seq'])) # NOTE the order of the leaves/names is checked when reading bppseqgen output
        tmpfile.flush()  # NOTE the flush is required, otherwise FastTree can end up reading an empty or incomplete file
with open(os.devnull, 'w') as fnull:
treestr = subprocess.check_output('./bin/FastTree -gtr -nt ' + tmpfile.name, shell=True, stderr=fnull)
if debug:
print ' converting FastTree newick string to dendro tree'
dtree = get_dendro_tree(treestr=treestr, taxon_namespace=taxon_namespace, ignore_existing_internal_node_labels=not suppress_internal_node_taxa, suppress_internal_node_taxa=suppress_internal_node_taxa, debug=debug)
naive_node = dtree.find_node_with_taxon_label(naive_seq_name)
if naive_node is not None:
dtree.reroot_at_node(naive_node, suppress_unifurcations=False, update_bipartitions=True)
if not suppress_internal_node_taxa: # if we *are* suppressing internal node taxa, we're probably calling this from clusterpath, in which case we need to mess with the internal nodes in a way that assumes they can be ignored (so we collapse zero length leaves afterwards)
collapse_zero_length_leaves(dtree, uid_list + [naive_seq_name], debug=debug)
return dtree
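# ----------------------------------------------------------------------------------------
# editor's illustrative usage sketch (not called anywhere): get_fasttree_tree() expects <seqfos> as a list of {'name', 'seq'} dicts (same format as used above); the names and sequences here are made up, and ./bin/FastTree has to exist for the subprocess call to actually run.
def _example_run_fasttree():
    seqfos = [
        {'name' : 't1', 'seq' : 'ACGTACGTACGT'},
        {'name' : 't2', 'seq' : 'ACGTACGAACGT'},
        {'name' : 't3', 'seq' : 'ACGAACGAACGT'},
    ]
    dtree = get_fasttree_tree(seqfos, naive_seq='ACGTACGTACGT')  # reroots at the naive seq and collapses trivially-dangling leaves
    return get_mean_leaf_height(tree=dtree)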
# ----------------------------------------------------------------------------------------
# copied from https://github.com/nextstrain/augur/blob/master/base/scores.py
# also see explanation here https://photos.app.goo.gl/gtjQziD8BLATQivR6
def set_lb_values(dtree, tau, only_calc_metric=None, dont_normalize=False, multifo=None, debug=False):
"""
traverses <dtree> in postorder and preorder to calculate the up and downstream tree length exponentially weighted by distance, then adds them as LBI (and divides as LBR)
"""
def getmulti(node): # number of reads with the same sequence
if multifo is None or node.taxon.label not in multifo or multifo[node.taxon.label] is None: # most all of them should be in there, but for instance I'm not adding the dummy branch nodes
return 1
return multifo[node.taxon.label]
metrics_to_calc = lb_metrics.keys() if only_calc_metric is None else [only_calc_metric]
if debug:
print ' setting %s values with tau %.4f' % (' and '.join(metrics_to_calc), tau)
initial_labels = set([n.taxon.label for n in dtree.preorder_node_iter()])
dtree = get_tree_with_dummy_branches(dtree, tau) # this returns a new dtree, but the old tree is a subtree of the new one (or at least its collection of nodes are), and these nodes get modified by the process (hence the reversal fcn below)
# calculate clock length (i.e. for each node, the distance to that node's parent)
for node in dtree.postorder_node_iter(): # postorder vs preorder doesn't matter, but I have to choose one
if node.parent_node is None: # root node
node.clock_length = 0.
for child in node.child_node_iter():
child.clock_length = child.distance_from_root() - node.distance_from_root()
# lbi is the sum of <node.down_polarizer> (downward message from <node>'s parent) and its children's up_polarizers (upward messages)
# traverse the tree in postorder (children first) to calculate message to parents (i.e. node.up_polarizer)
for node in dtree.postorder_node_iter():
        node.down_polarizer = 0  # used for <node>'s lbi (this probably shouldn't be initialized here, since it gets reset in the next loop [at least I think they all do])
node.up_polarizer = 0 # used for <node>'s parent's lbi (but not <node>'s lbi)
for child in node.child_node_iter():
node.up_polarizer += child.up_polarizer
bl = node.clock_length / tau
node.up_polarizer *= numpy.exp(-bl) # sum of child <up_polarizer>s weighted by an exponential decayed by the distance to <node>'s parent
node.up_polarizer += getmulti(node) * tau * (1 - numpy.exp(-bl)) # add the actual contribution (to <node>'s parent's lbi) of <node>: zero if the two are very close, increasing toward asymptote of <tau> for distances near 1/tau (integral from 0 to l of decaying exponential)
# traverse the tree in preorder (parents first) to calculate message to children (i.e. child1.down_polarizer)
for node in dtree.preorder_internal_node_iter():
for child1 in node.child_node_iter(): # calculate down_polarizer for each of <node>'s children
child1.down_polarizer = node.down_polarizer # first sum <node>'s down_polarizer...
for child2 in node.child_node_iter(): # and the *up* polarizers of any other children of <node>
if child1 != child2:
child1.down_polarizer += child2.up_polarizer # add the contribution of <child2> to its parent's (<node>'s) lbi (i.e. <child2>'s contribution to the lbi of its *siblings*)
bl = child1.clock_length / tau
child1.down_polarizer *= numpy.exp(-bl) # and decay the previous sum by distance between <child1> and its parent (<node>)
child1.down_polarizer += getmulti(child1) * tau * (1 - numpy.exp(-bl)) # add contribution of <child1> to its own lbi: zero if it's very close to <node>, increasing to max of <tau> (integral from 0 to l of decaying exponential)
returnfo = {m : {} for m in metrics_to_calc}
# go over all nodes and calculate lb metrics (can be done in any order)
for node in dtree.postorder_node_iter():
vals = {'lbi' : node.down_polarizer, 'lbr' : 0.}
for child in node.child_node_iter():
vals['lbi'] += child.up_polarizer
vals['lbr'] += child.up_polarizer
if node.down_polarizer > 0.:
vals['lbr'] /= node.down_polarizer # it might make more sense to not include the branch between <node> and its parent in either the numerator or denominator (here it's included in the denominator), but this way I don't have to change any of the calculations above
if dummy_str in node.taxon.label:
continue
if node is dtree.seed_node or node.parent_node is dtree.seed_node: # second clause is only because of dummy root addition (well, and if we are adding dummy root the first clause doesn't do anything)
vals['lbr'] = 0.
for metric in metrics_to_calc:
returnfo[metric][node.taxon.label] = float(vals[metric]) if dont_normalize else normalize_lb_val(metric, float(vals[metric]), tau)
if debug:
max_width = str(max([len(n.taxon.label) for n in dtree.postorder_node_iter()]))
print (' %'+max_width+'s %s%s multi') % ('node', ''.join(' %s' % m for m in metrics_to_calc), 16*' ' if 'lbr' in metrics_to_calc else '')
for node in dtree.preorder_node_iter():
if dummy_str in node.taxon.label:
continue
multi_str = ''
if multifo is not None:
multi_str = str(getmulti(node))
if getmulti(node) > 1:
multi_str = utils.color('blue', multi_str, width=3)
lbstrs = ['%8.3f' % returnfo[m][node.taxon.label] for m in metrics_to_calc]
if 'lbr' in metrics_to_calc:
lbstrs += [' = %-5.3f / %-5.3f' % (returnfo['lbr'][node.taxon.label] * node.down_polarizer, node.down_polarizer)]
print (' %' + max_width + 's %s %3s') % (node.taxon.label, ''.join(lbstrs), multi_str)
# this is maybe time consuming, but I want to leave the tree that was passed in as unmodified as I can (especially since I have to run this fcn twice for lbi/lbr since they need different tau values)
for node in dtree.postorder_node_iter():
delattr(node, 'clock_length')
delattr(node, 'up_polarizer')
delattr(node, 'down_polarizer')
remove_dummy_branches(dtree, initial_labels)
return returnfo
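# ----------------------------------------------------------------------------------------
# editor's toy sketch (not called anywhere) of the up/down polarizer recursion in set_lb_values(), on a hard-coded five-node tree with plain dicts instead of dendropy, all multiplicities 1, no dummy branches and no normalization. The tau and branch lengths are made up; it's only meant to make the exponential message passing explicit, not to reproduce partis's values.
def _example_toy_lbi(tau=0.1):
    nodes = {'root' : (None, 0.), 'a' : ('root', 0.05), 'l1' : ('a', 0.02), 'l2' : ('a', 0.03), 'l3' : ('root', 0.08)}  # node : (parent, branch length to parent)
    children = {n : [c for c in nodes if nodes[c][0] == n] for n in nodes}
    postorder = ['l1', 'l2', 'l3', 'a', 'root']  # children before parents
    up, down = {}, {}
    for node in postorder:  # upward messages: each node's contribution to its parent's lbi
        bl = nodes[node][1] / tau
        up[node] = sum(up[c] for c in children[node]) * numpy.exp(-bl) + tau * (1. - numpy.exp(-bl))
    for node in reversed(postorder):  # downward messages: contribution of everything above (and beside) each node
        down[node] = 0.
        parent = nodes[node][0]
        if parent is None:  # root keeps a down polarizer of zero
            continue
        bl = nodes[node][1] / tau
        down[node] = (down[parent] + sum(up[s] for s in children[parent] if s != node)) * numpy.exp(-bl) + tau * (1. - numpy.exp(-bl))
    return {n : down[n] + sum(up[c] for c in children[n]) for n in nodes}  # unnormalized lbi for each node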
# ----------------------------------------------------------------------------------------
def get_tree_with_dummy_branches(old_dtree, tau, n_tau_lengths=10, add_dummy_leaves=False, debug=False): # add long branches above root and/or below each leaf, since otherwise we're assuming that (e.g.) leaf node fitness is zero
# commenting this since I'm pretty sure I've fixed it, but not removing it since if a similar problem surfaces with dummy branch addition, deep copying is an easy way out
# zero_length_edges = [e for e in old_dtree.preorder_edge_iter() if e.length == 0 and not e.head_node.is_leaf()]
# if len(zero_length_edges) > 0: # rerooting to remove dummy branches screws up the tree in some cases with zero length branches (see comment in that fcn)
    # old_dtree = copy.deepcopy(old_dtree)  # could maybe do this by default, but it'll probably be really slow on large trees (at least iterating through the trees is; although I suppose maybe deepcopy is smarter than that)
# print ' %s found %d zero length branches in tree, so deep copying before adding dummy branches (this is probably ok ish, but in general it\'s a bad idea to have zero length branches in your trees): %s' % (utils.color('yellow', 'warning'), len(zero_length_edges), ' '.join([e.head_node.taxon.label for e in zero_length_edges]))
dummy_edge_length = n_tau_lengths * tau
new_root_taxon = dendropy.Taxon(dummy_str + '-root')
old_dtree.taxon_namespace.add_taxon(new_root_taxon)
new_root_node = dendropy.Node(taxon=new_root_taxon)
new_dtree = dendropy.Tree(seed_node=new_root_node, taxon_namespace=old_dtree.taxon_namespace, is_rooted=True)
# then add the entire old tree under this new tree
new_root_node.add_child(old_dtree.seed_node)
for edge in new_root_node.child_edge_iter():
edge.length = dummy_edge_length
if add_dummy_leaves: # add dummy child branches to each leaf
for lnode in new_dtree.leaf_node_iter():
new_label = '%s-%s' % (dummy_str, lnode.taxon.label)
            new_dtree.taxon_namespace.add_taxon(dendropy.Taxon(new_label))  # add the new taxon to the tree's namespace before attaching the child
            new_child_node = lnode.new_child(taxon=new_dtree.taxon_namespace.get_taxon(new_label), edge_length=dummy_edge_length)
# TODO commenting this because it gets triggered way too much, but I'm not actually sure that I can really just ignore the problem (but maybe I can)
# zero_len_edge_nodes = [e.head_node for n in new_dtree.preorder_node_iter() for e in n.child_edge_iter() if e.length == 0 and not e.head_node.is_leaf()] # zero len edges above leaves are fine, since leaves don't count for lbr
# if len(zero_len_edge_nodes) > 0:
# print ' %s found %d zero length internal edges in tree, which means lb ratio may mis-categorize branches: %s' % (utils.color('red', 'warning'), len(zero_len_edge_nodes), ' '.join([n.taxon.label for n in zero_len_edge_nodes]))
# # for node in zero_len_edge_nodes: # we don't really want to modify the tree this drastically here (and a.t.m. this causes a crash later on), but I'm leaving it as a placeholder for how to remove zero length edges
# # collapse_nodes(new_dtree, node.taxon.label, node.parent_node.taxon.label) # keep the child, since it can be a leaf
# # print utils.pad_lines(get_ascii_tree(dendro_tree=new_dtree))
new_dtree.update_bipartitions(suppress_unifurcations=False) # not sure if I need this? (suppress_unifurcations is because otherwise it removes the branch between the old and new root nodes)
if debug:
print ' added dummy branches to tree:'
print get_ascii_tree(dendro_tree=new_dtree, extra_str=' ', width=350)
return new_dtree
# ----------------------------------------------------------------------------------------
def remove_dummy_branches(dtree, initial_labels, add_dummy_leaves=False, debug=False):
if add_dummy_leaves:
raise Exception('not implemented (shouldn\'t be too hard, but a.t.m. I don\'t think I\'ll need it)')
if len(dtree.seed_node.child_nodes()) != 1:
print ' %s root node has more than one child when removing dummy branches: %s' % (utils.color('yellow', 'warning'), ' '.join([n.taxon.label for n in dtree.seed_node.child_nodes()]))
new_root_node = dtree.seed_node.child_nodes()[0]
if debug:
print ' rerooting at %s' % new_root_node.taxon.label
print ' current children: %s' % ' '.join([n.taxon.label for n in new_root_node.child_node_iter()])
# NOTE if the new root has a child separated by a zero-length edge, this reroot call for some reason deletes that child from the tree (both with and without suppress_unifurcations set). After messing around a bunch to try to fix it, the message I'm taking is just that zero length branches (and unifurcations) are a bad idea and I should just forbid them
# UPDATE I think I was just missing the suppress_unifurcations=False in update_bipartitions(), but leaving these comments here in case there was another problem
# UPDATE actually the reroot still seems to eat a node sometimes if the tree is unrooted (so adding the extra reroot above)
    # UPDATE this is more or less expected, from dendropy's perspective; see https://github.com/jeetsukumaran/DendroPy/issues/118
assert dtree.is_rooted # make sure it's rooted, to avoid unifurcations getting suppressed (even with the arg set to false)
dtree.reroot_at_node(new_root_node, suppress_unifurcations=False) # reroot at old root node
if debug:
print ' children after reroot: %s' % ' '.join([n.taxon.label for n in new_root_node.child_node_iter()])
dtree.prune_taxa_with_labels([dummy_str + '-root'], suppress_unifurcations=False)
dtree.purge_taxon_namespace() # I'm sure there's a good reason the previous line doesn't do this
dtree.update_bipartitions(suppress_unifurcations=False)
if debug:
print ' children after purge: %s' % ' '.join([n.taxon.label for n in new_root_node.child_node_iter()])
final_labels = set([n.taxon.label for n in dtree.preorder_node_iter()])
if initial_labels != final_labels: # this was only happening with a zero-length node hanging off root (see above), which probably won't happen any more since I'm now removing zero length (non-leaf) branches in bcr-phylo simulator.py
print ' %s nodes after dummy branch addition and removal not the same as before:' % utils.color('red', 'error')
print ' missing: %s' % ' '.join(initial_labels - final_labels)
print ' extra: %s' % ' '.join(final_labels - initial_labels)
print ' tree:'
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400))
# ----------------------------------------------------------------------------------------
def get_aa_tree(dtree, annotation, extra_str=None, debug=False):
very_different_frac = 0.5
if debug:
print ' converting nuc tree (mean depth %.3f) to aa' % get_mean_leaf_height(dtree)
# print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400))
changes = {}
aa_dtree = copy.deepcopy(dtree)
nuc_seqs = {uid : seq for uid, seq in zip(annotation['unique_ids'], annotation['seqs'])}
aa_seqs = {uid : seq for uid, seq in zip(annotation['unique_ids'], annotation['seqs_aa'])}
skipped_edges = []
if debug > 1:
print ' N mutations branch length'
print ' nuc aa nuc aa child node'
for edge in aa_dtree.preorder_edge_iter():
if edge.tail_node is None: # edge above root (no, i don't know why root has an edge above it, but that's how it is)
continue
cnode = edge.head_node # child of this edge
clabel, plabel = cnode.taxon.label, cnode.parent_node.taxon.label # turns out there's also a .tail_node attribute of the edge that isn't listed properly in the docs
if clabel not in aa_seqs or plabel not in aa_seqs: # if either of the seqs are missing, leave the existing (presumably nucleotide-based) branch length unchanged
skipped_edges.append(edge)
continue
nuc_branch_length = edge.length # nucleotide distance from parent node (only used for debug, but we have to grab it before we change the edge length)
aa_mut_frac, aa_n_muts = utils.hamming_fraction(aa_seqs[plabel], aa_seqs[clabel], amino_acid=True, also_return_distance=True)
edge.length = aa_mut_frac
if debug:
nuc_mut_frac, nuc_n_muts = utils.hamming_fraction(nuc_seqs[plabel], nuc_seqs[clabel], also_return_distance=True)
if nuc_mut_frac > 0 and abs(nuc_branch_length - nuc_mut_frac) / nuc_mut_frac > very_different_frac:
print ' %s nuc branch length %.4f and mut frac %.4f very different for branch between %s --> %s' % (utils.color('red', 'warning'), nuc_branch_length, nuc_mut_frac, clabel, plabel)
changes[edge] = (nuc_n_muts, aa_n_muts)
if debug > 1:
print ' %3d %3d %.3f %.3f %s' % (nuc_n_muts, aa_n_muts, nuc_branch_length, aa_mut_frac, clabel)
aa_dtree.update_bipartitions(suppress_unifurcations=False)
if len(skipped_edges) > 0:
print ' %s get_aa_tree()%s: skipped %d/%d edges for which we didn\'t have sequences for both nodes (i.e. left the original branch length unmodified)' % (utils.color('yellow', 'warning'), '' if extra_str is None else ' %s'%extra_str, len(skipped_edges), len(list(aa_dtree.preorder_edge_iter())))
if debug:
assert len(changes) + len(skipped_edges) + 1 == len(list(aa_dtree.preorder_edge_iter())) # +1 is for root edge
print ' rescaled %d/%d edges' % (len(changes), len(list(aa_dtree.preorder_edge_iter())))
print ' aa tree mean depth: %.3f' % get_mean_leaf_height(aa_dtree)
n_to_print = 10
print ' child nodes with %d largest differences between N nuc and N aa changes' % n_to_print
print ' nuc aa parent node child node'
for edge in sorted(changes, key=lambda k: changes[k][1] - changes[k][0])[:n_to_print]:
nuc_n_muts, aa_n_muts = changes[edge]
print ' %3d %3d %-15s %s' % (nuc_n_muts, aa_n_muts, edge.tail_node.taxon.label, edge.head_node.taxon.label)
# print utils.pad_lines(get_ascii_tree(dendro_tree=aa_dtree, width=400))
return aa_dtree
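# ----------------------------------------------------------------------------------------
# editor's worked example (not called anywhere): the branch lengths set in get_aa_tree() are just amino acid hamming fractions between parent and child sequences; this standalone version skips utils.hamming_fraction() so the arithmetic is visible. The sequences are made up.
def _example_aa_branch_length():
    parent_aa, child_aa = 'CARDGYW', 'CARDSYW'  # one difference out of seven positions
    assert len(parent_aa) == len(child_aa)
    aa_n_muts = sum(p != c for p, c in zip(parent_aa, child_aa))
    aa_mut_frac = aa_n_muts / float(len(parent_aa))  # this is what would get assigned to edge.length (here 1/7 = 0.143)
    return aa_mut_frac, aa_n_muts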
# ----------------------------------------------------------------------------------------
# check whether 1) node depth and 2) node pairwise distances are super different when calculated with tree vs sequences (not really sure why it's so different sometimes, best guess is fasttree sucks, partly because it doesn't put the root node anywhere near the root of the tree)
def compare_tree_distance_to_shm(dtree, annotation, max_frac_diff=0.5, min_warn_frac=0.25, extra_str=None, debug=False):
common_nodes = [n for n in dtree.preorder_node_iter() if n.taxon.label in annotation['unique_ids']]
tdepths, mfreqs, fracs = {}, {}, {}
for node in common_nodes:
tdepth = node.distance_from_root()
mfreq = utils.per_seq_val(annotation, 'mut_freqs', node.taxon.label)
frac_diff = abs(tdepth - mfreq) / tdepth if tdepth > 0 else 0
if frac_diff > max_frac_diff:
key = node.taxon.label
tdepths[key] = tdepth
mfreqs[key] = mfreq
fracs[key] = frac_diff
if debug or len(fracs) > 0:
warnstr = utils.color('yellow', 'warning ') if len(fracs) / float(len(common_nodes)) > min_warn_frac else ''
if debug or warnstr != '':
print ' %stree depth and mfreq differ by more than %.0f%% for %d/%d nodes%s' % (warnstr, 100*max_frac_diff, len(fracs), len(common_nodes), '' if extra_str is None else ' for %s' % extra_str)
if debug and len(fracs) > 0:
print ' tree depth mfreq frac diff'
for key, frac in sorted(fracs.items(), key=operator.itemgetter(1), reverse=True):
print ' %.4f %.4f %.4f %s' % (tdepths[key], mfreqs[key], frac, key)
dmatrix = dtree.phylogenetic_distance_matrix()
dmx_taxa = set(dmatrix.taxon_iter()) # phylogenetic_distance_matrix() seems to only return values for leaves, which maybe I'm supposed to expect?
tdists, mdists, fracs = {}, {}, {} # NOTE reusing these names is kind of dangerous
for n1, n2 in itertools.combinations([n for n in common_nodes if n.taxon in dmx_taxa], 2):
tdist = dmatrix.distance(n1.taxon, n2.taxon)
mdist = utils.hamming_fraction(utils.per_seq_val(annotation, 'seqs', n1.taxon.label), utils.per_seq_val(annotation, 'seqs', n2.taxon.label))
frac_diff = abs(tdist - mdist) / tdist if tdist > 0 else 0
if frac_diff > max_frac_diff:
key = (n1.taxon.label, n2.taxon.label)
tdists[key] = tdist
mdists[key] = mdist
fracs[key] = frac_diff
if debug or len(fracs) > 0:
warnstr = utils.color('yellow', 'warning ') if len(fracs) / float(len(common_nodes)) > min_warn_frac else ''
if debug or warnstr != '':
print ' %spairwise distance from tree and sequence differ by more than %.f%% for %d/%d node pairs%s' % (warnstr, 100*max_frac_diff, len(fracs), 0.5 * len(common_nodes) * (len(common_nodes)-1), '' if extra_str is None else ' for %s' % extra_str)
if debug and len(fracs) > 0:
print ' pairwise'
print ' tree dist seq dist frac diff'
for key, frac_diff in sorted(fracs.items(), key=operator.itemgetter(1), reverse=True):
print ' %.4f %.4f %.4f %s %s' % (tdists[key], mdists[key], frac_diff, key[0], key[1])
if debug:
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400))
utils.print_reco_event(annotation)
# ----------------------------------------------------------------------------------------
def calculate_lb_values(dtree, tau, lbr_tau_factor=None, only_calc_metric=None, dont_normalize=False, annotation=None, extra_str=None, iclust=None, debug=False):
# if <only_calc_metric> is None, we use <tau> and <lbr_tau_factor> to calculate both lbi and lbr (i.e. with different tau)
# - whereas if <only_calc_metric> is set, we use <tau> to calculate only the given metric
# note that it's a little weird to do all this tree manipulation here, but then do the dummy branch tree manipulation in set_lb_values(), but the dummy branch stuff depends on tau so it's better this way
# <iclust> is just to give a little more granularity in dbg
# TODO this is too slow (although it would be easy to have an option for it to only spot check a random subset of nodes)
# if annotation is not None: # check that the observed shm rate and tree depth are similar (we're still worried that they're different if we don't have the annotation, but we have no way to check it)
# compare_tree_distance_to_shm(dtree, annotation, extra_str=extra_str)
if max(get_leaf_depths(dtree).values()) > 1: # should only happen on old simulation files
if annotation is None:
            raise Exception('tree needs rescaling in lb calculation (metrics will be wrong): found leaf depth greater than 1 (even when less than 1 they can be wrong, but we can be fairly certain that your BCR sequences don\'t have real mutation frequency greater than 1, so in this case we can actually check). If you pass in annotations we can rescale to the observed mutation frequency.')
print ' %s leaf depths greater than 1, so rescaling by sequence length' % utils.color('yellow', 'warning')
dtree.scale_edges(1. / numpy.mean([len(s) for s in annotation['seqs']])) # using treeutils.rescale_tree() breaks, it seems because the update_bipartitions() call removes nodes near root on unrooted trees
if debug:
print ' calculating %s%s with tree:' % (' and '.join(lb_metrics if only_calc_metric is None else [only_calc_metric]), '' if extra_str is None else ' for %s' % extra_str)
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400))
multifo = None
if annotation is not None:
multifo = {} # NOTE now that I'm always doing this, it might make sense to rearrange things a bit, but i don't want to look at it right now
for node in dtree.postorder_node_iter():
multifo[node.taxon.label] = utils.get_multiplicity(annotation, uid=node.taxon.label) if node.taxon.label in annotation['unique_ids'] else 1 # if it's not in there, it could be from wonky names from lonr.r, also could be from FastTree tree where we don't get inferred intermediate sequences
treestr = dtree.as_string(schema='newick') # get this before the dummy branch stuff to make more sure it isn't modified
normstr = 'unnormalized' if dont_normalize else 'normalized'
if only_calc_metric is None:
assert lbr_tau_factor is not None # has to be set if we're calculating both metrics
if iclust is None or iclust == 0:
print ' %scalculating %s lb metrics with tau values %.4f (lbi) and %.4f * %d = %.4f (lbr)' % ('' if extra_str is None else '%s: '%extra_str, normstr, tau, tau, lbr_tau_factor, tau*lbr_tau_factor)
lbvals = set_lb_values(dtree, tau, only_calc_metric='lbi', dont_normalize=dont_normalize, multifo=multifo, debug=debug)
tmpvals = set_lb_values(dtree, tau*lbr_tau_factor, only_calc_metric='lbr', dont_normalize=dont_normalize, multifo=multifo, debug=debug)
lbvals['lbr'] = tmpvals['lbr']
else:
        assert lbr_tau_factor is None or dont_normalize  # guard against being accidentally called with lbr_tau_factor set when only_calc_metric is also set, since in that case lbr_tau_factor would be silently ignored
if iclust is None or iclust == 0:
print ' calculating %s %s with tau %.4f' % (normstr, lb_metrics[only_calc_metric], tau)
lbvals = set_lb_values(dtree, tau, only_calc_metric=only_calc_metric, dont_normalize=dont_normalize, multifo=multifo, debug=debug)
lbvals['tree'] = treestr
return lbvals
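# ----------------------------------------------------------------------------------------
# editor's illustrative usage sketch (not called anywhere): a typical call to calculate_lb_values() when calculating both lbi and lbr. The tau and lbr_tau_factor values here are only placeholders for illustration (the real defaults are set elsewhere), and <line> stands in for an annotation with the usual 'unique_ids'/'seqs' keys.
def _example_calculate_lb_values(dtree, line):
    lbvals = calculate_lb_values(dtree, 0.0025, lbr_tau_factor=20, annotation=line, extra_str='example')
    # lbvals['lbi'] and lbvals['lbr'] map node labels to values, and lbvals['tree'] is the newick string that was used
    best_uid, best_lbi = max(lbvals['lbi'].items(), key=operator.itemgetter(1))
    return best_uid, best_lbi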
# ----------------------------------------------------------------------------------------
def set_n_generations(seq_len, tau, n_tau_lengths, n_generations, debug=False):
if n_generations is None:
assert n_tau_lengths is not None # have to specify one or the other
n_generations = max(1, int(seq_len * tau * n_tau_lengths))
if debug:
print ' %d generations = seq_len * tau * n_tau_lengths = %d * %.4f * %d = max(1, int(%.2f))' % (n_generations, seq_len, tau, n_tau_lengths, seq_len * tau * n_tau_lengths)
# else:
# if debug:
# print ' %d generations' % n_generations
return n_generations
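# ----------------------------------------------------------------------------------------
# editor's worked example (not called anywhere) of the formula in set_n_generations(), with made-up values:
def _example_set_n_generations():
    return set_n_generations(400, 0.0025, 10, None)  # max(1, int(400 * 0.0025 * 10)) = 10 generations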
# ----------------------------------------------------------------------------------------
def get_tree_for_lb_bounds(bound, metric, seq_len, tau, n_generations, n_offspring, debug=False):
dtree = dendropy.Tree(is_rooted=True) # note that using a taxon namespace while you build the tree is *much* slower than labeling it afterward (and we do need labels when we calculate lb values)
if bound == 'min':
leaf_node = dtree.seed_node # pretty similar to the dummy root stuff
for igen in range(n_generations):
leaf_node = leaf_node.new_child(edge_length=1./seq_len)
elif bound == 'max':
old_leaf_nodes = [l for l in dtree.leaf_node_iter()]
assert len(old_leaf_nodes) == 1
new_leaf_nodes = []
for igen in range(n_generations):
for ileaf in range(len(old_leaf_nodes)):
for ioff in range(n_offspring):
new_leaf_nodes += [old_leaf_nodes[ileaf].new_child(edge_length=1./seq_len)]
old_leaf_nodes = new_leaf_nodes
new_leaf_nodes = []
else:
assert False
return dtree
# ----------------------------------------------------------------------------------------
def calculate_lb_bounds(seq_len, tau, n_tau_lengths=10, n_generations=None, n_offspring=2, only_metrics=None, btypes=None, debug=False):  # NOTE the min is just tau, but the fcn is kept (rather than hard coding that) to keep clear what the min actually means
info = {m : {} for m in lb_metrics}
n_generations = set_n_generations(seq_len, tau, n_tau_lengths, n_generations, debug=debug)
for metric in [m for m in lb_metrics if only_metrics is None or m in only_metrics]:
for bound in [b for b in ['min', 'max'] if btypes is None or b in btypes]:
if metric == 'lbr' and bound == 'min': # lbr min is always zero (leaves)
info[metric][bound] = {metric : 0., 'vals' : None}
continue
if debug:
print ' %s %s for seq len %d' % (utils.color('red', bound), utils.color('yellow', metric), seq_len)
start = time.time()
dtree = get_tree_for_lb_bounds(bound, metric, seq_len, tau, n_generations, n_offspring, debug=debug)
label_nodes(dtree)
lbvals = calculate_lb_values(dtree, tau, only_calc_metric=metric, dont_normalize=True, debug=debug)
bfcn = __builtins__[bound] # min() or max()
info[metric][bound] = {metric : bfcn(lbvals[metric].values()), 'vals' : lbvals}
if debug:
bname, bval = bfcn(lbvals[metric].items(), key=operator.itemgetter(1))
print ' %s of %d %s values (%.1fs): %s %.4f' % (bound, len(lbvals[metric]), metric, time.time() - start, bname, bval)
return info
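# ----------------------------------------------------------------------------------------
# editor's illustrative usage sketch (not called anywhere) of the structure returned by calculate_lb_bounds(); the seq len and tau here are made-up placeholder values.
def _example_lb_bounds():
    info = calculate_lb_bounds(400, 0.0025, only_metrics=['lbi'], btypes=['max'])
    return info['lbi']['max']['lbi']  # largest unnormalized lbi attainable for this seq len and tau (info['lbi']['max']['vals'] has the per-node values)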
# ----------------------------------------------------------------------------------------
def get_n_ancestors_to_affy_change(node, dtree, line, affinity_changes=None, min_affinity_change=1e-6, n_max_steps=15, also_return_branch_len=False, debug=False):
# find number of steps/ancestors to the nearest ancestor with lower affinity than <node>'s
# - also finds the corresponding distance, which is to the lower end of the branch containing the corresponding affinity-increasing mutation
# - this is chosen so that <n_steps> and <branch_len> are both 0 for the node at the bottom of a branch on which affinity increases, and are *not* the distance *to* the lower-affinity node
    # - because it's so common for affinity to get worse from ancestor to descendant, it's important to remember that here we are looking for the first ancestor with lower affinity than the node in question, which is *different* from looking for the first ancestor that has lower affinity than one of its immediate descendants (which we could also plot, but it probably wouldn't significantly change the metric performance, since for the metric performance we only really care about the left side of the plot, and this only affects the right side)
# - <min_affinity_change> is just to eliminate floating point precision issues (especially since we're deriving affinity by inverting kd) (note that at least for now, and with default settings, the affinity changes should all be pretty similar, and not small)
this_affinity = utils.per_seq_val(line, 'affinities', node.taxon.label)
if debug:
print ' %12s %12s %8s %9.4f' % (node.taxon.label, '', '', this_affinity)
ancestor_node = node
chosen_ancestor_affinity = None
n_steps, branch_len = 0, 0.
while n_steps < n_max_steps: # note that if we can't find an ancestor with worse affinity, we don't plot the node
if ancestor_node is dtree.seed_node:
break
ancestor_distance = ancestor_node.edge_length # distance from current <ancestor_node> to its parent (who in the next line becomes <ancestor_node>)
ancestor_node = ancestor_node.parent_node # move one more step up the tree
ancestor_uid = ancestor_node.taxon.label
if ancestor_uid not in line['unique_ids']:
print ' %s ancestor %s of %s not in true line' % (utils.color('yellow', 'warning'), ancestor_uid, node.taxon.label)
break
ancestor_affinity = utils.per_seq_val(line, 'affinities', ancestor_uid)
if this_affinity - ancestor_affinity > min_affinity_change: # if we found an ancestor with lower affinity, we're done
chosen_ancestor_affinity = ancestor_affinity
if affinity_changes is not None:
affinity_changes.append(this_affinity - ancestor_affinity)
break
if debug:
print ' %12s %12s %8.4f %9.4f%s' % ('', ancestor_uid, branch_len, ancestor_affinity, utils.color('green', ' x') if ancestor_node is dtree.seed_node else '')
n_steps += 1
branch_len += ancestor_distance
if chosen_ancestor_affinity is None: # couldn't find ancestor with lower affinity
return (None, None) if also_return_branch_len else None
if debug:
print ' %12s %12s %8.4f %9.4f %s%-9.4f' % ('', ancestor_uid, branch_len, chosen_ancestor_affinity, utils.color('red', '+'), this_affinity - chosen_ancestor_affinity)
if also_return_branch_len: # kind of hackey, but we only want the branch length for plotting atm, and actually we aren't even making those plots by default any more
return n_steps, branch_len
else:
return n_steps
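# ----------------------------------------------------------------------------------------
# editor's toy version (not called anywhere) of the convention documented in get_n_ancestors_to_affy_change(): walk up a list of ancestor affinities (ordered from the node's parent upward) and count steps until one is lower than the query node's affinity by more than <min_affinity_change>; both values are 0 if the immediate parent already has lower affinity, and None if no such ancestor is found. All numbers here are made up (the defaults return (2, 0.03)).
def _example_n_ancestors_to_affy_change(this_affinity=1.2, ancestor_affinities=(1.2, 1.2, 0.9), branch_lengths=(0.01, 0.02, 0.05), min_affinity_change=1e-6):
    n_steps, branch_len = 0, 0.
    for anc_affy, anc_dist in zip(ancestor_affinities, branch_lengths):  # branch_lengths[0] is the edge between the query node and its parent
        if this_affinity - anc_affy > min_affinity_change:  # found an ancestor with lower affinity, so we're done
            return n_steps, branch_len
        n_steps += 1
        branch_len += anc_dist
    return None, None  # no lower-affinity ancestor found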
# ----------------------------------------------------------------------------------------
lonr_files = { # this is kind of ugly, but it's the cleanest way I can think of to have both this code and the R code know what they're called
'phy.outfname' : 'phy_out.txt',
'phy.treefname' : 'phy_tree.nwk',
'outseqs.fname' : 'outseqs.fasta',
'edgefname' : 'edges.tab',
'names.fname' : 'names.tab',
'lonrfname' : 'lonr.csv',
}
# ----------------------------------------------------------------------------------------
def build_lonr_tree(edgefos, debug=False):
# NOTE have to build the tree from the edge file, since the lonr code seems to add nodes that aren't in the newick file (which is just from phylip).
all_nodes = set([e['from'] for e in edgefos] + [e['to'] for e in edgefos])
effective_root_nodes = set([e['from'] for e in edgefos]) - set([e['to'] for e in edgefos]) # "effective" because it can be in an unrooted tree. Not sure if there's always exactly one node that has no inbound edges though
if len(effective_root_nodes) != 1:
raise Exception('too many effective root nodes: %s' % effective_root_nodes)
root_label = list(effective_root_nodes)[0] # should be '1' for dnapars
if debug:
print ' chose \'%s\' as root node' % root_label
tns = dendropy.TaxonNamespace(all_nodes)
root_node = dendropy.Node(taxon=tns.get_taxon(root_label)) # NOTE this sets node.label and node.taxon.label to the same thing, which may or may not be what we want # label=root_label, (if you start setting the node labels again, you also have to translate them below)
dtree = dendropy.Tree(taxon_namespace=tns, seed_node=root_node, is_rooted=True)
remaining_nodes = copy.deepcopy(all_nodes) - set([root_label]) # a.t.m. I'm not actually using <all_nodes> after this, but I still want to keep them separate in case I start using it
    weight_or_distance_key = 'distance'  # maybe I should be using the 'weight' column? I think they're just proportional though, so it shouldn't matter (same thing in the line below)
root_edgefos = [efo for efo in edgefos if efo['from'] == root_label]
for efo in root_edgefos:
dtree.seed_node.new_child(taxon=tns.get_taxon(efo['to']), edge_length=efo[weight_or_distance_key]) # label=efo['to'], (if you start setting the node labels again, you also have to translate them below)
remaining_nodes.remove(efo['to'])
while len(remaining_nodes) > 0:
        n_removed = 0  # I don't think I need this any more (it only happened before I remembered to remove the root node), but it doesn't seem like it'll hurt
for lnode in dtree.leaf_node_iter():
children = [efo for efo in edgefos if efo['from'] == lnode.taxon.label]
if debug > 1 and len(children) > 0:
print ' adding children to %s:' % lnode.taxon.label
for chfo in children:
lnode.new_child(taxon=tns.get_taxon(chfo['to']), edge_length=chfo[weight_or_distance_key]) # label=chfo['to'], (if you start setting the node labels again, you also have to translate them below)
remaining_nodes.remove(chfo['to'])
n_removed += 1
if debug > 1:
print ' %s' % chfo['to']
if debug > 1:
print ' remaining: %d' % len(remaining_nodes)
if len(remaining_nodes) > 0 and n_removed == 0: # if there's zero remaining, we're just about to break anyway
if debug > 1:
print ' didn\'t remove any, so breaking: %s' % remaining_nodes
break
return dtree
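# ----------------------------------------------------------------------------------------
# editor's illustrative usage sketch (not called anywhere): build_lonr_tree() expects <edgefos> as a list of dicts with 'from', 'to', 'weight' and 'distance' keys, as read from lonr.r's edge file in parse_lonr() below. The toy edges here are made up (node '1' plays the role of dnapars's root).
def _example_build_lonr_tree():
    edgefos = [
        {'from' : '1', 'to' : '2',  'weight' : 1., 'distance' : 1},
        {'from' : '1', 'to' : 'L1', 'weight' : 2., 'distance' : 2},
        {'from' : '2', 'to' : 'L2', 'weight' : 1., 'distance' : 1},
        {'from' : '2', 'to' : 'L3', 'weight' : 3., 'distance' : 3},
    ]
    dtree = build_lonr_tree(edgefos)
    return dtree.as_string(schema='newick').strip()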
# ----------------------------------------------------------------------------------------
def parse_lonr(outdir, input_seqfos, naive_seq_name, reco_info=None, debug=False):
def get_node_type_from_name(name, debug=False): # internal nodes in simulated trees should be labeled like 'mrca-<stuff>' (has to correspond to what bcr-phylo-benchmark did)
if 'mrca' in name:
return 'internal'
elif 'leaf' in name:
return 'leaf'
else:
if debug:
print ' not sure of node type for \'%s\'' % name
return None
# get lonr names (lonr replaces them with shorter versions, I think because of phylip)
lonr_names, input_names = {}, {}
with open(outdir + '/' + lonr_files['names.fname']) as namefile: # headers: "head head2"
reader = csv.DictReader(namefile, delimiter='\t')
for line in reader:
if line['head'][0] != 'L' and line['head'] != naive_seq_name: # internal node
dummy_int = int(line['head']) # check that it's just a (string of a) number
assert line['head2'] == '-'
continue
input_names[line['head']] = line['head2'] # head2 is our names
lonr_names[line['head2']] = line['head']
def final_name(lonr_name):
return input_names.get(lonr_name, lonr_name)
# read edge info (i.e., implicitly, the tree that lonr.r used)
edgefos = [] # headers: "from to weight distance"
with open(outdir + '/' + lonr_files['edgefname']) as edgefile:
reader = csv.DictReader(edgefile, delimiter='\t')
for line in reader:
line['distance'] = int(line['distance'])
line['weight'] = float(line['weight'])
edgefos.append(line)
dtree = build_lonr_tree(edgefos, debug=debug)
# switch leaves to input names
for node in dtree.leaf_node_iter():
node.taxon.label = input_names[node.taxon.label]
assert node.label is None # (if you start setting the node labels again, you also have to translate them here)
# node.label = node.taxon.label # (if you start setting the node labels again, you also have to translate them here)
if debug:
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=250))
nodefos = {node.taxon.label : {} for node in dtree.postorder_node_iter()} # info for each node (internal and leaf), destined for output
# read the sequences for both leaves and inferred (internal) ancestors
seqfos = {final_name(sfo['name']) : sfo['seq'] for sfo in utils.read_fastx(outdir + '/' + lonr_files['outseqs.fname'])}
input_seqfo_dict = {sfo['name'] : sfo['seq'] for sfo in input_seqfos} # just to make sure lonr didn't modify the input sequences
for node in dtree.postorder_node_iter():
label = node.taxon.label
if label not in seqfos:
raise Exception('unexpected sequence name %s' % label)
if node.is_leaf() or label == naive_seq_name:
if label not in input_seqfo_dict:
raise Exception('leaf node \'%s\' not found in input seqs' % label)
if seqfos[label] != input_seqfo_dict[label]:
print 'input: %s' % input_seqfo_dict[label]
print ' lonr: %s' % utils.color_mutants(input_seqfo_dict[label], seqfos[label], align=True)
raise Exception('lonr leaf sequence doesn\'t match input sequence (see above)')
nodefos[label]['seq'] = seqfos[label]
# read actual lonr info
lonrfos = []
if debug:
print ' pos mutation lonr syn./a.b.d. parent child'
with open(outdir + '/' + lonr_files['lonrfname']) as lonrfile: # heads: "mutation,LONR,mutation.type,position,father,son,flag"
reader = csv.DictReader(lonrfile)
for line in reader:
assert len(line['mutation']) == 2
assert line['mutation.type'] in ('S', 'R')
assert line['flag'] in ('TRUE', 'FALSE')
mutation = line['mutation'].upper() # dnapars has it upper case already, but neighbor has it lower case
parent_name = final_name(line['father'])
child_name = final_name(line['son'])
parent_seq = nodefos[parent_name]['seq']
pos = int(line['position']) - 1 # switch from one- to zero-indexing
child_seq = nodefos[child_name]['seq']
if parent_seq[pos] != mutation[0] or child_seq[pos] != mutation[1]:
print 'parent: %s' % parent_seq
print ' child: %s' % utils.color_mutants(parent_seq, child_seq, align=True)
raise Exception('mutation info (%s at %d) doesn\'t match sequences (see above)' % (mutation, pos))
lonrfos.append({
'mutation' : mutation,
'lonr' : float(line['LONR']),
'synonymous' : line['mutation.type'] == 'S',
'position' : pos,
'parent' : parent_name,
'child' : child_name,
'affected_by_descendents' : line['flag'] == 'TRUE',
})
if debug:
lfo = lonrfos[-1]
print ' %3d %2s %5.2f %s / %s %4s %-20s' % (lfo['position'], lfo['mutation'], lfo['lonr'], 'x' if lfo['synonymous'] else ' ', 'x' if lfo['affected_by_descendents'] else ' ', lfo['parent'], lfo['child'])
# check for duplicate nodes (not sure why lonr.r kicks these, but I should probably collapse them at some point)
# in simulation, we sample internal nodes, but then lonr.r's tree construction forces these to be leaves, but then frequently they're immediately adjacent to internal nodes in lonr.r's tree... so we try to collapse them
duplicate_groups = utils.group_seqs_by_value(nodefos.keys(), keyfunc=lambda q: nodefos[q]['seq'])
duplicate_groups = [g for g in duplicate_groups if len(g) > 1]
if len(duplicate_groups) > 0:
n_max = 15
dbg_str = ', '.join([' '.join(g) for g in duplicate_groups[:n_max]]) # only print the first 15 of 'em, if there's more
if len(duplicate_groups) > n_max:
dbg_str += utils.color('blue', ' [...]')
print ' collapsing %d groups of nodes with duplicate sequences (probably just internal nodes that were renamed by lonr.r): %s' % (len(duplicate_groups), dbg_str)
for dgroup in duplicate_groups:
non_phylip_names = [n for n in dgroup if get_node_type_from_name(n) is not None]
if len(non_phylip_names) == 0: # and phylip internal node names are of form str(<integer>), so just choose the first alphabetically, because whatever
name_to_use = sorted(dgroup)[0]
elif len(non_phylip_names) == 1:
name_to_use = non_phylip_names[0]
else:
                raise Exception('unexpected set of duplicate-sequence node names %s (should\'ve been either one or zero non-phylip names)' % non_phylip_names)
names_to_remove = [n for n in dgroup if n != name_to_use]
for rname in names_to_remove: # only info in here a.t.m. is the sequence
del nodefos[rname]
# NOTE not collapsing nodes in tree to match <nodefos> (see comment on next line)
                # collapse_nodes(dtree, name_to_use, rname, allow_failure=True, debug=True)  # this turned out not to be worth the effort (it doesn't really work because the tree is too mangled) [just gave up and added the duplicate info to the return dict]
for lfo in lonrfos:
for key in ('parent', 'child'):
if lfo[key] in names_to_remove:
lfo[key] = name_to_use
return {'tree' : dtree.as_string(schema='newick'), 'nodes' : nodefos, 'values' : lonrfos}
# ----------------------------------------------------------------------------------------
def run_lonr(input_seqfos, naive_seq_name, workdir, tree_method, lonr_code_file=None, phylip_treefile=None, phylip_seqfile=None, seed=1, debug=False):
if lonr_code_file is None:
lonr_code_file = os.path.dirname(os.path.realpath(__file__)).replace('/python', '/bin/lonr.r')
if not os.path.exists(lonr_code_file):
raise Exception('lonr code file %s d.n.e.' % lonr_code_file)
if tree_method not in ('dnapars', 'neighbor'):
raise Exception('unexpected lonr tree method %s' % tree_method)
# # installation stuff
# rcmds = [
# 'source("https://bioconductor.org/biocLite.R")',
# 'biocLite("Biostrings")',
# 'install.packages("seqinr", repos="http://cran.rstudio.com/")',
# ]
# utils.run_r(rcmds, workdir)
input_seqfile = workdir + '/input-seqs.fa'
with open(input_seqfile, 'w') as iseqfile:
for sfo in input_seqfos:
iseqfile.write('>%s\n%s\n' % (sfo['name'], sfo['seq']))
existing_phylip_output_str = ''
if phylip_treefile is not None: # using existing phylip output, e.g. from cft
tree = get_dendro_tree(treefname=phylip_treefile)
edgefos = []
for node in tree.preorder_node_iter():
for edge in node.child_edge_iter():
edgefos.append({'from' : node.taxon.label, 'to' : edge.head_node.taxon.label, 'weight' : edge.length})
existing_edgefname = workdir + '/edges.csv'
existing_node_seqfname = workdir + '/infered-node-seqs.fa'
with open(existing_edgefname, 'w') as edgefile:
writer = csv.DictWriter(edgefile, ('from', 'to', 'weight'))
writer.writeheader()
for line in edgefos:
writer.writerow(line)
with open(existing_node_seqfname, 'w') as node_seqfile:
writer = csv.DictWriter(node_seqfile, ('head', 'seq'))
writer.writeheader()
for sfo in utils.read_fastx(phylip_seqfile):
writer.writerow({'head' : sfo['name'], 'seq' : sfo['seq']})
existing_phylip_output_str = ', existing.edgefile="%s", existing.node.seqfile="%s"' % (existing_edgefname, existing_node_seqfname)
rcmds = [
'source("%s")' % lonr_code_file,
'set.seed(%d)' % seed,
        'G.phy.outfname = "%s"' % lonr_files['phy.outfname'],  # this is a clumsy way to do it, but the underlying problem is that there are too many files; I don't want to parse them all into one or two files in R, so I need to pass all of them to the calling python script
'G.phy.treefname = "%s"' % lonr_files['phy.treefname'],
'G.outseqs.fname = "%s"' % lonr_files['outseqs.fname'],
'G.edgefname = "%s"' % lonr_files['edgefname'],
'G.names.fname = "%s"' % lonr_files['names.fname'],
'G.lonrfname = "%s"' % lonr_files['lonrfname'],
'compute.LONR(method="%s", infile="%s", workdir="%s/", outgroup="%s"%s)' % (tree_method, input_seqfile, workdir, naive_seq_name, existing_phylip_output_str),
]
outstr, errstr = utils.run_r(rcmds, workdir, extra_str=' ', return_out_err=True, debug=debug)
if debug:
print utils.pad_lines(outstr)
print utils.pad_lines(errstr)
os.remove(input_seqfile)
if phylip_treefile is not None:
os.remove(existing_edgefname)
os.remove(existing_node_seqfname)
# ----------------------------------------------------------------------------------------
def calculate_liberman_lonr(input_seqfos=None, line=None, reco_info=None, phylip_treefile=None, phylip_seqfile=None, tree_method=None, naive_seq_name='X-naive-X', seed=1, debug=False):
# NOTE see issues/notes in bin/lonr.r
if phylip_treefile is not None or phylip_seqfile is not None:
raise Exception('never got this (passing phylip output files to lonr.r) to work -- lonr.r kept barfing, although if you were running exactly the same phylip commands as lonr.r does, it would probably work.')
assert input_seqfos is None or line is None
if input_seqfos is None:
input_seqfos = [{'name' : line['unique_ids'][iseq], 'seq' : line['seqs'][iseq]} for iseq in range(len(line['unique_ids']))]
input_seqfos.insert(0, {'name' : naive_seq_name, 'seq' : line['naive_seq']})
if tree_method is None:
tree_method = 'dnapars' if len(input_seqfos) < 500 else 'neighbor'
workdir = utils.choose_random_subdir('/tmp/%s' % os.getenv('USER', default='partis-work'))
os.makedirs(workdir)
if debug:
print ' %s' % utils.color('green', 'lonr:')
run_lonr(input_seqfos, naive_seq_name, workdir, tree_method, phylip_treefile=phylip_treefile, phylip_seqfile=phylip_seqfile, seed=seed, debug=debug)
lonr_info = parse_lonr(workdir, input_seqfos, naive_seq_name, reco_info=reco_info, debug=debug)
for fn in lonr_files.values():
os.remove(workdir + '/' + fn)
os.rmdir(workdir)
return lonr_info
# ----------------------------------------------------------------------------------------
def get_tree_metric_lines(annotations, cpath, reco_info, use_true_clusters, min_overlap_fraction=0.5, only_use_best_partition=False, only_plot_uids_with_affinity_info=False, glfo=None, debug=False):
# collect inferred and true events
inf_lines_to_use, true_lines_to_use = None, None
if use_true_clusters: # use clusters from the true partition, rather than inferred one
assert reco_info is not None
true_partition = utils.get_partition_from_reco_info(reco_info)
print ' using %d true clusters to calculate inferred selection metrics (sizes: %s)' % (len(true_partition), ' '.join(str(l) for l in sorted([len(c) for c in true_partition], reverse=True)))
if debug:
print ' choosing N N N frac (N chosen)'
print ' from true & chosen = in common in common (w/out duplicates)'
inf_lines_to_use, true_lines_to_use = [], []
chosen_ustrs = set() # now that we're using the fraction instead of the raw total, we mostly shouldn't get multiple true clusters corresponding to the same inferred cluster, but maybe it'll still happen occasionally
for cluster in true_partition:
true_lines_to_use.append(utils.synthesize_multi_seq_line_from_reco_info(cluster, reco_info)) # note: duplicates (a tiny bit of) code in utils.print_true_events()
n_max_in_common, max_frac_in_common, ustr_to_use = None, None, None # look for the inferred cluster that has the most uids in common with this true cluster
for ustr in set(annotations) - chosen_ustrs: # order will be different in reco info and inferred clusters
n_in_common = len(set(utils.uids_and_dups(annotations[ustr])) & set(cluster)) # can't just look for the actual cluster since we collapse duplicates, but bcr-phylo doesn't (but maybe I should throw them out when parsing bcr-phylo output)
frac_in_common = n_in_common**2 / float(len(utils.uids_and_dups(annotations[ustr])) * len(cluster)) # and have to use frac instead of total to guard against inferred clusters that include several true clusters (reminder: these inferred clusters may have been run with --n-final-clusters 1 or something similar)
if max_frac_in_common is None or frac_in_common > max_frac_in_common:
ustr_to_use = ustr
n_max_in_common = n_in_common
max_frac_in_common = frac_in_common
if max_frac_in_common is None:
raise Exception('cluster \'%s\' not found in inferred annotations (probably because use_true_clusters was set)' % ':'.join(cluster))
if max_frac_in_common < min_overlap_fraction:
raise Exception('overlap fraction %.3f too small: for true cluster (size %d), highest was for inferred cluster with size %d (%d including duplicates). Maybe need to set --simultaneous-true-clonal-seqs (if you did set --simultaneous-true-clonal-seqs, you probably need to set --no-indels, i.e. a true cluster got split apart because of incorrect indel calls).' % (max_frac_in_common, len(cluster), len(annotations[ustr_to_use]['unique_ids']), len(utils.uids_and_dups(annotations[ustr_to_use]))))
if debug:
print ' %4d %4d %4d %4d %4.2f (%d)' % (len(set(annotations) - chosen_ustrs), len(cluster), len(utils.uids_and_dups(annotations[ustr_to_use])), n_max_in_common, max_frac_in_common, len(annotations[ustr_to_use]['unique_ids']))
if max_frac_in_common < 1:
print ' note: couldn\'t find an inferred cluster that corresponded exactly to the true cluster (best was %d & %d = %d (frac %.2f), where the inferred includes %d duplicates)' % (len(utils.uids_and_dups(annotations[ustr_to_use])), len(cluster), n_max_in_common, max_frac_in_common, utils.n_dups(annotations[ustr_to_use]))
if ustr_to_use in chosen_ustrs:
raise Exception('chose the same inferred cluster to correspond to two different true clusters')
chosen_ustrs.add(ustr_to_use)
inf_lines_to_use.append(annotations[ustr_to_use])
else: # use clusters from the inferred partition (whether from <cpath> or <annotations>), and synthesize clusters exactly matching these using single true annotations from <reco_info> (to repeat: these are *not* true clusters)
inf_lines_to_use = annotations.values() # we used to restrict it to clusters in the best partition, but I'm switching since I think whenever there are extra ones in <annotations> we always actually want their tree metrics (at the moment there will only be extra ones if either --calculate-alternative-annotations or --write-additional-cluster-annotations are set, but in the future it could also be the default)
if only_use_best_partition:
assert cpath is not None and cpath.i_best is not None
inf_lines_to_use = [l for l in inf_lines_to_use if l['unique_ids'] in cpath.partitions[cpath.i_best]]
if only_plot_uids_with_affinity_info:
assert False # should work fine as is, but needs to be checked and integrated with things
tmplines = []
for line in inf_lines_to_use:
iseqs_to_keep = [i for i, a in enumerate(line['affinities']) if a is not None]
if len(iseqs_to_keep) == 0:
continue
print ' keeping %d/%d' % (len(iseqs_to_keep), len(line['unique_ids']))
new_line = copy.deepcopy(line) # *really* don't want to modify the annotations from partitiondriver
utils.restrict_to_iseqs(new_line, iseqs_to_keep, glfo)
tmplines.append(new_line)
inf_lines_to_use = tmplines
if reco_info is not None:
for line in inf_lines_to_use:
true_line = utils.synthesize_multi_seq_line_from_reco_info(line['unique_ids'], reco_info)
true_lines_to_use.append(true_line)
return inf_lines_to_use, true_lines_to_use
# ----------------------------------------------------------------------------------------
def plot_tree_metrics(base_plotdir, inf_lines_to_use, true_lines_to_use, ete_path=None, workdir=None, include_relative_affy_plots=False, only_csv=False, queries_to_include=None, debug=False):
import plotting
import lbplotting
start = time.time()
print ' plotting to %s' % base_plotdir
# inferred plots
    if true_lines_to_use is None:  # at least for now I'm turning off inferred plots when we have true lines; the only reason we'd want them (I think) is to compare the effect of true vs inferred trees, which I'm not doing now, and it's quite slow
has_affinities = any('affinities' in l for l in inf_lines_to_use) # we'd expect that either all or none of the families have affinity info, but oh well this makes it more general
inf_plotdir = base_plotdir + '/inferred-tree-metrics'
utils.prep_dir(inf_plotdir, wildlings=['*.svg', '*.html'], allow_other_files=True, subdirs=lb_metrics.keys())
fnames = []
if has_affinities:
lbplotting.plot_lb_vs_affinity(inf_plotdir, inf_lines_to_use, 'aa-lbi', only_csv=only_csv, fnames=fnames, is_true_line=False, debug=debug)
if not only_csv:
lbplotting.plot_lb_distributions('aa-lbi', inf_plotdir, inf_lines_to_use, fnames=fnames, only_overall=False, iclust_fnames=None if has_affinities else 8)
if has_affinities:
lbplotting.plot_lb_vs_affinity(inf_plotdir, inf_lines_to_use, 'cons-dist-aa', only_csv=only_csv, fnames=fnames, is_true_line=False, debug=debug)
if not only_csv: # all the various scatter plots are really slow
lbplotting.plot_lb_distributions('cons-dist-aa', inf_plotdir, inf_lines_to_use, fnames=fnames, only_overall=False, iclust_fnames=None if has_affinities else 8)
lbplotting.make_lb_scatter_plots('cons-dist-aa', inf_plotdir, 'aa-lbi', inf_lines_to_use, fnames=fnames, is_true_line=False, colorvar='affinity' if has_affinities else 'edge-dist', add_jitter=False, iclust_fnames=None if has_affinities else 8, queries_to_include=queries_to_include)
# it's important to have nuc-lbi vs aa-lbi so you can see if they're super correlated (which means we didn't have any of the internal nodes):
lbplotting.make_lb_scatter_plots('aa-lbi', inf_plotdir, 'lbi', inf_lines_to_use, fnames=fnames, is_true_line=False, add_jitter=False, iclust_fnames=None if has_affinities else 8, queries_to_include=queries_to_include, add_stats='correlation')
lbplotting.plot_lb_distributions('lbr', inf_plotdir, inf_lines_to_use, fnames=fnames, only_overall=False, iclust_fnames=None if has_affinities else 8)
if ete_path is not None:
lbplotting.plot_lb_trees(['aa-lbi', 'lbr', 'cons-dist-aa'], inf_plotdir, inf_lines_to_use, ete_path, workdir, is_true_line=False, queries_to_include=queries_to_include)
subdirs = [d for d in os.listdir(inf_plotdir) if os.path.isdir(inf_plotdir + '/' + d)]
plotting.make_html(inf_plotdir, fnames=fnames, new_table_each_row=True, htmlfname=inf_plotdir + '/overview.html', extra_links=[(subd, '%s/%s/' % (inf_plotdir, subd)) for subd in subdirs])
# true plots
if true_lines_to_use is not None:
if 'affinities' not in true_lines_to_use[0] or all(affy is None for affy in true_lines_to_use[0]['affinities']): # if it's bcr-phylo simulation we should have affinities for everybody, otherwise for nobody
# print ' %s no affinity information in this simulation, so can\'t plot lb/affinity stuff' % utils.color('yellow', 'note')
            print '      selection metric plotting time (no true plots): %.1f sec' % (time.time() - start)
return
true_plotdir = base_plotdir + '/true-tree-metrics'
utils.prep_dir(true_plotdir, wildlings=['*.svg', '*.html'], allow_other_files=True, subdirs=lb_metrics.keys())
fnames = []
for affy_key in (['affinities', 'relative_affinities'] if include_relative_affy_plots else ['affinities']):
lbplotting.plot_lb_vs_affinity(true_plotdir, true_lines_to_use, 'aa-lbi', is_true_line=True, affy_key=affy_key, only_csv=only_csv, fnames=fnames, debug=debug)
lbplotting.plot_lb_vs_affinity(true_plotdir, true_lines_to_use, 'cons-dist-aa', is_true_line=True, affy_key=affy_key, only_csv=only_csv, fnames=fnames, debug=debug)
if not only_csv:
lbplotting.make_lb_scatter_plots('cons-dist-aa', true_plotdir, 'aa-lbi', true_lines_to_use, fnames=fnames, is_true_line=True, colorvar='affinity', only_overall=True, add_jitter=False)
lbplotting.make_lb_scatter_plots('aa-lbi', true_plotdir, 'lbi', true_lines_to_use, fnames=fnames, is_true_line=True, only_overall=True, add_jitter=False, add_stats='correlation')
lbplotting.plot_lb_vs_ancestral_delta_affinity(true_plotdir + '/lbr', true_lines_to_use, 'lbr', is_true_line=True, only_csv=only_csv, fnames=fnames, debug=debug)
if not only_csv:
# mtmp = 'lbi'
# lbplotting.make_lb_scatter_plots('affinity-ptile', true_plotdir, mtmp, true_lines_to_use, fnames=fnames, is_true_line=True, yvar='%s-ptile'%mtmp, colorvar='edge-dist', add_jitter=True)
# lbplotting.make_lb_scatter_plots('affinity-ptile', true_plotdir, mtmp, true_lines_to_use, fnames=fnames, is_true_line=True, yvar='%s-ptile'%mtmp, colorvar='edge-dist', only_overall=False, choose_among_families=True)
# lbplotting.make_lb_scatter_plots('shm', true_plotdir, mtmp, true_lines_to_use, fnames=fnames, is_true_line=True, colorvar='edge-dist', only_overall=True, add_jitter=False)
# lbplotting.make_lb_scatter_plots('affinity-ptile', true_plotdir, mtmp, true_lines_to_use, fnames=fnames, is_true_line=True, yvar='cons-dist-nuc-ptile', colorvar='edge-dist', add_jitter=True)
for lb_metric in lb_metrics:
lbplotting.make_lb_affinity_joyplots(true_plotdir + '/joyplots', true_lines_to_use, lb_metric, fnames=fnames)
# lbplotting.plot_lb_distributions('lbi', true_plotdir, true_lines_to_use, fnames=fnames, is_true_line=True, only_overall=True)
# lbplotting.plot_lb_distributions('lbr', true_plotdir, true_lines_to_use, fnames=fnames, is_true_line=True, only_overall=True)
if ete_path is not None:
lbplotting.plot_lb_trees(['aa-lbi', 'lbr', 'cons-dist-aa'], true_plotdir, true_lines_to_use, ete_path, workdir, is_true_line=True)
# for lb_metric in lb_metrics:
# lbplotting.plot_true_vs_inferred_lb(true_plotdir + '/' + lb_metric, true_lines_to_use, inf_lines_to_use, lb_metric, fnames=fnames)
# lbplotting.plot_cons_seq_accuracy(true_plotdir, true_lines_to_use, fnames=fnames)
subdirs = [d for d in os.listdir(true_plotdir) if os.path.isdir(true_plotdir + '/' + d)]
plotting.make_html(true_plotdir, fnames=fnames, extra_links=[(subd, '%s/%s/' % (true_plotdir, subd)) for subd in subdirs])
print ' selection metric plotting time: %.1f sec' % (time.time() - start)
# ----------------------------------------------------------------------------------------
def get_tree_for_line(line, treefname=None, cpath=None, annotations=None, use_true_clusters=False, ignore_existing_internal_node_labels=False, debug=False):
# figure out how we want to get the inferred tree
if treefname is not None:
dtree = get_dendro_tree(treefname=treefname, ignore_existing_internal_node_labels=ignore_existing_internal_node_labels, debug=debug)
origin = 'treefname'
if len(set([n.taxon.label for n in dtree.preorder_node_iter()]) & set(line['unique_ids'])) == 0: # if no nodes in common between line and tree in file (e.g. you passed in the wrong file or didn't set --cluster-indices)
dtree = None
origin = 'no-uids'
elif False: # use_liberman_lonr_tree: # NOTE see issues/notes in bin/lonr.r
lonr_info = calculate_liberman_lonr(line=line, reco_info=reco_info, debug=debug)
dtree = get_dendro_tree(treestr=lonr_info['tree'])
# line['tree-info']['lonr'] = lonr_info
origin = 'lonr'
elif cpath is not None and cpath.i_best is not None and not use_true_clusters and line['unique_ids'] in cpath.partitions[cpath.i_best]: # if <use_true_clusters> is set, then the clusters in <inf_lines_to_use> won't correspond to the history in <cpath>, so this won't work NOTE now that I've added the direct check if the unique ids are in the best partition, i can probably remove the use_true_clusters check, but I don't want to mess with it a.t.m.
assert annotations is not None
i_only_cluster = cpath.partitions[cpath.i_best].index(line['unique_ids'])
cpath.make_trees(annotations=annotations, i_only_cluster=i_only_cluster, get_fasttrees=True, debug=False)
dtree = cpath.trees[i_only_cluster] # as we go through the loop, the <cpath> is presumably filling all of these in
origin = 'cpath'
else:
seqfos = [{'name' : uid, 'seq' : seq} for uid, seq in zip(line['unique_ids'], line['seqs'])]
dtree = get_fasttree_tree(seqfos, naive_seq=line['naive_seq'], debug=debug)
origin = 'fasttree'
return {'tree' : dtree, 'origin' : origin}
# ----------------------------------------------------------------------------------------
def check_lb_values(line, lbvals):
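    # make sure every uid in <line> has an entry in <lbvals> for each lb metric (extra keys are expected for inferred ancestral nodes, but missing ones aren't)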
for metric in [m for m in lbvals if m in lb_metrics]:
missing = set(line['unique_ids']) - set(lbvals[metric])
        if len(missing) > 0:  # we expect to get extra ones in the tree, for inferred ancestral nodes for which we don't have sequences, but missing ones probably indicate something's up
# raise Exception('uids in annotation not the same as lb info keys\n missing: %s\n extra: %s' % (' '.join(set(line['unique_ids']) - set(lbvals[metric])), ' '.join(set(lbvals[metric]) - set(line['unique_ids']))))
extra = set(lbvals[metric]) - set(line['unique_ids'])
common = set(line['unique_ids']) & set(lbvals[metric])
print ' %s uids in annotation not the same as lb info keys for \'%s\': %d missing %d extra (%d in common)' % (utils.color('red', 'error'), metric, len(missing), len(extra), len(common))
if len(missing) + len(extra) < 35:
print ' missing: %s\n extra: %s\n common: %s' % (' '.join(missing), ' '.join(extra), ' '.join(common))
# NOTE this is not tested, but might be worth using in the future
# # ----------------------------------------------------------------------------------------
# def get_trees_for_annotations(annotations, cpath=None, workdir=None, min_cluster_size=default_min_selection_metric_cluster_size, cluster_indices=None, debug=False): # NOTE this duplicates some code in the following function (but I want them separate since I don't really care about this fcn much)
# print 'getting trees'
# inf_lines_to_use = annotations.values()
# n_before = len(inf_lines_to_use)
# inf_lines_to_use = sorted([l for l in inf_lines_to_use if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True)
# n_after = len(inf_lines_to_use) # after removing the small ones
# tree_origin_counts = {n : {'count' : 0, 'label' : l} for n, l in (('treefname', 'read from %s' % treefname), ('cpath', 'made from cpath'), ('fasttree', 'ran fasttree'), ('lonr', 'ran liberman lonr'))}
# print ' calculating selection metrics for %d cluster%s with size%s: %s' % (n_after, utils.plural(n_after), utils.plural(n_after), ' '.join(str(len(l['unique_ids'])) for l in inf_lines_to_use))
# print ' skipping %d smaller than %d' % (n_before - n_after, min_cluster_size)
# if cluster_indices is not None:
# if min(cluster_indices) < 0 or max(cluster_indices) >= len(inf_lines_to_use):
# raise Exception('invalid cluster indices %s for partition with %d clusters' % (cluster_indices, len(inf_lines_to_use)))
# print ' skipped all iclusts except %s (size%s %s)' % (' '.join(str(i) for i in cluster_indices), utils.plural(len(cluster_indices)), ' '.join(str(len(inf_lines_to_use[i]['unique_ids'])) for i in cluster_indices))
# n_already_there = 0
# for iclust, line in enumerate(inf_lines_to_use):
# if cluster_indices is not None and iclust not in cluster_indices:
# continue
# if debug:
# print ' %s sequence cluster' % utils.color('green', str(len(line['unique_ids'])))
# if 'tree-info' in line: # NOTE we used to continue here, but now I've decided we really want to overwrite what's there (although I'm a little worried that there was a reason I'm forgetting not to overwrite them)
# if debug:
# print ' %s overwriting tree that was already in <line>' % utils.color('yellow', 'warning')
# n_already_there += 1
# treefo = get_tree_for_line(line, cpath=cpath, annotations=annotations, debug=debug)
# if treefo is None:
# continue
# tree_origin_counts[treefo['origin']]['count'] += 1
# line['tree-info'] = {} # NOTE <treefo> has a dendro tree, but what we put in the <line> (at least for now) is a newick string
# line['tree-info']['tree'] = treefo['tree'].as_string(schema='newick')
# print ' tree origins: %s' % ', '.join(('%d %s' % (nfo['count'], nfo['label'])) for n, nfo in tree_origin_counts.items() if nfo['count'] > 0)
# if n_already_there > 0:
# print ' %s overwriting %d / %d that already had trees' % (utils.color('yellow', 'warning'), n_already_there, n_after)
# ----------------------------------------------------------------------------------------
def get_aa_lb_metrics(line, nuc_dtree, lb_tau, lbr_tau_factor=None, only_calc_metric=None, dont_normalize_lbi=False, extra_str=None, iclust=None, debug=False): # and add them to <line>
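    # make an amino acid version of <nuc_dtree>, calculate lb values on it, and add them to line['tree-info']['lb'] under 'aa-'-prefixed keys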
utils.add_seqs_aa(line)
aa_dtree = get_aa_tree(nuc_dtree, line, extra_str=extra_str, debug=debug)
aa_lb_info = calculate_lb_values(aa_dtree, lb_tau, lbr_tau_factor=lbr_tau_factor, only_calc_metric=only_calc_metric, annotation=line, dont_normalize=dont_normalize_lbi, extra_str=extra_str, iclust=iclust, debug=debug)
if 'tree-info' not in line:
line['tree-info'] = {'lb' : {}}
line['tree-info']['lb']['aa-tree'] = aa_dtree.as_string(schema='newick')
for nuc_metric in [k for k in aa_lb_info if k != 'tree']:
line['tree-info']['lb']['aa-'+nuc_metric] = aa_lb_info[nuc_metric]
# ----------------------------------------------------------------------------------------
def calculate_tree_metrics(annotations, lb_tau, lbr_tau_factor=None, cpath=None, treefname=None, reco_info=None, use_true_clusters=False, base_plotdir=None,
ete_path=None, workdir=None, dont_normalize_lbi=False, only_csv=False, min_cluster_size=default_min_selection_metric_cluster_size,
dtr_path=None, train_dtr=False, dtr_cfg=None, add_aa_consensus_distance=False, add_aa_lb_metrics=False, true_lines_to_use=None, include_relative_affy_plots=False,
cluster_indices=None, outfname=None, only_use_best_partition=False, glfo=None, queries_to_include=None, ignore_existing_internal_node_labels=False, debug=False):
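    # get a tree and calculate lb values for each sufficiently large cluster (plus optional aa metrics, cons distances, and dtr predictions), do the same for the true lines if we have them, then plot and/or write the results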
print 'getting selection metrics'
if reco_info is not None:
if not use_true_clusters:
print ' note: getting selection metrics on simulation without setting <use_true_clusters> (i.e. probably without setting --simultaneous-true-clonal-seqs)'
for tmpline in reco_info.values():
assert len(tmpline['unique_ids']) == 1 # at least for the moment, we're splitting apart true multi-seq lines when reading in seqfileopener.py
if dtr_path is not None:
assert not dont_normalize_lbi # it's trained on normalized lbi, so results are garbage if you don't normalize
dtr_cfgvals, trainfo, skmodels, pmml_models, missing_models = init_dtr(train_dtr, dtr_path, cfg_fname=dtr_cfg)
if true_lines_to_use is not None: # i.e. being called by bin/dtr-run.py
assert reco_info is None
inf_lines_to_use = None
else: # called from python/partitiondriver.py
inf_lines_to_use, true_lines_to_use = get_tree_metric_lines(annotations, cpath, reco_info, use_true_clusters, only_use_best_partition=only_use_best_partition, glfo=glfo) # NOTE these continue to be modified (by removing clusters we don't want) further down, and then they get passed to the plotting functions
# get tree and calculate metrics for inferred lines
if inf_lines_to_use is not None:
n_before = len(inf_lines_to_use)
inf_lines_to_use = sorted([l for l in inf_lines_to_use if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True)
n_after = len(inf_lines_to_use) # after removing the small ones
tree_origin_counts = {n : {'count' : 0, 'label' : l} for n, l in (('treefname', 'read from %s' % treefname), ('cpath', 'made from cpath'), ('fasttree', 'ran fasttree'), ('lonr', 'ran liberman lonr'))}
print ' calculating selection metrics for %d cluster%s with size%s: %s' % (n_after, utils.plural(n_after), utils.plural(n_after), ' '.join(str(len(l['unique_ids'])) for l in inf_lines_to_use))
print ' skipping %d smaller than %d' % (n_before - n_after, min_cluster_size)
if cluster_indices is not None:
if min(cluster_indices) < 0 or max(cluster_indices) >= len(inf_lines_to_use):
raise Exception('invalid cluster indices %s for partition with %d clusters' % (cluster_indices, len(inf_lines_to_use)))
print ' skipped all iclusts except %s (size%s %s)' % (' '.join(str(i) for i in cluster_indices), utils.plural(len(cluster_indices)), ' '.join(str(len(inf_lines_to_use[i]['unique_ids'])) for i in cluster_indices))
n_already_there, n_skipped_uid = 0, 0
final_inf_lines = []
for iclust, line in enumerate(inf_lines_to_use):
if cluster_indices is not None and iclust not in cluster_indices:
continue
if debug:
print ' %s sequence cluster' % utils.color('green', str(len(line['unique_ids'])))
treefo = get_tree_for_line(line, treefname=treefname, cpath=cpath, annotations=annotations, use_true_clusters=use_true_clusters, ignore_existing_internal_node_labels=ignore_existing_internal_node_labels, debug=debug)
if treefo['tree'] is None and treefo['origin'] == 'no-uids':
n_skipped_uid += 1
continue
tree_origin_counts[treefo['origin']]['count'] += 1
if 'tree-info' in line: # NOTE we used to continue here, but now I've decided we really want to overwrite what's there (although I'm a little worried that there was a reason I'm forgetting not to overwrite them)
if debug:
print ' %s overwriting selection metric info that was already in <line>' % utils.color('yellow', 'warning')
n_already_there += 1
line['tree-info'] = {} # NOTE <treefo> has a dendro tree, but what we put in the <line> (at least for now) is a newick string
line['tree-info']['lb'] = calculate_lb_values(treefo['tree'], lb_tau, lbr_tau_factor=lbr_tau_factor, annotation=line, dont_normalize=dont_normalize_lbi, extra_str='inf tree', iclust=iclust, debug=debug)
            check_lb_values(line, line['tree-info']['lb']) # would be nice to remove this eventually, but I keep running into instances where dendropy is silently removing nodes
if add_aa_consensus_distance:
add_cdists_to_lbfo(line, line['tree-info']['lb'], 'cons-dist-aa', debug=debug) # this adds the values both directly to the <line>, and to <line['tree-info']['lb']>, but the former won't end up in the output file unless the corresponding keys are specified as extra annotation columns (this distinction/duplication is worth having, although it's not ideal)
if add_aa_lb_metrics:
get_aa_lb_metrics(line, treefo['tree'], lb_tau, lbr_tau_factor=lbr_tau_factor, dont_normalize_lbi=dont_normalize_lbi, extra_str='(AA inf tree, iclust %d)'%iclust, iclust=iclust, debug=debug)
if dtr_path is not None and not train_dtr: # don't want to train on data
calc_dtr(False, line, line['tree-info']['lb'], treefo['tree'], None, pmml_models, dtr_cfgvals) # adds predicted dtr values to lbfo (hardcoded False and None are to make sure we don't train on data)
final_inf_lines.append(line)
print ' tree origins: %s' % ', '.join(('%d %s' % (nfo['count'], nfo['label'])) for n, nfo in tree_origin_counts.items() if nfo['count'] > 0)
if n_skipped_uid > 0:
print ' skipped %d/%d clusters that had no uids in common with tree in %s' % (n_skipped_uid, n_after, treefname)
if n_already_there > 0:
print ' %s replaced tree info in %d / %d that already had it' % (utils.color('yellow', 'warning'), n_already_there, n_after)
inf_lines_to_use = final_inf_lines # replace it with a new list that only has the clusters we really want
# calculate lb values for true lines/trees
if true_lines_to_use is not None: # note that if <base_plotdir> *isn't* set, we don't actually do anything with the true lb values
n_true_before = len(true_lines_to_use)
true_lines_to_use = sorted([l for l in true_lines_to_use if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True)
n_true_after = len(true_lines_to_use)
print ' also doing %d true cluster%s with size%s: %s' % (n_true_after, utils.plural(n_true_after), utils.plural(n_true_after), ' '.join(str(len(l['unique_ids'])) for l in true_lines_to_use))
print ' skipping %d smaller than %d' % (n_true_before - n_true_after, min_cluster_size)
final_true_lines = []
for iclust, true_line in enumerate(true_lines_to_use):
if cluster_indices is not None and iclust not in cluster_indices:
continue
true_dtree = get_dendro_tree(treestr=true_line['tree'])
true_lb_info = calculate_lb_values(true_dtree, lb_tau, lbr_tau_factor=lbr_tau_factor, annotation=true_line, dont_normalize=dont_normalize_lbi, extra_str='true tree', iclust=iclust, debug=debug)
true_line['tree-info'] = {'lb' : true_lb_info}
            check_lb_values(true_line, true_line['tree-info']['lb']) # would be nice to remove this eventually, but I keep running into instances where dendropy is silently removing nodes
if add_aa_lb_metrics:
get_aa_lb_metrics(true_line, true_dtree, lb_tau, lbr_tau_factor=lbr_tau_factor, dont_normalize_lbi=dont_normalize_lbi, extra_str='(AA true tree, iclust %d)'%iclust, iclust=iclust, debug=debug)
if add_aa_consensus_distance:
add_cdists_to_lbfo(true_line, true_line['tree-info']['lb'], 'cons-dist-aa', debug=debug) # see comment in previous call above
if dtr_path is not None:
calc_dtr(train_dtr, true_line, true_lb_info, true_dtree, trainfo, pmml_models, dtr_cfgvals) # either adds training values to trainfo, or adds predicted dtr values to lbfo
final_true_lines.append(true_line)
true_lines_to_use = final_true_lines # replace it with a new list that only has the clusters we really want
if dtr_path is not None: # it would be nice to eventually merge these two blocks, i.e. use the same code to plot dtr and lbi/lbr
if train_dtr:
print ' training decision trees into %s' % dtr_path
if dtr_cfgvals['n_train_per_family'] is not None:
print ' n_train_per_family: using only %d from each family for among-families dtr' % dtr_cfgvals['n_train_per_family']
for cg in cgroups:
for tvar in dtr_targets[cg]:
train_dtr_model(trainfo[cg][tvar], dtr_path, dtr_cfgvals, cg, tvar)
elif base_plotdir is not None:
assert true_lines_to_use is not None
plstart = time.time()
assert ete_path is None or workdir is not None # need the workdir to make the ete trees
import plotting
import lbplotting
# if 'affinities' not in annotations[0] or all(affy is None for affy in annotations[0]['affinities']): # if it's bcr-phylo simulation we should have affinities for everybody, otherwise for nobody
# return
print ' plotting to %s' % base_plotdir
true_plotdir = base_plotdir + '/true-tree-metrics'
lbmlist = sorted(m for m in dtr_metrics if m not in missing_models) # sorted() is just so the order in the html file matches that in the lb metric one
utils.prep_dir(true_plotdir, wildlings=['*.svg', '*.html'], allow_other_files=True, subdirs=lbmlist)
fnames = []
for lbm in lbmlist:
if 'delta-affinity' in lbm:
lbplotting.plot_lb_vs_ancestral_delta_affinity(true_plotdir+'/'+lbm, true_lines_to_use, lbm, is_true_line=True, only_csv=only_csv, fnames=fnames, debug=debug)
else:
for affy_key in (['affinities', 'relative_affinities'] if include_relative_affy_plots else ['affinities']):
lbplotting.plot_lb_vs_affinity(true_plotdir, true_lines_to_use, lbm, is_true_line=True, only_csv=only_csv, fnames=fnames, affy_key=affy_key)
if not only_csv:
plotting.make_html(true_plotdir, fnames=fnames, extra_links=[(subd, '%s/%s/' % (true_plotdir, subd)) for subd in lbmlist])
print ' dtr plotting time %.1fs' % (time.time() - plstart)
elif base_plotdir is not None:
assert ete_path is None or workdir is not None # need the workdir to make the ete trees
plot_tree_metrics(base_plotdir, inf_lines_to_use, true_lines_to_use, ete_path=ete_path, workdir=workdir, include_relative_affy_plots=include_relative_affy_plots, only_csv=only_csv, queries_to_include=queries_to_include, debug=debug)
if outfname is not None:
print ' writing selection metrics to %s' % outfname
utils.prep_dir(None, fname=outfname, allow_other_files=True)
def dumpfo(tl):
            dfo = {'unique_ids' : tl['unique_ids']}
            dfo.update(tl['tree-info'])
            return dfo
with open(outfname, 'w') as tfile:
json.dump([dumpfo(l) for l in inf_lines_to_use if 'tree-info' in l], tfile)
# ----------------------------------------------------------------------------------------
def init_dtr(train_dtr, dtr_path, cfg_fname=None):
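    # read the dtr cfg and either set up empty training info (if <train_dtr>) or read existing sklearn/pmml models from <dtr_path>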
# ----------------------------------------------------------------------------------------
def read_cfg():
if cfg_fname is None: # just use the defaults
dtr_cfgvals = {}
else: # read cfg values from a file
with open(cfg_fname) as yfile:
dtr_cfgvals = yaml.load(yfile, Loader=Loader)
if 'vars' in dtr_cfgvals: # format is slightly different in the file (in the file we don't require the explicit split between per-seq and per-cluster variables)
allowed_vars = set(v for cg in cgroups for pc in dtr_vars[cg] for v in dtr_vars[cg][pc])
cfg_vars = set(v for cg in cgroups for v in dtr_cfgvals['vars'][cg])
bad_vars = cfg_vars - allowed_vars
if len(bad_vars) > 0:
raise Exception('unexpected dtr var%s (%s) in cfg file %s' % (utils.plural(len(bad_vars)), ', '.join(bad_vars), cfg_fname))
for cg in cgroups:
dtr_cfgvals['vars'][cg] = {pc : [v for v in dtr_vars[cg][pc] if v in dtr_cfgvals['vars'][cg]] for pc in pchoices} # loop over the allowed vars here so the order is always the same
for tk in set(default_dtr_options) - set(dtr_cfgvals): # set any missing ones to the defaults
if tk == 'vars':
dtr_cfgvals[tk] = dtr_vars
elif tk == 'n_jobs':
dtr_cfgvals[tk] = utils.auto_n_procs() # isn't working when I put it up top, not sure why
else:
dtr_cfgvals[tk] = default_dtr_options[tk]
return dtr_cfgvals
# ----------------------------------------------------------------------------------------
def read_model(cg, tvar):
if 'pypmml' not in sys.modules:
import pypmml
picklefname, pmmlfname = dtrfname(dtr_path, cg, tvar), dtrfname(dtr_path, cg, tvar, suffix='pmml')
if os.path.exists(picklefname): # pickle file (i.e. with entire model class written to disk, but *must* be read with the same version of sklearn that was used to write it) [these should always be there, since on old ones they were all we had, and on new ones we write both pickle and pmml]
if os.path.exists(pmmlfname): # pmml file (i.e. just with the info to make predictions, but can be read with other software versions)
pmml_models[cg][tvar] = sys.modules['pypmml'].Model.fromFile(pmmlfname)
else: # if the pmml file isn't there, this must be old files, so we read the pickle, convert to pmml, then read that new pmml file
if 'joblib' not in sys.modules: # just so people don't need to install it unless they're training (also scons seems to break it https://stackoverflow.com/questions/24453387/scons-attributeerror-builtin-function-or-method-object-has-no-attribute-disp)
import joblib
with open(picklefname) as dfile:
skmodels[cg][tvar] = sys.modules['joblib'].load(dfile)
write_pmml(pmmlfname, skmodels[cg][tvar], get_dtr_varnames(cg, dtr_cfgvals['vars']), tvar)
pmml_models[cg][tvar] = sys.modules['pypmml'].Model.fromFile(pmmlfname)
else:
if cg == 'among-families' and tvar == 'delta-affinity': # this is the only one that should be missing, since we added it last
                missing_models.append('-'.join([cg, tvar, 'dtr']))  # this is fucking dumb, but I need it later when I have the full metric name, not cg and tvar
print ' %s %s doesn\'t exist, skipping (%s)' % (cg, tvar, dtrfname(dtr_path, cg, tvar))
return
raise Exception('model file doesn\'t exist: %s' % picklefname)
# ----------------------------------------------------------------------------------------
dtr_cfgvals = read_cfg()
skmodels = {cg : {tv : None for tv in dtr_targets[cg]} for cg in cgroups}
pmml_models = {cg : {tv : None for tv in dtr_targets[cg]} for cg in cgroups}
missing_models = []
trainfo = None
if train_dtr:
trainfo = {cg : {tv : {'in' : [], 'out' : []} for tv in dtr_targets[cg]} for cg in cgroups} # , 'weights' : []}
else:
rstart = time.time()
for cg in cgroups:
for tvar in dtr_targets[cg]:
read_model(cg, tvar)
print ' read decision trees from %s (%.1fs)' % (dtr_path, time.time() - rstart)
return dtr_cfgvals, trainfo, skmodels, pmml_models, missing_models
# ----------------------------------------------------------------------------------------
def calc_dtr(train_dtr, line, lbfo, dtree, trainfo, pmml_models, dtr_cfgvals, skmodels=None): # either add training values for <line>, or predict on it
# ----------------------------------------------------------------------------------------
def add_dtr_training_vals(cg, tvar, dtr_invals): # transfer dtr input values to tfo['in'], and add output (affinity stuff) values to tfo['out']
# trainfo[XXX]['weights'] += line['affinities']
def get_delta_affinity_vals():
tmpvals = {s : [] for s in tfo}
for iseq, uid in enumerate(line['unique_ids']):
n_steps = get_n_ancestors_to_affy_change(dtree.find_node_with_taxon_label(uid), dtree, line)
if n_steps is None: # can't train on None-type values
continue
tmpvals['in'].append(dtr_invals[cg][iseq])
tmpvals['out'].append(-n_steps)
return tmpvals
tfo = trainfo[cg][tvar]
if cg == 'within-families':
if tvar == 'affinity':
tfo['in'] += dtr_invals[cg]
max_affy = max(line['affinities'])
tfo['out'] += [a / max_affy for a in line['affinities']]
elif tvar == 'delta-affinity':
tmpvals = get_delta_affinity_vals()
tfo['in'] += tmpvals['in']
tfo['out'] += tmpvals['out']
else:
assert False
elif cg == 'among-families':
if dtr_cfgvals['n_train_per_family'] is None:
assert tvar == 'affinity' # eh why bother doing the other one
tfo['in'] += dtr_invals[cg]
tfo['out'] += line['affinities']
else:
if tvar == 'affinity':
i_to_keep = numpy.random.choice(range(len(line['unique_ids'])), size=dtr_cfgvals['n_train_per_family'], replace=False)
tfo['in'] += [dtr_invals[cg][i] for i in i_to_keep]
tfo['out'] += [line['affinities'][i] for i in i_to_keep]
elif tvar == 'delta-affinity':
tmpvals = get_delta_affinity_vals()
if len(tmpvals['in']) == 0: # no affinity increases
return
i_to_keep = numpy.random.choice(range(len(tmpvals['in'])), size=dtr_cfgvals['n_train_per_family'], replace=False)
tfo['in'] += [tmpvals['in'][i] for i in i_to_keep]
tfo['out'] += [tmpvals['out'][i] for i in i_to_keep]
else:
assert False
else:
assert False
# ----------------------------------------------------------------------------------------
utils.add_naive_seq_aa(line)
utils.add_seqs_aa(line)
for mtmp in ['cons-dist-nuc', 'cons-dist-aa']:
add_cdists_to_lbfo(line, lbfo, mtmp)
dtr_invals = {cg : get_dtr_vals(cg, dtr_cfgvals['vars'], line, lbfo, dtree) for cg in cgroups} # all dtr input variable values, before we fiddle with them for the different dtrs
if train_dtr: # train and write new model
for cg in cgroups:
for tvar in dtr_targets[cg]:
add_dtr_training_vals(cg, tvar, dtr_invals)
else: # read existing model
for cg in cgroups:
for tvar in dtr_targets[cg]:
if pmml_models[cg][tvar] is None: # only way this can happen atm is old dirs that don't have among-families delta-affinity
continue
outfo = {}
for iseq, uid in enumerate(line['unique_ids']):
pmml_invals = {var : val for var, val in zip(get_dtr_varnames(cg, dtr_cfgvals['vars']), dtr_invals[cg][iseq])} # convert from format for sklearn to format for pmml
outfo[uid] = pmml_models[cg][tvar].predict(pmml_invals)['predicted_%s'%tvar]
# if skmodels[cg][tvar] is not None: # leaving this here cause maybe we'll want to fall back to it or something if pmml ends up having problems
# sk_val = skmodels[cg][tvar].predict([dtr_invals[cg][iseq]])
# assert utils.is_normed(sk_val / outfo[uid])
lbfo['-'.join([cg, tvar, 'dtr'])] = outfo # NOTE it would be nice to automate this '-'.join() conversion, it happens in a few places already
# ----------------------------------------------------------------------------------------
# differences to calculate_tree_metrics(): this fcn
# 1) can run a bunch of metrics that the other can't
# 2) mosty focuses on running one metric at a time (as opposed to running all the ones that we typically want on data)
# 3) doesn't plot as many things
# 4) only runs on simulation (as opposed to making two sets of things, for simulation and data)
def calculate_individual_tree_metrics(metric_method, annotations, base_plotdir=None, ete_path=None, workdir=None, lb_tau=None, lbr_tau_factor=None, only_csv=False, min_cluster_size=None, include_relative_affy_plots=False, dont_normalize_lbi=False, debug=False):
# ----------------------------------------------------------------------------------------
def get_combo_lbfo(varlist, iclust, line, is_aa_lb=False):
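        # get the tree for <line> and calculate the lb/cons-dist values in <varlist>, returning (dtree, lbfo)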
if 'shm-aa' in varlist and 'seqs_aa' not in line:
utils.add_naive_seq_aa(line)
utils.add_seqs_aa(line)
lbfo = {}
for mtmp in [m for m in varlist if 'cons-dist-' in m]:
add_cdists_to_lbfo(line, lbfo, mtmp)
dtree = get_dendro_tree(treestr=line['tree'])
lbvars = set(varlist) & set(['lbi', 'lbr']) # although if is_aa_lb is set, we're really calculating aa-lbi/aa-lbr
tmp_tau, tmp_factor = lb_tau, lbr_tau_factor # weird/terrible hack (necessary to allow the calculation fcn to enforce that either a) we're calculating both metrics, so we probably want the factor applied or b) we're only calculating one, and we're not normalizing (i.e. we're probably calculating the bounds)
if len(lbvars) == 2:
only_calc_metric = None
elif len(lbvars) == 1:
only_calc_metric = list(lbvars)[0]
if only_calc_metric == 'lbr':
tmp_tau *= lbr_tau_factor
tmp_factor = None
else:
raise Exception('unexpected combination of variables %s' % varlist)
if is_aa_lb:
get_aa_lb_metrics(line, dtree, tmp_tau, lbr_tau_factor=tmp_factor, only_calc_metric=only_calc_metric, dont_normalize_lbi=dont_normalize_lbi, extra_str='true tree', iclust=iclust)
lbfo.update(line['tree-info']['lb'])
else:
tmp_lb_info = calculate_lb_values(dtree, tmp_tau, only_calc_metric=only_calc_metric, lbr_tau_factor=tmp_factor, annotation=line, dont_normalize=dont_normalize_lbi, extra_str='true tree', iclust=iclust)
for lbm in [m for m in lb_metrics if m in varlist]: # this skips the tree, which I guess isn't a big deal
lbfo[lbm] = {u : tmp_lb_info[lbm][u] for u in line['unique_ids']} # remove the ones that aren't in <line> (since we don't have sequences for them, so also no consensus distance)
return dtree, lbfo
# ----------------------------------------------------------------------------------------
if min_cluster_size is None:
min_cluster_size = default_min_selection_metric_cluster_size
n_before = len(annotations)
annotations = sorted([l for l in annotations if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True)
n_after = len(annotations)
print ' getting non-lb metric %s for %d true cluster%s with size%s: %s' % (metric_method, n_after, utils.plural(n_after), utils.plural(n_after), ' '.join(str(len(l['unique_ids'])) for l in annotations))
print ' skipping %d smaller than %d' % (n_before - n_after, min_cluster_size)
pstart = time.time()
for iclust, line in enumerate(annotations):
assert 'tree-info' not in line # could handle it, but don't feel like thinking about it a.t.m.
if metric_method == 'shm':
metric_info = {u : -utils.per_seq_val(line, 'n_mutations', u) for u in line['unique_ids']}
line['tree-info'] = {'lb' : {metric_method : metric_info}}
        elif metric_method == 'fay-wu-h': # NOTE this isn't actually tree info, but I'm comparing it to things calculated with a tree, so putting it in the same place at least for now
fwh = -utils.fay_wu_h(line)
            line['tree-info'] = {'lb' : {metric_method : {u : fwh for u in line['unique_ids']}}} # kind of weird to set it individually for each sequence when they all have the same value (i.e. it's a per-family metric), but I don't want to do actual per-family comparisons any more, and this way we can at least look at it
elif metric_method in ['cons-dist-nuc', 'cons-dist-aa']:
lbfo = {}
add_cdists_to_lbfo(line, lbfo, metric_method)
line['tree-info'] = {'lb' : lbfo}
elif metric_method == 'delta-lbi':
dtree, lbfo = get_combo_lbfo(['lbi'], iclust, line)
delta_lbfo = {}
for uid in line['unique_ids']:
node = dtree.find_node_with_taxon_label(uid)
if node is dtree.seed_node:
continue # maybe I should add it as something? not sure
delta_lbfo[uid] = lbfo['lbi'][uid] - lbfo['lbi'][node.parent_node.taxon.label] # I think the parent should always be in here, since I think we should calculate lbi for every node in the tree
line['tree-info'] = {'lb' : {metric_method : delta_lbfo}}
elif 'aa-lb' in metric_method: # aa versions of lbi and lbr
_, _ = get_combo_lbfo([metric_method.lstrip('aa-')], iclust, line, is_aa_lb=True)
elif metric_method == 'cons-lbi': # now uses aa-lbi as a tiebreaker for cons-dist-aa, but used to be old z-score style combination of (nuc-)lbi and cons-dist
def tiefcn(uid):
cdist, aalbi = lbfo['cons-dist-aa'][uid], lbfo['aa-lbi'][uid]
return cdist + aalbi / max_aa_lbi
_, lbfo = get_combo_lbfo(['cons-dist-aa', 'lbi'], iclust, line, is_aa_lb=True)
max_aa_lbi = max(lbfo['aa-lbi'].values())
line['tree-info'] = {'lb' : {metric_method : {u : tiefcn(u) for u in line['unique_ids']}}}
else:
assert False
print ' tree quantity calculation/prediction time: %.1fs' % (time.time() - pstart)
if base_plotdir is not None:
plstart = time.time()
assert ete_path is None or workdir is not None # need the workdir to make the ete trees
import plotting
import lbplotting
if 'affinities' not in annotations[0] or all(affy is None for affy in annotations[0]['affinities']): # if it's bcr-phylo simulation we should have affinities for everybody, otherwise for nobody
return
true_plotdir = base_plotdir + '/true-tree-metrics'
utils.prep_dir(true_plotdir, wildlings=['*.svg', '*.html'], allow_other_files=True, subdirs=[metric_method])
fnames = []
if metric_method in ['delta-lbi', 'aa-lbr']:
lbplotting.plot_lb_vs_ancestral_delta_affinity(true_plotdir+'/'+metric_method, annotations, metric_method, is_true_line=True, only_csv=only_csv, fnames=fnames, debug=debug)
else:
for affy_key in (['affinities', 'relative_affinities'] if include_relative_affy_plots else ['affinities']):
lbplotting.plot_lb_vs_affinity(true_plotdir, annotations, metric_method, is_true_line=True, only_csv=only_csv, fnames=fnames, affy_key=affy_key)
if not only_csv:
plotting.make_html(true_plotdir, fnames=fnames, extra_links=[(metric_method, '%s/%s/' % (true_plotdir, metric_method)),])
print ' non-lb metric plotting time %.1fs' % (time.time() - plstart)
# ----------------------------------------------------------------------------------------
def run_laplacian_spectra(treestr, workdir=None, plotdir=None, plotname=None, title=None, debug=False):
# - https://www.ncbi.nlm.nih.gov/pubmed/26658901/
# - instructions here: https://besjournals.onlinelibrary.wiley.com/doi/full/10.1111/2041-210X.12526
    # I think this is what ended up working (though probably not in docker):
# apt-get install libgmp-dev libmpfr-dev
# > install.packages("RPANDA",dependencies=TRUE)
# ok but then I needed to modify the code, so downloaded the source from cran, and swapped out for the spectR.R that eric sent, then installed with:
# R CMD INSTALL -l packages/RPANDA/lib packages/RPANDA/ # NOTE needs to happen whenever you modify the R source
# condensation of docs from the above paper:
# - > res<-spectR(Phyllostomidae) # compute eigenvalues (and some metrics describing the distribution, e.g. skewness, kurtosis, eigengap)
# - > plot_spectR(res) # make plots for eigenvalue spectrum
# - if eigengap (largest gap between sorted eigenvalues) is e.g. between 3 and 4, then the tree can be separated into three regions, and you use the BIC stuff to find those regions
# - > res<-BICompare(Phyllostomidae,3)
# - > plot_BICompare(Phyllostomidae,res)
# - > res<-JSDtree(Phyllostomidae_genera) # pairwise jensen-shannon distances between the 25 phylogenies
# - > JSDtree_cluster(res) # plots heatmap and hierarchical cluster
if debug:
print utils.pad_lines(get_ascii_tree(treestr=treestr))
print treestr
if workdir is None:
workdir = utils.choose_random_subdir('/tmp/%s' % os.getenv('USER', default='partis-work'))
eigenfname = '%s/eigenvalues.txt' % workdir
os.makedirs(workdir)
cmdlines = [
'library(ape, quiet=TRUE)',
# 'library(RPANDA, quiet=TRUE)', # old way, before I had to modify the source code because the CRAN version removes all eigenvalues <1 (for method="standard" -- with method="normal" it's <0, which is probably better, but it also seems to smoosh all the eigenvalues to be almost exactly 1)
'library("RPANDA", lib.loc="%s/packages/RPANDA/lib", quiet=TRUE)' % os.path.dirname(os.path.realpath(__file__)).replace('/python', ''),
'tree <- read.tree(text = "%s")' % treestr,
# 'print(tree)',
'specvals <- spectR(tree, method=c("standard"))', # compute eigenvalues (and some metrics describing the distribution, e.g. skewness, kurtosis, eigengap)
# 'print(specvals)',
'capture.output(specvals$eigenvalues, file="%s")' % eigenfname,
]
outstr, errstr = utils.run_r(cmdlines, workdir, return_out_err=True) # if it crashes, call it without return_out_err, so it prints stuff as it goes
errstr = '\n'.join([l.strip() for l in errstr.split('\n') if 'This is vegan' not in l])
for oestr in (outstr, errstr):
if oestr.strip() == '':
continue
        print utils.pad_lines(oestr)
eigenvalues = []
with open(eigenfname) as efile:
for line in efile:
for tstr in line.split():
if '[' in tstr:
if int(tstr.strip('[]')) != len(eigenvalues) + 1:
raise Exception('couldn\'t process line:\n%s' % line)
else:
eigenvalues.append(float(tstr))
os.remove(eigenfname)
os.rmdir(workdir)
if plotdir is not None:
import plotting
plotting.plot_laplacian_spectra(plotdir, plotname, eigenvalues, title)
# ----------------------------------------------------------------------------------------
def combine_selection_metrics(lp_infos, min_cluster_size=default_min_selection_metric_cluster_size, plotdir=None, ig_or_tr='ig', args=None, is_simu=False): # don't really like passing <args> like this, but it's the easiest cfg convention atm
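    # pair up heavy and light annotations from <lp_infos>, choose abs from each h/l cluster pair according to args.ab_choice_cfg, print per-family debug summaries, and write the chosen abs to args.chosen_ab_fname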
# ----------------------------------------------------------------------------------------
def getpids(line):
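        # return the flattened list of paired uids over all seqs in <line> (each seq is required to have at most one paired uid)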
all_ids = []
for ip, pids in enumerate(line['paired-uids']):
if pids is None or len(pids) == 0:
continue
elif len(pids) == 1:
# assert pids[0] not in all_ids # this is kind of slow, and maybe it's ok to comment it?
all_ids.append(pids[0])
else:
raise Exception('too many paired ids (%d) for %s: %s' % (len(pids), line['unique_ids'][ip], ' '.join(pids)))
return all_ids
# ----------------------------------------------------------------------------------------
def find_cluster_pairs(lpair): # the annotation lists should just be in the same order, but after adding back in all the unpaired sequences to each chain they could be a bit wonky
lp_antn_pairs = []
lpk = tuple(lpair)
if None in lp_infos[lpk].values():
return lp_antn_pairs
h_part, l_part = [sorted(lp_infos[lpk]['cpaths'][l].best(), key=len, reverse=True) for l in lpair]
h_atn_dict, l_atn_dict = [utils.get_annotation_dict(lp_infos[lpk]['antn_lists'][l]) for l in lpair]
n_no_info = 0
for h_clust in h_part:
h_atn = h_atn_dict[':'.join(h_clust)]
if 'tree-info' not in h_atn: # skip (presumably) the smaller ones
n_no_info += 1
continue
if 'paired-uids' not in h_atn: # seems to just be single-seq clusters, so i don't care
continue
l_clusts = [c for c in l_part if len(set(getpids(h_atn)) & set(c)) > 0]
if len(l_clusts) != 1:
print ' %s couldn\'t find a unique light cluster (found %d, looked in %d) for heavy cluster with size %d and %d paired ids (heavy: %s pids: %s)' % (utils.color('yellow', 'warning'), len(l_clusts), len(l_part), len(h_atn), len(getpids(h_atn)), ':'.join(h_clust), ':'.join(getpids(h_atn)))
continue
assert len(l_clusts) == 1
l_atn = l_atn_dict[':'.join(l_clusts[0])]
h_atn['loci'] = [lpair[0] for _ in h_atn['unique_ids']] # this kind of sucks, but it seems like the best option a.t.m. (see note in event.py)
l_atn['loci'] = [lpair[1] for _ in l_atn['unique_ids']]
lp_antn_pairs.append((h_atn, l_atn))
if n_no_info > 0:
print ' no tree info in %d annotations (probably smaller than min tree metric cluster size)' % n_no_info
return lp_antn_pairs
# ----------------------------------------------------------------------------------------
def gsval(mfo, tch, vname):
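        # look up <vname> for the <tch> chain of metric pair info <mfo>, handling both per-seq annotation keys and derived quantities (aa cons dist/frac, aa shm, selection metrics, multiplicity)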
cln, iseq = mfo[tch], mfo[tch+'_iseq']
if vname in cln:
assert vname in utils.linekeys['per_seq']
return cln[vname][iseq]
elif vname == 'cell-types':
return None
elif vname == 'aa-cfrac':
return lb_cons_dist(cln, iseq, aa=True, frac=True)
elif vname == 'shm-aa':
return utils.shm_aa(cln, iseq=iseq)
elif vname == 'aa-cdist':
return -smvals(cln, 'cons-dist-aa', iseq=iseq)
elif vname in selection_metrics:
return smvals(cln, vname, iseq=iseq)
elif vname == 'multipy': # multiplicity
return utils.get_multiplicity(cln, iseq=iseq)
else:
raise Exception('unsupported sort var \'%s\'' % vname)
# ----------------------------------------------------------------------------------------
def sumv(mfo, kstr):
return sum(gsval(mfo, c, kstr) for c in 'hl')
# ----------------------------------------------------------------------------------------
def sum_nuc_shm_pct(mpfo):
total_len = sum(len(gsval(mpfo, c, 'seqs')) - gsval(mpfo, c, 'seqs').count(utils.ambig_base) for c in 'hl')
return 100 * sumv(mpfo, 'n_mutations') / float(total_len)
# ----------------------------------------------------------------------------------------
def read_cfgfo():
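        # read and validate the ab choice cfg yaml (args.ab_choice_cfg), which controls how many families/seqs to choose and the various restriction criteria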
allowed_keys = set(['n-families', 'n-per-family', 'include-unobs-cons-seqs', 'vars', 'cell-types', 'max-ambig-positions', 'min-umis', 'min-median-nuc-shm-%', 'min-hdist-to-already-chosen'])
# allowed_vars = []
if debug:
print ' ab choice cfg:'
outstr, _ = utils.simplerun('cat %s'%args.ab_choice_cfg, return_out_err=True)
print utils.pad_lines(outstr)
with open(args.ab_choice_cfg) as cfile:
cfgfo = yaml.load(cfile, Loader=Loader)
if len(set(cfgfo) - allowed_keys) > 0:
raise Exception('unexpected key[s] in ab choice cfg: %s (choose from: %s)' % (' '.join(set(cfgfo) - allowed_keys), ' '.join(allowed_keys)))
return cfgfo
# ----------------------------------------------------------------------------------------
def add_unobs_cseqs(metric_pairs, chosen_mfos, all_chosen_seqs, tdbg=False):
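        # maybe add the unobserved h+l consensus seqs for this family to <chosen_mfos> (skipping if they have too many ambiguous positions, and using input seqs for the cons seq if most observed seqs have shm indels)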
# ----------------------------------------------------------------------------------------
def use_iseqs(tch, mtmp): # if any observed seqs in the family have shm indels, we need to figure out whether the indel should be included in the cons seq
hsil = mtmp[tch]['has_shm_indels']
tstr = '(%d / %d = %.2f)' % (hsil.count(True), len(hsil), hsil.count(True) / float(len(hsil)))
if hsil.count(True) / float(len(hsil)) > 0.5:
print ' %s more than half %s of %s seqs have indels, so using *input* cons seq (note that if there\'s more than one indel, this may well be wrong, since you probably only want indels that are in a majority of the family [which is probably not all of them])' % (utils.color('yellow', 'warning'), tstr, tch)
return True
else:
if any(hsil): # if none of them have indels, don't print anything
print ' less than half %s of %s seqs have indels, so not using input seqs for cons seq' % (tstr, tch)
return False
# ----------------------------------------------------------------------------------------
def getcseqs(tch, use_input_seqs, aa=False):
if use_input_seqs:
return utils.cons_seq_of_line(mtmp[tch], aa=aa, use_input_seqs=True)
else:
return mtmp[tch]['consensus_seq'+('_aa' if aa else '')]
# ----------------------------------------------------------------------------------------
mtmp = metric_pairs[0]
uis = {c : use_iseqs(c, mtmp) for c in 'hl'} # if any observed seqs in the family have shm indels, we need to figure out whether the indel should be included in the cons seq
cseqs = {c : getcseqs(c, uis[c], aa=True) for c in 'hl'} # aa cons seqs
def nambig(c): return utils.n_variable_ambig_aa(mtmp[c], cseqs[c], getcseqs(c, uis[c], aa=False))
if 'max-ambig-positions' in cfgfo and sum(nambig(c) for c in 'hl') > cfgfo['max-ambig-positions']:
print ' cons seq: too many ambiguous bases in h+l (%d > %d)' % (sum(nambig(c) for c in 'hl'), cfgfo['max-ambig-positions'])
return
consfo = {c : mtmp[c] for c in 'hl'}
consfo.update({'iclust' : iclust, 'consensus' : True})
consfo.update({c+'_use_input_seqs' : uis[c] for c in 'hl'})
consfo.update({c+'_cseq_aa' : cseqs[c] for c in 'hl'})
consfo.update({c+'_cseq_nuc' : getcseqs(c, uis[c], aa=False) for c in 'hl'})
chosen_mfos.append(consfo)
all_chosen_seqs.add(tuple(cseqs[c] for c in 'hl'))
if tdbg:
            print '      %s: added cons seq%s' % (utils.color('green', 'x'), (' (using %s input seq[s] because of indels)'%' '.join(c for c in 'hl' if consfo[c+'_use_input_seqs'])) if any(consfo[c+'_use_input_seqs'] for c in 'hl') else '')
# ----------------------------------------------------------------------------------------
def local_hdist_aa(s1, s2, defval=None, frac=False): # ick, this is ugly, but I think makes sense for now
if len(s1) == len(s2):
hfcn = utils.hamming_fraction if frac else utils.hamming_distance
return hfcn(s1, s2, amino_acid=True)
elif defval is not None:
return defval
else:
return max([len(s1), len(s2)]) # NOTE it's kind of weird and arbitrary to return the max seq len if they're different lengths, but if they're different lengths we don't care anyway cause we're just looking for very similar sequences
# ----------------------------------------------------------------------------------------
def choose_abs(metric_pairs, iclust, tdbg=False):
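        # choose abs from this family's <metric_pairs>: apply the cell type/umi/shm/ambiguity restrictions, maybe add the unobserved cons seq, then take the top pairs sorted by each var in cfgfo['vars']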
# ----------------------------------------------------------------------------------------
def get_n_choose(tcfg, key):
if key not in tcfg:
return None
if isinstance(tcfg[key], int): # take the same number from each family
return tcfg[key]
else: # specify a different number for each family
assert len(tcfg[key]) == cfgfo['n-families']
return tcfg[key][iclust]
# ----------------------------------------------------------------------------------------
def in_chosen_seqs(all_chosen_seqs, mfo):
mfseqs = tuple(gsval(mfo, c, 'input_seqs_aa') for c in 'hl')
return mfseqs in all_chosen_seqs
# ----------------------------------------------------------------------------------------
def too_close_to_chosen_seqs(all_chosen_seqs, mfo, hdist, ttdbg=False):
if len(all_chosen_seqs) == 0:
return False
mfseqs = tuple(gsval(mfo, c, 'input_seqs_aa') for c in 'hl')
if ttdbg:
h_min, l_min = [min(local_hdist_aa(acseqs[i], mseq) for acseqs in all_chosen_seqs) for i, mseq in enumerate(mfseqs)]
print ' %d %d %s' % (h_min, l_min, utils.color('red', 'x') if sum([h_min, l_min]) < hdist else '')
return any(sum(local_hdist_aa(cseq, mseq) for mseq, cseq in zip(mfseqs, acseqs)) < hdist for acseqs in all_chosen_seqs)
# ----------------------------------------------------------------------------------------
# run through a bunch of options for skipping seqs/families
if iclust >= cfgfo['n-families']:
return []
if tdbg:
print ' iclust %d: choosing abs from joint cluster with size %d (marked with %s)' % (iclust, len(metric_pairs), utils.color('green', 'x'))
for ctk, ntk in [('cell-types', 'cell-types'), ('min-umis', 'umis')]:
if len(metric_pairs) > 0 and ctk in cfgfo and ntk not in metric_pairs[0]['h']:
print ' %s \'%s\' in cfgfo but \'%s\' info not in annotation' % (utils.color('yellow', 'warning'), ctk, ntk)
if 'cell-types' in cfgfo and len(metric_pairs) > 0 and 'cell-types' in metric_pairs[0]['h']:
def keepfcn(m): return all(gsval(m, c, 'cell-types') in cfgfo['cell-types'] for c in 'hl') # kind of dumb to check both, they should be the same, but whatever it'll crash in the debug printing below if they're different
n_before = len(metric_pairs)
metric_pairs = [m for m in metric_pairs if keepfcn(m)]
if tdbg and n_before - len(metric_pairs) > 0:
print ' skipped %d with cell type not among %s' % (n_before - len(metric_pairs), cfgfo['cell-types'])
if 'min-umis' in cfgfo and len(metric_pairs) > 0 and 'umis' in metric_pairs[0]['h']:
def keepfcn(m): return sum(gsval(m, c, 'umis') for c in 'hl') > cfgfo['min-umis']
n_before = len(metric_pairs)
metric_pairs = [m for m in metric_pairs if keepfcn(m)]
if tdbg and n_before - len(metric_pairs) > 0:
print ' skipped %d with umis less than %d' % (n_before - len(metric_pairs), cfgfo['min-umis'])
if 'min-median-nuc-shm-%' in cfgfo and len(metric_pairs) > 0:
median_shm = numpy.median([sum_nuc_shm_pct(m) for m in metric_pairs])
skip_family = median_shm < cfgfo['min-median-nuc-shm-%']
if tdbg:
print ' %s family: median h+l nuc shm %.2f%% %s than %.2f%%' % (utils.color('yellow', 'skipping entire') if skip_family else 'keeping', median_shm, 'less' if skip_family else 'greater', cfgfo['min-median-nuc-shm-%'])
if skip_family:
return []
if 'max-ambig-positions' in cfgfo: # max number of ambiguous amino acid positions summed over h+l
def keepfcn(m):
def nambig(c): return utils.n_variable_ambig_aa(m[c], gsval(m, c, 'input_seqs_aa'), gsval(m, c, 'input_seqs'))
return sum(nambig(c) for c in 'hl') <= cfgfo['max-ambig-positions']
n_before = len(metric_pairs)
metric_pairs = [m for m in metric_pairs if keepfcn(m)]
if tdbg and n_before - len(metric_pairs):
print ' skipped %d with too many ambiguous bases (>%d)' % (n_before - len(metric_pairs), cfgfo['max-ambig-positions'])
if len(metric_pairs) == 0:
return []
chosen_mfos = [] # includes unobs cons seqs plus seqs chosen from all sortvars
all_chosen_seqs = set() # just for keeping track of the seqs we've already chosen
# maybe add the unobserved cons seq
if 'include-unobs-cons-seqs' in cfgfo and cfgfo['include-unobs-cons-seqs']:
add_unobs_cseqs(metric_pairs, chosen_mfos, all_chosen_seqs, tdbg=tdbg) # well, doesn't necessarily add it, but at least checks to see if we should
# actually choose them, sorted by the various specified vars
for sortvar, vcfg in cfgfo['vars'].items():
assert vcfg['sort'] in ['low', 'high']
if [get_n_choose(cfo, k) for cfo, k in [(vcfg, 'n'), (cfgfo, 'n-per-family')]].count(None) != 1:
                raise Exception('specify exactly one of \'n-per-family\' or \'vars\': \'n\'')
n_already_chosen, n_same_seqs, n_too_close, n_newly_chosen = 0, 0, 0, 0
sorted_mfos = metric_pairs
sorted_mfos = sorted(sorted_mfos, key=lambda m: sum(mtpys[c][gsval(m, c, 'input_seqs_aa')] for c in 'hl'), reverse=True)
sorted_mfos = sorted(sorted_mfos, key=lambda m: sum(gsval(m, c, sortvar) for c in 'hl'), reverse=vcfg['sort']=='high')
for mfo in sorted_mfos:
if mfo in chosen_mfos:
n_already_chosen += 1
continue
if in_chosen_seqs(all_chosen_seqs, mfo):
n_same_seqs += 1
continue
if 'min-hdist-to-already-chosen' in cfgfo and too_close_to_chosen_seqs(all_chosen_seqs, mfo, cfgfo['min-hdist-to-already-chosen']):
n_too_close += 1
continue
if any(gsval(mfo, c, 'has_shm_indels') for c in 'hl'):
print ' %s choosing ab with shm indel: the consensus sequence may or may not reflect the indels (see above). uids: %s %s' % (utils.color('yellow', 'warning'), gsval(mfo, 'h', 'unique_ids'), gsval(mfo, 'l', 'unique_ids'))
chosen_mfos.append(mfo)
all_chosen_seqs.add(tuple(gsval(mfo, c, 'input_seqs_aa') for c in 'hl'))
n_newly_chosen += 1 # number chosen from this sortvar
# this takes the top <n> by <sortvar> (not including any unobs cons seq)
if get_n_choose(vcfg, 'n') is not None and n_newly_chosen >= get_n_choose(vcfg, 'n'): # number to choose for this var in this family
break
# whereas this makes sure we have N from the family over all sort vars (including any unobs cons seq), while still sorting by <sortvar>. It probably does *not* make sense to specify both versions
if get_n_choose(cfgfo, 'n-per-family') is not None and len(chosen_mfos) >= get_n_choose(cfgfo, 'n-per-family'): # number to choose for all vars in this family (it's kind of weird/confusing to have this inside the sortvar loop, but i think it actually makes sense)
break
if tdbg:
print ' %s: chose %d%s%s%s' % (sortvar, n_newly_chosen,
'' if n_already_chosen==0 else ' (%d were in common with a previous var)'%n_already_chosen,
'' if n_same_seqs==0 else ' (%d had seqs identical to previously-chosen ones)'%n_same_seqs,
'' if n_too_close==0 else ' (%d had seqs too close to previously-chosen ones)'%n_too_close)
if tdbg:
print ' chose %d total' % len(chosen_mfos)
return chosen_mfos
# ----------------------------------------------------------------------------------------
def add_plotval_uids(iclust_plotvals, iclust_mfos, metric_pairs):
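        # add 'uids' and 'chosen' lists to <iclust_plotvals>, marking which metric pairs were chosen and adding label strings for chosen seqs and queries_to_include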
def waschosen(m):
return 'chosen' if all(gsval(m, c, 'unique_ids') in iclust_chosen_ids for c in 'hl') else 'nope'
def ustr(m):
rstr = ''
if waschosen(m) == 'chosen': # if this is commented, i think i can simplify this fcn a lot? UPDATE need the extra text for cases where lots of dots are on top of each other
rstr = 'x'
if args.queries_to_include is not None and all(gsval(m, c, 'unique_ids') in args.queries_to_include for c in 'hl'):
common_chars = ''.join(c for c, d in zip(gsval(m, 'h', 'unique_ids'), gsval(m, 'l', 'unique_ids')) if c==d)
common_chars = common_chars.rstrip('-ig')
if len(common_chars) > 0:
rstr += ' ' + common_chars
else:
                    rstr += ' ' + ' '.join(gsval(m, c, 'unique_ids') for c in 'hl')
return None if rstr == '' else rstr
non_cons_mfos = [m for m in iclust_mfos if 'consensus' not in m]
iclust_chosen_ids = [gsval(m, c, 'unique_ids') for m in non_cons_mfos for c in 'hl']
iclust_plotvals['uids'] = [ustr(m) for m in metric_pairs]
iclust_plotvals['chosen'] = [waschosen(m) for m in metric_pairs]
# ----------------------------------------------------------------------------------------
def write_chosen_file(all_chosen_mfos, hash_len=8):
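        # write a csv summary of all chosen abs (including any unobserved cons seqs) to args.chosen_ab_fname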
# ----------------------------------------------------------------------------------------
def getofo(mfo):
ofo = collections.OrderedDict([('iclust', mfo['iclust'])])
if 'consensus' in mfo:
def consid(mfo, c): return '%s-cons-%s' % (utils.uidhashstr(mfo[c]['consensus_seq_aa'])[:hash_len], mfo[c]['loci'][0])
ofo.update([(c+'_id', consid(mfo, c)) for c in 'hl'])
else:
ofo.update([(c+'_id', gsval(mfo, c, 'unique_ids')) for c in 'hl'])
for kn in ['aa-cfrac', 'shm-aa', 'aa-cdist']:
ofo.update([('sum_'+kn, sum(gsval(mfo, c, kn) for c in 'hl'))])
ofo.update([(c+'_family_size', len(mfo[c]['unique_ids'])) for c in 'hl'])
ofo.update([(c+'_'+r+'_gene' , mfo[c][r+'_gene']) for r in utils.regions for c in 'hl'])
if 'consensus' in mfo:
for tch in 'hl':
ofo[tch+'_seq_aa'] = mfo[tch+'_cseq_aa']
ofo[tch+'_seq_nuc'] = mfo[tch+'_cseq_nuc']
ofo[tch+'_has_shm_indels'] = mfo[tch+'_use_input_seqs']
else:
for ok, lk in [('has_shm_indels', None), ('cell_type', 'cell-types'), ('aa-cfrac', None), ('aa-cdist', None), ('shm-aa', None), ('seq_nuc', 'input_seqs'), ('seq_aa', 'input_seqs_aa')]:
ofo.update([(c+'_'+ok, gsval(mfo, c, utils.non_none([lk, ok]))) for c in 'hl'])
if 'consensus' not in mfo: # check that the aa seqs are actually translations of the nuc seqs (for unobs cons seqs, we expect them to differ) NOTE i don't know if this is really worthwhile long term, but it makes me feel warm and fuzzy atm that it's here
for tch in 'hl':
if utils.ltranslate(ofo[tch+'_seq_nuc']) != ofo[tch+'_seq_aa']:
print ' %s aa seq not translation of nuc seq for %s %s:' % (utils.color('yellow', 'warning'), tch, ofo[tch+'_id'])
utils.color_mutants(utils.ltranslate(ofo[tch+'_seq_nuc']), ofo[tch+'_seq_aa'], amino_acid=True, print_result=True, extra_str=' ')
return ofo
# ----------------------------------------------------------------------------------------
if debug:
print ' writing %d chosen abs to %s' % (len(all_chosen_mfos), args.chosen_ab_fname)
with open(args.chosen_ab_fname, 'w') as cfile:
outfos, fieldnames = [], None
for mfo in all_chosen_mfos:
outfos.append(getofo(mfo))
if fieldnames is None or len(outfos[-1].keys()) > len(fieldnames):
fieldnames = outfos[-1].keys()
if len(all_chosen_mfos) > 0:
writer = csv.DictWriter(cfile, fieldnames)
writer.writeheader()
for ofo in outfos:
writer.writerow(ofo)
# ----------------------------------------------------------------------------------------
def print_dbg(metric_pairs):
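        # print a debug table for this family: per-pair metric values plus any extra columns (cell type, umis, c gene), with cons and naive seq comparisons and gene calls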
# ----------------------------------------------------------------------------------------
def init_xtras():
xtra_heads = [('cell-types', ['cell', 'type']), ('umis', ['umis', 'h+l']), ('c_genes', ['c_gene', ''])]
xheads, xtrafo, xlens = [[], []], [], {}
for xn, xh in xtra_heads:
if all(xn not in mpfo[c] for mpfo in metric_pairs for c in 'hl'):
continue
xtrafo.append(xn)
ctlens = [len(str(gsval(m, c, xn))) for m in metric_pairs for c in 'hl']
xlens[xn] = max([len(h) for h in xh] + ctlens) + 1
xheads = [x + [utils.wfmt(s, xlens[xn])] for x, s in zip(xheads, xh)]
return xtrafo, xheads, xlens
# ----------------------------------------------------------------------------------------
def get_xstr(mpfo, xlens):
xstr = [] # don't try to condense these into a block, they're too different
if 'cell-types' in xtrafo:
ctval = utils.get_single_entry(list(set(gsval(mpfo, c, 'cell-types') for c in 'hl')))
xstr += [utils.wfmt(utils.non_none([ctval, '?']), xlens['cell-types'])]
if 'umis' in xtrafo:
uvals = [gsval(mpfo, c, 'umis') for c in 'hl']
xstr += [utils.wfmt('?' if None in uvals else sum(uvals), xlens['umis'])]
if 'c_genes' in xtrafo:
cg = gsval(mpfo, 'h', 'c_genes')
xstr += [utils.wfmt('?' if cg in [None, 'None'] else cg.replace('IGH', ''), xlens['c_genes'])]
return xstr
# ----------------------------------------------------------------------------------------
def get_didstr(dids):
if len(set(dids)) == 1: # make sure they're from the same droplet
didstr = dids[0]
if args.queries_to_include is not None and hid in args.queries_to_include and lid in args.queries_to_include:
didstr = utils.color('red', didstr, width=20)
else:
print ' %s paired seqs %s %s have different droplet ids (i.e. they were probably mis-paired) %s' % (utils.color('red', 'error'), hid, lid, dids)
didstr = 'see error'
return didstr
# ----------------------------------------------------------------------------------------
    def getcdist(cons_mfo, mpfo, tch, frac=False): # can't just use gsval() for cases where we used the "input" (indel'd) cons seq (although note that there are probably some other places where the original/indel-reversed version is used)
defval = gsval(mpfo, tch, 'aa-c'+('frac' if frac else 'dist'))
if cons_mfo is None:
return defval
        return local_hdist_aa(gsval(mpfo, tch, 'input_seqs_aa'), cons_mfo[tch+'_cseq_aa'], defval=defval, frac=frac)
# ----------------------------------------------------------------------------------------
xtrafo, xheads, xlens = init_xtras()
lstr = '%s %s' % (utils.locstr(h_atn['loci'][0]), utils.locstr(l_atn['loci'][0]))
h_cshm, l_cshm = [lb_cons_seq_shm(l, aa=True) for l in [h_atn, l_atn]]
cdstr = '%2d %2d' % (h_cshm, l_cshm)
sstr = ' %3d %3d %3d' % (len(metric_pairs), len(h_atn['unique_ids']), len(l_atn['unique_ids']))
gstrs = ['%s %s' % (utils.color_gene(h_atn[r+'_gene']), utils.color_gene(l_atn[r+'_gene']) if r!='d' else '') for r in utils.regions]
gstr_len = max(utils.len_excluding_colors(s) for s in gstrs) # don't really need this as long as it's the last column
gstrs = ['%s%s' % (g, ' '*(gstr_len - utils.len_excluding_colors(g))) for g in gstrs]
h_cseq, l_cseq = [l['consensus_seq_aa'] for l in (h_atn, l_atn)]
cons_mfo = None
if any('consensus' in m for m in iclust_mfos):
cons_mfo = utils.get_single_entry([m for m in iclust_mfos if 'consensus' in m])
h_cseq, l_cseq = [cons_mfo[c+'_cseq_aa'] if cons_mfo[c+'_use_input_seqs'] else cs for c, cs in zip('hl', (h_cseq, l_cseq))]
h_cseq_str, l_cseq_str = [utils.color_mutants(cs, cs, amino_acid=True) for cs in (h_cseq, l_cseq)]
h_nseq, l_nseq = [utils.color_mutants(cs, l['naive_seq_aa'], amino_acid=True, align_if_necessary=True) for l, cs in zip((h_atn, l_atn), (h_cseq, l_cseq))]
print (' aa-cfrac (%%) aa-cdist droplet contig indels%s N %%shm N aa mutations sizes %s %s %s') % (' '.join(xheads[0]), utils.wfmt('genes cons:', gstr_len), h_cseq_str, l_cseq_str)
print (' sum h l h l h l h l %s h l nuc cons. obs. both h l %s %s %s') % (' '.join(xheads[1]), utils.wfmt('naive:', gstr_len), h_nseq, l_nseq)
sorted_mfos = sorted(metric_pairs, key=lambda m: sum(mtpys[c][gsval(m, c, 'input_seqs_aa')] for c in 'hl'), reverse=True)
for imp, mpfo in enumerate(sorted(sorted_mfos, key=lambda x: sum(getcdist(cons_mfo, x, c, frac=True) for c in 'hl'))):
hid, lid = [gsval(mpfo, c, 'unique_ids') for c in 'hl']
dids, cids = zip(*[utils.get_droplet_id(u, return_contigs=True) for u in (hid, lid)])
indelstr = ' '.join(utils.color('red', 'y') if utils.per_seq_val(l, 'has_shm_indels', u) else ' ' for c, u, l in zip('hl', [hid, lid], [h_atn, l_atn]))
h_seq, l_seq = [utils.color_mutants(cs, utils.per_seq_val(l, 'input_seqs_aa', u), amino_acid=True, align_if_necessary=True) for u, l, cs in zip((hid, lid), (h_atn, l_atn), (h_cseq, l_cseq))]
h_cfrac, l_cfrac = [getcdist(cons_mfo, mpfo, c, frac=True) for c in 'hl']
h_cdist, l_cdist = [getcdist(cons_mfo, mpfo, c) for c in 'hl']
print ' %s %4.1f %4.1f %4.1f %4d%4d %s %20s %s %s %s' % (lstr if imp==0 else ' '*utils.len_excluding_colors(lstr),
100*sum([h_cfrac, l_cfrac]), 100*h_cfrac, 100*l_cfrac, h_cdist, l_cdist,
utils.color('green', 'x') if mpfo in iclust_mfos else ' ',
get_didstr(dids), cids[0], cids[1], indelstr),
print ' %s %3d %3d %4.1f %s %2d %2d %2d %s %s %s %s' % (' '.join(get_xstr(mpfo, xlens)),
mtpys['h'][gsval(mpfo, 'h', 'input_seqs_aa')], mtpys['l'][gsval(mpfo, 'l', 'input_seqs_aa')],
sum_nuc_shm_pct(mpfo),
cdstr if imp==0 else ' '*len(cdstr),
sumv(mpfo, 'shm-aa'), gsval(mpfo, 'h', 'shm-aa'), gsval(mpfo, 'l', 'shm-aa'),
sstr if imp==0 else ' '*utils.len_excluding_colors(sstr), gstrs[imp] if imp<len(gstrs) else ' '*gstr_len,
h_seq, l_seq)
        for gs in gstrs[imp+1:]:  # if the cluster was smaller than gstrs, need to print the extra gstrs (this shouldn't really ever happen unless i make gstrs much longer)
print '%81s%s' % ('', gs) # this width will sometimes be wrong
print ''
# ----------------------------------------------------------------------------------------
def makeplots(metric_pairs, h_atn):
import plotting
import lbplotting
if is_simu:
# make performance plots for sum of h+l aa-cdist
mm = 'sum-cons-dist-aa'
h_atn['tree-info']['lb'][mm] = {} # NOTE it's kind of hackey to only add it to the heavy annotation, but i'm not doing anything with it after plotting right here, anyway
for mfo in metric_pairs:
h_atn['tree-info']['lb'][mm][gsval(mfo, 'h', 'unique_ids')] = -sum(gsval(mfo, c, 'aa-cdist') for c in 'hl')
fnames = []
lbplotting.plot_lb_vs_affinity(plotdir, [h_atn], mm, is_true_line=is_simu, fnames=fnames)
plotting.make_html(plotdir, fnames=fnames, extra_links=[(mm, '%s/%s/' % (plotdir, mm)),])
iclust_plotvals = {c+'_aa-cfrac' : [gsval(m, c, 'aa-cfrac') for m in metric_pairs] for c in 'hl'}
if any(vl.count(0)==len(vl) for vl in iclust_plotvals.values()): # doesn't plot anything useful, and gives a pyplot warning to std err which is annoying
return
add_plotval_uids(iclust_plotvals, iclust_mfos, metric_pairs) # add uids for the chosen ones
mstr = legtexts['cons-frac-aa']
lbplotting.plot_2d_scatter('h-vs-l-cfrac-iclust-%d'%iclust, plotdir, iclust_plotvals, 'l_aa-cfrac', 'light %s'%mstr, mstr, xvar='h_aa-cfrac', xlabel='heavy %s'%mstr, colorvar='chosen', stats='correlation') # NOTE this iclust will in general *not* correspond to the one in partition plots
# for k in iclust_plotvals:
# if k not in all_plotvals: all_plotvals[k] = [] # just for 'uids'
# all_plotvals[k] += iclust_plotvals[k]
# ----------------------------------------------------------------------------------------
def get_mtpys(metric_pairs): # NOTE this is the sum of utils.get_multiplicity() over identical sequences
mtpys = {}
for c in 'hl':
seqlist = [gsval(m, c, 'input_seqs_aa') for m in metric_pairs for _ in range(gsval(m, c, 'multipy'))]
mtpys[c] = {s : seqlist.count(s) for s in set(seqlist)}
return mtpys
# ----------------------------------------------------------------------------------------
debug = not is_simu or args.debug
all_chosen_mfos = []
cfgfo = read_cfgfo()
antn_pairs = []
for lpair in [lpk for lpk in utils.locus_pairs[ig_or_tr] if tuple(lpk) in lp_infos]:
antn_pairs += find_cluster_pairs(lpair)
# all_plotvals = {k : [] for k in ('h_aa-cfrac', 'l_aa-cfrac')}
n_too_small = 0
if debug:
print ' %d h/l pairs: %s' % (len(antn_pairs), ', '.join(' '.join(str(len(l['unique_ids'])) for l in p) for p in antn_pairs))
for iclust, (h_atn, l_atn) in enumerate(sorted(antn_pairs, key=lambda x: sum(len(l['unique_ids']) for l in x), reverse=True)):
for ltmp in (h_atn, l_atn):
utils.add_seqs_aa(ltmp)
add_cons_seqs(ltmp, aa=True) # this also adds the nuc one if it isn't there
metric_pairs = []
for hid, pids in zip(h_atn['unique_ids'], h_atn['paired-uids']):
if pids is None or len(pids) == 0: # should only have the latter now (set with .get() call in rewrite_input_metafo())
continue
lid = pids[0]
if lid not in l_atn['unique_ids']:
print ' paired light id %s missing' % lid
continue
if any(len(l['unique_ids']) < min_cluster_size for l in (h_atn, l_atn)):
n_too_small += 1
continue
mpfo = {'iclust' : iclust}
for tch, uid, ltmp in zip(('h', 'l'), (hid, lid), (h_atn, l_atn)):
mpfo[tch] = ltmp
mpfo[tch+'_iseq'] = ltmp['unique_ids'].index(uid)
metric_pairs.append(mpfo)
if len(metric_pairs) == 0:
continue
mtpys = get_mtpys(metric_pairs)
iclust_mfos = choose_abs(metric_pairs, iclust, tdbg=debug)
if len(iclust_mfos) > 0:
all_chosen_mfos += iclust_mfos
if debug:
print_dbg(metric_pairs)
if n_too_small > 0:
print ' skipped %d clusters smaller than %d' % (n_too_small, min_cluster_size)
if plotdir is not None:
makeplots(metric_pairs, h_atn)
if args.chosen_ab_fname is not None:
write_chosen_file(all_chosen_mfos)
# if plotdir is not None: # eh, maybe there isn't a big reason for an overall one
# lbplotting.plot_2d_scatter('h-vs-l-cfrac-iclust-all', plotdir, all_plotvals, 'l_aa-cfrac', 'light %s'%mstr, mstr, xvar='h_aa-cfrac', xlabel='heavy %s'%mstr, colorvar='chosen', stats='correlation')
| gpl-3.0 | -1,847,442,446,347,377,700 | 71.570919 | 556 | 0.596853 | false |
DavidCain/mitoc-trips | ws/migrations/0020_typo_corrections.py | 1 | 1360 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('ws', '0019_2020_ws_application')]
operations = [
migrations.AlterField(
model_name='climbingleaderapplication',
name='familiarity_spotting',
field=models.CharField(
choices=[
('none', 'not at all'),
('some', 'some exposure'),
('comfortable', 'comfortable'),
('very comfortable', 'very comfortable'),
],
max_length=16,
verbose_name='Familiarity with spotting boulder problems',
),
),
migrations.AlterField(
model_name='winterschoolleaderapplication',
name='winter_experience',
field=models.TextField(
blank=True,
help_text='Details of previous winter outdoors experience. '
'Include the type of trip (x-country skiing, above treeline, snowshoeing, ice climbing, etc), '
'approximate dates and locations, numbers of participants, notable trail and weather conditions. '
'Please also give details of whether you participated, led, or co-led these trips.',
max_length=5000,
),
),
]
| gpl-3.0 | 5,556,380,076,097,705,000 | 37.857143 | 114 | 0.546324 | false |
tomfa/flashcard-json-maker | simplequiz_interpreter.py | 1 | 3968 | #coding: utf-8
'''
This script reads a Q-A txt-file and generates JSON output.
USAGE:
    Save your Q-A file and run this script with that file as its only
    argument. The output is written to data.js in the working directory.
FORMAT:
Chapter 1 - cakes. The whole line is a part of the chapter title.
Q: Question that we wonder about?
A: Answer telling us what we want to know?
Q: Empty lines are ignored. If the question goes across multiple lines,
that's perfectly fine. We just add the new line to what we added last.
A: That goes for answers as well. Chapters do however need to be on a
single line.
'''
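# Illustrative example only (not used by the script): a minimal input file in
# the format described above, and roughly what the script writes to data.js
# for it. The name EXAMPLE_INPUT is a documentation-only addition and is not
# referenced anywhere else in this file.
EXAMPLE_INPUT = """Chapter 1 - cakes
Q: What does the script produce?
A: A data.js file holding a chapters array and a questions array.
"""
# Expected data.js (roughly):
# var chapters = ["1 - cakes"]
# var questions = [
# { "id":"0", "chapter":"1 - cakes",
#   "question":"What does the script produce?",
#   "answer":"A data.js file holding a chapters array and a questions array." },
# ];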
class Question:
"""Et spørsmål"""
def __init__(self, number, question):
self.question = question # String
self.explaination = "Ingen forklaring gitt"
self.chapter = ""
self.qnum = number
def append_line_to_question(self, more_question):
self.question += " " + more_question
def add_explaination(self, explaination):
self.explaination = explaination
def append_explaination(self, explaination):
self.explaination += " " + explaination
def setChapter(self, chapter):
self.chapter = chapter
def readQuiz(path):
f = open(path, 'r')
lines = f.read().split('\n')
questions = []
readingQ = False # Are we currently reading a question?
readingA = False # Are we currently reading an answer?
currentchapter = ""
chapters = []
qnum = -1
for line in lines:
line = line.strip().replace('"', '\\"')
if is_ignorable(line):
continue
if line_defines_chapter(line):
# omit 'Chapter '
currentchapter = line[8:]
if currentchapter not in chapters:
chapters.append(currentchapter)
continue
if line_defines_question(line):
qnum += 1
readingQ = True
readingA = False
# line[3:] is to skip the 'q: '. Not pretty
questions.append(Question(qnum, line[3:]))
questions[len(questions)-1].setChapter(currentchapter)
elif line_defines_answer(line):
readingA = True
readingQ = False
# line[3:] is to skip the 'a: '. Not pretty
questions[len(questions)-1].add_explaination(line[3:])
# If the line doesn't start with anything interesting, we append to
elif (readingA):
questions[len(questions)-1].append_explaination(line)
elif (readingQ):
questions[len(questions)-1].append_line_to_question(line)
return questions, chapters
def is_ignorable(line):
'''
returns true if line can safely be ignored
parameter:
* line: string
'''
return len(line) < 1
def line_defines_question(line):
return line.lower().startswith('q: ')
def line_defines_answer(line):
return line.lower().startswith('a: ')
def line_defines_chapter(line):
return line.lower().startswith('chapter ')
if __name__ == "__main__":
import sys
exams = {}
args = sys.argv[1:]
if not args:
print("Usage: Run as 'python flash_interpreter.py myfile.txt'")
sys.exit()
questions, chapters = readQuiz(args[0])
f = open('data.js', 'w')
f.write('var chapters = [')
for i in range(len(chapters)):
f.write('"' + chapters[i] + '"')
if (i + 1) < len(chapters):
f.write(', ')
f.write(']\n\n')
f.write('var questions = [\n')
for q in questions:
f.write('{\n')
f.write(' "id":"' + str(q.qnum) + '",\n')
f.write(' "chapter":"' + str(q.chapter) + '",\n')
f.write(' "question":"' + q.question + '",\n')
f.write(' "answer":"' + q.explaination + '"\n')
f.write('},\n\n')
f.write('];')
print "We're fine. totes fine."
print "Output saved as data.js"
| mit | 7,440,680,661,345,437,000 | 27.161765 | 75 | 0.554463 | false |
chudaol/edx-platform | cms/envs/common.py | 1 | 33521 | # -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import imp
import os
import sys
import lms.envs.common
# Although this module itself may not use these imported variables, other dependent modules may.
from lms.envs.common import (
USE_TZ, TECH_SUPPORT_EMAIL, PLATFORM_NAME, BUGS_EMAIL, DOC_STORE_CONFIG, DATA_DIR, ALL_LANGUAGES, WIKI_ENABLED,
update_module_store_settings, ASSET_IGNORE_REGEX, COPYRIGHT_YEAR, PARENTAL_CONSENT_AGE_LIMIT,
# The following PROFILE_IMAGE_* settings are included as they are
# indirectly accessed through the email opt-in API, which is
# technically accessible through the CMS via legacy URLs.
PROFILE_IMAGE_BACKEND, PROFILE_IMAGE_DEFAULT_FILENAME, PROFILE_IMAGE_DEFAULT_FILE_EXTENSION,
PROFILE_IMAGE_SECRET_KEY, PROFILE_IMAGE_MIN_BYTES, PROFILE_IMAGE_MAX_BYTES,
# The following setting is included as it is used to check whether to
# display credit eligibility table on the CMS or not.
ENABLE_CREDIT_ELIGIBILITY, YOUTUBE_API_KEY
)
from path import path
from warnings import simplefilter
from lms.djangoapps.lms_xblock.mixin import LmsBlockMixin
from cms.lib.xblock.authoring_mixin import AuthoringMixin
import dealer.git
from xmodule.modulestore.edit_info import EditInfoMixin
from xmodule.mixin import LicenseMixin
############################ FEATURE CONFIGURATION #############################
STUDIO_NAME = "Studio"
STUDIO_SHORT_NAME = "Studio"
FEATURES = {
'USE_DJANGO_PIPELINE': True,
'GITHUB_PUSH': False,
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the ones in lms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True,
'AUTH_USE_CERTIFICATES': False,
# email address for studio staff (eg to request course creation)
'STUDIO_REQUEST_EMAIL': '',
# Segment.io - must explicitly turn it on for production
'SEGMENT_IO': False,
# Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Don't autoplay videos for course authors
'AUTOPLAY_VIDEOS': False,
# If set to True, new Studio users won't be able to author courses unless
# edX has explicitly added them to the course creator group.
'ENABLE_CREATOR_GROUP': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# If set to True, Studio won't restrict the set of advanced components
# to just those pre-approved by edX
'ALLOW_ALL_ADVANCED_COMPONENTS': False,
    # Turn off account locking if failed login attempts exceed a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Allow editing of short description in course settings in cms
'EDITABLE_SHORT_DESCRIPTION': True,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggles the embargo functionality, which blocks users
# based on their location.
'EMBARGO': False,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Allow creating courses with non-ascii characters in the course id
'ALLOW_UNICODE_COURSE_ID': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': False,
# Turn off Advanced Security by default
'ADVANCED_SECURITY': False,
# Turn off Video Upload Pipeline through Studio, by default
'ENABLE_VIDEO_UPLOAD_PIPELINE': False,
# Is this an edX-owned domain? (edx.org)
# for consistency in user-experience, keep the value of this feature flag
# in sync with the one in lms/envs/common.py
'IS_EDX_DOMAIN': False,
# let students save and manage their annotations
# for consistency in user-experience, keep the value of this feature flag
# in sync with the one in lms/envs/common.py
'ENABLE_EDXNOTES': False,
# Enable support for content libraries. Note that content libraries are
# only supported in courses using split mongo.
'ENABLE_CONTENT_LIBRARIES': True,
# Milestones application flag
'MILESTONES_APP': False,
# Prerequisite courses feature flag
'ENABLE_PREREQUISITE_COURSES': False,
# Toggle course entrance exams feature
'ENTRANCE_EXAMS': False,
# Toggle platform-wide course licensing
'LICENSING': False,
# Enable the courseware search functionality
'ENABLE_COURSEWARE_INDEX': False,
# Enable content libraries search functionality
'ENABLE_LIBRARY_INDEX': False,
# Enable course reruns, which will always use the split modulestore
'ALLOW_COURSE_RERUNS': True,
# Certificates Web/HTML Views
'CERTIFICATES_HTML_VIEW': False,
# Social Media Sharing on Student Dashboard
'SOCIAL_SHARING_SETTINGS': {
# Note: Ensure 'CUSTOM_COURSE_URLS' has a matching value in lms/envs/common.py
'CUSTOM_COURSE_URLS': False
},
# Teams feature
'ENABLE_TEAMS': False,
# Show video bumper in Studio
'ENABLE_VIDEO_BUMPER': False,
# How many seconds to show the bumper again, default is 7 days:
'SHOW_BUMPER_PERIODICITY': 7 * 24 * 3600,
# Enable credit eligibility feature
'ENABLE_CREDIT_ELIGIBILITY': ENABLE_CREDIT_ELIGIBILITY,
# Can the visibility of the discussion tab be configured on a per-course basis?
'ALLOW_HIDING_DISCUSSION_TAB': False,
# Timed or Proctored Exams
'ENABLE_PROCTORED_EXAMS': False,
}
ENABLE_JASMINE = False
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/cms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
LMS_ROOT = REPO_ROOT / "lms"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
GITHUB_REPO_ROOT = ENV_ROOT / "data"
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat"
############################# WEB CONFIGURATION #############################
# This is where we stick our compiled template files.
import tempfile
MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_cms')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [
PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_js' / 'templates',
COMMON_ROOT / 'static', # required to statically include common Underscore templates
]
for namespace, template_dirs in lms.envs.common.MAKO_TEMPLATES.iteritems():
MAKO_TEMPLATES['lms.' + namespace] = template_dirs
TEMPLATE_DIRS = MAKO_TEMPLATES['main']
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/signin'
LOGIN_URL = EDX_ROOT_URL + '/signin'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
'dealer.contrib.django.staff.context_processor', # access git revision
'contentstore.context_processors.doc_url',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
LMS_BASE = None
# These are standard regexes for pulling out info like course_ids, usage_ids, etc.
# They are used so that URLs with deprecated-format strings still work.
from lms.envs.common import (
COURSE_KEY_PATTERN, COURSE_ID_PATTERN, USAGE_KEY_PATTERN, ASSET_KEY_PATTERN
)
######################### CSRF #########################################
# Forwards-compatibility with Django 1.7
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
#################### CAPA External Code Evaluation #############################
XQUEUE_INTERFACE = {
'url': 'http://localhost:8888',
'django_auth': {'username': 'local',
'password': 'local'},
'basic_auth': None,
}
################################# Deprecation warnings #####################
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
simplefilter('ignore')
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'method_override.middleware.MethodOverrideMiddleware',
# Instead of AuthenticationMiddleware, we use a cache-backed version
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
# TODO: Re-import the Django version once we upgrade to Django 1.8 [PLAT-671]
# 'django.middleware.locale.LocaleMiddleware',
'django_locale.middleware.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# These are the Mixins that should be added to every XBlock.
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (
LmsBlockMixin,
InheritanceMixin,
XModuleMixin,
EditInfoMixin,
AuthoringMixin,
)
# Allow any XBlock in Studio
# You should also enable the ALLOW_ALL_ADVANCED_COMPONENTS feature flag, so that
# xblocks can be added via advanced settings
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############################ Modulestore Configuration ################################
MODULESTORE_BRANCH = 'draft-preferred'
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': {},
'stores': [
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
}
]
}
}
}
############################ DJANGO_BUILTINS ################################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
SESSION_COOKIE_SECURE = False
# Site info
SITE_ID = 1
SITE_NAME = "localhost:8001"
HTTPS = 'on'
ROOT_URLCONF = 'cms.urls'
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
ADMINS = ()
MANAGERS = ADMINS
EDX_PLATFORM_REVISION = os.environ.get('EDX_PLATFORM_REVISION')
if not EDX_PLATFORM_REVISION:
try:
# Get git revision of the current file
EDX_PLATFORM_REVISION = dealer.git.Backend(path=REPO_ROOT).revision
except TypeError:
# Not a git repository
EDX_PLATFORM_REVISION = 'unknown'
# Static content
STATIC_URL = '/static/' + EDX_PLATFORM_REVISION + "/"
STATIC_ROOT = ENV_ROOT / "staticfiles" / EDX_PLATFORM_REVISION
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
LMS_ROOT / "static",
# This is how you would use the textbook images locally
# ("book", ENV_ROOT / "book_images"),
]
# Locale/Internationalization
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGES_BIDI = lms.envs.common.LANGUAGES_BIDI
LANGUAGES = lms.envs.common.LANGUAGES
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
##### EMBARGO #####
EMBARGO_SITE_REDIRECT_URL = None
############################### Pipeline #######################################
STATICFILES_STORAGE = 'openedx.core.lib.django_require.staticstorage.OptimizedCachedRequireJsStorage'
from openedx.core.lib.rooted_paths import rooted_glob
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/normalize.css',
'css/vendor/font-awesome.css',
'css/vendor/html5-input-polyfills/number-polyfill.css',
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
'css/vendor/jquery.qtip.min.css',
'js/vendor/markitup/skins/simple/style.css',
'js/vendor/markitup/sets/wiki/style.css',
],
'output_filename': 'css/cms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'css/tinymce-studio-content-fonts.css',
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css',
'css/tinymce-studio-content.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-skin.css',
},
'style-main': {
# this is unnecessary and can be removed
'source_filenames': [
'css/studio-main.css',
],
'output_filename': 'css/studio-main.css',
},
'style-main-rtl': {
# this is unnecessary and can be removed
'source_filenames': [
'css/studio-main-rtl.css',
],
'output_filename': 'css/studio-main-rtl.css',
},
'style-xmodule-annotations': {
'source_filenames': [
'css/vendor/ova/annotator.css',
'css/vendor/ova/edx-annotator.css',
'css/vendor/ova/video-js.min.css',
'css/vendor/ova/rangeslider.css',
'css/vendor/ova/share-annotator.css',
'css/vendor/ova/richText-annotator.css',
'css/vendor/ova/tags-annotator.css',
'css/vendor/ova/flagging-annotator.css',
'css/vendor/ova/diacritic-annotator.css',
'css/vendor/ova/grouping-annotator.css',
'css/vendor/ova/ova.css',
'js/vendor/ova/catch/css/main.css'
],
'output_filename': 'css/cms-style-xmodule-annotations.css',
},
}
# test_order: Determines the position of this chunk of javascript on
# the jasmine test page
PIPELINE_JS = {
'module-js': {
'source_filenames': (
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/modules/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'coffee/src/discussion/*.js')
),
'output_filename': 'js/cms-modules.js',
'test_order': 1
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.coffee.CoffeeScriptCompiler',
)
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
STATICFILES_IGNORE_PATTERNS = (
"*.py",
"*.pyc",
# it would be nice if we could do, for example, "**/*.scss",
# but these strings get passed down to the `fnmatch` module,
# which doesn't support that. :(
# http://docs.python.org/2/library/fnmatch.html
"sass/*.scss",
"sass/*/*.scss",
"sass/*/*/*.scss",
"sass/*/*/*/*.scss",
"coffee/*.coffee",
"coffee/*/*.coffee",
"coffee/*/*/*.coffee",
"coffee/*/*/*/*.coffee",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_YUI_BINARY = 'yui-compressor'
################################# DJANGO-REQUIRE ###############################
# The baseUrl to pass to the r.js optimizer, relative to STATIC_ROOT.
REQUIRE_BASE_URL = "./"
# The name of a build profile to use for your project, relative to REQUIRE_BASE_URL.
# A sensible value would be 'app.build.js'. Leave blank to use the built-in default build profile.
# Set to False to disable running the default profile (e.g. if only using it to build Standalone
# Modules)
REQUIRE_BUILD_PROFILE = "cms/js/build.js"
# The name of the require.js script used by your project, relative to REQUIRE_BASE_URL.
REQUIRE_JS = "js/vendor/require.js"
# A dictionary of standalone modules to build with almond.js.
REQUIRE_STANDALONE_MODULES = {}
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = False
# A tuple of files to exclude from the compilation result of r.js.
REQUIRE_EXCLUDE = ("build.txt",)
# The execution environment in which to run r.js: auto, node or rhino.
# auto will autodetect the environment and make use of node if available and rhino if not.
# It can also be a path to a custom class that subclasses require.environments.Environment and defines some "args" function that returns a list with the command arguments to execute.
REQUIRE_ENVIRONMENT = "node"
################################# TENDER ######################################
# If you want to enable Tender integration (http://tenderapp.com/),
# put in the subdomain where Tender hosts tender_widget.js. For example,
# if you want to use the URL https://example.tenderapp.com/tender_widget.js,
# you should use "example".
TENDER_SUBDOMAIN = None
# If you want to have a vanity domain that points to Tender, put that here.
# For example, "help.myapp.com". Otherwise, should should be your full
# tenderapp domain name: for example, "example.tenderapp.com".
TENDER_DOMAIN = None
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############################## Video ##########################################
YOUTUBE = {
# YouTube JavaScript API
'API': 'https://www.youtube.com/iframe_api',
# URL to get YouTube metadata
'METADATA_URL': 'https://www.googleapis.com/youtube/v3/videos',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
'IMAGE_API': 'http://img.youtube.com/vi/{youtube_id}/0.jpg', # /maxresdefault.jpg for 1920*1080
}
############################# VIDEO UPLOAD PIPELINE #############################
VIDEO_UPLOAD_PIPELINE = {
'BUCKET': '',
'ROOT_PATH': '',
'CONCURRENT_UPLOAD_LIMIT': 4,
}
############################ APPS #####################################
INSTALLED_APPS = (
# Standard apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'djcelery',
'south',
'method_override',
# History tables
'simple_history',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# Testing
'django_nose',
# For CMS
'contentstore',
'course_creators',
'student', # misleading name due to sharing with lms
'openedx.core.djangoapps.course_groups', # not used in cms (yet), but tests run
'xblock_config',
# Tracking
'track',
'eventtracking.django',
# Monitoring
'datadog',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
'require',
# comment common
'django_comment_common',
# for course creator table
'django.contrib.admin',
# for managing course modes
'course_modes',
# Dark-launching languages
'dark_lang',
# Student identity reverification
'reverification',
# User preferences
'openedx.core.djangoapps.user_api',
'django_openid_auth',
'embargo',
# Monitoring signals
'monitoring',
# Course action state
'course_action_state',
# Additional problem types
'edx_jsme', # Molecular Structure
'openedx.core.djangoapps.content.course_overviews',
'openedx.core.djangoapps.content.course_structures',
# Credit courses
'openedx.core.djangoapps.credit',
'xblock_django',
)
################# EDX MARKETING SITE ##################################
EDXMKTG_LOGGED_IN_COOKIE_NAME = 'edxloggedin'
EDXMKTG_USER_INFO_COOKIE_NAME = 'edx-user-info'
EDXMKTG_USER_INFO_COOKIE_VERSION = 1
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
}
COURSES_WITH_UNSAFE_CODE = []
############################## EVENT TRACKING #################################
TRACK_MAX_EVENT = 50000
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
EVENT_TRACKING_ENABLED = True
EVENT_TRACKING_BACKENDS = {
'tracking_logs': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'logger': {
'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking',
'max_event_size': TRACK_MAX_EVENT,
}
}
},
'processors': [
{'ENGINE': 'track.shim.LegacyFieldMappingProcessor'},
{'ENGINE': 'track.shim.VideoEventProcessor'}
]
}
},
'segmentio': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'segment': {'ENGINE': 'eventtracking.backends.segment.SegmentBackend'}
},
'processors': [
{
'ENGINE': 'eventtracking.processors.whitelist.NameWhitelistProcessor',
'OPTIONS': {
'whitelist': []
}
},
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
}
}
}
EVENT_TRACKING_PROCESSORS = []
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
### Apps only installed in some instances
OPTIONAL_APPS = (
'mentoring',
'problem_builder',
'edx_sga',
# edx-ora2
'submissions',
'openassessment',
'openassessment.assessment',
'openassessment.fileupload',
'openassessment.workflow',
'openassessment.xblock',
# edxval
'edxval',
# milestones
'milestones',
# edX Proctoring
'edx_proctoring',
)
for app_name in OPTIONAL_APPS:
# First attempt to only find the module rather than actually importing it,
# to avoid circular references - only try to import if it can't be found
# by find_module, which doesn't work with import hooks
try:
imp.find_module(app_name)
except ImportError:
try:
__import__(app_name)
except ImportError:
continue
INSTALLED_APPS += (app_name,)
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
### External auth usage -- prefixes for ENROLLMENT_DOMAIN
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'
### Size of chunks into which asset uploads will be divided
UPLOAD_CHUNK_SIZE_IN_MB = 50
### Max size of asset uploads to GridFS
MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB = 50
# FAQ url to direct users to if they upload
# a file that exceeds the above size
MAX_ASSET_UPLOAD_FILE_SIZE_URL = ""
### Default value for entrance exam minimum score
ENTRANCE_EXAM_MIN_SCORE_PCT = 50
### Default language for a new course
DEFAULT_COURSE_LANGUAGE = "en"
################ ADVANCED_COMPONENT_TYPES ###############
ADVANCED_COMPONENT_TYPES = [
'annotatable',
'textannotation', # module for annotating text (with annotation table)
'videoannotation', # module for annotating video (with annotation table)
'imageannotation', # module for annotating image (with annotation table)
'word_cloud',
'graphical_slider_tool',
'lti',
'library_content',
'edx_sga',
'problem-builder',
'pb-dashboard',
'poll',
'survey',
# XBlocks from pmitros repos are prototypes. They should not be used
# except for edX Learning Sciences experiments on edge.edx.org without
# further work to make them robust, maintainable, finalize data formats,
# etc.
'concept', # Concept mapper. See https://github.com/pmitros/ConceptXBlock
'done', # Lets students mark things as done. See https://github.com/pmitros/DoneXBlock
'audio', # Embed an audio file. See https://github.com/pmitros/AudioXBlock
'recommender', # Crowdsourced recommender. Prototype by dli&pmitros. Intended for roll-out in one place in one course.
'profile', # Prototype user profile XBlock. Used to test XBlock parameter passing. See https://github.com/pmitros/ProfileXBlock
'split_test',
'combinedopenended',
'peergrading',
'notes',
'schoolyourself_review',
'schoolyourself_lesson',
# Google Drive embedded components. These XBlocks allow one to
# embed public google drive documents and calendars within edX units
'google-document',
'google-calendar',
# In-course reverification checkpoint
'edx-reverification-block',
]
# Adding components to this list will disable the creation of new problems for those
# components in Studio. Existing problems will work fine and one can edit them in Studio.
DEPRECATED_ADVANCED_COMPONENT_TYPES = []
# Specify xblocks that should be treated as advanced problems. Each entry is a tuple
# specifying the xblock name and an optional YAML template to be used.
ADVANCED_PROBLEM_TYPES = [
{
'component': 'openassessment',
'boilerplate_name': None,
}
]
# Files and Uploads type filter values
FILES_AND_UPLOAD_TYPE_FILTERS = {
"Images": ['image/png', 'image/jpeg', 'image/jpg', 'image/gif', 'image/tiff', 'image/tif', 'image/x-icon'],
"Documents": [
'application/pdf',
'text/plain',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'application/vnd.openxmlformats-officedocument.wordprocessingml.template',
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'application/vnd.openxmlformats-officedocument.presentationml.slideshow',
'application/vnd.openxmlformats-officedocument.presentationml.template',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'application/vnd.openxmlformats-officedocument.spreadsheetml.template',
'application/msword',
'application/vnd.ms-excel',
'application/vnd.ms-powerpoint',
],
}
# Default to no Search Engine
SEARCH_ENGINE = None
ELASTIC_FIELD_MAPPINGS = {
"start_date": {
"type": "date"
}
}
XBLOCK_SETTINGS = {
"VideoDescriptor": {
"licensing_enabled": FEATURES.get("LICENSING", False)
},
'VideoModule': {
'YOUTUBE_API_KEY': YOUTUBE_API_KEY
}
}
################################ Settings for Credit Course Requirements ################################
# Initial delay used for retrying tasks.
# Additional retries use longer delays.
# Value is in seconds.
CREDIT_TASK_DEFAULT_RETRY_DELAY = 30
# Maximum number of retries per task for errors that are not related
# to throttling.
CREDIT_TASK_MAX_RETRIES = 5
# Maximum age in seconds of timestamps we will accept
# when a credit provider notifies us that a student has been approved
# or denied for credit.
CREDIT_PROVIDER_TIMESTAMP_EXPIRATION = 15 * 60
################################ Deprecated Blocks Info ################################
DEPRECATED_BLOCK_TYPES = ['peergrading', 'combinedopenended']
#### PROCTORING CONFIGURATION DEFAULTS
PROCTORING_BACKEND_PROVIDER = {
'class': 'edx_proctoring.backends.NullBackendProvider',
'options': {},
}
PROCTORING_SETTINGS = {}
| agpl-3.0 | 239,010,632,844,921,660 | 31.200768 | 182 | 0.653113 | false |
openstack/python-saharaclient | saharaclient/tests/unit/test_data_sources.py | 1 | 4609 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from saharaclient.api import data_sources as ds
from saharaclient.tests.unit import base
from unittest import mock
from oslo_serialization import jsonutils as json
class DataSourceTest(base.BaseTestCase):
body = {
'name': 'name',
'url': 'url',
'description': 'descr',
'data_source_type': 'hdfs',
'credential_user': 'user',
'credential_pass': '123'
}
response = {
'name': 'name',
'url': 'url',
'description': 'descr',
'type': 'hdfs',
'credentials': {
'user': 'user',
'password': '123'
}
}
update_json = {
'name': 'UpdatedName',
'url': 'hdfs://myfakeserver/fakepath'
}
def test_create_data_sources(self):
url = self.URL + '/data-sources'
self.responses.post(url, status_code=202,
json={'data_source': self.response})
resp = self.client.data_sources.create(**self.body)
self.assertEqual(url, self.responses.last_request.url)
self.assertEqual(self.response,
json.loads(self.responses.last_request.body))
self.assertIsInstance(resp, ds.DataSources)
self.assertFields(self.response, resp)
def test_data_sources_list(self):
url = self.URL + '/data-sources'
self.responses.get(url, json={'data_sources': [self.response]})
resp = self.client.data_sources.list()
self.assertEqual(url, self.responses.last_request.url)
self.assertIsInstance(resp[0], ds.DataSources)
self.assertFields(self.response, resp[0])
def test_data_sources_get(self):
url = self.URL + '/data-sources/id'
self.responses.get(url, json={'data_source': self.response})
resp = self.client.data_sources.get('id')
self.assertEqual(url, self.responses.last_request.url)
self.assertIsInstance(resp, ds.DataSources)
self.assertFields(self.response, resp)
def test_data_sources_delete(self):
url = self.URL + '/data-sources/id'
self.responses.delete(url, status_code=204)
self.client.data_sources.delete('id')
self.assertEqual(url, self.responses.last_request.url)
def test_update_data_sources(self):
update_url = self.URL + '/data-sources/id'
self.responses.put(update_url, status_code=202,
json=self.update_json)
updated = self.client.data_sources.update("id", self.update_json)
self.assertEqual(self.update_json["name"], updated.name)
self.assertEqual(self.update_json["url"], updated.url)
@mock.patch('saharaclient.api.base.ResourceManager._create')
def test_create_data_source_s3_or_swift_credentials(self, create):
# Data source without any credential arguments
self.client.data_sources.create('ds', '', 'swift', 'swift://path')
self.assertNotIn('credentials', create.call_args[0][1])
# Data source with Swift credential arguments
self.client.data_sources.create('ds', '', 'swift', 'swift://path',
credential_user='user')
self.assertIn('credentials', create.call_args[0][1])
# Data source with S3 credential arguments
self.client.data_sources.create('ds', '', 'swift', 'swift://path',
s3_credentials={'accesskey': 'a'})
self.assertIn('credentials', create.call_args[0][1])
self.assertIn('accesskey', create.call_args[0][1]['credentials'])
# Data source with both S3 and swift credential arguments
self.client.data_sources.create('ds', '', 's3', 's3://path',
credential_user='swift_user',
s3_credentials={'accesskey': 's3_a'})
self.assertIn('user', create.call_args[0][1]['credentials'])
self.assertNotIn('accesskey', create.call_args[0][1]['credentials'])
| apache-2.0 | 7,927,911,403,367,229,000 | 37.731092 | 78 | 0.611195 | false |
dana-i2cat/felix | modules/resource/utilities/rspecs/serm/request_parser.py | 1 | 4179 | from rspecs.parser_base import ParserBase
from rspecs.commons_se import SELink
from rspecs.commons_tn import Node, Interface
import core
logger = core.log.getLogger("utility-rspec")
class SERMv3RequestParser(ParserBase):
def __init__(self, from_file=None, from_string=None):
super(SERMv3RequestParser, self).__init__(from_file, from_string)
self.__sv = self.rspec.nsmap.get('sharedvlan')
self.__felix = self.rspec.nsmap.get('felix')
self.__proto = self.rspec.nsmap.get('protogeni')
def check_se_node_resource(self, node):
# according to the proposed URNs structure, a SE-node MUST have
# "serm" as resource-name (client_id) and authority
# (component_manager_id) fields
        # At least we verify the authority field here!
if node.attrib.get("component_manager_id", None) is not None and \
node.attrib.get("client_id", None) is not None:
if "serm" in node.attrib.get("component_manager_id", "") or \
"serm" in node.attrib.get("client_id", ""):
return True
return False
def check_se_link_resource(self, link, c_manager):
        # according to the proposed URNs structure, a SE-link MUST have
# "serm" as resource-name (client_id) and authority
# (component_manager_name) fields
        # At least we verify the authority field here!
if not c_manager.attrib.get("name"):
return False
if "serm" in c_manager.attrib.get("name"):
return True
return False
def update_protogeni_cm_uuid(self, tag, obj):
cmuuid = tag.attrib.get("{%s}component_manager_uuid" % (self.__proto))
if cmuuid is not None:
obj.add_component_manager_uuid(cmuuid)
def get_nodes(self, rspec):
nodes = []
for n in rspec.findall(".//{%s}node" % (self.none)):
if not self.check_se_node_resource(n):
logger.info("Skipping this node, not a SE-res: %s", (n,))
continue
node = Node(n.attrib.get("client_id"),
n.attrib.get("component_manager_id"),
n.attrib.get("exclusive"))
self.update_protogeni_cm_uuid(n, node)
for i in n.iterfind("{%s}interface" % (self.none)):
interface = Interface(i.attrib.get("client_id"))
for sv in i.iterfind("{%s}link_shared_vlan" % (self.__sv)):
interface.add_vlan(sv.attrib.get("vlantag"),
sv.attrib.get("name"))
node.add_interface(interface.serialize())
nodes.append(node.serialize())
return nodes
def nodes(self):
return self.get_nodes(self.rspec)
def get_links(self, rspec):
links_ = []
for l in rspec.findall(".//{%s}link" % (self.none)):
manager_ = l.find("{%s}component_manager" % (self.none))
if manager_ is None:
self.raise_exception("Component-Mgr tag not found in link!")
if not self.check_se_link_resource(l, manager_):
logger.info("Skipping this link, not a SE-res: %s", (l,))
continue
type_ = l.find("{%s}link_type" % (self.none))
if type_ is None:
self.raise_exception("Link-Type tag not found in link!")
l_ = SELink(l.attrib.get("client_id"), type_.attrib.get("name"),
manager_.attrib.get("name"))
self.update_protogeni_cm_uuid(l, l_)
# FIXME: VLAN seems not properly added to interface
[l_.add_interface_ref(i.attrib.get("client_id"),
i.attrib.get("{%s}vlan" % (self.__felix)))
for i in l.iterfind("{%s}interface_ref" % (self.none))]
[l_.add_property(p.attrib.get("source_id"),
p.attrib.get("dest_id"),
p.attrib.get("capacity"))
for p in l.iterfind("{%s}property" % (self.none))]
links_.append(l_.serialize())
return links_
def links(self):
return self.get_links(self.rspec)
| apache-2.0 | 6,311,328,318,198,974,000 | 38.8 | 78 | 0.556832 | false |
katajakasa/utuputki2 | alembic/versions/4690204e5a62_initial.py | 1 | 5560 | """Initial
Revision ID: 4690204e5a62
Revises:
Create Date: 2015-10-28 18:43:54.656000
"""
# revision identifiers, used by Alembic.
revision = '4690204e5a62'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('event',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=32), nullable=True),
sa.Column('visible', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('source',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('hash', sa.String(length=64), nullable=True),
sa.Column('file_name', sa.String(length=256), nullable=True),
sa.Column('file_ext', sa.String(length=4), nullable=True),
sa.Column('mime_type', sa.String(length=32), nullable=True),
sa.Column('size_bytes', sa.Integer(), nullable=True),
sa.Column('media_type', sa.Integer(), nullable=True),
sa.Column('youtube_hash', sa.String(length=32), nullable=True),
sa.Column('other_url', sa.String(length=512), nullable=True),
sa.Column('length_seconds', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('title', sa.String(length=100), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('status', sa.Integer(), nullable=True),
sa.Column('message', sa.String(length=64), nullable=True),
sa.Column('video_codec', sa.String(length=16), nullable=True),
sa.Column('video_bitrate', sa.Integer(), nullable=True),
sa.Column('video_w', sa.Integer(), nullable=True),
sa.Column('video_h', sa.Integer(), nullable=True),
sa.Column('audio_codec', sa.String(length=16), nullable=True),
sa.Column('audio_bitrate', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=32), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('nickname', sa.String(length=32), nullable=True),
sa.Column('email', sa.String(length=128), nullable=True),
sa.Column('level', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table('setting',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user', sa.Integer(), nullable=True),
sa.Column('key', sa.String(length=32), nullable=True),
sa.Column('value', sa.String(length=32), nullable=True),
sa.Column('type', sa.Integer(), nullable=True),
sa.Column('max', sa.Integer(), nullable=True),
sa.Column('min', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('session',
sa.Column('key', sa.String(length=32), nullable=False),
sa.Column('user', sa.Integer(), nullable=True),
sa.Column('start', sa.DateTime(timezone=True), nullable=True),
sa.ForeignKeyConstraint(['user'], ['user.id'], ),
sa.PrimaryKeyConstraint('key')
)
op.create_table('sourcequeue',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
sa.ForeignKeyConstraint(['user'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
)
op.create_table('media',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('source', sa.Integer(), nullable=True),
sa.Column('user', sa.Integer(), nullable=True),
sa.Column('queue', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['queue'], ['sourcequeue.id'], ),
sa.ForeignKeyConstraint(['source'], ['source.id'], ),
sa.ForeignKeyConstraint(['user'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('player',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('token', sa.String(length=16), nullable=True),
sa.Column('event', sa.Integer(), nullable=True),
sa.Column('name', sa.String(length=32), nullable=True),
sa.Column('last', sa.Integer(), nullable=True),
sa.Column('status', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['event'], ['event.id'], ),
sa.ForeignKeyConstraint(['last'], ['media.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_player_token'), 'player', ['token'], unique=True)
op.create_table('skip',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user', sa.Integer(), nullable=True),
sa.Column('media', sa.Integer(), nullable=True),
sa.Column('player', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['media'], ['media.id'], ),
sa.ForeignKeyConstraint(['player'], ['player.id'], ),
sa.ForeignKeyConstraint(['user'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('user', 'media', 'player', name='_user_media_player_uc')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
op.drop_table('sourcequeue')
op.drop_table('source')
op.drop_table('skip')
op.drop_table('setting')
op.drop_table('session')
op.drop_index(op.f('ix_player_token'), table_name='player')
op.drop_table('player')
op.drop_table('media')
op.drop_table('event')
### end Alembic commands ###
| mit | -8,873,998,268,785,954,000 | 40.185185 | 80 | 0.648741 | false |
optiflows/nyuki | nyuki/workflow/tasks/trigger_workflow.py | 1 | 7024 | import json
import asyncio
import logging
from enum import Enum
from aiohttp import ClientSession
from tukio.task import register
from tukio.task.holder import TaskHolder
from tukio.workflow import WorkflowExecState, Workflow
from .utils import runtime
from .utils.uri import URI
log = logging.getLogger(__name__)
class WorkflowStatus(Enum):
PENDING = 'pending'
RUNNING = 'running'
TIMEOUT = 'timeout'
DONE = 'done'
@register('trigger_workflow', 'execute')
class TriggerWorkflowTask(TaskHolder):
__slots__ = (
'template', 'blocking', 'task', '_engine', 'data',
'status', 'triggered_id', 'async_future',
)
SCHEMA = {
'type': 'object',
'required': ['template'],
'additionalProperties': False,
'properties': {
'template': {
'type': 'object',
'required': ['service', 'id'],
'additionalProperties': False,
'properties': {
'service': {'type': 'string', 'minLength': 1},
'id': {'type': 'string', 'minLength': 1},
'draft': {'type': 'boolean', 'default': False},
},
},
'blocking': {'type': 'boolean', 'default': True},
},
}
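    # A config dict that validates against SCHEMA could look like the
    # following (the service and id values are purely illustrative):
    #
    #     {
    #         "template": {"service": "pipeline", "id": "a1b2c3d4", "draft": False},
    #         "blocking": True,
    #     }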
def __init__(self, config):
super().__init__(config)
self.template = self.config['template']
self.blocking = self.config.get('blocking', True)
self.task = None
self._engine = 'http://{}/{}/api/v1/workflow'.format(
runtime.config.get('http_host', 'localhost'),
self.template['service'],
)
# Reporting
self.status = WorkflowStatus.PENDING.value
self.data = None
self.triggered_id = None
self.async_future = None
def report(self):
return {
'exec_id': self.triggered_id,
'status': self.status,
}
async def async_exec(self, topic, data):
log.debug(
"Received data for async trigger_workflow in '%s': %s",
topic, data,
)
if not self.async_future.done():
self.async_future.set_result(data)
await runtime.bus.unsubscribe(topic)
async def execute(self, event):
"""
Entrypoint execution method.
"""
self.data = event.data
self.task = asyncio.Task.current_task()
is_draft = self.template.get('draft', False)
# Send the HTTP request
log.info('Triggering template %s%s on service %s', self.template['id'],
' (draft)' if is_draft else '', self.template['service'])
# Setup headers (set requester and exec-track to avoid workflow loops)
workflow = runtime.workflows[Workflow.current_workflow().uid]
parent = workflow.exec.get('requester')
track = list(workflow.exec.get('track', []))
if parent:
track.append(parent)
headers = {
'Content-Type': 'application/json',
'Referer': URI.instance(workflow.instance),
'X-Surycat-Exec-Track': ','.join(track)
}
# Handle blocking trigger_workflow using mqtt
if self.blocking:
topic = '{}/async/{}'.format(runtime.bus.name, self.uid[:8])
headers['X-Surycat-Async-Topic'] = topic
headers['X-Surycat-Async-Events'] = ','.join([
WorkflowExecState.END.value,
WorkflowExecState.ERROR.value,
])
self.async_future = asyncio.Future()
await runtime.bus.subscribe(topic, self.async_exec)
def _unsub(f):
asyncio.ensure_future(runtime.bus.unsubscribe(topic))
self.task.add_done_callback(_unsub)
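            # Rendezvous for blocking mode: the engine running the child
            # workflow is expected to publish its end/error event on `topic`,
            # which makes async_exec() resolve `async_future` and unblocks
            # execute() further below. The done callback is a safety net that
            # unsubscribes if this task is cancelled before the child reports.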
async with ClientSession() as session:
# Compute data to send to sub-workflows
url = '{}/vars/{}{}'.format(
self._engine,
self.template['id'],
'/draft' if is_draft else '',
)
async with session.get(url) as response:
if response.status != 200:
raise RuntimeError("Can't load template info")
wf_vars = await response.json()
lightened_data = {
key: self.data[key]
for key in wf_vars
if key in self.data
}
params = {
'url': '{}/instances'.format(self._engine),
'headers': headers,
'data': json.dumps({
'id': self.template['id'],
'draft': is_draft,
'inputs': lightened_data,
})
}
async with session.put(**params) as response:
if response.status != 200:
msg = "Can't process workflow template {} on {}".format(
                        self.template, self._engine
)
                    if 400 <= response.status < 500:
reason = await response.json()
msg = "{}, reason: {}".format(msg, reason['error'])
raise RuntimeError(msg)
resp_body = await response.json()
self.triggered_id = resp_body['id']
wf_id = '@'.join([self.triggered_id[:8], self.template['service']])
self.status = WorkflowStatus.RUNNING.value
log.info('Successfully started %s', wf_id)
self.task.dispatch_progress(self.report())
# Block until task completed
if self.blocking:
log.info('Waiting for workflow %s to complete', wf_id)
await self.async_future
self.status = WorkflowStatus.DONE.value
log.info('Workflow %s is done', wf_id)
self.task.dispatch_progress({'status': self.status})
return self.data
async def _end_triggered_workflow(self):
"""
Asynchronously cancel the triggered workflow.
"""
wf_id = '@'.join([self.triggered_id[:8], self.template['service']])
async with ClientSession() as session:
url = '{}/instances/{}'.format(self._engine, self.triggered_id)
async with session.delete(url) as response:
if response.status != 200:
log.warning('Failed to cancel workflow %s', wf_id)
else:
log.info('Workflow %s has been cancelled', wf_id)
def teardown(self):
"""
Called when this task is cancelled or timed out.
"""
if self.task.timed_out is True:
self.status = WorkflowStatus.TIMEOUT.value
else:
self.status = WorkflowStatus.DONE.value
self.task.dispatch_progress({'status': self.status})
if not self.triggered_id:
log.debug('No workflow to cancel')
return self.data
asyncio.ensure_future(self._end_triggered_workflow())
return self.data
| apache-2.0 | 5,176,207,135,158,172,000 | 33.431373 | 79 | 0.530325 | false |
petroniocandido/pyFTS | pyFTS/models/seasonal/msfts.py | 1 | 1921 | import numpy as np
from pyFTS.common import FLR
from pyFTS.models.seasonal import sfts
class MultiSeasonalFTS(sfts.SeasonalFTS):
"""
Multi-Seasonal Fuzzy Time Series
"""
def __init__(self, name, indexer, **kwargs):
super(MultiSeasonalFTS, self).__init__("MSFTS")
self.name = "Multi Seasonal FTS"
self.shortname = "MSFTS " + name
self.detail = ""
self.seasonality = 1
self.has_seasonality = True
self.has_point_forecasting = True
self.is_high_order = False
self.is_multivariate = True
self.indexer = indexer
self.flrgs = {}
def generate_flrg(self, flrs):
for flr in flrs:
if str(flr.index) not in self.flrgs:
self.flrgs[str(flr.index)] = sfts.SeasonalFLRG(flr.index)
self.flrgs[str(flr.index)].append_rhs(flr.RHS)
def train(self, data, **kwargs):
if kwargs.get('sets', None) is not None:
self.sets = kwargs.get('sets', None)
if kwargs.get('parameters', None) is not None:
self.seasonality = kwargs.get('parameters', None)
#ndata = self.indexer.set_data(data,self.doTransformations(self.indexer.get_data(data)))
flrs = FLR.generate_indexed_flrs(self.sets, self.indexer, data)
self.generate_flrg(flrs)
def forecast(self, data, **kwargs):
ret = []
index = self.indexer.get_season_of_data(data)
ndata = self.indexer.get_data(data)
for k in np.arange(0, len(index)):
flrg = self.flrgs[str(index[k])]
mp = self.getMidpoints(flrg)
ret.append(sum(mp) / len(mp))
return ret
def forecast_ahead(self, data, steps, **kwargs):
ret = []
for i in steps:
flrg = self.flrgs[str(i)]
mp = self.getMidpoints(flrg)
ret.append(sum(mp) / len(mp))
return ret
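# Rough usage sketch (illustrative only; `my_indexer` and `my_sets` stand in
# for a pyFTS seasonal indexer and a list of fuzzy sets built elsewhere):
#
#     model = MultiSeasonalFTS("example", indexer=my_indexer)
#     model.train(train_data, sets=my_sets)
#     forecasts = model.forecast(test_data)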
| gpl-3.0 | -2,375,264,770,700,917,000 | 28.106061 | 96 | 0.580947 | false |
bigswitch/neutron | neutron/tests/functional/pecan_wsgi/test_hooks.py | 1 | 22434 | # Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from neutron.api.v2 import attributes
from neutron import context
from neutron.db.quota import driver as quota_driver
from neutron import manager
from neutron.pecan_wsgi.controllers import resource
from neutron.pecan_wsgi.hooks import policy_enforcement as pe
from neutron import policy
from neutron.tests.functional.pecan_wsgi import test_functional
class TestOwnershipHook(test_functional.PecanFunctionalTest):
def test_network_ownership_check(self):
net_response = self.app.post_json(
'/v2.0/networks.json',
params={'network': {'name': 'meh'}},
headers={'X-Project-Id': 'tenid'})
network_id = jsonutils.loads(net_response.body)['network']['id']
port_response = self.app.post_json(
'/v2.0/ports.json',
params={'port': {'network_id': network_id,
'admin_state_up': True}},
headers={'X-Project-Id': 'tenid'})
self.assertEqual(201, port_response.status_int)
class TestQuotaEnforcementHook(test_functional.PecanFunctionalTest):
def test_quota_enforcement_single(self):
ctx = context.get_admin_context()
quota_driver.DbQuotaDriver.update_quota_limit(
ctx, 'tenid', 'network', 1)
# There is enough headroom for creating a network
response = self.app.post_json(
'/v2.0/networks.json',
params={'network': {'name': 'meh'}},
headers={'X-Project-Id': 'tenid'})
self.assertEqual(response.status_int, 201)
# But a second request will fail
response = self.app.post_json(
'/v2.0/networks.json',
params={'network': {'name': 'meh-2'}},
headers={'X-Project-Id': 'tenid'},
expect_errors=True)
self.assertEqual(response.status_int, 409)
def test_quota_enforcement_bulk_request(self):
ctx = context.get_admin_context()
quota_driver.DbQuotaDriver.update_quota_limit(
ctx, 'tenid', 'network', 3)
# There is enough headroom for a bulk request creating 2 networks
response = self.app.post_json(
'/v2.0/networks.json',
params={'networks': [
{'name': 'meh1'},
{'name': 'meh2'}]},
headers={'X-Project-Id': 'tenid'})
self.assertEqual(response.status_int, 201)
# But it won't be possible to create 2 more networks...
response = self.app.post_json(
'/v2.0/networks.json',
params={'networks': [
{'name': 'meh3'},
{'name': 'meh4'}]},
headers={'X-Project-Id': 'tenid'},
expect_errors=True)
self.assertEqual(response.status_int, 409)
class TestPolicyEnforcementHook(test_functional.PecanFunctionalTest):
FAKE_RESOURCE = {
'mehs': {
'id': {'allow_post': False, 'allow_put': False,
'is_visible': True, 'primary_key': True},
'attr': {'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': ''},
'restricted_attr': {'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string':
attributes.TENANT_ID_MAX_LEN},
'is_visible': True}
}
}
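    # In the attribute map above, 'allow_post'/'allow_put' control whether a
    # field may be set on create/update requests, 'is_visible' controls
    # whether it is returned in responses, and 'required_by_policy' ensures
    # tenant_id is always loaded so the policy rules below can match on it.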
def setUp(self):
# Create a controller for a fake resource. This will make the tests
# independent from the evolution of the API (so if one changes the API
# or the default policies there won't be any risk of breaking these
# tests, or at least I hope so)
super(TestPolicyEnforcementHook, self).setUp()
self.mock_plugin = mock.Mock()
attributes.RESOURCE_ATTRIBUTE_MAP.update(self.FAKE_RESOURCE)
attributes.PLURALS['mehs'] = 'meh'
manager.NeutronManager.set_plugin_for_resource('meh', self.mock_plugin)
fake_controller = resource.CollectionsController('mehs', 'meh')
manager.NeutronManager.set_controller_for_resource(
'mehs', fake_controller)
# Inject policies for the fake resource
policy.init()
policy._ENFORCER.set_rules(
oslo_policy.Rules.from_dict(
{'create_meh': '',
'update_meh': 'rule:admin_only',
'delete_meh': 'rule:admin_only',
'get_meh': 'rule:admin_only or field:mehs:id=xxx',
'get_meh:restricted_attr': 'rule:admin_only'}),
overwrite=False)
def test_before_on_create_authorized(self):
# Mock a return value for an hypothetical create operation
self.mock_plugin.create_meh.return_value = {
'id': 'xxx',
'attr': 'meh',
'restricted_attr': '',
'tenant_id': 'tenid'}
response = self.app.post_json('/v2.0/mehs.json',
params={'meh': {'attr': 'meh'}},
headers={'X-Project-Id': 'tenid'})
# We expect this operation to succeed
self.assertEqual(201, response.status_int)
self.assertEqual(0, self.mock_plugin.get_meh.call_count)
self.assertEqual(1, self.mock_plugin.create_meh.call_count)
def test_before_on_put_not_authorized(self):
# The policy hook here should load the resource, and therefore we must
# mock a get response
self.mock_plugin.get_meh.return_value = {
'id': 'xxx',
'attr': 'meh',
'restricted_attr': '',
'tenant_id': 'tenid'}
# The policy engine should trigger an exception in 'before', and the
# plugin method should not be called at all
response = self.app.put_json('/v2.0/mehs/xxx.json',
params={'meh': {'attr': 'meh'}},
headers={'X-Project-Id': 'tenid'},
expect_errors=True)
self.assertEqual(403, response.status_int)
self.assertEqual(1, self.mock_plugin.get_meh.call_count)
self.assertEqual(0, self.mock_plugin.update_meh.call_count)
def test_before_on_delete_not_authorized(self):
# The policy hook here should load the resource, and therefore we must
# mock a get response
self.mock_plugin.delete_meh.return_value = None
self.mock_plugin.get_meh.return_value = {
'id': 'xxx',
'attr': 'meh',
'restricted_attr': '',
'tenant_id': 'tenid'}
# The policy engine should trigger an exception in 'before', and the
# plugin method should not be called
response = self.app.delete_json('/v2.0/mehs/xxx.json',
headers={'X-Project-Id': 'tenid'},
expect_errors=True)
self.assertEqual(403, response.status_int)
self.assertEqual(1, self.mock_plugin.get_meh.call_count)
self.assertEqual(0, self.mock_plugin.delete_meh.call_count)
def test_after_on_get_not_authorized(self):
# The GET test policy will deny access to anything whose id is not
# 'xxx', so the following request should be forbidden
self.mock_plugin.get_meh.return_value = {
'id': 'yyy',
'attr': 'meh',
'restricted_attr': '',
'tenant_id': 'tenid'}
# The policy engine should trigger an exception in 'after', and the
# plugin method should be called
response = self.app.get('/v2.0/mehs/yyy.json',
headers={'X-Project-Id': 'tenid'},
expect_errors=True)
self.assertEqual(403, response.status_int)
self.assertEqual(1, self.mock_plugin.get_meh.call_count)
def test_after_on_get_excludes_admin_attribute(self):
self.mock_plugin.get_meh.return_value = {
'id': 'xxx',
'attr': 'meh',
'restricted_attr': '',
'tenant_id': 'tenid'}
response = self.app.get('/v2.0/mehs/xxx.json',
headers={'X-Project-Id': 'tenid'})
self.assertEqual(200, response.status_int)
json_response = jsonutils.loads(response.body)
self.assertNotIn('restricted_attr', json_response['meh'])
def test_after_on_list_excludes_admin_attribute(self):
self.mock_plugin.get_mehs.return_value = [{
'id': 'xxx',
'attr': 'meh',
'restricted_attr': '',
'tenant_id': 'tenid'}]
response = self.app.get('/v2.0/mehs',
headers={'X-Project-Id': 'tenid'})
self.assertEqual(200, response.status_int)
json_response = jsonutils.loads(response.body)
self.assertNotIn('restricted_attr', json_response['mehs'][0])
class TestDHCPNotifierHook(test_functional.PecanFunctionalTest):
def setUp(self):
# the DHCP notifier needs to be mocked so that correct operations can
# be easily validated. For the purpose of this test it is indeed not
# necessary that the notification is actually received and processed by
# the agent
patcher = mock.patch('neutron.api.rpc.agentnotifiers.'
'dhcp_rpc_agent_api.DhcpAgentNotifyAPI.notify')
self.mock_notifier = patcher.start()
super(TestDHCPNotifierHook, self).setUp()
def test_dhcp_notifications_disabled(self):
cfg.CONF.set_override('dhcp_agent_notification', False)
self.app.post_json(
'/v2.0/networks.json',
params={'network': {'name': 'meh'}},
headers={'X-Project-Id': 'tenid'})
self.assertEqual(0, self.mock_notifier.call_count)
def test_get_does_not_trigger_notification(self):
self.do_request('/v2.0/networks', tenant_id='tenid')
self.assertEqual(0, self.mock_notifier.call_count)
def test_post_put_delete_triggers_notification(self):
req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
response = self.app.post_json(
'/v2.0/networks.json',
params={'network': {'name': 'meh'}}, headers=req_headers)
self.assertEqual(201, response.status_int)
json_body = jsonutils.loads(response.body)
self.assertEqual(1, self.mock_notifier.call_count)
self.assertEqual(mock.call(mock.ANY, json_body, 'network.create.end'),
self.mock_notifier.mock_calls[-1])
network_id = json_body['network']['id']
response = self.app.put_json(
'/v2.0/networks/%s.json' % network_id,
params={'network': {'name': 'meh-2'}},
headers=req_headers)
self.assertEqual(200, response.status_int)
json_body = jsonutils.loads(response.body)
self.assertEqual(2, self.mock_notifier.call_count)
self.assertEqual(mock.call(mock.ANY, json_body, 'network.update.end'),
self.mock_notifier.mock_calls[-1])
response = self.app.delete(
'/v2.0/networks/%s.json' % network_id, headers=req_headers)
self.assertEqual(204, response.status_int)
self.assertEqual(3, self.mock_notifier.call_count)
# No need to validate data content sent to the notifier as it's just
# going to load the object from the database
self.assertEqual(mock.call(mock.ANY, mock.ANY, 'network.delete.end'),
self.mock_notifier.mock_calls[-1])
def test_bulk_create_triggers_notifications(self):
req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
response = self.app.post_json(
'/v2.0/networks.json',
params={'networks': [{'name': 'meh_1'},
{'name': 'meh_2'}]},
headers=req_headers)
self.assertEqual(201, response.status_int)
json_body = jsonutils.loads(response.body)
item_1 = json_body['networks'][0]
item_2 = json_body['networks'][1]
self.assertEqual(2, self.mock_notifier.call_count)
self.mock_notifier.assert_has_calls(
[mock.call(mock.ANY, {'network': item_1}, 'network.create.end'),
mock.call(mock.ANY, {'network': item_2}, 'network.create.end')])
class TestNovaNotifierHook(test_functional.PecanFunctionalTest):
def setUp(self):
patcher = mock.patch('neutron.pecan_wsgi.hooks.notifier.NotifierHook.'
'_nova_notify')
self.mock_notifier = patcher.start()
super(TestNovaNotifierHook, self).setUp()
def test_nova_notification_skips_on_failure(self):
req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
response = self.app.put_json(
'/v2.0/networks/%s.json' % uuidutils.generate_uuid(),
params={'network': {'name': 'meh-2'}},
headers=req_headers,
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertFalse(self.mock_notifier.called)
def test_nova_notifications_disabled(self):
cfg.CONF.set_override('notify_nova_on_port_data_changes', False)
self.app.post_json(
'/v2.0/networks.json',
params={'network': {'name': 'meh'}},
headers={'X-Project-Id': 'tenid'})
self.assertFalse(self.mock_notifier.called)
def test_post_put_delete_triggers_notification(self):
req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
response = self.app.post_json(
'/v2.0/networks.json',
params={'network': {'name': 'meh'}}, headers=req_headers)
self.assertEqual(201, response.status_int)
json_body = jsonutils.loads(response.body)
self.mock_notifier.assert_called_once_with('create', 'network', {},
json_body)
self.mock_notifier.reset_mock()
network_id = json_body['network']['id']
# NOTE(kevinbenton): the original passed into the notifier does
# not contain all of the fields of the object. Only those required
# by the policy engine are included.
orig = pe.fetch_resource(context.get_admin_context(),
'network', network_id)
response = self.app.put_json(
'/v2.0/networks/%s.json' % network_id,
params={'network': {'name': 'meh-2'}},
headers=req_headers)
self.assertEqual(200, response.status_int)
json_body = jsonutils.loads(response.body)
self.mock_notifier.assert_called_once_with('update', 'network',
orig, json_body)
self.mock_notifier.reset_mock()
orig = pe.fetch_resource(context.get_admin_context(),
'network', network_id)
response = self.app.delete(
'/v2.0/networks/%s.json' % network_id, headers=req_headers)
self.assertEqual(204, response.status_int)
# No need to validate data content sent to the notifier as it's just
# going to load the object from the database
self.mock_notifier.assert_called_once_with('delete', 'network', {},
{'network': orig})
def test_bulk_create_triggers_notifications(self):
req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
response = self.app.post_json(
'/v2.0/networks.json',
params={'networks': [{'name': 'meh_1'},
{'name': 'meh_2'}]},
headers=req_headers)
self.assertEqual(201, response.status_int)
json_body = jsonutils.loads(response.body)
item_1 = json_body['networks'][0]
item_2 = json_body['networks'][1]
self.assertEqual(
[mock.call('create', 'network', {}, {'network': item_1}),
mock.call('create', 'network', {}, {'network': item_2})],
self.mock_notifier.mock_calls)
class TestMetricsNotifierHook(test_functional.PecanFunctionalTest):
def setUp(self):
patcher = mock.patch('neutron.pecan_wsgi.hooks.notifier.NotifierHook.'
'_notifier')
self.mock_notifier = patcher.start().info
super(TestMetricsNotifierHook, self).setUp()
def test_post_put_delete_triggers_notification(self):
req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
payload = {'network': {'name': 'meh'}}
response = self.app.post_json(
'/v2.0/networks.json',
params=payload, headers=req_headers)
self.assertEqual(201, response.status_int)
json_body = jsonutils.loads(response.body)
self.assertEqual(
[mock.call(mock.ANY, 'network.create.start', payload),
mock.call(mock.ANY, 'network.create.end', json_body)],
self.mock_notifier.mock_calls)
self.mock_notifier.reset_mock()
network_id = json_body['network']['id']
payload = {'network': {'name': 'meh-2'}}
response = self.app.put_json(
'/v2.0/networks/%s.json' % network_id,
params=payload, headers=req_headers)
self.assertEqual(200, response.status_int)
json_body = jsonutils.loads(response.body)
# id should be in payload sent to notifier
payload['id'] = network_id
self.assertEqual(
[mock.call(mock.ANY, 'network.update.start', payload),
mock.call(mock.ANY, 'network.update.end', json_body)],
self.mock_notifier.mock_calls)
self.mock_notifier.reset_mock()
response = self.app.delete(
'/v2.0/networks/%s.json' % network_id, headers=req_headers)
self.assertEqual(204, response.status_int)
payload = {'network_id': network_id}
self.assertEqual(
[mock.call(mock.ANY, 'network.delete.start', payload),
mock.call(mock.ANY, 'network.delete.end', payload)],
self.mock_notifier.mock_calls)
def test_bulk_create_triggers_notification(self):
req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
payload = {'networks': [{'name': 'meh_1'}, {'name': 'meh_2'}]}
response = self.app.post_json(
'/v2.0/networks.json',
params=payload,
headers=req_headers)
self.assertEqual(201, response.status_int)
json_body = jsonutils.loads(response.body)
self.assertEqual(2, self.mock_notifier.call_count)
self.mock_notifier.assert_has_calls(
[mock.call(mock.ANY, 'network.create.start', payload),
mock.call(mock.ANY, 'network.create.end', json_body)])
def test_bad_create_doesnt_emit_end(self):
req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
payload = {'network': {'name': 'meh'}}
plugin = manager.NeutronManager.get_plugin()
with mock.patch.object(plugin, 'create_network',
side_effect=ValueError):
response = self.app.post_json(
'/v2.0/networks.json',
params=payload, headers=req_headers,
expect_errors=True)
self.assertEqual(500, response.status_int)
self.assertEqual(
[mock.call(mock.ANY, 'network.create.start', mock.ANY)],
self.mock_notifier.mock_calls)
def test_bad_update_doesnt_emit_end(self):
req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
payload = {'network': {'name': 'meh'}}
response = self.app.post_json(
'/v2.0/networks.json',
params=payload, headers=req_headers,
expect_errors=True)
self.assertEqual(201, response.status_int)
json_body = jsonutils.loads(response.body)
self.mock_notifier.reset_mock()
plugin = manager.NeutronManager.get_plugin()
with mock.patch.object(plugin, 'update_network',
side_effect=ValueError):
response = self.app.put_json(
'/v2.0/networks/%s.json' % json_body['network']['id'],
params=payload, headers=req_headers,
expect_errors=True)
self.assertEqual(500, response.status_int)
self.assertEqual(
[mock.call(mock.ANY, 'network.update.start', mock.ANY)],
self.mock_notifier.mock_calls)
def test_bad_delete_doesnt_emit_end(self):
req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
payload = {'network': {'name': 'meh'}}
response = self.app.post_json(
'/v2.0/networks.json',
params=payload, headers=req_headers,
expect_errors=True)
self.assertEqual(201, response.status_int)
json_body = jsonutils.loads(response.body)
self.mock_notifier.reset_mock()
plugin = manager.NeutronManager.get_plugin()
with mock.patch.object(plugin, 'delete_network',
side_effect=ValueError):
response = self.app.delete(
'/v2.0/networks/%s.json' % json_body['network']['id'],
headers=req_headers, expect_errors=True)
self.assertEqual(500, response.status_int)
self.assertEqual(
[mock.call(mock.ANY, 'network.delete.start', mock.ANY)],
self.mock_notifier.mock_calls)
| apache-2.0 | 8,203,198,585,067,558,000 | 44.505071 | 79 | 0.578764 | false |
neep305/swordfish | text_analysis/fileutil.py | 1 | 1183 | from konlpy.tag import Hannanum
from collections import Counter
import pandas as pd
import csv
import json
def read_localcsv(path):
result = pd.read_csv(path, encoding='UTF-8')
print(result)
return result
def get_json_data(path):
#r = requests.get(URL)
#data = r.text
RESULTS = {"children": []}
with open(path) as csvfile:
reader = csv.DictReader(csvfile)
for line in reader:
RESULTS['children'].append({
"name": line['Name'],
"symbol": line['Symbol'],
"symbol": line['Symbol'],
"price": line['lastsale'],
"net_change": line['netchange'],
"percent_change": line['pctchange'],
"volume": line['share_volume'],
"value": line['Nasdaq100_points']
})
# print(json.dumps(RESULTS))
return json.dumps(RESULTS)
def get_tags(text, ntags=50, multiplier=10):
h = Hannanum()
nouns = h.nouns(text)
count = Counter(nouns)
# for word,cnt in count.most_common(ntags):
# print(word,cnt)
return count
def get_csv_data(path, column):
# localcsv = read_localcsv(path)
with open(path) as csvfile:
reader = csv.DictReader(csvfile)
content = ''
for line in reader:
content += ' ' + line[column]
tags = get_tags(content)
return tags
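# Example usage (hypothetical path and column name; requires a CSV containing
# Korean text plus the KoNLPy/JPype dependencies needed by Hannanum):
#
#     tags = get_csv_data('data/articles.csv', 'content')
#     for noun, count in tags.most_common(20):
#         print(noun, count)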
| mit | -798,070,984,540,722,700 | 22.196078 | 45 | 0.667794 | false |
dtrodrigues/nifi-minifi-cpp | libminifi/test/script-tests/test_scripts/stateful_processor.py | 2 | 1602 | #
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def describe(processor):
processor.setDescription("Processor used for testing in ExecutePythonProcessorTests.cpp")
state = 0
class WriteCallback(object):
def process(self, output_stream):
global state
new_content = str(state).encode('utf-8')
output_stream.write(new_content)
state = state + 1
return len(new_content)
def onTrigger(context, session):
global state
log.info('Vrrm, vrrrm, processor is running, vrrrm!!')
# flow_file = session.get()
flow_file = session.create()
flow_file.setAttribute("filename", str(state))
log.info('created flow file: %s' % flow_file.getAttribute('filename'))
if flow_file is not None:
session.write(flow_file, WriteCallback())
session.transfer(flow_file, REL_SUCCESS)
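# Note: `state` lives at module scope, so it keeps incrementing across
# onTrigger() invocations for as long as the script stays loaded; `log` and
# REL_SUCCESS are expected to be provided as globals by the MiNiFi Python
# processor runtime rather than imported here.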
| apache-2.0 | 7,008,879,742,411,832,000 | 33.826087 | 93 | 0.714107 | false |
prymitive/upaas-admin | upaas_admin/features/cron.py | 1 | 1583 | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2014 by Łukasz Mierzwa
:contact: [email protected]
"""
from __future__ import unicode_literals
import logging
from django.utils.translation import ugettext as _
from upaas.config.base import (Config, ConfigurationError, StringEntry,
IntegerEntry)
from upaas_admin.features.base import Feature
log = logging.getLogger(__name__)
class CronEntryConfig(Config):
schema = {
"command": StringEntry(required=True),
"minute": IntegerEntry(min_value=0, max_value=59, default=-1),
"hour": IntegerEntry(min_value=0, max_value=23, default=-1),
"day": IntegerEntry(min_value=1, max_value=31, default=-1),
"month": IntegerEntry(min_value=1, max_value=12, default=-1),
"weekday": IntegerEntry(min_value=1, max_value=7, default=-1),
}
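    # An entry validating against this schema could look like the following
    # (command and times are illustrative); -1 means "any" and is passed
    # through verbatim to the uWSGI cron line built in parse_crons():
    #
    #     {"command": "python manage.py clearsessions", "minute": 30, "hour": 2}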
class CronFeature(Feature):
def parse_crons(self):
crons = []
for item in self.value:
try:
cron = CronEntryConfig(item)
except ConfigurationError as e:
log.error(_('Invalid cron configuration in {item}: '
'{e}').format(item=item, e=e))
else:
crons.append('cron = %d %d %d %d %d %s' % (
cron.minute, cron.hour, cron.day, cron.month, cron.weekday,
cron.command))
return crons
def update_vassal(self, application, options):
for cron in self.parse_crons():
options.append(cron)
return options
| gpl-3.0 | 2,626,893,547,294,586,400 | 28.296296 | 79 | 0.584071 | false |
Zerknechterer/pyload | module/plugins/hoster/GoogledriveCom.py | 1 | 1839 | # -*- coding: utf-8 -*
#
# Test links:
# https://drive.google.com/file/d/0B6RNTe4ygItBQm15RnJiTmMyckU/view?pli=1
import re
import urlparse
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
from module.utils import html_unescape
class GoogledriveCom(SimpleHoster):
__name__ = "GoogledriveCom"
__type__ = "hoster"
__version__ = "0.12"
__pattern__ = r'https?://(?:www\.)?(drive|docs)\.google\.com/(file/d/\w+|uc\?.*id=)'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
__description__ = """Drive.google.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zapp-brannigan", "[email protected]")]
DISPOSITION = False #: Remove in 0.4.10
NAME_PATTERN = r'(?:<title>|class="uc-name-size".*>)(?P<N>.+?)(?: - Google Drive</title>|</a> \()'
OFFLINE_PATTERN = r'align="center"><p class="errorMessage"'
LINK_FREE_PATTERN = r'"([^"]+uc\?.*?)"'
def setup(self):
self.multiDL = True
self.resumeDownload = True
self.chunkLimit = 1
def handleFree(self, pyfile):
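        # Two passes at most: if the first match points at an intermediate
        # page rather than a direct download, load it and search again for
        # the real link.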
for _i in xrange(2):
m = re.search(self.LINK_FREE_PATTERN, self.html)
if m is None:
self.error(_("Free download link not found"))
else:
link = html_unescape(m.group(1).decode('unicode-escape'))
if not urlparse.urlparse(link).scheme:
link = urlparse.urljoin("https://docs.google.com/", link)
direct_link = self.directLink(link, False)
if not direct_link:
self.html = self.load(link, decode=True)
else:
self.link = direct_link
break
getInfo = create_getInfo(GoogledriveCom)
| gpl-3.0 | -4,720,897,857,208,252,000 | 29.65 | 105 | 0.559543 | false |
abcdef123/stem | test/integ/descriptor/server_descriptor.py | 1 | 14214 | """
Integration tests for stem.descriptor.server_descriptor.
"""
from __future__ import with_statement
import datetime
import os
import unittest
import stem.control
import stem.descriptor.server_descriptor
import stem.exit_policy
import stem.version
import test.integ.descriptor
import test.runner
class TestServerDescriptor(unittest.TestCase):
def test_metrics_descriptor(self):
"""
Parses and checks our results against a server descriptor from metrics.
"""
descriptor_path = test.integ.descriptor.get_resource("example_descriptor")
descriptor_file = open(descriptor_path)
descriptor_file.readline() # strip header
descriptor_contents = descriptor_file.read()
descriptor_file.close()
expected_family = [
"$0CE3CFB1E9CC47B63EA8869813BF6FAB7D4540C1",
"$1FD187E8F69A9B74C9202DC16A25B9E7744AB9F6",
"$74FB5EFA6A46DE4060431D515DC9A790E6AD9A7C",
"$77001D8DA9BF445B0F81AA427A675F570D222E6A",
"$B6D83EC2D9E18B0A7A33428F8CFA9C536769E209",
"$D2F37F46182C23AB747787FD657E680B34EAF892",
"$E0BD57A11F00041A9789577C53A1B784473669E4",
"$E5E3E9A472EAF7BE9682B86E92305DB4C71048EF",
]
expected_onion_key = """-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAJv5IIWQ+WDWYUdyA/0L8qbIkEVH/cwryZWoIaPAzINfrw1WfNZGtBmg
skFtXhOHHqTRN4GPPrZsAIUOQGzQtGb66IQgT4tO/pj+P6QmSCCdTfhvGfgTCsC+
WPi4Fl2qryzTb3QO5r5x7T8OsG2IBUET1bLQzmtbC560SYR49IvVAgMBAAE=
-----END RSA PUBLIC KEY-----"""
expected_signing_key = """-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAKwvOXyztVKnuYvpTKt+nS3XIKeO8dVungi8qGoeS+6gkR6lDtGfBTjd
uE9UIkdAl9zi8/1Ic2wsUNHE9jiS0VgeupITGZY8YOyMJJ/xtV1cqgiWhq1dUYaq
51TOtUogtAPgXPh4J+V8HbFFIcCzIh3qCO/xXo+DSHhv7SSif1VpAgMBAAE=
-----END RSA PUBLIC KEY-----"""
expected_signature = """-----BEGIN SIGNATURE-----
dskLSPz8beUW7bzwDjR6EVNGpyoZde83Ejvau+5F2c6cGnlu91fiZN3suE88iE6e
758b9ldq5eh5mapb8vuuV3uO+0Xsud7IEOqfxdkmk0GKnUX8ouru7DSIUzUL0zqq
Qlx9HNCqCY877ztFRC624ja2ql6A2hBcuoYMbkHjcQ4=
-----END SIGNATURE-----"""
desc = stem.descriptor.server_descriptor.RelayDescriptor(descriptor_contents)
self.assertEquals("caerSidi", desc.nickname)
self.assertEquals("A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB", desc.fingerprint)
self.assertEquals("71.35.133.197", desc.address)
self.assertEquals(9001, desc.or_port)
self.assertEquals(None, desc.socks_port)
self.assertEquals(None, desc.dir_port)
self.assertEquals("Tor 0.2.1.30 on Linux x86_64", desc.platform)
self.assertEquals(stem.version.Version("0.2.1.30"), desc.tor_version)
self.assertEquals("Linux x86_64", desc.operating_system)
self.assertEquals(588217, desc.uptime)
self.assertEquals(datetime.datetime(2012, 3, 1, 17, 15, 27), desc.published)
self.assertEquals("www.atagar.com/contact", desc.contact)
self.assertEquals(["1", "2"], desc.link_protocols)
self.assertEquals(["1"], desc.circuit_protocols)
self.assertEquals(False, desc.hibernating)
self.assertEquals(False, desc.allow_single_hop_exits)
self.assertEquals(False, desc.extra_info_cache)
self.assertEquals("D225B728768D7EA4B5587C13A7A9D22EBBEE6E66", desc.extra_info_digest)
self.assertEquals(["2"], desc.hidden_service_dir)
self.assertEquals(expected_family, desc.family)
self.assertEquals(153600, desc.average_bandwidth)
self.assertEquals(256000, desc.burst_bandwidth)
self.assertEquals(104590, desc.observed_bandwidth)
self.assertEquals(stem.exit_policy.ExitPolicy("reject *:*"), desc.exit_policy)
self.assertEquals(expected_onion_key, desc.onion_key)
self.assertEquals(expected_signing_key, desc.signing_key)
self.assertEquals(expected_signature, desc.signature)
self.assertEquals([], desc.get_unrecognized_lines())
self.assertEquals("2C7B27BEAB04B4E2459D89CA6D5CD1CC5F95A689", desc.digest())
def test_old_descriptor(self):
"""
Parses a relay server descriptor from 2005.
"""
descriptor_path = test.integ.descriptor.get_resource("old_descriptor")
descriptor_file = open(descriptor_path)
descriptor_file.readline() # strip header
descriptor_contents = descriptor_file.read()
descriptor_file.close()
desc = stem.descriptor.server_descriptor.RelayDescriptor(descriptor_contents)
self.assertEquals("krypton", desc.nickname)
self.assertEquals("3E2F63E2356F52318B536A12B6445373808A5D6C", desc.fingerprint)
self.assertEquals("212.37.39.59", desc.address)
self.assertEquals(8000, desc.or_port)
self.assertEquals(None, desc.socks_port)
self.assertEquals(None, desc.dir_port)
self.assertEquals("Tor 0.1.0.14 on FreeBSD i386", desc.platform)
self.assertEquals(stem.version.Version("0.1.0.14"), desc.tor_version)
self.assertEquals("FreeBSD i386", desc.operating_system)
self.assertEquals(64820, desc.uptime)
self.assertEquals(datetime.datetime(2005, 12, 16, 18, 1, 3), desc.published)
self.assertEquals(None, desc.contact)
self.assertEquals(None, desc.link_protocols)
self.assertEquals(None, desc.circuit_protocols)
self.assertEquals(True, desc.hibernating)
self.assertEquals(False, desc.allow_single_hop_exits)
self.assertEquals(False, desc.extra_info_cache)
self.assertEquals(None, desc.extra_info_digest)
self.assertEquals(None, desc.hidden_service_dir)
self.assertEquals([], desc.family)
self.assertEquals(102400, desc.average_bandwidth)
self.assertEquals(10485760, desc.burst_bandwidth)
self.assertEquals(0, desc.observed_bandwidth)
self.assertEquals(datetime.datetime(2005, 12, 16, 18, 0, 48), desc.read_history_end)
self.assertEquals(900, desc.read_history_interval)
self.assertEquals(datetime.datetime(2005, 12, 16, 18, 0, 48), desc.write_history_end)
self.assertEquals(900, desc.write_history_interval)
self.assertEquals([], desc.get_unrecognized_lines())
# The read-history and write-history lines are pretty long so just checking
# the initial contents for the line and parsed values.
read_values_start = [20774, 489973, 510022, 511163, 20949]
self.assertEquals(read_values_start, desc.read_history_values[:5])
write_values_start = [81, 8848, 8927, 8927, 83, 8848, 8931, 8929, 81, 8846]
self.assertEquals(write_values_start, desc.write_history_values[:10])
def test_cached_descriptor(self):
"""
Parses the cached descriptor file in our data directory, checking that it
doesn't raise any validation issues and looking for unrecognized descriptor
additions.
"""
    # lengthy test and unaffected by targets, so only run once
if test.runner.only_run_once(self, "test_cached_descriptor"): return
descriptor_path = test.runner.get_runner().get_test_dir("cached-descriptors")
if not os.path.exists(descriptor_path):
test.runner.skip(self, "(no cached descriptors)")
return
with open(descriptor_path) as descriptor_file:
for desc in stem.descriptor.server_descriptor.parse_file(descriptor_file):
        # the following attributes are deprecated and should not appear in the wild
self.assertEquals(None, desc.read_history_end)
self.assertEquals(None, desc.write_history_end)
self.assertEquals(None, desc.eventdns)
self.assertEquals(None, desc.socks_port)
unrecognized_lines = desc.get_unrecognized_lines()
if unrecognized_lines:
# TODO: This isn't actually a problem, and rather than failing we
# should alert the user about these entries at the end of the tests
          # (along with new events, getinfo options, and such). For now, though,
          # nothing in practice seems to trigger this, so we fail here to get
          # our attention if it does.
self.fail("Unrecognized descriptor content: %s" % unrecognized_lines)
def test_non_ascii_descriptor(self):
"""
Parses a descriptor with non-ascii content.
"""
descriptor_path = test.integ.descriptor.get_resource("non-ascii_descriptor")
descriptor_file = open(descriptor_path)
descriptor_file.readline() # strip header
descriptor_contents = descriptor_file.read()
descriptor_file.close()
expected_contact = "2048R/F171EC1F Johan Bl\xc3\xa5b\xc3\xa4ck \xe3\x81\x93\xe3\x82\x93\xe3\x81\xab\xe3\x81\xa1\xe3\x81\xaf"
desc = stem.descriptor.server_descriptor.RelayDescriptor(descriptor_contents)
self.assertEquals("torrelay389752132", desc.nickname)
self.assertEquals("5D47E91A1F7421A4E3255F4D04E534E9A21407BB", desc.fingerprint)
self.assertEquals("130.243.230.116", desc.address)
self.assertEquals(9001, desc.or_port)
self.assertEquals(None, desc.socks_port)
self.assertEquals(None, desc.dir_port)
self.assertEquals("Tor 0.2.2.35 (git-4f42b0a93422f70e) on Linux x86_64", desc.platform)
self.assertEquals(stem.version.Version("0.2.2.35"), desc.tor_version)
self.assertEquals("Linux x86_64", desc.operating_system)
self.assertEquals(3103848, desc.uptime)
self.assertEquals(datetime.datetime(2012, 3, 21, 16, 28, 14), desc.published)
self.assertEquals(expected_contact, desc.contact)
self.assertEquals(["1", "2"], desc.link_protocols)
self.assertEquals(["1"], desc.circuit_protocols)
self.assertEquals(False, desc.hibernating)
self.assertEquals(False, desc.allow_single_hop_exits)
self.assertEquals(False, desc.extra_info_cache)
self.assertEquals("51E9FD0DA7C235D8C0250BAFB6E1ABB5F1EF9F04", desc.extra_info_digest)
self.assertEquals(["2"], desc.hidden_service_dir)
self.assertEquals([], desc.family)
self.assertEquals(81920, desc.average_bandwidth)
self.assertEquals(102400, desc.burst_bandwidth)
self.assertEquals(84275, desc.observed_bandwidth)
self.assertEquals(stem.exit_policy.ExitPolicy("reject *:*"), desc.exit_policy)
self.assertEquals([], desc.get_unrecognized_lines())
def test_cr_in_contact_line(self):
"""
Parses a descriptor with a huge contact line containing anomalous carriage
returns ('\r' entries).
"""
descriptor_path = test.integ.descriptor.get_resource("cr_in_contact_line")
descriptor_file = open(descriptor_path)
descriptor_file.readline() # strip header
descriptor_contents = descriptor_file.read()
descriptor_file.close()
desc = stem.descriptor.server_descriptor.RelayDescriptor(descriptor_contents)
self.assertEquals("pogonip", desc.nickname)
self.assertEquals("6DABD62BC65D4E6FE620293157FC76968DAB9C9B", desc.fingerprint)
self.assertEquals("75.5.248.48", desc.address)
# the contact info block is huge so just checking the start and end,
# including some of the embedded carriage returns
contact_start = "jie1 at pacbell dot net -----BEGIN PGP PUBLIC KEY BLOCK-----\rVersion:"
contact_end = "YFRk3NhCY=\r=Xaw3\r-----END PGP PUBLIC KEY BLOCK-----"
self.assertTrue(desc.contact.startswith(contact_start))
self.assertTrue(desc.contact.endswith(contact_end))
def test_negative_uptime(self):
"""
Parses a descriptor where we are tolerant of a negative uptime, and another
where we shouldn't be.
"""
descriptor_path = test.integ.descriptor.get_resource("negative_uptime")
descriptor_file = open(descriptor_path)
descriptor_file.readline() # strip header
descriptor_contents = descriptor_file.read()
descriptor_file.close()
desc = stem.descriptor.server_descriptor.RelayDescriptor(descriptor_contents)
self.assertEquals("TipTor", desc.nickname)
self.assertEquals("137962D4931DBF08A24E843288B8A155D6D2AEDD", desc.fingerprint)
self.assertEquals("62.99.247.83", desc.address)
    # bump the relay version past the point where a negative uptime is no
    # longer tolerated, so parsing should now fail
descriptor_contents = descriptor_contents.replace("Tor 0.1.1.25", "Tor 0.1.2.7")
self.assertRaises(ValueError, stem.descriptor.server_descriptor.RelayDescriptor, descriptor_contents)
def test_bridge_descriptor(self):
"""
Parses a bridge descriptor.
"""
descriptor_path = test.integ.descriptor.get_resource("bridge_descriptor")
descriptor_file = open(descriptor_path)
descriptor_file.readline() # strip header
descriptor_contents = descriptor_file.read()
descriptor_file.close()
expected_family = [
"$CE396C72A3D0880F74C064FEA79D68C15BD380B9",
"$AB8B00C00B1347BA80A88E548FAC9EDF701D7D0E",
"$8C8A470D7C23151665A7B84E75E89FCC205A3304",
]
desc = stem.descriptor.server_descriptor.BridgeDescriptor(descriptor_contents)
self.assertEquals("Unnamed", desc.nickname)
self.assertEquals("AE54E28ED069CDF45F3009F963EE3B3D6FA26A2E", desc.fingerprint)
self.assertEquals("10.45.227.253", desc.address)
self.assertEquals(9001, desc.or_port)
self.assertEquals(None, desc.socks_port)
self.assertEquals(None, desc.dir_port)
self.assertEquals("Tor 0.2.3.12-alpha (git-800942b4176ca31c) on Linux x86_64", desc.platform)
self.assertEquals(stem.version.Version("0.2.3.12-alpha"), desc.tor_version)
self.assertEquals("Linux x86_64", desc.operating_system)
self.assertEquals(186, desc.uptime)
self.assertEquals(datetime.datetime(2012, 3, 22, 17, 34, 38), desc.published)
self.assertEquals("somebody", desc.contact)
self.assertEquals(["1", "2"], desc.link_protocols)
self.assertEquals(["1"], desc.circuit_protocols)
self.assertEquals(False, desc.hibernating)
self.assertEquals(False, desc.allow_single_hop_exits)
self.assertEquals(False, desc.extra_info_cache)
self.assertEquals("134F81F7A0D270B85FCD481DD10CEA34BA7B15C9", desc.extra_info_digest)
self.assertEquals(["2"], desc.hidden_service_dir)
self.assertEquals(expected_family, desc.family)
self.assertEquals(409600, desc.average_bandwidth)
self.assertEquals(819200, desc.burst_bandwidth)
self.assertEquals(5120, desc.observed_bandwidth)
self.assertEquals(stem.exit_policy.ExitPolicy("reject *:*"), desc.exit_policy)
self.assertEquals("006FD96BA35E7785A6A3B8B75FE2E2435A13BDB4", desc.digest())
self.assertEquals([], desc.get_unrecognized_lines())
| lgpl-3.0 | -4,238,564,453,306,384,400 | 43.981013 | 128 | 0.728015 | false |
jgrundstad/viewer | admin.py | 1 | 1761 | from django.contrib import admin
from models import Project, Bnid, Sample, Study, Caller, Report, Variant, \
Genome, Contact, SharedData
class ProjectAdmin(admin.ModelAdmin):
model = Project
list_display = ('id', 'name', 'description', 'creation_date')
filter_horizontal = ('user', )
class BnidAdmin(admin.ModelAdmin):
model = Bnid
class SampleAdmin(admin.ModelAdmin):
model = Sample
list_display =('id', 'name')
class CallerAdmin(admin.ModelAdmin):
display = ['name']
class ReportAdmin(admin.ModelAdmin):
model = Report
list_display = ('caller', 'report_file', 'upload_date')
class StudyAdmin(admin.ModelAdmin):
model = Study
list_display = ('name', 'description')
class GenomeAdmin(admin.ModelAdmin):
model = Genome
list_display = ('id', 'name')
class VariantAdmin(admin.ModelAdmin):
model = Variant
list_display = ('__str__', 'report', 'gene_name', 'chrom', 'pos', 'ref', 'alt',
'normal_ref_count', 'normal_alt_count', 'tumor_ref_count',
'tumor_alt_count')
class ContactAdmin(admin.ModelAdmin):
model = Contact
list_display = ('full_name', 'email', 'project')
class SharedDataAdmin(admin.ModelAdmin):
model = SharedData
list_display = ('uuid', 'field_lookup', 'user', 'creation_date', 'inactive_date')
admin.site.register(Project, ProjectAdmin)
admin.site.register(Sample, SampleAdmin)
admin.site.register(Bnid, BnidAdmin)
admin.site.register(Study, StudyAdmin)
admin.site.register(Caller, CallerAdmin)
admin.site.register(Report, ReportAdmin)
admin.site.register(Genome, GenomeAdmin)
admin.site.register(Variant, VariantAdmin)
admin.site.register(Contact, ContactAdmin)
admin.site.register(SharedData, SharedDataAdmin) | apache-2.0 | -6,679,012,992,260,252,000 | 26.968254 | 85 | 0.693924 | false |
yongshengwang/builthue | apps/oozie/src/oozie/models.py | 1 | 80907 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import copy
import logging
import re
import StringIO
import time
import zipfile
from datetime import datetime, timedelta
from string import Template
from itertools import chain
from django.db import models
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.forms.models import inlineformset_factory
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from desktop.log.access import access_warn
from desktop.lib import django_mako
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.json_utils import JSONEncoderForHTML
from desktop.models import Document
from hadoop.fs.exceptions import WebHdfsException
from hadoop.fs.hadoopfs import Hdfs
from liboozie.submittion import Submission
from liboozie.submittion import create_directories
from oozie.conf import REMOTE_SAMPLE_DIR
from oozie.utils import utc_datetime_format
from oozie.timezones import TIMEZONES
LOG = logging.getLogger(__name__)
PATH_MAX = 512
name_validator = RegexValidator(regex='^[a-zA-Z_][\-_a-zA-Z0-9]{1,39}$',
                                message=_('Enter a valid value: combination of 2 - 40 letters and digits starting with a letter'))
# To sync in worklow.models.js
DEFAULT_SLA = [
{'key': 'enabled', 'value': False},
{'key': 'nominal-time', 'value': ''},
{'key': 'should-start', 'value': ''},
{'key': 'should-end', 'value': ''},
{'key': 'max-duration', 'value': ''},
{'key': 'alert-events', 'value': ''},
{'key': 'alert-contact', 'value': ''},
{'key': 'notification-msg', 'value': ''},
{'key': 'upstream-apps', 'value': ''},
]
class JobManager(models.Manager):
def can_read(self, user, job_id):
job = Job.objects.select_related().get(pk=job_id).get_full_node()
return job.can_read(user)
def can_read_or_exception(self, request, job_id, exception_class=PopupException):
if job_id is None:
return
try:
job = Job.objects.select_related().get(pk=job_id).get_full_node()
if job.can_read(request.user):
return job
else:
message = _("Permission denied. %(username)s does not have the permissions required to access job %(id)s") % \
{'username': request.user.username, 'id': job.id}
access_warn(request, message)
request.error(message)
raise exception_class(message)
except Job.DoesNotExist:
raise exception_class(_('job %(id)s does not exist') % {'id': job_id})
def can_edit_or_exception(self, request, job, exception_class=PopupException):
if job.is_editable(request.user):
return True
else:
      raise exception_class(_('Not allowed to modify this job'))
class Job(models.Model):
"""
Base class for Oozie Workflows, Coordinators and Bundles.
"""
owner = models.ForeignKey(User, db_index=True, verbose_name=_t('Owner'), help_text=_t('Person who can modify the job.')) # Deprecated
name = models.CharField(max_length=40, blank=False, validators=[name_validator], # Deprecated
help_text=_t('Name of the job, which must be unique per user.'), verbose_name=_t('Name'))
description = models.CharField(max_length=1024, blank=True, verbose_name=_t('Description'), # Deprecated
help_text=_t('The purpose of the job.'))
last_modified = models.DateTimeField(auto_now=True, db_index=True, verbose_name=_t('Last modified'))
schema_version = models.CharField(max_length=128, verbose_name=_t('Schema version'),
help_text=_t('The version of the XML schema used to talk to Oozie.'))
deployment_dir = models.CharField(max_length=1024, blank=True, verbose_name=_t('HDFS deployment directory'),
help_text=_t('The path on the HDFS where all the workflows and '
'dependencies must be uploaded.'))
is_shared = models.BooleanField(default=False, db_index=True, verbose_name=_t('Is shared'), # Deprecated
help_text=_t('Enable other users to have access to this job.'))
parameters = models.TextField(default='[{"name":"oozie.use.system.libpath","value":"true"}]', verbose_name=_t('Oozie parameters'),
help_text=_t('Parameters used at the submission time (e.g. market=US, oozie.use.system.libpath=true).'))
is_trashed = models.BooleanField(default=False, db_index=True, verbose_name=_t('Is trashed'), blank=True, # Deprecated
help_text=_t('If this job is trashed.'))
doc = generic.GenericRelation(Document, related_name='oozie_doc')
data = models.TextField(blank=True, default=json.dumps({})) # e.g. data=json.dumps({'sla': [python data], ...})
objects = JobManager()
unique_together = ('owner', 'name')
def delete(self, skip_trash=False, *args, **kwargs):
if skip_trash:
self.doc.all().delete()
return super(Job, self).delete(*args, **kwargs)
else:
for job in self.doc.all():
job.send_to_trash()
return self
def restore(self):
self.doc.get().restore_from_trash()
return self
def save(self):
super(Job, self).save()
if not self.deployment_dir:
default_dir = Hdfs.join(REMOTE_SAMPLE_DIR.get(), '_%s_-oozie-%s-%s' % (self.owner.username, self.id, time.time()))
self.deployment_dir = default_dir
super(Job, self).save()
def is_deployed(self, fs):
return self.deployment_dir != '' and fs.exists(self.deployment_dir)
def __str__(self):
res = '%s - %s' % (force_unicode(self.name), self.owner)
return force_unicode(res)
def get_full_node(self):
try:
return self.workflow
except Workflow.DoesNotExist:
pass
try:
return self.coordinator
except Coordinator.DoesNotExist:
pass
try:
return self.bundle
except Bundle.DoesNotExist:
pass
def get_type(self):
return self.get_full_node().get_type()
def get_absolute_url(self):
return self.get_full_node().get_absolute_url()
def get_parameters(self):
return json.loads(self.parameters)
def add_parameter(self, name, value):
oozie_parameters = self.get_parameters()
oozie_parameters.append({"name": name, "value": value})
self.parameters = json.dumps(oozie_parameters)
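  # For example, add_parameter('market', 'US') on a job that still has the
  # default parameters stores:
  #   [{"name": "oozie.use.system.libpath", "value": "true"},
  #    {"name": "market", "value": "US"}]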
@property
def parameters_escapejs(self):
return self._escapejs_parameters_list(self.parameters)
def _escapejs_parameters_list(self, parameters):
return json.dumps(json.loads(parameters), cls=JSONEncoderForHTML)
@property
def status(self):
# TODO
if self.is_shared:
return _('shared')
else:
return _('personal')
def find_all_parameters(self):
params = self.find_parameters()
if hasattr(self, 'sla') and self.sla_enabled:
for param in find_json_parameters(self.sla):
if param not in params:
params[param] = ''
for param in self.get_parameters():
params[param['name'].strip()] = param['value']
return [{'name': name, 'value': value} for name, value in params.iteritems()]
def can_read(self, user):
try:
return self.doc.get().can_read(user)
except Exception, e:
LOG.error('can_read failed because the object has more than one document: %s' % self.doc.all())
raise e
def is_editable(self, user):
return user.is_superuser or self.owner == user or self.doc.get().can_write(user)
@property
def data_dict(self):
if not self.data:
self.data = json.dumps({})
data_python = json.loads(self.data)
# Backward compatibility
if 'sla' not in data_python:
data_python['sla'] = copy.deepcopy(DEFAULT_SLA)
if 'credentials' not in data_python:
data_python['credentials'] = []
return data_python
@property
def data_js_escaped(self):
return json.dumps(self.data_dict, cls=JSONEncoderForHTML)
@property
def sla(self):
return self.data_dict['sla']
@sla.setter
def sla(self, sla):
data_ = self.data_dict
data_['sla'] = sla
self.data = json.dumps(data_)
@property
def sla_enabled(self):
    return self.sla[0]['value']  # the first entry is 'enabled'
class WorkflowManager(models.Manager):
SCHEMA_VERSION = {
'0.4': 'uri:oozie:workflow:0.4',
'0.5': 'uri:oozie:workflow:0.5'
}
def new_workflow(self, owner):
workflow = Workflow(owner=owner, schema_version=WorkflowManager.SCHEMA_VERSION['0.4'])
kill = Kill(name='kill', workflow=workflow, node_type=Kill.node_type)
end = End(name='end', workflow=workflow, node_type=End.node_type)
start = Start(name='start', workflow=workflow, node_type=Start.node_type)
to = Link(parent=start, child=end, name='to')
related = Link(parent=start, child=end, name='related')
workflow.start = start
workflow.end = end
return workflow
def initialize(self, workflow, fs=None):
Kill.objects.create(name='kill', workflow=workflow, node_type=Kill.node_type)
end = End.objects.create(name='end', workflow=workflow, node_type=End.node_type)
start = Start.objects.create(name='start', workflow=workflow, node_type=Start.node_type)
link = Link(parent=start, child=end, name='to')
link.save()
Link.objects.create(parent=start, child=end, name='related')
workflow.start = start
workflow.end = end
workflow.save()
Document.objects.link(workflow, owner=workflow.owner, name=workflow.name, description=workflow.description)
if fs:
self.check_workspace(workflow, fs)
def check_workspace(self, workflow, fs):
create_directories(fs, [REMOTE_SAMPLE_DIR.get()])
create_directories(fs)
if workflow.is_shared:
perms = 0755
else:
perms = 0711
Submission(workflow.owner, workflow, fs, None, {})._create_dir(workflow.deployment_dir, perms=perms)
def destroy(self, workflow, fs):
Submission(workflow.owner, workflow, fs, None, {}).remove_deployment_dir()
try:
workflow.coordinator_set.update(workflow=None) # In Django 1.3 could do ON DELETE set NULL
except:
pass
workflow.save()
workflow.delete(skip_trash=True)
def managed(self):
return self.filter(managed=True)
def unmanaged(self):
return self.filter(managed=False)
class Workflow(Job):
is_single = models.BooleanField(default=False)
start = models.ForeignKey('Start', related_name='start_workflow', blank=True, null=True)
end = models.ForeignKey('End', related_name='end_workflow', blank=True, null=True)
job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'),
help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. '
'Properties specified in the Job Properties element override properties specified in the '
'files specified in the Job XML element.'))
job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'),
help_text=_t('Job configuration properties used by all the actions of the workflow '
'(e.g. mapred.job.queue.name=production)'))
managed = models.BooleanField(default=True)
objects = WorkflowManager()
HUE_ID = 'hue-id-w'
ICON = '/oozie/static/art/icon_oozie_workflow_48.png'
METADATA_FORMAT_VERSION = "0.0.1"
def get_type(self):
return 'workflow'
def get_properties(self):
return json.loads(self.job_properties)
def clone(self, fs, new_owner=None):
source_deployment_dir = self.deployment_dir # capture now; deployment_dir is reset on the copy below
nodes = self.node_set.all()
links = Link.objects.filter(parent__workflow=self)
name = self.name + '-copy'
if new_owner is not None:
owner = new_owner
else:
owner = self.owner
copy_doc = self.doc.get().copy(name=name, owner=owner)
copy = self
copy.pk = None
copy.id = None
copy.name = name
copy.deployment_dir = ''
copy.owner = owner
copy.save()
copy.doc.all().delete()
copy.doc.add(copy_doc)
old_nodes_mapping = {}
for node in nodes:
prev_id = node.id
node = node.get_full_node()
node.pk = None
node.id = None
node.workflow = copy
node.save()
old_nodes_mapping[prev_id] = node
for link in links:
link.pk = None
link.id = None
link.parent = old_nodes_mapping[link.parent.id]
link.child = old_nodes_mapping[link.child.id]
link.save()
copy.start = old_nodes_mapping[self.start.id]
copy.end = old_nodes_mapping[self.end.id]
copy.save()
try:
if copy.is_shared:
perms = 0755
else:
perms = 0711
fs.copy_remote_dir(source_deployment_dir, copy.deployment_dir, owner=copy.owner, dir_mode=perms)
except WebHdfsException, e:
msg = _('The copy of the deployment directory failed: %s.') % e
LOG.error(msg)
raise PopupException(msg)
# Reload workflow from DB... clears relationship cache
copy = Workflow.objects.get(id=copy.id)
return copy
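# Usage sketch (names assumed, not defined in this module):
#   cloned = workflow.clone(fs, new_owner=request.user)  # copies nodes, links and the deployment directory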
@property
def job_properties_escapejs(self):
return self._escapejs_parameters_list(self.job_properties)
def has_cycle(self):
"""
Topological sort for detecting cycles in the directed graph.
"""
queue = set([self.start])
removed_edges = set()
while queue:
node = queue.pop()
edges = set(node.get_children_links())
for edge in edges:
removed_edges.add(edge)
# The child has no other unprocessed incoming edges, so it can be visited next
if not set(edge.child.get_parent_links()) - removed_edges:
queue.add(edge.child)
graph_edges = set([edge for node in self.node_set.all() for edge in node.get_children_links()])
return len(graph_edges - removed_edges) > 0 # any edge left unremoved means the graph contains a cycle
def find_parameters(self):
params = set()
if self.sla_enabled:
for param in find_json_parameters(self.sla):
params.add(param)
for node in self.node_list:
if hasattr(node, 'find_parameters'):
params.update(node.find_parameters())
return dict([(param, '') for param in list(params)])
@property
def actions(self):
return Action.objects.filter(workflow=self, node_type__in=Action.types)
@property
def node_list(self):
"""Return a flatten node list ordered by the hierarchy of the nodes in the workflow"""
def flatten(nodes):
flat = []
if type(nodes) == list:
for node in nodes:
flat.extend(flatten(node))
else:
flat.append(nodes)
return flat
def from_iterable(iterables):
# Backport of itertools.chain.from_iterable (Python 2.6+): from_iterable(['ABC', 'DEF']) --> A B C D E F
for it in iterables:
for element in it:
yield element
return list(chain(from_iterable([flatten(row) for row in self.get_hierarchy()])))
@classmethod
def get_application_path_key(cls):
return 'oozie.wf.application.path'
@classmethod
def get_application_filename(cls):
return 'workflow.xml'
def get_absolute_url(self):
if self.doc.only('extra').get().extra == 'jobsub':
return '/jobsub/#edit-design/%s' % self.id
else:
return reverse('oozie:edit_workflow', kwargs={'workflow': self.id}) + '#editWorkflow'
def get_hierarchy(self):
node = Start.objects.get(workflow=self) # Uncached version of start.
kill = Kill.objects.get(workflow=node.workflow)
# Special case: manage error email actions separately
try:
kill_nodes = [Link.objects.filter(child=kill).get(name='ok').parent, kill]
except Link.DoesNotExist:
kill_nodes = [kill]
return self.get_hierarchy_rec(node=node) + [kill_nodes, [End.objects.get(workflow=node.workflow)]]
def get_hierarchy_rec(self, node=None):
if node is None:
node = self.start
if node.id is None:
return []
node = node.get_full_node()
parents = node.get_parents()
if isinstance(node, End):
return [] # Not returning the end node
elif isinstance(node, Decision):
children = node.get_children('start')
return [[node] + [[self.get_hierarchy_rec(node=child) for child in children],
node.get_child_end()]] + self.get_hierarchy_rec(node.get_child_end().get_child('to'))
elif isinstance(node, DecisionEnd):
return []
elif isinstance(node, Fork):
children = node.get_children('start')
return [[node] + [[self.get_hierarchy_rec(node=child) for child in children],
node.get_child_join()]] + self.get_hierarchy_rec(node.get_child_join().get_child('to'))
elif isinstance(node, Join):
return []
else:
child = Link.objects.filter(parent=node).exclude(name__in=['related', 'kill', 'error'])[0].child
return [node] + self.get_hierarchy_rec(child)
def gen_status_graph(self, oozie_workflow):
from oozie.forms import NodeMetaForm # Circular dependency
actions = oozie_workflow.get_working_actions()
controls = oozie_workflow.get_control_flow_actions()
WorkflowFormSet = inlineformset_factory(Workflow, Node, form=NodeMetaForm, max_num=0, can_order=False, can_delete=False)
forms = WorkflowFormSet(instance=self).forms
template = 'editor/gen/workflow-graph-status.xml.mako'
index = dict([(form.instance.id, form) for form in forms])
actions_index = dict([(action.name, action) for action in actions])
controls_index = dict([(control.name.strip(':'), control) for control in controls])
return django_mako.render_to_string(template, {'nodes': self.get_hierarchy(), 'index': index, 'actions': actions_index, 'controls': controls_index})
@classmethod
def gen_status_graph_from_xml(cls, user, oozie_workflow):
from oozie.importlib.workflows import import_workflow # Circular dependency
try:
workflow = Workflow.objects.new_workflow(user)
workflow.save()
try:
import_workflow(workflow, oozie_workflow.definition)
graph = workflow.gen_status_graph(oozie_workflow)
return graph, workflow.node_list
except Exception, e:
LOG.warn('Workflow %s could not be converted to a graph: %s' % (oozie_workflow.id, e))
finally:
if workflow.pk is not None:
workflow.delete(skip_trash=True)
return None, []
def to_xml(self, mapping=None):
if mapping is None:
mapping = {}
tmpl = 'editor/gen/workflow.xml.mako'
xml = re.sub(re.compile('\s*\n+', re.MULTILINE), '\n', django_mako.render_to_string(tmpl, {'workflow': self, 'mapping': mapping}))
return force_unicode(xml)
def compress(self, mapping=None, fp=StringIO.StringIO()):
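# Caveat: the default StringIO buffer is created once at import time and is shared by every call that omits fp.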
metadata = {
'version': Workflow.METADATA_FORMAT_VERSION,
'nodes': {},
'attributes': {
'description': self.description,
'deployment_dir': self.deployment_dir
}
}
for node in self.node_list:
if hasattr(node, 'jar_path'):
metadata['nodes'][node.name] = {
'attributes': {
'jar_path': node.jar_path
}
}
xml = self.to_xml(mapping=mapping)
zfile = zipfile.ZipFile(fp, 'w')
zfile.writestr("workflow.xml", smart_str(xml))
zfile.writestr("workflow-metadata.json", smart_str(json.dumps(metadata)))
zfile.close()
return fp
@classmethod
def decompress(cls, fp):
zfile = zipfile.ZipFile(fp, 'r')
metadata_json = zfile.read('workflow-metadata.json')
metadata = json.loads(metadata_json)
workflow_xml = zfile.read('workflow.xml')
return workflow_xml, metadata
@property
def sla_workflow_enabled(self):
return self.sla_enabled or any([node.sla_enabled for node in self.node_list if hasattr(node, 'sla_enabled')])
@property
def credentials(self):
sub_lists = [node.credentials for node in self.node_list if hasattr(node, 'credentials')]
return set([item['name'] for l in sub_lists for item in l if item['value']])
class Link(models.Model):
# Links to exclude when using get_children_link(), get_parent_links() in the API
META_LINKS = ('related',)
parent = models.ForeignKey('Node', related_name='child_node')
child = models.ForeignKey('Node', related_name='parent_node', verbose_name='')
name = models.CharField(max_length=40)
comment = models.CharField(max_length=1024, default='', blank=True)
def __unicode__(self):
return '%s %s %s' % (self.parent, self.child, self.name)
class Node(models.Model):
"""
Base class for the Oozie WorkflowAction or ControlFlow Nodes.
http://nightly.cloudera.com/cdh4/cdh/4/oozie-3.1.3-cdh4.0.0-SNAPSHOT/WorkflowFunctionalSpec.html#a3_Workflow_Nodes
The Node model is an abstract base class. All concrete actions derive from it.
And it provides something for the Action or ControlFlow to reference.
See https://docs.djangoproject.com/en/dev/topics/db/models/#multi-table-inheritance
"""
PARAM_FIELDS = ()
name = models.CharField(max_length=40, validators=[name_validator], verbose_name=_t('Name'),
help_text=_t('Name of the action, which must be unique by workflow.'))
description = models.CharField(max_length=1024, blank=True, default='', verbose_name=_t('Description'),
help_text=_t('The purpose of the action.'))
node_type = models.CharField(max_length=64, blank=False, verbose_name=_t('Type'),
help_text=_t('The type of action (e.g. MapReduce, Pig...)'))
workflow = models.ForeignKey(Workflow)
children = models.ManyToManyField('self', related_name='parents', symmetrical=False, through=Link)
data = models.TextField(blank=True, default=json.dumps({}))
unique_together = ('workflow', 'name')
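# get_full_node() downcasts this base row to its concrete subclass via Django's multi-table inheritance accessors (self.mapreduce, self.pig, ...).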
def get_full_node(self):
if self.node_type == Mapreduce.node_type:
node = self.mapreduce
elif self.node_type == Pig.node_type:
node = self.pig
elif self.node_type == Hive.node_type:
node = self.hive
elif self.node_type == Sqoop.node_type:
node = self.sqoop
elif self.node_type == Ssh.node_type:
node = self.ssh
elif self.node_type == Shell.node_type:
node = self.shell
elif self.node_type == DistCp.node_type:
node = self.distcp
elif self.node_type == Fs.node_type:
node = self.fs
elif self.node_type == Email.node_type:
node = self.email
elif self.node_type == SubWorkflow.node_type:
node = self.subworkflow
elif self.node_type == Streaming.node_type:
node = self.streaming
elif self.node_type == Java.node_type:
node = self.java
elif self.node_type == Generic.node_type:
node = self.generic
elif self.node_type == Start.node_type:
node = self.start
elif self.node_type == End.node_type:
node = self.end
elif self.node_type == Kill.node_type:
node = self.kill
elif self.node_type == Fork.node_type:
node = self.fork
elif self.node_type == Join.node_type:
node = self.join
elif self.node_type == Decision.node_type:
node = self.decision
elif self.node_type == DecisionEnd.node_type:
node = self.decisionend
else:
raise Exception(_('Unknown Node type: %s. Was it set at its creation?') % (self.node_type,))
return node
def find_parameters(self):
return find_parameters(self, self.PARAM_FIELDS)
def __unicode__(self):
if self.name != '':
return '%s' % self.name
else:
return '%s-%s' % (self.node_type, self.id)
def to_xml(self, mapping=None):
if mapping is None:
mapping = {}
node = self.get_full_node()
data = {
'node': node,
'mapping': mapping
}
return django_mako.render_to_string(node.get_template_name(), data)
# Can't use through relation directly with this Django version?
# https://docs.djangoproject.com/en/1.2/topics/db/models/#intermediary-manytomany
def get_link(self, name=None):
if name is None:
return Link.objects.exclude(name__in=Link.META_LINKS).get(parent=self)
else:
return Link.objects.exclude(name__in=Link.META_LINKS).get(parent=self, name=name)
def get_child_link(self, name=None):
return self.get_link(name)
def get_child(self, name=None):
"""Includes DecisionEnd nodes"""
return self.get_link(name).child.get_full_node()
def get_oozie_child(self, name=None):
"""Resolves DecisionEnd nodes"""
child = self.get_link(name).child.get_full_node()
if child and child.node_type == DecisionEnd.node_type:
child = child.get_oozie_child('to')
return child
def get_children(self, name=None):
if name is not None:
return [link.child for link in Link.objects.exclude(name__in=Link.META_LINKS).filter(parent=self, name=name)]
else:
return [link.child for link in Link.objects.exclude(name__in=Link.META_LINKS).filter(parent=self)]
def get_parent(self, name=None):
if name is not None:
return self.get_parent_link(name).parent.get_full_node()
else:
return self.get_parent_link().parent.get_full_node()
def get_parents(self):
return [link.parent for link in self.get_parent_links()]
def get_parent_link(self, name=None):
if name is not None:
return Link.objects.get(child=self, name=name)
else:
return Link.objects.get(child=self)
def get_parent_links(self):
return Link.objects.filter(child=self).exclude(name__in=Link.META_LINKS)
def get_children_links(self, name=None):
if name is None:
return Link.objects.exclude(name__in=Link.META_LINKS).filter(parent=self)
else:
return Link.objects.exclude(name__in=Link.META_LINKS).filter(parent=self, name=name)
def get_all_children_links(self):
return Link.objects.filter(parent=self)
def get_template_name(self):
return 'editor/gen/workflow-%s.xml.mako' % self.node_type
def is_visible(self):
return True
def add_node(self, child):
raise NotImplementedError(_("%(node_type)s has not implemented the 'add_node' method.") % {
'node_type': self.node_type
})
@property
def data_dict(self):
if not self.data:
self.data = json.dumps({})
data_python = json.loads(self.data)
# Backward compatibility
if 'sla' not in data_python:
data_python['sla'] = copy.deepcopy(DEFAULT_SLA)
if 'credentials' not in data_python:
data_python['credentials'] = []
return data_python
@property
def sla(self):
return self.data_dict['sla']
@sla.setter
def sla(self, sla):
data_ = self.data_dict
data_['sla'] = sla
self.data = json.dumps(data_)
@property
def sla_enabled(self):
return self.sla[0]['value'] # the first SLA entry is the 'enabled' flag
@property
def credentials(self):
return self.data_dict['credentials']
@credentials.setter
def credentials(self, credentials):
data_ = self.data_dict
data_['credentials'] = credentials
self.data = json.dumps(data_)
class Action(Node):
types = ()
class Meta:
# Cloning does not work anymore if not abstract
abstract = True
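# add_node() rewires this action's 'ok' transition to the new child and guarantees an 'error' transition to the workflow's kill node.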
def add_node(self, child):
Link.objects.filter(parent=self, name='ok').delete()
Link.objects.create(parent=self, child=child, name='ok')
if not Link.objects.filter(parent=self, name='error').exists():
Link.objects.create(parent=self, child=Kill.objects.get(name='kill', workflow=self.workflow), name='error')
# The fields with '[]' as default value are JSON dictionaries
# When adding a new action, also update
# - Action.types below
# - Node.get_full_node()
# - forms.py _node_type_TO_FORM_CLS
# - workflow.js
# - maybe actions_utils.mako
class Mapreduce(Action):
PARAM_FIELDS = ('files', 'archives', 'job_properties', 'jar_path', 'prepares', 'sla')
node_type = 'mapreduce'
files = models.TextField(default="[]", verbose_name=_t('Files'),
help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.'))
archives = models.TextField(default="[]", verbose_name=_t('Archives'),
help_text=_t('List of names or paths of the archives to be added to the distributed cache.'))
job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'),
help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)'))
jar_path = models.CharField(max_length=PATH_MAX, verbose_name=_t('Jar name'),
help_text=_t('Name or path to the %(program)s jar file on HDFS. E.g. examples.jar.') % {'program': 'MapReduce'})
prepares = models.TextField(default="[]", verbose_name=_t('Prepares'),
help_text=_t('List of absolute paths to delete and then to create before starting the application. '
'This should be used exclusively for directory cleanup.'))
job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'),
help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. '
'Properties specified in the Job Properties element override properties specified in the '
'files specified in the Job XML element.'))
def get_properties(self):
return json.loads(self.job_properties)
def get_files(self):
return json.loads(self.files)
def get_archives(self):
return json.loads(self.archives)
def get_prepares(self):
return json.loads(self.prepares)
class Streaming(Action):
PARAM_FIELDS = ('files', 'archives', 'job_properties', 'mapper', 'reducer', 'sla')
node_type = "streaming"
files = models.TextField(default="[]", verbose_name=_t('Files'),
help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.'))
archives = models.TextField(default="[]", verbose_name=_t('Archives'),
help_text=_t('List of names or paths of the archives to be added to the distributed cache.'))
job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'),
help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)'))
mapper = models.CharField(max_length=PATH_MAX, blank=False, verbose_name=_t('Mapper'),
help_text=_t('The executable/script to be used as mapper.'))
reducer = models.CharField(max_length=PATH_MAX, blank=False, verbose_name=_t('Reducer'),
help_text=_t('The executable/script to be used as reducer.'))
def get_properties(self):
return json.loads(self.job_properties)
def get_files(self):
return json.loads(self.files)
def get_archives(self):
return json.loads(self.archives)
class Java(Action):
PARAM_FIELDS = ('files', 'archives', 'jar_path', 'main_class', 'args',
'java_opts', 'job_properties', 'prepares', 'sla')
node_type = "java"
files = models.TextField(default="[]", verbose_name=_t('Files'),
help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.'))
archives = models.TextField(default="[]", verbose_name=_t('Archives'),
help_text=_t('List of names or paths of the archives to be added to the distributed cache.'))
jar_path = models.CharField(max_length=PATH_MAX, blank=False, verbose_name=_t('Jar name'),
help_text=_t('Name or path to the %(program)s jar file on HDFS. E.g. examples.jar.') % {'program': 'Java'})
main_class = models.CharField(max_length=256, blank=False, verbose_name=_t('Main class'),
help_text=_t('Full name of the Java class. E.g. org.apache.hadoop.examples.Grep'))
args = models.TextField(blank=True, verbose_name=_t('Arguments'),
help_text=_t('Arguments of the main method. The value of each arg element is considered a single argument '
'and they are passed to the main method in the same order.'))
java_opts = models.CharField(max_length=256, blank=True, verbose_name=_t('Java options'),
help_text=_t('Command-line parameters used to start the JVM that will execute '
'the Java application. Using this element is equivalent to using the mapred.child.java.opts '
'configuration property. E.g. -Dexample-property=hue'))
job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'),
help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)'))
prepares = models.TextField(default="[]", verbose_name=_t('Prepares'),
help_text=_t('List of absolute paths to delete and then to create before starting the application. '
'This should be used exclusively for directory cleanup.'))
job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'),
help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. '
'Properties specified in the Job Properties element override properties specified in the '
'files specified in the Job XML element.'))
capture_output = models.BooleanField(default=False, verbose_name=_t('Capture output'),
help_text=_t('Capture output of the stdout of the %(program)s command execution. The %(program)s '
'command output must be in Java Properties file format and it must not exceed 2KB. '
'From within the workflow definition, the output of an %(program)s action node is accessible '
'via the String action:output(String node, String key) function') % {'program': node_type.title()})
def get_properties(self):
return json.loads(self.job_properties)
def get_files(self):
return json.loads(self.files)
def get_archives(self):
return json.loads(self.archives)
def get_prepares(self):
return json.loads(self.prepares)
class Pig(Action):
PARAM_FIELDS = ('files', 'archives', 'job_properties', 'params', 'prepares', 'sla', 'credentials')
node_type = 'pig'
script_path = models.CharField(max_length=256, blank=False, verbose_name=_t('Script name'),
help_text=_t('Script name or path to the Pig script. E.g. my_script.pig.'))
params = models.TextField(default="[]", verbose_name=_t('Parameters'),
help_text=_t('The Pig parameters of the script. e.g. "-param", "INPUT=${inputDir}"'))
files = models.TextField(default="[]", verbose_name=_t('Files'),
help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.'))
archives = models.TextField(default="[]", verbose_name=_t('Archives'),
help_text=_t('List of names or paths of the archives to be added to the distributed cache.'))
job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'),
help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)'))
prepares = models.TextField(default="[]", verbose_name=_t('Prepares'),
help_text=_t('List of absolute paths to delete and then to create before starting the application. '
'This should be used exclusively for directory cleanup.'))
job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'),
help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. '
'Properties specified in the Job Properties element override properties specified in the '
'files specified in the Job XML element.'))
def get_properties(self):
return json.loads(self.job_properties)
def get_files(self):
return json.loads(self.files)
def get_archives(self):
return json.loads(self.archives)
def get_params(self):
return json.loads(self.params)
def get_prepares(self):
return json.loads(self.prepares)
class Hive(Action):
PARAM_FIELDS = ('files', 'archives', 'job_properties', 'params', 'prepares', 'sla', 'credentials')
node_type = 'hive'
script_path = models.CharField(max_length=256, blank=False, verbose_name=_t('Script name'),
help_text=_t('Script name or path to the %(type)s script. E.g. my_script.sql.') % {'type': node_type.title()})
params = models.TextField(default="[]", verbose_name=_t('Parameters'),
help_text=_t('The %(type)s parameters of the script. E.g. N=5, INPUT=${inputDir}') % {'type': node_type.title()})
files = models.TextField(default="[]", verbose_name=_t('Files'),
help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.'))
archives = models.TextField(default="[]", verbose_name=_t('Archives'),
help_text=_t('List of names or paths of the archives to be added to the distributed cache.'))
job_properties = models.TextField(default='[]',
verbose_name=_t('Hadoop job properties'),
help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)'))
prepares = models.TextField(default="[]", verbose_name=_t('Prepares'),
help_text=_t('List of absolute paths to delete, then create, before starting the application. '
'This should be used exclusively for directory cleanup.'))
job_xml = models.CharField(max_length=PATH_MAX, default='hive-config.xml', blank=True, verbose_name=_t('Job XML'),
help_text=_t('Refer to a Hive hive-config.xml file bundled in the workflow deployment directory. Pick a name different than hive-site.xml.'))
def get_properties(self):
return json.loads(self.job_properties)
def get_files(self):
return json.loads(self.files)
def get_archives(self):
return json.loads(self.archives)
def get_params(self):
return json.loads(self.params)
def get_prepares(self):
return json.loads(self.prepares)
class Sqoop(Action):
PARAM_FIELDS = ('files', 'archives', 'job_properties', 'params', 'prepares', 'sla', 'credentials')
node_type = 'sqoop'
script_path = models.TextField(blank=True, verbose_name=_t('Command'), default='',
help_text=_t('The full %(type)s command. Either put it here or split it by spaces and insert the parts as multiple parameters below.')
% {'type': node_type.title()})
params = models.TextField(default="[]", verbose_name=_t('Parameters'),
help_text=_t('If no command is specified, split the command by spaces and insert the %(type)s parameters '
'here e.g. import, --connect, jdbc:hsqldb:file:db.hsqldb, ...') % {'type': node_type.title()})
files = models.TextField(default="[]", verbose_name=_t('Files'),
help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.'))
archives = models.TextField(default="[]", verbose_name=_t('Archives'),
help_text=_t('List of names or paths of the archives to be added to the distributed cache.'))
job_properties = models.TextField(default='[]',
verbose_name=_t('Hadoop job properties'),
help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)'))
prepares = models.TextField(default="[]", verbose_name=_t('Prepares'),
help_text=_t('List of absolute paths to delete then to create before starting the application. '
'This should be used exclusively for directory cleanup.'))
job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'),
help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. '
'Properties specified in the Job Properties element override properties specified in the '
'files specified in the Job XML element.'))
def get_properties(self):
return json.loads(self.job_properties)
def get_files(self):
return json.loads(self.files)
def get_archives(self):
return json.loads(self.archives)
def get_params(self):
return json.loads(self.params)
def get_prepares(self):
return json.loads(self.prepares)
class Ssh(Action):
PARAM_FIELDS = ('user', 'host', 'command', 'params', 'sla', 'credentials')
node_type = 'ssh'
user = models.CharField(max_length=64, verbose_name=_t('User'),
help_text=_t('User executing the shell command.'))
host = models.CharField(max_length=256, verbose_name=_t('Host'),
help_text=_t('Where the shell will be executed.'))
command = models.CharField(max_length=256, verbose_name=_t('%(type)s command') % {'type': node_type.title()},
help_text=_t('The command that will be executed.'))
params = models.TextField(default="[]", verbose_name=_t('Arguments'),
help_text=_t('The arguments of the %(type)s command.') % {'type': node_type.title()})
capture_output = models.BooleanField(default=False, verbose_name=_t('Capture output'),
help_text=_t('Capture output of the stdout of the %(program)s command execution. The %(program)s '
'command output must be in Java properties file format and it must not exceed 2KB. '
'From within the workflow definition, the output of an %(program)s action node is accessible '
'via the String action:output(String node, String key) function') % {'program': node_type.title()})
def get_params(self):
return json.loads(self.params)
class Shell(Action):
PARAM_FIELDS = ('files', 'archives', 'job_properties', 'params', 'prepares', 'sla', 'credentials')
node_type = 'shell'
command = models.CharField(max_length=256, blank=False, verbose_name=_t('%(type)s command') % {'type': node_type.title()},
help_text=_t('The path of the Shell command to execute.'))
params = models.TextField(default="[]", verbose_name=_t('Arguments'),
help_text=_t('The arguments of Shell command can then be specified using one or more argument element.'))
files = models.TextField(default="[]", verbose_name=_t('Files'),
help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.'))
archives = models.TextField(default="[]", verbose_name=_t('Archives'),
help_text=_t('List of names or paths of the archives to be added to the distributed cache.'))
job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'),
help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)'))
prepares = models.TextField(default="[]", verbose_name=_t('Prepares'),
help_text=_t('List of absolute paths to delete then to create before starting the application. '
'This should be used exclusively for directory cleanup.'))
job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'),
help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. '
'Properties specified in the Job Properties element override properties specified in the '
'files specified in the Job XML element.'))
capture_output = models.BooleanField(default=False, verbose_name=_t('Capture output'),
help_text=_t('Capture output of the stdout of the %(program)s command execution. The %(program)s '
'command output must be in Java Properties file format and it must not exceed 2KB. '
'From within the workflow definition, the output of an %(program)s action node is accessible '
'via the String action:output(String node, String key) function') % {'program': node_type.title()})
def get_properties(self):
return json.loads(self.job_properties)
def get_files(self):
return json.loads(self.files)
def get_archives(self):
return json.loads(self.archives)
def get_params(self):
return json.loads(self.params)
def get_prepares(self):
return json.loads(self.prepares)
class DistCp(Action):
PARAM_FIELDS = ('job_properties', 'params', 'prepares', 'sla', 'credentials')
node_type = 'distcp'
params = models.TextField(default="[]", verbose_name=_t('Arguments'),
help_text=_t('The arguments of the %(type)s command. Put options first, then source paths, then destination path.')
% {'type': node_type.title()})
job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'),
help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)'))
prepares = models.TextField(default="[]", verbose_name=_t('Prepares'),
help_text=_t('List of absolute paths to delete then to create before starting the application. '
'This should be used exclusively for directory cleanup.'))
job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'),
help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. '
'Properties specified in the Job Properties element override properties specified in the '
'files specified in the Job XML element.'))
def get_properties(self):
return json.loads(self.job_properties)
def get_params(self):
return json.loads(self.params)
def get_prepares(self):
return json.loads(self.prepares)
class Fs(Action):
PARAM_FIELDS = ('deletes', 'mkdirs', 'moves', 'chmods', 'touchzs', 'sla', 'credentials')
node_type = 'fs'
deletes = models.TextField(default="[]", verbose_name=_t('Delete path'), blank=True,
help_text=_t('Delete the specified path, if it is a directory it deletes recursively all its content and '
'then deletes the directory.'))
mkdirs = models.TextField(default="[]", verbose_name=_t('Create directory'), blank=True,
help_text=_t('Create the specified directory, it creates all missing directories in the path. '
'If the directory already exists it does a no-op.'))
moves = models.TextField(default="[]", verbose_name=_t('Move file'), blank=True,
help_text=_t('Move a file or directory to another path.'))
chmods = models.TextField(default="[]", verbose_name=_t('Change permissions'), blank=True,
help_text=_t('Change the permissions for the specified path. Permissions can be specified using the Unix Symbolic '
'representation (e.g. -rwxrw-rw-) or an octal representation (755).'))
touchzs = models.TextField(default="[]", verbose_name=_t('Create or touch a file'), blank=True,
help_text=_t('Creates a zero length file in the specified path if none exists or touch it.'))
def get_deletes(self):
return json.loads(self.deletes)
def get_mkdirs(self):
return json.loads(self.mkdirs)
def get_moves(self):
return json.loads(self.moves)
def get_chmods(self):
return json.loads(self.chmods)
def get_touchzs(self):
return json.loads(self.touchzs)
class Email(Action):
PARAM_FIELDS = ('to', 'cc', 'subject', 'body', 'sla', 'credentials')
node_type = 'email'
to = models.TextField(default='', verbose_name=_t('TO addresses'), help_text=_t('Comma-separated values.'))
cc = models.TextField(default='', verbose_name=_t('CC addresses (optional)'), blank=True, help_text=_t('Comma-separated values.'))
subject = models.TextField(default='', verbose_name=_t('Subject'), help_text=_t('Plain-text.'))
body = models.TextField(default='', verbose_name=_t('Body'), help_text=_t('Plain-text.'))
class SubWorkflow(Action):
PARAM_FIELDS = ('subworkflow', 'propagate_configuration', 'job_properties', 'sla', 'credentials')
node_type = 'subworkflow'
sub_workflow = models.ForeignKey(Workflow, default=None, db_index=True, blank=True, null=True, verbose_name=_t('Sub-workflow'),
help_text=_t('The sub-workflow application to include. You must own all the sub-workflows.'))
propagate_configuration = models.BooleanField(default=True, verbose_name=_t('Propagate configuration'), blank=True,
help_text=_t('If the workflow job configuration should be propagated to the child workflow.'))
job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'),
help_text=_t('Can be used to specify the job properties that are required to run the child workflow job.'))
def get_properties(self):
return json.loads(self.job_properties)
class Generic(Action):
PARAM_FIELDS = ('xml', 'credentials', 'sla')
node_type = 'generic'
xml = models.TextField(default='', verbose_name=_t('XML of the custom action'),
help_text=_t('This will be inserted verbatim in the action %(action)s. '
'E.g. all the XML content like %(xml_action)s '
'will be inserted into the action and produce %(full_action)s') % {
'action': '<action name="email">...</action>',
'xml_action': '<email><cc>[email protected]</cc></email>',
'full_action': '<action name="email"><email><cc>[email protected]</cc></email><ok/><error/></action>'})
Action.types = (Mapreduce.node_type, Streaming.node_type, Java.node_type, Pig.node_type, Hive.node_type, Sqoop.node_type, Ssh.node_type, Shell.node_type,
DistCp.node_type, Fs.node_type, Email.node_type, SubWorkflow.node_type, Generic.node_type)
class ControlFlow(Node):
"""
http://incubator.apache.org/oozie/docs/3.2.0-incubating/docs/WorkflowFunctionalSpec.html#a3.1_Control_Flow_Nodes
"""
class Meta:
abstract = True
def get_xml(self):
return django_mako.render_to_string(self.get_template_name(), {})
def is_visible(self):
return True
# Could not make this abstract
class Start(ControlFlow):
node_type = 'start'
def add_node(self, child):
Link.objects.filter(parent=self).delete()
link = Link.objects.create(parent=self, child=child, name='to')
class End(ControlFlow):
node_type = 'end'
def add_node(self, child):
raise RuntimeError(_("End should not have any children."))
class Kill(ControlFlow):
node_type = 'kill'
message = models.CharField(max_length=256, blank=False, default='Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]')
def add_node(self, child):
raise RuntimeError(_("Kill should not have any children."))
def is_visible(self):
return False
class Fork(ControlFlow):
"""
A Fork can be converted into a Decision node.
"""
node_type = 'fork'
def is_visible(self):
return True
def get_child_join(self):
return Link.objects.get(parent=self, name='related').child.get_full_node()
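# convert_to_decision() drops the matching join, re-parents this fork's outgoing links to a new Decision node and ensures a 'default' link to the workflow end.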
def convert_to_decision(self):
self.remove_join()
decision = Decision.objects.create(workflow=self.workflow, node_type=Decision.node_type)
decision.save()
links = self.get_all_children_links()
has_default = False
for link in links:
if link.name == 'default':
has_default = True
link.parent = decision
# Defaults to end
if not has_default:
link = Link.objects.create(name="default", parent=decision, child=self.workflow.end)
link.save()
self.delete()
return decision
def remove_join(self):
join = self.get_child_join()
after_join = join.get_child('to')
for parent in join.get_parent_actions():
link = parent.get_link('ok')
link.child = after_join
link.save()
# Related links are deleted automatically through the foreign keys
join.delete()
class Join(ControlFlow):
node_type = 'join'
def is_visible(self):
return True
def get_parent_fork(self):
return self.get_parent_link('related').parent.get_full_node()
def get_parent_actions(self):
return [link.parent for link in self.get_parent_links()]
class Decision(ControlFlow):
"""
Essentially a fork where only one of the paths of execution are chosen.
Graphically, this is represented the same way as a fork.
The DecisionEnd node is not represented in Oozie, only in Hue.
"""
node_type = 'decision'
def get_child_end(self):
return Link.objects.get(parent=self, name='related').child.get_full_node()
def is_visible(self):
return True
def update_description(self):
self.description = ', '.join(self.get_children_links().values_list('comment', flat=True))
self.save()
class DecisionEnd(ControlFlow):
"""
Defines the end of a join.
This node exists purely in the Hue application to provide a smooth transition
from Decision to End.
NOTE: NOT AN OOZIE NODE
"""
node_type = 'decisionend'
def is_visible(self):
return False
def get_parent_decision(self):
return self.get_parent_link('related').parent.get_full_node()
def get_parent_actions(self):
return [link.parent for link in self.get_parent_links()]
def to_xml(self, mapping):
return ''
FREQUENCY_UNITS = (('minutes', _('Minutes')),
('hours', _('Hours')),
('days', _('Days')),
('months', _('Months')))
FREQUENCY_NUMBERS = [(i, i) for i in xrange(1, 61)]
DATASET_FREQUENCY = ['MINUTE', 'HOUR', 'DAY', 'MONTH', 'YEAR']
class Coordinator(Job):
frequency_number = models.SmallIntegerField(default=1, choices=FREQUENCY_NUMBERS, verbose_name=_t('Frequency number'),
help_text=_t('The number of units of the rate at which '
'data is periodically created.')) # unused
frequency_unit = models.CharField(max_length=20, choices=FREQUENCY_UNITS, default='days', verbose_name=_t('Frequency unit'),
help_text=_t('The unit of the rate at which data is periodically created.')) # unused
timezone = models.CharField(max_length=24, choices=TIMEZONES, default='America/Los_Angeles', verbose_name=_t('Timezone'),
help_text=_t('The timezone of the coordinator. Only used for managing the daylight saving time changes when combining several coordinators.'))
start = models.DateTimeField(default=datetime.today(), verbose_name=_t('Start'),
help_text=_t('When to start the first workflow.'))
end = models.DateTimeField(default=datetime.today() + timedelta(days=3), verbose_name=_t('End'),
help_text=_t('When to start the last workflow.'))
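# Note: datetime.today() in the defaults above is evaluated once at module import, so these defaults are fixed until the server restarts.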
workflow = models.ForeignKey(Workflow, null=True, verbose_name=_t('Workflow'),
help_text=_t('The workflow to schedule repeatedly.'))
timeout = models.SmallIntegerField(null=True, blank=True, verbose_name=_t('Timeout'),
help_text=_t('Number of minutes the coordinator action will be in '
'WAITING or READY status before giving up on its execution.'))
concurrency = models.PositiveSmallIntegerField(null=True, blank=True, choices=FREQUENCY_NUMBERS, verbose_name=_t('Concurrency'),
help_text=_t('The number of coordinator actions that are allowed to run concurrently (RUNNING status) '
'before the coordinator engine starts throttling them.'))
execution = models.CharField(max_length=10, null=True, blank=True, verbose_name=_t('Execution'),
choices=(('FIFO', _t('FIFO (oldest first) default')),
('LIFO', _t('LIFO (newest first)')),
('LAST_ONLY', _t('LAST_ONLY (discards all older materializations)'))),
help_text=_t('Execution strategy of its coordinator actions when there is backlog of coordinator '
'actions in the coordinator engine. The different execution strategies are \'oldest first\', '
'\'newest first\' and \'last one only\'. A backlog normally happens because of delayed '
'input data, concurrency control or because manual re-runs of coordinator jobs.'))
throttle = models.PositiveSmallIntegerField(null=True, blank=True, choices=FREQUENCY_NUMBERS, verbose_name=_t('Throttle'),
help_text=_t('The materialization or creation throttle value for its coordinator actions. '
'Number of maximum coordinator actions that are allowed to be in WAITING state concurrently.'))
job_properties = models.TextField(default='[]', verbose_name=_t('Workflow properties'),
help_text=_t('Additional properties to transmit to the workflow, e.g. limit=100, and EL functions, e.g. username=${coord:user()}'))
HUE_ID = 'hue-id-c'
ICON = '/oozie/static/art/icon_oozie_coordinator_48.png'
METADATA_FORMAT_VERSION = "0.0.1"
CRON_MAPPING = {
'0,15,30,45 * * * *': _('Every 15 minutes'),
'0,30 * * * *': _('Every 30 minutes'),
'0 * * * *': _('Every hour'),
'0 0 * * *': _('Every day at midnight'),
'0 0 * * 0': _('Every week'),
'0 0 1 * *': _('Every month'),
'0 0 1 1 *': _('Every year'),
}
def get_type(self):
return 'coordinator'
def to_xml(self, mapping=None):
if mapping is None:
mapping = {}
tmpl = "editor/gen/coordinator.xml.mako"
return re.sub(re.compile('\s*\n+', re.MULTILINE), '\n', django_mako.render_to_string(tmpl, {'coord': self, 'mapping': mapping})).encode('utf-8', 'xmlcharrefreplace')
def clone(self, new_owner=None):
datasets = Dataset.objects.filter(coordinator=self)
data_inputs = DataInput.objects.filter(coordinator=self)
data_outputs = DataOutput.objects.filter(coordinator=self)
name = self.name + '-copy'
if new_owner is not None:
owner = new_owner
else:
owner = self.owner
copy_doc = self.doc.get().copy(name=name, owner=owner)
copy = self
copy.pk = None
copy.id = None
copy.name = name
copy.deployment_dir = ''
copy.owner = owner
copy.save()
copy.doc.all().delete()
copy.doc.add(copy_doc)
old_dataset_mapping = {}
for dataset in datasets:
prev_id = dataset.id
dataset.pk = None
dataset.id = None
dataset.coordinator = copy
dataset.save()
old_dataset_mapping[prev_id] = dataset
for data_input in data_inputs:
data_input.pk = None
data_input.id = None
data_input.coordinator = copy
data_input.dataset = old_dataset_mapping[data_input.dataset.id]
data_input.save()
for data_output in data_outputs:
data_output.pk = None
data_output.id = None
data_output.coordinator = copy
data_output.dataset = old_dataset_mapping[data_output.dataset.id]
data_output.save()
return copy
@classmethod
def get_application_path_key(cls):
return 'oozie.coord.application.path'
@classmethod
def get_application_filename(cls):
return 'coordinator.xml'
def get_properties(self):
props = json.loads(self.job_properties)
index = [prop['name'] for prop in props]
for prop in self.workflow.get_parameters():
if not prop['name'] in index:
props.append(prop)
index.append(prop['name'])
# Remove DataInputs and DataOutputs
datainput_names = [_input.name for _input in self.datainput_set.all()]
dataoutput_names = [_output.name for _output in self.dataoutput_set.all()]
removable_names = datainput_names + dataoutput_names
props = filter(lambda prop: prop['name'] not in removable_names, props)
return props
@property
def job_properties_escapejs(self):
return self._escapejs_parameters_list(self.job_properties)
@property
def start_utc(self):
return utc_datetime_format(self.start)
@property
def end_utc(self):
return utc_datetime_format(self.end)
def get_absolute_url(self):
return reverse('oozie:edit_coordinator', kwargs={'coordinator': self.id})
@property
def frequency(self):
return '${coord:%(unit)s(%(number)d)}' % {'unit': self.frequency_unit, 'number': self.frequency_number}
@property
def text_frequency(self):
return '%(number)d %(unit)s' % {'unit': self.frequency_unit, 'number': self.frequency_number}
def find_parameters(self):
params = self.workflow.find_parameters()
for param in find_parameters(self, ['job_properties']):
params[param] = ''
if self.sla_enabled:
for param in find_json_parameters(self.sla):
params.add(param)
for dataset in self.dataset_set.all():
for param in find_parameters(dataset, ['uri']):
if param not in set(DATASET_FREQUENCY):
params[param] = ''
for ds in self.datainput_set.all():
params.pop(ds.name, None)
for ds in self.dataoutput_set.all():
params.pop(ds.name, None)
for wf_param in json.loads(self.job_properties):
params.pop(wf_param['name'], None)
return params
def compress(self, mapping=None, fp=StringIO.StringIO()):
metadata = {
'version': Coordinator.METADATA_FORMAT_VERSION,
'workflow': self.workflow.name,
'attributes': {
'description': self.description,
'deployment_dir': self.deployment_dir
}
}
xml = self.to_xml(mapping=mapping)
zfile = zipfile.ZipFile(fp, 'w')
zfile.writestr("coordinator.xml", smart_str(xml))
zfile.writestr("coordinator-metadata.json", smart_str(json.dumps(metadata)))
zfile.close()
return fp
@classmethod
def decompress(cls, fp):
zfile = zipfile.ZipFile(fp, 'r')
metadata_json = zfile.read('coordinator-metadata.json')
metadata = json.loads(metadata_json)
xml = zfile.read('coordinator.xml')
return xml, metadata
@property
def sla_jsescaped(self):
return json.dumps(self.sla, cls=JSONEncoderForHTML)
@property
def cron_frequency(self):
if 'cron_frequency' in self.data_dict:
return self.data_dict['cron_frequency']
else:
# Backward compatibility
freq = '0 0 * * *'
if self.frequency_number == 1:
if self.frequency_unit == 'MINUTES':
freq = '* * * * *'
elif self.frequency_unit == 'HOURS':
freq = '0 * * * *'
elif self.frequency_unit == 'DAYS':
freq = '0 0 * * *'
elif self.frequency_unit == 'MONTH':
freq = '0 0 * * *'
return {'frequency': freq, 'isAdvancedCron': False}
@property
def cron_frequency_human(self):
frequency = self.cron_frequency['frequency']
return Coordinator.CRON_MAPPING.get(frequency, frequency)
@cron_frequency.setter
def cron_frequency(self, cron_frequency):
data_ = self.data_dict
data_['cron_frequency'] = cron_frequency
self.data = json.dumps(data_)
class DatasetManager(models.Manager):
def can_read_or_exception(self, request, dataset_id):
if dataset_id is None:
return
try:
dataset = Dataset.objects.get(pk=dataset_id)
if dataset.coordinator.can_read(request.user):
return dataset
else:
message = _("Permission denied. %(username)s does not have the permissions to access dataset %(id)s.") % \
{'username': request.user.username, 'id': dataset.id}
access_warn(request, message)
request.error(message)
raise PopupException(message)
except Dataset.DoesNotExist:
raise PopupException(_('dataset %(id)s does not exist') % {'id': dataset_id})
class Dataset(models.Model):
"""
http://oozie.apache.org/docs/3.3.0/CoordinatorFunctionalSpec.html#a6.3._Synchronous_Coordinator_Application_Definition
"""
name = models.CharField(max_length=40, validators=[name_validator], verbose_name=_t('Name'),
help_text=_t('The name of the dataset.'))
description = models.CharField(max_length=1024, blank=True, default='', verbose_name=_t('Description'),
help_text=_t('A description of the dataset.'))
start = models.DateTimeField(default=datetime.today(), verbose_name=_t('Start'),
help_text=_t('The UTC datetime of the initial instance of the dataset. The initial instance also provides '
'the baseline datetime to compute instances of the dataset using multiples of the frequency.'))
frequency_number = models.SmallIntegerField(default=1, choices=FREQUENCY_NUMBERS, verbose_name=_t('Frequency number'),
help_text=_t('The number of units of the rate at which '
'data is periodically created.'))
frequency_unit = models.CharField(max_length=20, choices=FREQUENCY_UNITS, default='days', verbose_name=_t('Frequency unit'),
help_text=_t('The unit of the rate at which data is periodically created.'))
uri = models.CharField(max_length=1024, default='/data/${YEAR}${MONTH}${DAY}', verbose_name=_t('URI'),
help_text=_t('The URI template that identifies the dataset and can be resolved into concrete URIs to identify a particular '
'dataset instance. The URI consists of constants (e.g. ${YEAR}/${MONTH}) and '
'configuration properties (e.g. /home/${USER}/projects/${PROJECT})'))
timezone = models.CharField(max_length=24, choices=TIMEZONES, default='America/Los_Angeles', verbose_name=_t('Timezone'),
help_text=_t('The timezone of the dataset. Only used for managing the daylight saving time changes when combining several datasets.'))
done_flag = models.CharField(max_length=64, blank=True, default='', verbose_name=_t('Done flag'),
help_text=_t('The done file for the data set. If the Done flag is not specified, then Oozie '
'configures Hadoop to create a _SUCCESS file in the output directory. If Done '
'flag is set to empty, then Coordinator looks for the existence of the directory itself.'))
coordinator = models.ForeignKey(Coordinator, verbose_name=_t('Coordinator'),
help_text=_t('The coordinator associated with this data.'))
instance_choice = models.CharField(max_length=10, default='default', verbose_name=_t('Instance type'),
help_text=_t('Customize the date instance(s), e.g. define a range of dates, use EL functions...'))
advanced_start_instance = models.CharField(max_length=128, default='0', verbose_name=_t('Start instance'),
help_text=_t('Shift the frequency for getting past/future start dates or enter verbatim the Oozie start instance, e.g. ${coord:current(0)}'))
advanced_end_instance = models.CharField(max_length=128, blank=True, default='0', verbose_name=_t('End instance'),
help_text=_t('Optional: Shift the frequency for getting past/future end dates or enter verbatim the Oozie end instance.'))
objects = DatasetManager()
unique_together = ('coordinator', 'name')
def __unicode__(self):
return '%s' % (self.name,)
@property
def start_utc(self):
return utc_datetime_format(self.start)
@property
def frequency(self):
return '${coord:%(unit)s(%(number)d)}' % {'unit': self.frequency_unit, 'number': self.frequency_number}
@property
def text_frequency(self):
return '%(number)d %(unit)s' % {'unit': self.frequency_unit, 'number': self.frequency_number}
@property
def start_instance(self):
if not self.is_advanced_start_instance:
return int(self.advanced_start_instance)
else:
return 0
@property
def is_advanced_start_instance(self):
return not self.is_int(self.advanced_start_instance)
def is_int(self, text):
try:
int(text)
return True
except ValueError:
return False
@property
def end_instance(self):
if not self.is_advanced_end_instance:
return int(self.advanced_end_instance)
else:
return 0
@property
def is_advanced_end_instance(self):
return not self.is_int(self.advanced_end_instance)
class DataInput(models.Model):
name = models.CharField(max_length=40, validators=[name_validator], verbose_name=_t('Name of an input variable in the workflow.'),
help_text=_t('The name of the variable of the workflow to automatically fill up.'))
dataset = models.OneToOneField(Dataset, verbose_name=_t('The dataset representing format of the data input.'),
help_text=_t('The pattern of the input data we want to process.'))
coordinator = models.ForeignKey(Coordinator)
unique_together = ('coordinator', 'name')
class DataOutput(models.Model):
name = models.CharField(max_length=40, validators=[name_validator], verbose_name=_t('Name of an output variable in the workflow'),
help_text=_t('The name of the variable of the workflow to automatically fill up.'))
dataset = models.OneToOneField(Dataset, verbose_name=_t('The dataset representing the format of the data output.'),
help_text=_t('The pattern of the output data we want to generate.'))
coordinator = models.ForeignKey(Coordinator)
unique_together = ('coordinator', 'name')
class BundledCoordinator(models.Model):
bundle = models.ForeignKey('Bundle', verbose_name=_t('Bundle'),
help_text=_t('The bundle regrouping all the coordinators.'))
coordinator = models.ForeignKey(Coordinator, verbose_name=_t('Coordinator'),
help_text=_t('The coordinator to batch with other coordinators.'))
parameters = models.TextField(default='[{"name":"oozie.use.system.libpath","value":"true"}]', verbose_name=_t('Parameters'),
help_text=_t('Constants used at the submission time (e.g. market=US, oozie.use.system.libpath=true).'))
def get_parameters(self):
return json.loads(self.parameters)
class Bundle(Job):
kick_off_time = models.DateTimeField(default=datetime.today(), verbose_name=_t('Start'),
help_text=_t('When to start the first coordinators.'))
coordinators = models.ManyToManyField(Coordinator, through='BundledCoordinator')
HUE_ID = 'hue-id-b'
ICON = '/oozie/static/art/icon_oozie_bundle_48.png'
METADATA_FORMAT_VERSION = '0.0.1'
def get_type(self):
return 'bundle'
def to_xml(self, mapping=None):
if mapping is None:
mapping = {}
tmpl = "editor/gen/bundle.xml.mako"
return force_unicode(
re.sub(re.compile('\s*\n+', re.MULTILINE), '\n', django_mako.render_to_string(tmpl, {
'bundle': self,
'mapping': mapping
})))
def clone(self, new_owner=None):
bundleds = BundledCoordinator.objects.filter(bundle=self)
name = self.name + '-copy'
if new_owner is not None:
owner = new_owner
else:
owner = self.owner
copy_doc = self.doc.get().copy(name=name, owner=owner)
copy = self
copy.pk = None
copy.id = None
copy.name = name
copy.deployment_dir = ''
copy.owner = owner
copy.save()
copy.doc.all().delete()
copy.doc.add(copy_doc)
for bundled in bundleds:
bundled.pk = None
bundled.id = None
bundled.bundle = copy
bundled.save()
return copy
@classmethod
def get_application_path_key(cls):
return 'oozie.bundle.application.path'
@classmethod
def get_application_filename(cls):
return 'bundle.xml'
def get_absolute_url(self):
return reverse('oozie:edit_bundle', kwargs={'bundle': self.id})
def find_parameters(self):
params = {}
for bundled in BundledCoordinator.objects.filter(bundle=self):
for param in bundled.coordinator.find_parameters():
params[param] = ''
for param in bundled.get_parameters():
params.pop(param['name'], None)
return params
@property
def kick_off_time_utc(self):
return utc_datetime_format(self.kick_off_time)
def compress(self, mapping=None, fp=StringIO.StringIO()):
metadata = {
'version': Bundle.METADATA_FORMAT_VERSION,
'attributes': {
'description': self.description,
'deployment_dir': self.deployment_dir
}
}
xml = self.to_xml(mapping=mapping)
zfile = zipfile.ZipFile(fp, 'w')
zfile.writestr("bundle.xml", smart_str(xml))
zfile.writestr("bundle-metadata.json", smart_str(json.dumps(metadata)))
zfile.close()
return fp
@classmethod
def decompress(cls, fp):
zfile = zipfile.ZipFile(fp, 'r')
metadata_json = zfile.read('bundle-metadata.json')
metadata = json.loads(metadata_json)
xml = zfile.read('bundle.xml')
return xml, metadata
class HistoryManager(models.Manager):
def create_from_submission(self, submission):
History.objects.create(submitter=submission.user,
oozie_job_id=submission.oozie_id,
job=submission.job,
properties=json.dumps(submission.properties))
class History(models.Model):
"""
Contains information on submitted workflows/coordinators.
"""
submitter = models.ForeignKey(User, db_index=True)
submission_date = models.DateTimeField(auto_now=True, db_index=True)
oozie_job_id = models.CharField(max_length=128)
job = models.ForeignKey(Job, db_index=True)
properties = models.TextField()
objects = HistoryManager()
@property
def properties_dict(self):
return json.loads(self.properties)
def get_absolute_oozie_url(self):
view = 'oozie:list_oozie_workflow'
if self.oozie_job_id.endswith('C'):
view = 'oozie:list_oozie_coordinator'
elif self.oozie_job_id.endswith('B'):
view = 'oozie:list_oozie_bundle'
return reverse(view, kwargs={'job_id': self.oozie_job_id})
def get_workflow(self):
if self.oozie_job_id.endswith('W'):
return self.job.get_full_node()
def get_coordinator(self):
if self.oozie_job_id.endswith('C'):
return self.job.get_full_node()
@classmethod
def get_workflow_from_config(self, conf_dict):
try:
return Workflow.objects.get(id=conf_dict.get(Workflow.HUE_ID))
except Workflow.DoesNotExist:
pass
@classmethod
def get_coordinator_from_config(self, conf_dict):
try:
return Coordinator.objects.get(id=conf_dict.get(Coordinator.HUE_ID))
except Coordinator.DoesNotExist:
pass
@classmethod
def cross_reference_submission_history(cls, user, oozie_id):
# Try to get the history
history = None
try:
history = History.objects.get(oozie_job_id=oozie_id)
if history.job.owner != user:
history = None
except History.DoesNotExist:
pass
return history
def get_link(oozie_id):
link = ''
if 'W@' in oozie_id:
link = reverse('oozie:list_oozie_workflow_action', kwargs={'action': oozie_id})
elif oozie_id.endswith('W'):
link = reverse('oozie:list_oozie_workflow', kwargs={'job_id': oozie_id})
elif oozie_id.endswith('C'):
link = reverse('oozie:list_oozie_coordinator', kwargs={'job_id': oozie_id})
return link
def find_parameters(instance, fields=None):
"""Find parameters in the given fields"""
if fields is None:
fields = [field.name for field in instance._meta.fields]
params = []
for field in fields:
data = getattr(instance, field)
if field == 'sla' and not instance.sla_enabled:
continue
if isinstance(data, list):
params.extend(find_json_parameters(data))
elif isinstance(data, basestring):
for match in Template.pattern.finditer(data):
name = match.group('braced')
if name is not None:
params.append(name)
return params
def find_json_parameters(fields):
# Could be made smarter; the input is a list of JSON dicts
params = []
for field in fields:
for data in field.values():
if isinstance(data, basestring):
for match in Template.pattern.finditer(data):
name = match.group('braced')
if name is not None:
params.append(name)
return params
# See http://wiki.apache.org/hadoop/JobConfFile
_STD_PROPERTIES = [
'mapred.input.dir',
'mapred.output.dir',
'mapred.job.name',
'mapred.job.queue.name',
'mapred.mapper.class',
'mapred.reducer.class',
'mapred.combiner.class',
'mapred.partitioner.class',
'mapred.map.tasks',
'mapred.reduce.tasks',
'mapred.input.format.class',
'mapred.output.format.class',
'mapred.input.key.class',
'mapred.input.value.class',
'mapred.output.key.class',
'mapred.output.value.class',
'mapred.mapoutput.key.class',
'mapred.mapoutput.value.class',
'mapred.combine.buffer.size',
'mapred.min.split.size',
'mapred.speculative.execution',
'mapred.map.tasks.speculative.execution',
'mapred.reduce.tasks.speculative.execution',
'mapred.queue.default.acl-administer-jobs',
]
_STD_PROPERTIES_JSON = json.dumps(_STD_PROPERTIES)
ACTION_TYPES = {
Mapreduce.node_type: Mapreduce,
Streaming.node_type: Streaming,
Java.node_type: Java,
Pig.node_type: Pig,
Hive.node_type: Hive,
Sqoop.node_type: Sqoop,
Ssh.node_type: Ssh,
Shell.node_type: Shell,
DistCp.node_type: DistCp,
Fs.node_type: Fs,
Email.node_type: Email,
SubWorkflow.node_type: SubWorkflow,
Generic.node_type: Generic,
}
CONTROL_TYPES = {
Fork.node_type: Fork,
Join.node_type: Join,
Decision.node_type: Decision,
DecisionEnd.node_type: DecisionEnd,
Start.node_type: Start,
End.node_type: End,
}
NODE_TYPES = ACTION_TYPES.copy()
NODE_TYPES.update(CONTROL_TYPES)
| apache-2.0 | 4,944,804,039,425,903,000 | 38.640862 | 183 | 0.636088 | false |
d/hamster-applet | src/docky_control/2.1/hamster_control.py | 1 | 3518 | #!/usr/bin/env python
#
# Copyright (C) 2010 Toms Baugis
#
# Original code from Banshee control,
# Copyright (C) 2009-2010 Jason Smith, Rico Tzschichholz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import atexit
import gobject
import sys, os
from subprocess import Popen
try:
import gtk
from dockmanager.dockmanager import DockManagerItem, DockManagerSink, DOCKITEM_IFACE
from signal import signal, SIGTERM
from sys import exit
except ImportError, e:
print e
exit()
from hamster import client
from hamster.utils import stuff, i18n
i18n.setup_i18n()
class HamsterItem(DockManagerItem):
def __init__(self, sink, path):
DockManagerItem.__init__(self, sink, path)
self.storage = client.Storage()
self.storage.connect("facts-changed", lambda storage: self.refresh_hamster())
self.storage.connect("activities-changed", lambda storage: self.refresh_hamster())
self.id_map = {} #menu items
self.update_text()
self.add_actions()
gobject.timeout_add_seconds(60, self.refresh_hamster)
def refresh_hamster(self):
try:
self.update_text()
finally: # we want to go on no matter what, so in case of any error we find out about it sooner
return True
def update_text(self):
today = self.storage.get_todays_facts()
if today and today[-1].end_time is None:
fact = today[-1]
self.set_tooltip("%s - %s" % (fact.activity, fact.category))
self.set_badge(stuff.format_duration(fact.delta, human=False))
else:
self.set_tooltip(_("No activity"))
self.reset_badge()
def menu_pressed(self, menu_id):
if self.id_map[menu_id] == _("Overview"):
Popen(["hamster-time-tracker", "overview"])
elif self.id_map[menu_id] == _("Preferences"):
Popen(["hamster-time-tracker", "preferences"])
        self.add_actions() # TODO - figure out why we have to regenerate all menu items after each click
def add_actions(self):
# first clear the menu
for k in self.id_map.keys():
self.remove_menu_item(k)
self.id_map = {}
# now add buttons
self.add_menu_item(_("Overview"), "")
self.add_menu_item(_("Preferences"), "preferences-desktop-personal")
class HamsterSink(DockManagerSink):
def item_path_found(self, pathtoitem, item):
if item.Get(DOCKITEM_IFACE, "DesktopFile", dbus_interface="org.freedesktop.DBus.Properties").endswith ("hamster-time-tracker.desktop"):
self.items[pathtoitem] = HamsterItem(self, pathtoitem)
hamstersink = HamsterSink()
def cleanup():
hamstersink.dispose()
if __name__ == "__main__":
mainloop = gobject.MainLoop(is_running=True)
atexit.register (cleanup)
signal(SIGTERM, lambda signum, stack_frame: exit(1))
while mainloop.is_running():
mainloop.run()
| gpl-3.0 | 982,473,164,084,869,900 | 29.591304 | 143 | 0.666003 | false |
JoshBorke/redline | accounts/urls.py | 1 | 2078 | from django.conf.urls.defaults import *
from redline import settings
urlpatterns = patterns('redline.accounts.views',
url(r'^$', 'accounts_list', name='account_list'),
url(r'^overview/(?P<year>\d+)/(?P<month>\d+)/$', 'accounts_detail', name='accounts_detail'),
url(r'^overview/(?P<year>\d+)/(?P<month>\d+)/(?P<ttype>\d+)$', 'accounts_detail_type', name='accounts_detail_type'),
url(r'^overview/(?P<year>\d+)/(?P<month>\d+)/(?P<ttype>\d+)/(?P<slug>[\w_-]+)/$', 'accounts_category_detail', name='accounts_category_detail'),
url(r'^add/$', 'account_add', name='account_add'),
url(r'^edit/(?P<account_id>\d+)/$', 'account_edit', name='account_edit'),
# for specific accounts
url(r'^info/(?P<account_id>\d+)/$', 'account_info', name='account_info'),
url(r'^info/(?P<account_id>\d+)/(?P<year>\d+)/(?P<month>\d+)$', 'account_detail', name='account_detail'),
url(r'^info/(?P<account_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<ttype>\d+)$', 'account_detail_type', name='account_detail_type'),
    url(r'^info/(?P<account_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<ttype>\d+)/(?P<slug>[\w_-]+)/$', 'account_category_detail', name='account_category_detail'),
# for misc
url(r'^delete/(?P<account_id>\d+)/$', 'account_delete', name='account_delete'),
url(r'^import/(?P<account_id>\d+)/$', 'account_import', name='account_import'),
# for account types, not used
url(r'^account_type$', 'account_type_list', name='account_type_list'),
url(r'^account_type/add/$', 'account_type_add', name='account_type_add'),
url(r'^account_type/edit/(?P<account_type_id>\d+)/$', 'account_type_edit', name='account_type_edit'),
url(r'^account_type/delete/(?P<account_type_id>\d+)/$', 'account_type_delete', name='account_type_delete'),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^site_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/home/josh/local/redline/media'}),
(r'^data-files/(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/home/josh/local/redline/media'}),
)
| gpl-3.0 | -7,997,031,493,255,206,000 | 66.032258 | 161 | 0.60924 | false |
iirob/python-opcua | opcua/common/structures.py | 1 | 11226 | """
Support for custom structures in client and server.
Only a subset of features is supported, but it should be enough
for most custom structures.
"""
import os
import importlib
import re
import logging
# The next two imports are for generated code
from datetime import datetime
import uuid
from enum import Enum, IntEnum, EnumMeta
from lxml import objectify
from opcua.ua.ua_binary import Primitives
from opcua import ua
logger = logging.getLogger(__name__)
def get_default_value(uatype, enums):
if uatype == "String":
return "None"
elif uatype == "Guid":
return "uuid.uuid4()"
elif uatype in ("ByteString", "CharArray", "Char"):
return "b''"
elif uatype == "Boolean":
return "True"
elif uatype == "DateTime":
return "datetime.utcnow()"
elif uatype in ("Int16", "Int32", "Int64", "UInt16", "UInt32", "UInt64", "Double", "Float", "Byte", "SByte"):
return 0
elif uatype in enums:
return "ua." + uatype + "(" + enums[uatype] + ")"
elif hasattr(ua, uatype) and issubclass(getattr(ua, uatype), Enum):
        # We have an enum, try to initialize it correctly
        val = list(getattr(ua, uatype).__members__)[0]
        return "ua.{}.{}".format(uatype, val)
else:
return "ua.{}()".format(uatype)
class EnumType(object):
def __init__(self, name):
self.name = name
self.fields = []
self.typeid = None
def get_code(self):
code = """
class {0}(IntEnum):
'''
{0} EnumInt autogenerated from xml
'''
""".format(self.name)
for EnumeratedValue in self.fields:
name = EnumeratedValue.Name
value = EnumeratedValue.Value
code += " {} = {}\n".format(name, value)
return code
class EnumeratedValue(object):
def __init__(self, name, value):
if name == "None":
name = "None_"
name = name.replace(" ", "")
self.Name = name
self.Value = value
class Struct(object):
def __init__(self, name):
self.name = name
self.fields = []
self.typeid = None
def get_code(self):
code = """
class {0}(object):
'''
{0} structure autogenerated from xml
'''
""".format(self.name)
code += " ua_types = [\n"
for field in self.fields:
prefix = "ListOf" if field.array else ""
uatype = prefix + field.uatype
if uatype == "ListOfChar":
uatype = "String"
code += " ('{}', '{}'),\n".format(field.name, uatype)
code += " ]"
code += """
def __init__(self):
"""
if not self.fields:
code += " pass"
for field in self.fields:
code += " self.{} = {}\n".format(field.name, field.value)
return code
class Field(object):
def __init__(self, name):
self.name = name
self.uatype = None
self.value = None
self.array = False
class StructGenerator(object):
def __init__(self):
self.model = []
def make_model_from_string(self, xml):
obj = objectify.fromstring(xml)
self._make_model(obj)
def make_model_from_file(self, path):
obj = objectify.parse(path)
root = obj.getroot()
self._make_model(root)
def _make_model(self, root):
enums = {}
for child in root.iter("{*}EnumeratedType"):
intenum = EnumType(child.get("Name"))
for xmlfield in child.iter("{*}EnumeratedValue"):
name = xmlfield.get("Name")
value = xmlfield.get("Value")
enumvalue = EnumeratedValue(name, value)
intenum.fields.append(enumvalue)
enums[child.get("Name")] = value
self.model.append(intenum)
for child in root.iter("{*}StructuredType"):
struct = Struct(child.get("Name"))
array = False
for xmlfield in child.iter("{*}Field"):
name = xmlfield.get("Name")
if name.startswith("NoOf"):
array = True
continue
field = Field(_clean_name(name))
field.uatype = xmlfield.get("TypeName")
if ":" in field.uatype:
field.uatype = field.uatype.split(":")[1]
field.uatype = _clean_name(field.uatype)
field.value = get_default_value(field.uatype, enums)
if array:
field.array = True
field.value = []
array = False
struct.fields.append(field)
self.model.append(struct)
def save_to_file(self, path, register=False):
_file = open(path, "wt")
self._make_header(_file)
for struct in self.model:
_file.write(struct.get_code())
if register:
_file.write(self._make_registration())
_file.close()
def _make_registration(self):
code = "\n\n"
for struct in self.model:
code += "ua.register_extension_object('{name}', ua.NodeId.from_string('{nodeid}'), {name})\n".format(name=struct.name, nodeid=struct.typeid)
return code
def get_python_classes(self, env=None):
return _generate_python_class(self.model, env=env)
def save_and_import(self, path, append_to=None):
"""
        Save the new structures to a Python file which can be used later,
        import the result and return the resulting classes in a dict.
        If append_to is a dict, the classes are added to that dict.
"""
self.save_to_file(path)
name = os.path.basename(path)
name = os.path.splitext(name)[0]
mymodule = importlib.import_module(name)
if append_to is None:
result = {}
else:
result = append_to
for struct in self.model:
result[struct.name] = getattr(mymodule, struct.name)
return result
def _make_header(self, _file):
_file.write("""
'''
THIS FILE IS AUTOGENERATED, DO NOT EDIT!!!
'''
from datetime import datetime
import uuid
from opcua import ua
""")
def set_typeid(self, name, typeid):
for struct in self.model:
if struct.name == name:
struct.typeid = typeid
return
def load_type_definitions(server, nodes=None):
"""
    Download the XML from the given variable nodes defining custom structures.
    If no nodes are given, attempts to import variables from all nodes under
    "0:OPC Binary".
    The code is generated and imported on the fly. If you know the structures
    are not going to be modified, it might be interesting to copy the generated files
    and include them in your code.
"""
if nodes is None:
nodes = []
for desc in server.nodes.opc_binary.get_children_descriptions():
if desc.BrowseName != ua.QualifiedName("Opc.Ua"):
nodes.append(server.get_node(desc.NodeId))
structs_dict = {}
generators = []
for node in nodes:
xml = node.get_value()
xml = xml.decode("utf-8")
generator = StructGenerator()
generators.append(generator)
generator.make_model_from_string(xml)
# generate and execute new code on the fly
generator.get_python_classes(structs_dict)
        # same but using a file that is imported. This can be useful for debugging the library
        #name = node.get_browse_name().Name
        # Make sure structure names do not contain characters that cannot be used in Python class file names
#name = _clean_name(name)
#name = "structures_" + node.get_browse_name().Name
#generator.save_and_import(name + ".py", append_to=structs_dict)
# register classes
        # every child of our node should represent a class
for ndesc in node.get_children_descriptions():
ndesc_node = server.get_node(ndesc.NodeId)
ref_desc_list = ndesc_node.get_references(refs=ua.ObjectIds.HasDescription, direction=ua.BrowseDirection.Inverse)
            if ref_desc_list:  # some servers put extra things here
name = _clean_name(ndesc.BrowseName.Name)
                if name not in structs_dict:
logger.warning("%s is found as child of binary definition node but is not found in xml", name)
continue
nodeid = ref_desc_list[0].NodeId
ua.register_extension_object(name, nodeid, structs_dict[name])
                # save the typeid if the user wants to create a static file for the type definition
generator.set_typeid(name, nodeid.to_string())
for key, val in structs_dict.items():
        if isinstance(val, EnumMeta) and key != "IntEnum":
setattr(ua, key, val)
return generators, structs_dict
def _clean_name(name):
"""
Remove characters that might be present in OPC UA structures
but cannot be part of of Python class names
"""
name = re.sub(r'\W+', '_', name)
name = re.sub(r'^[0-9]+', r'_\g<0>', name)
return name
def _generate_python_class(model, env=None):
"""
generate Python code and execute in a new environment
return a dict of structures {name: class}
    Remark: since the code is generated on the fly, the stack trace is not available
    in case of error and debugging is very hard...
"""
if env is None:
env = {}
# Add the required libraries to dict
if "ua" not in env:
env['ua'] = ua
if "datetime" not in env:
env['datetime'] = datetime
if "uuid" not in env:
env['uuid'] = uuid
if "enum" not in env:
env['IntEnum'] = IntEnum
# generate classes one by one and add them to dict
for element in model:
code = element.get_code()
exec(code, env)
return env
def load_enums(server, env=None):
"""
Read enumeration data types on server and generate python Enums in ua scope for them
"""
model = []
nodes = server.nodes.enum_data_type.get_children()
if env is None:
env = ua.__dict__
for node in nodes:
name = node.get_browse_name().Name
try:
c = _get_enum_strings(name, node)
except ua.UaError as ex:
try:
c = _get_enum_values(name, node)
except ua.UaError as ex:
logger.info("Node %s, %s under DataTypes/Enumeration, does not seem to have a child called EnumString or EumValue: %s", name, node, ex)
continue
if not hasattr(ua, c.name):
model.append(c)
return _generate_python_class(model, env=env)
def _get_enum_values(name, node):
def_node = node.get_child("0:EnumValues")
val = def_node.get_value()
c = EnumType(name)
c.fields = [EnumeratedValue(enumval.DisplayName.Text, enumval.Value) for enumval in val]
return c
def _get_enum_strings(name, node):
def_node = node.get_child("0:EnumStrings")
val = def_node.get_value()
c = EnumType(name)
c.fields = [EnumeratedValue(st.Text, idx) for idx, st in enumerate(val)]
return c
| lgpl-3.0 | -3,811,595,053,384,737,300 | 30.622535 | 152 | 0.576786 | false |
albertoriva/bioscripts | simplediff.py | 1 | 5294 | #!/usr/bin/env python
import sys
import csv
import math
def parseSlice(s):
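    # Converts a 1-based column spec into a Python slice,
    # e.g. "5-7" -> slice(4, 7) and "5" -> slice(4, 5).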
if "-" in s:
parts = s.split("-")
return slice(int(parts[0]) - 1, int(parts[1]))
else:
p = int(s)
return slice(p-1, p)
class SimpleDiff():
filename = None
outfile = "/dev/stdout"
labels = None
colname1 = "avg1"
colname2 = "avg2"
alpha = 1.0
slice1 = None
slice2 = None
def process(self, f, out, header=True):
nin = 0
nout = 0
na = self.slice1.stop - self.slice1.start
nb = self.slice2.stop - self.slice2.start
if header:
f.readline()
c = csv.reader(f, delimiter='\t')
for line in c:
nin += 1
data1 = line[self.slice1]
data2 = line[self.slice2]
data1 = [ float(v) for v in data1 ]
data2 = [ float(v) for v in data2 ]
amin = min(data1)
amax = max(data1)
bmin = min(data2)
bmax = max(data2)
if amin > bmax:
# A over B
r1 = amax - amin
r2 = bmax - bmin
d = self.alpha * max(r1, r2)
if (amin - bmax) > d:
avg1 = sum(data1) / na
avg2 = sum(data2) / nb
if avg1 > 0 and avg2 > 0:
out.write("{}\t{}\t{}\t{}\n".format(line[0], avg1, avg2, math.log(avg1/avg2, 2.0)))
nout += 1
elif bmin > amax:
# B over A
r1 = amax - amin
r2 = bmax - bmin
d = self.alpha * max(r1, r2)
if (bmin - amax) > d:
avg1 = sum(data1) / na
avg2 = sum(data2) / nb
if avg1 > 0 and avg2 > 0:
out.write("{}\t{}\t{}\t{}\n".format(line[0], avg1, avg2, math.log(avg1/avg2, 2.0)))
nout += 1
return (nin, nout)
def parseArgs(self, args):
prev = ""
if "-h" in args or "--help" in args:
return self.usage()
for a in args:
if prev == "-a":
self.alpha = float(a)
prev = ""
elif prev == "-o":
self.outfile = a
prev = ""
elif prev == "-l":
self.labels = parseSlice(a)
prev = ""
elif prev == "-c1":
self.colname1 = a
prev = ""
elif prev == "-c2":
self.colname2 = a
prev = ""
elif a in ["-a", "-o", "-l", "-c1", "-c2"]:
prev = a
elif self.filename is None:
self.filename = a
elif self.slice1 is None:
self.slice1 = parseSlice(a)
elif self.slice2 is None:
self.slice2 = parseSlice(a)
if (self.filename and self.slice1 and self.slice2):
return True
else:
return self.usage()
def usage(self):
sys.stdout.write("""Usage: simplediff.py [options] exprfile slice1 slice2
This program performs "simple" differential analysis on gene expression data. `exprfile'
should be a file containing gene expression values with genes on the rows and samples
in the columns. `slice1' and `slice2' should be expressions of the form P-Q indicating
which columns contain the data for the two conditions being compared (e.g., if the first
condition is represented by three columns starting at column 5, use 5-7).
Options:
-a A | Set the alpha parameter to A (see below). Default: {}.
-o O | Write output to file O.
-c1 C | Set label for average of condition 1 values to C. Default: {}.
  -c2 C | Set label for average of condition 2 values to C. Default: {}.
A gene is considered to be differentially expressed between two groups of samples (A and B)
if the two following conditions hold:
* The two sets of expression values are totally separated, ie:
  the minimum expression value for the samples in A is larger than the maximum in B
  -OR-
  the minimum expression value for the samples in B is larger than the maximum in A
* The distance between the two sets of values (the difference between the maximum of
the "lower" one and the minimum of the "upper" one) is larger than the largest of the
two ranges of values in A and B, multiplied by the alpha parameter.
Example: A = {{10, 12, 16}}
B = {{20, 21, 22}}
The two sets are separated, because min(B) > max(A). The distance between the two sets is
4 (20-16), range(A) = 6, range(B) = 2. If alpha is set to 1.0 (the default) then this
gene would NOT be considered significantly different, because the largest range is 6,
and 6 * alpha > 4. If alpha was set to 0.5, the gene would be called as different.
""".format(self.alpha, self.colname1, self.colname2))
def run(self):
with open(self.outfile, "w") as out:
with open(self.filename, "r") as f:
(nin, nout) = self.process(f, out)
sys.stderr.write("{} in, {} out\n".format(nin, nout))
if __name__ == "__main__":
SD = SimpleDiff()
if SD.parseArgs(sys.argv[1:]):
SD.run()
| gpl-3.0 | 6,085,323,893,334,041,000 | 34.293333 | 107 | 0.525123 | false |
Erotemic/ibeis | dev/_scripts/_timeits/time_uuids.py | 1 | 4117 | # -*- coding: utf-8 -*-
"""
Script to help time deterministic uuid creation
"""
from __future__ import absolute_import, division, print_function
from six.moves import range, builtins
import os
import multiprocessing
import time
from PIL import Image
import hashlib
import numpy as np
import uuid
from utool._internal.meta_util_six import get_funcname
# My data getters
from vtool_ibeis.tests import grabdata
elephant = grabdata.get_testimg_path('elephant.jpg')
lena = grabdata.get_testimg_path('lena.jpg')
zebra = grabdata.get_testimg_path('zebra.jpg')
jeff = grabdata.get_testimg_path('jeff.png')
gpath = zebra
if not os.path.exists(gpath):
gpath = zebra
try:
getattr(builtins, 'profile')
__LINE_PROFILE__ = True
except AttributeError:
__LINE_PROFILE__ = False
def profile(func):
return func
@profile
def get_image_uuid(img_bytes_):
# hash the bytes using sha1
bytes_sha1 = hashlib.sha1(img_bytes_)
hashbytes_20 = bytes_sha1.digest()
# sha1 produces 20 bytes, but UUID requires 16 bytes
hashbytes_16 = hashbytes_20[0:16]
uuid_ = uuid.UUID(bytes=hashbytes_16)
return uuid_
@profile
def make_uuid_PIL_bytes(gpath):
pil_img = Image.open(gpath, 'r') # NOQA
# Read PIL image data
img_bytes_ = pil_img.tobytes()
uuid_ = get_image_uuid(img_bytes_)
return uuid_
@profile
def make_uuid_NUMPY_bytes(gpath):
pil_img = Image.open(gpath, 'r') # NOQA
# Read PIL image data
np_img = np.asarray(pil_img)
np_flat = np_img.ravel()
img_bytes_ = np_flat.tostring()
uuid_ = get_image_uuid(img_bytes_)
return uuid_
@profile
def make_uuid_NUMPY_STRIDE_16_bytes(gpath):
pil_img = Image.open(gpath, 'r') # NOQA
# Read PIL image data
np_img = np.asarray(pil_img)
np_flat = np_img.ravel()[::16]
img_bytes_ = np_flat.tostring()
uuid_ = get_image_uuid(img_bytes_)
return uuid_
@profile
def make_uuid_NUMPY_STRIDE_64_bytes(gpath):
pil_img = Image.open(gpath, 'r') # NOQA
# Read PIL image data
img_bytes_ = np.asarray(pil_img).ravel()[::64].tostring()
uuid_ = get_image_uuid(img_bytes_)
return uuid_
@profile
def make_uuid_CONTIG_NUMPY_bytes(gpath):
pil_img = Image.open(gpath, 'r') # NOQA
# Read PIL image data
np_img = np.asarray(pil_img)
np_flat = np_img.ravel().tostring()
np_contig = np.ascontiguousarray(np_flat)
img_bytes_ = np_contig.tostring()
uuid_ = get_image_uuid(img_bytes_)
return uuid_
@profile
def make_uuid_CONTIG_NUMPY_STRIDE_16_bytes(gpath):
pil_img = Image.open(gpath, 'r') # NOQA
# Read PIL image data
np_img = np.asarray(pil_img)
np_contig = np.ascontiguousarray(np_img.ravel()[::16])
img_bytes_ = np_contig.tostring()
uuid_ = get_image_uuid(img_bytes_)
return uuid_
@profile
def make_uuid_CONTIG_NUMPY_STRIDE_64_bytes(gpath):
pil_img = Image.open(gpath, 'r') # NOQA
# Read PIL image data
img_bytes_ = np.ascontiguousarray(np.asarray(pil_img).ravel()[::64]).tostring()
uuid_ = get_image_uuid(img_bytes_)
return uuid_
if __name__ == '__main__':
multiprocessing.freeze_support() # win32
test_funcs = [
make_uuid_PIL_bytes,
make_uuid_NUMPY_bytes,
make_uuid_NUMPY_STRIDE_16_bytes,
make_uuid_NUMPY_STRIDE_64_bytes,
make_uuid_CONTIG_NUMPY_bytes,
make_uuid_CONTIG_NUMPY_STRIDE_16_bytes,
make_uuid_CONTIG_NUMPY_STRIDE_64_bytes,
]
func_strs = ', '.join([get_funcname(func) for func in test_funcs])
# cool trick
setup = 'from __main__ import (gpath, %s) ' % (func_strs,)
number = 10
for func in test_funcs:
funcname = get_funcname(func)
print('Running: %s' % funcname)
if __LINE_PROFILE__:
start = time.time()
for _ in range(number):
func(gpath)
total_time = time.time() - start
else:
import timeit
stmt = '%s(gpath)' % funcname
total_time = timeit.timeit(stmt=stmt, setup=setup, number=number)
print('timed: %r seconds in %s' % (total_time, funcname))
| apache-2.0 | 6,607,357,199,787,421,000 | 26.817568 | 83 | 0.634685 | false |
bjodah/chempy | benchmarks/benchmarks/equilibria.py | 1 | 1079 | import numpy as np
from chempy.tests.ammonical_cupric_solution import get_ammonical_cupric_eqsys
class TimeEqsys:
def setup(self):
self.eqsys, self.c0 = get_ammonical_cupric_eqsys()
def time_roots(self):
x, new_inits, success = self.eqsys.roots(self.c0, np.logspace(-3, 0, 50), "NH3")
assert all(success)
def time_roots_symengine(self):
from symengine import Lambdify
x, new_inits, success = self.eqsys.roots(
self.c0,
np.logspace(-3, 0, 50),
"NH3",
lambdify=Lambdify,
lambdify_unpack=False,
)
assert all(success)
def time_roots_no_propagate(self):
x, new_inits, success = self.eqsys.roots(
self.c0, np.logspace(-3, 0, 50), "NH3", propagate=False
)
assert all(success)
if __name__ == "__main__":
import time
te = TimeEqsys()
te.setup()
# t1 = time.time()
# te.time_roots_symengine()
# print(time.time()-t1)
t1 = time.time()
te.time_roots()
print(time.time() - t1)
| bsd-2-clause | -5,027,225,692,750,565,000 | 23.522727 | 88 | 0.570899 | false |
hgamboa/novainstrumentation | novainstrumentation/peakdelta.py | 1 | 2443 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 16:20:03 2013
@author: utilizador
"""
import sys
from numpy import NaN, Inf, arange, isscalar, array, asarray
##############################################################################
########################### Peaks Detection ##################################
##############################################################################
def peakdelta(v, delta, x=None):
"""
Returns two arrays
function [maxtab, mintab]=peakdelta(v, delta, x)
%PEAKDET Detect peaks in a vector
% [MAXTAB, MINTAB] = peakdelta(V, DELTA) finds the local
% maxima and minima ("peaks") in the vector V.
    % MAXTAB and MINTAB consist of two columns. Column 1
% contains indices in V, and column 2 the found values.
%
% With [MAXTAB, MINTAB] = peakdelta(V, DELTA, X) the indices
% in MAXTAB and MINTAB are replaced with the corresponding
% X-values.
%
% A point is considered a maximum peak if it has the maximal
% value, and was preceded (to the left) by a value lower by
% DELTA.
% Eli Billauer, 3.4.05 (Explicitly not copyrighted).
% This function is released to the public domain; Any use is allowed.
"""
maxtab = []
mintab = []
if x is None:
x = arange(len(v))
v = asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = Inf, -Inf
mnpos, mxpos = NaN, NaN
lookformax = True
for i in arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx - delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn + delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return array(maxtab), array(mintab)
| mit | -1,012,318,517,677,826,700 | 27.792683 | 80 | 0.462137 | false |
google/report2bq | application/classes/report_type_test.py | 1 | 1637 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from classes.report_type import Type
class TypeTest(unittest.TestCase):
def setUp(self):
pass
def test_valid_current_enum(self):
self.assertEqual(Type.DV360, Type('dv360'))
def test_valid_enum_rewrite(self):
self.assertEqual([Type.DV360, Type.CM, Type.GA360_RPT],
[ Type(T) for T in ['dbm', 'dcm', 'ga360'] ])
def test_valid_internals(self):
self.assertEqual([Type._ADMIN, Type._JOBS, Type._RUNNING],
[ Type(T) for T in ['administration', 'jobs', 'running'] ])
def test_unknown(self):
self.assertEqual([Type._UNKNOWN, Type._UNKNOWN],
[ Type(T) for T in ['foo', None] ])
def test_runner(self):
self.assertEqual('run-dv360-123', Type.DV360.runner('123'))
self.assertEqual(None, Type._ADMIN.runner('123'))
def test_str(self):
self.assertEqual(['dv360', 'cm', 'administration', 'unknown'],
[ T.value for T in [
Type.DV360, Type.CM, Type._ADMIN, Type(None),
] ])
| apache-2.0 | -4,314,635,563,280,956,400 | 33.829787 | 80 | 0.645082 | false |
702nADOS/sumo | tools/xml/xml2csv.py | 1 | 10954 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file xml2csv.py
@author Jakob Erdmann
@author Michael Behrisch
@author Laura Bieker
@date 2013-12-08
@version $Id: xml2csv.py 22608 2017-01-17 06:28:54Z behrisch $
Convert hierarchical xml files to csv. This only makes sense if the hierarchy has low depth.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2013-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import socket
import collections
from optparse import OptionParser
import xml.sax
try:
import lxml.etree
import lxml.sax
haveLxml = True
except ImportError:
haveLxml = False
import xsd
PY3 = sys.version_info > (3,)
class NestingHandler(xml.sax.handler.ContentHandler):
"""A handler which knows the current nesting of tags"""
def __init__(self):
self.tagstack = []
def startElement(self, name, attrs):
self.tagstack.append(name)
def endElement(self, name):
self.tagstack.pop()
def depth(self):
# do not count the root element
return len(self.tagstack) - 1
class AttrFinder(NestingHandler):
def __init__(self, xsdFile, source, split):
NestingHandler.__init__(self)
self.tagDepths = {} # tag -> depth of appearance
self.tagAttrs = collections.defaultdict(
collections.OrderedDict) # tag -> set of attrs
self.renamedAttrs = {} # (name, attr) -> renamedAttr
self.attrs = {}
self.depthTags = {} # child of root: depth of appearance -> tag list
self.rootDepth = 1 if split else 0
if xsdFile:
self.xsdStruc = xsd.XsdStructure(xsdFile)
if split:
for ele in self.xsdStruc.root.children:
self.attrs[ele.name] = []
self.depthTags[ele.name] = [[]]
self.recursiveAttrFind(ele, ele, 1)
else:
self.attrs[self.xsdStruc.root.name] = []
self.depthTags[self.xsdStruc.root.name] = []
self.recursiveAttrFind(
self.xsdStruc.root, self.xsdStruc.root, 0)
else:
self.xsdStruc = None
xml.sax.parse(source, self)
def addElement(self, root, name, depth):
if name not in self.tagDepths:
if len(self.depthTags[root]) == depth:
self.tagDepths[name] = depth
self.depthTags[root].append([])
self.depthTags[root][depth].append(name)
return True
if name not in self.depthTags[root][depth]:
print("Ignoring tag %s at depth %s" %
(name, depth), file=sys.stderr)
return False
def recursiveAttrFind(self, root, currEle, depth):
if not self.addElement(root.name, currEle.name, depth):
return
for a in currEle.attributes:
if ":" not in a.name: # no namespace support yet
self.tagAttrs[currEle.name][a.name] = a
anew = "%s_%s" % (currEle.name, a.name)
self.renamedAttrs[(currEle.name, a.name)] = anew
attrList = self.attrs[root.name]
if anew in attrList:
del attrList[attrList.index(anew)]
attrList.append(anew)
for ele in currEle.children:
self.recursiveAttrFind(root, ele, depth + 1)
def startElement(self, name, attrs):
NestingHandler.startElement(self, name, attrs)
if self.depth() >= self.rootDepth:
root = self.tagstack[self.rootDepth]
if self.depth() == self.rootDepth and root not in self.attrs:
self.attrs[root] = []
self.depthTags[root] = [[]] * self.rootDepth
if not self.addElement(root, name, self.depth()):
return
# collect attributes
for a in sorted(list(attrs.keys())):
if a not in self.tagAttrs[name] and ":" not in a:
self.tagAttrs[name][a] = xsd.XmlAttribute(a)
if not (name, a) in self.renamedAttrs:
anew = "%s_%s" % (name, a)
self.renamedAttrs[(name, a)] = anew
self.attrs[root].append(anew)
class CSVWriter(NestingHandler):
def __init__(self, attrFinder, options):
NestingHandler.__init__(self)
self.attrFinder = attrFinder
self.options = options
self.currentValues = collections.defaultdict(lambda: "")
self.haveUnsavedValues = False
self.outfiles = {}
self.rootDepth = 1 if options.split else 0
for root in sorted(attrFinder.depthTags):
if len(attrFinder.depthTags) == 1:
if not options.output:
options.output = os.path.splitext(options.source)[0]
if not options.output.isdigit() and not options.output.endswith(".csv"):
options.output += ".csv"
self.outfiles[root] = getOutStream(options.output)
else:
if options.output:
outfilename = options.output + "%s.csv" % root
else:
outfilename = os.path.splitext(
options.source)[0] + "%s.csv" % root
self.outfiles[root] = open(outfilename, 'w')
if (PY3):
self.outfiles[root].write(str.encode(
options.separator.join(map(self.quote, attrFinder.attrs[root])) + "\n"))
else:
self.outfiles[root].write(
options.separator.join(map(self.quote, attrFinder.attrs[root])) + "\n")
def quote(self, s):
return "%s%s%s" % (self.options.quotechar, s, self.options.quotechar)
# the following two are needed for the lxml saxify to work
def startElementNS(self, name, qname, attrs):
self.startElement(qname, attrs)
def endElementNS(self, name, qname):
self.endElement(qname)
def startElement(self, name, attrs):
NestingHandler.startElement(self, name, attrs)
if self.depth() >= self.rootDepth:
root = self.tagstack[self.rootDepth]
# print("start", name, root, self.depth(), self.attrFinder.depthTags[root][self.depth()])
if name in self.attrFinder.depthTags[root][self.depth()]:
for a, v in attrs.items():
if isinstance(a, tuple):
a = a[1]
# print(a, dict(self.attrFinder.tagAttrs[name]))
if a in self.attrFinder.tagAttrs[name]:
if self.attrFinder.xsdStruc:
enum = self.attrFinder.xsdStruc.getEnumeration(
self.attrFinder.tagAttrs[name][a].type)
if enum:
v = enum.index(v)
a2 = self.attrFinder.renamedAttrs.get((name, a), a)
self.currentValues[a2] = v
self.haveUnsavedValues = True
def endElement(self, name):
if self.depth() >= self.rootDepth:
root = self.tagstack[self.rootDepth]
# print("end", name, root, self.depth(), self.attrFinder.depthTags[root][self.depth()], self.haveUnsavedValues)
if name in self.attrFinder.depthTags[root][self.depth()]:
if self.haveUnsavedValues:
if(PY3):
self.outfiles[root].write(str.encode(self.options.separator.join(
[self.quote(self.currentValues[a]) for a in self.attrFinder.attrs[root]]) + "\n"))
else:
self.outfiles[root].write(self.options.separator.join(
[self.quote(self.currentValues[a]) for a in self.attrFinder.attrs[root]]) + "\n")
self.haveUnsavedValues = False
for a in self.attrFinder.tagAttrs[name]:
a2 = self.attrFinder.renamedAttrs.get((name, a), a)
del self.currentValues[a2]
NestingHandler.endElement(self, name)
def getSocketStream(port, mode='rb'):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("localhost", port))
s.listen(1)
conn, addr = s.accept()
return conn.makefile(mode)
def getOutStream(output):
if output.isdigit():
return getSocketStream(int(output), 'wb')
return open(output, 'wb')
def get_options():
optParser = OptionParser(
usage=os.path.basename(sys.argv[0]) + " [<options>] <input_file_or_port>")
optParser.add_option("-s", "--separator", default=";",
help="separating character for fields")
optParser.add_option("-q", "--quotechar", default='',
help="quoting character for fields")
optParser.add_option("-x", "--xsd", help="xsd schema to use")
optParser.add_option("-a", "--validation", action="store_true",
default=False, help="enable schema validation")
optParser.add_option("-p", "--split", action="store_true",
default=False, help="split in different files for the first hierarchy level")
optParser.add_option("-o", "--output", help="base name for output")
options, args = optParser.parse_args()
if len(args) != 1:
optParser.print_help()
sys.exit()
if options.validation and not haveLxml:
print("lxml not available, skipping validation", file=sys.stderr)
options.validation = False
if args[0].isdigit():
if not options.xsd:
print("a schema is mandatory for stream parsing", file=sys.stderr)
sys.exit()
options.source = getSocketStream(int(args[0]))
else:
options.source = args[0]
if options.output and options.output.isdigit() and options.split:
print(
"it is not possible to use splitting together with stream output", file=sys.stderr)
sys.exit()
return options
def main():
options = get_options()
# get attributes
attrFinder = AttrFinder(options.xsd, options.source, options.split)
# write csv
handler = CSVWriter(attrFinder, options)
if options.validation:
schema = lxml.etree.XMLSchema(file=options.xsd)
parser = lxml.etree.XMLParser(schema=schema)
tree = lxml.etree.parse(options.source, parser)
lxml.sax.saxify(tree, handler)
else:
xml.sax.parse(options.source, handler)
if __name__ == "__main__":
main()
| gpl-3.0 | -2,313,956,629,142,083,600 | 38.545126 | 122 | 0.576228 | false |
joostvdg/jenkins-job-builder | jenkins_jobs/cli/subcommand/base.py | 1 | 2294 | #!/usr/bin/env python
# Copyright (C) 2015 Wayne Warren
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class BaseSubCommand(object):
"""Base class for Jenkins Job Builder subcommands, intended to allow
subcommands to be loaded as stevedore extensions by third party users.
"""
def __init__(self):
pass
@abc.abstractmethod
def parse_args(self, subparsers, recursive_parser):
"""Define subcommand arguments.
:param subparsers
A sub parser object. Implementations of this method should
create a new subcommand parser by calling
parser = subparsers.add_parser('command-name', ...)
This will return a new ArgumentParser object; all other arguments to
this method will be passed to the argparse.ArgumentParser constructor
for the returned object.
"""
@abc.abstractmethod
def execute(self, config):
"""Execute subcommand behavior.
:param config
JJBConfig object containing final configuration from config files,
command line arguments, and environment variables.
"""
@staticmethod
def parse_option_recursive_exclude(parser):
"""Add '--recursive' and '--exclude' arguments to given parser.
"""
parser.add_argument(
'-r', '--recursive',
action='store_true',
dest='recursive',
default=False,
help='''look for yaml files recursively''')
parser.add_argument(
'-x', '--exclude',
dest='exclude',
action='append',
default=[],
help='''paths to exclude when using recursive search, uses standard
globbing.''')
| apache-2.0 | 1,903,094,280,295,874,600 | 33.238806 | 79 | 0.649085 | false |