code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2017 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.contrib.auth.models import User, Group, Permission
from django.core.exceptions import ValidationError
from django.core.management import call_command
from django.test import TestCase
from django.utils.encoding import force_text
from weblate.lang.models import Language
from weblate.trans.models import Project, Translation, Comment
from weblate.permissions.data import DEFAULT_GROUPS, ADMIN_PERMS
from weblate.permissions.models import AutoGroup, GroupACL
from weblate.permissions.helpers import (
has_group_perm, can_delete_comment, can_edit, can_author_translation,
)
from weblate.trans.tests.test_models import ModelTestCase
class PermissionsTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(
'user', '[email protected]', 'x'
)
self.owner = User.objects.create_user(
'owner', '[email protected]', 'x'
)
self.project = Project.objects.create(slug='test')
self.project.add_user(self.owner, '@Administration')
def test_owner_owned(self):
self.assertTrue(
has_group_perm(
self.owner, 'trans.author_translation', project=self.project
)
)
def test_owner_no_perm(self):
self.assertFalse(
has_group_perm(
self.owner, 'trans.delete_project', project=self.project
)
)
def test_owner_user(self):
self.assertFalse(
has_group_perm(
self.user, 'trans.author_translation', project=self.project
)
)
def test_check_owner(self):
self.assertTrue(
has_group_perm(
self.owner, 'trans.author_translation', project=self.project
)
)
def test_check_user(self):
self.assertFalse(
has_group_perm(
self.user, 'trans.author_translation', project=self.project
)
)
def test_delete_comment_owner(self):
comment = Comment(project=self.project)
self.assertTrue(can_delete_comment(self.owner, comment))
def test_delete_comment_user(self):
comment = Comment(project=self.project)
self.assertFalse(can_delete_comment(self.user, comment))
def test_cache(self):
comment = Comment(project=self.project)
key = ('_can_delete_comment', self.project.get_full_slug())
self.assertTrue(not hasattr(self.user, 'acl_permissions_cache'))
self.assertFalse(can_delete_comment(self.user, comment))
self.assertFalse(self.user.acl_permissions_cache[key])
self.user.acl_permissions_cache[key] = True
self.assertTrue(can_delete_comment(self.user, comment))
def test_default_groups(self):
"""Check consistency of default permissions.
- The admin permissions have to contain all used permissions
"""
for group in DEFAULT_GROUPS:
self.assertEqual(
DEFAULT_GROUPS[group] - ADMIN_PERMS,
set()
)
class GroupACLTest(ModelTestCase):
PERMISSION = "trans.save_translation"
def setUp(self):
super(GroupACLTest, self).setUp()
self.user = User.objects.create_user(
"user", '[email protected]', 'x'
)
self.privileged = User.objects.create_user(
"privileged", '[email protected]', 'x'
)
self.group = Group.objects.create(name="testgroup")
self.project = self.subproject.project
self.subproject.translation_set.all().delete()
self.language = Language.objects.get_default()
self.trans = Translation.objects.create(
subproject=self.subproject, language=self.language,
filename="this/is/not/a.template"
)
app, perm = self.PERMISSION.split('.')
self.permission = Permission.objects.get(
codename=perm, content_type__app_label=app
)
self.group.permissions.add(self.permission)
self.privileged.groups.add(self.group)
def test_acl_lockout(self):
"""Basic sanity check.
Group ACL set on a subproject should only allow members of
the marked group to edit it.
"""
self.assertTrue(can_edit(self.user, self.trans, self.PERMISSION))
self.assertTrue(can_edit(self.privileged, self.trans, self.PERMISSION))
acl = GroupACL.objects.create(subproject=self.subproject)
acl.groups.add(self.group)
self.clear_permission_cache()
self.assertTrue(can_edit(self.privileged, self.trans, self.PERMISSION))
self.assertFalse(can_edit(self.user, self.trans, self.PERMISSION))
def test_acl_overlap(self):
"""ACL overlap test.
When two ACLs can apply to a translation object, only the most
specific one should apply.
"""
acl_lang = GroupACL.objects.create(language=self.language)
acl_lang.groups.add(self.group)
self.assertTrue(
can_edit(self.privileged, self.trans, self.PERMISSION))
acl_sub = GroupACL.objects.create(subproject=self.subproject)
self.clear_permission_cache()
self.assertFalse(
can_edit(self.privileged, self.trans, self.PERMISSION))
acl_sub.groups.add(self.group)
self.clear_permission_cache()
self.assertTrue(
can_edit(self.privileged, self.trans, self.PERMISSION))
def test_acl_str(self):
acl = GroupACL()
self.assertIn(
'unspecified', force_text(acl)
)
acl.language = self.language
self.assertIn(
'language=English', force_text(acl)
)
acl.subproject = self.subproject
self.assertIn(
'subproject=Test/Test', force_text(acl)
)
acl.subproject = None
acl.project = self.project
self.assertIn(
'project=Test', force_text(acl)
)
def test_acl_clean(self):
acl = GroupACL()
self.assertRaises(
ValidationError,
acl.clean
)
acl.project = self.project
acl.subproject = self.subproject
acl.save()
self.assertIsNone(acl.project)
def test_acl_project(self):
"""Basic sanity check for project-level actions.
When a Group ACL is set for a project, and only for a project,
it should apply to project-level actions on that project.
"""
acl = GroupACL.objects.get(project=self.project)
acl.groups.add(self.group)
permission = Permission.objects.get(
codename='author_translation', content_type__app_label='trans'
)
acl.permissions.add(permission)
self.group.permissions.add(permission)
self.assertFalse(
can_author_translation(self.user, self.project)
)
self.assertTrue(
can_author_translation(self.privileged, self.project)
)
def test_affects_unrelated(self):
"""Unrelated objects test.
If I set an ACL on an object, it should not affect objects
that it doesn't match. (in this case, a different language)
"""
lang_cs = Language.objects.get(code='cs')
lang_de = Language.objects.get(code='de')
trans_cs = Translation.objects.create(
subproject=self.subproject, language=lang_cs,
filename="this/is/not/a.template"
)
trans_de = Translation.objects.create(
subproject=self.subproject, language=lang_de,
filename="this/is/not/a.template"
)
acl = GroupACL.objects.create(language=lang_cs)
acl.groups.add(self.group)
self.assertTrue(can_edit(self.privileged, trans_cs, self.PERMISSION))
self.assertFalse(can_edit(self.user, trans_cs, self.PERMISSION))
self.assertTrue(can_edit(self.privileged, trans_de, self.PERMISSION))
self.assertTrue(can_edit(self.user, trans_de, self.PERMISSION))
def test_affects_partial_match(self):
"""Partial ACL match test.
If I set an ACL on two criteria, e.g., subproject and language,
it should not affect objects that only match one of the criteria.
"""
lang_cs = Language.objects.get(code='cs')
lang_de = Language.objects.get(code='de')
trans_cs = Translation.objects.create(
subproject=self.subproject, language=lang_cs,
filename="this/is/not/a.template"
)
trans_de = Translation.objects.create(
subproject=self.subproject, language=lang_de,
filename="this/is/not/a.template"
)
acl = GroupACL.objects.create(
language=lang_cs,
subproject=self.subproject
)
acl.groups.add(self.group)
self.assertTrue(can_edit(self.privileged, trans_cs, self.PERMISSION))
self.assertFalse(can_edit(self.user, trans_cs, self.PERMISSION))
self.assertTrue(can_edit(self.privileged, trans_de, self.PERMISSION))
self.assertTrue(can_edit(self.user, trans_de, self.PERMISSION))
def clear_permission_cache(self):
"""Clear permission cache.
This is necessary when testing interaction of the built-in permissions
mechanism and Group ACL. The built-in mechanism will cache results
of `has_perm` and friends, but these can be affected by the Group ACL
lockout. Usually the cache will get cleared on every page request,
but here we need to do it manually.
"""
attribs = (
'_perm_cache',
'_user_perm_cache',
'_group_perm_cache',
'acl_permissions_cache',
'acl_permissions_owner',
'acl_permissions_groups',
)
for cache in attribs:
for user in (self.user, self.privileged):
if hasattr(user, cache):
delattr(user, cache)
def test_group_locked(self):
"""Limited privilege test.
Once a group is used in a GroupACL, it is said to be "locked".
Privileges from the locked group should not apply outside GroupACL.
I.e., if I gain "author_translation" privilege through membership
in a "privileged_group", applicable to Czech language, this should
not apply to any other language.
"""
lang_cs = Language.objects.get(code='cs')
lang_de = Language.objects.get(code='de')
trans_cs = Translation.objects.create(
subproject=self.subproject, language=lang_cs,
filename="this/is/not/a.template"
)
trans_de = Translation.objects.create(
subproject=self.subproject, language=lang_de,
filename="this/is/not/a.template"
)
perm_name = 'trans.author_translation'
permission = Permission.objects.get(
codename='author_translation', content_type__app_label='trans'
)
# Avoid conflict with automatic GroupACL
self.project.groupacl_set.all()[0].permissions.remove(permission)
self.assertFalse(can_edit(self.user, trans_cs, perm_name))
self.assertFalse(can_edit(self.privileged, trans_cs, perm_name))
self.assertFalse(can_edit(self.privileged, trans_de, perm_name))
self.clear_permission_cache()
self.group.permissions.add(permission)
self.assertFalse(can_edit(self.user, trans_cs, perm_name))
self.assertTrue(can_edit(self.privileged, trans_cs, perm_name))
self.assertTrue(can_edit(self.privileged, trans_de, perm_name))
self.clear_permission_cache()
acl = GroupACL.objects.create(language=lang_cs)
acl.groups.add(self.group)
self.assertTrue(can_edit(self.privileged, trans_cs, perm_name))
self.assertFalse(can_edit(self.privileged, trans_de, perm_name))
def test_project_specific(self):
"""Project specificity test.
Project-level actions should only be affected by Group ACLs that
are specific to the project, and don't have other criteria.
E.g., if a GroupACL lists project+language, this should not give
you project-level permissions.
"""
permission = Permission.objects.get(
codename='author_translation', content_type__app_label='trans'
)
self.group.permissions.add(permission)
acl_project_lang = GroupACL.objects.create(
language=self.language,
project=self.project
)
acl_project_lang.groups.add(self.group)
self.assertFalse(has_group_perm(
self.privileged, 'trans.author_translation', project=self.project
))
acl_project_only = GroupACL.objects.get(
language=None,
project=self.project,
)
acl_project_only.groups.add(self.group)
self.clear_permission_cache()
self.assertTrue(has_group_perm(
self.privileged, 'trans.author_translation', project=self.project
))
def test_acl_not_filtered(self):
"""Basic sanity check.
Group ACL set on a subproject should only allow members of
the marked group to edit it.
"""
self.assertTrue(can_edit(self.user, self.trans, self.PERMISSION))
self.assertTrue(can_edit(self.privileged, self.trans, self.PERMISSION))
acl = GroupACL.objects.create(subproject=self.subproject)
acl.groups.add(self.group)
acl.permissions.remove(self.permission)
self.clear_permission_cache()
self.assertTrue(can_edit(self.privileged, self.trans, self.PERMISSION))
self.assertTrue(can_edit(self.user, self.trans, self.PERMISSION))
class AutoGroupTest(TestCase):
@staticmethod
def create_user():
return User.objects.create_user('test1', '[email protected]', 'pass')
def test_default(self):
user = self.create_user()
self.assertEqual(user.groups.count(), 1)
def test_none(self):
AutoGroup.objects.all().delete()
user = self.create_user()
self.assertEqual(user.groups.count(), 0)
def test_matching(self):
AutoGroup.objects.create(
match='^.*@weblate.org',
group=Group.objects.get(name='Guests')
)
user = self.create_user()
self.assertEqual(user.groups.count(), 2)
def test_nonmatching(self):
AutoGroup.objects.create(
match='^.*@example.net',
group=Group.objects.get(name='Guests')
)
user = self.create_user()
self.assertEqual(user.groups.count(), 1)
class CommandTest(TestCase):
"""Test for management commands."""
def test_setupgroups(self):
call_command('setupgroups')
group = Group.objects.get(name='Users')
self.assertTrue(
group.permissions.filter(
codename='save_translation'
).exists()
)
call_command('setupgroups', move=True)
| lem9/weblate | weblate/permissions/tests.py | Python | gpl-3.0 | 15,824 |
# -*- coding: utf-8 -*-
"""
Clustering and the EM algorithm
~~~~~~~~~~~~~~~~
Clustering
:copyright: (c) 2016 by the huaxz1986.
:license: lgpl-3.0, see LICENSE for more details.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
# from .agglomerative_clustering import test_AgglomerativeClustering,test_AgglomerativeClustering_nclusters,test_AgglomerativeClustering_linkage
# from .dbscan import test_DBSCAN,test_DBSCAN_epsilon,test_DBSCAN_min_samples
from chapters.Cluster_EM.gmm import test_GMM,test_GMM_cov_type,test_GMM_n_components
# from .kmeans import test_Kmeans,test_Kmeans_n_init,test_Kmeans_nclusters
def create_data(centers,num=100,std=0.7):
'''
Generate a dataset for clustering.
:param centers: array of cluster centres. If the centres are two-dimensional, every generated sample is two-dimensional as well.
:param num: number of samples
:param std: standard deviation of the samples within each cluster
:return: the dataset for clustering, a tuple whose first element is the sample set and whose second element is the ground-truth cluster label of each sample
'''
X, labels_true = make_blobs(n_samples=num, centers=centers, cluster_std=std)
return X,labels_true
def plot_data(*data):
'''
Plot the dataset used for clustering.
:param data: variable-length argument; a tuple whose first element is the sample set and whose second element is the ground-truth cluster labels
:return: None
'''
X,labels_true=data
labels=np.unique(labels_true)
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
colors='rgbyckm' # use a different colour for each cluster's samples
for i,label in enumerate(labels):
position=labels_true==label
ax.scatter(X[position,0],X[position,1],label="cluster %d"%label,
color=colors[i%len(colors)])
ax.legend(loc="best",framealpha=0.5)
ax.set_xlabel("X[0]")
ax.set_ylabel("Y[1]")
ax.set_title("data")
plt.show()
if __name__=='__main__':
centers=[[1,1],[2,2],[1,2],[10,20]] # centres used to generate the clusters
X,labels_true=create_data(centers,1000,0.5) # generate the dataset for clustering
# plot_data(X,labels_true) # plot the dataset used for clustering
# test_Kmeans(X,labels_true) # call the test_Kmeans function
# test_Kmeans_nclusters(X,labels_true) # call the test_Kmeans_nclusters function
# test_Kmeans_n_init(X,labels_true) # call the test_Kmeans_n_init function
# test_DBSCAN(X,labels_true) # call the test_DBSCAN function
# test_DBSCAN_epsilon(X,labels_true) # call the test_DBSCAN_epsilon function
# test_DBSCAN_min_samples(X,labels_true) # call the test_DBSCAN_min_samples function
# test_AgglomerativeClustering(X,labels_true) # call the test_AgglomerativeClustering function
# test_AgglomerativeClustering_nclusters(X,labels_true) # call the test_AgglomerativeClustering_nclusters function
# test_AgglomerativeClustering_linkage(X,labels_true) # call the test_AgglomerativeClustering_linkage function
# test_GMM(X,labels_true) # call the test_GMM function
# test_GMM_n_components(X,labels_true) # call the test_GMM_n_components function
test_GMM_cov_type(X,labels_true) # call the test_GMM_cov_type function
| huaxz1986/git_book | chapters/Cluster_EM/cluster.py | Python | gpl-3.0 | 3,182 |
from scapy.all import *
from scapy.layers import dhcp6
from time import time
def duid(ll_addr):
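# A DUID-LLT combines the interface's link-layer address with a timestamp (RFC 3315).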
return DUID_LLT(lladdr=ll_addr, timeval=time())
def ias(requested, iface, T1=None, T2=None):
return map(lambda r: __build_ia(r, iface, T1, T2), requested)
def options(requested):
return map(__build_option_by_code, requested)
def __build_ia(request, iface, T1=None, T2=None):
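# Reuse the renewal (T1) and rebind (T2) timers from the client's request unless explicit overrides are given.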
ia = request.__class__(iaid=request.iaid, T1=(T1 == None and request.T1 or T1), T2=(T2 == None and request.T2 or T2))
ia.ianaopts.append(DHCP6OptIAAddress(addr=str(iface.global_ip()), preflft=300, validlft=300))
return ia
def __build_option_by_code(code):
opt = __option_klass_by_code(code)()
if isinstance(opt, DHCP6OptClientFQDN):
opt.fqdn = 'testhost.local.'
elif isinstance(opt, DHCP6OptDNSDomains):
pass
elif isinstance(opt, DHCP6OptDNSServers):
opt.dnsservers.append('2001:500:88:200::10')
elif isinstance(opt, DHCP6OptSNTPServers):
opt.sntpservers.append('2001:500:88:200::10')
return opt
def __option_klass_by_code(code):
return getattr(dhcp6, dhcp6.dhcp6opts_by_code[code])
| mwrlabs/veripy | contrib/rfc3736/builder.py | Python | gpl-3.0 | 1,149 |
#!/usr/bin/env python
'''
@file freq_scale.py
@brief Sandbox for various frequency scale generators
@author gm
@copyright gm 2014
This file is part of Chartreuse
Chartreuse is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Chartreuse is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Chartreuse. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy
import pylab
class LogFreqScale(object):
'''
Log frequency scale
'''
def __init__(self, length, dft_length, sampling_freq):
self.length = length
self.dft_length = dft_length
self.sampling_freq = sampling_freq
self._Synthesize()
def _Synthesize(self):
'''
Actual processing function for generating the scale
'''
kLowBound = 2.0 * self.sampling_freq / self.dft_length
kHighBound = self.sampling_freq * 0.5
tmp = numpy.linspace(kLowBound, kHighBound, self.length)
tmp[0] = self.sampling_freq / (self.dft_length * (3.0 / 4.0))
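# The scale is the base-2 logarithm of frequency expressed in kHz (hence the 0.001 factor below).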
self.data = numpy.log2(tmp * 0.001)
if __name__ == "__main__":
import utilities
sampling_freq = 48000.0
dft_bins_count = 2048
low_edge = 62.5
high_edge = 1500.0
low_edge_idx = numpy.ceil(low_edge * dft_bins_count / sampling_freq)
high_edge_idx = dft_bins_count / 2 + 1
length = high_edge_idx - low_edge_idx + 1
generator = LogFreqScale(length, dft_bins_count, sampling_freq)
out_data = generator.data
print(utilities.PrintMetadata(utilities.GetMetadata(out_data)))
pylab.plot(out_data, label = "out")
pylab.legend()
pylab.show()
| G4m4/chartreuse | scripts/freq_scale.py | Python | gpl-3.0 | 2,022 |
#=======================================================================
# Author: Donovan Parks
#
# Extended error bar plot.
#
# Copyright 2011 Donovan Parks
#
# This file is part of STAMP.
#
# STAMP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# STAMP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with STAMP. If not, see <http://www.gnu.org/licenses/>.
#=======================================================================
from PyQt4 import QtGui, QtCore
import sys
import math
import numpy as np
from mpl_toolkits.axes_grid import make_axes_locatable, Size
from stamp.plugins.groups.AbstractGroupPlotPlugin import AbstractGroupPlotPlugin, TestWindow, ConfigureDialog
from stamp.plugins.groups.plots.configGUI.extendedErrorBarUI import Ui_ExtendedErrorBarDialog
from stamp.metagenomics import TableHelper
from matplotlib.patches import Rectangle
class ExtendedErrorBar(AbstractGroupPlotPlugin):
'''
Extended error bar plot.
'''
def __init__(self, preferences, parent=None):
AbstractGroupPlotPlugin.__init__(self, preferences, parent)
self.name = 'Extended error bar'
self.type = 'Statistical'
self.bSupportsHighlight = True
self.bPlotFeaturesIndividually = False
self.settings = preferences['Settings']
self.figWidth = self.settings.value('group: ' + self.name + '/width', 7.0).toDouble()[0]
self.figHeightPerRow = self.settings.value('group: ' + self.name + '/row height', 0.2).toDouble()[0]
self.sortingField = self.settings.value('group: ' + self.name + '/field', 'p-values').toString()
self.bShowBarPlot = self.settings.value('group: ' + self.name + '/sequences subplot', True).toBool()
self.bShowPValueLabels = self.settings.value('group: ' + self.name + '/p-value labels', True).toBool()
self.bShowCorrectedPvalues = self.settings.value('group: ' + self.name + '/show corrected p-values', True).toBool()
self.bCustomLimits = self.settings.value('group: ' + self.name + '/use custom limits', False).toBool()
self.minX = self.settings.value('group: ' + self.name + '/minimum', 0.0).toDouble()[0]
self.maxX = self.settings.value('group: ' + self.name + '/maximum', 1.0).toDouble()[0]
self.markerSize = self.settings.value('group: ' + self.name + '/marker size', 30).toInt()[0]
self.bShowStdDev = self.settings.value('group: ' + self.name + '/show std. dev.', False).toBool()
self.endCapSize = self.settings.value('group: ' + self.name + '/end cap size', 0.0).toInt()[0]
self.legendPos = self.settings.value('group: ' + self.name + '/legend position', -1).toInt()[0]
def mirrorProperties(self, plotToCopy):
self.name = plotToCopy.name
self.figWidth = plotToCopy.figWidth
self.figHeightPerRow = plotToCopy.figHeightPerRow
self.sortingField = plotToCopy.sortingField
self.bShowBarPlot = plotToCopy.bShowBarPlot
self.bShowPValueLabels = plotToCopy.bShowPValueLabels
self.bShowCorrectedPvalues = plotToCopy.bShowCorrectedPvalues
self.bCustomLimits = plotToCopy.bCustomLimits
self.minX = plotToCopy.minX
self.maxX = plotToCopy.maxX
self.markerSize = plotToCopy.markerSize
self.bShowStdDev = plotToCopy.bShowStdDev
self.endCapSize = plotToCopy.endCapSize
self.legendPos = plotToCopy.legendPos
def plot(self, profile, statsResults):
# *** Check if there is sufficient data to generate the plot
if len(statsResults.activeData) <= 0:
self.emptyAxis()
return
features = statsResults.getColumn('Features')
if len(features) > 200:
QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
reply = QtGui.QMessageBox.question(self, 'Continue?', 'Profile contains ' + str(len(features)) + ' features. ' +
'It may take several seconds to generate this plot. We recommend filtering your profile first. ' +
'Do you wish to continue?', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
QtGui.QApplication.instance().restoreOverrideCursor()
if reply == QtGui.QMessageBox.No:
self.emptyAxis()
return
# *** Colour of plot elements
axesColour = str(self.preferences['Axes colour'].name())
group1Colour = str(self.preferences['Group colours'][profile.groupName1].name())
group2Colour = str(self.preferences['Group colours'][profile.groupName2].name())
# *** Colour of plot elements
highlightColor = (0.9, 0.9, 0.9)
# *** Sort data
if self.sortingField == 'p-values':
statsResults.activeData = TableHelper.SortTable(statsResults.activeData,\
[statsResults.dataHeadings['pValues']], False)
elif self.sortingField == 'Effect sizes':
statsResults.activeData = TableHelper.SortTable(statsResults.activeData,\
[statsResults.dataHeadings['EffectSize']],
True, True, False)
elif self.sortingField == 'Feature labels':
statsResults.activeData = TableHelper.SortTableStrCol(statsResults.activeData,\
statsResults.dataHeadings['Features'], False)
features = statsResults.getColumn('Features') # get sorted feature labels
# *** Create lists for each quantity of interest
if statsResults.multCompCorrection.method == 'False discovery rate':
pValueTitle = 'q-value'
else:
pValueTitle = 'p-value'
if self.bShowCorrectedPvalues:
pValueLabels = statsResults.getColumnAsStr('pValuesCorrected')
if statsResults.multCompCorrection.method != 'No correction':
pValueTitle += ' (corrected)'
else:
pValueLabels = statsResults.getColumnAsStr('pValues')
effectSizes = statsResults.getColumn('EffectSize')
lowerCIs = statsResults.getColumn('LowerCI')
upperCIs = statsResults.getColumn('UpperCI')
ciTitle = ('%.3g' % (statsResults.oneMinusAlpha()*100)) + '% confidence intervals'
# *** Truncate feature labels
highlightedFeatures = list(self.preferences['Highlighted group features'])
if self.preferences['Truncate feature names']:
length = self.preferences['Length of truncated feature names']
for i in xrange(0, len(features)):
if len(features[i]) > length+3:
features[i] = features[i][0:length] + '...'
for i in xrange(0, len(highlightedFeatures)):
if len(highlightedFeatures[i]) > length+3:
highlightedFeatures[i] = highlightedFeatures[i][0:length] + '...'
# *** Check that there is at least one significant feature
if len(features) <= 0:
self.emptyAxis('No significant features')
return
# *** Adjust effect size for axis scale
dominateInSample2 = []
percentage1 = []
percentage2 = []
for i in xrange(0, len(effectSizes)):
if statsResults.bConfIntervRatio:
if effectSizes[i] < 1:
# mirror CI across y-axis
effectSizes[i] = 1.0 / effectSizes[i]
lowerCI = effectSizes[i] - (1.0 / upperCIs[i])
upperCI = (1.0 / lowerCIs[i]) - effectSizes[i]
lowerCIs[i] = lowerCI
upperCIs[i] = upperCI
dominateInSample2.append(i)
else:
lowerCIs[i] = effectSizes[i] - lowerCIs[i]
upperCIs[i] = upperCIs[i] - effectSizes[i]
else:
lowerCIs[i] = effectSizes[i] - lowerCIs[i]
upperCIs[i] = upperCIs[i] - effectSizes[i]
if effectSizes[i] < 0.0:
dominateInSample2.append(i)
# *** Set figure size
if self.legendPos == 3 or self.legendPos == 4 or self.legendPos == 8: # bottom legend
heightBottomLabels = 0.56 # inches
else:
heightBottomLabels = 0.4 # inches
heightTopLabels = 0.25
plotHeight = self.figHeightPerRow*len(features)
self.imageWidth = self.figWidth
self.imageHeight = plotHeight + heightBottomLabels + heightTopLabels
if self.imageWidth > 256 or self.imageHeight > 256:
QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.emptyAxis()
reply = QtGui.QMessageBox.question(self, 'Excessively large plot', 'The resulting plot is too large to display.')
QtGui.QApplication.instance().restoreOverrideCursor()
return
self.fig.set_size_inches(self.imageWidth, self.imageHeight)
# *** Determine width of y-axis labels
yLabelBounds = self.yLabelExtents(features, 8)
# *** Size plots which comprise the extended errorbar plot
self.fig.clear()
spacingBetweenPlots = 0.25 # inches
widthNumSeqPlot = 1.25 # inches
if self.bShowBarPlot == False:
widthNumSeqPlot = 0.0
spacingBetweenPlots = 0.0
widthPvalueLabels = 0.75 # inches
if self.bShowPValueLabels == False:
widthPvalueLabels = 0.1
yPlotOffsetFigSpace = heightBottomLabels / self.imageHeight
heightPlotFigSpace = plotHeight / self.imageHeight
xPlotOffsetFigSpace = yLabelBounds.width + 0.1 / self.imageWidth
pValueLabelWidthFigSpace = widthPvalueLabels / self.imageWidth
widthPlotFigSpace = 1.0 - pValueLabelWidthFigSpace - xPlotOffsetFigSpace
widthErrorBarPlot = widthPlotFigSpace*self.imageWidth - widthNumSeqPlot - spacingBetweenPlots
axInitAxis = self.fig.add_axes([xPlotOffsetFigSpace,yPlotOffsetFigSpace,widthPlotFigSpace,heightPlotFigSpace])
divider = make_axes_locatable(axInitAxis)
divider.get_vertical()[0] = Size.Fixed(len(features)*self.figHeightPerRow)
if self.bShowBarPlot == True:
divider.get_horizontal()[0] = Size.Fixed(widthNumSeqPlot)
axErrorbar = divider.new_horizontal(widthErrorBarPlot, pad=spacingBetweenPlots, sharey=axInitAxis)
self.fig.add_axes(axErrorbar)
else:
divider.get_horizontal()[0] = Size.Fixed(widthErrorBarPlot)
axErrorbar = axInitAxis
# *** Plot of sequences for each subsystem
if self.bShowBarPlot == True:
axNumSeq = axInitAxis
meanRelFreqSeqs1 = statsResults.getColumn('MeanRelFreq1')
meanRelFreqSeqs2 = statsResults.getColumn('MeanRelFreq2')
if self.bShowStdDev:
stdDev1 = statsResults.getColumn('StdDevRelFreq1')
stdDev2 = statsResults.getColumn('StdDevRelFreq2')
endCapSize = self.endCapSize
else:
stdDev1 = [0] * len(meanRelFreqSeqs1)
stdDev2 = [0] * len(meanRelFreqSeqs2)
endCapSize = 0
axNumSeq.barh(np.arange(len(features))+0.0, meanRelFreqSeqs1, height = 0.3, xerr=stdDev1, color=group1Colour, ecolor='black', capsize=endCapSize)
axNumSeq.barh(np.arange(len(features))-0.3, meanRelFreqSeqs2, height = 0.3, xerr=stdDev2, color=group2Colour, ecolor='black', capsize=endCapSize)
for value in np.arange(-0.5, len(features)-1, 2):
axNumSeq.axhspan(value, value+1, facecolor=highlightColor,edgecolor='none',zorder=-1)
axNumSeq.set_xlabel('Mean proportion (%)')
maxPercentage = max(max(meanRelFreqSeqs1), max(meanRelFreqSeqs2))
axNumSeq.set_xticks([0, maxPercentage])
axNumSeq.set_xlim([0, maxPercentage*1.05])
maxPercentageStr = '%.1f' % maxPercentage
axNumSeq.set_xticklabels(['0.0', maxPercentageStr])
axNumSeq.set_yticks(np.arange(len(features)))
axNumSeq.set_yticklabels(features)
axNumSeq.set_ylim([-1, len(features)])
for label in axNumSeq.get_yticklabels():
if label.get_text() in highlightedFeatures:
label.set_color('red')
for a in axNumSeq.yaxis.majorTicks:
a.tick1On=False
a.tick2On=False
for a in axNumSeq.xaxis.majorTicks:
a.tick1On=True
a.tick2On=False
for line in axNumSeq.yaxis.get_ticklines():
line.set_color(axesColour)
for line in axNumSeq.xaxis.get_ticklines():
line.set_color(axesColour)
for loc, spine in axNumSeq.spines.iteritems():
if loc in ['left', 'right','top']:
spine.set_color('none')
else:
spine.set_color(axesColour)
# *** Plot confidence intervals for each subsystem
lastAxes = axErrorbar
markerSize = math.sqrt(float(self.markerSize))
axErrorbar.errorbar(effectSizes, np.arange(len(features)), xerr=[lowerCIs,upperCIs], fmt='o', ms=markerSize, mfc=group1Colour, mec='black', ecolor='black', zorder=10)
effectSizesSample2 = [effectSizes[value] for value in dominateInSample2]
axErrorbar.plot(effectSizesSample2, dominateInSample2, ls='', marker='o', ms=markerSize, mfc=group2Colour, mec='black', zorder=100)
if statsResults.bConfIntervRatio:
axErrorbar.vlines(1, -1, len(features), linestyle='dashed', color=axesColour)
else:
axErrorbar.vlines(0, -1, len(features), linestyle='dashed', color=axesColour)
for value in np.arange(-0.5, len(features)-1, 2):
axErrorbar.axhspan(value, value+1, facecolor=highlightColor,edgecolor='none',zorder=1)
axErrorbar.set_title(ciTitle)
axErrorbar.set_xlabel('Difference in mean proportions (%)')
if self.bCustomLimits:
axErrorbar.set_xlim([self.minX, self.maxX])
else:
self.minX, self.maxX = axErrorbar.get_xlim()
if self.bShowBarPlot == False:
axErrorbar.set_yticks(np.arange(len(features)))
axErrorbar.set_yticklabels(features)
axErrorbar.set_ylim([-1, len(features)])
for label in axErrorbar.get_yticklabels():
if label.get_text() in self.preferences['Highlighted group features']:
label.set_color('red')
else:
for label in axErrorbar.get_yticklabels():
label.set_visible(False)
for a in axErrorbar.yaxis.majorTicks:
a.set_visible(False)
for a in axErrorbar.xaxis.majorTicks:
a.tick1On=True
a.tick2On=False
for a in axErrorbar.yaxis.majorTicks:
a.tick1On=False
a.tick2On=False
for line in axErrorbar.yaxis.get_ticklines():
line.set_visible(False)
for line in axErrorbar.xaxis.get_ticklines():
line.set_color(axesColour)
for loc, spine in axErrorbar.spines.iteritems():
if loc in ['left','right','top']:
spine.set_color('none')
else:
spine.set_color(axesColour)
# *** Show p-values on right of last plot
if self.bShowPValueLabels == True:
axRight = lastAxes.twinx()
axRight.set_yticks(np.arange(len(pValueLabels)))
axRight.set_yticklabels(pValueLabels)
axRight.set_ylim([-1, len(pValueLabels)])
axRight.set_ylabel(pValueTitle)
for a in axRight.yaxis.majorTicks:
a.tick1On=False
a.tick2On=False
for loc, spine in axRight.spines.iteritems():
spine.set_color('none')
# *** Legend
if self.legendPos != -1:
legend1 = Rectangle((0, 0), 1, 1, fc=group1Colour)
legend2 = Rectangle((0, 0), 1, 1, fc=group2Colour)
legend = self.fig.legend([legend1, legend2], (profile.groupName1, profile.groupName2), loc=self.legendPos, ncol=2)
legend.get_frame().set_linewidth(0)
self.updateGeometry()
self.draw()
def configure(self, profile, statsResults):
self.statsResults = statsResults
self.configDlg = ConfigureDialog(Ui_ExtendedErrorBarDialog)
# set enabled state of controls
self.configDlg.ui.chkShowStdDev.setEnabled(self.bShowBarPlot)
self.configDlg.ui.spinEndCapSize.setEnabled(self.bShowBarPlot)
self.configDlg.ui.spinMinimumX.setEnabled(self.bCustomLimits)
self.configDlg.ui.spinMaximumX.setEnabled(self.bCustomLimits)
# set current value of controls
self.configDlg.ui.cboSortingField.setCurrentIndex(self.configDlg.ui.cboSortingField.findText(self.sortingField))
self.configDlg.ui.spinFigWidth.setValue(self.figWidth)
self.configDlg.ui.spinFigRowHeight.setValue(self.figHeightPerRow)
self.configDlg.ui.chkShowBarPlot.setChecked(self.bShowBarPlot)
self.configDlg.ui.chkPValueLabels.setChecked(self.bShowPValueLabels)
self.configDlg.ui.chkCorrectedPvalues.setChecked(self.bShowCorrectedPvalues)
self.configDlg.ui.chkCustomLimits.setChecked(self.bCustomLimits)
self.configDlg.ui.spinMinimumX.setValue(self.minX)
self.configDlg.ui.spinMaximumX.setValue(self.maxX)
self.configDlg.ui.spinMarkerSize.setValue(self.markerSize)
self.configDlg.ui.chkShowStdDev.setChecked(self.bShowStdDev)
self.configDlg.ui.spinEndCapSize.setValue(self.endCapSize)
if self.legendPos == 2:
self.configDlg.ui.radioLegendPosUpperLeft.setChecked(True)
elif self.legendPos == 3:
self.configDlg.ui.radioLegendPosLowerLeft.setChecked(True)
elif self.legendPos == 4:
self.configDlg.ui.radioLegendPosLowerRight.setChecked(True)
elif self.legendPos == 8:
self.configDlg.ui.radioLegendPosLowerCentre.setChecked(True)
else:
self.configDlg.ui.radioLegendPosNone.setChecked(True)
if self.configDlg.exec_() == QtGui.QDialog.Accepted:
QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
self.sortingField = str(self.configDlg.ui.cboSortingField.currentText())
self.figWidth = self.configDlg.ui.spinFigWidth.value()
self.figHeightPerRow = self.configDlg.ui.spinFigRowHeight.value()
self.bShowBarPlot = self.configDlg.ui.chkShowBarPlot.isChecked()
self.bShowPValueLabels = self.configDlg.ui.chkPValueLabels.isChecked()
self.bShowCorrectedPvalues = self.configDlg.ui.chkCorrectedPvalues.isChecked()
self.bCustomLimits = self.configDlg.ui.chkCustomLimits.isChecked()
self.minX = self.configDlg.ui.spinMinimumX.value()
self.maxX = self.configDlg.ui.spinMaximumX.value()
self.markerSize = self.configDlg.ui.spinMarkerSize.value()
self.bShowStdDev = self.configDlg.ui.chkShowStdDev.isChecked()
self.endCapSize = self.configDlg.ui.spinEndCapSize.value()
# legend position
if self.configDlg.ui.radioLegendPosUpperLeft.isChecked() == True:
self.legendPos = 2
elif self.configDlg.ui.radioLegendPosLowerLeft.isChecked() == True:
self.legendPos = 3
elif self.configDlg.ui.radioLegendPosLowerCentre.isChecked() == True:
self.legendPos = 8
elif self.configDlg.ui.radioLegendPosLowerRight.isChecked() == True:
self.legendPos = 4
else:
self.legendPos = -1
self.settings.setValue('group: ' + self.name + '/width', self.figWidth)
self.settings.setValue('group: ' + self.name + '/row height', self.figHeightPerRow)
self.settings.setValue('group: ' + self.name + '/field', self.sortingField)
self.settings.setValue('group: ' + self.name + '/sequences subplot', self.bShowBarPlot)
self.settings.setValue('group: ' + self.name + '/p-value labels', self.bShowPValueLabels)
self.settings.setValue('group: ' + self.name + '/show corrected p-values', self.bShowCorrectedPvalues)
self.settings.setValue('group: ' + self.name + '/use custom limits', self.bCustomLimits)
self.settings.setValue('group: ' + self.name + '/minimum', self.minX)
self.settings.setValue('group: ' + self.name + '/maximum', self.maxX)
self.settings.setValue('group: ' + self.name + '/marker size', self.markerSize)
self.settings.setValue('group: ' + self.name + '/show std. dev.', self.bShowStdDev)
self.settings.setValue('group: ' + self.name + '/end cap size', self.endCapSize)
self.settings.setValue('group: ' + self.name + '/legend position', self.legendPos)
self.plot(profile, statsResults)
QtGui.QApplication.instance().restoreOverrideCursor()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
testWindow = TestWindow(ExtendedErrorBar)
testWindow.show()
sys.exit(app.exec_())
| dparks1134/STAMP | stamp/plugins/groups/plots/ExtendedErrorBar.py | Python | gpl-3.0 | 19,288 |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from loadcell_calibration/GetFactorRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetFactorRequest(genpy.Message):
_md5sum = "36d09b846be0b371c5f190354dd3153e"
_type = "loadcell_calibration/GetFactorRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int64 a
int64 b
"""
__slots__ = ['a','b']
_slot_types = ['int64','int64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
a,b
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetFactorRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.a is None:
self.a = 0
if self.b is None:
self.b = 0
else:
self.a = 0
self.b = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_2q.pack(_x.a, _x.b))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.a, _x.b,) = _struct_2q.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_2q.pack(_x.a, _x.b))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.a, _x.b,) = _struct_2q.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
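# '<2q' packs the a and b fields as two little-endian signed 64-bit integers.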
_struct_2q = struct.Struct("<2q")
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from loadcell_calibration/GetFactorResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetFactorResponse(genpy.Message):
_md5sum = "b88405221c77b1878a3cbbfff53428d7"
_type = "loadcell_calibration/GetFactorResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int64 sum
"""
__slots__ = ['sum']
_slot_types = ['int64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
sum
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetFactorResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.sum is None:
self.sum = 0
else:
self.sum = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_struct_q.pack(self.sum))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(self))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(self))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 8
(self.sum,) = _struct_q.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_struct_q.pack(self.sum))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(self))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(self))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 8
(self.sum,) = _struct_q.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_q = struct.Struct("<q")
class GetFactor(object):
_type = 'loadcell_calibration/GetFactor'
_md5sum = '6a2e34150c00229791cc89ff309fff21'
_request_class = GetFactorRequest
_response_class = GetFactorResponse
| fioreinc/ipp-15-16 | deprecated/catkin_ws/install/lib/python2.7/dist-packages/loadcell_calibration/srv/_GetFactor.py | Python | gpl-3.0 | 6,807 |
# ---------------------------------------------------------------------------
# OrmapLayersConfig.py
# Created by: Shad Campbell
# Date: 3/11/2011
# Updated by:
# Description: This is a configuration file to be customized by each county.
# Do not delete any of the items in this file. If they are not in use then
# specify their value and/or definition query to "".
# ---------------------------------------------------------------------------
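# Example (illustrative): an unused layer is disabled by setting both of its
# entries to empty strings, e.g. EXTRA13_LAYER="" and EXTRA13_QD="" below.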
LOTSANNO_LAYER="LotsAnno"
LOTSANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
PLATSANNO_LAYER="PlatsAnno"
PLATSANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
TAXCODEANNO_LAYER="TaxCodeAnno"
TAXCODEANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
TAXNUMANNO_LAYER="TaxlotNumberAnno"
TAXNUMANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ACRESANNO_LAYER="TaxlotAcresAnno"
ACRESANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO10_LAYER="Anno0010scale"
ANNO10_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO20_LAYER="Anno0020scale"
ANNO20_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO30_LAYER="Anno0030scale"
ANNO30_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO40_LAYER="Anno0040scale"
ANNO40_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO50_LAYER="Anno0050scale"
ANNO50_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO60_LAYER="Anno0060scale"
ANNO60_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO100_LAYER="Anno0100scale"
ANNO100_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO200_LAYER="Anno0200scale"
ANNO200_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO400_LAYER="Anno0400scale"
ANNO400_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO800_LAYER="Anno0800scale"
ANNO800_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO2000_LAYER="Anno2000scale"
ANNO2000_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
CORNER_ABOVE_LAYER="Corner"
CORNER_ABOVE_QD="\"MapNumber\"='*MapNumber*'"
TAXCODELINES_ABOVE_LAYER="TaxCodeLines - Above"
TAXCODELINES_ABOVE_QD=""
TAXLOTLINES_ABOVE_LAYER="TaxlotLines - Above"
TAXLOTLINES_ABOVE_QD="\"LineType\" <> 32"
REFLINES_ABOVE_LAYER="ReferenceLines - Above"
REFLINES_ABOVE_QD="\"MAPNUMBER\" = '*MapNumber*'"
CARTOLINES_ABOVE_LAYER="CartographicLines - Above"
CARTOLINES_ABOVE_QD=""
WATERLINES_ABOVE_LAYER="WaterLines - Above"
WATERLINES_ABOVE_QD=""
WATER_ABOVE_LAYER="Water - Above"
WATER_ABOVE_QD=""
MAPINDEXSEEMAP_LAYER=""
MAPINDEXSEEMAP_QD=""
MAPINDEX_LAYER="SeeMaps"
MAPINDEX_QD="\"IndexMap\" = '*MapNumber*'"
CORNER_BELOW_LAYER="Corner - Below"
CORNER_BELOW_QD=""
TAXCODELINES_BELOW_LAYER="TaxCodeLines - Below"
TAXCODELINES_BELOW_QD=""
TAXLOTLINES_BELOW_LAYER="TaxlotLines - Below"
TAXLOTLINES_BELOW_QD=""
REFLINES_BELOW_LAYER="ReferenceLines - Below"
REFLINES_BELOW_QD=""
CARTOLINES_BELOW_LAYER="CartographicLines - Below"
CARTOLINES_BELOW_QD=""
WATERLINES_BELOW_LAYER="WaterLines - Below"
WATERLINES_BELOW_QD=""
WATER_BELOW_LAYER="Water - Below"
WATER_BELOW_QD=""
PAGELAYOUT_TABLE="giscarto.CREATOR_ASR.PAGELAYOUTELEMENTS"
CANCELLEDNUMBERS_TABLE="giscarto.CREATOR_ASR.CANCELLEDNUMBERS"
CUSTOMDEFINITIONQUERIES_TABLE="CustomDefinitionQueries"
EXTRA1_LAYER="Arrow0010scale"
EXTRA1_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA2_LAYER="Arrow0020scale"
EXTRA2_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA3_LAYER="Arrow0030scale"
EXTRA3_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA4_LAYER="Arrow0040scale"
EXTRA4_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA5_LAYER="Arrow0050scale"
EXTRA5_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA6_LAYER="Arrow0100scale"
EXTRA6_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA7_LAYER="Arrow0200scale"
EXTRA7_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA8_LAYER="Arrow0400scale"
EXTRA8_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA9_LAYER="Arrow2000scale"
EXTRA9_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA10_LAYER="MapSecLines - Below"
EXTRA10_QD="\"MapNumber\"='*MapNumber*'"
EXTRA11_LAYER="Railroad"
EXTRA11_QD="CL <> 'Y'"
EXTRA12_LAYER="MapArea"
EXTRA12_QD="\"MapNumber\"='*MapNumber*'"
EXTRA13_LAYER=""
EXTRA13_QD=""
EXTRA14_LAYER="Taxlots - Above"
EXTRA14_QD="\"MapNumber\"='*MapNumber*'"
EXTRA15_LAYER="Arrow0060scale"
EXTRA15_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA16_LAYER="Landmarks"
EXTRA16_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA17_LAYER=""
EXTRA17_QD=""
EXTRA18_LAYER=""
EXTRA18_QD=""
EXTRA19_LAYER=""
EXTRA19_QD=""
EXTRA20_LAYER=""
EXTRA20_QD=""
| ORMAPtools/MapProduction | Config File Templates/ORMAP_LayersConfig.py | Python | gpl-3.0 | 5,573 |
'''
Rigidity is a simple wrapper to the built-in csv module that allows for
validation and correction of data being read/written from/to CSV files.
This module allows you to easily construct validation and correction
rulesets to be applied automatically while preserving the csv interface.
This allows you to easily upgrade old software to use new, strict rules.
'''
import rigidity.errors
import rigidity.rules as rules
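# A minimal usage sketch (illustrative only -- the file name and the empty
# per-column rule lists are placeholders, not part of this module):
#
#   import csv
#   import rigidity
#
#   with open('data.csv') as handle:
#       reader = rigidity.Rigidity(csv.reader(handle),
#                                  rules=[[], []])  # one rule list per CSV column
#       for row in reader:
#           ...  # rows arrive validated and corrected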
class Rigidity():
'''
A wrapper for CSV readers and writers that applies validation and correction rules to each row read or written.
'''
csvobj = None # Declare here to prevent getattr/setattr recursion
#: Do not display output at all.
DISPLAY_NONE = 0
#: Display simple warnings when ValueError is raised by a rule.
DISPLAY_SIMPLE = 1
def __init__(self, csvobj, rules=[], display=DISPLAY_NONE):
'''
:param csvobj: a Reader or Writer object from the csv module;
any calls to this object's methods will be wrapped to perform
the specified rigidity checks.
:param rules=[]: a two dimensional list containing rules to
be applied to columns moving in/out of `csvobj`. Each index in
this list corresponds to the CSV column that its list of rules
will be applied to.
:param int display: When an error is thrown, display the row
and information about which column caused the error.
'''
self.csvobj = csvobj
self.rules = rules
self.display = display
if isinstance(rules, dict):
self.keys = rules.keys()
else:
self.keys = range(0, len(rules))
# Wrapper methods for the `csv` interface
def writeheader(self):
'''
Plain pass-through to the given CSV object. It is assumed that
header information is already valid when the CSV object is
constructed.
'''
self.csvobj.writeheader()
def writerow(self, row):
'''
Validate and correct the data provided in `row` and raise an
exception if the validation or correction fails. Then, write the
row to the CSV file.
'''
try:
self.csvobj.writerow(self.validate_write(row))
except rigidity.errors.DropRow:
return
def writerows(self, rows):
'''
Validate and correct the data provided in every row and raise an
exception if the validation or correction fails.
.. note::
Behavior in the case that the data is invalid and cannot be
repaired is undefined. For example, the implementation may
choose to write all valid rows up until the error, or it may
choose to only conduct the write operation after all rows have
been verified. Do not depend on the presence or absence of any
of the rows in `rows` in the event that an exception occurs.
'''
for row in rows:
self.writerow(row)
# New methods, not part of the `csv` interface
def validate(self, row):
'''
.. warning::
This method is deprecated and will be removed in a future
release; it is included only to support old code. It will
not produce consistent results with bi-directional rules.
You should use :meth:`validate_read` or
:meth:`validate_write` instead.
Validate that the row conforms with the specified rules,
correcting invalid rows where the rule is able to do so.
If the row is valid or can be made valid through corrections,
this method will return a row that can be written to the CSV
file. If the row is invalid and cannot be corrected, then this
method will raise an exception.
:param row: a row object that can be passed to a CSVWriter's
writerow() method.
'''
# Ensure mutability - I'm looking at you, tuples!
if not isinstance(row, (list, dict)):
row = list(row)
# Iterate through all keys, updating the data
for key in self.keys:
value = row[key]
for rule in self.rules[key]:
if hasattr(rule, 'apply'):
value = rule.apply(value)
else:
value = rule.read(value)
row[key] = value
# Return the updated data
return row
def validate_write(self, row):
'''
Validate that the row conforms with the specified rules,
correcting invalid rows where the rule is able to do so.
If the row is valid or can be made valid through corrections,
this method will return a row that can be written to the CSV
file. If the row is invalid and cannot be corrected, then this
method will raise an exception.
:param row: a row object that can be passed to a CSVWriter's
writerow() method.
'''
# Ensure mutability - I'm looking at you, tuples!
if not isinstance(row, (list, dict)):
row = list(row)
# Iterate through all keys, updating the data
for key in self.keys:
value = row[key]
for rule in self.rules[key]:
try:
value = rule.write(value)
except ValueError as err:
if self.display == self.DISPLAY_SIMPLE:
print('Invalid data encountered in column %s:' % key)
print(' -', row)
print(' - Error raised by rule:', rule)
print('')
raise err
row[key] = value
# Return the updated data
return row
def validate_read(self, row):
'''
Validate that the row conforms with the specified rules,
correcting invalid rows where the rule is able to do so.
If the row is valid or can be made valid through corrections,
this method will return a row that can be written to the CSV
file. If the row is invalid and cannot be corrected, then this
method will raise an exception.
:param row: a row object as returned by a CSV reader's
__next__() method.
'''
# Ensure mutability - I'm looking at you, tuples!
if not isinstance(row, (list, dict)):
row = list(row)
# Iterate through all keys, updating the data
for key in self.keys:
value = row[key]
for rule in self.rules[key]:
try:
value = rule.read(value)
except ValueError as err:
if self.display == self.DISPLAY_SIMPLE:
print('Invalid data encountered in column %s:' % key)
print(' -', row)
print(' - Error raised by rule:', rule)
print('')
raise err
except IndexError as err:
if self.display == self.DISPLAY_SIMPLE:
print('IndexError raised in column %s:' % key)
print(' -', row)
print(' - Error raised by rule:', rule)
print('')
raise err
row[key] = value
# Return the updated data
return row
def skip(self):
'''
Return a row, skipping validation. This is useful when you want
to skip validation of header information.
'''
return next(self.csvobj)
def __iter__(self):
for row in iter(self.csvobj):
try:
yield self.validate_read(row)
except rigidity.errors.DropRow:
continue
def __next__(self):
'''
Call the __next__() method on the given CSV object, validate and
repair the row it returns, raise an exception if the row cannot
be repaired, and then return the row.
'''
try:
return self.validate_read(next(self.csvobj))
except rigidity.errors.DropRow:
return next(self)
def __getattr__(self, name):
if hasattr(self.csvobj, name):
return getattr(self.csvobj, name)
else:
return super().__getattr__(self, name)
def __setattr__(self, name, value):
if hasattr(self.csvobj, name):
return setattr(self.csvobj, name, value)
super().__setattr__(name, value)
def __delattr__(self, name):
if hasattr(self.csvobj, name):
return delattr(self.csvobj, name)
return super().__delattr__(name)
| austinhartzheim/rigidity | rigidity/__init__.py | Python | gpl-3.0 | 8,662 |
#!/usr/bin/env python
'''enable run-time addition and removal of master link, just like --master on the command line'''
''' TO USE:
link add 10.11.12.13:14550
link list
link remove 3 # to remove 3rd output
'''
from pymavlink import mavutil
import time, struct, math, sys, fnmatch, traceback, json, os
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import mp_util
if mp_util.has_wxpython:
from MAVProxy.modules.lib.mp_menu import *
from MAVProxy.modules.lib.wx_addlink import MPMenulinkAddDialog
dataPackets = frozenset(['BAD_DATA','LOG_DATA'])
delayedPackets = frozenset([ 'MISSION_CURRENT', 'SYS_STATUS', 'VFR_HUD',
'GPS_RAW_INT', 'SCALED_PRESSURE', 'GLOBAL_POSITION_INT',
'NAV_CONTROLLER_OUTPUT' ])
activityPackets = frozenset([ 'HEARTBEAT', 'GPS_RAW_INT', 'GPS_RAW', 'GLOBAL_POSITION_INT', 'SYS_STATUS' ])
radioStatusPackets = frozenset([ 'RADIO', 'RADIO_STATUS'])
preferred_ports = [
'*FTDI*',
"*Arduino_Mega_2560*",
"*3D*",
"*USB_to_UART*",
'*Ardu*',
'*PX4*',
'*Hex_*',
'*Holybro_*',
'*mRo*',
'*FMU*',
'*Swift-Flyer*',
]
class LinkModule(mp_module.MPModule):
def __init__(self, mpstate):
super(LinkModule, self).__init__(mpstate, "link", "link control", public=True, multi_vehicle=True)
self.add_command('link', self.cmd_link, "link control",
["<list|ports|resetstats>",
'add (SERIALPORT)',
'attributes (LINK) (ATTRIBUTES)',
'remove (LINKS)',
'dataratelogging (DLSTATE)',
'hl (HLSTATE)'])
self.add_command('vehicle', self.cmd_vehicle, "vehicle control")
self.add_command('alllinks', self.cmd_alllinks, "send command on all links", ["(COMMAND)"])
self.no_fwd_types = set()
self.no_fwd_types.add("BAD_DATA")
self.add_completion_function('(SERIALPORT)', self.complete_serial_ports)
self.add_completion_function('(LINKS)', self.complete_links)
self.add_completion_function('(LINK)', self.complete_links)
self.add_completion_function('(HLSTATE)', self.complete_hl)
self.add_completion_function('(DLSTATE)', self.complete_dl)
self.last_altitude_announce = 0.0
self.vehicle_list = set()
self.high_latency = False
self.datarate_logging = False
self.datarate_logging_timer = mavutil.periodic_event(1)
self.old_streamrate = 0
self.old_streamrate2 = 0
self.menu_added_console = False
if mp_util.has_wxpython:
self.menu_rm = MPMenuSubMenu('Remove', items=[])
self.menu = MPMenuSubMenu('Link',
items=[MPMenuItem('Add...', 'Add...', '# link add ', handler=MPMenulinkAddDialog()),
self.menu_rm,
MPMenuItem('Ports', 'Ports', '# link ports'),
MPMenuItem('List', 'List', '# link list'),
MPMenuItem('Status', 'Status', '# link')])
self.last_menu_update = 0
def idle_task(self):
'''called on idle'''
if mp_util.has_wxpython:
if self.module('console') is not None:
if not self.menu_added_console:
self.menu_added_console = True
# we don't dynamically update these yet due to a wx bug
self.menu_rm.items = [ MPMenuItem(p, p, '# link remove %s' % p) for p in self.complete_links('') ]
self.module('console').add_menu(self.menu)
else:
self.menu_added_console = False
for m in self.mpstate.mav_master:
m.source_system = self.settings.source_system
m.mav.srcSystem = m.source_system
m.mav.srcComponent = self.settings.source_component
# don't let pending statustext wait forever for last chunk:
for src in self.status.statustexts_by_sysidcompid:
msgids = list(self.status.statustexts_by_sysidcompid[src].keys())
for msgid in msgids:
pending = self.status.statustexts_by_sysidcompid[src][msgid]
if time.time() - pending.last_chunk_time > 1:
self.emit_accumulated_statustext(src, msgid, pending)
# datarate logging if enabled, at 1 Hz
if self.datarate_logging_timer.trigger() and self.datarate_logging:
with open(self.datarate_logging, 'a') as logfile:
for master in self.mpstate.mav_master:
highest_msec_key = (self.target_system, self.target_component)
linkdelay = (self.status.highest_msec.get(highest_msec_key, 0) - master.highest_msec.get(highest_msec_key, 0))*1.0e-3
logfile.write(str(time.strftime("%H:%M:%S")) + "," +
str(self.link_label(master)) + "," +
str(master.linknum) + "," +
str(self.status.counters['MasterIn'][master.linknum]) + "," +
str(self.status.bytecounters['MasterIn'][master.linknum].total()) + "," +
str(linkdelay) + "," +
str(100 * round(master.packet_loss(), 3)) + "\n")
def complete_serial_ports(self, text):
'''return list of serial ports'''
ports = mavutil.auto_detect_serial(preferred_list=preferred_ports)
return [ p.device for p in ports ]
def complete_hl(self, text):
'''return list of hl options'''
return [ 'on', 'off' ]
def complete_dl(self, text):
'''return list of datarate_logging options'''
return [ 'on', 'off' ]
def complete_links(self, text):
'''return list of links'''
try:
ret = [ m.address for m in self.mpstate.mav_master ]
for m in self.mpstate.mav_master:
ret.append(m.address)
if hasattr(m, 'label'):
ret.append(m.label)
return ret
except Exception as e:
print("Caught exception: %s" % str(e))
def cmd_link(self, args):
'''handle link commands'''
if len(args) < 1:
self.show_link()
elif args[0] == "list":
self.cmd_link_list()
elif args[0] == "hl":
self.cmd_hl(args[1:])
elif args[0] == "dataratelogging":
self.cmd_dl(args[1:])
elif args[0] == "add":
if len(args) != 2:
print("Usage: link add LINK")
print('Usage: e.g. link add 127.0.0.1:9876')
print('Usage: e.g. link add 127.0.0.1:9876:{"label":"rfd900"}')
return
self.cmd_link_add(args[1:])
elif args[0] == "attributes":
if len(args) != 3:
print("Usage: link attributes LINK ATTRIBUTES")
print('Usage: e.g. link attributes rfd900 {"label":"bob"}')
return
self.cmd_link_attributes(args[1:])
elif args[0] == "ports":
self.cmd_link_ports()
elif args[0] == "remove":
if len(args) != 2:
print("Usage: link remove LINK")
return
self.cmd_link_remove(args[1:])
elif args[0] == "resetstats":
self.reset_link_stats()
else:
print("usage: link <list|add|remove|attributes|hl|dataratelogging|resetstats>")
def cmd_dl(self, args):
'''Toggle datarate logging'''
if len(args) < 1:
print("Datarate logging is " + ("on" if self.datarate_logging else "off"))
return
elif args[0] == "on":
self.datarate_logging = os.path.join(self.logdir, "dataratelog.csv")
print("Datarate Logging ON, logfile: " + self.datarate_logging)
# Open a new file handle (don't append) for logging
with open(self.datarate_logging, 'w') as logfile:
logfile.write("time, linkname, linkid, packetsreceived, bytesreceived, delaysec, lostpercent\n")
elif args[0] == "off":
print("Datarate Logging OFF")
self.datarate_logging = None
else:
print("usage: dataratelogging <on|off>")
def cmd_hl(self, args):
'''Toggle high latency mode'''
if len(args) < 1:
print("High latency mode is " + str(self.high_latency))
return
elif args[0] == "on":
print("High latency mode ON")
self.high_latency = True
# Tell ArduPilot to start sending HIGH_LATENCY2 messages
self.master.mav.command_long_send(
self.target_system, # target_system
self.target_component,
mavutil.mavlink.MAV_CMD_SET_MESSAGE_INTERVAL, # command
0, # confirmation
mavutil.mavlink.MAVLINK_MSG_ID_HIGH_LATENCY2, # param1 (msg id)
1000000, # param2 (message interval, us)
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
# and stop sending any other messages
self.old_streamrate = self.settings.streamrate
self.old_streamrate2 = self.settings.streamrate2
self.settings.streamrate = -1
self.settings.streamrate2 = -1
for master in self.mpstate.mav_master:
master.mav.request_data_stream_send(self.mpstate.settings.target_system, self.mpstate.settings.target_component,
mavutil.mavlink.MAV_DATA_STREAM_ALL,
0, 1)
return
elif args[0] == "off":
print("High latency mode OFF")
self.high_latency = False
# Start sending the full message set again
self.settings.streamrate = self.old_streamrate
self.settings.streamrate2 = self.old_streamrate2
for master in self.mpstate.mav_master:
if master.linknum == 0:
rate = self.settings.streamrate
else:
rate = self.settings.streamrate2
if rate != -1 and self.mpstate.settings.streamrate != -1:
master.mav.request_data_stream_send(self.mpstate.settings.target_system, self.mpstate.settings.target_component,
mavutil.mavlink.MAV_DATA_STREAM_ALL,
rate, 1)
# Tell ArduPilot to stop sending HIGH_LATENCY2 messages
self.master.mav.command_long_send(
self.target_system, # target_system
self.target_component,
mavutil.mavlink.MAV_CMD_SET_MESSAGE_INTERVAL, # command
0, # confirmation
mavutil.mavlink.MAVLINK_MSG_ID_HIGH_LATENCY2, # param1 (msg id)
-1, # param2 (message interval)
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
return
else:
print("usage: hl <on|off>")
def show_link(self):
'''show link information'''
for master in self.mpstate.mav_master:
highest_msec_key = (self.target_system, self.target_component)
linkdelay = (self.status.highest_msec.get(highest_msec_key, 0) - master.highest_msec.get(highest_msec_key, 0))*1.0e-3
if master.linkerror:
status = "DOWN"
else:
status = "OK"
sign_string = ''
try:
if master.mav.signing.sig_count:
if master.mav.signing.secret_key is None:
# unsigned/reject counts are not updated if we
# don't have a signing secret
sign_string = ", (no-signing-secret)"
else:
sign_string = ", unsigned %u reject %u" % (master.mav.signing.unsigned_count, master.mav.signing.reject_count)
except AttributeError as e:
# some mav objects may not have a "signing" attribute
pass
print("link %s %s (%u packets, %u bytes, %.2fs delay, %u lost, %.1f%% loss, rate:%uB/s%s)" % (self.link_label(master),
status,
self.status.counters['MasterIn'][master.linknum],
self.status.bytecounters['MasterIn'][master.linknum].total(),
linkdelay,
master.mav_loss,
master.packet_loss(),
self.status.bytecounters['MasterIn'][master.linknum].rate(),
sign_string))
def reset_link_stats(self):
'''reset link statistics'''
for master in self.mpstate.mav_master:
self.status.counters['MasterIn'][master.linknum] = 0
self.status.bytecounters['MasterIn'][master.linknum].__init__()
master.mav_loss = 0
master.mav_count = 0
def cmd_alllinks(self, args):
'''send command on all links'''
saved_target = self.mpstate.settings.target_system
print("Sending to: ", self.vehicle_list)
for v in sorted(self.vehicle_list):
self.cmd_vehicle([str(v)])
self.mpstate.functions.process_stdin(' '.join(args), True)
self.cmd_vehicle([str(saved_target)])
def cmd_link_list(self):
'''list links'''
print("%u links" % len(self.mpstate.mav_master))
for i in range(len(self.mpstate.mav_master)):
conn = self.mpstate.mav_master[i]
if hasattr(conn, 'label'):
print("%u (%s): %s" % (i, conn.label, conn.address))
else:
print("%u: %s" % (i, conn.address))
def parse_link_attributes(self, some_json):
'''return a dict based on some_json (empty if json invalid)'''
try:
return json.loads(some_json)
except ValueError:
print('Invalid JSON argument: {0}'.format(some_json))
return {}
def parse_link_descriptor(self, descriptor):
'''parse e.g. 'udpin:127.0.0.1:9877:{"foo":"bar"}' into
python structure ("udpin:127.0.0.1:9877", {"foo":"bar"})'''
optional_attributes = {}
link_components = descriptor.split(":{", 1)
device = link_components[0]
if (len(link_components) == 2 and link_components[1].endswith("}")):
# assume json
some_json = "{" + link_components[1]
optional_attributes = self.parse_link_attributes(some_json)
return (device, optional_attributes)
def apply_link_attributes(self, conn, optional_attributes):
for attr in optional_attributes:
print("Applying attribute to link: %s = %s" % (attr, optional_attributes[attr]))
setattr(conn, attr, optional_attributes[attr])
def link_add(self, descriptor, force_connected=False):
'''add new link'''
try:
(device, optional_attributes) = self.parse_link_descriptor(descriptor)
# if there's only 1 colon for port:baud
# and if the first string is a valid serial port, it's a serial connection
if len(device.split(':')) == 2:
ports = mavutil.auto_detect_serial(preferred_list=preferred_ports)
for p in ports:
if p.device == device.split(':')[0]:
# it's a valid serial port, reformat arguments to fit
self.settings.baudrate = int(device.split(':')[1])
device = device.split(':')[0]
break
print("Connect %s source_system=%d" % (device, self.settings.source_system))
try:
conn = mavutil.mavlink_connection(device, autoreconnect=True,
source_system=self.settings.source_system,
baud=self.settings.baudrate,
force_connected=force_connected)
except Exception as e:
# try the same thing but without force-connected for
                # backwards-compatibility
conn = mavutil.mavlink_connection(device, autoreconnect=True,
source_system=self.settings.source_system,
baud=self.settings.baudrate)
conn.mav.srcComponent = self.settings.source_component
except Exception as msg:
print("Failed to connect to %s : %s" % (descriptor, msg))
return False
if self.settings.rtscts:
conn.set_rtscts(True)
conn.mav.set_callback(self.master_callback, conn)
if hasattr(conn.mav, 'set_send_callback'):
conn.mav.set_send_callback(self.master_send_callback, conn)
conn.linknum = len(self.mpstate.mav_master)
conn.linkerror = False
conn.link_delayed = False
conn.last_heartbeat = 0
conn.last_message = 0
conn.highest_msec = {}
conn.target_system = self.settings.target_system
self.apply_link_attributes(conn, optional_attributes)
self.mpstate.mav_master.append(conn)
self.status.counters['MasterIn'].append(0)
self.status.bytecounters['MasterIn'].append(self.status.ByteCounter())
self.mpstate.vehicle_link_map[conn.linknum] = set(())
try:
mp_util.child_fd_list_add(conn.port.fileno())
except Exception:
pass
return True
def cmd_link_add(self, args):
'''add new link'''
descriptor = args[0]
print("Adding link %s" % descriptor)
self.link_add(descriptor)
def link_attributes(self, link, attributes):
i = self.find_link(link)
if i is None:
print("Connection (%s) not found" % (link,))
return
conn = self.mpstate.mav_master[i]
atts = self.parse_link_attributes(attributes)
self.apply_link_attributes(conn, atts)
def cmd_link_attributes(self, args):
'''change optional link attributes'''
link = args[0]
attributes = args[1]
print("Setting link %s attributes (%s)" % (link, attributes))
self.link_attributes(link, attributes)
def cmd_link_ports(self):
'''show available ports'''
ports = mavutil.auto_detect_serial(preferred_list=preferred_ports)
for p in ports:
print("%s : %s : %s" % (p.device, p.description, p.hwid))
def find_link(self, device):
'''find a device based on number, name or label'''
for i in range(len(self.mpstate.mav_master)):
conn = self.mpstate.mav_master[i]
if (str(i) == device or
conn.address == device or
getattr(conn, 'label', None) == device):
return i
return None
def cmd_link_remove(self, args):
        '''remove a link'''
device = args[0]
if len(self.mpstate.mav_master) <= 1:
print("Not removing last link")
return
i = self.find_link(device)
if i is None:
return
conn = self.mpstate.mav_master[i]
print("Removing link %s" % conn.address)
try:
try:
mp_util.child_fd_list_remove(conn.port.fileno())
except Exception:
pass
self.mpstate.mav_master[i].close()
except Exception as msg:
print(msg)
pass
self.mpstate.mav_master.pop(i)
self.status.counters['MasterIn'].pop(i)
self.status.bytecounters['MasterIn'].pop(i)
del self.mpstate.vehicle_link_map[conn.linknum]
# renumber the links
vehicle_link_map_reordered = {}
for j in range(len(self.mpstate.mav_master)):
conn = self.mpstate.mav_master[j]
map_old = self.mpstate.vehicle_link_map[conn.linknum]
conn.linknum = j
vehicle_link_map_reordered[j] = map_old
self.mpstate.vehicle_link_map = vehicle_link_map_reordered
def get_usec(self):
'''time since 1970 in microseconds'''
return int(time.time() * 1.0e6)
def master_send_callback(self, m, master):
'''called on sending a message'''
if self.status.watch is not None:
for msg_type in self.status.watch:
if fnmatch.fnmatch(m.get_type().upper(), msg_type.upper()):
self.mpstate.console.writeln('> '+ str(m))
break
mtype = m.get_type()
if mtype != 'BAD_DATA' and self.mpstate.logqueue:
usec = self.get_usec()
usec = (usec & ~3) | 3 # linknum 3
self.mpstate.logqueue.put(bytearray(struct.pack('>Q', usec) + m.get_msgbuf()))
def handle_msec_timestamp(self, m, master):
'''special handling for MAVLink packets with a time_boot_ms field'''
if m.get_type() == 'GLOBAL_POSITION_INT':
# this is fix time, not boot time
return
msec = m.time_boot_ms
if msec == 0:
return
sysid = m.get_srcSystem()
compid = m.get_srcComponent()
highest_msec_key = (sysid,compid)
highest = master.highest_msec.get(highest_msec_key, 0)
if msec + 30000 < highest:
self.say('Time has wrapped')
print('Time has wrapped', msec, highest)
self.status.highest_msec[highest_msec_key] = msec
for mm in self.mpstate.mav_master:
mm.link_delayed = False
mm.highest_msec[highest_msec_key] = msec
return
# we want to detect when a link is delayed
master.highest_msec[highest_msec_key] = msec
if msec > self.status.highest_msec.get(highest_msec_key, 0):
self.status.highest_msec[highest_msec_key] = msec
if msec < self.status.highest_msec.get(highest_msec_key, 0) and len(self.mpstate.mav_master) > 1 and self.mpstate.settings.checkdelay:
master.link_delayed = True
else:
master.link_delayed = False
def colors_for_severity(self, severity):
severity_colors = {
# tuple is (fg, bg) (as in "white on red")
mavutil.mavlink.MAV_SEVERITY_EMERGENCY: ('white', 'red'),
mavutil.mavlink.MAV_SEVERITY_ALERT: ('white', 'red'),
mavutil.mavlink.MAV_SEVERITY_CRITICAL: ('white', 'red'),
mavutil.mavlink.MAV_SEVERITY_ERROR: ('black', 'orange'),
mavutil.mavlink.MAV_SEVERITY_WARNING: ('black', 'orange'),
mavutil.mavlink.MAV_SEVERITY_NOTICE: ('black', 'yellow'),
mavutil.mavlink.MAV_SEVERITY_INFO: ('white', 'green'),
mavutil.mavlink.MAV_SEVERITY_DEBUG: ('white', 'green'),
}
try:
return severity_colors[severity]
except Exception as e:
print("Exception: %s" % str(e))
return ('white', 'red')
def report_altitude(self, altitude):
'''possibly report a new altitude'''
master = self.master
if getattr(self.console, 'ElevationMap', None) is not None and self.mpstate.settings.basealt != 0:
lat = master.field('GLOBAL_POSITION_INT', 'lat', 0)*1.0e-7
lon = master.field('GLOBAL_POSITION_INT', 'lon', 0)*1.0e-7
alt1 = self.console.ElevationMap.GetElevation(lat, lon)
if alt1 is not None:
alt2 = self.mpstate.settings.basealt
altitude += alt2 - alt1
self.status.altitude = altitude
altitude_converted = self.height_convert_units(altitude)
if (int(self.mpstate.settings.altreadout) > 0 and
math.fabs(altitude_converted - self.last_altitude_announce) >=
int(self.settings.altreadout)):
self.last_altitude_announce = altitude_converted
            # integer division keeps the announced height on a multiple of altreadout
            rounded_alt = int(self.settings.altreadout) * ((self.settings.altreadout//2 + int(altitude_converted)) // int(self.settings.altreadout))
self.say("height %u" % rounded_alt, priority='notification')
def emit_accumulated_statustext(self, key, id, pending):
out = pending.accumulated_statustext()
if out != self.status.last_apm_msg or time.time() > self.status.last_apm_msg_time+2:
(fg, bg) = self.colors_for_severity(pending.severity)
out = pending.accumulated_statustext()
self.mpstate.console.writeln("AP: %s" % out, bg=bg, fg=fg)
self.status.last_apm_msg = out
self.status.last_apm_msg_time = time.time()
del self.status.statustexts_by_sysidcompid[key][id]
def master_msg_handling(self, m, master):
'''link message handling for an upstream link'''
if self.settings.target_system != 0 and m.get_srcSystem() != self.settings.target_system:
# don't process messages not from our target
if m.get_type() == "BAD_DATA":
if self.mpstate.settings.shownoise and mavutil.all_printable(m.data):
out = m.data
if type(m.data) == bytearray:
out = m.data.decode('ascii')
self.mpstate.console.write(out, bg='red')
return
if self.settings.target_system != 0 and master.target_system != self.settings.target_system:
# keep the pymavlink level target system aligned with the MAVProxy setting
master.target_system = self.settings.target_system
if self.settings.target_component != 0 and master.target_component != self.settings.target_component:
# keep the pymavlink level target component aligned with the MAVProxy setting
print("change target_component %u" % self.settings.target_component)
master.target_component = self.settings.target_component
mtype = m.get_type()
if (mtype == 'HEARTBEAT' or mtype == 'HIGH_LATENCY2') and m.type != mavutil.mavlink.MAV_TYPE_GCS:
if self.settings.target_system == 0 and self.settings.target_system != m.get_srcSystem():
self.settings.target_system = m.get_srcSystem()
self.say("online system %u" % self.settings.target_system,'message')
for mav in self.mpstate.mav_master:
mav.target_system = self.settings.target_system
if self.status.heartbeat_error:
self.status.heartbeat_error = False
self.say("heartbeat OK")
if master.linkerror:
master.linkerror = False
self.say("link %s OK" % (self.link_label(master)))
self.status.last_heartbeat = time.time()
master.last_heartbeat = self.status.last_heartbeat
armed = self.master.motors_armed()
if armed != self.status.armed:
self.status.armed = armed
if armed:
self.say("ARMED")
else:
self.say("DISARMED")
if master.flightmode != self.status.flightmode:
self.status.flightmode = master.flightmode
if self.mpstate.functions.input_handler is None:
self.set_prompt(self.status.flightmode + "> ")
if master.flightmode != self.status.last_mode_announced and time.time() > self.status.last_mode_announce + 2:
self.status.last_mode_announce = time.time()
self.status.last_mode_announced = master.flightmode
self.say("Mode " + self.status.flightmode)
if m.type == mavutil.mavlink.MAV_TYPE_FIXED_WING:
self.mpstate.vehicle_type = 'plane'
self.mpstate.vehicle_name = 'ArduPlane'
elif m.type in [mavutil.mavlink.MAV_TYPE_GROUND_ROVER,
mavutil.mavlink.MAV_TYPE_SURFACE_BOAT]:
self.mpstate.vehicle_type = 'rover'
self.mpstate.vehicle_name = 'APMrover2'
elif m.type in [mavutil.mavlink.MAV_TYPE_SUBMARINE]:
self.mpstate.vehicle_type = 'sub'
self.mpstate.vehicle_name = 'ArduSub'
elif m.type in [mavutil.mavlink.MAV_TYPE_QUADROTOR,
mavutil.mavlink.MAV_TYPE_COAXIAL,
mavutil.mavlink.MAV_TYPE_HEXAROTOR,
mavutil.mavlink.MAV_TYPE_OCTOROTOR,
mavutil.mavlink.MAV_TYPE_TRICOPTER,
mavutil.mavlink.MAV_TYPE_HELICOPTER,
mavutil.mavlink.MAV_TYPE_DODECAROTOR]:
self.mpstate.vehicle_type = 'copter'
self.mpstate.vehicle_name = 'ArduCopter'
elif m.type in [mavutil.mavlink.MAV_TYPE_ANTENNA_TRACKER]:
self.mpstate.vehicle_type = 'antenna'
self.mpstate.vehicle_name = 'AntennaTracker'
elif m.type in [mavutil.mavlink.MAV_TYPE_AIRSHIP]:
self.mpstate.vehicle_type = 'blimp'
self.mpstate.vehicle_name = 'Blimp'
elif mtype == 'STATUSTEXT':
class PendingText(object):
def __init__(self):
self.expected_count = None
self.severity = None
self.chunks = {}
self.start_time = time.time()
self.last_chunk_time = time.time()
def add_chunk(self, m): # m is a statustext message
self.severity = m.severity
self.last_chunk_time = time.time()
if hasattr(m, 'chunk_seq'):
# mavlink extensions are present.
chunk_seq = m.chunk_seq
mid = m.id
else:
# Note that m.id may still exist! It will
# contain the value 253, STATUSTEXT's mavlink
# message id. Thus our reliance on the
# presence of chunk_seq.
chunk_seq = 0
mid = 0
self.chunks[chunk_seq] = m.text
if len(m.text) != 50 or mid == 0:
self.expected_count = chunk_seq + 1;
def complete(self):
return (self.expected_count is not None and
self.expected_count == len(self.chunks))
def accumulated_statustext(self):
next_expected_chunk = 0
out = ""
for chunk_seq in sorted(self.chunks.keys()):
if chunk_seq != next_expected_chunk:
out += " ... "
next_expected_chunk = chunk_seq
out += self.chunks[chunk_seq]
next_expected_chunk += 1
return out
key = "%s.%s" % (m.get_srcSystem(), m.get_srcComponent())
if key not in self.status.statustexts_by_sysidcompid:
self.status.statustexts_by_sysidcompid[key] = {}
if hasattr(m, 'chunk_seq'):
mid = m.id
else:
# m.id will have the value of 253, STATUSTEXT mavlink id
mid = 0
if mid not in self.status.statustexts_by_sysidcompid[key]:
self.status.statustexts_by_sysidcompid[key][mid] = PendingText()
pending = self.status.statustexts_by_sysidcompid[key][mid]
pending.add_chunk(m)
if pending.complete():
# we have all of the chunks!
self.emit_accumulated_statustext(key, mid, pending)
elif mtype == "VFR_HUD":
have_gps_lock = False
if 'GPS_RAW' in self.status.msgs and self.status.msgs['GPS_RAW'].fix_type == 2:
have_gps_lock = True
elif 'GPS_RAW_INT' in self.status.msgs and self.status.msgs['GPS_RAW_INT'].fix_type == 3:
have_gps_lock = True
if have_gps_lock and not self.status.have_gps_lock and m.alt != 0:
self.say("GPS lock at %u meters" % m.alt, priority='notification')
self.status.have_gps_lock = True
elif mtype == "GPS_RAW":
if self.status.have_gps_lock:
if m.fix_type != 2 and not self.status.lost_gps_lock and (time.time() - self.status.last_gps_lock) > 3:
self.say("GPS fix lost")
self.status.lost_gps_lock = True
if m.fix_type == 2 and self.status.lost_gps_lock:
self.say("GPS OK")
self.status.lost_gps_lock = False
if m.fix_type == 2:
self.status.last_gps_lock = time.time()
elif mtype == "GPS_RAW_INT":
if self.status.have_gps_lock:
if m.fix_type < 3 and not self.status.lost_gps_lock and (time.time() - self.status.last_gps_lock) > 3:
self.say("GPS fix lost")
self.status.lost_gps_lock = True
if m.fix_type >= 3 and self.status.lost_gps_lock:
self.say("GPS OK")
self.status.lost_gps_lock = False
if m.fix_type >= 3:
self.status.last_gps_lock = time.time()
elif mtype == "NAV_CONTROLLER_OUTPUT" and self.status.flightmode == "AUTO" and self.mpstate.settings.distreadout:
rounded_dist = int(m.wp_dist/self.mpstate.settings.distreadout)*self.mpstate.settings.distreadout
if math.fabs(rounded_dist - self.status.last_distance_announce) >= self.mpstate.settings.distreadout:
if rounded_dist != 0:
self.say("%u" % rounded_dist, priority="progress")
self.status.last_distance_announce = rounded_dist
elif mtype == "GLOBAL_POSITION_INT":
self.report_altitude(m.relative_alt*0.001)
elif mtype == "COMPASSMOT_STATUS":
print(m)
elif mtype == "SIMSTATE":
self.mpstate.is_sitl = True
elif mtype == "ATTITUDE":
att_time = m.time_boot_ms * 0.001
self.mpstate.attitude_time_s = max(self.mpstate.attitude_time_s, att_time)
if self.mpstate.attitude_time_s - att_time > 120:
# cope with wrap
self.mpstate.attitude_time_s = att_time
elif mtype == "COMMAND_ACK":
try:
cmd = mavutil.mavlink.enums["MAV_CMD"][m.command].name
cmd = cmd[8:]
res = mavutil.mavlink.enums["MAV_RESULT"][m.result].name
res = res[11:]
if m.target_component not in [mavutil.mavlink.MAV_COMP_ID_MAVCAN]:
self.mpstate.console.writeln("Got COMMAND_ACK: %s: %s" % (cmd, res))
except Exception:
self.mpstate.console.writeln("Got MAVLink msg: %s" % m)
if m.command == mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION:
if m.result == mavutil.mavlink.MAV_RESULT_ACCEPTED:
self.say("Calibrated")
elif m.result == mavutil.mavlink.MAV_RESULT_FAILED:
self.say("Calibration failed")
elif m.result == mavutil.mavlink.MAV_RESULT_UNSUPPORTED:
self.say("Calibration unsupported")
elif m.result == mavutil.mavlink.MAV_RESULT_TEMPORARILY_REJECTED:
self.say("Calibration temporarily rejected")
else:
self.say("Calibration response (%u)" % m.result)
elif mtype == "MISSION_ACK":
try:
t = mavutil.mavlink.enums["MAV_MISSION_TYPE"][m.mission_type].name
t = t[12:]
res = mavutil.mavlink.enums["MAV_MISSION_RESULT"][m.type].name
res = res[12:]
self.mpstate.console.writeln("Got MISSION_ACK: %s: %s" % (t, res))
except Exception as e:
self.mpstate.console.writeln("Got MAVLink msg: %s" % m)
else:
#self.mpstate.console.writeln("Got MAVLink msg: %s" % m)
pass
if self.status.watch is not None:
for msg_type in self.status.watch:
if fnmatch.fnmatch(mtype.upper(), msg_type.upper()):
self.mpstate.console.writeln('< '+ str(m))
break
def mavlink_packet(self, msg):
'''handle an incoming mavlink packet'''
pass
def master_callback(self, m, master):
'''process mavlink message m on master, sending any messages to recipients'''
sysid = m.get_srcSystem()
mtype = m.get_type()
if mtype in ['HEARTBEAT', 'HIGH_LATENCY2'] and m.type != mavutil.mavlink.MAV_TYPE_GCS:
compid = m.get_srcComponent()
if sysid not in self.vehicle_list:
self.vehicle_list.add(sysid)
if (sysid, compid) not in self.mpstate.vehicle_link_map[master.linknum]:
self.mpstate.vehicle_link_map[master.linknum].add((sysid, compid))
print("Detected vehicle {0}:{1} on link {2}".format(sysid, compid, master.linknum))
# see if it is handled by a specialised sysid connection
if sysid in self.mpstate.sysid_outputs:
self.mpstate.sysid_outputs[sysid].write(m.get_msgbuf())
if mtype == "GLOBAL_POSITION_INT":
for modname in 'map', 'asterix', 'NMEA', 'NMEA2':
mod = self.module(modname)
if mod is not None:
mod.set_secondary_vehicle_position(m)
return
if getattr(m, '_timestamp', None) is None:
master.post_message(m)
self.status.counters['MasterIn'][master.linknum] += 1
if mtype == 'GLOBAL_POSITION_INT':
# send GLOBAL_POSITION_INT to 2nd GCS for 2nd vehicle display
for sysid in self.mpstate.sysid_outputs:
self.mpstate.sysid_outputs[sysid].write(m.get_msgbuf())
if self.mpstate.settings.fwdpos:
for link in self.mpstate.mav_master:
if link != master:
link.write(m.get_msgbuf())
# and log them
if mtype not in dataPackets and self.mpstate.logqueue:
# put link number in bottom 2 bits, so we can analyse packet
# delay in saved logs
usec = self.get_usec()
usec = (usec & ~3) | master.linknum
self.mpstate.logqueue.put(bytearray(struct.pack('>Q', usec) + m.get_msgbuf()))
# keep the last message of each type around
self.status.msgs[mtype] = m
instance_field = getattr(m, '_instance_field', None)
if mtype not in self.status.msg_count:
self.status.msg_count[mtype] = 0
self.status.msg_count[mtype] += 1
if instance_field is not None:
instance_value = getattr(m, instance_field, None)
if instance_value is not None:
mtype_instance = "%s[%s]" % (mtype, instance_value)
self.status.msgs[mtype_instance] = m
if mtype_instance not in self.status.msg_count:
self.status.msg_count[mtype_instance] = 0
self.status.msg_count[mtype_instance] += 1
if m.get_srcComponent() == mavutil.mavlink.MAV_COMP_ID_GIMBAL and mtype == 'HEARTBEAT':
# silence gimbal heartbeat packets for now
return
if getattr(m, 'time_boot_ms', None) is not None and self.settings.target_system == m.get_srcSystem():
# update link_delayed attribute
self.handle_msec_timestamp(m, master)
if mtype in activityPackets:
if master.linkerror:
master.linkerror = False
self.say("link %s OK" % (self.link_label(master)))
self.status.last_message = time.time()
master.last_message = self.status.last_message
if master.link_delayed and self.mpstate.settings.checkdelay:
# don't process delayed packets that cause double reporting
if mtype in delayedPackets:
return
self.master_msg_handling(m, master)
# don't pass along bad data
if mtype != 'BAD_DATA':
# pass messages along to listeners, except for REQUEST_DATA_STREAM, which
# would lead a conflict in stream rate setting between mavproxy and the other
# GCS
if self.mpstate.settings.mavfwd_rate or mtype != 'REQUEST_DATA_STREAM':
if mtype not in self.no_fwd_types:
for r in self.mpstate.mav_outputs:
r.write(m.get_msgbuf())
sysid = m.get_srcSystem()
target_sysid = self.target_system
# pass to modules
for (mod,pm) in self.mpstate.modules:
if not hasattr(mod, 'mavlink_packet'):
continue
# sysid 51/'3' is used by SiK radio for the injected RADIO/RADIO_STATUS mavlink frames.
# In order to be able to pass these to e.g. the graph module, which is not multi-vehicle,
# special handling is needed, so that the module gets both RADIO_STATUS and (single) target
# vehicle information.
if not(sysid == 51 and mtype in radioStatusPackets):
if not mod.multi_vehicle and sysid != target_sysid:
# only pass packets not from our target to modules that
# have marked themselves as being multi-vehicle capable
continue
try:
mod.mavlink_packet(m)
except Exception as msg:
if self.mpstate.settings.moddebug == 1:
print(msg)
elif self.mpstate.settings.moddebug > 1:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
def cmd_vehicle(self, args):
'''handle vehicle commands'''
if len(args) < 1:
print("Usage: vehicle SYSID[:COMPID]")
return
a = args[0].split(':')
self.mpstate.settings.target_system = int(a[0])
if len(a) > 1:
self.mpstate.settings.target_component = int(a[1])
# change default link based on most recent HEARTBEAT
best_link = 0
best_timestamp = 0
for i in range(len(self.mpstate.mav_master)):
m = self.mpstate.mav_master[i]
m.target_system = self.mpstate.settings.target_system
m.target_component = self.mpstate.settings.target_component
if 'HEARTBEAT' in m.messages:
stamp = m.messages['HEARTBEAT']._timestamp
src_system = m.messages['HEARTBEAT'].get_srcSystem()
if stamp > best_timestamp:
best_link = i
best_timestamp = stamp
m.link_delayed = False
self.mpstate.settings.link = best_link + 1
print("Set vehicle %s (link %u)" % (args[0], best_link+1))
def init(mpstate):
'''initialise module'''
return LinkModule(mpstate)
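# Illustrative helper sketch (not part of the original module): the logging
# code in master_callback()/master_send_callback() stores the link number in
# the bottom two bits of the 64-bit microsecond timestamp written in front of
# each logged MAVLink message. A log reader could split the fields back apart
# like this.
def split_logged_usec(usec):
    '''return (timestamp_usec, linknum) for a timestamp written by this module'''
    return (usec & ~3, usec & 3)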
| AndKe/MAVProxy | MAVProxy/modules/mavproxy_link.py | Python | gpl-3.0 | 44,637 |
#!/usr/bin/env python
# coding=utf-8
"""572. Idempotent matrices
https://projecteuler.net/problem=572
A matrix $M$ is called idempotent if $M^2 = M$.
Let $M$ be a three by three matrix : $M=\begin{pmatrix} a & b & c\\\ d & e &
f\\\ g &h &i\\\ \end{pmatrix}$.
Let C(n) be the number of idempotent three by three matrices $M$ with integer
elements such that
$ -n \le a,b,c,d,e,f,g,h,i \le n$.
C(1)=164 and C(2)=848.
Find C(200).
"""
| openqt/algorithms | projecteuler/pe572-idempotent-matrices.py | Python | gpl-3.0 | 441 |
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from keras.datasets import mnist, cifar10, cifar100
from sklearn.preprocessing import LabelBinarizer
from nets import LeNet, LeNetVarDropout, VGG, VGGVarDropout
sess = tf.Session()
def main():
dataset = 'cifar10' # mnist, cifar10, cifar100
# Load the data
# It will be downloaded first if necessary
if dataset == 'mnist':
(X_train, y_train), (X_test, y_test) = mnist.load_data()
img_size = 28
num_classes = 10
num_channels = 1
elif dataset == 'cifar10':
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
img_size = 32
num_classes = 10
num_channels = 3
elif dataset == 'cifar100':
(X_train, y_train), (X_test, y_test) = cifar100.load_data()
img_size = 32
num_classes = 100
num_channels = 3
lb = LabelBinarizer()
lb.fit(y_train)
y_train_one_hot = lb.transform(y_train)
y_test_one_hot = lb.transform(y_test)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = np.reshape(X_train,[-1,img_size,img_size,num_channels])
X_test = np.reshape(X_test,[-1,img_size,img_size,num_channels])
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
m = VGGVarDropout(img_size,num_channels,num_classes)
sess.run(tf.global_variables_initializer())
m.fit(X_train,y_train_one_hot,sess)
pred = m.predict(X_test,sess)
y_test = np.squeeze(y_test)
acc = np.mean(np.equal(y_test,pred))
print("\nTest accuracy: %.3f" % acc)
if __name__ == "__main__":
main()
| cjratcliff/variational-dropout | main.py | Python | gpl-3.0 | 1,649 |
# This file is part of Osgende
# Copyright (C) 2017 Sarah Hoffmann
#
# This is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# With minor modifications borrowed from
# https://bitbucket.org/zzzeek/sqlalchemy/issues/3566/figure-out-how-to-support-all-of-pgs
from sqlalchemy.sql import functions
from sqlalchemy.sql.selectable import FromClause
from sqlalchemy.sql.elements import ColumnClause
from sqlalchemy.ext.compiler import compiles
class FunctionColumn(ColumnClause):
def __init__(self, function, name, type_=None):
self.function = self.table = function
self.name = self.key = name
self.type_ = type_
self.is_literal = False
@property
def _from_objects(self):
return []
def _make_proxy(self, selectable, name=None, attach=True,
name_is_truncatable=False, **kw):
co = ColumnClause(self.name, self.type_)
co.table = selectable
co._proxies = [self]
if selectable._is_clone_of is not None:
co._is_clone_of = \
selectable._is_clone_of.columns.get(co.key)
if attach:
selectable._columns[co.key] = co
return co
@compiles(FunctionColumn)
def _compile_function_column(element, compiler, **kw):
return "(%s).%s" % (
compiler.process(element.function, **kw),
compiler.preparer.quote(element.name)
)
class ColumnFunction(functions.FunctionElement):
__visit_name__ = 'function'
@property
def columns(self):
return FromClause.columns.fget(self)
def _populate_column_collection(self):
for name, t in self.column_names:
self._columns[name] = FunctionColumn(self, name, t)
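# Illustrative subclass sketch (not part of the original module): a concrete
# ColumnFunction subclass provides a `column_names` list of (name, type) pairs
# describing the composite value the SQL function returns, which
# _populate_column_collection() above turns into FunctionColumn objects, plus
# a `name` attribute presumably used by the generic 'function' compilation.
# The function name 'pg_expand' and its columns are invented for illustration;
# exact behaviour depends on the SQLAlchemy version this module targets.
from sqlalchemy import Integer, Text
class pg_expand(ColumnFunction):
    name = 'pg_expand'
    column_names = [('id', Integer()), ('value', Text())]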
| lonvia/osgende | osgende/common/sqlalchemy/column_function.py | Python | gpl-3.0 | 2,328 |
import discord
from discord.ext import commands
from .utils.chat_formatting import escape_mass_mentions, italics, pagify
from random import randint
from random import choice
from enum import Enum
from urllib.parse import quote_plus
import datetime
import time
import aiohttp
import asyncio
settings = {"POLL_DURATION" : 60}
class RPS(Enum):
rock = "\N{MOYAI}"
paper = "\N{PAGE FACING UP}"
scissors = "\N{BLACK SCISSORS}"
class RPSParser:
def __init__(self, argument):
argument = argument.lower()
if argument == "rock":
self.choice = RPS.rock
elif argument == "paper":
self.choice = RPS.paper
elif argument == "scissors":
self.choice = RPS.scissors
else:
            raise ValueError("{} is not a valid rock-paper-scissors choice".format(argument))
class General:
"""General commands."""
def __init__(self, bot):
self.bot = bot
self.stopwatches = {}
self.ball = ["As I see it, yes", "It is certain", "It is decidedly so", "Most likely", "Outlook good",
"Signs point to yes", "Without a doubt", "Yes", "Yes – definitely", "You may rely on it", "Reply hazy, try again",
"Ask again later", "Better not tell you now", "Cannot predict now", "Concentrate and ask again",
"Don't count on it", "My reply is no", "My sources say no", "Outlook not so good", "Very doubtful"]
self.poll_sessions = []
@commands.command(hidden=True)
async def ping(self):
"""Pong."""
await self.bot.say("Pong.")
@commands.command()
async def choose(self, *choices):
"""Chooses between multiple choices.
To denote multiple choices, you should use double quotes.
"""
choices = [escape_mass_mentions(c) for c in choices]
if len(choices) < 2:
await self.bot.say('Not enough choices to pick from.')
else:
await self.bot.say(choice(choices))
@commands.command(pass_context=True)
async def roll(self, ctx, number : int = 100):
"""Rolls random number (between 1 and user choice)
Defaults to 100.
"""
author = ctx.message.author
if number > 1:
n = randint(1, number)
await self.bot.say("{} :game_die: {} :game_die:".format(author.mention, n))
else:
await self.bot.say("{} Maybe higher than 1? ;P".format(author.mention))
@commands.command(pass_context=True)
async def flip(self, ctx, user : discord.Member=None):
"""Flips a coin... or a user.
Defaults to coin.
"""
if user != None:
msg = ""
if user.id == self.bot.user.id:
user = ctx.message.author
msg = "Nice try. You think this is funny? How about *this* instead:\n\n"
char = "abcdefghijklmnopqrstuvwxyz"
tran = "ɐqɔpǝɟƃɥᴉɾʞlɯuodbɹsʇnʌʍxʎz"
table = str.maketrans(char, tran)
name = user.display_name.translate(table)
char = char.upper()
tran = "∀qƆpƎℲפHIſʞ˥WNOԀQᴚS┴∩ΛMX⅄Z"
table = str.maketrans(char, tran)
name = name.translate(table)
await self.bot.say(msg + "(╯°□°)╯︵ " + name[::-1])
else:
await self.bot.say("*flips a coin and... " + choice(["HEADS!*", "TAILS!*"]))
@commands.command(pass_context=True)
async def rps(self, ctx, your_choice : RPSParser):
"""Play rock paper scissors"""
author = ctx.message.author
player_choice = your_choice.choice
red_choice = choice((RPS.rock, RPS.paper, RPS.scissors))
cond = {
(RPS.rock, RPS.paper) : False,
(RPS.rock, RPS.scissors) : True,
(RPS.paper, RPS.rock) : True,
(RPS.paper, RPS.scissors) : False,
(RPS.scissors, RPS.rock) : False,
(RPS.scissors, RPS.paper) : True
}
if red_choice == player_choice:
outcome = None # Tie
else:
outcome = cond[(player_choice, red_choice)]
if outcome is True:
await self.bot.say("{} You win {}!"
"".format(red_choice.value, author.mention))
elif outcome is False:
await self.bot.say("{} You lose {}!"
"".format(red_choice.value, author.mention))
else:
await self.bot.say("{} We're square {}!"
"".format(red_choice.value, author.mention))
@commands.command(name="8", aliases=["8ball"])
async def _8ball(self, *, question : str):
"""Ask 8 ball a question
Question must end with a question mark.
"""
if question.endswith("?") and question != "?":
await self.bot.say("`" + choice(self.ball) + "`")
else:
await self.bot.say("That doesn't look like a question.")
@commands.command(aliases=["sw"], pass_context=True)
async def stopwatch(self, ctx):
"""Starts/stops stopwatch"""
author = ctx.message.author
if not author.id in self.stopwatches:
self.stopwatches[author.id] = int(time.perf_counter())
await self.bot.say(author.mention + " Stopwatch started!")
else:
tmp = abs(self.stopwatches[author.id] - int(time.perf_counter()))
tmp = str(datetime.timedelta(seconds=tmp))
await self.bot.say(author.mention + " Stopwatch stopped! Time: **" + tmp + "**")
self.stopwatches.pop(author.id, None)
@commands.command()
async def lmgtfy(self, *, search_terms : str):
"""Creates a lmgtfy link"""
search_terms = escape_mass_mentions(search_terms.replace(" ", "+"))
await self.bot.say("https://lmgtfy.com/?q={}".format(search_terms))
@commands.command(no_pm=True, hidden=True)
async def hug(self, user : discord.Member, intensity : int=1):
"""Because everyone likes hugs
Up to 10 intensity levels."""
name = italics(user.display_name)
if intensity <= 0:
msg = "(っ˘̩╭╮˘̩)っ" + name
elif intensity <= 3:
msg = "(っ´▽`)っ" + name
elif intensity <= 6:
msg = "╰(*´︶`*)╯" + name
elif intensity <= 9:
msg = "(つ≧▽≦)つ" + name
elif intensity >= 10:
msg = "(づ ̄ ³ ̄)づ{} ⊂(´・ω・`⊂)".format(name)
await self.bot.say(msg)
@commands.command(pass_context=True, no_pm=True)
async def userinfo(self, ctx, *, user: discord.Member=None):
"""Shows users's informations"""
author = ctx.message.author
server = ctx.message.server
if not user:
user = author
roles = [x.name for x in user.roles if x.name != "@everyone"]
joined_at = self.fetch_joined_at(user, server)
since_created = (ctx.message.timestamp - user.created_at).days
since_joined = (ctx.message.timestamp - joined_at).days
user_joined = joined_at.strftime("%d %b %Y %H:%M")
user_created = user.created_at.strftime("%d %b %Y %H:%M")
member_number = sorted(server.members,
key=lambda m: m.joined_at).index(user) + 1
created_on = "{}\n({} days ago)".format(user_created, since_created)
joined_on = "{}\n({} days ago)".format(user_joined, since_joined)
game = "Chilling in {} status".format(user.status)
if user.game is None:
pass
elif user.game.url is None:
game = "Playing {}".format(user.game)
else:
game = "Streaming: [{}]({})".format(user.game, user.game.url)
if roles:
roles = sorted(roles, key=[x.name for x in server.role_hierarchy
if x.name != "@everyone"].index)
roles = ", ".join(roles)
else:
roles = "None"
data = discord.Embed(description=game, colour=user.colour)
data.add_field(name="Joined Discord on", value=created_on)
data.add_field(name="Joined this server on", value=joined_on)
data.add_field(name="Roles", value=roles, inline=False)
data.set_footer(text="Member #{} | User ID:{}"
"".format(member_number, user.id))
name = str(user)
name = " ~ ".join((name, user.nick)) if user.nick else name
if user.avatar_url:
data.set_author(name=name, url=user.avatar_url)
data.set_thumbnail(url=user.avatar_url)
else:
data.set_author(name=name)
try:
await self.bot.say(embed=data)
except discord.HTTPException:
await self.bot.say("I need the `Embed links` permission "
"to send this")
@commands.command(pass_context=True, no_pm=True)
async def serverinfo(self, ctx):
"""Shows server's informations"""
server = ctx.message.server
online = len([m.status for m in server.members
if m.status == discord.Status.online or
m.status == discord.Status.idle])
total_users = len(server.members)
text_channels = len([x for x in server.channels
if x.type == discord.ChannelType.text])
voice_channels = len(server.channels) - text_channels
passed = (ctx.message.timestamp - server.created_at).days
created_at = ("Since {}. That's over {} days ago!"
"".format(server.created_at.strftime("%d %b %Y %H:%M"),
passed))
colour = ''.join([choice('0123456789ABCDEF') for x in range(6)])
colour = int(colour, 16)
data = discord.Embed(
description=created_at,
colour=discord.Colour(value=colour))
data.add_field(name="Region", value=str(server.region))
data.add_field(name="Users", value="{}/{}".format(online, total_users))
data.add_field(name="Text Channels", value=text_channels)
data.add_field(name="Voice Channels", value=voice_channels)
data.add_field(name="Roles", value=len(server.roles))
data.add_field(name="Owner", value=str(server.owner))
data.set_footer(text="Server ID: " + server.id)
if server.icon_url:
data.set_author(name=server.name, url=server.icon_url)
data.set_thumbnail(url=server.icon_url)
else:
data.set_author(name=server.name)
try:
await self.bot.say(embed=data)
except discord.HTTPException:
await self.bot.say("I need the `Embed links` permission "
"to send this")
@commands.command()
async def urban(self, *, search_terms : str, definition_number : int=1):
"""Urban Dictionary search
Definition number must be between 1 and 10"""
def encode(s):
return quote_plus(s, encoding='utf-8', errors='replace')
# definition_number is just there to show up in the help
# all this mess is to avoid forcing double quotes on the user
search_terms = search_terms.split(" ")
try:
if len(search_terms) > 1:
pos = int(search_terms[-1]) - 1
search_terms = search_terms[:-1]
else:
pos = 0
if pos not in range(0, 11): # API only provides the
pos = 0 # top 10 definitions
except ValueError:
pos = 0
search_terms = "+".join([encode(s) for s in search_terms])
url = "http://api.urbandictionary.com/v0/define?term=" + search_terms
try:
async with aiohttp.get(url) as r:
result = await r.json()
if result["list"]:
definition = result['list'][pos]['definition']
example = result['list'][pos]['example']
defs = len(result['list'])
msg = ("**Definition #{} out of {}:\n**{}\n\n"
"**Example:\n**{}".format(pos+1, defs, definition,
example))
msg = pagify(msg, ["\n"])
for page in msg:
await self.bot.say(page)
else:
await self.bot.say("Your search terms gave no results.")
except IndexError:
await self.bot.say("There is no definition #{}".format(pos+1))
except:
await self.bot.say("Error.")
@commands.command(pass_context=True, no_pm=True)
async def poll(self, ctx, *text):
"""Starts/stops a poll
Usage example:
poll Is this a poll?;Yes;No;Maybe
poll stop"""
message = ctx.message
if len(text) == 1:
if text[0].lower() == "stop":
await self.endpoll(message)
return
if not self.getPollByChannel(message):
check = " ".join(text).lower()
if "@everyone" in check or "@here" in check:
await self.bot.say("Nice try.")
return
p = NewPoll(message, " ".join(text), self)
if p.valid:
self.poll_sessions.append(p)
await p.start()
else:
await self.bot.say("poll question;option1;option2 (...)")
else:
await self.bot.say("A poll is already ongoing in this channel.")
async def endpoll(self, message):
if self.getPollByChannel(message):
p = self.getPollByChannel(message)
if p.author == message.author.id: # or isMemberAdmin(message)
await self.getPollByChannel(message).endPoll()
else:
await self.bot.say("Only admins and the author can stop the poll.")
else:
await self.bot.say("There's no poll ongoing in this channel.")
def getPollByChannel(self, message):
for poll in self.poll_sessions:
if poll.channel == message.channel:
return poll
return False
async def check_poll_votes(self, message):
if message.author.id != self.bot.user.id:
if self.getPollByChannel(message):
self.getPollByChannel(message).checkAnswer(message)
def fetch_joined_at(self, user, server):
"""Just a special case for someone special :^)"""
if user.id == "96130341705637888" and server.id == "133049272517001216":
return datetime.datetime(2016, 1, 10, 6, 8, 4, 443000)
else:
return user.joined_at
class NewPoll():
def __init__(self, message, text, main):
self.channel = message.channel
self.author = message.author.id
self.client = main.bot
self.poll_sessions = main.poll_sessions
msg = [ans.strip() for ans in text.split(";")]
if len(msg) < 2: # Needs at least one question and 2 choices
self.valid = False
return None
else:
self.valid = True
self.already_voted = []
self.question = msg[0]
msg.remove(self.question)
self.answers = {}
i = 1
for answer in msg: # {id : {answer, votes}}
self.answers[i] = {"ANSWER" : answer, "VOTES" : 0}
i += 1
async def start(self):
msg = "**POLL STARTED!**\n\n{}\n\n".format(self.question)
for id, data in self.answers.items():
msg += "{}. *{}*\n".format(id, data["ANSWER"])
msg += "\nType the number to vote!"
await self.client.send_message(self.channel, msg)
await asyncio.sleep(settings["POLL_DURATION"])
if self.valid:
await self.endPoll()
async def endPoll(self):
self.valid = False
msg = "**POLL ENDED!**\n\n{}\n\n".format(self.question)
for data in self.answers.values():
msg += "*{}* - {} votes\n".format(data["ANSWER"], str(data["VOTES"]))
await self.client.send_message(self.channel, msg)
self.poll_sessions.remove(self)
def checkAnswer(self, message):
try:
i = int(message.content)
if i in self.answers.keys():
if message.author.id not in self.already_voted:
data = self.answers[i]
data["VOTES"] += 1
self.answers[i] = data
self.already_voted.append(message.author.id)
except ValueError:
pass
def setup(bot):
n = General(bot)
bot.add_listener(n.check_poll_votes, "on_message")
bot.add_cog(n)
| jicruz/heroku-bot | cogs/general.py | Python | gpl-3.0 | 17,226 |
#!/usr/bin/python3
import _thread
import RPi.GPIO as GPIO
import socket
import time
from time import sleep
from sys import exit
import datetime
#import MySQLdb
# Start task command
# sleep 30 && python /home/pi/Scripts/Sprinkler/Sprinkler.py > /home/pi/Scripts/Sprinkler/log.txt 2>&1
# Set GPIO output points
Zones = [5, 6, 13, 19]
StatusLED = 16
# Set GPIO input points
CancelButton = 18
WaterSensor = 10
# Water Sensor Enabled?
Sensor = False
#Is it currently raining
isRaining = False
defaultWaitDuration = 0
def setup():
global serversocket,t
# Setup GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(True)
# Input Cancel Button
GPIO.setup(CancelButton, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Input Rain Sensor
if Sensor:
GPIO.setup(WaterSensor, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Setup 4 zones on GPIO
# Turn all Zones "OFF"
for i in Zones:
GPIO.setup(i, GPIO.OUT)
GPIO.output(i, GPIO.HIGH)
# Setup status LED
GPIO.setup(StatusLED, GPIO.OUT)
# Setup Sockets
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 9999
serversocket.bind((host, port))
serversocket.listen(5)
addLog("System", "Setup complete")
def mainRun():
global isRaining
addLog("System", "Main Thread started")
# Always check the switch
_thread.start_new_thread(checkSwitch, ((),))
while True:
global serversocket
clientsocket,addr = serversocket.accept()
fromClient = clientsocket.recv(1024)
clientsocket.close()
strFromClient = str(fromClient.decode("ascii"))
addLog("Recived", strFromClient)
# Split incoming message
requestType = strFromClient.split(":")
# Do something with that message
# What was the command?
if(requestType[0] == "WATER"):
# Is it raining
if(isRaining == False):
# Turn off LED if it was raining
statusLED("off")
# Start watering
_thread.start_new_thread(water, (requestType[1], requestType[2], ) )
elif(requestType[0] == "ZONE"):
if(requestType[1] == "ON"):
zone(int(requestType[2]), "ON")
else:
zone(int(requestType[2]), "OFF")
elif(requestType[0] == "RainStatus"):
# Some day we will send something back
print("nothing")
elif(requestType[0] == "QUIT"):
destroy()
# Check switch
def checkSwitch(dummy):  # not a method: _thread.start_new_thread passes a placeholder tuple as the only argument
global isRaining
while True:
state = GPIO.input(CancelButton)
if(state):
if(state != isRaining):
addLog("System", "Switch TRUE")
statusLED("solid")
isRaining = True
else:
if(state != isRaining):
addLog("System", "Switch FALSE")
statusLED("off")
isRaining = False
sleep(2)
# Water the lawn
def water(zoneNum, duration):
# Turn on zone
zone(int(zoneNum), "ON")
statusLED("on")
# Sleep for that amount
sleep(int(duration) * 60)
# Turn off zone
zone(int(zoneNum), "OFF")
statusLED("off")
# Zone Control Setup
def zone(zoneSelect, onoff):
if(onoff == "ON"):
GPIO.output(Zones[zoneSelect], 0)
addLog('Zone ' + str(zoneSelect), 'ON')
else:
GPIO.output(Zones[zoneSelect], 1)
addLog('Zone ' + str(zoneSelect), 'OFF')
def rain():
global isRaining
# Check if it's raining
if Sensor:
if GPIO.input(WaterSensor):
isRaining = True
else:
isRaining = False
def statusLED(status):
if status == "blink":
GPIO.output(StatusLED, GPIO.HIGH)
sleep(0.5)
GPIO.output(StatusLED, GPIO.LOW)
sleep(0.5)
elif status == "solid":
GPIO.output(StatusLED, GPIO.HIGH)
elif status == "off":
GPIO.output(StatusLED, GPIO.LOW)
def addLog(currentZone, addedText):
now = datetime.datetime.now()
print ("{0}: {1}: {2}".format(now, currentZone, addedText))
def destroy():
global serversocket
serversocket.shutdown(socket.SHUT_RDWR)
for i in Zones:
        GPIO.output(i, GPIO.HIGH)  # HIGH de-energises a zone (relays are active-low, see setup()/zone())
GPIO.output(StatusLED, GPIO.LOW)
addLog('System', 'Sprinkler Script OFF')
exit()
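# Illustrative client sketch (not part of the original script): mainRun()
# above accepts short ASCII commands over TCP port 9999, e.g.
# "WATER:<zone>:<minutes>", "ZONE:ON:<zone>", "ZONE:OFF:<zone>" or "QUIT".
# The host defaults to this machine's hostname because that is what the
# server binds to in setup().
def sendCommand(command, host=None, port=9999):
    if host is None:
        host = socket.gethostname()
    clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    clientsocket.connect((host, port))
    clientsocket.send(command.encode("ascii"))
    clientsocket.close()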
if __name__ == '__main__':
setup()
try:
mainRun()
except KeyboardInterrupt:
destroy()
finally:
GPIO.cleanup()
exit()
else:
destroy()
| Makerblaker/Sprinkler | server.py | Python | gpl-3.0 | 4,160 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
from time import sleep
from ansible.module_utils.cloud import CloudRetry
try:
import boto
import boto.ec2 #boto does weird import stuff
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
import botocore
HAS_BOTO3 = True
except:
HAS_BOTO3 = False
try:
from distutils.version import LooseVersion
HAS_LOOSE_VERSION = True
except:
HAS_LOOSE_VERSION = False
from ansible.module_utils.six import string_types, binary_type, text_type
class AnsibleAWSError(Exception):
pass
def _botocore_exception_maybe():
"""
Allow for boto3 not being installed when using these utils by wrapping
botocore.exceptions instead of assigning from it directly.
"""
if HAS_BOTO3:
return botocore.exceptions.ClientError
return type(None)
class AWSRetry(CloudRetry):
base_class = _botocore_exception_maybe()
@staticmethod
def status_code_from_exception(error):
return error.response['Error']['Code']
@staticmethod
def found(response_code):
# This list of failures is based on this API Reference
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
retry_on = [
'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
'InternalFailure', 'InternalError'
]
not_found = re.compile(r'^\w+.NotFound')
if response_code in retry_on or not_found.search(response_code):
return True
else:
return False
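# Illustrative usage sketch (not part of the original module): CloudRetry
# subclasses such as AWSRetry are applied as decorators so that boto3 calls
# failing with one of the retryable error codes recognised by found() above
# are retried with backoff. The tries/delay/backoff arguments assume the
# CloudRetry.backoff() signature from ansible.module_utils.cloud of this era.
#
# @AWSRetry.backoff(tries=5, delay=3, backoff=2.0)
# def describe_some_resource(client, **params):
#     return client.describe_instances(**params)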
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
try:
return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
except ValueError:
module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call')
def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
profile = params.pop('profile_name', None)
if conn_type not in ['both', 'resource', 'client']:
raise ValueError('There is an issue in the calling code. You '
'must specify either both, resource, or client to '
'the conn_type parameter in the boto3_conn function '
'call')
if conn_type == 'resource':
resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params)
return resource
elif conn_type == 'client':
client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params)
return client
else:
client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params)
resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params)
return client, resource
boto3_inventory_conn = _boto3_conn
def aws_common_argument_spec():
return dict(
ec2_url=dict(),
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
validate_certs=dict(default=True, type='bool'),
security_token=dict(aliases=['access_token'], no_log=True),
profile=dict(),
)
def ec2_argument_spec():
spec = aws_common_argument_spec()
spec.update(
dict(
region=dict(aliases=['aws_region', 'ec2_region']),
)
)
return spec
def get_aws_connection_info(module, boto3=False):
# Check module args for credentials, then check environment vars
# access_key
ec2_url = module.params.get('ec2_url')
access_key = module.params.get('aws_access_key')
secret_key = module.params.get('aws_secret_key')
security_token = module.params.get('security_token')
region = module.params.get('region')
profile_name = module.params.get('profile')
validate_certs = module.params.get('validate_certs')
if not ec2_url:
if 'AWS_URL' in os.environ:
ec2_url = os.environ['AWS_URL']
elif 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
if not access_key:
if 'AWS_ACCESS_KEY_ID' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY_ID']
elif 'AWS_ACCESS_KEY' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY']
elif 'EC2_ACCESS_KEY' in os.environ:
access_key = os.environ['EC2_ACCESS_KEY']
else:
# in case access_key came in as empty string
access_key = None
if not secret_key:
if 'AWS_SECRET_ACCESS_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
elif 'AWS_SECRET_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_KEY']
elif 'EC2_SECRET_KEY' in os.environ:
secret_key = os.environ['EC2_SECRET_KEY']
else:
# in case secret_key came in as empty string
secret_key = None
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'AWS_DEFAULT_REGION' in os.environ:
region = os.environ['AWS_DEFAULT_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
else:
if not boto3:
# boto.config.get returns None if config not found
region = boto.config.get('Boto', 'aws_region')
if not region:
region = boto.config.get('Boto', 'ec2_region')
elif HAS_BOTO3:
# here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
region = botocore.session.get_session().get_config_variable('region')
else:
module.fail_json(msg="Boto3 is required for this module. Please install boto3 and try again")
if not security_token:
if 'AWS_SECURITY_TOKEN' in os.environ:
security_token = os.environ['AWS_SECURITY_TOKEN']
elif 'AWS_SESSION_TOKEN' in os.environ:
security_token = os.environ['AWS_SESSION_TOKEN']
elif 'EC2_SECURITY_TOKEN' in os.environ:
security_token = os.environ['EC2_SECURITY_TOKEN']
else:
# in case security_token came in as empty string
security_token = None
if HAS_BOTO3 and boto3:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=security_token)
boto_params['verify'] = validate_certs
if profile_name:
boto_params['profile_name'] = profile_name
else:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
security_token=security_token)
# only set profile_name if passed as an argument
if profile_name:
boto_params['profile_name'] = profile_name
boto_params['validate_certs'] = validate_certs
for param, value in boto_params.items():
if isinstance(value, binary_type):
boto_params[param] = text_type(value, 'utf-8', 'strict')
return region, ec2_url, boto_params
def get_ec2_creds(module):
    ''' For compatibility with old modules that don't/can't yet
        use the ec2_connect method '''
region, ec2_url, boto_params = get_aws_connection_info(module)
return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
def boto_fix_security_token_in_profile(conn, profile_name):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + profile_name
if boto.config.has_option(profile, 'aws_security_token'):
conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
return conn
def connect_to_aws(aws_module, region, **params):
conn = aws_module.connect_to_region(region, **params)
if not conn:
if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__))
else:
raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
if params.get('profile_name'):
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
return conn
def ec2_connect(module):
""" Return an ec2 connection"""
region, ec2_url, boto_params = get_aws_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **boto_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
return ec2
def paging(pause=0, marker_property='marker'):
""" Adds paging to boto retrieval functions that support a 'marker'
this is configurable as not all boto functions seem to use the
same name.
"""
def wrapper(f):
def page(*args, **kwargs):
results = []
marker = None
while True:
try:
new = f(*args, marker=marker, **kwargs)
marker = getattr(new, marker_property)
results.extend(new)
if not marker:
break
elif pause:
sleep(pause)
except TypeError:
                # Older versions of boto do not allow the marker param; just run normally
results = f(*args, **kwargs)
break
return results
return page
return wrapper
def camel_dict_to_snake_dict(camel_dict):
def camel_to_snake(name):
import re
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
s1 = first_cap_re.sub(r'\1_\2', name)
return all_cap_re.sub(r'\1_\2', s1).lower()
def value_is_list(camel_list):
checked_list = []
for item in camel_list:
if isinstance(item, dict):
checked_list.append(camel_dict_to_snake_dict(item))
elif isinstance(item, list):
checked_list.append(value_is_list(item))
else:
checked_list.append(item)
return checked_list
snake_dict = {}
for k, v in camel_dict.items():
if isinstance(v, dict):
snake_dict[camel_to_snake(k)] = camel_dict_to_snake_dict(v)
elif isinstance(v, list):
snake_dict[camel_to_snake(k)] = value_is_list(v)
else:
snake_dict[camel_to_snake(k)] = v
return snake_dict
def snake_dict_to_camel_dict(snake_dict):
def camelize(complex_type):
if complex_type is None:
return
new_type = type(complex_type)()
if isinstance(complex_type, dict):
for key in complex_type:
new_type[camel(key)] = camelize(complex_type[key])
elif isinstance(complex_type, list):
for i in range(len(complex_type)):
new_type.append(camelize(complex_type[i]))
else:
return complex_type
return new_type
def camel(words):
return words.split('_')[0] + ''.join(x.capitalize() or '_' for x in words.split('_')[1:])
return camelize(snake_dict)
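# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The two converters above are near-inverses: camel_dict_to_snake_dict() turns a
# boto3-style response into Ansible-style keys, while snake_dict_to_camel_dict()
# produces lowerCamelCase keys for request parameters.  Sample data only.
def _demo_case_conversion():
    response = {'InstanceType': 't2.micro', 'BlockDeviceMappings': [{'DeviceName': '/dev/sda1'}]}
    snaked = camel_dict_to_snake_dict(response)
    # -> {'instance_type': 't2.micro', 'block_device_mappings': [{'device_name': '/dev/sda1'}]}
    cameled = snake_dict_to_camel_dict({'maximum_percent': 200})
    # -> {'maximumPercent': 200}
    return snaked, cameled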
def ansible_dict_to_boto3_filter_list(filters_dict):
""" Convert an Ansible dict of filters to list of dicts that boto3 can use
Args:
filters_dict (dict): Dict of AWS filters.
Basic Usage:
        >>> filters = {'some-aws-id': 'i-01234567'}
        >>> ansible_dict_to_boto3_filter_list(filters)
        [
            {
                'Name': 'some-aws-id',
                'Values': ['i-01234567']
            }
        ]
Returns:
List: List of AWS filters and their values
[
{
'Name': 'some-aws-id',
'Values': [
'i-01234567',
]
}
]
"""
filters_list = []
for k,v in filters_dict.items():
filter_dict = {'Name': k}
if isinstance(v, string_types):
filter_dict['Values'] = [v]
else:
filter_dict['Values'] = v
filters_list.append(filter_dict)
return filters_list
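# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Shape of the Filters argument the helper above produces; the filter names are
# examples and the entry order follows dict iteration order.  The resulting list
# is what boto3 calls expect, e.g. describe_security_groups(Filters=filters).
def _demo_filter_list():
    filters = ansible_dict_to_boto3_filter_list({'vpc-id': 'vpc-12345678', 'tag:Name': ['web', 'db']})
    # -> [{'Name': 'vpc-id', 'Values': ['vpc-12345678']},
    #     {'Name': 'tag:Name', 'Values': ['web', 'db']}]
    return filters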
def boto3_tag_list_to_ansible_dict(tags_list):
""" Convert a boto3 list of resource tags to a flat dict of key:value pairs
Args:
tags_list (list): List of dicts representing AWS tags.
Basic Usage:
>>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
>>> boto3_tag_list_to_ansible_dict(tags_list)
        {
            'MyTagKey': 'MyTagValue'
        }
Returns:
Dict: Dict of key:value pairs representing AWS tags
{
'MyTagKey': 'MyTagValue',
}
"""
tags_dict = {}
for tag in tags_list:
if 'key' in tag:
tags_dict[tag['key']] = tag['value']
elif 'Key' in tag:
tags_dict[tag['Key']] = tag['Value']
return tags_dict
def ansible_dict_to_boto3_tag_list(tags_dict):
""" Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
Args:
tags_dict (dict): Dict representing AWS resource tags.
Basic Usage:
>>> tags_dict = {'MyTagKey': 'MyTagValue'}
>>> ansible_dict_to_boto3_tag_list(tags_dict)
        [
            {
                'Key': 'MyTagKey',
                'Value': 'MyTagValue'
            }
        ]
Returns:
List: List of dicts containing tag keys and values
[
{
'Key': 'MyTagKey',
'Value': 'MyTagValue'
}
]
"""
tags_list = []
for k,v in tags_dict.items():
tags_list.append({'Key': k, 'Value': v})
return tags_list
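# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The two tag helpers above are inverses of each other, which is convenient when
# diffing desired tags (an Ansible dict) against current tags (a boto3 tag list).
def _demo_tag_round_trip():
    desired = {'Name': 'web-1', 'env': 'prod'}
    as_boto3 = ansible_dict_to_boto3_tag_list(desired)
    assert boto3_tag_list_to_ansible_dict(as_boto3) == desired
    return as_boto3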
def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
""" Return list of security group IDs from security group names. Note that security group names are not unique
across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
    will probably lead to a boto exception if you attempt to assign both IDs to a resource, so ensure you wrap the call in
    a try block.
"""
def get_sg_name(sg, boto3):
if boto3:
return sg['GroupName']
else:
return sg.name
def get_sg_id(sg, boto3):
if boto3:
return sg['GroupId']
else:
return sg.id
sec_group_id_list = []
if isinstance(sec_group_list, string_types):
sec_group_list = [sec_group_list]
# Get all security groups
if boto3:
if vpc_id:
filters = [
{
'Name': 'vpc-id',
'Values': [
vpc_id,
]
}
]
all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
else:
all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
else:
if vpc_id:
filters = { 'vpc-id': vpc_id }
all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
else:
all_sec_groups = ec2_connection.get_all_security_groups()
unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
sec_group_name_list = list(set(sec_group_list) - set(unmatched))
if len(unmatched) > 0:
# If we have unmatched names that look like an ID, assume they are
import re
sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
if len(still_unmatched) > 0:
raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
sec_group_id_list += [ str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list ]
return sec_group_id_list
def sort_json_policy_dict(policy_dict):
""" Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
different orders will return true
Args:
policy_dict (dict): Dict representing IAM JSON policy.
Basic Usage:
        >>> my_iam_policy = {'Principle': {'AWS': ["31", "7", "14", "101"]}}
>>> sort_json_policy_dict(my_iam_policy)
Returns:
Dict: Will return a copy of the policy as a Dict but any List will be sorted
{
'Principle': {
                'AWS': ['101', '14', '31', '7']
}
}
"""
def value_is_list(my_list):
checked_list = []
for item in my_list:
if isinstance(item, dict):
checked_list.append(sort_json_policy_dict(item))
elif isinstance(item, list):
checked_list.append(value_is_list(item))
else:
checked_list.append(item)
# Sort list. If it's a list of dictionaries, sort by tuple of key-value
# pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
return checked_list
ordered_policy_dict = {}
for key, value in policy_dict.items():
if isinstance(value, dict):
ordered_policy_dict[key] = sort_json_policy_dict(value)
elif isinstance(value, list):
ordered_policy_dict[key] = value_is_list(value)
else:
ordered_policy_dict[key] = value
return ordered_policy_dict
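# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Typical use of sort_json_policy_dict(): compare a policy fetched from AWS with
# a locally defined one without being tripped up by list ordering.  The policies
# below are toy examples.
def _demo_policy_compare():
    current = {'Statement': [{'Action': ['s3:GetObject', 's3:PutObject'], 'Effect': 'Allow'}]}
    desired = {'Statement': [{'Action': ['s3:PutObject', 's3:GetObject'], 'Effect': 'Allow'}]}
    return sort_json_policy_dict(current) == sort_json_policy_dict(desired)  # True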
def map_complex_type(complex_type, type_map):
"""
    Allows elements within a complex dict/list structure to be cast to a specific type
Example of usage:
DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
'maximum_percent': 'int',
'minimum_healthy_percent': 'int'
}
deployment_configuration = map_complex_type(module.params['deployment_configuration'],
DEPLOYMENT_CONFIGURATION_TYPE_MAP)
    This ensures the mapped keys within the root element are cast to valid integers
"""
if complex_type is None:
return
new_type = type(complex_type)()
if isinstance(complex_type, dict):
for key in complex_type:
if key in type_map:
if isinstance(type_map[key], list):
new_type[key] = map_complex_type(
complex_type[key],
type_map[key][0])
else:
new_type[key] = map_complex_type(
complex_type[key],
type_map[key])
else:
return complex_type
elif isinstance(complex_type, list):
for i in range(len(complex_type)):
new_type.append(map_complex_type(
complex_type[i],
type_map))
elif type_map:
return globals()['__builtins__'][type_map](complex_type)
return new_type
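# --- Illustrative sketch (editor's addition, not part of the original module) ---
# map_complex_type() casts leaf values according to a type map whose values are
# builtin type names.  The keys mirror the DEPLOYMENT_CONFIGURATION_TYPE_MAP
# example in the docstring; the inputs arrive as strings (as Ansible parameters
# often do) and come back as ints.
def _demo_map_complex_type():
    type_map = {'maximum_percent': 'int', 'minimum_healthy_percent': 'int'}
    params = {'maximum_percent': '200', 'minimum_healthy_percent': '50'}
    # -> {'maximum_percent': 200, 'minimum_healthy_percent': 50}
    return map_complex_type(params, type_map)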
| Inspq/ansible | lib/ansible/module_utils/ec2.py | Python | gpl-3.0 | 21,973 |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
def readfile(fn):
"""Read fn and return the contents."""
with open(path.join(here, fn), "r", encoding="utf-8") as f:
return f.read()
setup(
name="usfm2osis",
packages=find_packages(exclude=["tests*"]),
version="0.6.1",
description="Tools for converting Bibles from USFM to OSIS XML",
author="Christopher C. Little",
author_email="[email protected]",
url="https://github.com/chrislit/usfm2osis",
download_url="https://github.com/chrislit/usfm2osis/archive/master.zip",
keywords=["OSIS", "USFM", "Bible"],
license="GPLv3+",
zip_safe=False,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: GNU General Public License v3 or later \
(GPLv3+)",
"Operating System :: OS Independent",
"Natural Language :: English",
"Intended Audience :: Religion",
"Intended Audience :: Developers",
"Topic :: Religion",
"Topic :: Text Processing :: Markup :: XML",
],
long_description="\n\n".join([readfile(f) for f in ("README.rst",)]),
# scripts=['scripts/usfm2osis', 'scripts/usfmtags'],
package_data={"usfm2osis": ["schemas/*.xsd"]},
entry_points={
"console_scripts": [
"usfm2osis = usfm2osis.scripts.usfm2osis:main",
"usfmtags = usfm2osis.scripts.usfmtags:main",
]
},
)
| chrislit/usfm2osis | setup.py | Python | gpl-3.0 | 1,809 |
import unittest
from tempfile import NamedTemporaryFile
import os
import numpy as np
from Orange.data import ContinuousVariable, DiscreteVariable
from Orange.data.io import CSVFormat
tab_file = """\
Feature 1\tFeature 2\tFeature 3
1.0 \t1.3 \t5
2.0 \t42 \t7
"""
csv_file = """\
Feature 1, Feature 2,Feature 3
1.0, 1.3, 5
2.0, 42, 7
"""
tab_file_nh = """\
1.0 \t1.3 \t5
2.0 \t42 \t7
"""
csv_file_nh = """\
1.0, 1.3, 5
2.0, 42, 7
"""
class TestTabReader(unittest.TestCase):
def read_easy(self, s, name):
file = NamedTemporaryFile("wt", delete=False)
filename = file.name
try:
file.write(s)
file.close()
table = CSVFormat().read_file(filename)
f1, f2, f3 = table.domain.variables
self.assertIsInstance(f1, DiscreteVariable)
self.assertEqual(f1.name, name + "1")
self.assertIsInstance(f2, ContinuousVariable)
self.assertEqual(f2.name, name + "2")
self.assertIsInstance(f3, ContinuousVariable)
self.assertEqual(f3.name, name + "3")
self.assertEqual(len(table.domain.class_vars), 1)
finally:
os.remove(filename)
def test_read_tab(self):
self.read_easy(tab_file, "Feature ")
self.read_easy(tab_file_nh, "Feature ")
def test_read_csv(self):
self.read_easy(csv_file, "Feature ")
self.read_easy(csv_file_nh, "Feature ")
| PythonCharmers/orange3 | Orange/tests/test_txt_reader.py | Python | gpl-3.0 | 1,539 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created on 2013-10-11
#
# @author: Bartosz Nowak [email protected]
#
# This file is licensed GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007
from __future__ import unicode_literals
import unicodecsv
from datetime import datetime, timedelta
from pytz import timezone
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.views.generic import UpdateView, FormView, TemplateView, CreateView
from neonet.views import LoggedInMixin
from DamageReports import models
from DamageReports import forms
class DamageReports(LoggedInMixin, FormView):
template_name = 'DamageReports/list.html'
form_class = forms.DamageReportsDateFilter
now = datetime.now(timezone('Europe/Warsaw'))
yesterday = now - timedelta(days=1)
initial = {'date_from': yesterday, 'date_to': now}
def form_valid(self, form):
reports = models.DamageReport.objects.select_related('commodity').filter(date__range=(
form.cleaned_data['date_from'], form.cleaned_data['date_to']))
return self.render_to_response(self.get_context_data(form=form, reports=reports))
class DamageReportsCreate(LoggedInMixin, CreateView):
model = models.DamageReport
template_name = 'DamageReports/create.html'
form_class = forms.DamageReportForm
now = datetime.now(timezone('Europe/Warsaw'))
initial = {'date': now}
def get_success_url(self):
return reverse('DamageReports:damage_reports_view')
def form_valid(self, form):
report = form.save(commit=False)
report.user = self.request.user
report.save()
return super(DamageReportsCreate, self).form_valid(form)
class DamageReportsUpdate(LoggedInMixin, UpdateView):
model = models.DamageReport
template_name = 'DamageReports/update.html'
form_class = forms.DamageReportForm
def get_success_url(self):
return reverse('DamageReports:list')
def get_initial(self):
initial = self.initial.copy()
initial['ean'] = self.get_object().commodity.ean
return initial
class DamageReportsExport(LoggedInMixin, FormView):
template_name = 'DamageReports/export.html'
form_class = forms.DamageReportsDateFilter
now = datetime.now(timezone('Europe/Warsaw'))
yesterday = now - timedelta(days=1)
initial = {'date_from': yesterday, 'date_to': now}
def form_valid(self, form):
response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="reports.csv.txt"'
data = models.DamageReport.objects.\
select_related('commodity', 'detection_time', 'category', 'further_action', 'user').\
filter(date__range=(form.cleaned_data['date_from'], form.cleaned_data['date_to']))
writer = unicodecsv.writer(response, delimiter=b';')
if not data:
            writer.writerow(['No reports found'])
else:
for report in data:
row = ['', unicode(report.date), report.brand, report.commodity.__unicode__(), report.serial,
report.detection_time.detection_time, report.category.category, report.comments,
report.further_action.further_action, '', '',
(report.user.first_name + ' ' + report.user.last_name)
]
row = [element.strip() for element in row]
writer.writerow(row)
return response
class DamageReportsCharts(LoggedInMixin, TemplateView):
template_name = 'DamageReports/charts.html'
def get_context_data(self, **kwargs):
context = super(DamageReportsCharts, self).get_context_data(**kwargs)
context['chart'] = self._view()
return context
def _view(self):
self.a = {}
self.b = {}
self.c = {}
objects = models.DamageReport.objects.select_related('category').order_by('-date')
for report in objects:
_date = report.day_str()
if _date not in self.a:
self.a[_date] = 0
if _date not in self.b:
self.b[_date] = 0
if _date not in self.c:
self.c[_date] = 0
getattr(self, report.category.category.lower())[_date] += 1
reports = [{'data': [], 'name': 'A'},
{'data': [], 'name': 'B'},
{'data': [], 'name': 'C'}]
for k, v in self.a.iteritems():
reports[0]['data'].append([k, v])
for k, v in self.b.iteritems():
reports[1]['data'].append([k, v])
for k, v in self.c.iteritems():
reports[2]['data'].append([k, v])
return reports
| sztosz/neonet | DamageReports/views.py | Python | gpl-3.0 | 4,758 |
'''
Scheduler essential classes
'''
import types, socket
# vim: ft=python ts=4 sw=4 sta et sts=4 ai:
job_state = ['waiting','running','error','finished']
queue_state = ['active', 'hold']
host_state = ['up', 'down', 'error']
class BaseScheduler(object) :
'''
Base scheduler class
'''
def __init__(self, conf={}, **kw) :
'''
Initialization
'''
self.name = ''
self.default_options = conf.get('default_options', {})
self.job_env_vars = {}
def submit(self, script, opts = {}, **kw) :
pass
def submit_bulk(self, script, ntask, opts = {}, **kw) :
pass
def list(self, filter = None, **kw):
pass
def status(self, jid, **kw):
pass
def cancel(self, jid_list, **kw):
pass
def hosts(self, **kw) :
'''
This function only give a list of host(s) managed by this scheduler
slot used / total may or may not be set for this function
'''
pass
def queues(self, **kw) :
        '''This function gives both the list of queues and the hosts associated with each queue'''
pass
def job_script_var(self, script) :
'''
        Substitute '@KEY@' markers in the job script with scheduler-specific environment variables
@type script string
@param script input job script
@rtype string
@return patched job script
NOTE: rely on self.job_env_vars
'''
new_script = script
for key, value in self.job_env_vars.iteritems() :
new_script = new_script.replace('@' + key + '@', value)
return new_script
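# --- Illustrative sketch (editor's addition, not part of the original module) ---
# How a concrete scheduler would use job_script_var(): it fills self.job_env_vars
# with its own variable names, and every '@KEY@' marker in the job script is then
# replaced.  The scheduler name and variable names below are invented.
class _ExampleScheduler(BaseScheduler) :
    def __init__(self, conf={}, **kw) :
        BaseScheduler.__init__(self, conf, **kw)
        self.name = 'example'
        self.job_env_vars = {'JOB_ID': '$EXAMPLE_JOBID', 'TASK_ID': '$EXAMPLE_TASKID'}
# _ExampleScheduler().job_script_var('echo @JOB_ID@ task @TASK_ID@')
# -> 'echo $EXAMPLE_JOBID task $EXAMPLE_TASKID'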
class Host(object) :
def __init__(self, **kw) :
self.name = kw.get('name', '')
# slot used and total is used to get number of job running in the host
self.slot_used = kw.get('slot_used', 0)
self.slot_total = kw.get('slot_total', 0)
self.np = kw.get('np', 0)
self.loadavg = kw.get('loadavg', 0)
self.set_state(kw.get('state', 'down'))
def get_state(self) :
return self._state
def set_state(self, state) :
assert state in host_state
self._state = state
state = property(get_state, set_state)
def __repr__(self) :
return '<Host %(name)s,%(np)d,%(slot_used)d/%(slot_total)d,%(_state)s,%(loadavg).1f>' % vars(self)
def __eq__(self, other) :
# if host is logically equal (by ip address and name).
name = None
if type(other) == types.StringType or type(other) == types.UnicodeType :
name = other
elif type(other) == type(self) :
if hasattr(other, 'name') :
name = other.name
if name is None :
return False
try :
result1 = socket.getfqdn(self.name)
result2 = socket.getfqdn(name)
return result1 == result2
except :
return False
def __ne__(self, other) :
return not self.__eq__(other)
class Queue(object) :
def __init__(self, **kw) :
self.name = kw.get('name', '')
self.slot_used = kw.get('slot_used', 0)
self.slot_total = kw.get('slot_total', 0)
self.loadavg = kw.get('loadavg', 0)
self.set_online_hosts(kw.get('online_hosts', None))
self.set_offline_hosts(kw.get('offline_hosts', None))
self.set_state(kw.get('state', 'active'))
def get_state(self) :
return self._state
def set_state(self, state) :
assert state in queue_state
self._state = state
state = property(get_state, set_state)
def get_online_hosts(self) :
return self._online_hosts
def set_online_hosts(self, online_hosts) :
self._online_hosts = online_hosts
online_hosts = property(get_online_hosts, set_online_hosts)
def get_offline_hosts(self) :
return self._offline_hosts
def set_offline_hosts(self, offline_hosts) :
self._offline_hosts = offline_hosts
offline_hosts = property(get_offline_hosts, set_offline_hosts)
def __repr__(self) :
retval = '<Q %(name)s,%(_state)s,%(slot_used)d,%(slot_total)d,%(loadavg).1f>' % vars(self)
if self._online_hosts :
for host in self._online_hosts :
retval = retval + '\n\tOn:%s' % str(host)
if self._offline_hosts :
for host in self._offline_hosts :
retval = retval + '\n\tOff:%s' % str(host)
retval = retval + '\n'
return retval
class JobInfo(object):
def __init__(self, **kw):
self.jid = kw.get('jid', None)
self.tid = kw.get('tid', None)
self.name = kw.get('name', '')
self.owner = kw.get('owner', '')
self.queue = kw.get('queue', '')
self.account = kw.get('account', '')
self._np = kw.get('np', 1)
if kw.has_key('np') :
self.set_np(kw['np'])
self._state = 'waiting'
if kw.has_key('state') :
self.set_state(kw['state'])
self.host = None
self.submittime = kw.get('submittime', None)
self.starttime = kw.get('starttime', None)
self.scheduler = kw.get('scheduler', '')
self.scheduler_host = kw.get('scheduler_host', '')
def __repr__(self):
return '<job %s,%s,%s,%s,%s,%d,%s,%s,%s,%s,%s,%s>' % \
(self.jid,self.tid,self.name,self.owner,self.queue,self.np,self.state,self.scheduler,self.scheduler_host, self.host,self.submittime,self.starttime)
def get_np(self):
return self._np
def set_np(self,v):
self._np = int(v)
np = property(get_np, set_np)
def get_state(self):
return self._state
def set_state(self,v):
assert v in job_state
self._state = v
state = property(get_state, set_state)
def jidparse(jid_str) :
pass
def jidunparse(tuple) :
pass
if __name__ == '__main__' :
h1 = Host(name = 'compute-0-0')
h2 = Host(name = 'compute-0-1')
h3 = Host(name = 'compute-0-x')
#Queue(name = 'test')
#j = JobInfo()
print 'compute-0-0 == compute-0-1 : ', h1 == h2
print 'compute-0-1 == compute-0-0 : ', h2 == h1
print 'compute-0-0 == compute-0-0 : ', h1 == h1
print 'compute-0-0 == compute-0-0 (string) : ', h1 == 'compute-0-0'
print 'compute-0-x == compute-0-0 (string) : ', h3 == 'compute-0-0'
| somsak/rocks-solid | rocks_solid/scheduler/__init__.py | Python | gpl-3.0 | 6,356 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images in CIFAR-10.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_PADDING = 4
slim = tf.contrib.slim
def preprocess_for_train(image,
output_height,
output_width,
padding=_PADDING):
"""Preprocesses the given image for training.
  Note that the image is zero-padded by `padding` pixels on each side and then
    randomly cropped back to 32x32.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
    padding: The amount of padding before and after each dimension of the image.
Returns:
A preprocessed image.
"""
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
if padding > 0:
image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]])
# image = tf.image.resize_images(image,(output_height,output_width))
# Randomly crop a [height, width] section of the image.
distorted_image = tf.random_crop(image,
[32, 32, 3])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0))
# Because these operations are not commutative, consider randomizing
  # the order of their operation.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_standardization(distorted_image)
def preprocess_for_eval(image, output_height, output_width):
"""Preprocesses the given image for evaluation.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
Returns:
A preprocessed image.
"""
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
# image = tf.image.resize_images(image, (output_height, output_width))
# Resize and crop if needed.
resized_image = tf.image.resize_image_with_crop_or_pad(image,
output_width,
output_height)
tf.summary.image('resized_image', tf.expand_dims(resized_image, 0))
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_standardization(resized_image)
def preprocess_image(image, output_height, output_width, is_training=False):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
if is_training:
return preprocess_for_train(image, output_height, output_width)
else:
return preprocess_for_eval(image, output_height, output_width)
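# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Minimal example of wiring preprocess_image() into a TF 1.x graph.  The random
# float image below stands in for a decoded CIFAR-10 record; in a real input
# pipeline the image would come from the dataset reader.
def _demo_preprocess():
  raw_image = tf.random_uniform([32, 32, 3], minval=0, maxval=255, dtype=tf.float32)
  train_image = preprocess_image(raw_image, 32, 32, is_training=True)
  eval_image = preprocess_image(raw_image, 32, 32, is_training=False)
  return train_image, eval_image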
| MingLin-home/Ming_slim | preprocessing/cifarnet_preprocessing.py | Python | gpl-3.0 | 4,252 |
# Copyright (c) 2007 RADLogic
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Provide various handy Python functions.
Running this script directly will execute the doctests.
Functions:
int2bin(i, n) -- Convert integer to binary string.
bin2int(bin_string) -- Convert binary string to integer.
reverse(input_string) -- Reverse a string.
transpose(matrix) -- Transpose a list of lists.
polygon_area(points_list) -- Calculate the area of an arbitrary polygon.
timestamp() -- Return string containing current time stamp.
pt2str(point) -- Return prettier string version of point tuple.
gcf(a, b) -- Return the greatest common factor of two numbers.
lcm(a, b) -- Return the least common multiple of two numbers.
permutations(input_list) -- Generate all permutations of a list of items.
reduce_fraction(fraction) -- Reduce fraction (num, denom) to simplest form.
quantile(l, p) -- Return p quantile of list l. E.g. p=0.25 for q1.
trim(l) -- Discard values in list more than 1.5*IQR outside IQR.
nice_units(value) -- Return value converted to human readable units.
uniquify(seq) -- Return sequence with duplicate items in sequence seq removed.
reverse_dict(d) -- Return the dictionary with the items as keys and vice-versa.
lsb(x, n) -- Return the n least significant bits of x.
gray_encode(i) -- Gray encode the given integer.
random_vec(bits, max_value=None) -- Return a random binary vector.
binary_range(bits) -- Return list of all possible binary numbers width=bits.
float_range([start], stop, [step]) -- Return range of floats.
find_common_fixes(s1, s2) -- Find common (prefix, suffix) of two strings.
is_rotated(seq1, seq2) -- Return true if the list is a rotation of other list.
getmodule(obj) -- Return the module that contains the object definition of obj.
(use inspect.getmodule instead, though)
get_args(argv) -- Store command-line args in a dictionary.
This module requires Python >= 2.2
"""
__author__ = 'Tim Wegener <[email protected]>'
__date__ = '$Date: 2007/03/27 03:15:06 $'
__version__ = '$Revision: 0.45 $'
__credits__ = """
David Chandler, for polygon area algorithm.
(http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf)
"""
import re
import sys
import time
import random
try:
True, False
except NameError:
True, False = (1==1, 0==1)
def int2bin(i, n):
"""Convert decimal integer i to n-bit binary number (string).
>>> int2bin(0, 8)
'00000000'
>>> int2bin(123, 8)
'01111011'
>>> int2bin(123L, 8)
'01111011'
>>> int2bin(15, 2)
Traceback (most recent call last):
ValueError: Value too large for given number of bits.
"""
hex2bin = {'0': '0000', '1': '0001', '2': '0010', '3': '0011',
'4': '0100', '5': '0101', '6': '0110', '7': '0111',
'8': '1000', '9': '1001', 'a': '1010', 'b': '1011',
'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'}
# Convert to hex then map each hex digit to binary equivalent.
result = ''.join([hex2bin[x] for x in hex(i).lower().replace('l','')[2:]])
# Shrink result to appropriate length.
# Raise an error if the value is changed by the truncation.
if '1' in result[:-n]:
raise ValueError("Value too large for given number of bits.")
result = result[-n:]
# Zero-pad if length longer than mapped result.
result = '0'*(n-len(result)) + result
return result
def bin2int(bin_string):
"""Convert binary number string to decimal integer.
Note: Python > v2 has int(bin_string, 2)
>>> bin2int('1111')
15
>>> bin2int('0101')
5
"""
## result = 0
## bin_list = list(bin_string)
## if len(filter(lambda x: x in ('1','0'), bin_list)) < len(bin_list):
## raise Exception ("bin2int: Error - not a binary number: %s"
## % bin_string)
## bit_list = map(int, bin_list)
## bit_list.reverse() # Make most significant bit have highest index.
## for bit_place in range(len(bit_list)):
## result = result + ((2**bit_place) * bit_list[bit_place])
## return result
return int(bin_string, 2)
def reverse(input_string):
"""Reverse a string. Useful for strings of binary numbers.
>>> reverse('abc')
'cba'
"""
str_list = list(input_string)
str_list.reverse()
return ''.join(str_list)
def transpose(matrix):
"""Transpose a list of lists.
>>> transpose([['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']])
[['a', 'd', 'g'], ['b', 'e', 'h'], ['c', 'f', 'i']]
>>> transpose([['a', 'b', 'c'], ['d', 'e', 'f']])
[['a', 'd'], ['b', 'e'], ['c', 'f']]
>>> transpose([['a', 'b'], ['d', 'e'], ['g', 'h']])
[['a', 'd', 'g'], ['b', 'e', 'h']]
"""
result = zip(*matrix)
# Convert list of tuples to list of lists.
# map is faster than a list comprehension since it is being used with
# a built-in function as an argument.
result = map(list, result)
return result
def polygon_area(points_list, precision=100):
"""Calculate area of an arbitrary polygon using an algorithm from the web.
Return the area of the polygon as a positive float.
Arguments:
points_list -- list of point tuples [(x0, y0), (x1, y1), (x2, y2), ...]
                   (Unclosed polygons will be closed automatically.)
precision -- Internal arithmetic precision (integer arithmetic).
>>> polygon_area([(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 0), (0, 0)])
3.0
Credits:
Area of a General Polygon by David Chandler
http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf
"""
# Scale up co-ordinates and convert them to integers.
for i in range(len(points_list)):
points_list[i] = (int(points_list[i][0] * precision),
int(points_list[i][1] * precision))
# Close polygon if not closed.
if points_list[-1] != points_list[0]:
points_list.append(points_list[0])
# Calculate area.
area = 0
for i in range(len(points_list)-1):
(x_i, y_i) = points_list[i]
(x_i_plus_1, y_i_plus_1) = points_list[i+1]
area = area + (x_i_plus_1 * y_i) - (y_i_plus_1 * x_i)
area = abs(area / 2)
# Unscale area.
area = float(area)/(precision**2)
return area
def timestamp():
"""Return string containing current time stamp.
    Note: In Python 2 onwards, time.asctime() can be used with no arguments.
"""
return time.asctime()
def pt2str(point):
"""Return prettier string version of point tuple.
>>> pt2str((1.8, 1.9))
'(1.8, 1.9)'
"""
return "(%s, %s)" % (str(point[0]), str(point[1]))
def gcf(a, b, epsilon=1e-16):
"""Return the greatest common factor of a and b, using Euclidean algorithm.
Arguments:
a, b -- two numbers
If both numbers are integers return an integer result,
otherwise return a float result.
epsilon -- floats less than this magnitude are considered to be zero
(default: 1e-16)
Examples:
>>> gcf(12, 34)
2
>>> gcf(13.5, 4)
0.5
>>> gcf(-2, 4)
2
>>> gcf(5, 0)
5
By (a convenient) definition:
>>> gcf(0, 0)
0
"""
result = max(a, b)
remainder = min(a, b)
while remainder and abs(remainder) > epsilon:
new_remainder = result % remainder
result = remainder
remainder = new_remainder
return abs(result)
def lcm(a, b, precision=None):
"""Return the least common multiple of a and b, using the gcf function.
Arguments:
a, b -- two numbers. If both are integers return an integer result,
otherwise a return a float result.
precision -- scaling factor if a and/or b are floats.
>>> lcm(21, 6)
42
>>> lcm(2.5, 3.5)
17.5
>>> str(lcm(1.5e-8, 2.5e-8, precision=1e9))
'7.5e-08'
By (an arbitary) definition:
>>> lcm(0, 0)
0
"""
# Note: Dummy precision argument is for backwards compatibility.
# Do the division first.
# (See http://en.wikipedia.org/wiki/Least_common_multiple )
denom = gcf(a, b)
if denom == 0:
result = 0
else:
result = a * (b / denom)
return result
def permutations(input_list):
"""Return a list containing all permutations of the input list.
Note: This is a recursive function.
>>> perms = permutations(['a', 'b', 'c'])
>>> perms.sort()
>>> for perm in perms:
... print perm
['a', 'b', 'c']
['a', 'c', 'b']
['b', 'a', 'c']
['b', 'c', 'a']
['c', 'a', 'b']
['c', 'b', 'a']
"""
out_lists = []
if len(input_list) > 1:
# Extract first item in list.
item = input_list[0]
# Find all permutations of remainder of list. (Recursive call.)
sub_lists = permutations(input_list[1:])
# For every permutation of the sub list...
for sub_list in sub_lists:
# Insert the extracted first item at every position of the list.
for i in range(len(input_list)):
new_list = sub_list[:]
new_list.insert(i, item)
out_lists.append(new_list)
else:
# Termination condition: only one item in input list.
out_lists = [input_list]
return out_lists
def reduce_fraction(fraction):
"""Reduce fraction tuple to simplest form. fraction=(num, denom)
>>> reduce_fraction((14, 7))
(2, 1)
>>> reduce_fraction((-2, 4))
(-1, 2)
>>> reduce_fraction((0, 4))
(0, 1)
>>> reduce_fraction((4, 0))
(1, 0)
"""
(numerator, denominator) = fraction
common_factor = abs(gcf(numerator, denominator))
result = (numerator/common_factor, denominator/common_factor)
return result
def quantile(l, p):
"""Return p quantile of list l. E.g. p=0.25 for q1.
See:
http://rweb.stat.umn.edu/R/library/base/html/quantile.html
"""
l_sort = l[:]
l_sort.sort()
n = len(l)
r = 1 + ((n - 1) * p)
i = int(r)
f = r - i
if i < n:
result = (1-f)*l_sort[i-1] + f*l_sort[i]
else:
result = l_sort[i-1]
return result
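# --- Illustrative worked example (editor's addition) ---
# quantile() above implements R's default interpolation (type 7).  For
# l = [1, 2, 3, 4] and p = 0.25: n = 4, r = 1.75, so the result interpolates
# between the 1st and 2nd sorted values: 0.25*1 + 0.75*2 = 1.75.
def _demo_quantile():
    assert quantile([1, 2, 3, 4], 0.25) == 1.75
    assert quantile([1, 2, 3, 4], 0.5) == 2.5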
def trim(l):
"""Discard values in list more than 1.5*IQR outside IQR.
(IQR is inter-quartile-range)
This function uses rad_util.quantile
1.5*IQR -- mild outlier
3*IQR -- extreme outlier
See:
http://wind.cc.whecn.edu/~pwildman/statnew/section_7_-_exploratory_data_analysis.htm
"""
l_sort = l[:]
l_sort.sort()
# Calculate medianscore (based on stats.py lmedianscore by Gary Strangman)
if len(l_sort) % 2 == 0:
# If even number of scores, average middle 2.
index = int(len(l_sort) / 2) # Integer division correct
median = float(l_sort[index] + l_sort[index-1]) / 2
else:
# int divsion gives mid value when count from 0
index = int(len(l_sort) / 2)
median = l_sort[index]
# Calculate IQR.
q1 = quantile(l_sort, 0.25)
q3 = quantile(l_sort, 0.75)
iqr = q3 - q1
iqr_extra = iqr * 1.5
def in_interval(x, i=iqr_extra, q1=q1, q3=q3):
return (x >= q1-i and x <= q3+i)
l_trimmed = [x for x in l_sort if in_interval(x)]
return l_trimmed
def nice_units(value, dp=0, sigfigs=None, suffix='', space=' ',
use_extra_prefixes=False, use_full_name=False, mode='si'):
"""Return value converted to human readable units eg milli, micro, etc.
Arguments:
value -- number in base units
dp -- number of decimal places to display (rounded)
sigfigs -- number of significant figures to display (rounded)
This overrides dp if set.
suffix -- optional unit suffix to append to unit multiplier
space -- seperator between value and unit multiplier (default: ' ')
use_extra_prefixes -- use hecto, deka, deci and centi as well if set.
(default: False)
use_full_name -- use full name for multiplier symbol,
e.g. milli instead of m
(default: False)
mode -- 'si' for SI prefixes, 'bin' for binary multipliers (1024, etc.)
(Default: 'si')
SI prefixes from:
http://physics.nist.gov/cuu/Units/prefixes.html
(Greek mu changed to u.)
Binary prefixes based on:
http://physics.nist.gov/cuu/Units/binary.html
>>> nice_units(2e-11)
'20 p'
>>> nice_units(2e-11, space='')
'20p'
"""
si_prefixes = {1e24: ('Y', 'yotta'),
1e21: ('Z', 'zetta'),
1e18: ('E', 'exa'),
1e15: ('P', 'peta'),
1e12: ('T', 'tera'),
1e9: ('G', 'giga'),
1e6: ('M', 'mega'),
1e3: ('k', 'kilo'),
1e-3: ('m', 'milli'),
1e-6: ('u', 'micro'),
1e-9: ('n', 'nano'),
1e-12: ('p', 'pico'),
1e-15: ('f', 'femto'),
1e-18: ('a', 'atto'),
1e-21: ('z', 'zepto'),
1e-24: ('y', 'yocto')
}
if use_extra_prefixes:
si_prefixes.update({1e2: ('h', 'hecto'),
1e1: ('da', 'deka'),
1e-1: ('d', 'deci'),
1e-2: ('c', 'centi')
})
bin_prefixes = {2**10: ('K', 'kilo'),
2**20: ('M', 'mega'),
                    2**30: ('G', 'giga'),
2**40: ('T', 'tera'),
2**50: ('P', 'peta'),
2**60: ('E', 'exa')
}
if mode == 'bin':
prefixes = bin_prefixes
else:
prefixes = si_prefixes
prefixes[1] = ('', '') # Unity.
# Determine appropriate multiplier.
multipliers = prefixes.keys()
multipliers.sort()
mult = None
for i in range(len(multipliers) - 1):
lower_mult = multipliers[i]
upper_mult = multipliers[i+1]
if lower_mult <= value < upper_mult:
mult_i = i
break
if mult is None:
if value < multipliers[0]:
mult_i = 0
elif value >= multipliers[-1]:
mult_i = len(multipliers) - 1
mult = multipliers[mult_i]
# Convert value for this multiplier.
new_value = value / mult
# Deal with special case due to rounding.
if sigfigs is None:
if mult_i < (len(multipliers) - 1) and\
round(new_value, dp) ==\
round((multipliers[mult_i+1] / mult), dp):
mult = multipliers[mult_i + 1]
new_value = value / mult
# Concatenate multiplier symbol.
if use_full_name:
label_type = 1
else:
label_type = 0
# Round and truncate to appropriate precision.
if sigfigs is None:
str_value = eval('"%.'+str(dp)+'f" % new_value', locals(), {})
else:
str_value = eval('"%.'+str(sigfigs)+'g" % new_value', locals(), {})
return str_value + space + prefixes[mult][label_type] + suffix
def uniquify(seq, preserve_order=False):
"""Return sequence with duplicate items in sequence seq removed.
The code is based on usenet post by Tim Peters.
This code is O(N) if the sequence items are hashable, O(N**2) if not.
Peter Bengtsson has a blog post with an empirical comparison of other
approaches:
http://www.peterbe.com/plog/uniqifiers-benchmark
If order is not important and the sequence items are hashable then
list(set(seq)) is readable and efficient.
If order is important and the sequence items are hashable generator
expressions can be used (in py >= 2.4) (useful for large sequences):
seen = set()
do_something(x for x in seq if x not in seen or seen.add(x))
Arguments:
seq -- sequence
preserve_order -- if not set the order will be arbitrary
Using this option will incur a speed penalty.
(default: False)
Example showing order preservation:
>>> uniquify(['a', 'aa', 'b', 'b', 'ccc', 'ccc', 'd'], preserve_order=True)
['a', 'aa', 'b', 'ccc', 'd']
Example using a sequence of un-hashable items:
>>> uniquify([['z'], ['x'], ['y'], ['z']], preserve_order=True)
[['z'], ['x'], ['y']]
The sorted output or the non-order-preserving approach should equal
that of the sorted order-preserving approach output:
>>> unordered = uniquify([3, 3, 1, 2], preserve_order=False)
>>> unordered.sort()
>>> ordered = uniquify([3, 3, 1, 2], preserve_order=True)
>>> ordered.sort()
>>> ordered
[1, 2, 3]
>>> int(ordered == unordered)
1
"""
try:
# Attempt fast algorithm.
d = {}
if preserve_order:
# This is based on Dave Kirby's method (f8) noted in the post:
# http://www.peterbe.com/plog/uniqifiers-benchmark
return [x for x in seq if (x not in d) and not d.__setitem__(x, 0)]
else:
for x in seq:
d[x] = 0
return d.keys()
except TypeError:
# Have an unhashable object, so use slow algorithm.
result = []
app = result.append
for x in seq:
if x not in result:
app(x)
return result
# Alias to noun form for backward compatibility.
unique = uniquify
def reverse_dict(d):
"""Reverse a dictionary so the items become the keys and vice-versa.
Note: The results will be arbitrary if the items are not unique.
>>> d = reverse_dict({'a': 1, 'b': 2})
>>> d_items = d.items()
>>> d_items.sort()
>>> d_items
[(1, 'a'), (2, 'b')]
"""
result = {}
for key, value in d.items():
result[value] = key
return result
def lsb(x, n):
"""Return the n least significant bits of x.
>>> lsb(13, 3)
5
"""
return x & ((2 ** n) - 1)
def gray_encode(i):
"""Gray encode the given integer."""
return i ^ (i >> 1)
def random_vec(bits, max_value=None):
"""Generate a random binary vector of length bits and given max value."""
vector = ""
for _ in range(int(bits / 10) + 1):
i = int((2**10) * random.random())
vector += int2bin(i, 10)
if max_value and (max_value < 2 ** bits - 1):
vector = int2bin((int(vector, 2) / (2 ** bits - 1)) * max_value, bits)
return vector[0:bits]
def binary_range(bits):
"""Return a list of all possible binary numbers in order with width=bits.
It would be nice to extend it to match the
functionality of python's range() built-in function.
"""
l = []
v = ['0'] * bits
toggle = [1] + [0] * bits
while toggle[bits] != 1:
v_copy = v[:]
v_copy.reverse()
l.append(''.join(v_copy))
toggle = [1] + [0]*bits
i = 0
while i < bits and toggle[i] == 1:
if toggle[i]:
if v[i] == '0':
v[i] = '1'
toggle[i+1] = 0
else:
v[i] = '0'
toggle[i+1] = 1
i += 1
return l
def float_range(start, stop=None, step=None):
"""Return a list containing an arithmetic progression of floats.
Return a list of floats between 0.0 (or start) and stop with an
increment of step.
    This is similar in functionality to python's range() built-in function
but can accept float increments.
As with range(), stop is omitted from the list.
"""
if stop is None:
stop = float(start)
start = 0.0
if step is None:
step = 1.0
cur = float(start)
l = []
while cur < stop:
l.append(cur)
cur += step
return l
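# --- Illustrative sketch (editor's addition) ---
# binary_range() and float_range() above have no doctests; a couple of sample
# calls make their output shape clear (the chosen step of 0.25 is exactly
# representable, so the equality check is safe here).
def _demo_ranges():
    assert binary_range(2) == ['00', '01', '10', '11']
    assert float_range(0, 1, 0.25) == [0.0, 0.25, 0.5, 0.75]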
def find_common_fixes(s1, s2):
"""Find common (prefix, suffix) of two strings.
>>> find_common_fixes('abc', 'def')
('', '')
>>> find_common_fixes('abcelephantdef', 'abccowdef')
('abc', 'def')
>>> find_common_fixes('abcelephantdef', 'abccow')
('abc', '')
>>> find_common_fixes('elephantdef', 'abccowdef')
('', 'def')
"""
prefix = []
suffix = []
i = 0
common_len = min(len(s1), len(s2))
while i < common_len:
if s1[i] != s2[i]:
break
prefix.append(s1[i])
i += 1
i = 1
while i < (common_len + 1):
if s1[-i] != s2[-i]:
break
suffix.append(s1[-i])
i += 1
suffix.reverse()
prefix = ''.join(prefix)
suffix = ''.join(suffix)
return (prefix, suffix)
def is_rotated(seq1, seq2):
"""Return true if the first sequence is a rotation of the second sequence.
>>> seq1 = ['A', 'B', 'C', 'D']
>>> seq2 = ['C', 'D', 'A', 'B']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['C', 'D', 'B', 'A']
>>> int(is_rotated(seq1, seq2))
0
>>> seq1 = ['A', 'B', 'C', 'A']
>>> seq2 = ['A', 'A', 'B', 'C']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'B', 'C', 'A']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'A', 'C', 'B']
>>> int(is_rotated(seq1, seq2))
0
"""
# Do a sanity check.
if len(seq1) != len(seq2):
return False
# Look for occurrences of second sequence head item in first sequence.
start_indexes = []
head_item = seq2[0]
for index1 in range(len(seq1)):
if seq1[index1] == head_item:
start_indexes.append(index1)
# Check that wrapped sequence matches.
double_seq1 = seq1 + seq1
for index1 in start_indexes:
if double_seq1[index1:index1+len(seq1)] == seq2:
return True
return False
def getmodule(obj):
"""Return the module that contains the object definition of obj.
Note: Use inspect.getmodule instead.
Arguments:
obj -- python obj, generally a class or a function
Examples:
A function:
>>> module = getmodule(random.choice)
>>> module.__name__
'random'
>>> module is random
1
A class:
>>> module = getmodule(random.Random)
>>> module.__name__
'random'
>>> module is random
1
A class inheriting from a class in another module:
(note: The inheriting class must define at least one function.)
>>> class MyRandom(random.Random):
... def play(self):
... pass
>>> module = getmodule(MyRandom)
>>> if __name__ == '__main__':
... name = 'rad_util'
... else:
... name = module.__name__
>>> name
'rad_util'
>>> module is sys.modules[__name__]
1
Discussion:
This approach is slightly hackish, and won't work in various situations.
However, this was the approach recommended by GvR, so it's as good as
you'll get.
See GvR's post in this thread:
http://groups.google.com.au/group/comp.lang.python/browse_thread/thread/966a7bdee07e3b34/c3cab3f41ea84236?lnk=st&q=python+determine+class+module&rnum=4&hl=en#c3cab3f41ea84236
"""
if hasattr(obj, 'func_globals'):
func = obj
else:
# Handle classes.
func = None
for item in obj.__dict__.values():
if hasattr(item, 'func_globals'):
func = item
break
if func is None:
raise ValueError("No functions attached to object: %r" % obj)
module_name = func.func_globals['__name__']
# Get module.
module = sys.modules[module_name]
return module
def round_grid(value, grid, mode=0):
"""Round off the given value to the given grid size.
Arguments:
    value -- value to be rounded
grid -- result must be a multiple of this
mode -- 0 nearest, 1 up, -1 down
Examples:
>>> round_grid(7.5, 5)
10
>>> round_grid(7.5, 5, mode=-1)
5
>>> round_grid(7.3, 5, mode=1)
10
>>> round_grid(7.3, 5.0, mode=1)
10.0
"""
    off_grid = value % grid
    add_one = 0  # default: value already on the grid (avoids UnboundLocalError for modes 1/-1)
    if mode == 0:
add_one = int(off_grid >= (grid / 2.0))
elif mode == 1 and off_grid:
add_one = 1
elif mode == -1 and off_grid:
add_one = 0
result = ((int(value / grid) + add_one) * grid)
return result
def get_args(argv):
"""Store command-line args in a dictionary.
-, -- prefixes are removed
Items not prefixed with - or -- are stored as a list, indexed by 'args'
For options that take a value use --option=value
Consider using optparse or getopt (in Python standard library) instead.
"""
d = {}
args = []
for arg in argv:
if arg.startswith('-'):
parts = re.sub(r'^-+', '', arg).split('=')
if len(parts) == 2:
d[parts[0]] = parts[1]
else:
d[parts[0]] = None
else:
args.append(arg)
d['args'] = args
return d
if __name__ == '__main__':
import doctest
doctest.testmod(sys.modules['__main__'])
| griddynamics/bunch | lettuce_bunch/rad_util.py | Python | gpl-3.0 | 26,792 |
"""
Copyright 2017 Ryan Wick ([email protected])
https://github.com/rrwick/Unicycler
This module contains functions relating to BLAST, which Unicycler uses to rotate completed circular
replicons to a standard starting point.
This file is part of Unicycler. Unicycler is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the Free Software Foundation,
either version 3 of the License, or (at your option) any later version. Unicycler is distributed in
the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details. You should have received a copy of the GNU General Public License along with Unicycler. If
not, see <http://www.gnu.org/licenses/>.
"""
import os
import subprocess
from .misc import load_fasta
from . import log
class CannotFindStart(Exception):
pass
def find_start_gene(sequence, start_genes_fasta, identity_threshold, coverage_threshold, blast_dir,
makeblastdb_path, tblastn_path):
"""
This function uses tblastn to look for start genes in the sequence. It returns the first gene
(using the order in the file) which meets the identity and coverage thresholds, as well as
the position of that gene (including which strand it is on).
This function assumes that the sequence is circular with no overlap.
"""
# Prepare the replicon sequence. In order to get a solid, single BLAST hit in cases where the
# gene overlaps from the end to the start, we have to duplicate some of the replicon sequence
# for the BLAST database.
seq_len = len(sequence)
start_genes_fasta = os.path.abspath(start_genes_fasta)
queries = load_fasta(start_genes_fasta)
if not queries:
raise CannotFindStart
longest_query = max(len(x[1]) for x in queries)
longest_query *= 3 # amino acids to nucleotides
dup_length = min(seq_len, longest_query)
sequence = sequence + sequence[:dup_length]
# BLAST has serious issues with paths that contain spaces. This page explains some of it:
# https://www.ncbi.nlm.nih.gov/books/NBK279669/
# But I couldn't make it all work for makeblastdb (spaces made it require -out, and it never
# accepted spaces in the -out path, no matter how I used quotes). So we will just move into the
# temporary directory to run the BLAST commands.
starting_dir = os.getcwd()
os.chdir(blast_dir)
# Create a FASTA file of the replicon sequence.
replicon_fasta_filename = 'replicon.fasta'
replicon_fasta = open(replicon_fasta_filename, 'w')
replicon_fasta.write('>replicon\n')
replicon_fasta.write(sequence)
replicon_fasta.write('\n')
replicon_fasta.close()
# Build the BLAST database.
command = [makeblastdb_path, '-dbtype', 'nucl', '-in', replicon_fasta_filename]
log.log(' ' + ' '.join(command), 2)
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, err = process.communicate()
if err:
log.log('\nmakeblastdb encountered an error:\n' + err.decode())
os.chdir(starting_dir)
raise CannotFindStart
# Run the tblastn search.
command = [tblastn_path, '-db', replicon_fasta_filename, '-query', start_genes_fasta, '-outfmt',
'6 qseqid sstart send pident qlen qseq qstart bitscore', '-num_threads', '1']
log.log(' ' + ' '.join(command), 2)
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
blast_out, blast_err = process.communicate()
process.wait()
if blast_err:
log.log('\nBLAST encountered an error:\n' + blast_err.decode())
# Find the best hit in the results.
best_hit, best_bitscore = None, 0
for line in blast_out.decode().splitlines():
hit = BlastHit(line, seq_len)
if hit.pident >= identity_threshold and hit.query_cov >= coverage_threshold and \
hit.qstart == 0 and hit.bitscore > best_bitscore:
best_hit = hit
best_bitscore = hit.bitscore
os.chdir(starting_dir)
if best_bitscore:
return best_hit
else:
raise CannotFindStart
class BlastHit(object):
def __init__(self, blast_line, seq_len):
self.qseqid = ''
self.pident, self.qstart, self.bitscore, self.query_cov, self.start_pos = 0, 0, 0, 0, 0
self.flip = False
parts = blast_line.strip().split('\t')
if len(parts) > 7:
self.qseqid = parts[0]
self.pident = float(parts[3])
self.qstart = int(parts[6]) - 1
self.bitscore = float(parts[7])
sstart = int(parts[1]) - 1
send = int(parts[2])
qlen = float(parts[4])
qseq = parts[5]
self.query_cov = 100.0 * len(qseq) / qlen
if sstart <= send:
self.start_pos = sstart
self.flip = False
else:
self.start_pos = sstart + 1
self.flip = True
if self.start_pos >= seq_len:
self.start_pos -= seq_len
def __repr__(self):
return 'BLAST hit: query=' + self.qseqid + ', subject start=' + str(self.start_pos) + \
', strand=' + ('reverse' if self.flip else 'forward') + ', ID=' + \
str(self.pident) + ', cov=' + str(self.query_cov) + ', bitscore=' + \
str(self.bitscore)
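# --- Illustrative sketch (editor's addition, not part of the original module) ---
# BlastHit parses one line of the tblastn output requested above
# ('6 qseqid sstart send pident qlen qseq qstart bitscore').  The line below is
# fabricated only to show the field order; real hits come from the BLAST run.
def _demo_blast_hit():
    line = 'dnaA\t101\t1300\t98.5\t400\tMSLSLWQQC\t1\t650.2'
    hit = BlastHit(line, seq_len=500)
    # hit.start_pos == 100 (0-based), hit.flip is False, hit.qstart == 0,
    # hit.query_cov == 100.0 * 9 / 400
    return hit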
| rrwick/Unicycler | unicycler/blast_func.py | Python | gpl-3.0 | 5,533 |
import sys
import os
import re
def human_size_to_byte(number):
"""
Convert number of these units to bytes, ignore case:
b : 512
kB : 1000
K : 1024
mB : 1000*1000
m : 1024*1024
MB : 1000*1000
M : 1024*1024
GB : 1000*1000*1000
G : 1024*1024*1024
TB : 1000*1000*1000*1000
T : 1024*1024*1024*1024
PB : 1000*1000*1000*1000*1000
P : 1024*1024*1024*1024*1024
EB : 1000*1000*1000*1000*1000*1000
E : 1024*1024*1024*1024*1024*1024
ZB : 1000*1000*1000*1000*1000*1000*1000
Z : 1024*1024*1024*1024*1024*1024*1024
YB : 1000*1000*1000*1000*1000*1000*1000*1000
Y : 1024*1024*1024*1024*1024*1024*1024*1024
number is of one of these forms:
123, 123b, 123M, 1G
"""
mapping = {
'b' : 512 ,
'kb' : 1000,
'k' : 1024,
'mb' : 1000**2,
'm' : 1024**2,
'gb' : 1000**3,
'g' : 1024**3,
'tb' : 1000**4,
't' : 1024**4,
'pb' : 1000**5,
'p' : 1024**5,
'eb' : 1000**6,
'e' : 1024**6,
'zb' : 1000**7,
'z' : 1024**7,
'yb' : 1000**8,
'y' : 1024**8,
}
unit = re.sub('^[0-9]+', '', number)
if unit:
unit = unit.lower()
assert unit in mapping.keys(), "wrong unit %s " % unit
amount = int(number[:-len(unit)])
return mapping[unit] * amount
else:
return int(number)
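# --- Illustrative sketch (editor's addition, not part of the original script) ---
# Sample conversions for human_size_to_byte() above, covering the plain, block
# ('b'), SI ('kB') and binary ('K'/'M') spellings described in the docstring.
def _demo_human_size_to_byte():
    assert human_size_to_byte('123') == 123
    assert human_size_to_byte('2b') == 1024            # 2 blocks of 512 bytes
    assert human_size_to_byte('1kB') == 1000
    assert human_size_to_byte('1K') == 1024
    assert human_size_to_byte('10M') == 10 * 1024 ** 2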
def correct_offset(file):
"""Due to Python cache issue, the real file offset of the
underlying file descriptor may differ, this function can correct
it.
"""
cur = file.seek(0, 1)
file.seek(0, 2)
file.seek(cur)
def open_file(file):
if file == '-':
return os.fdopen(sys.stdin.fileno(), 'rb')
else:
return open(file, 'rb')
class Locator:
"""Search from the end of the file backward, locate the starting
offset of the specified amount, measured by line, or by byte.
"""
def __init__(self, ifile, mode, amount, bs=8192):
"""mode can be 'lines' or 'bytes'"""
assert ifile.seekable(), "input file is not seekable"
self.orig_pos = ifile.seek(0, 1)
self.ifile = ifile
self.mode = mode
self.amount = amount
self.bs = bs
def find_line(self, ifile, chunk, amount):
""" Find if data chunk contains 'amount' number of lines.
Return value: (stat, pos, remaining-amount). If stat is True,
pos is the result, otherwise pos is not used, remaining-amount
is for the next run.
"""
count = chunk.count(b'\n')
if count <= amount:
amount -= count
return False, 0, amount
else: # found
pos = -1
for i in range(count - amount):
pos = chunk.index(b'\n', pos+1)
pos += 1
diff = len(chunk) - pos
pos = ifile.seek(-diff, 1)
return True, pos, 0
def find_byte(self, ifile, chunk, amount):
""" Find if data chunk contains 'amount' number of bytes.
Return value: (stat, pos, remaining-amount). If stat is True,
pos is the result, otherwise pos is not used, remaining-amount
is for the next run.
"""
length = len(chunk)
if length < amount:
amount -= length
return False, 0, amount
else: # found
pos = ifile.seek(-amount, 1)
return True, pos, 0
def find(self, ifile, offset, size, amount):
"""Read 'size' bytes starting from offset to find.
Return value: (stat, pos, remaining-amount). If stat is True,
pos is the result, otherwise pos is not used, remaining-amount
is for the next run.
"""
try:
pos = ifile.seek(offset)
except OSError:
assert False, "unkown file seeking failure"
chunk = ifile.read(size)
if self.mode == 'lines':
return self.find_line(ifile, chunk, amount)
else:
return self.find_byte(ifile, chunk, amount)
def run(self):
"""Find the offset of the last 'amount' lines"""
ifile = self.ifile
amount = self.amount
orig_pos = self.orig_pos
end = ifile.seek(0, 2) # jump to the end
# nothing to process, return the original position
total = end - orig_pos
if total <= amount:
correct_offset(ifile)
return orig_pos
bs = self.bs
# process the last block
remaining = total % bs
offset = end - remaining
stat, pos, amount = self.find(ifile, offset, remaining, amount)
while not stat and offset != orig_pos:
offset -= bs
stat, pos, amount = self.find(ifile, offset, bs, amount)
ifile.seek(self.orig_pos)
correct_offset(ifile)
return pos
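# Hedged usage sketch (added for illustration, not part of the original module):
# locate the offset of the last two lines of an in-memory stream, the way a
# `tail -n 2` implementation would need to.
def _demo_locator():
    import io
    f = io.BytesIO(b'one\ntwo\nthree\nfour\n')
    offset = Locator(f, 'lines', 2).run()
    f.seek(offset)
    assert f.read() == b'three\nfour\n'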
class Buffer:
def __init__(self, amount):
self.min = amount
self.total = 0
self.data = []
def push(self, pair):
self.data.append(pair)
self.total += pair[0]
def pop(self):
pair = self.data.pop(0)
self.total -= pair[0]
return pair
def cut(self):
"""Pop as many pairs off the head of the self.data as
self.is_ready() is True, return a combined result.
"""
count = 0
data = b''
while self.is_ready():
x, y = self.pop()
count += x
data += y
return count, data
def is_satisfied(self):
"""The minimum amount is satisfied"""
return self.total >= self.min
def is_ready(self):
"""The buffer is ready to pop"""
return self.total - self.data[0][0] >= self.min
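# Hedged usage sketch (added for illustration, not part of the original module):
# the Buffer keeps at least `min` units of trailing data buffered; cut() only
# releases pairs that are no longer needed to satisfy that minimum.
def _demo_buffer():
    buf = Buffer(2)
    buf.push((1, b'one\n'))
    buf.push((1, b'two\n'))
    buf.push((1, b'three\n'))
    assert buf.is_satisfied()
    assert buf.cut() == (1, b'one\n')
    assert buf.total == 2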
class HeadWorkerSL:
"""Seekable, line mode"""
def __init__(self, ifile, ofile, amount, bs=None):
self.ifile = ifile
self.ofile = ofile
self.amount = amount
self.bs = bs or 8192
def read(self):
return self.ifile.read(self.bs)
def transform(self, data):
return data.count(b'\n')
def is_last(self, count):
return count >= self.amount
def action(self, data, count):
self.ofile.write(data)
self.amount -= count
def handle_last(self, data):
pos = -1
for i in range(self.amount):
pos = data.index(b'\n', pos+1)
pos += 1
self.ofile.write(data[:pos])
over_read = len(data) - pos
try:
self.ifile.seek(-over_read, 1)
except Exception:
pass
def run(self):
while self.amount:
data = self.read()
if not data:
break
count = self.transform(data)
if self.is_last(count):
self.handle_last(data)
break
else:
self.action(data, count)
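# Hedged usage sketch (added for illustration, not part of the original module):
# emulate `head -n 2` on an in-memory stream with the seekable line worker.
def _demo_head_worker_sl():
    import io
    src, dst = io.BytesIO(b'a\nb\nc\nd\n'), io.BytesIO()
    HeadWorkerSL(src, dst, amount=2).run()
    assert dst.getvalue() == b'a\nb\n'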
class HeadWorkerSB(HeadWorkerSL):
"""Seekable, byte mode"""
def transform(self, data):
return len(data)
def handle_last(self, data):
self.ofile.write(data[:self.amount])
over_read = len(data) - self.amount
try:
self.ifile.seek(-over_read, 1)
except Exception:
pass
class HeadWorkerTL(HeadWorkerSL):
"""Terminal, line mode"""
def read(self):
return self.ifile.readline()
def action(self, data, count):
self.ofile.write(data)
self.amount -= 1
self.ofile.flush()
def handle_last(self, data):
self.ofile.write(data)
self.ofile.flush()
class HeadWorkerTB(HeadWorkerSB):
"""Terminal, byte mode"""
def read(self):
return self.ifile.readline()
class HeadWorkerULIT(HeadWorkerSL):
"""Unseekable, line mode ignore tail"""
def __init__(self, ifile, ofile, amount, bs=None):
self.ifile = ifile
self.ofile = ofile
self.amount = amount
self.bs = bs or 8192
def read(self):
return self.ifile.read(self.bs)
def transform(self, data):
return data.count(b'\n')
def fill(self):
"""Fill up the buffer with content from self.ifile"""
amount = self.amount
buffer = Buffer(amount)
while True:
data = self.read()
if not data:
break
count = self.transform(data)
buffer.push((count, data))
if buffer.is_satisfied():
break
return buffer
def step(self, buffer):
"""Read and process the self.ifile step by step,
return False if nothing left in self.ifile.
"""
data = self.read()
if not data:
return False
count = self.transform(data)
buffer.push((count, data))
if buffer.is_ready():
x, data = buffer.cut()
self.proc(data)
return True
def proc(self, data):
self.ofile.write(data)
self.ofile.flush()
def handle_last(self, buffer):
while True:
x, data = buffer.pop()
if buffer.is_satisfied():
self.proc(data)
else:
diff = buffer.min - buffer.total
lines = data.splitlines(keepends=True)
self.ofile.writelines(lines[:-diff])
break
self.ofile.flush()
def run(self):
buffer = self.fill()
if buffer.is_satisfied():
while self.step(buffer):
pass
self.handle_last(buffer)
class HeadWorkerTLIT(HeadWorkerULIT):
"""Terminal, line mode ignore tail"""
def read(self):
return self.ifile.readline()
class HeadWorkerUBIT(HeadWorkerULIT):
"""Unseekable, byte mode ignore tail"""
def transform(self, data):
return len(data)
def handle_last(self, buffer):
while True:
x, data = buffer.pop()
if buffer.is_satisfied():
self.ofile.write(data)
else:
diff = buffer.min - buffer.total
self.ofile.write(data[:-diff])
break
self.ofile.flush()
class HeadWorkerTBIT(HeadWorkerUBIT):
"""Terminal, byte mode ignore tail"""
def read(self):
return self.ifile.readline()
class Mixin:
def copy_to_end(self):
while True:
chunk = self.read()
if not chunk:
break
self.ofile.write(chunk)
class TailWorkerSLIH(HeadWorkerSL, Mixin):
"""Seekable, line mode, ignore head"""
def __init__(self, ifile, ofile, amount, bs=None):
super(TailWorkerSLIH, self).__init__(ifile, ofile, amount, bs)
if amount > 0:
self.amount -= 1
def action(self, data, count):
self.amount -= count
def handle_last(self, data):
pos = -1
for i in range(self.amount):
pos = data.index(b'\n', pos+1)
pos += 1
self.ofile.write(data[pos:])
self.copy_to_end()
class TailWorkerSBIH(TailWorkerSLIH):
"""Seekable, byte mode, ignore head"""
def transform(self, data):
return len(data)
def handle_last(self, data):
self.ofile.write(data[self.amount:])
self.copy_to_end()
class TailWorkerSB(TailWorkerSLIH):
def __init__(self, ifile, ofile, bs=None):
self.ifile = ifile
self.ofile = ofile
self.bs = bs or 8192
def run(self):
self.copy_to_end()
class TailWorkerULIH(HeadWorkerULIT, Mixin):
"""Unseekable, line mode ignore head"""
def proc(self, data):
"""Just ignore the data"""
def handle_last(self, buffer):
while True:
x, data = buffer.pop()
if not buffer.is_satisfied():
diff = buffer.min - buffer.total
self.split_and_proc(data, diff)
for x, data in buffer.data:
self.ofile.write(data)
break
def split_and_proc(self, data, diff):
lines = data.splitlines(keepends=True)
self.ofile.writelines(lines[-diff:])
class TailWorkerUBIH(TailWorkerULIH):
"""Unseekable, byte mode ignore head"""
def read(self):
return self.ifile.read(self.bs)
def transform(self, data):
return len(data)
def split_and_proc(self, data, diff):
self.ofile.write(data[-diff:])
class TailWorkerTLIH(TailWorkerULIH):
"""Terminal, line mode ignore head"""
def read(self):
return self.ifile.readline()
class TailWorkerTBIH(TailWorkerTLIH):
"""Terminal, byte mode ignore head"""
def transform(self, data):
return len(data)
def split_and_proc(self, data, diff):
self.ofile.write(data[-diff:])
class TailWorkerTL(TailWorkerSLIH):
"""Terminal, line mode, ignore head"""
def read(self):
return self.ifile.readline()
def handle_last(self, data):
self.copy_to_end()
class TailWorkerTB(TailWorkerTL):
"""Terminal, byte mode, ignore head"""
def transform(self, data):
return len(data)
def handle_last(self, data):
self.ofile.write(data[self.amount:])
self.copy_to_end()
class GrepNameDetermined(Exception): pass
class GrepStatusDetermined(Exception): pass
class GrepWorker:
# VT100 color code
c_fname = b'\x1b[35m' # magenta
c_sep = b'\x1b[36m' # cyan
c_lnum = b'\x1b[32m' # green
c_match = b'\x1b[31m\x1b[1m' # bold red
c_off = b'\x1b[0m' # turn off color
sep_line = b'--\n'
c_sep_line = c_sep + b'--' + c_off + b'\n'
def __init__(self, pattern, options, ifile, ofile, bs=None):
self.pattern = pattern
self.options = options
self.ifile = ifile
self.ofile = ofile
self.bs = bs or 8192
self.nr = 0 # number of records
self.fname = self.make_fname(ifile.name)
self.status = False
# Invert the sense of matching
if ('invert' in options and 'file_match' not in options
and 'count' not in options):
self.on_match, self.on_not_match = self.on_not_match, self.on_match
# set on_match method for -q option
if 'quiet' in options:
self.on_match = self.quiet_on_match
# set reader for tty input file
if ifile.isatty():
self.read = self.read_tty
self.write = self.write_tty
# setup color output
color = options['color']
if color == 'always' or self.ofile.isatty() and color == 'auto':
self.sep_line = self.c_sep_line
self.make_fname_str = self.make_color_fname_str
self.make_lnum_str = self.make_color_lnum_str
self.make_matcher = self.make_color_matcher
self.matcher = self.make_matcher(options)
def insert_line_number(self, lines, num, sep=b':'):
"""Insert line number to the head of each line"""
num = str(num).encode()
num_str = self.make_lnum_str(num, sep)
return (b'%s%s' % (num_str, line) for line in lines)
def insert_file_name(self, lines, fname, sep=b':'):
"""Insert file name to the head of each line"""
fname_str = self.make_fname_str(fname, sep)
return (b'%s%s' % (fname_str, line) for line in lines)
def make_lnum_str(self, num, sep):
return num + sep
def make_fname_str(self, fname, sep):
return fname + sep
def make_color_lnum_str(self, num, sep):
return self.c_lnum + num + self.c_sep + sep + self.c_off
def make_color_fname_str(self, fname, sep):
return self.c_fname + fname + self.c_sep + sep + self.c_off
def quiet_on_match(self, *args, **kargs):
raise GrepStatusDetermined
def read(self):
"""Return an enumerate object with line number"""
lines = self.ifile.readlines(self.bs)
if not lines:
return None
count = len(lines)
res = enumerate(lines, self.nr + 1)
self.nr += count
return res
def read_tty(self):
"""Read the terminal, line by line"""
line = self.ifile.readline()
if not line:
return None
self.nr += 1
return [(self.nr, line)]
def make_normal_matcher(self, options):
# handle -w option, match word boundary
pat = self.pattern
if 'word_regexp' in self.options:
pat = r'\b%s\b' % pat
# handle -i option, ignore case
flags = 0
if 'ignore_case' in self.options:
flags |= re.IGNORECASE
pat = re.compile(pat.encode(), flags)
return pat
def make_matcher(self, options):
pat = self.make_normal_matcher(options)
class C:
def findall(self, line):
return pat.findall(line), line
return C()
def make_color_matcher(self, options):
pat = self.make_normal_matcher(options)
c_match = self.c_match
c_off = self.c_off
class C:
def findall(self, line):
matches = pat.findall(line)
if matches:
matches = [c_match + x + c_off for x in matches]
line = re.sub(pat, self.apply_color, line)
return matches, line
def apply_color(self, m):
return c_match + m.group() + c_off
return C()
def make_fname(self, name):
"""Make a file name for output"""
if name == 0:
name = '(standard input)'.encode()
else:
name = str(name).encode()
return name
def format_output(self, lines, lnum, options, sep=b':'):
"""Format lines for output"""
# handle -n option, show line number
if 'line_number' in options:
lines = self.insert_line_number(lines, lnum, sep)
# insert file name if necessary
if options['with_filename']:
lines = self.insert_file_name(lines, self.fname, sep)
return lines
def write(self, lines):
self.ofile.writelines(lines)
def write_tty(self, lines):
"""Write to terminal, flush after every write"""
self.ofile.writelines(lines)
self.ofile.flush()
def on_match(self, matches, line, lnum):
self.status = True
# handle -o option, show only the matched part
if 'only_matching' in self.options:
lines = (x + b'\n' for x in matches)
else:
lines = [line]
lines = self.format_output(lines, lnum, self.options)
self.write(lines)
def on_not_match(self, *args, **kargs):
return None
def run(self):
while True:
lines_data = self.read()
if not lines_data:
break
for n, line in lines_data:
matches, line = self.matcher.findall(line)
if matches:
self.on_match(matches, line, n)
else:
self.on_not_match(matches, line, n)
return self.status
class GrepWorkerAgg(GrepWorker):
def __init__(self, *args, **kargs):
super(GrepWorkerAgg, self).__init__(*args, **kargs)
self.match_count = 0
def format_output(self, lines, options):
"""Format lines for output"""
# insert file name if necessary
if options['with_filename']:
lines = self.insert_file_name(lines, self.fname)
return lines
def on_match(self, matches, line, lnum):
self.status = True
self.match_count += 1
def run(self):
status = super(GrepWorkerAgg, self).run()
lines = [str(self.match_count).encode() + b'\n']
lines = self.format_output(lines, self.options)
self.write(lines)
return status
class GrepWorkerFileName(GrepWorker):
def on_match(self, matches, line, lnum):
raise GrepNameDetermined
def run(self):
try:
super(GrepWorkerFileName, self).run()
status = False
except GrepNameDetermined:
self.write([self.fname + b'\n'])
status = True
return status
class GrepWorkerContext(GrepWorker):
def __init__(self, *args, **kargs):
super(GrepWorkerContext, self).__init__(*args, **kargs)
self.before = self.options.get('before', 0)
self.after = self.options.get('after', 0)
self.b_buf = []
self.a_counter = 0
self.last_written_lnum = 0
def write_separator(self, lnum):
last_lnum = self.last_written_lnum
first_lnum = self.b_buf[0][0] if self.b_buf else lnum
if last_lnum and first_lnum - last_lnum > 1:
self.write([self.sep_line])
def on_match(self, matches, line, lnum):
# the 'before' buffer may contain more lines than needed,
# truncate it before writing the separator in order not
        # to interfere with the line number calculation.
if self.before:
self.b_buf = self.b_buf[-self.before:]
else:
self.b_buf.clear()
self.write_separator(lnum)
self.write_b_buffer()
super(GrepWorkerContext, self).on_match(matches, line, lnum)
self.last_written_lnum = lnum
self.reset_a_counter()
def on_not_match(self, matches, line, lnum):
if self.a_counter:
if 'only_matching' not in self.options:
lines = self.format_output([line], lnum, self.options, b'-')
self.write(lines)
self.last_written_lnum = lnum
self.a_counter -= 1
else:
self.b_buf.append((lnum, line))
def reset_a_counter(self):
self.a_counter = self.after
def write_b_buffer(self):
"""Write out the 'before' buffer"""
if not self.b_buf:
return
        # write only when the -o option is not present,
if 'only_matching' not in self.options:
for lnum, line in self.b_buf:
lines = self.format_output([line], lnum, self.options, b'-')
self.write(lines)
self.last_written_lnum = self.b_buf[-1][0]
self.b_buf.clear()
def run(self):
bs = self.before
while True:
self.b_buf = self.b_buf[-bs:]
lines_data = self.read()
if not lines_data:
break
for n, line in lines_data:
matches, line = self.matcher.findall(line)
if matches:
self.on_match(matches, line, n)
else:
self.on_not_match(matches, line, n)
return self.status
def recursive_walk(worker, names, pattern, options):
"""Process all regular files, descend into directories. When
the -q option is provided, the first match will trigger an
exception named GrepStatusDetermined."""
def processor(names, pattern, options, worker):
status_list = []
for name in names:
if os.path.isfile(name):
status = worker(name, pattern, options)
status_list.append(status)
elif os.path.isdir(name):
try:
sub_names = os.listdir(name)
except Exception as e:
print(str(e), file=sys.stderr)
status_list.append(False)
else:
sub_names = [os.path.join(name, x) for x in sub_names]
names.extend(sub_names)
return status_list
return walk(worker, names, pattern, options, processor)
def walk(worker, names, pattern, options, processor=None):
"""Each file shall be a regular file. When the -q option is
provided, the first match will trigger an exception named
GrepStatusDetermined."""
if not processor:
def processor(names, pattern, options, worker):
status_list = []
for name in names:
status = worker(name, pattern, options)
status_list.append(status)
return status_list
try:
status_list = processor(names, pattern, options, worker)
except GrepStatusDetermined:
status_list = [True]
if 'quiet' in options:
return any(status_list)
else:
return all(status_list)
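# Hedged usage sketch (added for illustration, not part of the original module):
# `walk` aggregates per-file worker statuses -- all() normally, any() under -q.
# The stub worker and file names below are made up.
def _demo_walk():
    hits = {'a.txt': True, 'b.txt': False}
    stub_worker = lambda name, pattern, options: hits[name]
    assert walk(stub_worker, ['a.txt', 'b.txt'], 'pat', {}) is False
    assert walk(stub_worker, ['a.txt'], 'pat', {}) is True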
| iesugrace/pycmd | lib.py | Python | gpl-3.0 | 24,434 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0008_auto_20150405_1435'),
]
operations = [
migrations.AddField(
model_name='brewpispark',
name='spark_time',
field=models.BigIntegerField(default=0, verbose_name=b'Spark Time'),
preserve_default=True,
),
]
| thomast74/oinkbrew_webapp | api/migrations/0009_brewpispark_spark_time.py | Python | gpl-3.0 | 470 |
import logging
from mimeprovider.documenttype import get_default_document_types
from mimeprovider.client import get_default_client
from mimeprovider.exceptions import MimeException
from mimeprovider.exceptions import MimeBadRequest
from mimeprovider.mimerenderer import MimeRenderer
from mimeprovider.validators import get_default_validator
__all__ = ["MimeProvider"]
__version__ = "0.1.5"
log = logging.getLogger(__name__)
def build_json_ref(request):
def json_ref(route, document=None, **kw):
ref = dict()
ref["$ref"] = request.route_path(route, **kw)
rel_default = None
if document:
rel_default = getattr(document, "object_type",
document.__class__.__name__)
else:
rel_default = route
ref["rel"] = kw.pop("rel_", rel_default)
return ref
return json_ref
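# Hedged usage sketch (added for illustration, not part of the original module):
# json_ref builds a {"$ref", "rel"} pair from a route name; a minimal stand-in
# object replaces the Pyramid request here, so the route path is an assumption.
def _demo_json_ref():
    class FakeRequest(object):
        def route_path(self, route, **kw):
            return "/" + route
    ref = build_json_ref(FakeRequest())("status")
    assert ref == {"$ref": "/status", "rel": "status"}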
class MimeProvider(object):
def __init__(self, documents=[], **kw):
self.renderer_name = kw.get("renderer_name", "mime")
self.attribute_name = kw.get("attribute_name", "mime_body")
self.error_handler = kw.get("error_handler", None)
self.set_default_renderer = kw.get("set_default_renderer", False)
self.validator = kw.get("validator")
if self.validator is None:
self.validator = get_default_validator()
types = kw.get("types")
if types is None:
types = get_default_document_types()
if not types:
raise ValueError("No document types specified")
self.client = kw.get("client")
if self.client is None:
self.client = get_default_client()
self.type_instances = [t() for t in types]
self.mimeobjects = dict()
self.mimetypes = dict(self._generate_base_mimetypes())
self.error_document_type = kw.get(
"error_document_type",
self.type_instances[0])
self.register(*documents)
def _validate(self, document):
if not hasattr(document, "object_type"):
raise ValueError(
("Object does not have required 'object_type' "
"attribute {0!r}").format(document))
def _generate_base_mimetypes(self):
"""
        Generate the base mimetypes as described by non-customized document
types.
"""
for t in self.type_instances:
if t.custom_mime:
continue
yield t.mime, (t, None, None)
def _generate_document_mimetypes(self, documents):
for t in self.type_instances:
if not t.custom_mime:
continue
for o in documents:
mimetype = t.mime.format(o=o)
validator = None
if hasattr(o, "schema"):
validator = self.validator(o.schema)
m_value = (mimetype, (t, o, validator))
o_value = (o, (t, mimetype, validator))
yield m_value, o_value
def register(self, *documents):
documents = list(documents)
for document in documents:
self._validate(document)
generator = self._generate_document_mimetypes(documents)
for (m, m_value), (o, o_value) in generator:
self.mimeobjects.setdefault(o, []).append(o_value)
if m not in self.mimetypes:
self.mimetypes[m] = m_value
continue
_, cls, validator = self.mimetypes[m]
_, new_cls, validator = m_value
raise ValueError(
"Conflicting handler for {0}, {1} and {2}".format(
m, cls, new_cls))
def get_client(self, *args, **kw):
return self.client(self.mimetypes, self.mimeobjects, *args, **kw)
def get_mime_body(self, request):
if not request.body or not request.content_type:
return None
result = self.mimetypes.get(request.content_type)
if result is None:
raise MimeBadRequest(
"Unsupported Content-Type: " + request.content_type)
document_type, cls, validator = result
# the specific document does not support deserialization.
if not hasattr(cls, "from_data"):
raise MimeBadRequest(
"Unsupported Content-Type: " +
request.content_type)
return document_type.parse(validator, cls, request.body)
@property
def renderer(self):
if self.error_handler is None:
raise ValueError("No 'error_handler' available")
def setup_renderer(helper):
return MimeRenderer(self.mimetypes, self.error_document_type,
self.error_handler, validator=self.validator)
return setup_renderer
def add_config(self, config):
config.add_renderer(self.renderer_name, self.renderer)
if self.set_default_renderer:
config.add_renderer(None, self.renderer)
config.set_request_property(self.get_mime_body, self.attribute_name,
reify=True)
config.set_request_property(build_json_ref, "json_ref", reify=True)
config.add_view(self.error_handler, context=MimeException,
renderer=self.renderer_name)
| udoprog/mimeprovider | mimeprovider/__init__.py | Python | gpl-3.0 | 5,322 |
import zmq
from crpropa import Module
class SendCandidateProperties( Module ):
""" Sends candidate proporties given by the function
```extract_func( candidate )``` over the network
to the server on ```ip_port```
"""
def __init__( self, ip_port, extract_func ):
Module.__init__( self )
self.socket = None
self.ip_port = "tcp://" + ip_port
self.extract_func = extract_func
def beginRun( self ):
context = zmq.Context()
self.socket = context.socket( zmq.REQ )
self.socket.connect( self.ip_port )
def process(self, c):
self.socket.send_pyobj( self.extract_func( c ) )
msg_in = self.socket.recv_pyobj()
def endRun( self ):
del self.socket
class RecvCandidateProperties:
""" Server side: receive data from the client module
while listening on ```ip_port```
self.recv method should be in a non-blocking loop
"""
def __init__( self, ip_port ):
context = zmq.Context()
self.socket = context.socket( zmq.REP )
self.socket.bind( "tcp://" + ip_port )
def recv( self ):
msg = self.socket.recv_pyobj()
self.socket.send_pyobj(msg)
return msg
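# Hedged usage sketch (added for illustration, not part of the original module):
# a minimal server-side loop; the address and message count are assumptions,
# and recv() blocks until a SendCandidateProperties client sends something.
def _demo_server_loop(ip_port="127.0.0.1:5555", max_messages=10):
    server = RecvCandidateProperties(ip_port)
    for _ in range(max_messages):
        print(server.recv())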
| adundovi/CRPropa3-scripts | python_modules/network.py | Python | gpl-3.0 | 1,236 |
#!/usr/bin/env python
"""
A basic example of loading YAML
Make sure you use the "safe_load" method and not the "load" method
that will give you warnings.
References:
- https://stackoverflow.com/questions/1773805/how-can-i-parse-a-yaml-file-in-python
"""
import yaml
with open("data_samples/basic.yaml", 'r') as stream:
try:
data=yaml.safe_load(stream)
assert "concepts" in data
except yaml.YAMLError as exc:
print(exc)
| veltzer/demos-python | src/examples/short/yaml/yaml_load.py | Python | gpl-3.0 | 455 |
#!/usr/bin/python2
import threading
from systemd import journal
from threading import Thread
import smtplib
import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class Mailer(threading.Thread):
"""
Mailer
:desc: Class that sends an email
Extends Thread
"""
def __init__(self):
"""
__init__
:desc: Constructor function that calls parent
"""
Thread.__init__(self)
def run(self, stri, dictio):
"""
run
:desc : Function that does the heavy lifting
:params : The string to be mailed and a dict
containing config options necessary for the mail to be delivered.
"""
dictionary = dictio
msg = MIMEMultipart("alternative")
#get it from the queue?
stripped = stri.strip()
part1 = MIMEText(stripped, "plain")
msg['Subject'] = dictionary['email_subject']
#http://pymotw.com/2/smtplib/
msg['To'] = email.utils.formataddr(('Recipient', dictionary['email_to']))
msg['From'] = email.utils.formataddr((dictionary['email_from'], dictionary['email_from']))
msg.attach(part1)
if dictionary['smtp'] == True:
# no auth
if dictionary['auth'] == False:
s = smtplib.SMTP()
s.connect(host=str(dictionary['smtp_host']), port=dictionary['smtp_port'])
try:
send = s.sendmail(str(dictionary['email_from']), [str(dictionary['email_to'])], msg.as_string())
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
journal.send("systemd-denotify: "+message)
finally:
s.quit()
del s
# auth
elif dictionary['auth'] == True:
s = smtplib.SMTP()
s.connect(host=str(dictionary['smtp_host']), port=dictionary['smtp_port'])
s.login(str(dictionary['auth_user']), str(dictionary['auth_password']))
try:
send = s.sendmail(str(dictionary['email_from']), [str(dictionary['email_to'])], msg.as_string().strip())
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
journal.send("systemd-denotify: "+message)
finally:
s.quit()
del s
else:
pass
#smtps
if dictionary['smtps'] == True:
# no auth ?
if dictionary['auth'] == False:
try:
if len(dictionary['smtps_cert']) > 0 and len(dictionary['smtps_key']) > 0:
s = smtplib.SMTP_SSL(host=str(dictionary['smtps_host']), port=dictionary['smtps_port'], keyfile=dictionary['smtps_key'], certfile=dictionary['smtps_cert'])
s.ehlo_or_helo_if_needed()
send = s.sendmail(str(dictionary['email_from']), [str(dictionary['email_to'])], msg.as_string())
else:
s = smtplib.SMTP_SSL(host=str(dictionary['smtps_host']), port=dictionary['smtps_port'])
s.ehlo_or_helo_if_needed()
send = s.sendmail(str(dictionary['email_from']), [str(dictionary['email_to'])], msg.as_string())
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
journal.send("systemd-denotify: "+message)
finally:
s.quit()
del s
# auth
elif dictionary['auth'] == True:
try:
#check whether it is a real file and pem encoded
if len(dictionary['smtps_cert']) > 0 and len(dictionary['smtps_key']) > 0:
s = smtplib.SMTP_SSL(host=str(dictionary['smtps_host']), port=dictionary['smtps_port'], keyfile=dictionary['smtps_key'], certfile=dictionary['smtps_cert'])
s.ehlo_or_helo_if_needed()
s.login(dictionary['auth_user'], dictionary['auth_password'])
send = s.sendmail(str(dictionary['email_from']), [str(dictionary['email_to'])], msg.as_string())
else:
s = smtplib.SMTP_SSL(host=str(dictionary['smtps_host']), port=dictionary['smtps_port'])
s.ehlo_or_helo_if_needed()
s.login(dictionary['auth_user'], dictionary['auth_password'])
send = s.sendmail(str(dictionary['email_from']), [str(dictionary['email_to'])], msg.as_string())
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
journal.send("systemd-denotify: "+message)
finally:
s.quit()
del s
else:
pass
#starttls
if dictionary['starttls'] == True:
# no auth
if dictionary['auth'] == False:
try:
s = smtplib.SMTP()
s.connect(host=str(dictionary['starttls_host']), port=dictionary['starttls_port'])
s.ehlo()
#http://pymotw.com/2/smtplib/
if s.has_extn("STARTTLS"):
#check whether it is a real file and pem encoded
if len(dictionary['starttls_cert']) > 0 and len(dictionary['starttls_key']) > 0:
s.starttls(keyfile=dictionary['starttls_key'], certfile=dictionary['starttls_cert'])
s.ehlo()
send = s.sendmail(str(dictionary['email_from']), [str(dictionary['email_to'])], msg.as_string())
else:
s.starttls()
s.ehlo()
send = s.sendmail(str(dictionary['email_from']), [str(dictionary['email_to'])], msg.as_string())
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
journal.send("systemd-denotify: "+message)
finally:
s.quit()
del s
# auth
elif dictionary['auth'] == True:
try:
s = smtplib.SMTP()
s.connect(host=str(dictionary['starttls_host']), port=dictionary['starttls_port'])
#http://pymotw.com/2/smtplib/
s.ehlo()
if s.has_extn("STARTTLS"):
#check whether it is a real file and pem encoded
if len(dictionary['starttls_cert']) >0 and len(dictionary['starttls_key'])>0:
s.starttls(keyfile=dictionary['starttls_key'], certfile=dictionary['starttls_cert'])
s.ehlo()
s.login(str(dictionary['auth_user']).strip(), str(dictionary['auth_password']))
send = s.sendmail(str(dictionary['email_from']), [str(dictionary['email_to'])], msg.as_string())
else:
s.starttls()
s.ehlo()
s.login(str(dictionary['auth_user']).strip(), str(dictionary['auth_password']))
send = s.sendmail(str(dictionary['email_from']), [str(dictionary['email_to'])], msg.as_string())
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
journal.send("systemd-denotify: "+message)
finally:
s.quit()
del s
else:
pass
| gkarakou/systemd-denotify | denotify/mailer.py | Python | gpl-3.0 | 8,509 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 13:45:31 2018
@author: huyn
"""
| nguyenngochuy91/Ancestral-Blocks-Reconstruction | checkGGPS2.py | Python | gpl-3.0 | 107 |
"""
A test script for the `indextable` module
"""
from random import randrange
import pytest
from HamiltonianPy.indextable import IndexTable
class TestIndexTable:
def test_init(self):
match0 = r"unhashable type"
match1 = r"The .* has different type from the previous ones"
match2 = r"The .* object already exists"
with pytest.raises(TypeError, match=match0):
IndexTable([[0, 1], [2, 3]])
with pytest.raises(TypeError, match=match1):
IndexTable([(0, 1), "ab"])
with pytest.raises(ValueError, match=match2):
IndexTable([(0, 1), (2, 3), (0, 1)])
def test_object_type(self):
table = IndexTable((x, y) for x in range(4) for y in range(4))
assert table.object_type is tuple
def test_str_and_iteration(self):
separator = "*" * 80
table = IndexTable((x, y) for x in range(2) for y in range(2))
print(table)
print(separator)
for index in table.indices():
print(index)
print(separator)
for item in table.objects():
print(item)
print(separator)
for index, item in table:
print(index, item)
print(separator)
def test_length(self):
num0 = 4
num1 = 7
table = IndexTable((x, y) for x in range(num0) for y in range(num1))
assert len(table) == num0 * num1
def test_query_index(self):
num0 = 7
num1 = 3
table = IndexTable((x, y) for x in range(num0) for y in range(num1))
for i in range(5):
key = (randrange(num0), randrange(num1))
assert table(key) == key[0] * num1 + key[1]
def test_query_object(self):
num0 = 7
num1 = 3
table = IndexTable((x, y) for x in range(num0) for y in range(num1))
for i in range(5):
index = randrange(num0 * num1)
assert table.query_object(index) == divmod(index, num1)
| wangshiphys/HamiltonianPy | HamiltonianPy/tests/unit/test_indextable.py | Python | gpl-3.0 | 1,978 |
__author__ = "Laura Martinez Sanchez"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "[email protected]"
from osgeo import gdal, gdalnumeric, ogr, osr
import numpy as np
from PIL import Image, ImageDraw
from collections import defaultdict
import pickle
import time
from texture_common import *
#Uses a gdal geomatrix (gdal.GetGeoTransform()) to calculate the pixel location of a geospatial coordinate
def world2Pixel(geoMatrix, x, y):
ulX = geoMatrix[0]
ulY = geoMatrix[3]
xDist = geoMatrix[1]
yDist = geoMatrix[5]
rtnX = geoMatrix[2]
rtnY = geoMatrix[4]
pixel = int((x - ulX) / xDist)
line = int((y - ulY) / yDist)
return (pixel, line)
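# Hedged usage sketch (added for illustration, not part of the original module):
# with a north-up GeoTransform (origin 1000/2000, 10 m pixels, made-up numbers),
# the point (1055, 1975) falls in pixel column 5, row 2.
def _demo_world2Pixel():
    geo = (1000.0, 10.0, 0.0, 2000.0, 0.0, -10.0)
    assert world2Pixel(geo, 1055.0, 1975.0) == (5, 2)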
#Converts a Python Imaging Library array to a gdalnumeric image.
def imageToArray(i):
'''
Converts a Python Imaging Library (PIL) array to a gdalnumeric image.
'''
a = gdalnumeric.fromstring(i.tobytes(), 'b')
a.shape = i.im.size[1], i.im.size[0]
return a
def ReadClipArray(lrY, ulY, lrX, ulX, img):
clip = np.empty((img.RasterCount, lrY - ulY, lrX - ulX))
    # Read only the pixels needed to do the clip
for band in range(img.RasterCount):
band += 1
imgaux = img.GetRasterBand(band).ReadAsArray(ulX, ulY, lrX - ulX, lrY - ulY)
clip[band - 1] = imgaux
return clip
#Does the clip of the shape
def ObtainPixelsfromShape(field, rasterPath, shapePath, INX, *args):
# field='zona'
# open dataset, also load as a gdal image to get geotransform
# INX can be false. If True, uses additional layers.
print "Starting clip...."
start = time.time()
if args:
texture_train_Path = args[0]
print texture_train_Path
img, textArrayShp = createTextureArray(texture_train_Path, rasterPath)
else:
#print"Indexes = False"
img = gdal.Open(rasterPath)
geoTrans = img.GetGeoTransform()
geoTransaux = img.GetGeoTransform()
proj = img.GetProjection()
#open shapefile
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(shapePath, 0)
layer = dataSource.GetLayer()
clipdic = defaultdict(list)
count = 0
    # Convert the layer extent to image pixel coordinates; we read only the pixels needed
for feature in layer:
minX, maxX, minY, maxY = feature.GetGeometryRef().GetEnvelope()
geoTrans = img.GetGeoTransform()
ulX, ulY = world2Pixel(geoTrans, minX, maxY)
lrX, lrY = world2Pixel(geoTrans, maxX, minY)
#print ulX,lrX,ulY,lrY
# Calculate the pixel size of the new image
pxWidth = int(lrX - ulX)
pxHeight = int(lrY - ulY)
clip = ReadClipArray(lrY, ulY, lrX, ulX, img)
#EDIT: create pixel offset to pass to new image Projection info
xoffset = ulX
yoffset = ulY
#print "Xoffset, Yoffset = ( %d, %d )" % ( xoffset, yoffset )
# Create a new geomatrix for the image
geoTrans = list(geoTrans)
geoTrans[0] = minX
geoTrans[3] = maxY
# Map points to pixels for drawing the boundary on a blank 8-bit, black and white, mask image.
points = []
pixels = []
geom = feature.GetGeometryRef()
pts = geom.GetGeometryRef(0)
[points.append((pts.GetX(p), pts.GetY(p))) for p in range(pts.GetPointCount())]
[pixels.append(world2Pixel(geoTrans, p[0], p[1])) for p in points]
rasterPoly = Image.new("L", (pxWidth, pxHeight), 1)
rasterize = ImageDraw.Draw(rasterPoly)
rasterize.polygon(pixels, 0)
mask = imageToArray(rasterPoly)
        # Show the clips of the features
# plt.imshow(mask)
# plt.show()
# Clip the image using the mask into a dict
temp = gdalnumeric.choose(mask, (clip, np.nan))
        # # Show the clips of the image
# plt.imshow(temp[4])
# plt.show()
temp = np.concatenate(temp.T)
temp = temp[~np.isnan(temp[:, 0])] #NaN
#print temp.shape
clipdic[str(feature.GetField(field))].append(temp)
count += temp.shape[0]
end = time.time()
print "Time clipshape:"
print (end - start)
print "count", count
return clipdic, count
##########################################################################
| madi/DeadTrees-BDEOSS | clipshape.py | Python | gpl-3.0 | 4,289 |
#!/usr/bin/env python3
import sys
import subprocess
import urllib.request
import copy
def main(argv):
cookie = urllib.request.HTTPCookieProcessor()
oc = copy.deepcopy(cookie)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| munhyunsu/Hobby | Pickle/pickle_example.py | Python | gpl-3.0 | 242 |
"""
Playlist Generation
"""
from os import path
from random import choice
import string
import pafy
from .. import content, g, playlists, screen, util, listview
from ..playlist import Playlist
from . import command, search, album_search
@command(r'mkp\s*(.{1,100})')
def generate_playlist(sourcefile):
"""Generate a playlist from video titles in sourcefile"""
# Hooks into this, check if the argument --description is present
if "--description" in sourcefile or "-d" in sourcefile:
description_generator(sourcefile)
return
expanded_sourcefile = path.expanduser(sourcefile)
if not check_sourcefile(expanded_sourcefile):
g.message = util.F('mkp empty') % expanded_sourcefile
else:
queries = read_sourcefile(expanded_sourcefile)
g.message = util.F('mkp parsed') % (len(queries), sourcefile)
if queries:
create_playlist(queries)
g.message = util.F('pl help')
g.content = content.playlists_display()
def read_sourcefile(filename):
"""Read each line as a query from filename"""
with open(filename) as srcfl:
queries = list()
for item in srcfl.readlines():
clean_item = str(item).strip()
if not clean_item:
continue
queries.append(clean_item)
return queries
def check_sourcefile(filename):
"""Check if filename exists and has a non-zero size"""
return path.isfile(filename) and path.getsize(filename) > 0
def create_playlist(queries, title=None):
"""Add a new playlist
Create playlist with a random name, get the first
match for each title in queries and append it to the playlist
"""
plname = None
if (title is not None):
plname=title.replace(" ", "-")
else:
plname=random_plname()
if not g.userpl.get(plname):
g.userpl[plname] = Playlist(plname)
for query in queries:
g.message = util.F('mkp finding') % query
screen.update()
qresult = find_best_match(query)
if qresult:
g.userpl[plname].songs.append(qresult)
if g.userpl[plname]:
playlists.save()
def find_best_match(query):
"""Find the best(first)"""
# This assumes that the first match is the best one
qs = search.generate_search_qs(query)
wdata = pafy.call_gdata('search', qs)
results = search.get_tracks_from_json(wdata)
if results:
res, score = album_search._best_song_match(
results, query, 0.1, 1.0, 0.0)
return res
def random_plname():
"""Generates a random alphanumeric string of 6 characters"""
n_chars = 6
return ''.join(choice(string.ascii_lowercase + string.digits)
for _ in range(n_chars))
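# Hedged usage sketch (added for illustration, not part of the original module):
# the generated playlist name is always six lowercase alphanumeric characters.
def _demo_random_plname():
    name = random_plname()
    assert len(name) == 6 and name.isalnum()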
def description_generator(text):
""" Fetches a videos description and parses it for
<artist> - <track> combinations
"""
if not isinstance(g.model, Playlist):
g.message = util.F("mkp desc unknown")
return
# Use only the first result, for now
num = text.replace("--description", "")
num = num.replace("-d", "")
num = util.number_string_to_list(num)[0]
query = {}
query['id'] = g.model[num].ytid
query['part'] = 'snippet'
query['maxResults'] = '1'
data = pafy.call_gdata('videos', query)['items'][0]['snippet']
title = "mkp %s" % data['title']
data = util.fetch_songs(data['description'], data['title'])
columns = [
{"name": "idx", "size": 3, "heading": "Num"},
{"name": "artist", "size": 30, "heading": "Artist"},
{"name": "title", "size": "remaining", "heading": "Title"},
]
def run_m(idx):
""" Create playlist based on the
results selected
"""
create_playlist(idx, title)
if data:
data = [listview.ListSongtitle(x) for x in data]
g.content = listview.ListView(columns, data, run_m)
g.message = util.F("mkp desc which data")
else:
g.message = util.F("mkp no valid")
return
| np1/mps-youtube | mps_youtube/commands/generate_playlist.py | Python | gpl-3.0 | 4,048 |
import sys,os
os.environ["EPICS_CA_ADDR_LIST"] = "192.168.82.10"
os.environ["EPICS_CA_MAX_ARRAY_BYTES"] = "100000000"
import velaINJMagnetControl as VIMC
a = VIMC.velaINJMagnetController(True,False)
a.switchONpsu('SOL')
#print(a.isON('HVCOR'))
print(a.getRI('SOL'))
#print(a.getILockStates('HVxhfjsfhk01'))
a.switchONpsu('SOL')
#print(a.isON('HVCOR'))
print(a.isON('SOL'))
| adb-xkc85723/VELA-CLARA-Controllers | Controllers/VELA/INJECTOR/velaINJMagnets/bin/Release/test.py | Python | gpl-3.0 | 377 |
from openstates.utils import LXMLMixin
import datetime as dt
from pupa.scrape import Scraper, Event
from .utils import get_short_codes
from requests import HTTPError
import pytz
URL = "http://www.capitol.hawaii.gov/upcominghearings.aspx"
class HIEventScraper(Scraper, LXMLMixin):
def get_related_bills(self, href):
ret = []
try:
page = self.lxmlize(href)
except HTTPError:
return ret
bills = page.xpath(".//a[contains(@href, 'Bills')]")
for bill in bills:
try:
row = next(bill.iterancestors(tag='tr'))
except StopIteration:
continue
tds = row.xpath("./td")
descr = tds[1].text_content()
for i in ['\r\n', '\xa0']:
descr = descr.replace(i, '')
ret.append({"bill_id": bill.text_content(),
"type": "consideration",
"descr": descr})
return ret
def scrape(self):
tz = pytz.timezone("US/Eastern")
get_short_codes(self)
page = self.lxmlize(URL)
table = page.xpath(
"//table[@id='ctl00_ContentPlaceHolderCol1_GridView1']")[0]
for event in table.xpath(".//tr")[1:]:
tds = event.xpath("./td")
committee = tds[0].text_content().strip()
descr = [x.text_content() for x in tds[1].xpath(".//span")]
if len(descr) != 1:
raise Exception
descr = descr[0].replace('.', '').strip()
when = tds[2].text_content().strip()
where = tds[3].text_content().strip()
notice = tds[4].xpath(".//a")[0]
notice_href = notice.attrib['href']
notice_name = notice.text
when = dt.datetime.strptime(when, "%m/%d/%Y %I:%M %p")
when = pytz.utc.localize(when)
event = Event(name=descr, start_time=when, classification='committee-meeting',
description=descr, location_name=where, timezone=tz.zone)
if "/" in committee:
committees = committee.split("/")
else:
committees = [committee]
for committee in committees:
if "INFO" not in committee:
committee = self.short_ids.get("committee", {"chamber": "unknown",
"name": committee})
else:
committee = {
"chamber": "joint",
"name": committee,
}
event.add_committee(committee['name'], note='host')
event.add_source(URL)
event.add_document(notice_name,
notice_href,
media_type='text/html')
for bill in self.get_related_bills(notice_href):
a = event.add_agenda_item(description=bill['descr'])
a.add_bill(
bill['bill_id'],
note=bill['type']
)
yield event
| cliftonmcintosh/openstates | openstates/hi/events.py | Python | gpl-3.0 | 3,151 |
# -*- coding: utf-8 -*-
from oauth2 import Consumer, Client, Token
from httplib2 import ProxyInfo
from httplib2.socks import PROXY_TYPE_HTTP
from django.conf import settings
class Authentication(object):
def __init__(self, consumer_key, consumer_secret, token_key, token_secret):
consumer = Consumer(key=consumer_key, secret=consumer_secret)
token = Token(key=token_key, secret=token_secret)
proxy_info = None
if hasattr(settings, 'PROXY_HOST') and \
hasattr(settings, 'PROXY_PORT'):
proxy_info = ProxyInfo(
proxy_type=PROXY_TYPE_HTTP,
proxy_host=settings.PROXY_HOST,
proxy_port=settings.PROXY_PORT)
self.client = Client(
consumer=consumer,
token=token,
proxy_info=proxy_info)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
| Gandi/baobab | baobab/utils/authentication.py | Python | gpl-3.0 | 950 |
# -*- coding: utf-8 -*-
def classeq(x, y):
return x.__class__==y.__class__
class Element(object): pass
| chaosim/dao | dao/base.py | Python | gpl-3.0 | 113 |
import json
import time
class TaskQueueInputError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class TaskQueueSystemError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class TaskQueueEmptyError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
def epoch():
return int(time.time())
def obj2jsonstring(object):
return json.dumps(object)
def generate_uuid(name, schedule, db_id):
return '%s|%d|%d' % (name, schedule, db_id)
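# Hedged usage sketch (added for illustration, not part of the original module):
# the uuid is simply the three fields joined with '|'; the values are made up.
def _demo_generate_uuid():
    assert generate_uuid('backup', 1700000000, 42) == 'backup|1700000000|42'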
| weijuly/dist-sched-tasks | taskq/taskq/utils/common.py | Python | gpl-3.0 | 613 |
"""
Conversion pack for October 2021 release
"""
CONVERSIONS = {
# Renamed items
"Quafe Zero": "Quafe Zero Classic",
"Exigent Sentry Drone Navigation Mutaplasmid": "Exigent Sentry Drone Precision Mutaplasmid",
}
| pyfa-org/Pyfa | service/conversions/releaseOct2021.py | Python | gpl-3.0 | 225 |
ROOT = '/.well-known/acme-challenge'
ENDPOINT = '/k9s7WeOPg3HdSjwlAqEVRxnezsGGe-CFOwPfOcU3VgU'
RESPONSE = 'k9s7WeOPg3HdSjwlAqEVRxnezsGGe-CFOwPfOcU3VgU.QBkCfzPq0mKXIJSktgl4_b7psKazh3MSZ8juWnZbJbg'
| zejacobi/DeltaGreen | LetsEncryptConfig.py | Python | gpl-3.0 | 196 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import logging
import os
import stat
from unittest import mock
import fixtures
import http.server
import progressbar
import threading
import testscenarios
import testtools
import snapcraft
from snapcraft.internal import common, elf, steps
from snapcraft.internal.project_loader import grammar_processing
from tests import fake_servers, fixture_setup
from tests.file_utils import get_snapcraft_path
class ContainsList(list):
def __eq__(self, other):
return all([i[0] in i[1] for i in zip(self, other)])
class MockOptions:
def __init__(
self,
source=None,
source_type=None,
source_branch=None,
source_tag=None,
source_subdir=None,
source_depth=None,
source_commit=None,
source_checksum=None,
disable_parallel=False,
):
self.source = source
self.source_type = source_type
self.source_depth = source_depth
self.source_branch = source_branch
self.source_commit = source_commit
self.source_tag = source_tag
self.source_subdir = source_subdir
self.disable_parallel = disable_parallel
class IsExecutable:
"""Match if a file path is executable."""
def __str__(self):
return "IsExecutable()"
def match(self, file_path):
if not os.stat(file_path).st_mode & stat.S_IEXEC:
return testtools.matchers.Mismatch(
"Expected {!r} to be executable, but it was not".format(file_path)
)
return None
class LinkExists:
"""Match if a file path is a symlink."""
def __init__(self, expected_target=None):
self._expected_target = expected_target
def __str__(self):
return "LinkExists()"
def match(self, file_path):
if not os.path.exists(file_path):
return testtools.matchers.Mismatch(
"Expected {!r} to be a symlink, but it doesn't exist".format(file_path)
)
if not os.path.islink(file_path):
return testtools.matchers.Mismatch(
"Expected {!r} to be a symlink, but it was not".format(file_path)
)
target = os.readlink(file_path)
if target != self._expected_target:
return testtools.matchers.Mismatch(
"Expected {!r} to be a symlink pointing to {!r}, but it was "
"pointing to {!r}".format(file_path, self._expected_target, target)
)
return None
class TestCase(testscenarios.WithScenarios, testtools.TestCase):
def setUp(self):
super().setUp()
temp_cwd_fixture = fixture_setup.TempCWD()
self.useFixture(temp_cwd_fixture)
self.path = temp_cwd_fixture.path
# Use a separate path for XDG dirs, or changes there may be detected as
# source changes.
self.xdg_path = self.useFixture(fixtures.TempDir()).path
self.useFixture(fixture_setup.TempXDG(self.xdg_path))
self.fake_terminal = fixture_setup.FakeTerminal()
self.useFixture(self.fake_terminal)
self.useFixture(fixture_setup.SilentSnapProgress())
# Some tests will directly or indirectly change the plugindir, which
# is a module variable. Make sure that it is returned to the original
# value when a test ends.
self.addCleanup(common.set_plugindir, common.get_plugindir())
self.addCleanup(common.set_schemadir, common.get_schemadir())
self.addCleanup(common.set_librariesdir, common.get_librariesdir())
self.addCleanup(common.set_extensionsdir, common.get_extensionsdir())
self.addCleanup(common.reset_env)
common.set_schemadir(os.path.join(get_snapcraft_path(), "schema"))
self.fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(self.fake_logger)
patcher = mock.patch("multiprocessing.cpu_count")
self.cpu_count = patcher.start()
self.cpu_count.return_value = 2
self.addCleanup(patcher.stop)
# We do not want the paths to affect every test we have.
patcher = mock.patch(
"snapcraft.file_utils.get_tool_path", side_effect=lambda x: x
)
patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch(
"snapcraft.internal.indicators.ProgressBar", new=SilentProgressBar
)
patcher.start()
self.addCleanup(patcher.stop)
# These are what we expect by default
self.snap_dir = os.path.join(os.getcwd(), "snap")
self.prime_dir = os.path.join(os.getcwd(), "prime")
self.stage_dir = os.path.join(os.getcwd(), "stage")
self.parts_dir = os.path.join(os.getcwd(), "parts")
self.local_plugins_dir = os.path.join(self.snap_dir, "plugins")
# Avoid installing patchelf in the tests
self.useFixture(fixtures.EnvironmentVariable("SNAPCRAFT_NO_PATCHELF", "1"))
# Disable Sentry reporting for tests, otherwise they'll hang waiting
# for input
self.useFixture(
fixtures.EnvironmentVariable("SNAPCRAFT_ENABLE_ERROR_REPORTING", "false")
)
# Don't let the managed host variable leak into tests
self.useFixture(fixtures.EnvironmentVariable("SNAPCRAFT_MANAGED_HOST"))
machine = os.environ.get("SNAPCRAFT_TEST_MOCK_MACHINE", None)
self.base_environment = fixture_setup.FakeBaseEnvironment(machine=machine)
self.useFixture(self.base_environment)
# Make sure "SNAPCRAFT_ENABLE_DEVELOPER_DEBUG" is reset between tests
self.useFixture(
fixtures.EnvironmentVariable("SNAPCRAFT_ENABLE_DEVELOPER_DEBUG")
)
self.useFixture(fixture_setup.FakeSnapcraftctl())
def make_snapcraft_yaml(self, content, encoding="utf-8"):
with contextlib.suppress(FileExistsError):
os.mkdir("snap")
snapcraft_yaml = os.path.join("snap", "snapcraft.yaml")
with open(snapcraft_yaml, "w", encoding=encoding) as fp:
fp.write(content)
return snapcraft_yaml
def verify_state(self, part_name, state_dir, expected_step_name):
self.assertTrue(
os.path.isdir(state_dir),
"Expected state directory for {}".format(part_name),
)
# Expect every step up to and including the specified one to be run
step = steps.get_step_by_name(expected_step_name)
for step in step.previous_steps() + [step]:
self.assertTrue(
os.path.exists(os.path.join(state_dir, step.name)),
"Expected {!r} to be run for {}".format(step.name, part_name),
)
def load_part(
self,
part_name,
plugin_name=None,
part_properties=None,
project_options=None,
stage_packages_repo=None,
base="core",
confinement="strict",
snap_type="app",
):
if not plugin_name:
plugin_name = "nil"
properties = {"plugin": plugin_name}
if part_properties:
properties.update(part_properties)
if not project_options:
project_options = snapcraft.ProjectOptions()
validator = snapcraft.internal.project_loader.Validator()
schema = validator.part_schema
definitions_schema = validator.definitions_schema
plugin = snapcraft.internal.pluginhandler.load_plugin(
part_name=part_name,
plugin_name=plugin_name,
properties=properties,
project_options=project_options,
part_schema=schema,
definitions_schema=definitions_schema,
)
if not stage_packages_repo:
stage_packages_repo = mock.Mock()
grammar_processor = grammar_processing.PartGrammarProcessor(
plugin=plugin,
properties=properties,
project=project_options,
repo=stage_packages_repo,
)
return snapcraft.internal.pluginhandler.PluginHandler(
plugin=plugin,
part_properties=properties,
project_options=project_options,
part_schema=schema,
definitions_schema=definitions_schema,
grammar_processor=grammar_processor,
stage_packages_repo=stage_packages_repo,
snap_base_path="/snap/fake-name/current",
base=base,
confinement=confinement,
snap_type=snap_type,
soname_cache=elf.SonameCache(),
)
class TestWithFakeRemoteParts(TestCase):
def setUp(self):
super().setUp()
self.useFixture(fixture_setup.FakeParts())
class FakeFileHTTPServerBasedTestCase(TestCase):
def setUp(self):
super().setUp()
self.useFixture(fixtures.EnvironmentVariable("no_proxy", "localhost,127.0.0.1"))
self.server = http.server.HTTPServer(
("127.0.0.1", 0), fake_servers.FakeFileHTTPRequestHandler
)
server_thread = threading.Thread(target=self.server.serve_forever)
self.addCleanup(server_thread.join)
self.addCleanup(self.server.server_close)
self.addCleanup(self.server.shutdown)
server_thread.start()
class SilentProgressBar(progressbar.ProgressBar):
"""A progress bar causing no spurious output during tests."""
def start(self):
pass
def update(self, value=None):
pass
def finish(self):
pass
| sergiusens/snapcraft | tests/unit/__init__.py | Python | gpl-3.0 | 10,138 |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015, Philipp Klaus. All rights reserved.
License: GPLv3
"""
from distutils.core import setup
setup(name='netio230a',
version = '1.1.9',
description = 'Python package to control the Koukaam NETIO-230A',
long_description = 'Python software to access the Koukaam NETIO-230A and NETIO-230B: power distribution units / controllable power outlets with Ethernet interface',
author = 'Philipp Klaus',
author_email = '[email protected]',
url = 'https://github.com/pklaus/netio230a',
license = 'GPL3+',
packages = ['netio230a'],
scripts = ['scripts/netio230a_cli', 'scripts/netio230a_discovery', 'scripts/netio230a_fakeserver'],
zip_safe = True,
platforms = 'any',
keywords = 'Netio230A Koukaam PDU',
classifiers = [
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'License :: OSI Approved :: GPL License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
]
)
| pklaus/netio230a | setup.py | Python | gpl-3.0 | 1,135 |
import actor
class Oxygen(actor.Actor):
extra_keys = ['capacity', 'pipe_length', 'is_initial']
def __init__(self, *args, **kwargs):
super(Oxygen, self).__init__(*args, **kwargs)
self.capacity = kwargs.get('capacity', 3000)
self.contained = self.capacity
self.pipe_length = kwargs.get('pipe_length', 5000)
self.is_initial = kwargs.get('is_initial', False)
def tick(self):
if self.contained == 0:
self.world.dispatch_event("on_suffocate")
| italomaia/turtle-linux | games/DigbyMarshmallow/lib/oxygen.py | Python | gpl-3.0 | 510 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.stdio}.
@var properEnv: A copy of L{os.environ} which has L{bytes} keys/values on POSIX
platforms and native L{str} keys/values on Windows.
"""
from __future__ import absolute_import, division
import os
import sys
import itertools
from twisted.trial import unittest
from twisted.python import filepath, log
from twisted.python.reflect import requireModule
from twisted.python.runtime import platform
from twisted.python.compat import xrange, intToBytes, bytesEnviron
from twisted.internet import error, defer, protocol, stdio, reactor
from twisted.test.test_tcp import ConnectionLostNotifyingProtocol
# A short string which is intended to appear here and nowhere else,
# particularly not in any random garbage output CPython unavoidably
# generates (such as in warning text and so forth). This is searched
# for in the output from stdio_test_lastwrite and if it is found at
# the end, the functionality works.
UNIQUE_LAST_WRITE_STRING = b'xyz123abc Twisted is great!'
skipWindowsNopywin32 = None
if platform.isWindows():
if requireModule('win32process') is None:
skipWindowsNopywin32 = ("On windows, spawnProcess is not available "
"in the absence of win32process.")
properEnv = dict(os.environ)
properEnv["PYTHONPATH"] = os.pathsep.join(sys.path)
else:
properEnv = bytesEnviron()
properEnv[b"PYTHONPATH"] = os.pathsep.join(sys.path).encode(
sys.getfilesystemencoding())
class StandardIOTestProcessProtocol(protocol.ProcessProtocol):
"""
Test helper for collecting output from a child process and notifying
something when it exits.
@ivar onConnection: A L{defer.Deferred} which will be called back with
C{None} when the connection to the child process is established.
@ivar onCompletion: A L{defer.Deferred} which will be errbacked with the
failure associated with the child process exiting when it exits.
@ivar onDataReceived: A L{defer.Deferred} which will be called back with
this instance whenever C{childDataReceived} is called, or C{None} to
suppress these callbacks.
@ivar data: A C{dict} mapping file descriptors to strings containing all
bytes received from the child process on each file descriptor.
"""
onDataReceived = None
def __init__(self):
self.onConnection = defer.Deferred()
self.onCompletion = defer.Deferred()
self.data = {}
def connectionMade(self):
self.onConnection.callback(None)
def childDataReceived(self, name, bytes):
"""
Record all bytes received from the child process in the C{data}
dictionary. Fire C{onDataReceived} if it is not C{None}.
"""
self.data[name] = self.data.get(name, b'') + bytes
if self.onDataReceived is not None:
d, self.onDataReceived = self.onDataReceived, None
d.callback(self)
def processEnded(self, reason):
self.onCompletion.callback(reason)
class StandardInputOutputTests(unittest.TestCase):
skip = skipWindowsNopywin32
def _spawnProcess(self, proto, sibling, *args, **kw):
"""
Launch a child Python process and communicate with it using the
given ProcessProtocol.
@param proto: A L{ProcessProtocol} instance which will be connected
to the child process.
@param sibling: The basename of a file containing the Python program
to run in the child process.
@param *args: strings which will be passed to the child process on
the command line as C{argv[2:]}.
@param **kw: additional arguments to pass to L{reactor.spawnProcess}.
@return: The L{IProcessTransport} provider for the spawned process.
"""
args = [sys.executable,
b"-m", b"twisted.test." + sibling,
reactor.__class__.__module__] + list(args)
return reactor.spawnProcess(
proto,
sys.executable,
args,
env=properEnv,
**kw)
def _requireFailure(self, d, callback):
def cb(result):
self.fail("Process terminated with non-Failure: %r" % (result,))
def eb(err):
return callback(err)
return d.addCallbacks(cb, eb)
def test_loseConnection(self):
"""
Verify that a protocol connected to L{StandardIO} can disconnect
itself using C{transport.loseConnection}.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_loseconn', errorLogFile)
def processEnded(reason):
# Copy the child's log to ours so it's more visible.
with open(errorLogFile, 'r') as f:
for line in f:
log.msg("Child logged: " + line.rstrip())
self.failIfIn(1, p.data)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_readConnectionLost(self):
"""
When stdin is closed and the protocol connected to it implements
L{IHalfCloseableProtocol}, the protocol's C{readConnectionLost} method
is called.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
p.onDataReceived = defer.Deferred()
def cbBytes(ignored):
d = p.onCompletion
p.transport.closeStdin()
return d
p.onDataReceived.addCallback(cbBytes)
def processEnded(reason):
reason.trap(error.ProcessDone)
d = self._requireFailure(p.onDataReceived, processEnded)
self._spawnProcess(
p, b'stdio_test_halfclose', errorLogFile)
return d
def test_lastWriteReceived(self):
"""
Verify that a write made directly to stdout using L{os.write}
after StandardIO has finished is reliably received by the
process reading that stdout.
"""
p = StandardIOTestProcessProtocol()
# Note: the OS X bug which prompted the addition of this test
# is an apparent race condition involving non-blocking PTYs.
# Delaying the parent process significantly increases the
# likelihood of the race going the wrong way. If you need to
# fiddle with this code at all, uncommenting the next line
# will likely make your life much easier. It is commented out
# because it makes the test quite slow.
# p.onConnection.addCallback(lambda ign: __import__('time').sleep(5))
try:
self._spawnProcess(
p, b'stdio_test_lastwrite', UNIQUE_LAST_WRITE_STRING,
usePTY=True)
except ValueError as e:
# Some platforms don't work with usePTY=True
raise unittest.SkipTest(str(e))
def processEnded(reason):
"""
Asserts that the parent received the bytes written by the child
immediately after the child starts.
"""
self.assertTrue(
p.data[1].endswith(UNIQUE_LAST_WRITE_STRING),
"Received %r from child, did not find expected bytes." % (
p.data,))
reason.trap(error.ProcessDone)
return self._requireFailure(p.onCompletion, processEnded)
def test_hostAndPeer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
has C{getHost} and C{getPeer} methods.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_hostpeer')
def processEnded(reason):
host, peer = p.data[1].splitlines()
self.assertTrue(host)
self.assertTrue(peer)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_write(self):
"""
Verify that the C{write} method of the transport of a protocol
connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_write')
def processEnded(reason):
self.assertEqual(p.data[1], b'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_writeSequence(self):
"""
Verify that the C{writeSequence} method of the transport of a
protocol connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_writeseq')
def processEnded(reason):
self.assertEqual(p.data[1], b'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def _junkPath(self):
junkPath = self.mktemp()
with open(junkPath, 'wb') as junkFile:
for i in xrange(1024):
junkFile.write(intToBytes(i) + b'\n')
return junkPath
def test_producer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IProducer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
written = []
toWrite = list(range(100))
def connectionMade(ign):
if toWrite:
written.append(intToBytes(toWrite.pop()) + b"\n")
proc.write(written[-1])
reactor.callLater(0.01, connectionMade, None)
proc = self._spawnProcess(p, b'stdio_test_producer')
p.onConnection.addCallback(connectionMade)
def processEnded(reason):
self.assertEqual(p.data[1], b''.join(written))
self.assertFalse(
toWrite,
"Connection lost with %d writes left to go." % (len(toWrite),))
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_consumer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IConsumer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
junkPath = self._junkPath()
self._spawnProcess(p, b'stdio_test_consumer', junkPath)
def processEnded(reason):
with open(junkPath, 'rb') as f:
self.assertEqual(p.data[1], f.read())
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_normalFileStandardOut(self):
"""
If L{StandardIO} is created with a file descriptor which refers to a
normal file (ie, a file from the filesystem), L{StandardIO.write}
writes bytes to that file. In particular, it does not immediately
consider the file closed or call its protocol's C{connectionLost}
method.
"""
onConnLost = defer.Deferred()
proto = ConnectionLostNotifyingProtocol(onConnLost)
path = filepath.FilePath(self.mktemp())
self.normal = normal = path.open('wb')
self.addCleanup(normal.close)
kwargs = dict(stdout=normal.fileno())
if not platform.isWindows():
# Make a fake stdin so that StandardIO doesn't mess with the *real*
# stdin.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
kwargs['stdin'] = r
connection = stdio.StandardIO(proto, **kwargs)
# The reactor needs to spin a bit before it might have incorrectly
# decided stdout is closed. Use this counter to keep track of how
# much we've let it spin. If it closes before we expected, this
# counter will have a value that's too small and we'll know.
howMany = 5
count = itertools.count()
def spin():
for value in count:
if value == howMany:
connection.loseConnection()
return
connection.write(intToBytes(value))
break
reactor.callLater(0, spin)
reactor.callLater(0, spin)
# Once the connection is lost, make sure the counter is at the
# appropriate value.
def cbLost(reason):
self.assertEqual(next(count), howMany + 1)
self.assertEqual(
path.getContent(),
b''.join(map(intToBytes, range(howMany))))
onConnLost.addCallback(cbLost)
return onConnLost
if platform.isWindows():
test_normalFileStandardOut.skip = (
"StandardIO does not accept stdout as an argument to Windows. "
"Testing redirection to a file is therefore harder.")
| Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/test/test_stdio.py | Python | gpl-3.0 | 13,157 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains expectations."""
import inquisition
FISHY = inquisition.SPANISH
FISHY = FISHY.replace('surprise', 'haddock')
print FISHY
| aedoler/is210-week-03-synthesizing | task_01.py | Python | mpl-2.0 | 183 |
import csv
from datetime import datetime
from django.conf import settings
from django.core.management import BaseCommand
from bustimes.utils import download_if_changed
from ...models import Licence, Registration, Variation
def parse_date(date_string):
if date_string:
return datetime.strptime(date_string, '%d/%m/%y').date()
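# Illustrative examples (not part of the original module) of the DVSA
# day/month/two-digit-year format this helper expects:
#   parse_date('28/02/21')  # -> datetime.date(2021, 2, 28)
#   parse_date('')          # -> None (falsy input falls through)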
def download_if_modified(path):
url = f"https://content.mgmt.dvsacloud.uk/olcs.prod.dvsa.aws/data-gov-uk-export/{path}"
return download_if_changed(settings.DATA_DIR / path, url)
class Command(BaseCommand):
@staticmethod
def add_arguments(parser):
parser.add_argument('regions', nargs='?', type=str, default="FBCMKGDH")
def get_rows(self, path):
with open(settings.DATA_DIR / path) as open_file:
yield from csv.DictReader(open_file)
def handle(self, regions, **kwargs):
for region in regions:
modified_1, last_modified_1 = download_if_modified(f"Bus_RegisteredOnly_{region}.csv")
modified_2, last_modified_2 = download_if_modified(f"Bus_Variation_{region}.csv")
if modified_1 or modified_2:
print(region, last_modified_1, last_modified_2)
self.handle_region(region)
def handle_region(self, region):
lics = Licence.objects.filter(traffic_area=region)
lics = lics.in_bulk(field_name="licence_number")
lics_to_update = set()
lics_to_create = []
regs = Registration.objects.filter(licence__traffic_area=region)
regs = regs.in_bulk(field_name="registration_number")
regs_to_update = set()
regs_to_create = []
variations = Variation.objects.filter(registration__licence__traffic_area=region)
variations = variations.select_related('registration').all()
variations_dict = {}
for variation in variations:
reg_no = variation.registration.registration_number
if reg_no in variations_dict:
variations_dict[reg_no][variation.variation_number] = variation
else:
variations_dict[reg_no] = {
variation.variation_number: variation
}
# vars_to_update = set()
vars_to_create = []
# previous_line = None
# cardinals = set()
for line in self.get_rows(f"Bus_Variation_{region}.csv"):
reg_no = line["Reg_No"]
var_no = int(line["Variation Number"])
lic_no = line["Lic_No"]
if lic_no in lics:
licence = lics[lic_no]
if licence.id and licence not in lics_to_update:
licence.trading_name = ''
lics_to_update.add(licence)
else:
licence = Licence(licence_number=lic_no)
lics_to_create.append(licence)
lics[lic_no] = licence
licence.name = line['Op_Name']
# a licence can have multiple trading names
if line['trading_name'] not in licence.trading_name:
if licence.trading_name:
licence.trading_name = f"{licence.trading_name}\n{line['trading_name']}"
else:
licence.trading_name = line['trading_name']
if licence.address != line['Address']:
if licence.address:
print(licence.address, line['Address'])
licence.address = line['Address']
if licence.traffic_area:
assert licence.traffic_area == line['Current Traffic Area']
else:
licence.traffic_area = line['Current Traffic Area']
licence.discs = line['Discs in Possession'] or 0
licence.authorised_discs = line['AUTHDISCS'] or 0
licence.description = line['Description']
licence.granted_date = parse_date(line['Granted_Date'])
licence.expiry_date = parse_date(line['Exp_Date'])
if len(reg_no) > 20:
# PK0000098/PK0000098/364
parts = reg_no.split('/')
assert parts[0] == parts[1]
reg_no = f'{parts[1]}/{parts[2]}'
if reg_no in regs:
registration = regs[reg_no]
if registration.id and registration not in regs_to_update:
regs_to_update.add(registration)
else:
registration = Registration(
registration_number=reg_no,
registered=False
)
regs_to_create.append(registration)
regs[reg_no] = registration
registration.licence = licence
status = line['Registration Status']
registration.registration_status = status
if var_no == 0 and status == 'New':
registration.registered = True
elif status == 'Registered':
registration.registered = True
elif status == 'Cancelled' or status == 'Admin Cancelled' or status == 'Cancellation':
registration.registered = False
registration.start_point = line['start_point']
registration.finish_point = line['finish_point']
registration.via = line['via']
registration.subsidies_description = line['Subsidies_Description']
registration.subsidies_details = line['Subsidies_Details']
registration.traffic_area_office_covered_by_area = line['TAO Covered BY Area']
# a registration can have multiple numbers
if registration.service_number:
if line['Service Number'] not in registration.service_number:
registration.service_number = f"{registration.service_number}\n{line['Service Number']}"
else:
registration.service_number = line['Service Number']
# a registration can have multiple types
if registration.service_type_description:
if line['Service_Type_Description'] not in registration.service_type_description:
registration.service_type_description += f"\n{line['Service_Type_Description']}"
else:
registration.service_type_description = line['Service_Type_Description']
if registration.authority_description:
if line['Auth_Description'] not in registration.authority_description:
registration.authority_description += f"\n{line['Auth_Description']}"
if len(registration.authority_description) > 255:
# some National Express coach services cover many authorities
# print(reg_no)
registration.authority_description = registration.authority_description[:255]
else:
registration.authority_description = line['Auth_Description']
# if previous_line:
# if previous_line["Reg_No"] == reg_no:
# if int(previous_line["Variation Number"]) == var_no:
# for key in line:
# prev = previous_line[key]
# value = line[key]
# if prev != value:
# if key not in (
# 'Auth_Description', 'TAO Covered BY Area',
# 'trading_name', 'Pub_Text', 'Registration Status', 'end_date', 'received_date'
# 'effective_date', 'short_notice', 'Service_Type_Description'
# ):
# print(reg_no)
# print(f"'{key}': '{prev}', '{value}'")
# cardinals.add(key)
# # print(line)
variation = Variation(registration=registration, variation_number=var_no)
if reg_no in variations_dict:
if var_no in variations_dict[reg_no]:
continue # ?
else:
variations_dict[reg_no][var_no] = variation
else:
variations_dict[reg_no] = {var_no: variation}
variation.effective_date = parse_date(line['effective_date'])
variation.date_received = parse_date(line['received_date'])
variation.end_date = parse_date(line['end_date'])
variation.service_type_other_details = line['Service_Type_Other_Details']
variation.registration_status = line['Registration Status']
variation.publication_text = line['Pub_Text']
variation.short_notice = line['Short Notice']
assert not variation.id
if not variation.id:
vars_to_create.append(variation)
# previous_line = line
# previous_line = None
# cardinals = set()
# use this file to work out if a registration has not been cancelled/expired
for line in self.get_rows(f"Bus_RegisteredOnly_{region}.csv"):
reg_no = line["Reg_No"]
reg = regs[reg_no]
if reg.registration_status != line["Registration Status"]:
reg.registration_status = line["Registration Status"]
reg.registered = True
# if previous_line and previous_line["Reg_No"] == reg_no:
# for key in line:
# prev = previous_line[key]
# value = line[key]
# if prev != value:
# cardinals.add(key)
# if key == 'TAO Covered BY Area':
# print(prev, value)
# previous_line = line
# print(cardinals)
Licence.objects.bulk_update(
lics_to_update,
["name", "trading_name", "traffic_area", "discs", "authorised_discs",
"description", "granted_date", "expiry_date", "address"]
)
Licence.objects.bulk_create(lics_to_create)
        for registration in regs_to_create:
            # Re-assign the licence so the registration picks up the primary key
            # that bulk_create gave the (previously unsaved) Licence above;
            # licence_id was still None when the object was first assigned.
            registration.licence = registration.licence
Registration.objects.bulk_update(
regs_to_update,
["start_point", "finish_point", "via",
"subsidies_description", "subsidies_details",
"traffic_area_office_covered_by_area",
"service_number", "service_type_description",
"registration_status", "authority_description",
"registered"],
batch_size=1000
)
Registration.objects.bulk_create(regs_to_create)
Variation.objects.bulk_create(vars_to_create)
# Variation.objects.bulk_update(
# vars_to_update,
# ['date_received', 'end_date', 'service_type_other_details', 'registration_status', 'publication_text',
# 'short_notice']
# )
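# Usage sketch (assuming this module is installed as a Django management command
# named "import_vosa"; the positional argument lists traffic-area letters and
# defaults to "FBCMKGDH"):
#
#   ./manage.py import_vosa        # refresh every default region
#   ./manage.py import_vosa FB     # only the "F" and "B" traffic areas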
| jclgoodwin/bustimes.org.uk | vosa/management/commands/import_vosa.py | Python | mpl-2.0 | 11,021 |
from unittest import TestCase
from MyCapytain.resources.collections.cts import XmlCtsTextInventoryMetadata, XmlCtsTextgroupMetadata, XmlCtsWorkMetadata, XmlCtsEditionMetadata, XmlCtsTranslationMetadata
from MyCapytain.resources.prototypes.cts.inventory import CtsTextgroupMetadata
with open("tests/testing_data/examples/getcapabilities.seneca.xml") as f:
SENECA = f.read()
class TestCollectionCtsInheritance(TestCase):
def test_types(self):
TI = XmlCtsTextInventoryMetadata.parse(resource=SENECA)
self.assertCountEqual(
[type(descendant) for descendant in TI.descendants],
[XmlCtsTextgroupMetadata] + [XmlCtsWorkMetadata] * 10 + [XmlCtsEditionMetadata] * 10,
"Descendant should be correctly parsed into correct types"
)
self.assertCountEqual(
[type(descendant) for descendant in TI.readableDescendants],
[XmlCtsWorkMetadata] * 0 + [XmlCtsEditionMetadata] * 10,
"Descendant should be correctly parsed into correct types and filtered when readable"
)
def test_title(self):
TI = XmlCtsTextInventoryMetadata.parse(resource=SENECA)
self.assertCountEqual(
[str(descendant.get_label()) for descendant in TI.descendants],
["Seneca, Lucius Annaeus", "de Ira", "de Vita Beata", "de consolatione ad Helviam", "de Constantia",
"de Tranquilitate Animi", "de Brevitate Vitae", "de consolatione ad Polybium",
"de consolatione ad Marciam", "de Providentia", "de Otio Sapientis", "de Ira, Moral essays Vol 2",
"de Vita Beata, Moral essays Vol 2", "de consolatione ad Helviam, Moral essays Vol 2",
"de Constantia, Moral essays Vol 2", "de Tranquilitate Animi, Moral essays Vol 2",
"de Brevitate Vitae, Moral essays Vol 2", "de consolatione ad Polybium, Moral essays Vol 2",
"de consolatione ad Marciam, Moral essays Vol 2", "de Providentia, Moral essays Vol 2",
"de Otio Sapientis, Moral essays Vol 2"],
"Title should be computed correctly : default should be set"
)
def test_new_object(self):
""" When creating an object with same urn, we should retrieve the same metadata"""
TI = XmlCtsTextInventoryMetadata.parse(resource=SENECA)
a = TI["urn:cts:latinLit:stoa0255.stoa012.perseus-lat2"].metadata
b = (CtsTextgroupMetadata("urn:cts:latinLit:stoa0255")).metadata | Capitains/MyCapytain | tests/resources/collections/test_cts_collection_inheritance.py | Python | mpl-2.0 | 2,461 |
from urllib.parse import urlparse
import subprocess
import logging
import boto3
import airflow.hooks.base_hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
import utils.helpers as helpers
class PostgresToS3Transfer(BaseOperator):
'''Dumps a Postgres database to a S3 key
:param postgres_conn_id: Postgres Connection's ID.
:type postgres_conn_id: str
:param tables: List of tables to export (optional, default exports all
tables).
:type tables: list of str
:param s3_conn_id: S3 Connection's ID. It needs a JSON in the `extra` field
with `aws_access_key_id` and `aws_secret_access_key`
:type s3_conn_id: str
:param s3_url: S3 url (e.g. `s3://my_bucket/my_key.zip`) (templated)
:type s3_url: str
'''
template_fields = ('s3_url',)
@apply_defaults
def __init__(self, postgres_conn_id, s3_conn_id, s3_url, tables=None, *args, **kwargs):
super(PostgresToS3Transfer, self).__init__(*args, **kwargs)
self.postgres_conn_id = postgres_conn_id
self.tables = tables
self.s3_conn_id = s3_conn_id
self.s3_url = s3_url
def execute(self, context):
s3 = self._load_s3_connection(self.s3_conn_id)
s3_bucket, s3_key = self._parse_s3_url(self.s3_url)
command = [
'pg_dump',
'-Fc',
]
if self.tables:
tables_params = ['--table={}'.format(table) for table in self.tables]
command.extend(tables_params)
logging.info('Dumping database "%s" into "%s"', self.postgres_conn_id, self.s3_url)
logging.info('Command: %s <POSTGRES_URI>', ' '.join(command))
command.append(helpers.get_postgres_uri(self.postgres_conn_id))
with subprocess.Popen(command, stdout=subprocess.PIPE).stdout as dump_file:
s3.Bucket(s3_bucket) \
.upload_fileobj(dump_file, s3_key)
@staticmethod
def _parse_s3_url(s3_url):
parsed_url = urlparse(s3_url)
if not parsed_url.netloc:
raise airflow.exceptions.AirflowException('Please provide a bucket_name')
else:
bucket_name = parsed_url.netloc
key = parsed_url.path.strip('/')
return (bucket_name, key)
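    # Illustrative example (not part of the original operator):
    #   _parse_s3_url('s3://my-bucket/dumps/db.dump') -> ('my-bucket', 'dumps/db.dump')
    # A URL without a bucket, e.g. 's3:///db.dump', raises an AirflowException.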
def _load_s3_connection(self, conn_id):
'''
Parses the S3 connection and returns a Boto3 resource.
        This should be implemented using the S3Hook, but that hook currently
        uses boto (not boto3), which doesn't allow streaming.
:return: Boto3 resource
:rtype: boto3.resources.factory.s3.ServiceResource
'''
conn = airflow.hooks.base_hook.BaseHook.get_connection(conn_id)
extra_dejson = conn.extra_dejson
key_id = extra_dejson['aws_access_key_id']
access_key = extra_dejson['aws_secret_access_key']
s3 = boto3.resource(
's3',
aws_access_key_id=key_id,
aws_secret_access_key=access_key
)
return s3
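# Minimal usage sketch (hypothetical DAG, connection ids and bucket, shown only
# for illustration; kept as a comment so importing this module has no side effects):
#
#   from datetime import datetime
#   from airflow import DAG
#
#   with DAG('warehouse_backup', start_date=datetime(2020, 1, 1),
#            schedule_interval='@daily') as dag:
#       dump = PostgresToS3Transfer(
#           task_id='dump_warehouse',
#           postgres_conn_id='warehouse_db',
#           s3_conn_id='backups_s3',
#           s3_url='s3://my-bucket/dumps/{{ ds }}.dump',   # s3_url is templated
#           tables=['public.users'],                       # optional; omit to dump all tables
#       )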
| opentrials/opentrials-airflow | dags/operators/postgres_to_s3_transfer.py | Python | mpl-2.0 | 3,091 |
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.recipe import ICapRecipe, Recipe
from weboob.tools.backend import BaseBackend
from .browser import SevenFiftyGramsBrowser
import unicodedata
def strip_accents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
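# Illustrative example: strip_accents(u'crème brûlée') == u'creme brulee'
# (NFD decomposition splits accented letters, then combining marks are dropped).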
__all__ = ['SevenFiftyGramsBackend']
class SevenFiftyGramsBackend(BaseBackend, ICapRecipe):
NAME = '750g'
MAINTAINER = u'Julien Veyssier'
EMAIL = '[email protected]'
VERSION = '0.h'
DESCRIPTION = u'750g French recipe website'
LICENSE = 'AGPLv3+'
BROWSER = SevenFiftyGramsBrowser
def get_recipe(self, id):
return self.browser.get_recipe(id)
def iter_recipes(self, pattern):
return self.browser.iter_recipes(strip_accents(unicode(pattern)).encode('utf-8'))
def fill_recipe(self, recipe, fields):
if 'nb_person' in fields or 'instructions' in fields:
rec = self.get_recipe(recipe.id)
recipe.picture_url = rec.picture_url
recipe.instructions = rec.instructions
recipe.ingredients = rec.ingredients
recipe.comments = rec.comments
recipe.author = rec.author
recipe.nb_person = rec.nb_person
recipe.cooking_time = rec.cooking_time
recipe.preparation_time = rec.preparation_time
return recipe
OBJECTS = {
Recipe: fill_recipe,
}
| blckshrk/Weboob | modules/750g/backend.py | Python | agpl-3.0 | 2,151 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-22 16:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0007_auto_20170422_1622'),
]
operations = [
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(max_length=256),
),
]
| NotAGameDev/website | website/migrations/0008_auto_20170422_1629.py | Python | agpl-3.0 | 448 |
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Order BOM explode report',
'version': '0.1',
'category': 'Report',
'description': '''
Manage report for order product
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'product',
'sale',
'purchase',
'mrp',
'report_aeroo',
'order_bom',
'bom_category',
'inventory_field', # for inventory field
'bom_order_utility', # Utility for filter
'bom_dynamic_structured', # for filter type category
'textilene_status', # TODO remove when moved company parameters
'production_accounting_external',
'production_forecast_order', # for forecast check
'no_parcels_count', # exclude no parcels product
'product_last_supplier', # last purchase supplier data (for filter)
],
'init_xml': [],
'demo': [],
'data': [
#'security/xml_groups.xml',
#'security/ir.model.access.csv',
'bom_explode_view.xml',
'report/explode_report.xml',
'wizard/report_component_status.xml',
#'scheduler.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
| Micronaet/micronaet-bom | order_bom_explode_report/__openerp__.py | Python | agpl-3.0 | 2,240 |
from django.apps import AppConfig
class PhotosAppConfig(AppConfig):
name = 'livinglots_usercontent.photos'
def ready(self):
try:
from actstream import registry
from . import signals
registry.register(self.get_model('Photo'))
except ImportError:
# django-activity-stream is not installed and that's okay
pass
| 596acres/django-livinglots-usercontent | livinglots_usercontent/photos/apps.py | Python | agpl-3.0 | 396 |
{
"name" : "GII",
"version" : "1.0",
"depends" : ['sale','product'],
"author" : "Novasoft Consultancy Services Pvt. Ltd.",
'category' : 'Generic Modules/Others',
"description": """ GII - Management Module
""",
'website': 'http://www.novasoftindia.com',
'data': ['giisa.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
'application': True,
} | Novasoft-India/OperERP-AM-Motors | openerp/addons/giicourse/__openerp__.py | Python | agpl-3.0 | 444 |
# -*- coding: utf8 -*-
# Author: Adrien Bibal
# Date: 2014
# Insert the student answer in the correction framework file.
import sys
import codecs
input_file = sys.stdin # input = file containing the student answer.
oz_file = codecs.open("/task/task.oz", "r", "utf8") # Open the "correction framework file".
new_file = codecs.open("new_file.oz", "w","utf8") # Open the final file.
for line in oz_file:
# "@@q1@@" is the arbitrary marker used to say "insert the student answer here".
if "@@q1@@" in line :
for input_line in input_file :
if '\0' in input_line :
input_line = input_line.strip('\0')
new_file.write(input_line) # Copy each line from the student answer to the final file.
else :
new_file.write(line) # Copy each line from the "correction framework file" to the final file.
oz_file.close()
new_file.close()
| GuillaumeDerval/INGInious | tests/tasks/edx/HelloWorld/insert_input.py | Python | agpl-3.0 | 891 |
import numpy as np
class PriceHistoryPack(object):
def __init__(self, input_seq_len, num_features, target_seq_len):
super(PriceHistoryPack, self).__init__()
self.sku_ids = []
self.XX = np.empty((0, input_seq_len, num_features))
self.YY = np.empty((0, target_seq_len))
self.sequence_lens = []
self.seq_mask = np.empty((0, input_seq_len))
def update(self, sku_id, inputs, targets, input_seq_len):
self.sku_ids.append(sku_id)
inputs_len = len(inputs)
self.sequence_lens.append(inputs_len)
# build current mask with zeros and ones
cur_mask = np.zeros(input_seq_len)
cur_mask[:inputs_len] = 1 # only the valid firsts should have the value of one
xx_padded = np.pad(inputs, ((0, input_seq_len - inputs_len), (0, 0)), mode='constant', constant_values=0.)
# here targets do NOT need to be padded because we do not have a sequence to sequence model
# yy_padded = np.pad(targets, (0, series_max_len - len(targets)), mode='constant', constant_values=0.)
assert len(xx_padded) == input_seq_len
self.XX = np.vstack((self.XX, xx_padded[np.newaxis]))
self.YY = np.vstack((self.YY, targets[np.newaxis]))
self.seq_mask = np.vstack((self.seq_mask, cur_mask[np.newaxis]))
def get_data(self, fraction=None, random_state=None):
# from sklearn.model_selection import train_test_split
skuIds, xx, yy, seqLens, seqMask = np.array(self.sku_ids), self.XX, self.YY, np.array(
self.sequence_lens), self.seq_mask
if fraction is None:
return skuIds, xx, yy, seqLens, seqMask
else:
random_state = np.random if random_state is None else random_state
cur_len = len(skuIds)
assert cur_len == len(xx) and cur_len == len(yy) and cur_len == len(seqLens) and cur_len == len(seqMask)
random_inds = random_state.choice(cur_len, int(cur_len * fraction))
return skuIds[random_inds], xx[random_inds], yy[random_inds], seqLens[random_inds], seqMask[random_inds]
def save(self, filepath, fraction=None, random_state=None):
if fraction is None:
np.savez(filepath, sku_ids=self.sku_ids, inputs=self.XX, targets=self.YY,
sequence_lengths=self.sequence_lens,
sequence_masks=self.seq_mask)
else:
skuIds, xx, yy, seqLens, seqMask = self.get_data(fraction=fraction, random_state=random_state)
np.savez(filepath, sku_ids=skuIds, inputs=xx, targets=yy, sequence_lengths=seqLens, sequence_masks=seqMask)
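# Minimal usage sketch (hypothetical shapes and values, for illustration only):
#
#   pack = PriceHistoryPack(input_seq_len=60, num_features=1, target_seq_len=30)
#   inputs = np.random.rand(47, 1)    # a sequence shorter than input_seq_len
#   targets = np.random.rand(30)      # exactly target_seq_len values
#   pack.update(sku_id=12345, inputs=inputs, targets=targets, input_seq_len=60)
#   sku_ids, xx, yy, seq_lens, seq_mask = pack.get_data()
#   pack.save('price_history.npz', fraction=0.1)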
| pligor/predicting-future-product-prices | 04_time_series_prediction/data_providers/price_history_pack.py | Python | agpl-3.0 | 2,640 |
# -*- coding: utf-8 -*-
"""
Unit tests covering the program listing and detail pages.
"""
import json
import re
from urlparse import urljoin
from uuid import uuid4
import mock
from bs4 import BeautifulSoup
from django.conf import settings
from django.urls import reverse, reverse_lazy
from django.test import override_settings
from lms.envs.test import CREDENTIALS_PUBLIC_SERVICE_URL
from openedx.core.djangoapps.catalog.tests.factories import CourseFactory, CourseRunFactory, ProgramFactory
from openedx.core.djangoapps.catalog.tests.mixins import CatalogIntegrationMixin
from openedx.core.djangoapps.credentials import STUDENT_RECORDS_FLAG
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory as ModuleStoreCourseFactory
PROGRAMS_UTILS_MODULE = 'openedx.core.djangoapps.programs.utils'
@skip_unless_lms
@override_settings(MKTG_URLS={'ROOT': 'https://www.example.com'})
@mock.patch(PROGRAMS_UTILS_MODULE + '.get_programs')
class TestProgramListing(ProgramsApiConfigMixin, SharedModuleStoreTestCase):
"""Unit tests for the program listing page."""
shard = 4
maxDiff = None
password = 'test'
url = reverse_lazy('program_listing_view')
@classmethod
def setUpClass(cls):
super(TestProgramListing, cls).setUpClass()
cls.course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=unicode(cls.course.id)) # pylint: disable=no-member
course = CourseFactory(course_runs=[course_run])
cls.first_program = ProgramFactory(courses=[course])
cls.second_program = ProgramFactory(courses=[course])
cls.data = sorted([cls.first_program, cls.second_program], key=cls.program_sort_key)
def setUp(self):
super(TestProgramListing, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
@classmethod
def program_sort_key(cls, program):
"""
Helper function used to sort dictionaries representing programs.
"""
return program['title']
def load_serialized_data(self, response, key):
"""
Extract and deserialize serialized data from the response.
"""
pattern = re.compile(r'{key}: (?P<data>\[.*\])'.format(key=key))
match = pattern.search(response.content)
serialized = match.group('data')
return json.loads(serialized)
def assert_dict_contains_subset(self, superset, subset):
"""
Verify that the dict superset contains the dict subset.
Works like assertDictContainsSubset, deprecated since Python 3.2.
See: https://docs.python.org/2.7/library/unittest.html#unittest.TestCase.assertDictContainsSubset.
"""
superset_keys = set(superset.keys())
subset_keys = set(subset.keys())
intersection = {key: superset[key] for key in superset_keys & subset_keys}
self.assertEqual(subset, intersection)
def test_login_required(self, mock_get_programs):
"""
Verify that login is required to access the page.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
self.client.logout()
response = self.client.get(self.url)
self.assertRedirects(
response,
'{}?next={}'.format(reverse('signin_user'), self.url)
)
self.client.login(username=self.user.username, password=self.password)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_404_if_disabled(self, _mock_get_programs):
"""
Verify that the page 404s if disabled.
"""
self.create_programs_config(enabled=False)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_empty_state(self, mock_get_programs):
"""
Verify that the response contains no programs data when no programs are engaged.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
response = self.client.get(self.url)
self.assertContains(response, 'programsData: []')
def test_programs_listed(self, mock_get_programs):
"""
Verify that the response contains accurate programs data when programs are engaged.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
CourseEnrollmentFactory(user=self.user, course_id=self.course.id) # pylint: disable=no-member
response = self.client.get(self.url)
actual = self.load_serialized_data(response, 'programsData')
actual = sorted(actual, key=self.program_sort_key)
for index, actual_program in enumerate(actual):
expected_program = self.data[index]
self.assert_dict_contains_subset(actual_program, expected_program)
def test_program_discovery(self, mock_get_programs):
"""
Verify that a link to a programs marketing page appears in the response.
"""
self.create_programs_config(marketing_path='bar')
mock_get_programs.return_value = self.data
marketing_root = urljoin(settings.MKTG_URLS.get('ROOT'), 'bar').rstrip('/')
response = self.client.get(self.url)
self.assertContains(response, marketing_root)
def test_links_to_detail_pages(self, mock_get_programs):
"""
Verify that links to detail pages are present.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
CourseEnrollmentFactory(user=self.user, course_id=self.course.id) # pylint: disable=no-member
response = self.client.get(self.url)
actual = self.load_serialized_data(response, 'programsData')
actual = sorted(actual, key=self.program_sort_key)
for index, actual_program in enumerate(actual):
expected_program = self.data[index]
expected_url = reverse('program_details_view', kwargs={'program_uuid': expected_program['uuid']})
self.assertEqual(actual_program['detail_url'], expected_url)
@skip_unless_lms
@mock.patch(PROGRAMS_UTILS_MODULE + '.get_programs')
@override_waffle_flag(STUDENT_RECORDS_FLAG, active=True)
class TestProgramDetails(ProgramsApiConfigMixin, CatalogIntegrationMixin, SharedModuleStoreTestCase):
"""Unit tests for the program details page."""
shard = 4
program_uuid = str(uuid4())
password = 'test'
url = reverse_lazy('program_details_view', kwargs={'program_uuid': program_uuid})
@classmethod
def setUpClass(cls):
super(TestProgramDetails, cls).setUpClass()
modulestore_course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=unicode(modulestore_course.id))
course = CourseFactory(course_runs=[course_run])
cls.data = ProgramFactory(uuid=cls.program_uuid, courses=[course])
def setUp(self):
super(TestProgramDetails, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
def assert_program_data_present(self, response):
"""Verify that program data is present."""
self.assertContains(response, 'programData')
self.assertContains(response, 'urls')
self.assertContains(response,
'"program_record_url": "{}/records/programs/'.format(CREDENTIALS_PUBLIC_SERVICE_URL))
self.assertContains(response, 'program_listing_url')
self.assertContains(response, self.data['title'])
self.assert_programs_tab_present(response)
def assert_programs_tab_present(self, response):
"""Verify that the programs tab is present in the nav."""
soup = BeautifulSoup(response.content, 'html.parser')
self.assertTrue(
any(soup.find_all('a', class_='tab-nav-link', href=reverse('program_listing_view')))
)
def test_login_required(self, mock_get_programs):
"""
Verify that login is required to access the page.
"""
self.create_programs_config()
catalog_integration = self.create_catalog_integration()
UserFactory(username=catalog_integration.service_username)
mock_get_programs.return_value = self.data
self.client.logout()
response = self.client.get(self.url)
self.assertRedirects(
response,
'{}?next={}'.format(reverse('signin_user'), self.url)
)
self.client.login(username=self.user.username, password=self.password)
with mock.patch('lms.djangoapps.learner_dashboard.programs.get_certificates') as certs:
certs.return_value = [{'type': 'program', 'url': '/'}]
response = self.client.get(self.url)
self.assert_program_data_present(response)
def test_404_if_disabled(self, _mock_get_programs):
"""
Verify that the page 404s if disabled.
"""
self.create_programs_config(enabled=False)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_404_if_no_data(self, mock_get_programs):
"""Verify that the page 404s if no program data is found."""
self.create_programs_config()
mock_get_programs.return_value = None
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
| gsehub/edx-platform | lms/djangoapps/learner_dashboard/tests/test_programs.py | Python | agpl-3.0 | 9,854 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.http import HttpResponse
from openpyxl import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from openpyxl.styles import Color, Style, PatternFill, Font, colors
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from base import models as mdl
from base.models.enums import exam_enrollment_justification_type
HEADER = ['academic_year', 'session_title', 'learning_unit', 'program', 'registration_number', 'lastname', 'firstname',
'email', 'numbered_score', 'justification', 'end_date']
JUSTIFICATION_ALIASES = {
exam_enrollment_justification_type.ABSENCE_JUSTIFIED : "M",
exam_enrollment_justification_type.ABSENCE_UNJUSTIFIED : "S",
exam_enrollment_justification_type.CHEATING : "T",
}
def export_xls(exam_enrollments):
workbook = Workbook()
worksheet = workbook.active
worksheet.append([str(exam_enrollments[0].learning_unit_enrollment.learning_unit_year)])
worksheet.append([str('Session: %s' % exam_enrollments[0].session_exam.number_session)])
worksheet.append([str('')])
__display_creation_date_with_message_about_state(worksheet, row_number=4)
__display_warning_about_students_deliberated(worksheet, row_number=5)
worksheet.append([str('')])
__display_legends(worksheet)
worksheet.append([str('')])
__columns_resizing(worksheet)
header_translate_list = [str(_(elem)) for elem in HEADER]
worksheet.append(header_translate_list)
row_number = 11
for exam_enroll in exam_enrollments:
student = exam_enroll.learning_unit_enrollment.student
offer = exam_enroll.learning_unit_enrollment.offer
person = mdl.person.find_by_id(student.person.id)
end_date = __get_session_exam_deadline(exam_enroll)
score = None
if exam_enroll.score_final is not None:
if exam_enroll.session_exam.learning_unit_year.decimal_scores:
score = "{0:.2f}".format(exam_enroll.score_final)
else:
score = "{0:.0f}".format(exam_enroll.score_final)
justification = JUSTIFICATION_ALIASES.get(exam_enroll.justification_final, "")
worksheet.append([str(exam_enroll.learning_unit_enrollment.learning_unit_year.academic_year),
str(exam_enroll.session_exam.number_session),
exam_enroll.session_exam.learning_unit_year.acronym,
offer.acronym,
student.registration_id,
person.last_name,
person.first_name,
person.email,
score,
str(justification),
end_date])
row_number += 1
__coloring_non_editable(worksheet, row_number, score, exam_enroll.justification_final)
lst_exam_enrollments = list(exam_enrollments)
number_session = lst_exam_enrollments[0].session_exam.number_session
learn_unit_acronym = lst_exam_enrollments[0].session_exam.learning_unit_year.acronym
academic_year = lst_exam_enrollments[0].learning_unit_enrollment.learning_unit_year.academic_year
filename = "session_%s_%s_%s.xlsx" % (str(academic_year.year), str(number_session), learn_unit_acronym)
response = HttpResponse(save_virtual_workbook(workbook), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
def __columns_resizing(ws):
"""
Definition of the columns sizes
"""
col_academic_year = ws.column_dimensions['A']
col_academic_year.width = 18
col_academic_year = ws.column_dimensions['C']
col_academic_year.width = 18
col_academic_year = ws.column_dimensions['E']
col_academic_year.width = 18
col_last_name = ws.column_dimensions['F']
col_last_name.width = 25
col_first_name = ws.column_dimensions['G']
col_first_name.width = 25
col_email = ws.column_dimensions['H']
col_email.width = 30
col_note = ws.column_dimensions['I']
col_note.width = 15
col_note = ws.column_dimensions['J']
col_note.width = 15
col_note = ws.column_dimensions['K']
col_note.width = 15
def __coloring_non_editable(ws, row_number, score, justification):
"""
Coloring of the non-editable columns
"""
pattern_fill_grey = PatternFill(patternType='solid', fgColor=Color('C1C1C1'))
style_no_modification = Style(fill=pattern_fill_grey)
column_number = 1
while column_number < 12:
if column_number < 9 or column_number > 10:
ws.cell(row=row_number, column=column_number).style = style_no_modification
else:
if not(score is None and justification is None):
ws.cell(row=row_number, column=9).style = style_no_modification
ws.cell(row=row_number, column=10).style = style_no_modification
column_number += 1
def __display_creation_date_with_message_about_state(ws, row_number):
date_format = str(_('date_format'))
printing_date = timezone.now()
printing_date = printing_date.strftime(date_format)
ws.cell(row=row_number, column=1).value = str('%s' % (_('warn_user_data_can_change') % printing_date))
ws.cell(row=row_number, column=1).font = Font(color=colors.RED)
def __display_warning_about_students_deliberated(ws, row_number):
ws.cell(row=row_number, column=1).value = str(_('students_deliberated_are_not_shown'))
ws.cell(row=row_number, column=1).font = Font(color=colors.RED)
def __display_legends(ws):
ws.append([
str(_('justification')),
str(_('justification_values_accepted') % mdl.exam_enrollment.justification_label_authorized())
])
ws.append([
str(''),
str(_('justification_other_values') % justification_other_values())
])
ws.append([
str(_('numbered_score')),
str(_('score_legend') % "0 - 20")
])
def justification_other_values():
return "%s, %s" % (_('unjustified_absence_export_legend'),
_('justified_absence_export_legend'))
def __get_session_exam_deadline(exam_enroll):
date_format = str(_('date_format'))
deadline = None
session_exam_deadline = mdl.exam_enrollment.get_session_exam_deadline(exam_enroll)
if session_exam_deadline:
deadline = session_exam_deadline.deadline_tutor_computed if session_exam_deadline.deadline_tutor_computed else\
session_exam_deadline.deadline
return deadline.strftime(date_format) if deadline else "-"
| uclouvain/osis_louvain | assessments/business/score_encoding_export.py | Python | agpl-3.0 | 7,914 |
from odoo.osv import expression
from odoo.addons.sale_timesheet.models.account import AccountAnalyticLine
def _timesheet_get_portal_domain(self):
""" WE revert this functionality of odoo. We want to show details of ordered quantities also
Only the timesheets with a product invoiced on delivered quantity are concerned.
since in ordered quantity, the timesheet quantity is not invoiced,
thus there is no meaning of showing invoice with ordered quantity.
"""
domain = super(AccountAnalyticLine, self)._timesheet_get_portal_domain()
return expression.AND(
[domain, [('timesheet_invoice_type', 'in', ['billable_time', 'non_billable', 'billable_fixed'])]])
AccountAnalyticLine._timesheet_get_portal_domain = _timesheet_get_portal_domain
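# Note: re-binding the function on AccountAnalyticLine monkey-patches the
# standard sale_timesheet method in place, so the wider portal domain applies
# wherever the original method is called.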
| ingadhoc/sale | sale_timesheet_ux/models/account_analytic_line.py | Python | agpl-3.0 | 785 |
class Backstab:
pass
| etkirsch/legends-of-erukar | erukar/content/skills/thievery/Backstab.py | Python | agpl-3.0 | 25 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import random
import time
import uuid
from openerp import SUPERUSER_ID
import simplejson
from openerp import api
from openerp import tools
from openerp.osv import fields, osv
from openerp.osv import expression
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
import openerp
_logger = logging.getLogger(__name__)
FULL_ACCESS = ('perm_read', 'perm_write', 'perm_create', 'perm_unlink')
READ_WRITE_ACCESS = ('perm_read', 'perm_write')
READ_ONLY_ACCESS = ('perm_read',)
UID_ROOT = 1
# Pseudo-domain to represent an empty filter, constructed using
# osv.expression's DUMMY_LEAF
DOMAIN_ALL = [(1, '=', 1)]
# A good selection of easy to read password characters (e.g. no '0' vs 'O', etc.)
RANDOM_PASS_CHARACTERS = 'aaaabcdeeeefghjkmnpqrstuvwxyzAAAABCDEEEEFGHJKLMNPQRSTUVWXYZ23456789'
def generate_random_pass():
return ''.join(random.sample(RANDOM_PASS_CHARACTERS,10))
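# e.g. generate_random_pass() -> 'k3mWVyxHqD' (10 characters drawn without
# replacement from RANDOM_PASS_CHARACTERS; output shown is illustrative only)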
class share_wizard(osv.TransientModel):
_name = 'share.wizard'
_description = 'Share Wizard'
def _assert(self, condition, error_message, context=None):
"""Raise a user error with the given message if condition is not met.
The error_message should have been translated with _().
"""
if not condition:
raise osv.except_osv(_('Sharing access cannot be created.'), error_message)
def has_group(self, cr, uid, module, group_xml_id, context=None):
"""Returns True if current user is a member of the group identified by the module, group_xml_id pair."""
# if the group was deleted or does not exist, we say NO (better safe than sorry)
try:
model, group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, module, group_xml_id)
except ValueError:
return False
return group_id in self.pool.get('res.users').read(cr, uid, [uid], ['groups_id'], context=context)[0]['groups_id']
def has_share(self, cr, uid, unused_param, context=None):
return self.has_group(cr, uid, module='base', group_xml_id='group_no_one', context=context)
def _user_type_selection(self, cr, uid, context=None):
"""Selection values may be easily overridden/extended via inheritance"""
return [('embedded', _('Direct link or embed code')), ('emails',_('Emails')), ]
"""Override of create() to auto-compute the action name"""
def create(self, cr, uid, values, context=None):
if 'action_id' in values and not 'name' in values:
action = self.pool.get('ir.actions.actions').browse(cr, uid, values['action_id'], context=context)
values['name'] = action.name
return super(share_wizard,self).create(cr, uid, values, context=context)
@api.cr_uid_ids_context
def share_url_template(self, cr, uid, _ids, context=None):
# NOTE: take _ids in parameter to allow usage through browse_record objects
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='', context=context)
if base_url:
base_url += '/login?db=%(dbname)s&login=%(login)s&key=%(password)s'
extra = context and context.get('share_url_template_extra_arguments')
if extra:
base_url += '&' + '&'.join('%s=%%(%s)s' % (x,x) for x in extra)
hash_ = context and context.get('share_url_template_hash_arguments')
if hash_:
base_url += '#' + '&'.join('%s=%%(%s)s' % (x,x) for x in hash_)
return base_url
def _share_root_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
data = dict(dbname=cr.dbname, login='', password='')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = this.share_url_template() % data
return result
def _generate_embedded_code(self, wizard, options=None):
cr, uid, context = wizard.env.args
if options is None:
options = {}
js_options = {}
title = options['title'] if 'title' in options else wizard.embed_option_title
search = (options['search'] if 'search' in options else wizard.embed_option_search) if wizard.access_mode != 'readonly' else False
if not title:
js_options['display_title'] = False
if search:
js_options['search_view'] = True
js_options_str = (', ' + simplejson.dumps(js_options)) if js_options else ''
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default=None, context=context)
user = wizard.result_line_ids[0]
return """
<script type="text/javascript" src="%(base_url)s/web/webclient/js"></script>
<script type="text/javascript">
new openerp.init(%(init)s).web.embed(%(server)s, %(dbname)s, %(login)s, %(password)s,%(action)d%(options)s);
</script> """ % {
'init': simplejson.dumps(openerp.conf.server_wide_modules),
'base_url': base_url or '',
'server': simplejson.dumps(base_url),
'dbname': simplejson.dumps(cr.dbname),
'login': simplejson.dumps(user.login),
'password': simplejson.dumps(user.password),
'action': user.user_id.action_id.id,
'options': js_options_str,
}
def _embed_code(self, cr, uid, ids, _fn, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = self._generate_embedded_code(this)
return result
def _embed_url(self, cr, uid, ids, _fn, _args, context=None):
if context is None:
context = {}
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
if this.result_line_ids:
ctx = dict(context, share_url_template_hash_arguments=['action'])
user = this.result_line_ids[0]
data = dict(dbname=cr.dbname, login=user.login, password=user.password, action=this.action_id.id)
result[this.id] = this.share_url_template(context=ctx) % data
return result
_columns = {
'action_id': fields.many2one('ir.actions.act_window', 'Action to share', required=True,
help="The action that opens the screen containing the data you wish to share."),
'view_type': fields.char('Current View Type', required=True),
'domain': fields.char('Domain', help="Optional domain for further data filtering"),
'user_type': fields.selection(lambda s, *a, **k: s._user_type_selection(*a, **k),'Sharing method', required=True,
help="Select the type of user(s) you would like to share data with."),
'new_users': fields.text("Emails"),
'email_1': fields.char('New user email', size=64),
'email_2': fields.char('New user email', size=64),
'email_3': fields.char('New user email', size=64),
'invite': fields.boolean('Invite users to OpenSocial record'),
'access_mode': fields.selection([('readonly','Can view'),('readwrite','Can edit')],'Access Mode', required=True,
help="Access rights to be granted on the shared documents."),
'result_line_ids': fields.one2many('share.wizard.result.line', 'share_wizard_id', 'Summary', readonly=True),
'share_root_url': fields.function(_share_root_url, string='Share Access URL', type='char', readonly=True,
help='Main access page for users that are granted shared access'),
'name': fields.char('Share Title', required=True, help="Title for the share (displayed to users as menu and shortcut name)"),
'record_name': fields.char('Record name', help="Name of the shared record, if sharing a precise record"),
'message': fields.text("Personal Message", help="An optional personal message, to be included in the email notification."),
'embed_code': fields.function(_embed_code, type='text', string='Code',
help="Embed this code in your documents to provide a link to the "\
"shared document."),
'embed_option_title': fields.boolean('Display title'),
'embed_option_search': fields.boolean('Display search view'),
'embed_url': fields.function(_embed_url, string='Share URL', size=512, type='char', readonly=True),
}
_defaults = {
'view_type': 'page',
'user_type' : 'embedded',
'invite': False,
'domain': lambda self, cr, uid, context, *a: context.get('domain', '[]'),
'action_id': lambda self, cr, uid, context, *a: context.get('action_id'),
'access_mode': 'readwrite',
'embed_option_title': True,
'embed_option_search': True,
}
def has_email(self, cr, uid, context=None):
return bool(self.pool.get('res.users').browse(cr, uid, uid, context=context).email)
def go_step_1(self, cr, uid, ids, context=None):
wizard_data = self.browse(cr,uid,ids,context)[0]
if wizard_data.user_type == 'emails' and not self.has_email(cr, uid, context=context):
raise osv.except_osv(_('No email address configured'),
_('You must configure your email address in the user preferences before using the Share button.'))
model, res_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'action_share_wizard_step1')
action = self.pool[model].read(cr, uid, [res_id], context=context)[0]
action['res_id'] = ids[0]
action.pop('context', '')
return action
def _create_share_group(self, cr, uid, wizard_data, context=None):
group_obj = self.pool.get('res.groups')
share_group_name = '%s: %s (%d-%s)' %('Shared', wizard_data.name, uid, time.time())
# create share group without putting admin in it
return group_obj.create(cr, UID_ROOT, {'name': share_group_name, 'share': True}, {'noadmin': True})
def _create_new_share_users(self, cr, uid, wizard_data, group_id, context=None):
"""Create one new res.users record for each email address provided in
wizard_data.new_users, ignoring already existing users.
Populates wizard_data.result_line_ids with one new line for
each user (existing or not). New users will also have a value
for the password field, so they can receive it by email.
Returns the ids of the created users, and the ids of the
ignored, existing ones."""
context = dict(context or {})
user_obj = self.pool.get('res.users')
current_user = user_obj.browse(cr, UID_ROOT, uid, context=context)
# modify context to disable shortcuts when creating share users
context['noshortcut'] = True
context['no_reset_password'] = True
created_ids = []
existing_ids = []
if wizard_data.user_type == 'emails':
# get new user list from email data
new_users = (wizard_data.new_users or '').split('\n')
new_users += [wizard_data.email_1 or '', wizard_data.email_2 or '', wizard_data.email_3 or '']
for new_user in new_users:
# Ignore blank lines
new_user = new_user.strip()
if not new_user: continue
# Ignore the user if it already exists.
if not wizard_data.invite:
existing = user_obj.search(cr, UID_ROOT, [('login', '=', new_user)])
else:
existing = user_obj.search(cr, UID_ROOT, [('email', '=', new_user)])
existing_ids.extend(existing)
if existing:
new_line = { 'user_id': existing[0],
'newly_created': False}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
continue
new_pass = generate_random_pass()
user_id = user_obj.create(cr, UID_ROOT, {
'login': new_user,
'password': new_pass,
'name': new_user,
'email': new_user,
'groups_id': [(6,0,[group_id])],
'company_id': current_user.company_id.id,
'company_ids': [(6, 0, [current_user.company_id.id])],
}, context)
new_line = { 'user_id': user_id,
'password': new_pass,
'newly_created': True}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
created_ids.append(user_id)
elif wizard_data.user_type == 'embedded':
new_login = 'embedded-%s' % (uuid.uuid4().hex,)
new_pass = generate_random_pass()
user_id = user_obj.create(cr, UID_ROOT, {
'login': new_login,
'password': new_pass,
'name': new_login,
'groups_id': [(6,0,[group_id])],
'company_id': current_user.company_id.id,
'company_ids': [(6, 0, [current_user.company_id.id])],
}, context)
new_line = { 'user_id': user_id,
'password': new_pass,
'newly_created': True}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
created_ids.append(user_id)
return created_ids, existing_ids
def _create_action(self, cr, uid, values, context=None):
if context is None:
context = {}
new_context = context.copy()
for key in context:
if key.startswith('default_'):
del new_context[key]
action_id = self.pool.get('ir.actions.act_window').create(cr, UID_ROOT, values, new_context)
return action_id
def _cleanup_action_context(self, context_str, user_id):
"""Returns a dict representing the context_str evaluated (safe_eval) as
a dict where items that are not useful for shared actions
have been removed. If the evaluation of context_str as a
dict fails, context_str is returned unaltered.
:param user_id: the integer uid to be passed as 'uid' in the
evaluation context
"""
result = False
if context_str:
try:
context = safe_eval(context_str, tools.UnquoteEvalContext(), nocopy=True)
result = dict(context)
for key in context:
# Remove all context keys that seem to toggle default
# filters based on the current user, as it makes no sense
# for shared users, who would not see any data by default.
if key and key.startswith('search_default_') and 'user_id' in key:
result.pop(key)
except Exception:
# Note: must catch all exceptions, as UnquoteEvalContext may cause many
# different exceptions, as it shadows builtins.
_logger.debug("Failed to cleanup action context as it does not parse server-side", exc_info=True)
result = context_str
return result
def _shared_action_def(self, cr, uid, wizard_data, context=None):
copied_action = wizard_data.action_id
if wizard_data.access_mode == 'readonly':
view_mode = wizard_data.view_type
view_id = copied_action.view_id.id if copied_action.view_id.type == wizard_data.view_type else False
else:
view_mode = copied_action.view_mode
view_id = copied_action.view_id.id
action_def = {
'name': wizard_data.name,
'domain': copied_action.domain,
'context': self._cleanup_action_context(wizard_data.action_id.context, uid),
'res_model': copied_action.res_model,
'view_mode': view_mode,
'view_type': copied_action.view_type,
'search_view_id': copied_action.search_view_id.id if wizard_data.access_mode != 'readonly' else False,
'view_id': view_id,
'auto_search': True,
}
if copied_action.view_ids:
action_def['view_ids'] = [(0,0,{'sequence': x.sequence,
'view_mode': x.view_mode,
'view_id': x.view_id.id })
for x in copied_action.view_ids
if (wizard_data.access_mode != 'readonly' or x.view_mode == wizard_data.view_type)
]
return action_def
def _setup_action_and_shortcut(self, cr, uid, wizard_data, user_ids, make_home, context=None):
"""Create a shortcut to reach the shared data, as well as the corresponding action, for
each user in ``user_ids``, and assign it as their home action if ``make_home`` is True.
Meant to be overridden for special cases.
"""
values = self._shared_action_def(cr, uid, wizard_data, context=None)
user_obj = self.pool.get('res.users')
for user_id in user_ids:
action_id = self._create_action(cr, user_id, values)
if make_home:
# We do this only for new share users, as existing ones already have their initial home
# action. Resetting to the default menu does not work well as the menu is rather empty
# and does not contain the shortcuts in most cases.
user_obj.write(cr, UID_ROOT, [user_id], {'action_id': action_id})
def _get_recursive_relations(self, cr, uid, model, ttypes, relation_fields=None, suffix=None, context=None):
"""Returns list of tuples representing recursive relationships of type ``ttypes`` starting from
model with ID ``model_id``.
:param model: browsable model to start loading relationships from
:param ttypes: list of relationship types to follow (e.g: ['one2many','many2many'])
:param relation_fields: list of previously followed relationship tuples - to avoid duplicates
during recursion
:param suffix: optional suffix to append to the field path to reach the main object
"""
if relation_fields is None:
relation_fields = []
local_rel_fields = []
models = [x[1].model for x in relation_fields]
model_obj = self.pool.get('ir.model')
model_osv = self.pool[model.model]
for field in model_osv._fields.itervalues():
ftype = field.type
relation_field = None
if ftype in ttypes and field.comodel_name not in models:
relation_model_id = model_obj.search(cr, UID_ROOT, [('model','=',field.comodel_name)])[0]
relation_model_browse = model_obj.browse(cr, UID_ROOT, relation_model_id, context=context)
relation_osv = self.pool[field.comodel_name]
                # skip virtual one2many fields (related, ...) as there is no reverse relationship
if ftype == 'one2many' and field.inverse_name:
# don't record reverse path if it's not a real m2o (that happens, but rarely)
dest_fields = relation_osv._fields
reverse_rel = field.inverse_name
if reverse_rel in dest_fields and dest_fields[reverse_rel].type == 'many2one':
relation_field = ('%s.%s'%(reverse_rel, suffix)) if suffix else reverse_rel
local_rel_fields.append((relation_field, relation_model_browse))
for parent in relation_osv._inherits:
if parent not in models:
parent_model = self.pool[parent]
parent_fields = parent_model._fields
parent_model_browse = model_obj.browse(cr, UID_ROOT,
model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
if relation_field and field.inverse_name in parent_fields:
# inverse relationship is available in the parent
local_rel_fields.append((relation_field, parent_model_browse))
else:
# TODO: can we setup a proper rule to restrict inherited models
# in case the parent does not contain the reverse m2o?
local_rel_fields.append((None, parent_model_browse))
if relation_model_id != model.id and ftype in ['one2many', 'many2many']:
local_rel_fields += self._get_recursive_relations(cr, uid, relation_model_browse,
[ftype], relation_fields + local_rel_fields, suffix=relation_field, context=context)
return local_rel_fields
def _get_relationship_classes(self, cr, uid, model, context=None):
"""Computes the *relationship classes* reachable from the given
model. The 4 relationship classes are:
- [obj0]: the given model itself (and its parents via _inherits, if any)
- [obj1]: obj0 and all other models recursively accessible from
obj0 via one2many relationships
- [obj2]: obj0 and all other models recursively accessible from
obj0 via one2many and many2many relationships
- [obj3]: all models recursively accessible from obj1 via many2one
relationships
        Each class is returned as a list of pairs [(field, model_browse)], where
        ``model_browse`` is the browse_record of a reachable ir.model, and ``field`` is
        the dot-notation reverse relationship path coming from that model to obj0,
        or None if there is no reverse path.
:return: ([obj0], [obj1], [obj2], [obj3])
"""
# obj0 class and its parents
obj0 = [(None, model)]
model_obj = self.pool[model.model]
ir_model_obj = self.pool.get('ir.model')
for parent in model_obj._inherits:
parent_model_browse = ir_model_obj.browse(cr, UID_ROOT,
ir_model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
obj0 += [(None, parent_model_browse)]
obj1 = self._get_recursive_relations(cr, uid, model, ['one2many'], relation_fields=obj0, context=context)
obj2 = self._get_recursive_relations(cr, uid, model, ['one2many', 'many2many'], relation_fields=obj0, context=context)
obj3 = self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
for dummy, model in obj1:
obj3 += self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
return obj0, obj1, obj2, obj3
    def _get_access_map_for_groups_and_models(self, cr, uid, group_ids, model_ids, context=None):
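        # Builds a dict mapping each model name to the set of permissions granted
        # to the given groups, e.g. {'res.partner': set(['read', 'write'])} --
        # the exact permission names depend on the FULL_ACCESS constant defined
        # earlier in this file (the example values here are assumptions).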
model_access_obj = self.pool.get('ir.model.access')
user_right_ids = model_access_obj.search(cr, uid,
[('group_id', 'in', group_ids), ('model_id', 'in', model_ids)],
context=context)
user_access_matrix = {}
if user_right_ids:
for access_right in model_access_obj.browse(cr, uid, user_right_ids, context=context):
access_line = user_access_matrix.setdefault(access_right.model_id.model, set())
for perm in FULL_ACCESS:
if getattr(access_right, perm, 0):
access_line.add(perm)
return user_access_matrix
def _add_access_rights_for_share_group(self, cr, uid, group_id, mode, fields_relations, context=None):
"""Adds access rights to group_id on object models referenced in ``fields_relations``,
intersecting with access rights of current user to avoid granting too much rights
"""
model_access_obj = self.pool.get('ir.model.access')
user_obj = self.pool.get('res.users')
target_model_ids = [x[1].id for x in fields_relations]
perms_to_add = (mode == 'readonly') and READ_ONLY_ACCESS or READ_WRITE_ACCESS
current_user = user_obj.browse(cr, uid, uid, context=context)
current_user_access_map = self._get_access_map_for_groups_and_models(cr, uid,
[x.id for x in current_user.groups_id], target_model_ids, context=context)
group_access_map = self._get_access_map_for_groups_and_models(cr, uid,
[group_id], target_model_ids, context=context)
_logger.debug("Current user access matrix: %r", current_user_access_map)
_logger.debug("New group current access matrix: %r", group_access_map)
# Create required rights if allowed by current user rights and not
# already granted
for dummy, model in fields_relations:
            # mail.message is transversal: it should not directly receive the access rights
if model.model in ['mail.message']: continue
values = {
'name': _('Copied access for sharing'),
'group_id': group_id,
'model_id': model.id,
}
current_user_access_line = current_user_access_map.get(model.model,set())
existing_group_access_line = group_access_map.get(model.model,set())
need_creation = False
for perm in perms_to_add:
if perm in current_user_access_line \
and perm not in existing_group_access_line:
values.update({perm:True})
group_access_map.setdefault(model.model, set()).add(perm)
need_creation = True
if need_creation:
model_access_obj.create(cr, UID_ROOT, values)
_logger.debug("Creating access right for model %s with values: %r", model.model, values)
def _link_or_copy_current_user_rules(self, cr, current_user, group_id, fields_relations, context=None):
rule_obj = self.pool.get('ir.rule')
rules_done = set()
for group in current_user.groups_id:
for dummy, model in fields_relations:
for rule in group.rule_groups:
if rule.id in rules_done:
continue
rules_done.add(rule.id)
if rule.model_id.id == model.id:
if 'user.' in rule.domain_force:
                            # The pattern above means the domain likely contains a condition
                            # specific to the current user, so we must copy the rule using
                            # the evaluated version of the domain.
                            # Better to copy once too often than too rarely.
rule_obj.copy(cr, UID_ROOT, rule.id, default={
'name': '%s %s' %(rule.name, _('(Copy for sharing)')),
'groups': [(6,0,[group_id])],
'domain_force': rule.domain, # evaluated version!
})
_logger.debug("Copying rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
else:
# otherwise we can simply link the rule to keep it dynamic
rule_obj.write(cr, SUPERUSER_ID, [rule.id], {
'groups': [(4,group_id)]
})
_logger.debug("Linking rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
def _check_personal_rule_or_duplicate(self, cr, group_id, rule, context=None):
"""Verifies that the given rule only belongs to the given group_id, otherwise
duplicate it for the current group, and unlink the previous one.
The duplicated rule has the original domain copied verbatim, without
any evaluation.
Returns the final rule to use (browse_record), either the original one if it
only belongs to this group, or the copy."""
if len(rule.groups) == 1:
return rule
# duplicate it first:
rule_obj = self.pool.get('ir.rule')
new_id = rule_obj.copy(cr, UID_ROOT, rule.id,
default={
'name': '%s %s' %(rule.name, _('(Duplicated for modified sharing permissions)')),
'groups': [(6,0,[group_id])],
'domain_force': rule.domain_force, # non evaluated!
})
_logger.debug("Duplicating rule %s (%s) (domain: %s) for modified access ", rule.name, rule.id, rule.domain_force)
# then disconnect from group_id:
rule.write({'groups':[(3,group_id)]}) # disconnects, does not delete!
return rule_obj.browse(cr, UID_ROOT, new_id, context=context)
def _create_or_combine_sharing_rule(self, cr, current_user, wizard_data, group_id, model_id, domain, restrict=False, rule_name=None, context=None):
"""Add a new ir.rule entry for model_id and domain on the target group_id.
If ``restrict`` is True, instead of adding a rule, the domain is
combined with AND operator with all existing rules in the group, to implement
an additional restriction (as of 6.1, multiple rules in the same group are
OR'ed by default, so a restriction must alter all existing rules)
This is necessary because the personal rules of the user that is sharing
are first copied to the new share group. Afterwards the filters used for
sharing are applied as an additional layer of rules, which are likely to
apply to the same model. The default rule algorithm would OR them (as of 6.1),
which would result in a combined set of permission that could be larger
than those of the user that is sharing! Hence we must forcefully AND the
rules at this stage.
One possibly undesirable effect can appear when sharing with a
pre-existing group, in which case altering pre-existing rules would not
be desired. This is addressed in the portal module.
"""
if rule_name is None:
rule_name = _('Sharing filter created by user %s (%s) for group %s') % \
(current_user.name, current_user.login, group_id)
rule_obj = self.pool.get('ir.rule')
rule_ids = rule_obj.search(cr, UID_ROOT, [('groups', 'in', group_id), ('model_id', '=', model_id)])
if rule_ids:
for rule in rule_obj.browse(cr, UID_ROOT, rule_ids, context=context):
if rule.domain_force == domain:
# don't create it twice!
if restrict:
continue
else:
_logger.debug("Ignoring sharing rule on model %s with domain: %s the same rule exists already", model_id, domain)
return
if restrict:
# restricting existing rules is done by adding the clause
# with an AND, but we can't alter the rule if it belongs to
# other groups, so we duplicate if needed
rule = self._check_personal_rule_or_duplicate(cr, group_id, rule, context=context)
eval_ctx = rule_obj._eval_context_for_combinations()
org_domain = expression.normalize_domain(eval(rule.domain_force, eval_ctx))
new_clause = expression.normalize_domain(eval(domain, eval_ctx))
combined_domain = expression.AND([new_clause, org_domain])
rule.write({'domain_force': combined_domain, 'name': rule.name + _('(Modified)')})
_logger.debug("Combining sharing rule %s on model %s with domain: %s", rule.id, model_id, domain)
if not rule_ids or not restrict:
# Adding the new rule in the group is ok for normal cases, because rules
# in the same group and for the same model will be combined with OR
# (as of v6.1), so the desired effect is achieved.
rule_obj.create(cr, UID_ROOT, {
'name': rule_name,
'model_id': model_id,
'domain_force': domain,
'groups': [(4,group_id)]
})
_logger.debug("Created sharing rule on model %s with domain: %s", model_id, domain)
def _create_indirect_sharing_rules(self, cr, current_user, wizard_data, group_id, fields_relations, context=None):
rule_name = _('Indirect sharing filter created by user %s (%s) for group %s') % \
(current_user.name, current_user.login, group_id)
try:
domain = safe_eval(wizard_data.domain)
if domain:
for rel_field, model in fields_relations:
                    # mail.message is transversal: it should not directly receive the access rights
if model.model in ['mail.message']: continue
related_domain = []
if not rel_field: continue
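                    # Prefix each domain leaf with the reverse relation path, e.g.
                    # ('state', '=', 'open') becomes ('project_id.state', '=', 'open')
                    # when rel_field is 'project_id' (illustrative values).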
for element in domain:
if expression.is_leaf(element):
left, operator, right = element
left = '%s.%s'%(rel_field, left)
element = left, operator, right
related_domain.append(element)
self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
group_id, model_id=model.id, domain=str(related_domain),
rule_name=rule_name, restrict=True, context=context)
except Exception:
_logger.exception('Failed to create share access')
raise osv.except_osv(_('Sharing access cannot be created.'),
_('Sorry, the current screen and filter you are trying to share are not supported at the moment.\nYou may want to try a simpler filter.'))
def _check_preconditions(self, cr, uid, wizard_data, context=None):
self._assert(wizard_data.action_id and wizard_data.access_mode,
_('Action and Access Mode are required to create a shared access.'),
context=context)
self._assert(self.has_share(cr, uid, wizard_data, context=context),
_('You must be a member of the Technical group to use the share wizard.'),
context=context)
if wizard_data.user_type == 'emails':
self._assert((wizard_data.new_users or wizard_data.email_1 or wizard_data.email_2 or wizard_data.email_3),
_('Please indicate the emails of the persons to share with, one per line.'),
context=context)
def _create_share_users_group(self, cr, uid, wizard_data, context=None):
"""Creates the appropriate share group and share users, and populates
result_line_ids of wizard_data with one line for each user.
:return: a tuple composed of the new group id (to which the shared access should be granted),
the ids of the new share users that have been created and the ids of the existing share users
"""
group_id = self._create_share_group(cr, uid, wizard_data, context=context)
# First create any missing user, based on the email addresses provided
new_ids, existing_ids = self._create_new_share_users(cr, uid, wizard_data, group_id, context=context)
# Finally, setup the new action and shortcut for the users.
if existing_ids:
# existing users still need to join the new group
self.pool.get('res.users').write(cr, UID_ROOT, existing_ids, {
'groups_id': [(4,group_id)],
})
            # existing users don't need their home action replaced, only a new shortcut
self._setup_action_and_shortcut(cr, uid, wizard_data, existing_ids, make_home=False, context=context)
if new_ids:
# new users need a new shortcut AND a home action
self._setup_action_and_shortcut(cr, uid, wizard_data, new_ids, make_home=True, context=context)
return group_id, new_ids, existing_ids
def go_step_2(self, cr, uid, ids, context=None):
wizard_data = self.browse(cr, uid, ids[0], context=context)
self._check_preconditions(cr, uid, wizard_data, context=context)
# Create shared group and users
group_id, new_ids, existing_ids = self._create_share_users_group(cr, uid, wizard_data, context=context)
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
model_obj = self.pool.get('ir.model')
model_id = model_obj.search(cr, uid, [('model','=', wizard_data.action_id.res_model)])[0]
model = model_obj.browse(cr, uid, model_id, context=context)
# ACCESS RIGHTS
# We have several classes of objects that should receive different access rights:
# Let:
# - [obj0] be the target model itself (and its parents via _inherits, if any)
# - [obj1] be the target model and all other models recursively accessible from
# obj0 via one2many relationships
# - [obj2] be the target model and all other models recursively accessible from
# obj0 via one2many and many2many relationships
# - [obj3] be all models recursively accessible from obj1 via many2one relationships
# (currently not used)
obj0, obj1, obj2, obj3 = self._get_relationship_classes(cr, uid, model, context=context)
mode = wizard_data.access_mode
# Add access to [obj0] and [obj1] according to chosen mode
self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj0, context=context)
self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj1, context=context)
# Add read-only access (always) to [obj2]
self._add_access_rights_for_share_group(cr, uid, group_id, 'readonly', obj2, context=context)
# IR.RULES
# A. On [obj0], [obj1], [obj2]: add all rules from all groups of
# the user that is sharing
# Warning: rules must be copied instead of linked if they contain a reference
# to uid or if the rule is shared with other groups (and it must be replaced correctly)
# B. On [obj0]: 1 rule with domain of shared action
# C. For each model in [obj1]: 1 rule in the form:
# many2one_rel.domain_of_obj0
# where many2one_rel is the many2one used in the definition of the
# one2many, and domain_of_obj0 is the sharing domain
# For example if [obj0] is project.project with a domain of
# ['id', 'in', [1,2]]
# then we will have project.task in [obj1] and we need to create this
# ir.rule on project.task:
# ['project_id.id', 'in', [1,2]]
# A.
all_relations = obj0 + obj1 + obj2
self._link_or_copy_current_user_rules(cr, current_user, group_id, all_relations, context=context)
# B.
main_domain = wizard_data.domain if wizard_data.domain != '[]' else str(DOMAIN_ALL)
self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
group_id, model_id=model.id, domain=main_domain,
restrict=True, context=context)
# C.
self._create_indirect_sharing_rules(cr, current_user, wizard_data, group_id, obj1, context=context)
# refresh wizard_data
wizard_data = self.browse(cr, uid, ids[0], context=context)
# EMAILS AND NOTIFICATIONS
# A. Not invite: as before
# -> send emails to destination users
# B. Invite (OpenSocial)
# -> subscribe all users (existing and new) to the record
# -> send a notification with a summary to the current record
# -> send a notification to all users; users allowing to receive
# emails in preferences will receive it
# new users by default receive all notifications by email
# A.
if not wizard_data.invite:
self.send_emails(cr, uid, wizard_data, context=context)
# B.
else:
# Invite (OpenSocial): automatically subscribe users to the record
res_id = 0
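            # the shared domain is expected to target a single record,
            # e.g. [('id', '=', 42)] (illustrative), from which the record id is extracted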
for cond in safe_eval(main_domain):
if cond[0] == 'id':
res_id = cond[2]
            # Record id not found: cannot subscribe users to the shared record
if res_id <= 0:
raise osv.except_osv(_('Record id not found'), _('The share engine has not been able to fetch a record_id for your invitation.'))
self.pool[model.model].message_subscribe(cr, uid, [res_id], new_ids + existing_ids, context=context)
# self.send_invite_email(cr, uid, wizard_data, context=context)
# self.send_invite_note(cr, uid, model.model, res_id, wizard_data, context=context)
# CLOSE
# A. Not invite: as before
# B. Invite: skip summary screen, get back to the record
# A.
if not wizard_data.invite:
dummy, step2_form_view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'share_step2_form')
return {
'name': _('Shared access created!'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'share.wizard',
'view_id': False,
'res_id': ids[0],
'views': [(step2_form_view_id, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
'target': 'new'
}
# B.
else:
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': model.model,
'view_id': False,
'res_id': res_id,
'views': [(False, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
}
def send_invite_note(self, cr, uid, model_name, res_id, wizard_data, context=None):
subject = _('Invitation')
body = 'has been <b>shared</b> with'
        # use enumerate so the separator logic below advances (tmp_idx was never incremented)
        for tmp_idx, result_line in enumerate(wizard_data.result_line_ids):
body += ' @%s' % (result_line.user_id.login)
if tmp_idx < len(wizard_data.result_line_ids)-2:
body += ','
elif tmp_idx == len(wizard_data.result_line_ids)-2:
body += ' and'
body += '.'
return self.pool[model_name].message_post(cr, uid, [res_id], body=body, context=context)
def send_invite_email(self, cr, uid, wizard_data, context=None):
# TDE Note: not updated because will disappear
message_obj = self.pool.get('mail.message')
notification_obj = self.pool.get('mail.notification')
user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
if not user.email:
raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
# TODO: also send an HTML version of this mail
for result_line in wizard_data.result_line_ids:
email_to = result_line.user_id.email
if not email_to:
continue
subject = _('Invitation to collaborate about %s') % (wizard_data.record_name)
body = _("Hello,\n\n")
body += _("I have shared %s (%s) with you!\n\n") % (wizard_data.record_name, wizard_data.name)
if wizard_data.message:
body += "%s\n\n" % (wizard_data.message)
if result_line.newly_created:
body += _("The documents are not attached, you can view them online directly on my Odoo server at:\n %s\n\n") % (result_line.share_url)
body += _("These are your credentials to access this protected area:\n")
body += "%s: %s" % (_("Username"), result_line.user_id.login) + "\n"
body += "%s: %s" % (_("Password"), result_line.password) + "\n"
body += "%s: %s" % (_("Database"), cr.dbname) + "\n"
body += _("The documents have been automatically added to your subscriptions.\n\n")
body += '%s\n\n' % ((user.signature or ''))
body += "--\n"
body += _("Odoo is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
"It is open source and can be found on http://www.openerp.com.")
msg_id = message_obj.schedule_with_attach(cr, uid, user.email, [email_to], subject, body, model='', context=context)
notification_obj.create(cr, uid, {'user_id': result_line.user_id.id, 'message_id': msg_id}, context=context)
def send_emails(self, cr, uid, wizard_data, context=None):
_logger.info('Sending share notifications by email...')
mail_mail = self.pool.get('mail.mail')
user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
if not user.email:
raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
# TODO: also send an HTML version of this mail
mail_ids = []
for result_line in wizard_data.result_line_ids:
email_to = result_line.user_id.email
if not email_to:
continue
subject = wizard_data.name
body = _("Hello,\n\n")
body += _("I've shared %s with you!\n\n") % wizard_data.name
body += _("The documents are not attached, you can view them online directly on my Odoo server at:\n %s\n\n") % (result_line.share_url)
if wizard_data.message:
body += '%s\n\n' % (wizard_data.message)
if result_line.newly_created:
body += _("These are your credentials to access this protected area:\n")
body += "%s: %s\n" % (_("Username"), result_line.user_id.login)
body += "%s: %s\n" % (_("Password"), result_line.password)
body += "%s: %s\n" % (_("Database"), cr.dbname)
else:
body += _("The documents have been automatically added to your current Odoo documents.\n")
body += _("You may use your current login (%s) and password to view them.\n") % result_line.user_id.login
body += "\n\n%s\n\n" % ( (user.signature or '') )
body += "--\n"
body += _("Odoo is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
"It is open source and can be found on http://www.openerp.com.")
mail_ids.append(mail_mail.create(cr, uid, {
'email_from': user.email,
'email_to': email_to,
'subject': subject,
'body_html': '<pre>%s</pre>' % body}, context=context))
# force direct delivery, as users expect instant notification
mail_mail.send(cr, uid, mail_ids, context=context)
_logger.info('%d share notification(s) sent.', len(mail_ids))
def onchange_embed_options(self, cr, uid, ids, opt_title, opt_search, context=None):
wizard = self.browse(cr, uid, ids[0], context)
options = dict(title=opt_title, search=opt_search)
return {'value': {'embed_code': self._generate_embedded_code(wizard, options)}}
class share_result_line(osv.osv_memory):
_name = 'share.wizard.result.line'
_rec_name = 'user_id'
def _share_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
data = dict(dbname=cr.dbname, login=this.login, password=this.password)
if this.share_wizard_id and this.share_wizard_id.action_id:
data['action_id'] = this.share_wizard_id.action_id.id
this = this.with_context(share_url_template_hash_arguments=['action_id'])
result[this.id] = this.share_wizard_id.share_url_template() % data
return result
_columns = {
'user_id': fields.many2one('res.users', required=True, readonly=True),
'login': fields.related('user_id', 'login', string='Login', type='char', size=64, required=True, readonly=True),
'password': fields.char('Password', size=64, readonly=True),
'share_url': fields.function(_share_url, string='Share URL', type='char', size=512),
'share_wizard_id': fields.many2one('share.wizard', 'Share Wizard', required=True, ondelete='cascade'),
'newly_created': fields.boolean('Newly created', readonly=True),
}
_defaults = {
'newly_created': True,
}
| OpusVL/odoo | addons/share/wizard/share_wizard.py | Python | agpl-3.0 | 50,754 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-01-16 10:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('patients', '0026_clinicianother_user'),
]
operations = [
migrations.AddField(
model_name='clinicianother',
name='use_other',
field=models.BooleanField(
default=False),
),
migrations.AlterField(
model_name='clinicianother',
name='user',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.CASCADE,
to=settings.AUTH_USER_MODEL),
),
]
| muccg/rdrf | rdrf/registry/patients/migrations/0027_auto_20180116_1012.py | Python | agpl-3.0 | 836 |
from . import slide_channel_technology_category
from . import slide_channel_technology
from . import slide_channel
| avanzosc/odoo-addons | slide_channel_technology/models/__init__.py | Python | agpl-3.0 | 115 |
# Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction
from django.db import connection
@transaction.atomic
def bulk_update_userstory_custom_attribute_order(project, user, data):
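    # "data" is expected to be an iterable of (id, order) pairs,
    # e.g. [(12, 1), (15, 2)] (the example ids are illustrative).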
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_userstorycustomattribute set "order" = $1
where custom_attributes_userstorycustomattribute.id = $2 and
custom_attributes_userstorycustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_task_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_taskcustomattribute set "order" = $1
where custom_attributes_taskcustomattribute.id = $2 and
custom_attributes_taskcustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_issue_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_issuecustomattribute set "order" = $1
where custom_attributes_issuecustomattribute.id = $2 and
custom_attributes_issuecustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
| bdang2012/taiga-back-casting | taiga/projects/custom_attributes/services.py | Python | agpl-3.0 | 2,749 |
# -*- coding: utf-8 -*-
# Copyright 2016 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from odoo import models
from odoo.tests.common import SavepointCase
class BaseKanbanAbstractTester(models.TransientModel):
_name = 'base.kanban.abstract.tester'
_inherit = 'base.kanban.abstract'
class TestBaseKanbanAbstract(SavepointCase):
@classmethod
def _init_test_model(cls, model_cls):
""" It builds a model from model_cls in order to test abstract models.
Note that this does not actually create a table in the database, so
there may be some unidentified edge cases.
Args:
model_cls (openerp.models.BaseModel): Class of model to initialize
Returns:
model_cls: Instance
"""
registry = cls.env.registry
cr = cls.env.cr
inst = model_cls._build_model(registry, cr)
model = cls.env[model_cls._name].with_context(todo=[])
model._prepare_setup()
model._setup_base(partial=False)
model._setup_fields(partial=False)
model._setup_complete()
model._auto_init()
model.init()
model._auto_end()
cls.test_model_record = cls.env['ir.model'].search([
('name', '=', model._name),
])
return inst
@classmethod
def setUpClass(cls):
super(TestBaseKanbanAbstract, cls).setUpClass()
cls.env.registry.enter_test_mode()
cls._init_test_model(BaseKanbanAbstractTester)
cls.test_model = cls.env[BaseKanbanAbstractTester._name]
@classmethod
def tearDownClass(cls):
cls.env.registry.leave_test_mode()
super(TestBaseKanbanAbstract, cls).tearDownClass()
def setUp(self):
super(TestBaseKanbanAbstract, self).setUp()
test_stage_1 = self.env['base.kanban.stage'].create({
'name': 'Test Stage 1',
'res_model_id': self.test_model_record.id,
})
test_stage_2 = self.env['base.kanban.stage'].create({
'name': 'Test Stage 2',
'res_model_id': self.test_model_record.id,
'fold': True,
})
self.id_1 = test_stage_1.id
self.id_2 = test_stage_2.id
def test_read_group_stage_ids(self):
"""It should return the correct recordset. """
self.assertEqual(
self.test_model._read_group_stage_ids(
self.env['base.kanban.stage'], [], 'id',
),
self.env['base.kanban.stage'].search([], order='id'),
)
def test_default_stage_id(self):
""" It should return an empty RecordSet """
self.assertEqual(
self.env['base.kanban.abstract']._default_stage_id(),
self.env['base.kanban.stage']
)
| thinkopensolutions/server-tools | base_kanban_stage/tests/test_base_kanban_abstract.py | Python | agpl-3.0 | 2,793 |
"""
Tests for users API
"""
import datetime
import ddt
import pytz
from django.conf import settings
from django.template import defaultfilters
from django.test import RequestFactory, override_settings
from django.utils import timezone
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import patch
from lms.djangoapps.certificates.api import generate_user_certificates
from lms.djangoapps.certificates.models import CertificateStatuses
from lms.djangoapps.certificates.tests.factories import GeneratedCertificateFactory
from course_modes.models import CourseMode
from courseware.access_response import MilestoneAccessError, StartDateError, VisibilityError
from lms.djangoapps.grades.tests.utils import mock_passing_grade
from mobile_api.testutils import (
MobileAPITestCase,
MobileAuthTestMixin,
MobileAuthUserTestMixin,
MobileCourseAccessTestMixin
)
from openedx.core.lib.courses import course_image_url
from openedx.core.lib.tests import attr
from student.models import CourseEnrollment
from util.milestones_helpers import set_prerequisite_courses
from util.testing import UrlResetMixin
from xmodule.course_module import DEFAULT_START_DATE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from .. import errors
from .serializers import CourseEnrollmentSerializer
@attr(shard=9)
class TestUserDetailApi(MobileAPITestCase, MobileAuthUserTestMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>...
"""
REVERSE_INFO = {'name': 'user-detail', 'params': ['username']}
def test_success(self):
self.login()
response = self.api_response()
self.assertEqual(response.data['username'], self.user.username)
self.assertEqual(response.data['email'], self.user.email)
@attr(shard=9)
class TestUserInfoApi(MobileAPITestCase, MobileAuthTestMixin):
"""
Tests for /api/mobile/v0.5/my_user_info
"""
def reverse_url(self, reverse_args=None, **kwargs):
return '/api/mobile/v0.5/my_user_info'
def test_success(self):
"""Verify the endpoint redirects to the user detail endpoint"""
self.login()
response = self.api_response(expected_response_code=302)
self.assertIn(self.username, response['location'])
@attr(shard=9)
@ddt.ddt
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
class TestUserEnrollmentApi(UrlResetMixin, MobileAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>/course_enrollments/
"""
REVERSE_INFO = {'name': 'courseenrollment-detail', 'params': ['username']}
ALLOW_ACCESS_TO_UNRELEASED_COURSE = True
ALLOW_ACCESS_TO_MILESTONE_COURSE = True
ALLOW_ACCESS_TO_NON_VISIBLE_COURSE = True
NEXT_WEEK = datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=7)
LAST_WEEK = datetime.datetime.now(pytz.UTC) - datetime.timedelta(days=7)
ADVERTISED_START = "Spring 2016"
ENABLED_SIGNALS = ['course_published']
DATES = {
'next_week': NEXT_WEEK,
'last_week': LAST_WEEK,
'default_start_date': DEFAULT_START_DATE,
}
@patch.dict(settings.FEATURES, {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(TestUserEnrollmentApi, self).setUp()
def verify_success(self, response):
"""
Verifies user course enrollment response for success
"""
super(TestUserEnrollmentApi, self).verify_success(response)
courses = response.data
self.assertEqual(len(courses), 1)
found_course = courses[0]['course']
self.assertIn('courses/{}/about'.format(self.course.id), found_course['course_about'])
self.assertIn('course_info/{}/updates'.format(self.course.id), found_course['course_updates'])
self.assertIn('course_info/{}/handouts'.format(self.course.id), found_course['course_handouts'])
self.assertIn('video_outlines/courses/{}'.format(self.course.id), found_course['video_outline'])
self.assertEqual(found_course['id'], unicode(self.course.id))
self.assertEqual(courses[0]['mode'], CourseMode.DEFAULT_MODE_SLUG)
self.assertEqual(courses[0]['course']['subscription_id'], self.course.clean_id(padding_char='_'))
expected_course_image_url = course_image_url(self.course)
self.assertIsNotNone(expected_course_image_url)
self.assertIn(expected_course_image_url, found_course['course_image'])
self.assertIn(expected_course_image_url, found_course['media']['course_image']['uri'])
def verify_failure(self, response, error_type=None):
self.assertEqual(response.status_code, 200)
courses = response.data
self.assertEqual(len(courses), 0)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_sort_order(self):
self.login()
num_courses = 3
courses = []
for course_index in range(num_courses):
courses.append(CourseFactory.create(mobile_available=True))
self.enroll(courses[course_index].id)
# verify courses are returned in the order of enrollment, with most recently enrolled first.
response = self.api_response()
for course_index in range(num_courses):
self.assertEqual(
response.data[course_index]['course']['id'],
unicode(courses[num_courses - course_index - 1].id)
)
@patch.dict(settings.FEATURES, {
'ENABLE_PREREQUISITE_COURSES': True,
'DISABLE_START_DATES': False,
'ENABLE_MKTG_SITE': True,
})
def test_courseware_access(self):
self.login()
course_with_prereq = CourseFactory.create(start=self.LAST_WEEK, mobile_available=True)
prerequisite_course = CourseFactory.create()
set_prerequisite_courses(course_with_prereq.id, [unicode(prerequisite_course.id)])
# Create list of courses with various expected courseware_access responses and corresponding expected codes
courses = [
course_with_prereq,
CourseFactory.create(start=self.NEXT_WEEK, mobile_available=True),
CourseFactory.create(visible_to_staff_only=True, mobile_available=True),
CourseFactory.create(start=self.LAST_WEEK, mobile_available=True, visible_to_staff_only=False),
]
expected_error_codes = [
MilestoneAccessError().error_code, # 'unfulfilled_milestones'
StartDateError(self.NEXT_WEEK).error_code, # 'course_not_started'
VisibilityError().error_code, # 'not_visible_to_user'
None,
]
# Enroll in all the courses
for course in courses:
self.enroll(course.id)
# Verify courses have the correct response through error code. Last enrolled course is first course in response
response = self.api_response()
for course_index in range(len(courses)):
result = response.data[course_index]['course']['courseware_access']
self.assertEqual(result['error_code'], expected_error_codes[::-1][course_index])
if result['error_code'] is not None:
self.assertFalse(result['has_access'])
@ddt.data(
('next_week', ADVERTISED_START, ADVERTISED_START, "string"),
('next_week', None, defaultfilters.date(NEXT_WEEK, "DATE_FORMAT"), "timestamp"),
('next_week', '', defaultfilters.date(NEXT_WEEK, "DATE_FORMAT"), "timestamp"),
('default_start_date', ADVERTISED_START, ADVERTISED_START, "string"),
('default_start_date', '', None, "empty"),
('default_start_date', None, None, "empty"),
)
@ddt.unpack
@patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False, 'ENABLE_MKTG_SITE': True})
def test_start_type_and_display(self, start, advertised_start, expected_display, expected_type):
"""
Tests that the correct start_type and start_display are returned in the
case the course has not started
"""
self.login()
course = CourseFactory.create(start=self.DATES[start], advertised_start=advertised_start, mobile_available=True)
self.enroll(course.id)
response = self.api_response()
self.assertEqual(response.data[0]['course']['start_type'], expected_type)
self.assertEqual(response.data[0]['course']['start_display'], expected_display)
@patch.dict(settings.FEATURES, {"ENABLE_DISCUSSION_SERVICE": True, 'ENABLE_MKTG_SITE': True})
def test_discussion_url(self):
self.login_and_enroll()
response = self.api_response()
response_discussion_url = response.data[0]['course']['discussion_url']
self.assertIn('/api/discussion/v1/courses/{}'.format(self.course.id), response_discussion_url)
def test_org_query(self):
self.login()
# Create list of courses with various organizations
courses = [
CourseFactory.create(org='edX', mobile_available=True),
CourseFactory.create(org='edX', mobile_available=True),
CourseFactory.create(org='edX', mobile_available=True, visible_to_staff_only=True),
CourseFactory.create(org='Proversity.org', mobile_available=True),
CourseFactory.create(org='MITx', mobile_available=True),
CourseFactory.create(org='HarvardX', mobile_available=True),
]
# Enroll in all the courses
for course in courses:
self.enroll(course.id)
response = self.api_response(data={'org': 'edX'})
# Test for 3 expected courses
self.assertEqual(len(response.data), 3)
# Verify only edX courses are returned
for entry in response.data:
self.assertEqual(entry['course']['org'], 'edX')
@attr(shard=9)
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
class TestUserEnrollmentCertificates(UrlResetMixin, MobileAPITestCase, MilestonesTestCaseMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>/course_enrollments/
"""
REVERSE_INFO = {'name': 'courseenrollment-detail', 'params': ['username']}
ENABLED_SIGNALS = ['course_published']
def verify_pdf_certificate(self):
"""
Verifies the correct URL is returned in the response
for PDF certificates.
"""
self.login_and_enroll()
certificate_url = "http://test_certificate_url"
GeneratedCertificateFactory.create(
user=self.user,
course_id=self.course.id,
status=CertificateStatuses.downloadable,
mode='verified',
download_url=certificate_url,
)
response = self.api_response()
certificate_data = response.data[0]['certificate']
self.assertEquals(certificate_data['url'], certificate_url)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_no_certificate(self):
self.login_and_enroll()
response = self.api_response()
certificate_data = response.data[0]['certificate']
self.assertDictEqual(certificate_data, {})
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': False, 'ENABLE_MKTG_SITE': True})
def test_pdf_certificate_with_html_cert_disabled(self):
"""
        Tests PDF certificates with CERTIFICATES_HTML_VIEW set to False.
"""
self.verify_pdf_certificate()
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True, 'ENABLE_MKTG_SITE': True})
def test_pdf_certificate_with_html_cert_enabled(self):
"""
Tests PDF certificates with CERTIFICATES_HTML_VIEW set to True.
"""
self.verify_pdf_certificate()
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True, 'ENABLE_MKTG_SITE': True})
def test_web_certificate(self):
CourseMode.objects.create(
course_id=self.course.id,
mode_display_name="Honor",
mode_slug=CourseMode.HONOR,
)
self.login_and_enroll()
self.course.cert_html_view_enabled = True
self.store.update_item(self.course, self.user.id)
with mock_passing_grade():
generate_user_certificates(self.user, self.course.id)
response = self.api_response()
certificate_data = response.data[0]['certificate']
self.assertRegexpMatches(
certificate_data['url'],
r'http.*/certificates/user/{user_id}/course/{course_id}'.format(
user_id=self.user.id,
course_id=self.course.id,
)
)
@attr(shard=9)
class CourseStatusAPITestCase(MobileAPITestCase):
"""
Base test class for /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
REVERSE_INFO = {'name': 'user-course-status', 'params': ['username', 'course_id']}
def setUp(self):
"""
Creates a basic course structure for our course
"""
super(CourseStatusAPITestCase, self).setUp()
self.section = ItemFactory.create(
parent=self.course,
category='chapter',
)
self.sub_section = ItemFactory.create(
parent=self.section,
category='sequential',
)
self.unit = ItemFactory.create(
parent=self.sub_section,
category='vertical',
)
self.other_sub_section = ItemFactory.create(
parent=self.section,
category='sequential',
)
self.other_unit = ItemFactory.create(
parent=self.other_sub_section,
category='vertical',
)
@attr(shard=9)
class TestCourseStatusGET(CourseStatusAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for GET of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def test_success(self):
self.login_and_enroll()
response = self.api_response()
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.sub_section.location)
)
self.assertEqual(
response.data["last_visited_module_path"],
[unicode(module.location) for module in [self.sub_section, self.section, self.course]]
)
@attr(shard=9)
class TestCourseStatusPATCH(CourseStatusAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for PATCH of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def url_method(self, url, **kwargs):
# override implementation to use PATCH method.
return self.client.patch(url, data=kwargs.get('data', None))
def test_success(self):
self.login_and_enroll()
response = self.api_response(data={"last_visited_module_id": unicode(self.other_unit.location)})
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.other_sub_section.location)
)
def test_invalid_module(self):
self.login_and_enroll()
response = self.api_response(data={"last_visited_module_id": "abc"}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODULE_ID
)
def test_nonexistent_module(self):
self.login_and_enroll()
non_existent_key = self.course.id.make_usage_key('video', 'non-existent')
response = self.api_response(data={"last_visited_module_id": non_existent_key}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODULE_ID
)
def test_no_timezone(self):
self.login_and_enroll()
past_date = datetime.datetime.now()
response = self.api_response(
data={
"last_visited_module_id": unicode(self.other_unit.location),
"modification_date": past_date.isoformat()
},
expected_response_code=400
)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODIFICATION_DATE
)
def _date_sync(self, date, initial_unit, update_unit, expected_subsection):
"""
        Helper for test cases that use a modification date to decide whether
        to update the course status
"""
self.login_and_enroll()
# save something so we have an initial date
self.api_response(data={"last_visited_module_id": unicode(initial_unit.location)})
# now actually update it
response = self.api_response(
data={
"last_visited_module_id": unicode(update_unit.location),
"modification_date": date.isoformat()
}
)
self.assertEqual(
response.data["last_visited_module_id"],
unicode(expected_subsection.location)
)
def test_old_date(self):
self.login_and_enroll()
date = timezone.now() + datetime.timedelta(days=-100)
self._date_sync(date, self.unit, self.other_unit, self.sub_section)
def test_new_date(self):
self.login_and_enroll()
date = timezone.now() + datetime.timedelta(days=100)
self._date_sync(date, self.unit, self.other_unit, self.other_sub_section)
def test_no_initial_date(self):
self.login_and_enroll()
response = self.api_response(
data={
"last_visited_module_id": unicode(self.other_unit.location),
"modification_date": timezone.now().isoformat()
}
)
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.other_sub_section.location)
)
def test_invalid_date(self):
self.login_and_enroll()
response = self.api_response(data={"modification_date": "abc"}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODIFICATION_DATE
)
@attr(shard=9)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
class TestCourseEnrollmentSerializer(MobileAPITestCase, MilestonesTestCaseMixin):
"""
Test the course enrollment serializer
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
super(TestCourseEnrollmentSerializer, self).setUp()
self.login_and_enroll()
self.request = RequestFactory().get('/')
self.request.user = self.user
def test_success(self):
serialized = CourseEnrollmentSerializer(
CourseEnrollment.enrollments_for_user(self.user)[0],
context={'request': self.request},
).data
self.assertEqual(serialized['course']['name'], self.course.display_name)
self.assertEqual(serialized['course']['number'], self.course.id.course)
self.assertEqual(serialized['course']['org'], self.course.id.org)
# Assert utm parameters
expected_utm_parameters = {
'twitter': 'utm_campaign=social-sharing-db&utm_medium=social&utm_source=twitter',
'facebook': 'utm_campaign=social-sharing-db&utm_medium=social&utm_source=facebook'
}
self.assertEqual(serialized['course']['course_sharing_utm_parameters'], expected_utm_parameters)
def test_with_display_overrides(self):
self.course.display_coursenumber = "overridden_number"
self.course.display_organization = "overridden_org"
self.store.update_item(self.course, self.user.id)
serialized = CourseEnrollmentSerializer(
CourseEnrollment.enrollments_for_user(self.user)[0],
context={'request': self.request},
).data
self.assertEqual(serialized['course']['number'], self.course.display_coursenumber)
self.assertEqual(serialized['course']['org'], self.course.display_organization)
| teltek/edx-platform | lms/djangoapps/mobile_api/users/tests.py | Python | agpl-3.0 | 20,030 |
from .MaterialModifier import *
class MagicOre(MaterialModifier):
pass
| etkirsch/legends-of-erukar | erukar/content/modifiers/material/base/MagicOre.py | Python | agpl-3.0 | 76 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api, _
from odoo.addons.http_routing.models.ir_http import slug
class EventEvent(models.Model):
_inherit = "event.event"
community_menu = fields.Boolean(
"Community Menu", compute="_compute_community_menu",
readonly=False, store=True,
help="Display community tab on website")
community_menu_ids = fields.One2many(
"website.event.menu", "event_id", string="Event Community Menus",
domain=[("menu_type", "=", "community")])
@api.depends("event_type_id", "website_menu", "community_menu")
def _compute_community_menu(self):
""" At type onchange: synchronize. At website_menu update: synchronize. """
for event in self:
if event.event_type_id and event.event_type_id != event._origin.event_type_id:
event.community_menu = event.event_type_id.community_menu
elif event.website_menu and event.website_menu != event._origin.website_menu or not event.community_menu:
event.community_menu = True
elif not event.website_menu:
event.community_menu = False
# ------------------------------------------------------------
# WEBSITE MENU MANAGEMENT
# ------------------------------------------------------------
# OVERRIDES: ADD SEQUENCE
def _get_menu_update_fields(self):
update_fields = super(EventEvent, self)._get_menu_update_fields()
update_fields += ['community_menu']
return update_fields
def _update_website_menus(self, menus_update_by_field=None):
super(EventEvent, self)._update_website_menus(menus_update_by_field=menus_update_by_field)
for event in self:
if event.menu_id and (not menus_update_by_field or event in menus_update_by_field.get('community_menu')):
event._update_website_menu_entry('community_menu', 'community_menu_ids', '_get_community_menu_entries')
def _get_menu_type_field_matching(self):
res = super(EventEvent, self)._get_menu_type_field_matching()
res['community'] = 'community_menu'
return res
def _get_community_menu_entries(self):
self.ensure_one()
return [(_('Community'), '/event/%s/community' % slug(self), False, 80, 'community')]
def _get_track_menu_entries(self):
""" Remove agenda as this is now managed separately """
self.ensure_one()
return [
(_('Talks'), '/event/%s/track' % slug(self), False, 10, 'track'),
(_('Agenda'), '/event/%s/agenda' % slug(self), False, 70, 'track')
]
def _get_track_proposal_menu_entries(self):
""" See website_event_track._get_track_menu_entries() """
self.ensure_one()
return [(_('Talk Proposals'), '/event/%s/track_proposal' % slug(self), False, 15, 'track_proposal')]
| ygol/odoo | addons/website_event_track_online/models/event_event.py | Python | agpl-3.0 | 2,962 |
# -*- coding: utf-8 -*-
# Copyright 2016 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': "Absence Management",
'summary': """Create time based absence notifications""",
'author': 'Onestein',
'website': 'http://www.onestein.eu',
'images': ['static/description/main_screenshot.png'],
'category': 'Human Resources',
'version': '10.0.1.0.0',
'license': 'AGPL-3',
'depends': [
'hr_holidays',
],
'data': [
'security/ir.model.access.csv',
'views/hr_holidays_status.xml',
'views/hr_holidays.xml',
'data/hr_absenteeism_cron.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
'application': False,
}
| VitalPet/addons-onestein | hr_absenteeism/__manifest__.py | Python | agpl-3.0 | 776 |
# Copyright 2020 Ecosoft Co., Ltd. (http://ecosoft.co.th)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Account Invoice Payment Retention",
"version": "14.0.1.0.1",
"category": "Accounting & Finance",
"author": "Ecosoft, Odoo Community Association (OCA)",
"license": "AGPL-3",
"website": "https://github.com/OCA/account-invoicing",
"depends": ["account"],
"data": [
"security/security.xml",
"views/res_config_settings_views.xml",
"views/account_move_views.xml",
"wizard/account_payment_register_views.xml",
],
"maintainer": ["kittiu"],
"installable": True,
"development_status": "Alpha",
}
| OCA/account-invoicing | account_invoice_payment_retention/__manifest__.py | Python | agpl-3.0 | 700 |
# -*- coding: utf-8 -*-
import unittest
import doctest
import sys
import time
import timeside.core
class _TextTestResult(unittest.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
unittest.TestResult.__init__(self)
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
self.currentTestCase = None
def getDescription(self, test):
if self.descriptions:
return test.shortDescription() or str(test)
else:
return str(test)
def startTest(self, test):
unittest.TestResult.startTest(self, test)
if self.showAll:
if self.currentTestCase != test.__class__:
self.currentTestCase = test.__class__
self.stream.writeln()
self.stream.writeln("[%s]" % self.currentTestCase.__name__)
self.stream.write(" " + self.getDescription(test))
self.stream.write(" ... ")
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
def addSkip(self, test, reason):
unittest.TestResult.addSkip(self, test, reason)
if self.showAll:
self.stream.writeln("SKIP : " + reason)
elif self.dots:
self.stream.write('S')
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: [%s] --> %s "
% (flavour,
test.__class__.__name__,
self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
class _WritelnDecorator:
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self, stream):
self.stream = stream
def __getattr__(self, attr):
return getattr(self.stream, attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TestRunner:
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
def __init__(self, stream=sys.stderr, descriptions=1, verbosity=2):
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
def _makeResult(self):
return _TextTestResult(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed:
self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
else:
self.stream.writeln("OK")
return result
def run_test_module(test_modules_list=None, test_prefix=None):
suite = unittest.TestSuite()
finder = doctest.DocTestFinder(exclude_empty=False) # finder for doctest
if test_prefix:
unittest.TestLoader.testMethodPrefix = test_prefix
if not test_modules_list:
test_modules_list = []
elif not isinstance(test_modules_list, list):
test_modules_list = [test_modules_list]
test_modules_list.append('__main__')
for test in test_modules_list:
# Doctest
suite.addTest(doctest.DocTestSuite(test, test_finder=finder))
# unittest
suite.addTest(unittest.loader.TestLoader().loadTestsFromModule(test))
TestRunner().run(suite)
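# Editorial usage sketch (module names below are hypothetical, not part of this file):
#   import test_decoding, test_analyzers
#   run_test_module([test_decoding, test_analyzers], test_prefix='test')
# collects the doctests and unittest cases of the given, already-imported modules
# (plus __main__) and runs them through TestRunner above.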
| Parisson/TimeSide | tests/unit_timeside.py | Python | agpl-3.0 | 5,327 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##############################################################################
#
# sci.AI EXE
# Copyright(C) 2017 sci.AI
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see < http://www.gnu.org/licenses/ >.
#
##############################################################################
from flask import Flask
from redis import Redis
from rq import Queue
import rq_dashboard
from flask_mongoengine import MongoEngine
from validator.config import Configuration
app = Flask(__name__)
app.config.from_object(Configuration)
db = MongoEngine(app)
redis_conn = Redis()
queue = Queue('high', connection=redis_conn, default_timeout=1800)
from validator.routes import app_routes
app.register_blueprint(app_routes)
# RQ dashboards
app.config.from_object(rq_dashboard.default_settings)
app.register_blueprint(rq_dashboard.blueprint, url_prefix="/rq")
| sciAI/exe | validator/__init__.py | Python | agpl-3.0 | 1,545 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ProjectPodcast',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=512, verbose_name='Title')),
('slug', autoslug.fields.AutoSlugField(editable=True, populate_from=b'title', unique_with=(b'show',), verbose_name='Slug')),
('description', models.TextField(verbose_name='Description', blank=True)),
('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Publication date')),
('image', models.ImageField(upload_to=b'shows', verbose_name='Image', blank=True)),
],
options={
'verbose_name': 'Project podcast',
'verbose_name_plural': 'Project podcasts',
},
),
migrations.CreateModel(
name='ProjectProducer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, verbose_name='Name')),
('slug', autoslug.fields.AutoSlugField(populate_from=b'name', editable=True, unique=True, verbose_name='Slug')),
],
options={
'verbose_name': 'Project producer',
'verbose_name_plural': 'Project producers',
},
),
migrations.CreateModel(
name='ProjectShow',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256, verbose_name='Name')),
('slug', autoslug.fields.AutoSlugField(editable=True, populate_from=b'name', unique_with=(b'category',), verbose_name='Slug')),
('description', models.TextField(verbose_name='Description', blank=True)),
('featured', models.BooleanField(default=False, verbose_name='Featured')),
('image', models.ImageField(upload_to=b'shows', verbose_name='Image', blank=True)),
('producer', models.ForeignKey(verbose_name='Producer', to='projects.ProjectProducer')),
],
options={
'verbose_name': 'Project show',
'verbose_name_plural': 'Project shows',
},
),
migrations.AddField(
model_name='projectpodcast',
name='show',
field=models.ForeignKey(verbose_name='Show', to='projects.ProjectShow'),
),
]
| GISAElkartea/amv2 | antxetamedia/projects/migrations/0001_initial.py | Python | agpl-3.0 | 2,923 |
#!env/python3
# coding: utf-8
from core.managers.containers import *
from core.managers.imports import *
from core.managers.analysis_manager import AnalysisManager
from core.managers.annotation_manager import AnnotationManager
from core.managers.file_manager import FileManager
from core.managers.filter_manager import FilterEngine
from core.managers.job_manager import JobManager
from core.managers.pipeline_manager import PipelineManager
from core.managers.project_manager import ProjectManager
from core.managers.sample_manager import SampleManager
from core.managers.user_manager import UserManager
from core.managers.search_manager import SearchManager
from core.managers.event_manager import EventManager
from core.managers.subject_manager import SubjectManager
from core.managers.admin_manager import AdminManager
from core.managers.phenotype_manager import PhenotypeManager
from core.managers.panel_manager import PanelManager | REGOVAR/Regovar | regovar/core/managers/__init__.py | Python | agpl-3.0 | 954 |
#####################################################################################
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g.
# you have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import, division, print_function
import json
import os
import sys
from six import PY3
from twisted.internet.selectreactor import SelectReactor
from twisted.internet.task import LoopingCall
from crossbar.controller import cli
from .test_cli import CLITestBase
# Turn this to `True` to print the stdout/stderr of the Crossbars spawned
DEBUG = False
def make_lc(self, reactor, func):
if DEBUG:
self.stdout_length = 0
self.stderr_length = 0
def _(lc, reactor):
if DEBUG:
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
if self.stdout.getvalue()[self.stdout_length:]:
print(self.stdout.getvalue()[self.stdout_length:],
file=sys.__stdout__)
if self.stderr.getvalue()[self.stderr_length:]:
print(self.stderr.getvalue()[self.stderr_length:],
file=sys.__stderr__)
self.stdout_length = len(stdout)
self.stderr_length = len(stderr)
return func(lc, reactor)
lc = LoopingCall(_)
lc.a = (lc, reactor)
lc.clock = reactor
lc.start(0.1)
return lc
class ContainerRunningTests(CLITestBase):
def setUp(self):
CLITestBase.setUp(self)
# Set up the configuration directories
self.cbdir = os.path.abspath(self.mktemp())
os.mkdir(self.cbdir)
self.config = os.path.abspath(os.path.join(self.cbdir, "config.json"))
self.code_location = os.path.abspath(self.mktemp())
os.mkdir(self.code_location)
def _start_run(self, config, app, stdout_expected, stderr_expected,
end_on):
with open(self.config, "wb") as f:
f.write(json.dumps(config, ensure_ascii=False).encode('utf8'))
with open(self.code_location + "/myapp.py", "w") as f:
f.write(app)
reactor = SelectReactor()
make_lc(self, reactor, end_on)
# In case it hard-locks
reactor.callLater(self._subprocess_timeout, reactor.stop)
cli.run("crossbar",
["start",
"--cbdir={}".format(self.cbdir),
"--logformat=syslogd"],
reactor=reactor)
out = self.stdout.getvalue()
err = self.stderr.getvalue()
for i in stdout_expected:
if i not in out:
self.fail(u"Error: '{}' not in:\n{}".format(i, out))
for i in stderr_expected:
if i not in err:
self.fail(u"Error: '{}' not in:\n{}".format(i, err))
def test_start_run(self):
"""
A basic start, that enters the reactor.
"""
expected_stdout = [
"Entering reactor event loop", "Loaded the component!"
]
expected_stderr = []
def _check(lc, reactor):
if "Loaded the component!" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
},
"workers": [
{
"type": "router",
"options": {
"pythonpath": ["."]
},
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """#!/usr/bin/env python
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
class MySession(ApplicationSession):
log = Logger()
def onJoin(self, details):
self.log.info("Loaded the component!")
"""
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_start_run_guest(self):
"""
A basic start of a guest.
"""
expected_stdout = [
"Entering reactor event loop", "Loaded the component!"
]
expected_stderr = []
def _check(lc, reactor):
if "Loaded the component!" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
},
"workers": [
{
"type": "router",
"options": {
"pythonpath": ["."]
},
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "guest",
"executable": sys.executable,
"arguments": [os.path.join(self.code_location, "myapp.py")]
}
]
}
myapp = """#!/usr/bin/env python
print("Loaded the component!")
"""
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_start_utf8_logging(self):
"""
Logging things that are UTF8 but not Unicode should work fine.
"""
expected_stdout = [
"Entering reactor event loop", u"\u2603"
]
expected_stderr = []
def _check(lc, reactor):
if u"\u2603" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
},
"workers": [
{
"type": "router",
"options": {
"pythonpath": ["."]
},
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """#!/usr/bin/env python
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
class MySession(ApplicationSession):
log = Logger()
def onJoin(self, details):
self.log.info(u"\\u2603")
"""
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_run_exception_utf8(self):
"""
Raising an ApplicationError with Unicode will raise that error through
to the caller.
"""
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """from __future__ import absolute_import, print_function
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
from twisted.internet.defer import inlineCallbacks
class MySession(ApplicationSession):
log = Logger()
@inlineCallbacks
def onJoin(self, details):
def _err():
raise ApplicationError(u"com.example.error.form_error", u"\\u2603")
e = yield self.register(_err, u'com.example.err')
try:
yield self.call(u'com.example.err')
except ApplicationError as e:
assert e.args[0] == u"\\u2603"
print("Caught error:", e)
except:
print('other err:', e)
self.log.info("Loaded the component")
"""
if PY3:
expected_stdout = ["Loaded the component", "\u2603", "Caught error:"]
else:
expected_stdout = ["Loaded the component", "\\u2603", "Caught error:"]
expected_stderr = []
def _check(lc, reactor):
if "Loaded the component" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure1(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
"""
expected_stdout = []
expected_stderr = ["No module named"]
def _check(_1, _2):
pass
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure2(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession2",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
"""
def _check(_1, _2):
pass
expected_stdout = []
if sys.version_info >= (3, 5):
expected_stderr = ["module 'myapp' has no attribute 'MySession2'"]
else:
expected_stderr = ["'module' object has no attribute 'MySession2'"]
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure3(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
a = 1 / 0
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = ["Component instantiation failed"]
if PY3:
expected_stderr.append("division by zero")
else:
expected_stderr.append("integer division")
expected_stderr.append("by zero")
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure4(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
a = 1 / 0 # trigger exception
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = ["Fatal error in component", "While firing onJoin"]
if PY3:
expected_stderr.append("division by zero")
else:
expected_stderr.append("integer division")
expected_stderr.append("by zero")
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure5(self):
config = {
"controller": {
},
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
self.leave()
def onLeave(self, details):
self.log.info("Session ended: {details}", details=details)
self.disconnect()
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = [
"Component 'component1' failed to start; shutting down node."
]
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure6(self):
config = {
"controller": {
},
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.util import sleep
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
@inlineCallbacks
def onJoin(self, details):
self.log.info("MySession.onJoin()")
self.log.info("Sleeping a couple of secs and then shutting down ..")
yield sleep(2)
self.leave()
def onLeave(self, details):
self.log.info("Session ended: {details}", details=details)
self.disconnect()
"""
def _check(_1, _2):
pass
expected_stdout = [
"Session ended: CloseDetails",
"Sleeping a couple of secs and then shutting down",
"Container is hosting no more components: shutting down"
]
expected_stderr = []
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
def test_failure7(self):
config = {
"workers": [
{
"type": "router",
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8090
},
"url": "ws://127.0.0.1:8090/ws"
}
}
]
}
]
}
myapp = """
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.log.info("MySession.onJoin()")
self.leave()
"""
def _check(_1, _2):
pass
expected_stdout = []
expected_stderr = [
("Could not connect container component to router - transport "
"establishment failed")
]
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
class InitTests(CLITestBase):
def test_hello(self):
def _check(lc, reactor):
if "published to 'oncounter'" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
appdir = self.mktemp()
cbdir = os.path.join(appdir, ".crossbar")
reactor = SelectReactor()
cli.run("crossbar",
["init",
"--appdir={}".format(appdir),
"--template=hello:python"],
reactor=reactor)
self.assertIn("Application template initialized",
self.stdout.getvalue())
reactor = SelectReactor()
make_lc(self, reactor, _check)
# In case it hard-locks
reactor.callLater(self._subprocess_timeout, reactor.stop)
cli.run("crossbar",
["start",
"--cbdir={}".format(cbdir.path),
"--logformat=syslogd"],
reactor=reactor)
stdout_expected = ["published to 'oncounter'"]
for i in stdout_expected:
self.assertIn(i, self.stdout.getvalue())
if not os.environ.get("CB_FULLTESTS"):
del ContainerRunningTests
del InitTests
| NinjaMSP/crossbar | crossbar/controller/test/test_run.py | Python | agpl-3.0 | 42,995 |
# Copyright 2018 Tecnativa - Vicent Cubells <[email protected]>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3
from odoo import fields, models
SORTING_CRITERIA = [
("name", "By name"),
("product_id.name", "By product name"),
("product_id.default_code", "By product reference"),
("date_planned", "By date planned"),
("price_unit", "By price"),
("product_qty", "By quantity"),
]
SORTING_DIRECTION = [
("asc", "Ascending"),
("desc", "Descending"),
]
class ResCompany(models.Model):
_inherit = "res.company"
default_po_line_order = fields.Selection(
selection=SORTING_CRITERIA,
string="Line Order",
help="Select a sorting criteria for purchase order lines.",
)
default_po_line_direction = fields.Selection(
selection=SORTING_DIRECTION,
string="Sort Direction",
help="Select a sorting direction for purchase order lines.",
)
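# Editorial note (sketch of assumed downstream use; the actual consumer lives in the
# other files of purchase_order_line_deep_sort): the two selections above provide a
# field path and a direction that can be joined into an order clause, e.g.
#   "%s %s" % (company.default_po_line_order, company.default_po_line_direction)
#   # -> "date_planned desc"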
| OCA/purchase-workflow | purchase_order_line_deep_sort/models/res_company.py | Python | agpl-3.0 | 948 |
#!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import os
import gobject
import json
import shutil
import tarfile
from glue.paths import INSTALLER
from data.skarphed.Skarphed import AbstractInstaller, AbstractDestroyer
from glue.lng import _
from glue.paths import COREFILES
import logging
TARGETNAME = "Debian 7 / nginx"
EXTRA_PARAMS = {
'nginx.domain':(_('Domain'),_('example.org or leave empty')),
'nginx.subdomain':(_('Subdomain'),_('sub.example.org or leave empty')),
'nginx.port':(_('Port'),_('80'))
}
class Installer(AbstractInstaller):
def execute_installation(self):
os.mkdir(self.BUILDPATH)
p = os.path.dirname(os.path.realpath(__file__))
nginx_template = open(os.path.join(p,"nginx.conf"),"r").read()
nginx_domain = ""
domainlineterm = ""
if self.data['nginx.port'] == "":
self.data['nginx.port'] = "80"
if self.data['nginx.domain'] != "":
nginx_domain = "server_name "+self.data['nginx.domain']
self.domain = self.data['nginx.domain']
domainlineterm = ";"
nginx_subdomain = ""
if self.data['nginx.subdomain'] != "":
nginx_subdomain = "alias "+self.data['nginx.subdomain']
domainlineterm = ";"
nginxconf = nginx_template%{'port':self.data['nginx.port'],
'domain':nginx_domain,
'subdomain':nginx_subdomain,
'domainlineterm':domainlineterm}
nginxconfresult = open(os.path.join(self.BUILDPATH,"nginx.conf"),"w")
nginxconfresult.write(nginxconf)
nginxconfresult.close()
self.status = 10
gobject.idle_add(self.updated)
scv_config = {}
for key,val in self.data.items():
if key.startswith("core.") or key.startswith("db."):
if key == "db.name":
scv_config[key] = val+".fdb"
continue
scv_config[key] = val
scv_config_defaults = {
"core.session_duration":2,
"core.session_extend":1,
"core.cookielaw":1,
"core.debug":True
}
scv_config.update(scv_config_defaults)
jenc = json.JSONEncoder()
config_json = open(os.path.join(self.BUILDPATH,"config.json"),"w")
config_json.write(jenc.encode(scv_config))
config_json.close()
shutil.copyfile(os.path.join(p,"skarphed.conf"), os.path.join(self.BUILDPATH,"skarphed.conf"))
shutil.copyfile(os.path.join(p,"install.sh"), os.path.join(self.BUILDPATH,"install.sh"))
shutil.copyfile(os.path.join(p,"uwsgi.conf"), os.path.join(self.BUILDPATH,"uwsgi.conf"))
self.status = 30
gobject.idle_add(self.updated)
shutil.copytree(os.path.join(COREFILES,"web"), os.path.join(self.BUILDPATH, "web"))
shutil.copytree(os.path.join(COREFILES,"lib"), os.path.join(self.BUILDPATH,"lib"))
tar = tarfile.open(os.path.join(self.BUILDPATH,"scv_install.tar.gz"),"w:gz")
tar.add(os.path.join(self.BUILDPATH,"nginx.conf"))
tar.add(os.path.join(self.BUILDPATH,"uwsgi.conf"))
tar.add(os.path.join(self.BUILDPATH,"config.json"))
tar.add(os.path.join(self.BUILDPATH,"skarphed.conf"))
tar.add(os.path.join(self.BUILDPATH,"install.sh"))
tar.add(os.path.join(self.BUILDPATH,"web"))
tar.add(os.path.join(self.BUILDPATH,"lib"))
tar.close()
self.status = 45
gobject.idle_add(self.updated)
con = self.server.getSSH()
con_stdin, con_stdout, con_stderr = con.exec_command("mkdir /tmp/scvinst"+str(self.installationId))
self.status = 50
gobject.idle_add(self.updated)
con = self.server.getSSH()
ftp = con.open_sftp()
ftp.put(os.path.join(self.BUILDPATH,"scv_install.tar.gz"),"/tmp/scvinst"+str(self.installationId)+"/scv_install.tar.gz")
ftp.close()
self.status = 65
gobject.idle_add(self.updated)
con = self.server.getSSH()
con_stdin, con_stdout, con_stderr = con.exec_command("cd /tmp/scvinst"+str(self.installationId)+"; tar xvfz scv_install.tar.gz -C / ; chmod 755 install.sh ; ./install.sh ")
output = con_stdout.read()
logging.debug("SSH-outputlength: %d"%len(output))
logging.debug(output)
shutil.rmtree(self.BUILDPATH)
self.status = 100
gobject.idle_add(self.updated)
gobject.idle_add(self.addInstanceToServer)
class Destroyer(AbstractDestroyer):
def execute_destruction(self):
p = os.path.dirname(os.path.realpath(__file__))
server = self.instance.getServer()
self.status = 10
gobject.idle_add(self.updated)
con = server.getSSH()
ftp = con.open_sftp()
ftp.put(os.path.join(p,"teardown.sh"),"/tmp/teardown.sh")
ftp.close()
self.status = 30
gobject.idle_add(self.updated)
con = server.getSSH()
con_stdin, con_stdout, con_stderr = con.exec_command("cd /tmp/ ; chmod 755 teardown.sh ; ./teardown.sh %d "%self.instanceid)
logging.debug(con_stdout.read())
self.status = 100
gobject.idle_add(self.updated)
gobject.idle_add(self.updated)
gobject.idle_add(self.removeInstanceFromServer)
| skarphed/skarphed | admin/installer/debian7_nginx/__init__.py | Python | agpl-3.0 | 6,232 |
from documents.models import Document
from categories.models import Category
import os
def move_doc(doc_id, cat_id):
doc = Document.objects.get(pk=int(doc_id))
old_cat = doc.refer_category
new_cat = Category.objects.get(pk=int(cat_id))
for p in doc.pages.all():
cmd = "mv " + p.get_absolute_path() + " " + new_cat.get_absolute_path() + "/"
os.system(cmd)
doc.refer_category = new_cat
doc.save()
old_cat.documents.remove(doc)
new_cat.documents.add(doc)
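# Editorial usage sketch (ids are hypothetical): move_doc(12, 3) re-parents document 12
# under category 3, moving each page file on disk with `mv` before updating the
# category/document links. Paths are assumed to be shell-safe, since the command is
# built by plain string concatenation.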
| Foxugly/MyTaxAccountant | scripts/move_document.py | Python | agpl-3.0 | 504 |
# -*- coding: utf-8 -*-
from pyramid.view import view_config
import logging
import pysite.resmgr
L = logging.getLogger('PySite')
@view_config(
name='',
context=pysite.plugins.models.Node,
renderer='pysite:plugins/templates/index.mako',
permission='admin'
)
def index(context, request):
return dict()
| dmdm/PySite | pysite/plugins/views.py | Python | agpl-3.0 | 327 |
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, fields, models
class ResPartner(models.Model):
_inherit = "res.partner"
_allowed_inactive_link_models = ["res.partner"]
_inactive_cascade = True
sta_mandate_ids = fields.One2many(
comodel_name="sta.mandate",
inverse_name="partner_id",
string="State Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
sta_mandate_inactive_ids = fields.One2many(
comodel_name="sta.mandate",
inverse_name="partner_id",
string="State Mandates (Inactive)",
domain=[("active", "=", False)],
)
int_mandate_ids = fields.One2many(
comodel_name="int.mandate",
inverse_name="partner_id",
string="Internal Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
int_mandate_inactive_ids = fields.One2many(
comodel_name="int.mandate",
inverse_name="partner_id",
string="Internal Mandates (Inactive)",
domain=[("active", "=", False)],
)
ext_mandate_ids = fields.One2many(
comodel_name="ext.mandate",
inverse_name="partner_id",
string="External Mandates",
domain=[("active", "=", True)],
context={"force_recompute": True},
)
ext_mandate_inactive_ids = fields.One2many(
comodel_name="ext.mandate",
inverse_name="partner_id",
string="External Mandates (Inactive)",
domain=[("active", "=", False)],
)
ext_mandate_count = fields.Integer(
string="External Mandates Nbr", compute="_compute_mandate_assembly_count"
)
ext_assembly_count = fields.Integer(
string="External Assemblies", compute="_compute_mandate_assembly_count"
)
def get_mandate_action(self):
"""
        Return an action for ext.mandate whose domain contains the
        specific tuples needed to get the concerned mandates
"""
self.ensure_one()
res_ids = self._get_assemblies()._get_mandates().ids
domain = [("id", "in", res_ids)]
# get model's action to update its domain
action = self.env["ir.actions.act_window"]._for_xml_id(
"mozaik_mandate.ext_mandate_action"
)
action["domain"] = domain
return action
def _get_assemblies(self):
"""
return the assemblies of the current partner
"""
self.ensure_one()
assembly_model = "ext.assembly"
if self.is_assembly:
field = "partner_id"
else:
field = "ref_partner_id"
domain = [(field, "=", self.id)]
assembly_obj = self.env[assembly_model]
assemblies = assembly_obj.search(domain)
return assemblies
def _compute_mandate_assembly_count(self):
"""
count the number of assemblies linked to the current partner
count the number of mandates linked to the assemblies of the
current partner
"""
for partner in self:
assemblies = partner._get_assemblies()
partner.ext_assembly_count = len(assemblies)
partner.ext_mandate_count = len(assemblies._get_mandates())
def add_mandate_action(self):
self.ensure_one()
return {
"type": "ir.actions.act_window",
"name": _("Add a new mandate"),
"res_model": self._context.get("mandate_model"),
"context": {"default_partner_id": self.id},
"view_mode": "form",
"target": "new",
}
| mozaik-association/mozaik | mozaik_mandate/models/res_partner.py | Python | agpl-3.0 | 3,660 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from default import Test, with_context
from factories import reset_all_pk_sequences
class TestAPI(Test):
endpoints = ['app', 'task', 'taskrun', 'user']
| stefanhahmann/pybossa | test/test_api/__init__.py | Python | agpl-3.0 | 904 |
# Copyright 2021 Alfredo de la Fuente - AvanzOSC
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import models, fields, api
class AccountGroup(models.Model):
_inherit = 'account.group'
length_account = fields.Integer(
string='Length account', compute='_compute_length_account',
store=True)
without_headquarter = fields.Boolean(
string='Without headquarter in invoices and accounting entries',
default=True)
@api.depends('code_prefix_start')
def _compute_length_account(self):
for group in self:
group.length_account = len(group.code_prefix_start)
def _find_account_group_headquarter(self):
found = False
group = self
while not found:
if not group.parent_id:
found = True
without_headquarter_control = group.without_headquarter
else:
cond = [('id', '=', group.parent_id.id)]
group = self.env['account.group'].search(cond, limit=1)
return without_headquarter_control
| avanzosc/odoo-addons | account_headquarter/models/account_group.py | Python | agpl-3.0 | 1,098 |
# -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import fields, models, api
from openerp.addons.event_track_assistant._common import\
_convert_to_utc_date, _convert_to_local_date, _convert_time_to_float
date2string = fields.Date.to_string
datetime2string = fields.Datetime.to_string
str2datetime = fields.Datetime.from_string
class WizEventAppendAssistant(models.TransientModel):
_inherit = 'wiz.event.append.assistant'
type_hour = fields.Many2one(
comodel_name='hr.type.hour', string='Type hour')
start_time = fields.Float(string='Start time', default=0.0)
end_time = fields.Float(string='End time', default=0.0)
@api.model
def default_get(self, var_fields):
tz = self.env.user.tz
res = super(WizEventAppendAssistant, self).default_get(var_fields)
res.update({
'start_time': _convert_time_to_float(
_convert_to_utc_date(res.get('min_from_date'), tz=tz), tz=tz),
'end_time': _convert_time_to_float(
_convert_to_utc_date(res.get('max_to_date'), tz=tz), tz=tz),
})
return res
@api.multi
@api.onchange('from_date', 'start_time', 'to_date', 'end_time', 'partner')
def onchange_dates_and_partner(self):
self.ensure_one()
res = super(WizEventAppendAssistant, self).onchange_dates_and_partner()
return res
def revert_dates(self):
tz = self.env.user.tz
super(WizEventAppendAssistant, self).revert_dates()
self.start_time = _convert_time_to_float(_convert_to_utc_date(
self.min_from_date, tz=tz), tz=tz)
self.end_time = _convert_time_to_float(_convert_to_utc_date(
self.max_to_date, tz=tz), tz=tz)
def _update_registration_start_date(self, registration):
super(WizEventAppendAssistant, self)._update_registration_start_date(
registration)
reg_date_start = str2datetime(registration.date_start)
if self.start_time:
wiz_from_date = _convert_to_utc_date(
self.from_date, time=self.start_time, tz=self.env.user.tz)
if wiz_from_date != reg_date_start:
registration.date_start = wiz_from_date
def _update_registration_date_end(self, registration):
super(WizEventAppendAssistant, self)._update_registration_date_end(
registration)
reg_date_end = str2datetime(registration.date_end)
if self.end_time:
wiz_to_date = _convert_to_utc_date(
self.to_date, time=self.end_time, tz=self.env.user.tz)
if wiz_to_date != reg_date_end:
registration.date_end = wiz_to_date
def _prepare_registration_data(self, event):
vals = super(WizEventAppendAssistant,
self)._prepare_registration_data(event)
date_start = _convert_to_local_date(self.from_date).date()
date_start = _convert_to_utc_date(
date_start, time=self.start_time, tz=self.env.user.tz)
date_end = _convert_to_local_date(self.to_date).date()
date_end = _convert_to_utc_date(
date_end, time=self.end_time, tz=self.env.user.tz)
vals.update({
'date_start': event.date_begin
if datetime2string(date_start) < event.date_begin else date_start,
'date_end': event.date_end
if datetime2string(date_end) > event.date_end else date_end,
})
return vals
def _calc_dates_for_search_track(self, from_date, to_date):
super(WizEventAppendAssistant,
self)._calc_dates_for_search_track(from_date, to_date)
from_date = self._prepare_date_for_control(
from_date, time=self.start_time or 0.0)
to_date = self._prepare_date_for_control(
to_date, time=self.end_time or 24.0)
return from_date, to_date
| avanzosc/event-wip | sale_order_create_event_hour/wizard/wiz_event_append_assistant.py | Python | agpl-3.0 | 3,966 |
# -*- coding: utf-8 -*-
# @copyright (C) 2014-2015
# Developers: BARDOU AUGUSTIN - BREZILLON ANTOINE - EUZEN DAVID - FRANCOIS SEBASTIEN - JOUNEAU NICOLAS - KIBEYA AISHA - LE CONG SEBASTIEN -
# MAGREZ VALENTIN - NGASSAM NOUMI PAOLA JOVANY - OUHAMMOUCH SALMA - RIAND MORGAN - TREIMOLEIRO ALEX - TRULLA AURELIEN
# @license https://www.gnu.org/licenses/gpl-3.0.html GPL version 3
from models import *
from django.contrib.auth.models import User as django_User
from datetime import datetime
from django import forms
from django.contrib.gis.geos import Point
class LoginForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
}
exclude = ['name', 'firstname', 'sex', 'city', 'zipCode', 'phone', 'idHomeAddress', 'idWorkAddress']
class EmailAuthBackend(object):
def authenticate(self,username=None, password=None):
try:
user = django_User.objects.get(email=username)
if user and check_password(password, user.password):
return user
except django_User.DoesNotExist:
return None
def authenticate2(self,username=None, password=None):
try:
user = Provider.objects.filter(idUser__mail__contains=username).first()
if user and (check_password(password, user.password)):
return user
except User.DoesNotExist:
return None
def auth_email(self, username=None):
try:
user = Provider.objects.filter(idUser__mail__contains=username).first()
if user:
return user
except User.DoesNotExist:
return None
def auth_email2(self, username=None):
try:
user = django_User.objects.get(email=username)
if user:
return user
except User.DoesNotExist:
return None
class ContactForm(forms.Form):
firstname = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'required': 'required'}))
lastname = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'required': 'required'}))
phone = forms.CharField(widget=forms.TextInput(
attrs={'maxlength': '10', 'aria-invalid': 'true', 'pattern': 'phone', 'required': 'required'}))
sender = forms.EmailField(widget=forms.EmailInput(attrs={'aria-invalid': 'false', 'pattern': 'email'}), required=False)
subjectCHOICES = (('Demandeur','Je cherche un trajet'),('Offreur','Je souhaite proposer un trajet'),
('Infos','Informations diverses'),('Autre','Autre'))
subject = forms.ChoiceField(choices=subjectCHOICES)
goalOfApplicationCHOICES = [('', '')] + list(MenusSettings.objects.filter(type="goalOfApplication").values_list('string', 'string'))
goalOfApplication = forms.ChoiceField(widget=forms.Select(attrs={'required':'required'}), choices=goalOfApplicationCHOICES, required=False)
yearOfBirthCHOICES = (tuple((str(n), str(n)) for n in range(1900, datetime.now().year - 15))+(('',''),))[::-1]
yearOfBirth = forms.ChoiceField(widget=forms.Select(attrs={'required':'required'}), choices=yearOfBirthCHOICES, required=False)
message = forms.CharField(widget=forms.Textarea(attrs={'required': 'required'}))
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.fields['goalOfApplication'].choices = get_menus_settings('goalOfApplication')
def get_menus_settings(type, required=True):
if required:
return [('', '')] + list(MenusSettings.objects.filter(type=type).values_list('string', 'string'))
else:
return list(MenusSettings.objects.filter(type=type).values_list('string', 'string'))
class UserRegisterForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'name': forms.TextInput(attrs={'required': 'required'}),
'firstname': forms.TextInput(attrs={'required': 'required'}),
'sex': forms.RadioSelect(attrs={'required': 'required'}),
'city': forms.TextInput(attrs={'required': 'required'}),
'zipCode': forms.TextInput(attrs={'maxlength': '5', 'aria-invalid': 'true', 'pattern': 'zipCode',
'required': 'required'}),
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
'phone': forms.TextInput(attrs={'maxlength': '10', 'aria-invalid': 'true',
'pattern': 'phone', 'required': 'required'}),
}
exclude = ['idHomeAddress', 'idWorkAddress']
class ProviderRegisterForm(forms.ModelForm):
class Meta:
model = Provider
howKnowledgeCHOICES = get_menus_settings('howKnowledge')
widgets = {
'password': forms.PasswordInput(attrs={'id': 'password', 'required': 'required'}),
'company': forms.TextInput(attrs={'list':'datalistCompany', 'autocomplete':'off'}),
'howKnowledge': forms.Select(attrs={'required':'required'}, choices=howKnowledgeCHOICES)
}
exclude = ['idUser', 'is_active', 'last_login']
def __init__(self, *args, **kwargs):
super(ProviderRegisterForm, self).__init__(*args, **kwargs)
self.fields['howKnowledge'].choices = get_menus_settings('howKnowledge')
class ProviderForm2(forms.ModelForm):
class Meta:
model = Provider
howKnowledgeCHOICES = [('','')] + list(MenusSettings.objects.filter(type="howKnowledge").values_list('string', 'string'))
widgets = {
'company': forms.TextInput(attrs={'list': 'datalistCompany', 'autocomplete': 'off'}),
'howKnowledge': forms.Select(attrs={'required': 'required'}, choices=howKnowledgeCHOICES)
}
exclude = ['idUser', 'is_active', 'last_login', 'password']
def __init__(self, *args, **kwargs):
super(ProviderForm2, self).__init__(*args, **kwargs)
self.fields['howKnowledge'].choices = get_menus_settings('howKnowledge')
class AddressRegisterForm(forms.ModelForm):
latlng = forms.CharField(widget=forms.HiddenInput(), required=False,)
cityHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
zipCodeHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
class Meta:
model = Address
widgets = {
'street':forms.TextInput(attrs={'class': 'field', 'placeholder': 'Indiquez un lieu',
'autocomplete': 'on', 'required': 'required'}),
}
exclude = ['idAddress', 'point', 'city', 'zipCode']
def clean(self):
cleaned_data = super(AddressRegisterForm, self).clean()
coord = cleaned_data['latlng'].replace('(', '')
city = cleaned_data['cityHide']
zipcode = cleaned_data['zipCodeHide']
if city == "":
city = "undefined"
if zipcode == "undefined" or zipcode == "":
zipcode = 0
if coord == "" or coord == "undefined":
raise forms.ValidationError("Bad address")
coord = coord.replace(')', '')
coordTab = coord.split(',')
cleaned_data['point'] = 'POINT(%f %f)' % (float(coordTab[0]), float(coordTab[1]))
cleaned_data['city'] = city
cleaned_data['zipCode'] = zipcode
return cleaned_data
class AddressRegisterFormWork(forms.ModelForm):
latlng = forms.CharField(widget=forms.HiddenInput(), required=False,)
cityHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
zipCodeHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
class Meta:
model = Address
widgets = {
'street': forms.TextInput(attrs={'class': 'field', 'placeholder': 'Indiquez un lieu', 'autocomplete': 'on',
'required': 'required'}),
}
exclude = ['idAddress', 'point', 'city', 'zipCode']
def clean(self):
cleaned_data = super(AddressRegisterFormWork, self).clean()
coord = cleaned_data['latlng'].replace('(', '')
city = cleaned_data['cityHide']
zipcode = cleaned_data['zipCodeHide']
if city == "":
city = "undefined"
if zipcode == "undefined" or zipcode == "":
zipcode = 0
if coord == "" or coord == "undefined":
raise forms.ValidationError("Bad address")
coord = coord.replace(')', '')
coordtab = coord.split(',')
cleaned_data['point'] = 'POINT(%f %f)' % (float(coordtab[0]), float(coordtab[1]))
cleaned_data['city'] = city
cleaned_data['zipCode']= zipcode
return cleaned_data
class PathDepartureRegisterForm(forms.ModelForm):
class Meta:
model = Path
widgets = {
'type': forms.HiddenInput(),
'day': forms.HiddenInput(),
'weekNumber': forms.HiddenInput(),
'schedule': forms.TimeInput(attrs={'class': 'time', 'data-format': 'HH:mm', 'data-template': 'HH : mm',
'value': '08:00'}),
}
exclude = ['idPath', 'idProvider', 'departure', 'arrival', 'startingWeek']
class PathArrivalRegisterForm(forms.ModelForm):
class Meta:
model = Path
widgets = {
'type': forms.HiddenInput(),
'day': forms.HiddenInput(),
'weekNumber': forms.HiddenInput(),
'schedule': forms.TimeInput(attrs={'class': 'time', 'data-format': 'HH:mm', 'data-template': 'HH : mm',
'value':'18:00'}),
}
exclude = ['idPath', 'idProvider', 'departure', 'arrival', 'startingWeek']
class TestUserRegisterForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'name': forms.TextInput(attrs={'required': 'required'}),
'firstname': forms.TextInput(attrs={'required': 'required'}),
'city': forms.TextInput(attrs={'required': 'required'}),
'zipCode': forms.TextInput(attrs={'maxlength': '5', 'aria-invalid': 'true', 'pattern': 'zipCode', 'required': 'required'}),
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
'phone': forms.TextInput(attrs={'maxlength': '10', 'aria-invalid': 'true', 'pattern': 'phone', 'required': 'required'}),
}
exclude = ['idHomeAddress', 'idWorkAddress', 'sex']
class newMdpForm(forms.Form):
oldmdp = forms.CharField(widget=forms.PasswordInput(), label='Ancien mot de passe', required=True)
newmdp1 = forms.CharField(widget=forms.PasswordInput(), label='Nouveau mot de passe', required=True) | ehopsolidaires/ehop-solidaires.fr | ehop/ehopSolidaire_providers_register/forms.py | Python | agpl-3.0 | 10,906 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import numbers
import copy
import random
import numpy as np
from nupic.data.fieldmeta import FieldMetaType
import nupic.math.roc_utils as roc
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.frameworks.opf.opfutils import InferenceType
from nupic.utils import MovingAverage
from collections import deque
from operator import itemgetter
from safe_interpreter import SafeInterpreter
from io import BytesIO, StringIO
from functools import partial
###############################################################################
# Public Metric specification class
###############################################################################
class MetricSpec(object):
""" This class represents a single Metrics specification in the TaskControl
block
"""
_LABEL_SEPARATOR = ":"
def __init__(self, metric, inferenceElement, field=None, params=None):
"""
metric: A metric type name that identifies which metrics module is
to be constructed by the metrics factory method
opf.metrics.getModule(); e.g., "rmse"
inferenceElement: Some inference types (such as classification), can output
more than one type of inference (i.e. the predicted class
AND the predicted next step). This field specifies which
of these inferences to compute the metrics on
field: Field name on which this metric is to be collected
params: Custom parameters dict for the metrics module's constructor
"""
self.metric = metric
self.inferenceElement = inferenceElement
self.field = field
self.params = params
return
def __repr__(self):
return "{0!s}(metric={1!r}, inferenceElement={2!r}, field={3!r}, params={4!r})".format(self.__class__.__name__,
self.metric,
self.inferenceElement,
self.field,
self.params)
def getLabel(self, inferenceType=None):
""" Helper method that generates a unique label
for a MetricSpec / InferenceType pair. The label is formatted
as follows:
<predictionKind>:<metric type>:(paramName=value)*:field=<fieldname>
For example:
classification:aae:paramA=10.2:paramB=20:window=100:field=pounds
"""
result = []
if inferenceType is not None:
result.append(InferenceType.getLabel(inferenceType))
result.append(self.inferenceElement)
result.append(self.metric)
params = self.params
if params is not None:
sortedParams= params.keys()
sortedParams.sort()
for param in sortedParams:
# Don't include the customFuncSource - it is too long an unwieldy
if param in ('customFuncSource', 'customFuncDef', 'customExpr'):
continue
value = params[param]
if isinstance(value, str):
result.extend(["{0!s}='{1!s}'".format(param, value)])
else:
result.extend(["{0!s}={1!s}".format(param, value)])
if self.field:
result.append("field={0!s}".format((self.field)) )
return self._LABEL_SEPARATOR.join(result)
@classmethod
def getInferenceTypeFromLabel(cls, label):
""" Extracts the PredicitonKind (temporal vs. nontemporal) from the given
metric label
Parameters:
-----------------------------------------------------------------------
    label:  A label (string) for a metric spec generated by getLabel()
    (above)
Returns: An InferenceType value
"""
infType, _, _= label.partition(cls._LABEL_SEPARATOR)
if not InferenceType.validate(infType):
return None
return infType
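# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original NuPIC source): how a
# MetricSpec label is assembled by getLabel(). The field name 'consumption'
# and the parameter values are made-up examples.
def _exampleMetricSpecLabel():
  spec = MetricSpec(metric="aae",
                    inferenceElement="prediction",
                    field="consumption",
                    params={"window": 100, "steps": 1})
  # Yields "prediction:aae:steps=1:window=100:field=consumption"
  return spec.getLabel()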
def getModule(metricSpec):
"""
factory method to return an appropriate MetricsIface-based module
args:
metricSpec - an instance of MetricSpec.
               metricSpec.metric must be one of the names handled below, e.g.:
rmse (root-mean-square error)
aae (average absolute error)
acc (accuracy, for enumerated types)
return:
an appropriate Metric module
"""
metricName = metricSpec.metric
if metricName == 'rmse':
return MetricRMSE(metricSpec)
if metricName == 'nrmse':
return MetricNRMSE(metricSpec)
elif metricName == 'aae':
return MetricAAE(metricSpec)
elif metricName == 'acc':
return MetricAccuracy(metricSpec)
elif metricName == 'avg_err':
return MetricAveError(metricSpec)
elif metricName == 'trivial':
return MetricTrivial(metricSpec)
elif metricName == 'two_gram':
return MetricTwoGram(metricSpec)
elif metricName == 'moving_mean':
return MetricMovingMean(metricSpec)
elif metricName == 'moving_mode':
return MetricMovingMode(metricSpec)
elif metricName == 'neg_auc':
return MetricNegAUC(metricSpec)
elif metricName == 'custom_error_metric':
return CustomErrorMetric(metricSpec)
elif metricName == 'multiStep':
return MetricMultiStep(metricSpec)
elif metricName == 'multiStepProbability':
return MetricMultiStepProbability(metricSpec)
elif metricName == 'ms_aae':
return MetricMultiStepAAE(metricSpec)
elif metricName == 'ms_avg_err':
return MetricMultiStepAveError(metricSpec)
elif metricName == 'passThruPrediction':
return MetricPassThruPrediction(metricSpec)
elif metricName == 'altMAPE':
return MetricAltMAPE(metricSpec)
elif metricName == 'MAPE':
return MetricMAPE(metricSpec)
elif metricName == 'multi':
return MetricMulti(metricSpec)
elif metricName == 'negativeLogLikelihood':
return MetricNegativeLogLikelihood(metricSpec)
else:
raise Exception("Unsupported metric type: {0!s}".format(metricName))
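# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original NuPIC source): minimal use of
# the getModule() factory defined above. The window size and the single
# groundTruth/prediction pair are arbitrary example values.
def _exampleGetModule():
  spec = MetricSpec(metric="rmse",
                    inferenceElement="prediction",
                    params={"window": 100})
  metric = getModule(spec)                    # returns a MetricRMSE instance
  metric.addInstance(groundTruth=10.0, prediction=12.0)
  return metric.getMetric()                   # {'value': 2.0, 'stats': {'steps': 1}}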
################################################################################
# Helper Methods and Classes #
################################################################################
class _MovingMode(object):
""" Helper class for computing windowed moving
mode of arbitrary values """
def __init__(self, windowSize = None):
"""
Parameters:
-----------------------------------------------------------------------
windowSize: The number of values that are used to compute the
moving average
"""
self._windowSize = windowSize
self._countDict = dict()
self._history = deque([])
def __call__(self, value):
if len(self._countDict) == 0:
pred = ""
else:
pred = max(self._countDict.items(), key = itemgetter(1))[0]
# Update count dict and history buffer
self._history.appendleft(value)
if not value in self._countDict:
self._countDict[value] = 0
self._countDict[value] += 1
if len(self._history) > self._windowSize:
removeElem = self._history.pop()
self._countDict[removeElem] -= 1
assert(self._countDict[removeElem] > -1)
return pred
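# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original NuPIC source): _MovingMode
# returns the most frequent value within the window *before* the current
# value is added, so the first call always returns "".
def _exampleMovingMode():
  mode = _MovingMode(windowSize=3)
  preds = [mode(v) for v in ("a", "a", "b", "b", "b")]
  # preds == ["", "a", "a", "a", "b"]
  return preds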
def _isNumber(value):
return isinstance(value, (numbers.Number, np.number))
class MetricsIface(object):
"""
A Metrics module compares a prediction Y to corresponding ground truth X and returns a single
measure representing the "goodness" of the prediction. It is up to the implementation to
determine how this comparison is made.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, metricSpec):
"""
instantiate a MetricsIface-based module.
args:
metricSpec is an instance of MetricSpec
"""
@abstractmethod
def addInstance(self, groundTruth, prediction, record = None, result = None):
""" add one instance consisting of ground truth and a prediction.
Parameters:
-----------------------------------------------------------------------
groundTruth:
The actual measured value at the current timestep
prediction:
The value predicted by the network at the current timestep
    record:
      The raw input record for the current timestep, if available. Only a
      few metrics (for example two_gram) make use of it.
    result:
      A ModelResult instance (see opfutils.py)
return:
The average error as computed over the metric's window size
"""
@abstractmethod
def getMetric(self):
"""
return:
{value : <current measurement>, "stats" : {<stat> : <value> ...}}
      The metric name is defined by the MetricsIface implementation. "stats" is
      expected to contain further information relevant to the given metric, for
      example the number of timesteps represented in the current measurement.
      All stats are implementation defined, and "stats" can be None.
"""
class AggregateMetric(MetricsIface):
"""
Partial implementation of Metrics Interface for metrics that
accumulate an error and compute an aggregate score, potentially
over some window of previous data. This is a convenience class that
can serve as the base class for a wide variety of metrics
"""
  __metaclass__ = ABCMeta
#FIXME @abstractmethod - this should be marked abstract method and required to be implemented
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result):
"""
Updates the accumulated error given the prediction and the
ground truth.
groundTruth: Actual value that is observed for the current timestep
prediction: Value predicted by the network for the given timestep
accumulatedError: The total accumulated score from the previous
predictions (possibly over some finite window)
historyBuffer: A buffer of the last <self.window> ground truth values
that have been observed.
If historyBuffer = None, it means that no history is being kept.
    result: A ModelResult instance (see opfutils.py), used for advanced
metric calculation (e.g., MetricNegativeLogLikelihood)
retval:
The new accumulated error. That is:
self.accumulatedError = self.accumulate(groundTruth, predictions, accumulatedError)
historyBuffer should also be updated in this method.
self.spec.params["window"] indicates the maximum size of the window
"""
#FIXME @abstractmethod - this should be marked abstract method and required to be implemented
def aggregate(self, accumulatedError, historyBuffer, steps):
"""
Updates the final aggregated score error given the prediction and the
ground truth.
accumulatedError: The total accumulated score from the previous
predictions (possibly over some finite window)
historyBuffer: A buffer of the last <self.window> ground truth values
that have been observed.
If historyBuffer = None, it means that no history is being kept.
steps: The total number of (groundTruth, prediction) pairs that have
been passed to the metric. This does not include pairs where
the groundTruth = SENTINEL_VALUE_FOR_MISSING_DATA
retval:
The new aggregate (final) error measure.
"""
def __init__(self, metricSpec):
""" Initialize this metric
If the params contains the key 'errorMetric', then that is the name of
another metric to which we will pass a modified groundTruth and prediction
to from our addInstance() method. For example, we may compute a moving mean
on the groundTruth and then pass that to the AbsoluteAveError metric
"""
# Init default member variables
self.id = None
self.verbosity = 0
self.window = -1
self.history = None
self.accumulatedError = 0
self.aggregateError = None
self.steps = 0
self.spec = metricSpec
self.disabled = False
    # Number of steps ahead we are trying to predict. This is a list of
    # the prediction steps we are processing
self._predictionSteps = [0]
# Where we store the ground truth history
self._groundTruthHistory = deque([])
# The instances of another metric to which we will pass a possibly modified
# groundTruth and prediction to from addInstance(). There is one instance
# for each step present in self._predictionSteps
self._subErrorMetrics = None
# The maximum number of records to process. After this many records have
# been processed, the metric value never changes. This can be used
# as the optimization metric for swarming, while having another metric without
# the maxRecords limit to get an idea as to how well a production model
# would do on the remaining data
self._maxRecords = None
# Parse the metric's parameters
if metricSpec is not None and metricSpec.params is not None:
self.id = metricSpec.params.get('id', None)
self._predictionSteps = metricSpec.params.get('steps', [0])
# Make sure _predictionSteps is a list
if not hasattr(self._predictionSteps, '__iter__'):
self._predictionSteps = [self._predictionSteps]
self.verbosity = metricSpec.params.get('verbosity', 0)
self._maxRecords = metricSpec.params.get('maxRecords', None)
# Get the metric window size
if 'window' in metricSpec.params:
assert metricSpec.params['window'] >= 1
self.history = deque([])
self.window = metricSpec.params['window']
# Get the name of the sub-metric to chain to from addInstance()
if 'errorMetric' in metricSpec.params:
self._subErrorMetrics = []
for step in self._predictionSteps:
subSpec = copy.deepcopy(metricSpec)
# Do all ground truth shifting before we pass onto the sub-metric
subSpec.params.pop('steps', None)
subSpec.params.pop('errorMetric')
subSpec.metric = metricSpec.params['errorMetric']
self._subErrorMetrics.append(getModule(subSpec))
def _getShiftedGroundTruth(self, groundTruth):
""" Utility function that saves the passed in groundTruth into a local
history buffer, and returns the groundTruth from self._predictionSteps ago,
where self._predictionSteps is defined by the 'steps' parameter.
This can be called from the beginning of a derived class's addInstance()
before it passes groundTruth and prediction onto accumulate().
"""
# Save this ground truth into our input history
self._groundTruthHistory.append(groundTruth)
# This is only supported when _predictionSteps has one item in it
assert (len(self._predictionSteps) == 1)
# Return the one from N steps ago
if len(self._groundTruthHistory) > self._predictionSteps[0]:
return self._groundTruthHistory.popleft()
else:
if hasattr(groundTruth, '__iter__'):
return [None] * len(groundTruth)
else:
return None
def addInstance(self, groundTruth, prediction, record = None, result = None):
# This base class does not support time shifting the ground truth or a
# subErrorMetric.
assert (len(self._predictionSteps) == 1)
assert self._predictionSteps[0] == 0
assert self._subErrorMetrics is None
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
return self.aggregateError
if self.verbosity > 0:
print "groundTruth:\n{0!s}\nPredictions:\n{1!s}\n{2!s}\n".format(groundTruth,
prediction, self.getMetric())
# Ignore if we've reached maxRecords
if self._maxRecords is not None and self.steps >= self._maxRecords:
return self.aggregateError
    # If there is a sub-metric, chain into its addInstance
# Accumulate the error
self.accumulatedError = self.accumulate(groundTruth, prediction,
self.accumulatedError, self.history, result)
self.steps += 1
return self._compute()
def getMetric(self):
return {'value': self.aggregateError, "stats" : {"steps" : self.steps}}
def _compute(self):
self.aggregateError = self.aggregate(self.accumulatedError, self.history,
self.steps)
return self.aggregateError
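# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original NuPIC source): the
# accumulate()/aggregate() split in action, using MetricAAE (defined below)
# with a window of 2 so that only the two most recent errors count.
def _exampleWindowedAggregate():
  spec = MetricSpec(metric="aae", inferenceElement="prediction",
                    params={"window": 2})
  aae = getModule(spec)
  for gt, pred in [(10.0, 11.0), (10.0, 14.0), (10.0, 10.0)]:
    aae.addInstance(gt, pred)
  # The window now holds the last two absolute errors (4.0 and 0.0) -> mean 2.0
  return aae.getMetric()                      # {'value': 2.0, 'stats': {'steps': 3}}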
class MetricNegativeLogLikelihood(AggregateMetric):
"""
computes negative log-likelihood. Likelihood is the predicted probability of
the true data from a model. It is more powerful than metrics that only considers
the single best prediction (e.g. MSE) as it considers the entire probability
distribution predicted by a model.
It is more appropriate to use likelihood as the error metric when multiple
predictions are possible.
"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result):
bucketll = result.inferences['multiStepBucketLikelihoods']
bucketIdxTruth = result.classifierInput.bucketIndex
if bucketIdxTruth is not None:
# a manually set minimum prediction probability so that the log(LL) doesn't blow up
minProb = 0.00001
negLL = 0
for step in bucketll.keys():
outOfBucketProb = 1 - sum(bucketll[step].values())
if bucketIdxTruth in bucketll[step].keys():
prob = bucketll[step][bucketIdxTruth]
else:
prob = outOfBucketProb
if prob < minProb:
prob = minProb
negLL -= np.log(prob)
accumulatedError += negLL
if historyBuffer is not None:
historyBuffer.append(negLL)
if len(historyBuffer) > self.spec.params["window"]:
accumulatedError -= historyBuffer.popleft()
return accumulatedError
def aggregate(self, accumulatedError, historyBuffer, steps):
n = steps
if historyBuffer is not None:
n = len(historyBuffer)
return accumulatedError / float(n)
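# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original NuPIC source): the per-record
# arithmetic behind MetricNegativeLogLikelihood.accumulate(), spelled out for
# one invented bucket-likelihood dict. It does not build a real ModelResult;
# it only mirrors the -log(probability) bookkeeping.
def _exampleNegLogLikelihoodStep():
  bucketLL = {1: {3: 0.7, 5: 0.2}}     # step -> {bucketIndex: probability}
  bucketIdxTruth = 3                   # bucket index of the observed value
  minProb = 0.00001
  negLL = 0.0
  for step in bucketLL:
    outOfBucketProb = 1 - sum(bucketLL[step].values())
    prob = bucketLL[step].get(bucketIdxTruth, outOfBucketProb)
    negLL -= np.log(max(prob, minProb))
  return negLL                         # -log(0.7), roughly 0.357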
class MetricRMSE(AggregateMetric):
"""
computes root-mean-square error
"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
error = (groundTruth - prediction)**2
accumulatedError += error
if historyBuffer is not None:
historyBuffer.append(error)
if len(historyBuffer) > self.spec.params["window"] :
accumulatedError -= historyBuffer.popleft()
return accumulatedError
def aggregate(self, accumulatedError, historyBuffer, steps):
n = steps
if historyBuffer is not None:
n = len(historyBuffer)
return np.sqrt(accumulatedError / float(n))
class MetricNRMSE(MetricRMSE):
"""computes normalized root-mean-square error"""
def __init__(self, *args, **kwargs):
super(MetricNRMSE, self).__init__(*args, **kwargs)
self.groundTruths = []
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
self.groundTruths.append(groundTruth)
return super(MetricNRMSE, self).accumulate(groundTruth,
prediction,
accumulatedError,
historyBuffer,
result)
def aggregate(self, accumulatedError, historyBuffer, steps):
rmse = super(MetricNRMSE, self).aggregate(accumulatedError,
historyBuffer,
steps)
denominator = np.std(self.groundTruths)
return rmse / denominator if denominator > 0 else float("inf")
class MetricAAE(AggregateMetric):
"""
computes average absolute error
"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
error = abs(groundTruth - prediction)
accumulatedError += error
if historyBuffer is not None:
historyBuffer.append(error)
if len(historyBuffer) > self.spec.params["window"] :
accumulatedError -= historyBuffer.popleft()
return accumulatedError
def aggregate(self, accumulatedError, historyBuffer, steps):
n = steps
if historyBuffer is not None:
n = len(historyBuffer)
return accumulatedError/ float(n)
class MetricAltMAPE(AggregateMetric):
"""
computes the "Alternative" Mean Absolute Percent Error.
A generic MAPE computes the percent error for each sample, and then gets
an average. This can suffer from samples where the actual value is very small
or zero - this one sample can drastically alter the mean.
This metric on the other hand first computes the average of the actual values
and the averages of the errors before dividing. This washes out the effects of
a small number of samples with very small actual values.
"""
def __init__(self, metricSpec):
super(MetricAltMAPE, self).__init__(metricSpec)
self._accumulatedGroundTruth = 0
self._accumulatedError = 0
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
return self.aggregateError
# Compute absolute error
error = abs(groundTruth - prediction)
if self.verbosity > 0:
print "MetricAltMAPE:\n groundTruth: %s\n Prediction: " \
"%s\n Error: %s" % (groundTruth, prediction, error)
# Update the accumulated groundTruth and aggregate error
if self.history is not None:
self.history.append((groundTruth, error))
if len(self.history) > self.spec.params["window"] :
(oldGT, oldErr) = self.history.popleft()
self._accumulatedGroundTruth -= oldGT
self._accumulatedError -= oldErr
self._accumulatedGroundTruth += abs(groundTruth)
self._accumulatedError += error
# Compute aggregate pct error
if self._accumulatedGroundTruth > 0:
self.aggregateError = 100.0 * self._accumulatedError / \
self._accumulatedGroundTruth
else:
self.aggregateError = 0
if self.verbosity >= 1:
print " accumGT:", self._accumulatedGroundTruth
print " accumError:", self._accumulatedError
print " aggregateError:", self.aggregateError
self.steps += 1
return self.aggregateError
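# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original NuPIC source): why altMAPE is
# less sensitive to tiny actual values than the classic MAPE, using two
# made-up samples (actuals 100 and 0.1, both predicted 10 too high).
def _exampleAltMAPEVsMAPE():
  actuals = [100.0, 0.1]
  predictions = [110.0, 10.1]
  errors = [abs(a - p) for a, p in zip(actuals, predictions)]
  altMAPE = 100.0 * sum(errors) / sum(actuals)                           # ~20%
  classicMAPE = 100.0 * sum(e / a for e, a in zip(errors, actuals)) / 2  # ~5005%
  return altMAPE, classicMAPE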
class MetricMAPE(AggregateMetric):
"""
computes the "Classic" Mean Absolute Percent Error.
This computes the percent error for each sample, and then gets
an average. Note that this can suffer from samples where the actual value is
very small or zero - this one sample can drastically alter the mean. To
avoid this potential issue, use 'altMAPE' instead.
This metric is provided mainly as a convenience when comparing results against
other investigations that have also used MAPE.
"""
def __init__(self, metricSpec):
super(MetricMAPE, self).__init__(metricSpec)
self._accumulatedPctError = 0
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
return self.aggregateError
# Compute absolute error
if groundTruth != 0:
pctError = float(abs(groundTruth - prediction))/groundTruth
else:
# Ignore this sample
if self.verbosity > 0:
print "Ignoring sample with groundTruth of 0"
self.steps += 1
return self.aggregateError
if self.verbosity > 0:
print "MetricMAPE:\n groundTruth: %s\n Prediction: " \
"%s\n Error: %s" % (groundTruth, prediction, pctError)
# Update the accumulated groundTruth and aggregate error
if self.history is not None:
self.history.append(pctError)
if len(self.history) > self.spec.params["window"] :
(oldPctErr) = self.history.popleft()
self._accumulatedPctError -= oldPctErr
self._accumulatedPctError += pctError
# Compute aggregate pct error
self.aggregateError = 100.0 * self._accumulatedPctError / len(self.history)
if self.verbosity >= 1:
print " accumPctError:", self._accumulatedPctError
print " aggregateError:", self.aggregateError
self.steps += 1
return self.aggregateError
class MetricPassThruPrediction(MetricsIface):
"""
This is not a metric, but rather a facility for passing the predictions
generated by a baseline metric through to the prediction output cache produced
by a model.
For example, if you wanted to see the predictions generated for the TwoGram
  metric, you would specify 'passThruPrediction' as the 'errorMetric' parameter.
This metric class simply takes the prediction and outputs that as the
aggregateMetric value.
"""
def __init__(self, metricSpec):
self.spec = metricSpec
self.window = metricSpec.params.get("window", 1)
self.avg = MovingAverage(self.window)
self.value = None
def addInstance(self, groundTruth, prediction, record = None, result = None):
"""Compute and store metric value"""
self.value = self.avg(prediction)
def getMetric(self):
"""Return the metric value """
return {"value": self.value}
#def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer):
# # Simply return the prediction as the accumulated error
# return prediction
#
#def aggregate(self, accumulatedError, historyBuffer, steps):
# # Simply return the prediction as the aggregateError
# return accumulatedError
class MetricMovingMean(AggregateMetric):
"""
computes error metric based on moving mean prediction
"""
def __init__(self, metricSpec):
# This metric assumes a default 'steps' of 1
if not 'steps' in metricSpec.params:
metricSpec.params['steps'] = 1
super(MetricMovingMean, self).__init__(metricSpec)
# Only supports 1 item in _predictionSteps
assert (len(self._predictionSteps) == 1)
self.mean_window = 10
if metricSpec.params.has_key('mean_window'):
assert metricSpec.params['mean_window'] >= 1
self.mean_window = metricSpec.params['mean_window']
# Construct moving average instance
self._movingAverage = MovingAverage(self.mean_window)
def getMetric(self):
return self._subErrorMetrics[0].getMetric()
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self._subErrorMetrics[0].aggregateError
if self.verbosity > 0:
print "groundTruth:\n{0!s}\nPredictions:\n{1!s}\n{2!s}\n".format(groundTruth, prediction, self.getMetric())
# Use ground truth from 'steps' steps ago as our most recent ground truth
lastGT = self._getShiftedGroundTruth(groundTruth)
if lastGT is None:
return self._subErrorMetrics[0].aggregateError
mean = self._movingAverage(lastGT)
return self._subErrorMetrics[0].addInstance(groundTruth, mean, record)
def evalCustomErrorMetric(expr, prediction, groundTruth, tools):
sandbox = SafeInterpreter(writer=StringIO())
if isinstance(prediction, dict):
sandbox.symtable['prediction'] = tools.mostLikely(prediction)
sandbox.symtable['EXP'] = tools.expValue(prediction)
sandbox.symtable['probabilityDistribution'] = prediction
else:
sandbox.symtable['prediction'] = prediction
sandbox.symtable['groundTruth'] = groundTruth
sandbox.symtable['tools'] = tools
error = sandbox(expr)
return error
class CustomErrorMetric(MetricsIface):
"""
Custom Error Metric class that handles user defined error metrics
"""
class CircularBuffer():
"""
    implementation of a fixed-size circular buffer with constant-time random access
"""
def __init__(self,length):
#Create an array to back the buffer
#If the length<0 create a zero length array
self.data = [None for i in range(max(length,0))]
self.elements = 0
self.index = 0
self.dataLength = length
def getItem(self,n):
#Get item from n steps back
if n >= self.elements or (n >= self.dataLength and not self.dataLength < 0):
assert False,"Trying to access data not in the stored window"
return None
if self.dataLength>=0:
getInd = (self.index-n-1)%min(self.elements,self.dataLength)
else:
getInd = (self.index-n-1)%self.elements
return self.data[getInd]
def pushToEnd(self,obj):
ret = None
#If storing everything simply append right to the list
if(self.dataLength < 0 ):
self.data.append(obj)
self.index+=1
self.elements+=1
return None
if(self.elements==self.dataLength):
#pop last added element
ret = self.data[self.index % self.dataLength]
else:
#else push new element and increment the element counter
self.elements += 1
self.data[self.index % self.dataLength] = obj
self.index += 1
return ret
def __len__(self):
return self.elements
def __init__(self,metricSpec):
self.metricSpec = metricSpec
self.steps = 0
self.error = 0
self.averageError = None
self.errorMatrix = None
self.evalError = self.evalAbsErr
self.errorWindow = 1
self.storeWindow=-1
self.userDataStore = dict()
if "errorWindow" in metricSpec.params:
self.errorWindow = metricSpec.params["errorWindow"]
      assert self.errorWindow != 0 , "Window size cannot be zero"
if "storeWindow" in metricSpec.params:
self.storeWindow = metricSpec.params["storeWindow"]
      assert self.storeWindow != 0 , "Window size cannot be zero"
self.errorStore = self.CircularBuffer(self.errorWindow)
self.recordStore = self.CircularBuffer(self.storeWindow)
if "customExpr" in metricSpec.params:
assert not "customFuncDef" in metricSpec.params
assert not "customFuncSource" in metricSpec.params
self.evalError = partial(evalCustomErrorMetric, metricSpec.params["customExpr"])
elif "customFuncSource" in metricSpec.params:
assert not "customFuncDef" in metricSpec.params
assert not "customExpr" in metricSpec.params
exec(metricSpec.params["customFuncSource"])
#pull out defined function from locals
self.evalError = locals()["getError"]
elif "customFuncDef" in metricSpec.params:
assert not "customFuncSource" in metricSpec.params
assert not "customExpr" in metricSpec.params
self.evalError = metricSpec.params["customFuncDef"]
def getPrediction(self,n):
#Get prediction from n steps ago
return self.recordStore.getItem(n)["prediction"]
def getFieldValue(self,n,field):
#Get field value from record n steps ago
record = self.recordStore.getItem(n)["record"]
value = record[field]
return value
def getGroundTruth(self,n):
#Get the groundTruth from n steps ago
return self.recordStore.getItem(n)["groundTruth"]
def getBufferLen(self):
return len(self.recordStore)
def storeData(self,name,obj):
#Store custom user data
self.userDataStore[name] = obj
def getData(self,name):
#Retrieve user data
if name in self.userDataStore:
return self.userDataStore[name]
return None
def mostLikely(self, pred):
""" Helper function to return a scalar value representing the most
likely outcome given a probability distribution
"""
if len(pred) == 1:
return pred.keys()[0]
mostLikelyOutcome = None
maxProbability = 0
for prediction, probability in pred.items():
if probability > maxProbability:
mostLikelyOutcome = prediction
maxProbability = probability
return mostLikelyOutcome
def expValue(self, pred):
""" Helper function to return a scalar value representing the expected
value of a probability distribution
"""
if len(pred) == 1:
return pred.keys()[0]
return sum([x*p for x,p in pred.items()])
def evalAbsErr(self,pred,ground):
return abs(pred-ground)
def getMetric(self):
return {'value': self.averageError, "stats" : {"steps" : self.steps}}
def addInstance(self, groundTruth, prediction, record = None, result = None):
#If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
return self.averageError
self.recordStore.pushToEnd({"groundTruth":groundTruth,
"prediction":prediction,"record":record})
if isinstance(prediction, dict):
assert not any(True for p in prediction if p is None), \
"Invalid prediction of `None` in call to {0!s}.addInstance()".format( \
self.__class__.__name__)
error = self.evalError(prediction,groundTruth,self)
popped = self.errorStore.pushToEnd({"error":error})
if not popped is None:
#Subtract error that dropped out of the buffer
self.error -= popped["error"]
self.error+= error
self.averageError = float(self.error)/self.errorStore.elements
self.steps+=1
return self.averageError
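# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original NuPIC source): defining a
# custom error metric from an expression string. 'prediction' and
# 'groundTruth' are the symbols injected by evalCustomErrorMetric(); the
# expression and window size are arbitrary examples, and the expected result
# assumes the SafeInterpreter evaluates plain arithmetic expressions.
def _exampleCustomErrorMetric():
  spec = MetricSpec(metric="custom_error_metric",
                    inferenceElement="prediction",
                    params={"customExpr": "(prediction - groundTruth) ** 2",
                            "errorWindow": 10})
  metric = getModule(spec)
  metric.addInstance(groundTruth=5.0, prediction=7.5)
  return metric.getMetric()            # {'value': 6.25, 'stats': {'steps': 1}}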
class MetricMovingMode(AggregateMetric):
"""
computes error metric based on moving mode prediction
"""
def __init__(self, metricSpec):
super(MetricMovingMode, self).__init__(metricSpec)
self.mode_window = 100
if metricSpec.params.has_key('mode_window'):
assert metricSpec.params['mode_window'] >= 1
self.mode_window = metricSpec.params['mode_window']
# Only supports one stepsize
assert len(self._predictionSteps) == 1
# Construct moving average instance
self._movingMode = _MovingMode(self.mode_window)
def getMetric(self):
return self._subErrorMetrics[0].getMetric()
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self._subErrorMetrics[0].aggregateError
if self.verbosity > 0:
print "groundTruth:\n{0!s}\nPredictions:\n{1!s}\n{2!s}\n".format(groundTruth, prediction,
self.getMetric())
# Use ground truth from 'steps' steps ago as our most recent ground truth
lastGT = self._getShiftedGroundTruth(groundTruth)
if lastGT is None:
return self._subErrorMetrics[0].aggregateError
mode = self._movingMode(lastGT)
result = self._subErrorMetrics[0].addInstance(groundTruth, mode, record)
return result
class MetricTrivial(AggregateMetric):
"""
computes a metric against the ground truth N steps ago. The metric to
compute is designated by the 'errorMetric' entry in the metric params.
"""
def __init__(self, metricSpec):
# This metric assumes a default 'steps' of 1
if not 'steps' in metricSpec.params:
metricSpec.params['steps'] = 1
super(MetricTrivial, self).__init__(metricSpec)
# Only supports one stepsize
assert len(self._predictionSteps) == 1
# Must have a suberror metric
assert self._subErrorMetrics is not None, "This metric requires that you" \
+ " specify the name of another base metric via the 'errorMetric' " \
+ " parameter."
def getMetric(self):
return self._subErrorMetrics[0].getMetric()
def addInstance(self, groundTruth, prediction, record = None, result = None):
# Use ground truth from 'steps' steps ago as our "prediction"
prediction = self._getShiftedGroundTruth(groundTruth)
if self.verbosity > 0:
print "groundTruth:\n{0!s}\nPredictions:\n{1!s}\n{2!s}\n".format(groundTruth,
prediction, self.getMetric())
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self._subErrorMetrics[0].aggregateError
# Our "prediction" is simply what happened 'steps' steps ago
return self._subErrorMetrics[0].addInstance(groundTruth, prediction, record)
class MetricTwoGram(AggregateMetric):
"""
computes error metric based on one-grams. The groundTruth passed into
this metric is the encoded output of the field (an array of 1's and 0's).
"""
def __init__(self, metricSpec):
# This metric assumes a default 'steps' of 1
if not 'steps' in metricSpec.params:
metricSpec.params['steps'] = 1
super(MetricTwoGram, self).__init__(metricSpec)
# Only supports 1 stepsize
assert len(self._predictionSteps) == 1
# Must supply the predictionField
assert(metricSpec.params.has_key('predictionField'))
self.predictionField = metricSpec.params['predictionField']
self.twoGramDict = dict()
def getMetric(self):
return self._subErrorMetrics[0].getMetric()
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data return previous error (assuming one gram will always
# receive an instance of ndarray)
if groundTruth.any() == False:
return self._subErrorMetrics[0].aggregateError
# Get actual ground Truth value from record. For this metric, the
# "groundTruth" parameter is the encoder output and we use actualGroundTruth
# to hold the input to the encoder (either a scalar or a category string).
#
# We will use 'groundTruthKey' (the stringified encoded value of
# groundTruth) as the key for our one-gram dict and the 'actualGroundTruth'
# as the values in our dict, which are used to compute our prediction.
actualGroundTruth = record[self.predictionField]
# convert binary array to a string
groundTruthKey = str(groundTruth)
# Get the ground truth key from N steps ago, that is what we will base
# our prediction on. Note that our "prediction" is the prediction for the
# current time step, to be compared to actualGroundTruth
prevGTKey = self._getShiftedGroundTruth(groundTruthKey)
# -------------------------------------------------------------------------
# Get the prediction based on the previously known ground truth
# If no previous, just default to "" or 0, depending on the groundTruth
# data type.
if prevGTKey is None:
if isinstance(actualGroundTruth,str):
pred = ""
else:
pred = 0
# If the previous was never seen before, create a new dict for it.
elif not prevGTKey in self.twoGramDict:
if isinstance(actualGroundTruth,str):
pred = ""
else:
pred = 0
# Create a new dict for it
self.twoGramDict[prevGTKey] = {actualGroundTruth:1}
# If it was seen before, compute the prediction from the past history
else:
# Find most often occurring 1-gram
if isinstance(actualGroundTruth,str):
# Get the most frequent category that followed the previous timestep
twoGramMax = max(self.twoGramDict[prevGTKey].items(), key=itemgetter(1))
pred = twoGramMax[0]
else:
# Get average of all possible values that followed the previous
# timestep
pred = sum(self.twoGramDict[prevGTKey].iterkeys())
pred /= len(self.twoGramDict[prevGTKey])
# Add current ground truth to dict
if actualGroundTruth in self.twoGramDict[prevGTKey]:
self.twoGramDict[prevGTKey][actualGroundTruth] += 1
else:
self.twoGramDict[prevGTKey][actualGroundTruth] = 1
if self.verbosity > 0:
print "\nencoding:{0!s}\nactual:{1!s}\nprevEncoding:{2!s}\nprediction:{3!s}\nmetric:{4!s}".format(groundTruth, actualGroundTruth, prevGTKey, pred, self.getMetric())
return self._subErrorMetrics[0].addInstance(actualGroundTruth, pred, record)
class MetricAccuracy(AggregateMetric):
"""
computes simple accuracy for an enumerated type. all inputs are treated as
discrete members of a set, therefore for example 0.5 is only a correct
response if the ground truth is exactly 0.5. Inputs can be strings, integers,
or reals
"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
# This is really an accuracy measure rather than an "error" measure
error = 1.0 if groundTruth == prediction else 0.0
accumulatedError += error
if historyBuffer is not None:
historyBuffer.append(error)
if len(historyBuffer) > self.spec.params["window"] :
accumulatedError -= historyBuffer.popleft()
return accumulatedError
def aggregate(self, accumulatedError, historyBuffer, steps):
n = steps
if historyBuffer is not None:
n = len(historyBuffer)
return accumulatedError/ float(n)
class MetricAveError(AggregateMetric):
"""Simply the inverse of the Accuracy metric
More consistent with scalar metrics because
they all report an error to be minimized"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
error = 1.0 if groundTruth != prediction else 0.0
accumulatedError += error
if historyBuffer is not None:
historyBuffer.append(error)
if len(historyBuffer) > self.spec.params["window"] :
accumulatedError -= historyBuffer.popleft()
return accumulatedError
def aggregate(self, accumulatedError, historyBuffer, steps):
n = steps
if historyBuffer is not None:
n = len(historyBuffer)
return accumulatedError/ float(n)
class MetricNegAUC(AggregateMetric):
""" Computes -1 * AUC (Area Under the Curve) of the ROC (Receiver Operator
Characteristics) curve. We compute -1 * AUC because metrics are optimized
to be LOWER when running hypersearch.
  For this, we assume that category 1 is the "positive" category and
we are generating an ROC curve with the TPR (True Positive Rate) of
category 1 on the y-axis and the FPR (False Positive Rate) on the x-axis.
"""
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
""" Accumulate history of groundTruth and "prediction" values.
For this metric, groundTruth is the actual category and "prediction" is a
dict containing one top-level item with a key of 0 (meaning this is the
    0-step classification) and a value which is another dict, which contains the
probability for each category as output from the classifier. For example,
this is what "prediction" would be if the classifier said that category 0
had a 0.6 probability and category 1 had a 0.4 probability: {0:0.6, 1: 0.4}
"""
# We disable it within aggregate() if we find that the classifier classes
# are not compatible with AUC calculations.
if self.disabled:
return 0
# Just store the groundTruth, probability into our history buffer. We will
# wait until aggregate gets called to actually compute AUC.
if historyBuffer is not None:
historyBuffer.append((groundTruth, prediction[0]))
if len(historyBuffer) > self.spec.params["window"] :
historyBuffer.popleft()
# accumulatedError not used in this metric
return 0
def aggregate(self, accumulatedError, historyBuffer, steps):
# If disabled, do nothing.
if self.disabled:
return 0.0
if historyBuffer is not None:
n = len(historyBuffer)
else:
return 0.0
# For performance reasons, only re-compute this every 'computeEvery' steps
frequency = self.spec.params.get('computeEvery', 1)
if ((steps+1) % frequency) != 0:
return self.aggregateError
# Compute the ROC curve and the area underneath it
actuals = [gt for (gt, probs) in historyBuffer]
classes = np.unique(actuals)
# We can only compute ROC when we have at least 1 sample of each category
if len(classes) < 2:
return -1 * 0.5
# Print warning the first time this metric is asked to be computed on a
# problem with more than 2 classes
if sorted(classes) != [0,1]:
print "WARNING: AUC only implemented for binary classifications where " \
"the categories are category 0 and 1. In this network, the " \
"categories are: %s" % (classes)
print "WARNING: Computation of this metric is disabled for the remainder of " \
"this experiment."
self.disabled = True
return 0.0
# Compute the ROC and AUC. Note that because we are online, there's a
# chance that some of the earlier classification probabilities don't
# have the True class (category 1) yet because it hasn't been seen yet.
# Therefore, we use probs.get() with a default value of 0.
scores = [probs.get(1, 0) for (gt, probs) in historyBuffer]
(fpr, tpr, thresholds) = roc.ROCCurve(actuals, scores)
auc = roc.AreaUnderCurve(fpr, tpr)
# Debug?
if False:
print
print "AUC metric debug info ({0:d} steps):".format((steps))
print " actuals:", actuals
print " probabilities:", ["{0:.2f}".format(x) for x in scores]
print " fpr:", fpr
print " tpr:", tpr
print " thresholds:", thresholds
print " AUC:", auc
return -1 * auc
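# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original NuPIC source): the prediction
# format expected by MetricNegAUC - a dict keyed by step 0 whose value maps
# each category to its probability. The two records below are invented and
# happen to be perfectly separated, so the expected metric value is -1.0.
def _exampleNegAUC():
  spec = MetricSpec(metric="neg_auc", inferenceElement="classification",
                    params={"window": 100, "computeEvery": 1})
  metric = getModule(spec)
  metric.addInstance(groundTruth=1, prediction={0: {0: 0.3, 1: 0.7}})
  metric.addInstance(groundTruth=0, prediction={0: {0: 0.8, 1: 0.2}})
  return metric.getMetric()            # expected {'value': -1.0, 'stats': {'steps': 2}}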
class MetricMultiStep(AggregateMetric):
"""
This is an "uber" metric which is used to apply one of the other basic
metrics to a specific step in a multi-step prediction.
The specParams are expected to contain:
'errorMetric': name of basic metric to apply
'steps': compare prediction['steps'] to the current
ground truth.
Note that the metrics manager has already performed the time shifting
for us - it passes us the prediction element from 'steps' steps ago
and asks us to compare that to the current ground truth.
When multiple steps of prediction are requested, we average the results of
the underlying metric for each step.
"""
def __init__(self, metricSpec):
super(MetricMultiStep, self).__init__(metricSpec)
assert self._subErrorMetrics is not None
def getMetric(self):
return {'value': self.aggregateError, "stats" : {"steps" : self.steps}}
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self.aggregateError
# Get the prediction for this time step
aggErrSum = 0
try:
for step, subErrorMetric in \
zip(self._predictionSteps, self._subErrorMetrics):
stepPrediction = prediction[step]
# Unless this is a custom_error_metric, when we have a dict of
# probabilities, get the most probable one. For custom error metrics,
# we pass the probabilities in so that it can decide how best to deal with
# them.
if isinstance(stepPrediction, dict) \
and not isinstance(subErrorMetric, CustomErrorMetric):
predictions = [(prob,value) for (value, prob) in \
stepPrediction.iteritems()]
predictions.sort()
stepPrediction = predictions[-1][1]
# Get sum of the errors
aggErr = subErrorMetric.addInstance(groundTruth, stepPrediction, record, result)
if self.verbosity >= 2:
print "MetricMultiStep {0!s}: aggErr for stepSize {1:d}: {2!s}".format(self._predictionSteps, step, aggErr)
aggErrSum += aggErr
except:
pass
# Return average aggregate error across all step sizes
self.aggregateError = aggErrSum / len(self._subErrorMetrics)
if self.verbosity >= 2:
print "MetricMultiStep {0!s}: aggErrAvg: {1!s}".format(self._predictionSteps,
self.aggregateError)
self.steps += 1
if self.verbosity >= 1:
print "\nMetricMultiStep %s: \n groundTruth: %s\n Predictions: %s" \
"\n Metric: %s" % (self._predictionSteps, groundTruth, prediction,
self.getMetric())
return self.aggregateError
class MetricMultiStepProbability(AggregateMetric):
"""
This is an "uber" metric which is used to apply one of the other basic
metrics to a specific step in a multi-step prediction.
The specParams are expected to contain:
'errorMetric': name of basic metric to apply
'steps': compare prediction['steps'] to the current
ground truth.
Note that the metrics manager has already performed the time shifting
for us - it passes us the prediction element from 'steps' steps ago
and asks us to compare that to the current ground truth.
"""
def __init__(self, metricSpec):
# Default window should be 1
if not 'window' in metricSpec.params:
metricSpec.params['window'] = 1
super(MetricMultiStepProbability, self).__init__(metricSpec)
# Must have a suberror metric
assert self._subErrorMetrics is not None, "This metric requires that you" \
+ " specify the name of another base metric via the 'errorMetric' " \
+ " parameter."
# Force all subErrorMetric windows to 1. This is necessary because by
# default they each do their own history averaging assuming that their
    # addInstance() gets called once per iteration. But, in this metric
# we actually call into each subErrorMetric multiple times per iteration
for subErrorMetric in self._subErrorMetrics:
subErrorMetric.window = 1
subErrorMetric.spec.params['window'] = 1
self._movingAverage = MovingAverage(self.window)
def getMetric(self):
return {'value': self.aggregateError, "stats" :
{"steps" : self.steps}}
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self.aggregateError
if self.verbosity >= 1:
print "\nMetricMultiStepProbability %s: \n groundTruth: %s\n " \
"Predictions: %s" % (self._predictionSteps, groundTruth,
prediction)
# Get the aggregateErrors for all requested step sizes and average them
aggErrSum = 0
for step, subErrorMetric in \
zip(self._predictionSteps, self._subErrorMetrics):
stepPrediction = prediction[step]
# If it's a dict of probabilities, get the expected value
error = 0
if isinstance(stepPrediction, dict):
expectedValue = 0
# For every possible prediction multiply its error by its probability
for (pred, prob) in stepPrediction.iteritems():
error += subErrorMetric.addInstance(groundTruth, pred, record) \
* prob
else:
error += subErrorMetric.addInstance(groundTruth, stepPrediction,
record)
if self.verbosity >= 2:
print ("MetricMultiStepProbability {0!s}: aggErr for stepSize {1:d}: {2!s}".format(self._predictionSteps, step, error))
aggErrSum += error
# Return aggregate error
avgAggErr = aggErrSum / len(self._subErrorMetrics)
self.aggregateError = self._movingAverage(avgAggErr)
if self.verbosity >= 2:
print ("MetricMultiStepProbability %s: aggErr over all steps, this "
"iteration (%d): %s" % (self._predictionSteps, self.steps, avgAggErr))
print ("MetricMultiStepProbability {0!s}: aggErr moving avg: {1!s}".format(self._predictionSteps, self.aggregateError))
self.steps += 1
if self.verbosity >= 1:
print "MetricMultiStepProbability {0!s}: \n Error: {1!s}\n Metric: {2!s}".format(self._predictionSteps, avgAggErr, self.getMetric())
return self.aggregateError
class MetricMulti(MetricsIface):
"""Multi metric can combine multiple other (sub)metrics and
weight them to provide combined score."""
def __init__(self, metricSpec):
"""MetricMulti constructor using metricSpec is not allowed."""
raise ValueError("MetricMulti cannot be constructed from metricSpec string! "
"Use MetricMulti(weights,metrics) constructor instead.")
def __init__(self, weights, metrics, window=None):
"""MetricMulti
@param weights - [list of floats] used as weights
@param metrics - [list of submetrics]
@param window - (opt) window size for moving average, or None when disabled
"""
if (weights is None or not isinstance(weights, list) or
not len(weights) > 0 or
not isinstance(weights[0], float)):
raise ValueError("MetricMulti requires 'weights' parameter as a [list of floats]")
self.weights = weights
if (metrics is None or not isinstance(metrics, list) or
not len(metrics) > 0 or
not isinstance(metrics[0], MetricsIface)):
raise ValueError("MetricMulti requires 'metrics' parameter as a [list of Metrics]")
self.metrics = metrics
    if window is not None:
      self.movingAvg = MovingAverage(windowSize=window)
    else:
      self.movingAvg = None
    # addInstance() reads self.verbosity, but nothing else sets it for this
    # class, so initialize it here to avoid an AttributeError on first use.
    self.verbosity = 0
def addInstance(self, groundTruth, prediction, record = None, result = None):
err = 0.0
subResults = [m.addInstance(groundTruth, prediction, record) for m in self.metrics]
for i in xrange(len(self.weights)):
if subResults[i] is not None:
err += subResults[i]*self.weights[i]
else: # submetric returned None, propagate
self.err = None
return None
if self.verbosity > 2:
print "IN=",groundTruth," pred=",prediction,": w=",self.weights[i]," metric=",self.metrics[i]," value=",m," err=",err
if self.movingAvg is not None:
err=self.movingAvg(err)
self.err = err
return err
def __repr__(self):
return "MetricMulti(weights={0!s}, metrics={1!s})".format(self.weights, self.metrics)
def getMetric(self):
return {'value': self.err, "stats" : {"weights" : self.weights}}
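# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original NuPIC source): combining two
# submetrics with MetricMulti. The weights and the single groundTruth /
# prediction pair are arbitrary example values.
def _exampleMetricMulti():
  aae = getModule(MetricSpec("aae", "prediction", params={"window": 10}))
  rmse = getModule(MetricSpec("rmse", "prediction", params={"window": 10}))
  combined = MetricMulti(weights=[0.5, 0.5], metrics=[aae, rmse])
  combined.verbosity = 0   # addInstance() reads this attribute before printing
  combined.addInstance(groundTruth=10.0, prediction=13.0)
  # Both submetrics report an error of 3.0, so 0.5*3.0 + 0.5*3.0 == 3.0
  return combined.getMetric()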
| runt18/nupic | src/nupic/frameworks/opf/metrics.py | Python | agpl-3.0 | 54,801 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2011-15 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import print_calendar_report
| mohamedhagag/community-addons | hr_attendance_analysis/wizard/__init__.py | Python | agpl-3.0 | 1,055 |
import datetime
from django.db.models import Q
from django.http import HttpResponse, HttpResponseServerError, Http404, HttpResponseNotFound, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.template import RequestContext
from django.core import serializers
from django.contrib.auth.decorators import login_required
from django.core.exceptions import MultipleObjectsReturned
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.forms.models import formset_factory, modelformset_factory, inlineformset_factory, BaseModelFormSet
from django.forms import ValidationError
import json as simplejson
from django.utils.datastructures import SortedDict
from django.contrib.auth.forms import UserCreationForm
from django.conf import settings
from django_rea.valueaccounting.models import *
from django_rea.board.forms import *
from django_rea.valueaccounting.views import get_agent
def default_context_agent():
return EconomicAgent.objects.get(id=3) #todo: BIG hack alert!!!!
#todo: a lot of this can be configured instead of hard-coded
def dhen_board(request, context_agent_id=None):
#import pdb; pdb.set_trace()
agent = get_agent(request)
pattern = ProcessPattern.objects.get(name="Herbs")
selected_resource_type = None
#filter_form = FilterForm(pattern=pattern, data=request.POST or None,)
if context_agent_id:
context_agent = EconomicAgent.objects.get(id=context_agent_id)
else:
context_agent = default_context_agent()
seller = EconomicAgent.objects.get(id=4) #todo: even worse hack!!
rec_extype = ExchangeType.objects.get(name="Purchase to Drying Site")
e_date = datetime.date.today()
init = {"start_date": e_date }
available_extype = ExchangeType.objects.get(name="Make Available")
available_form = AvailableForm(initial=init, exchange_type=available_extype, context_agent=context_agent, prefix="AVL")
init = {"event_date": e_date, "paid": "later", }
receive_form = ReceiveForm(initial=init, exchange_type=rec_extype, context_agent=context_agent, prefix="REC")
et = EventType.objects.get(name="Resource Production")
farm_stage = None
#harvester_stage = ExchangeType.objects.get(name="Farm to Harvester")
dryer_stage = ExchangeType.objects.get(name="Harvester to Drying Site")
seller_stage = ExchangeType.objects.get(name="Drying Site to Seller")
rts = pattern.get_resource_types(event_type=et)
for rt in rts:
init = {"event_date": e_date,}
rt.farm_commits = rt.commits_for_exchange_stage(stage=farm_stage)
for com in rt.farm_commits:
if com.start_date > e_date:
com.future = True
prefix = com.form_prefix()
qty_help = " ".join([com.unit_of_quantity.abbrev, ", up to 2 decimal places"])
com.transfer_form = ExchangeFlowForm(initial=init, qty_help=qty_help, assoc_type_identifier="DryingSite", context_agent=context_agent, prefix=prefix)
com.zero_form = ZeroOutForm(prefix=prefix)
com.lot_form = NewResourceForm(prefix=prefix)
com.multiple_formset = create_exchange_formset(context_agent=context_agent, assoc_type_identifier="Harvester", prefix=prefix)
rt.dryer_resources = rt.onhand_for_exchange_stage(stage=dryer_stage)
init = {"event_date": e_date, "paid": "later"}
for res in rt.dryer_resources:
prefix = res.form_prefix()
qty_help = " ".join([res.unit_of_quantity().abbrev, ", up to 2 decimal places"])
res.transfer_form = TransferFlowForm(initial=init, qty_help=qty_help, assoc_type_identifier="Seller", context_agent=context_agent, prefix=prefix)
rt.seller_resources = rt.onhand_for_exchange_stage(stage=seller_stage)
if rt.seller_resources:
init_rt = {"event_date": e_date,}
rt.combine_form = CombineResourcesForm(prefix = rt.form_prefix(), initial=init_rt, resource_type=rt, stage=seller_stage)
return render_to_response("board/dhen_board.html", {
"agent": agent,
"context_agent": context_agent,
"seller": seller,
"available_form": available_form,
"receive_form": receive_form,
#"filter_form": filter_form,
"resource_types": rts,
"available_extype": available_extype,
}, context_instance=RequestContext(request))
@login_required
def add_available(request, context_agent_id):
if request.method == "POST":
#import pdb; pdb.set_trace()
context_agent = EconomicAgent.objects.get(id=context_agent_id)
form = AvailableForm(data=request.POST, prefix="AVL")
if form.is_valid():
commit = form.save(commit=False)
commit.event_type = EventType.objects.get(name="Give")
commit.to_agent = context_agent
commit.context_agent = context_agent
commit.due_date = commit.start_date
commit.commitment_date = commit.start_date
commit.unit_of_quantity = commit.resource_type.unit
commit.exchange_stage = None
commit.created_by = request.user
commit.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def receive_directly(request, context_agent_id):
if request.method == "POST":
#import pdb; pdb.set_trace()
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = ExchangeType.objects.get(name="Harvester to Drying Site")
exchange_type = ExchangeType.objects.get(name="Purchase to Drying Site") #todo: odd to have stage different....
form = ReceiveForm(data=request.POST, prefix="REC")
if form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
identifier = data["identifier"]
from_agent = data["from_agent"]
to_agent = data["to_agent"]
resource_type = data["resource_type"]
quantity = data["quantity"]
description = data["description"]
paid = data["paid"]
value = data["value"]
unit_of_value = data["unit_of_value"]
receive_et = EventType.objects.get(name="Receive")
give_et = EventType.objects.get(name="Give")
pay_rt = EconomicResourceType.objects.filter(unit__unit_type="value")[0]
exchange = Exchange(
name="Purchase " + resource_type.name + " from " + from_agent.nick,
use_case=UseCase.objects.get(identifier="supply_xfer"),
start_date=event_date,
context_agent=context_agent,
exchange_type=exchange_type,
created_by=request.user,
)
exchange.save()
resource = EconomicResource(
identifier=identifier,
resource_type=resource_type,
quantity=quantity,
exchange_stage=stage,
notes=description,
created_by=request.user
)
resource.save()
transfer_type = exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
notes = description,
created_by = request.user
)
xfer.save()
event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource_type,
transfer = xfer,
exchange_stage=stage,
from_agent = from_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = resource_type.unit,
value = value,
unit_of_value = unit_of_value,
description=description,
created_by = request.user,
)
event.save()
if paid == "paid":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource_type.name
pay_xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
notes = description,
created_by = request.user
)
pay_xfer.save()
pay_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
transfer = pay_xfer,
exchange_stage=stage,
from_agent = event.to_agent,
to_agent = event.from_agent,
context_agent = context_agent,
quantity = value,
unit_of_quantity = unit_of_value,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event.save()
elif paid == "later":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource_type.name
pay_xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
notes = description,
created_by = request.user
)
pay_xfer.save()
commit = Commitment (
commitment_date=event_date,
event_type=give_et,
transfer=pay_xfer,
exchange_stage=stage,
due_date=event_date,
from_agent=event.to_agent,
to_agent=event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value,
unit_of_quantity=unit_of_value,
value=value,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
def create_exchange_formset(context_agent, assoc_type_identifier, prefix, data=None):
ExchangeFormSet = formset_factory(MultipleExchangeEventForm, extra=10)
#init = {"paid": "paid"}
formset = ExchangeFormSet(data=data, prefix=prefix)
to_agents = context_agent.all_has_associates_by_type(assoc_type_identifier=assoc_type_identifier)
for form in formset:
#id = int(form["facet_id"].value())
form.fields["to_agent"].queryset = to_agents
form.fields["paid_stage_1"].initial = "never"
form.fields["paid_stage_2"].initial = "later"
return formset
#todo: hardcoded recipe and exchange types
def get_next_stage(exchange_type=None):
if not exchange_type:
next_stage = ExchangeType.objects.get(name="Farm to Harvester")
elif exchange_type.name == "Farm to Harvester":
next_stage = ExchangeType.objects.get(name="Harvester to Drying Site")
elif exchange_type.name == "Harvester to Drying Site":
next_stage = ExchangeType.objects.get(name="Drying Site to Seller")
else:
next_stage = None
return next_stage
@login_required
def purchase_resource(request, context_agent_id, commitment_id): #this is the farm > harvester > drying site, confusing name
if request.method == "POST":
#import pdb; pdb.set_trace()
commitment = get_object_or_404(Commitment, id=commitment_id)
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = None
next_stage = get_next_stage(stage)
next_next_stage = get_next_stage(next_stage)
prefix = commitment.form_prefix()
form = ExchangeFlowForm(prefix=prefix, data=request.POST)
lot_form = NewResourceForm(prefix=prefix, data=request.POST)
zero_form = ZeroOutForm(prefix=prefix, data=request.POST)
if zero_form.is_valid():
#import pdb; pdb.set_trace()
zero_data = zero_form.cleaned_data
zero_out = zero_data["zero_out"]
if zero_out == True:
commitment.finished = True
commitment.save()
if form.is_valid() and lot_form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
to_agent = data["to_agent"]
unit_of_value = data["unit_of_value"]
notes = data["notes"]
lot_data = lot_form.cleaned_data
identifier = lot_data["identifier"]
purch_use_case = UseCase.objects.get(identifier="supply_xfer")
purch_exchange_type = ExchangeType.objects.get(name="Farm to Harvester")
xfer_use_case = UseCase.objects.get(identifier="intrnl_xfer")
xfer_exchange_type = ExchangeType.objects.get(name="Harvester to Drying Site")
proc_use_case = UseCase.objects.get(identifier="rand")
proc_pattern = None
proc_patterns = [puc.pattern for puc in proc_use_case.patterns.all()]
if proc_patterns:
proc_pattern = proc_patterns[0]
give_et = EventType.objects.get(name="Give")
receive_et = EventType.objects.get(name="Receive")
consume_et = EventType.objects.get(name="Resource Consumption")
produce_et = EventType.objects.get(name="Resource Production")
pay_rt = EconomicResourceType.objects.filter(unit__unit_type="value")[0]
formset = create_exchange_formset(prefix=prefix, data=request.POST, context_agent=context_agent, assoc_type_identifier="Harvester")
quantity = 0
ces = []
#import pdb; pdb.set_trace()
for form_ee in formset.forms:
if form_ee.is_valid():
data_ee = form_ee.cleaned_data
breakout_to_agent = data_ee["to_agent"]
if breakout_to_agent:
breakout_quantity = data_ee["quantity"]
quantity += breakout_quantity
value_stage_1 = data_ee["value_stage_1"]
paid_stage_1 = data_ee["paid_stage_1"]
value_stage_2 = data_ee["value_stage_2"]
paid_stage_2 = data_ee["paid_stage_2"]
exchange = Exchange(
name="Transfer " + commitment.resource_type.name + " from farm",
use_case=purch_use_case,
exchange_type=purch_exchange_type,
start_date=event_date,
context_agent=context_agent,
created_by=request.user,
)
exchange.save()
resource = EconomicResource(
identifier=commitment.resource_type.name + " from farm",
resource_type=commitment.resource_type,
quantity=0,
exchange_stage=next_next_stage,
created_by=request.user
)
resource.save()
transfer_type = purch_exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
receipt_event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_stage,
transfer=xfer,
commitment=commitment,
from_agent = commitment.from_agent,
to_agent = breakout_to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
value = value_stage_1,
unit_of_value = unit_of_value,
created_by = request.user,
)
receipt_event.save()
if paid_stage_1 == "paid":
if value_stage_1 > 0:
transfer_type = purch_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
pay_event_1 = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
exchange_stage=next_stage,
transfer=xfer,
from_agent = receipt_event.to_agent,
to_agent = receipt_event.from_agent,
context_agent = context_agent,
quantity = value_stage_1,
unit_of_quantity = unit_of_value,
value = value_stage_1,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event_1.save()
elif paid_stage_1 == "later":
if value_stage_1 > 0:
transfer_type = purch_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
commit_1 = Commitment (
commitment_date=event_date,
event_type=give_et,
exchange_stage=next_stage,
transfer=xfer,
due_date=event_date,
from_agent=receipt_event.to_agent,
to_agent=receipt_event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value_stage_1,
unit_of_quantity=unit_of_value,
value=value_stage_1,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit_1.save()
xfer_exchange = Exchange(
name="Transfer " + commitment.resource_type.name,
use_case=xfer_use_case,
start_date=event_date,
context_agent=context_agent,
exchange_type=xfer_exchange_type,
created_by=request.user,
)
xfer_exchange.save()
transfer_type = xfer_exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
xfer_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_next_stage,
transfer=xfer,
from_agent = breakout_to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_event.save()
xfer_event_receive = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_next_stage,
transfer=xfer,
from_agent = breakout_to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_event_receive.save()
if paid_stage_2 == "paid":
if value_stage_2 > 0:
transfer_type = xfer_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
pay_event_2 = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
transfer = xfer,
exchange_stage=next_next_stage,
from_agent = xfer_event.to_agent,
to_agent = xfer_event.from_agent,
context_agent = context_agent,
quantity = value_stage_2,
unit_of_quantity = unit_of_value,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event_2.save()
pay_event_2_receive = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource_type = pay_rt,
transfer = xfer,
exchange_stage=next_next_stage,
from_agent = xfer_event.to_agent,
to_agent = xfer_event.from_agent,
context_agent = context_agent,
quantity = value_stage_2,
unit_of_quantity = unit_of_value,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event_2_receive.save()
elif paid_stage_2 == "later":
if value_stage_2 > 0:
transfer_type = xfer_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
commit_2 = Commitment (
commitment_date=event_date,
event_type=give_et,
transfer=xfer,
exchange_stage=next_next_stage,
due_date=event_date,
from_agent=xfer_event.to_agent,
to_agent=xfer_event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value_stage_2,
unit_of_quantity=unit_of_value,
value=value_stage_2,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit_2.save()
consume_event = EconomicEvent(
event_type = consume_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_next_stage,
from_agent = to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
created_by = request.user,
)
consume_event.save()
ces.append(consume_event)
process = Process(
name="Combined harvested: new lot",
process_pattern=proc_pattern,
end_date=event_date,
start_date=event_date,
started=event_date,
context_agent=context_agent,
finished=True,
process_type=ProcessType.objects.get(name="Into Drying Room"),
created_by=request.user,
)
process.save()
for ce in ces:
ce.process = process
ce.save()
prod_resource = EconomicResource(
identifier=identifier,
resource_type=commitment.resource_type,
quantity=quantity,
exchange_stage=next_next_stage,
notes=notes,
created_by=request.user
)
prod_resource.save()
prod_event = EconomicEvent(
event_type = produce_et,
event_date = event_date,
resource = prod_resource,
resource_type = prod_resource.resource_type,
exchange_stage=next_next_stage,
process = process,
from_agent = to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = prod_resource.resource_type.unit,
description=notes,
created_by = request.user,
)
prod_event.save()
#todo: put skip stage here!
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def transfer_resource(request, context_agent_id, resource_id): #this is drying site to seller
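    # Moves an existing resource one stage forward ("Drying Site to Seller"): records give and
    # receive events inside a new internal-transfer exchange, updates the resource's stage,
    # quantity and notes, and either records a payment ("paid") or creates a payment
    # commitment ("later") for the stated value.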
if request.method == "POST":
#import pdb; pdb.set_trace()
resource = get_object_or_404(EconomicResource, id=resource_id)
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = ExchangeType.objects.get(name="Harvester to Drying Site")
next_stage = get_next_stage(stage)
prefix = resource.form_prefix()
form = TransferFlowForm(prefix=prefix, data=request.POST)
if form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
to_agent = data["to_agent"]
quantity = data["quantity"]
value = data["value"]
if not value:
value = 0
unit_of_value = data["unit_of_value"]
paid = data["paid"]
notes = data["notes"]
xfer_use_case = UseCase.objects.get(identifier="intrnl_xfer")
exchange_type = next_stage
give_et = EventType.objects.get(name="Give")
receive_et = EventType.objects.get(name="Receive")
pay_rt = EconomicResourceType.objects.filter(unit__unit_type="value")[0]
#import pdb; pdb.set_trace()
xfer_exchange = Exchange(
name="Transfer " + resource.resource_type.name,
use_case=xfer_use_case,
start_date=event_date,
context_agent=context_agent,
exchange_type=exchange_type,
created_by=request.user,
)
xfer_exchange.save()
transfer_type = exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + resource.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
xfer_give_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
transfer=xfer,
exchange_stage=next_stage,
from_agent = resource.owner_based_on_exchange(),
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = resource.resource_type.unit,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_give_event.save()
xfer_rec_event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
transfer=xfer,
exchange_stage=next_stage,
from_agent = resource.owner_based_on_exchange(),
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = resource.resource_type.unit,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_rec_event.save()
resource.exchange_stage = next_stage
resource.quantity = quantity
if resource.notes:
resource.notes = resource.notes + " ------- " + notes
else:
resource.notes = notes
resource.save()
if paid == "paid":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
pay_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
transfer=xfer,
exchange_stage=next_stage,
from_agent = xfer_give_event.to_agent,
to_agent = xfer_give_event.from_agent,
context_agent = context_agent,
quantity = value,
unit_of_quantity = unit_of_value,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event.save()
pay_rec_event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource_type = pay_rt,
transfer=xfer,
exchange_stage=next_stage,
from_agent = xfer_give_event.to_agent,
to_agent = xfer_give_event.from_agent,
context_agent = context_agent,
quantity = value,
unit_of_quantity = unit_of_value,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
                    pay_rec_event.save()
elif paid == "later":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
commit = Commitment (
commitment_date=event_date,
event_type=give_et,
transfer=xfer,
exchange_stage=next_stage,
due_date=event_date,
from_agent=xfer_give_event.to_agent,
to_agent=xfer_give_event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value,
unit_of_quantity=unit_of_value,
value=value,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
def combine_resources(request, context_agent_id, resource_type_id):
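    # Consumes the selected resources into a "Combine Lots" process and produces a single new
    # lot of the given resource type whose quantity is the sum of the consumed quantities.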
if request.method == "POST":
#import pdb; pdb.set_trace()
resource_type = get_object_or_404(EconomicResourceType, id=resource_type_id)
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = ExchangeType.objects.get(name="Drying Site to Seller") #actually the stage here should be the process stage, and the rest should handle that
prefix = resource_type.form_prefix()
form = CombineResourcesForm(prefix=prefix, data=request.POST)
if form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
resources = data["resources"]
identifier = data["identifier"]
notes = data["notes"]
proc_use_case = UseCase.objects.get(identifier="rand")
proc_pattern = None
proc_patterns = [puc.pattern for puc in proc_use_case.patterns.all()]
if proc_patterns:
proc_pattern = proc_patterns[0]
consume_et = EventType.objects.get(name="Resource Consumption")
produce_et = EventType.objects.get(name="Resource Production")
if resources:
process = Process(
name="Combined: new lot",
process_pattern=proc_pattern,
end_date=event_date,
start_date=event_date,
started=event_date,
context_agent=context_agent,
finished=True,
process_type=ProcessType.objects.get(name="Combine Lots"),
created_by=request.user,
)
process.save()
qty = 0
for res in resources:
consume_event = EconomicEvent(
event_type = consume_et,
event_date = event_date,
resource = res,
resource_type = res.resource_type,
process=process,
exchange_stage=stage,
from_agent = res.owner_based_on_exchange(),
to_agent = res.owner_based_on_exchange(),
context_agent = context_agent,
quantity = res.quantity,
unit_of_quantity = res.resource_type.unit,
created_by = request.user,
)
consume_event.save()
qty += res.quantity
res.quantity = 0
res.save()
prod_resource = EconomicResource(
identifier=identifier,
resource_type=resource_type,
quantity=qty,
exchange_stage=stage,
notes=notes,
created_by=request.user
)
prod_resource.save()
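                # Note: 'res' below is simply the last resource consumed in the loop above;
                # this implicitly assumes all of the combined lots share the same owner.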
prod_event = EconomicEvent(
event_type = produce_et,
event_date = event_date,
resource = prod_resource,
resource_type = prod_resource.resource_type,
exchange_stage=stage,
process = process,
from_agent = res.owner_based_on_exchange(),
to_agent = res.owner_based_on_exchange(),
context_agent = context_agent,
quantity = qty,
unit_of_quantity = prod_resource.resource_type.unit,
description=notes,
created_by = request.user,
)
prod_event.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def change_available(request, commitment_id):
commitment = get_object_or_404(Commitment, pk=commitment_id)
context_agent_id = commitment.context_agent.id
if request.method == "POST":
prefix = commitment.form_prefix()
form = CommitmentForm(instance=commitment, data=request.POST, prefix=prefix)
if form.is_valid():
data = form.cleaned_data
form.save()
commitment.unit_of_quantity = commitment.resource_type.unit
commitment.save()
zero_form = ZeroOutForm(prefix=prefix, data=request.POST)
if zero_form.is_valid():
zero_data = zero_form.cleaned_data
zero_out = zero_data["zero_out"]
                if zero_out:
commitment.finished = True
commitment.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def delete_farm_commitment(request, commitment_id):
commitment = get_object_or_404(Commitment, pk=commitment_id)
context_agent_id = commitment.context_agent.id
if commitment.is_deletable():
commitment.delete()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def undo_col2(request, resource_id):
resource = get_object_or_404(EconomicResource, pk=resource_id)
context_agent_id = default_context_agent().id
#import pdb; pdb.set_trace()
flows = resource.incoming_value_flows()
for item in flows:
if item.class_label() == "Economic Event":
if item.commitment:
commit = item.commitment
commit.finished = False
commit.save()
item.delete()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def undo_col3(request, resource_id):
resource = get_object_or_404(EconomicResource, pk=resource_id)
context_agent_id = default_context_agent().id
#import pdb; pdb.set_trace()
flows = resource.incoming_value_flows()
#todo: I'm not sure how to delete the right rows without going too far back in the chain......
#for item in flows:
# if item.class_label() == "Economic Event":
# item.delete()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
| django-rea/nrp | django_rea/board/views.py | Python | agpl-3.0 | 46,450 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('meinberlin_budgeting', '0007_update-strings'),
]
operations = [
migrations.AlterField(
model_name='proposal',
name='description',
field=ckeditor.fields.RichTextField(verbose_name='Description'),
),
migrations.AlterField(
model_name='proposal',
name='name',
field=models.CharField(max_length=120, verbose_name='Name'),
),
]
| liqd/a4-meinberlin | meinberlin/apps/budgeting/migrations/0008_auto_20170529_1302.py | Python | agpl-3.0 | 663 |
import string
import random
import json
from collections import defaultdict
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from catmaid.fields import Double3D
from catmaid.models import Log, NeuronSearch, CELL_BODY_CHOICES, \
SORT_ORDERS_DICT, Relation, Class, ClassInstance, \
ClassInstanceClassInstance
def _create_relation(user, project_id, relation_id, instance_a_id, instance_b_id):
relation = ClassInstanceClassInstance()
relation.user = user
relation.project_id = project_id
relation.relation_id = relation_id
relation.class_instance_a_id = instance_a_id
relation.class_instance_b_id = instance_b_id
relation.save()
return relation
def insert_into_log(project_id, user_id, op_type, location=None, freetext=None):
""" Inserts a new entry into the log table. If the location parameter is
    passed, it is expected to be an iterable (list, tuple).
"""
# valid operation types
operation_type_array = [
"rename_root",
"create_neuron",
"rename_neuron",
"remove_neuron",
"move_neuron",
"create_group",
"rename_group",
"remove_group",
"move_group",
"create_skeleton",
"rename_skeleton",
"remove_skeleton",
"move_skeleton",
"split_skeleton",
"join_skeleton",
"reroot_skeleton",
"change_confidence"
]
    if op_type not in operation_type_array:
return {'error': 'Operation type {0} not valid'.format(op_type)}
new_log = Log()
new_log.user_id = user_id
new_log.project_id = project_id
new_log.operation_type = op_type
    if location is not None:
new_log.location = Double3D(*location)
    if freetext is not None:
new_log.freetext = freetext
new_log.save()
# $q = $db->insertIntoId('log', $data );
# echo json_encode( array ( 'error' => "Failed to insert operation $op_type for user $uid in project %pid." ) );
# Tip from: http://lincolnloop.com/blog/2008/may/10/getting-requestcontext-your-templates/
# Required because we need a RequestContext, not just a Context - the
# former looks at TEMPLATE_CONTEXT_PROCESSORS, while the latter doesn't.
def my_render_to_response(req, *args, **kwargs):
kwargs['context_instance'] = RequestContext(req)
return render_to_response(*args, **kwargs)
def json_error_response(message):
"""
When an operation fails we should return a JSON dictionary
with the key 'error' set to an error message. This is a
helper method to return such a structure:
"""
return HttpResponse(json.dumps({'error': message}),
content_type='text/json')
def order_neurons(neurons, order_by=None):
column, reverse = 'name', False
if order_by and (order_by in SORT_ORDERS_DICT):
column, reverse, _ = SORT_ORDERS_DICT[order_by]
if column == 'name':
neurons.sort(key=lambda x: x.name)
elif column == 'gal4':
neurons.sort(key=lambda x: x.cached_sorted_lines_str)
elif column == 'cell_body':
neurons.sort(key=lambda x: x.cached_cell_body)
else:
raise Exception("Unknown column (%s) in order_neurons" % (column,))
if reverse:
neurons.reverse()
return neurons
# Both index and visual_index take a request and kwargs and then
# return a list of neurons and a NeuronSearch form:
def get_form_and_neurons(request, project_id, kwargs):
# If we've been passed parameters in a REST-style GET request,
# create a form from them. Otherwise, if it's a POST request,
# create the form from the POST parameters. Otherwise, it's a
# plain request, so create the default search form.
rest_keys = ('search', 'cell_body_location', 'order_by')
if any((x in kwargs) for x in rest_keys):
kw_search = kwargs.get('search', None) or ""
kw_cell_body_choice = kwargs.get('cell_body_location', None) or "a"
kw_order_by = kwargs.get('order_by', None) or 'name'
search_form = NeuronSearch({'search': kw_search,
'cell_body_location': kw_cell_body_choice,
'order_by': kw_order_by})
elif request.method == 'POST':
search_form = NeuronSearch(request.POST)
else:
search_form = NeuronSearch({'search': '',
'cell_body_location': 'a',
'order_by': 'name'})
if search_form.is_valid():
search = search_form.cleaned_data['search']
cell_body_location = search_form.cleaned_data['cell_body_location']
order_by = search_form.cleaned_data['order_by']
else:
search = ''
cell_body_location = 'a'
order_by = 'name'
cell_body_choices_dict = dict(CELL_BODY_CHOICES)
all_neurons = ClassInstance.objects.filter(
project__id=project_id,
class_column__class_name='neuron',
name__icontains=search).exclude(name='orphaned pre').exclude(name='orphaned post')
if cell_body_location != 'a':
location = cell_body_choices_dict[cell_body_location]
all_neurons = all_neurons.filter(
project__id=project_id,
cici_via_a__relation__relation_name='has_cell_body',
cici_via_a__class_instance_b__name=location)
cici_qs = ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='has_cell_body',
class_instance_a__class_column__class_name='neuron',
class_instance_b__class_column__class_name='cell_body_location')
neuron_id_to_cell_body_location = dict(
(x.class_instance_a.id, x.class_instance_b.name) for x in cici_qs)
neuron_id_to_driver_lines = defaultdict(list)
for cici in ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='expresses_in',
class_instance_a__class_column__class_name='driver_line',
class_instance_b__class_column__class_name='neuron'):
neuron_id_to_driver_lines[cici.class_instance_b.id].append(cici.class_instance_a)
all_neurons = list(all_neurons)
for n in all_neurons:
n.cached_sorted_lines = sorted(
neuron_id_to_driver_lines[n.id], key=lambda x: x.name)
n.cached_sorted_lines_str = ", ".join(x.name for x in n.cached_sorted_lines)
n.cached_cell_body = neuron_id_to_cell_body_location.get(n.id, 'Unknown')
all_neurons = order_neurons(all_neurons, order_by)
return (all_neurons, search_form)
# TODO After all PHP functions have been replaced and all occurrence of
# this odd behavior have been found, change callers to not depend on this
# legacy functionality.
def makeJSON_legacy_list(objects):
'''
The PHP function makeJSON, when operating on a list of rows as
results, will output a JSON list of key-values, with keys being
integers from 0 and upwards. We return a dict with the same
structure so that it looks the same when used with json.dumps.
'''
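    # Example: makeJSON_legacy_list(['a', 'b']) returns {0: 'a', 1: 'b'}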
i = 0
res = {}
for o in objects:
res[i] = o
i += 1
return res
def cursor_fetch_dictionary(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def get_relation_to_id_map(project_id):
return {rname: ID for rname, ID in Relation.objects.filter(project=project_id).values_list("relation_name", "id")}
def get_class_to_id_map(project_id):
return {cname: ID for cname, ID in Class.objects.filter(project=project_id).values_list("class_name", "id")}
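    # Example result shape (hypothetical ids): {'neuron': 12, 'skeleton': 13, ...}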
def urljoin(a, b):
""" Joins to URL parts a and b while making sure this
exactly one slash inbetween.
"""
if a[-1] != '/':
a = a + '/'
if b[0] == '/':
b = b[1:]
return a + b
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
""" Creates a random string of the specified length.
"""
return ''.join(random.choice(chars) for x in range(size))
| htem/CATMAID | django/applications/catmaid/control/common.py | Python | agpl-3.0 | 8,243 |
# -*- coding: utf-8 -*-
# © 2014 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <[email protected]>
# © 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class ProductSupplierInfo(models.Model):
_inherit = 'product.supplierinfo'
discount = fields.Float(
string='Discount (%)', digits_compute=dp.get_precision('Discount'))
@api.onchange('name')
@api.multi
def onchange_name(self):
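        # When the supplier (name) changes, copy that partner's default_supplierinfo_discount
        # into this supplierinfo line's discount.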
for supplierinfo in self.filtered('name'):
supplierinfo.discount =\
supplierinfo.name.default_supplierinfo_discount
| SerpentCS/purchase-workflow | product_supplierinfo_discount/models/product_supplierinfo.py | Python | agpl-3.0 | 753 |
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import sys
# add the path to the volt python client, just based on knowing
# where we are now
sys.path.append('../../lib/python')
sys.path.append('./normalizer/')
import random
import time
import subprocess
import cPickle
import os.path
import imp
import re
import traceback
# stdlib modules used below (datetime, socket) that were not explicitly imported
import datetime
import socket
from voltdbclient import *
from optparse import OptionParser
from Query import VoltQueryClient
from SQLCoverageReport import generate_summary
from SQLGenerator import SQLGenerator
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from subprocess import call # invoke unix/linux cmds
from XMLUtils import prettify # To create a human readable xml file
class Config:
def __init__(self, filename):
fd = open(filename, "r")
self.__content = fd.read()
fd.close()
self.__config = eval(self.__content.strip())
def get_configs(self):
return self.__config.keys()
def get_config(self, config_name):
return self.__config[config_name]
def minutes_colon_seconds(seconds):
return re.sub("^0:", "", str(datetime.timedelta(0, round(seconds))), 1)
def print_seconds(seconds=0, message_end="", message_begin="Total time: ",
include_current_time=False):
""" Prints, and returns, a message containing the specified number of
seconds, first in a minutes:seconds format (e.g. "01:02", or "1:43:48"),
then just the exact number of seconds in parentheses, e.g.,
"1:02 (61.9 seconds)", preceded by the 'message_begin' and followed by
'message_end'. Optionally, if 'include_current_time' is True, the current
time (in seconds since January 1, 1970) is also printed, in brackets, e.g.,
"1:02 (61.9 seconds) [at 1408645826.68], ", which is useful for debugging
purposes.
"""
time_msg = minutes_colon_seconds(seconds) + " ({0:.6f} seconds)".format(seconds)
if (include_current_time):
time_msg += " [at " + str(time.time()) + "]"
message = message_begin + time_msg + ", " + message_end
print message
return message
def print_elapsed_seconds(message_end="", prev_time=-1,
message_begin="Elapsed time: "):
"""Computes, returns and prints the difference (in seconds) between the
current system time and a previous time, which is either the specified
'prev_time' or, if that is negative (or unspecified), the previous time
at which this function was called. The printed message is preceded by
'message_begin' and followed by 'message_end'; the elapsed time is printed
in a minutes:seconds format, with the exact number of seconds in parentheses,
e.g., 61.9 seconds would be printed as "01:02 (61.9 seconds), ".
"""
now = time.time()
global save_prev_time
if (prev_time < 0):
prev_time = save_prev_time
save_prev_time = now
diff_time = now - prev_time
print_seconds(diff_time, message_end, message_begin)
return diff_time
def run_once(name, command, statements_path, results_path,
submit_verbosely, testConfigKit, precision):
print "Running \"run_once\":"
print " name: %s" % (name)
print " command: %s" % (command)
print " statements_path: %s" % (statements_path)
print " results_path: %s" % (results_path)
if precision:
print " precision: %s" % (precision)
sys.stdout.flush()
host = defaultHost
port = defaultPort
if(name == "jni"):
akey = "hostname"
if akey in testConfigKit:
host = testConfigKit["hostname"]
port = testConfigKit["hostport"]
global normalize
if(host == defaultHost):
server = subprocess.Popen(command + " backend=" + name, shell=True)
client = None
clientException = None
for i in xrange(30):
try:
client = VoltQueryClient(host, port)
client.set_quiet(True)
client.set_timeout(5.0) # 5 seconds
break
except socket.error as e:
clientException = e
time.sleep(1)
if client == None:
print >> sys.stderr, "Unable to connect/create client: there may be a problem with the VoltDB server or its ports:"
print >> sys.stderr, "name:", str(name)
print >> sys.stderr, "host:", str(host)
print >> sys.stderr, "port:", str(port)
print >> sys.stderr, "client (socket.error) exception:", str(clientException)
sys.stderr.flush()
return -1
# for key in testConfigKits:
# print "999 Key = '%s', Val = '%s'" % (key, testConfigKits[key])
if(host != defaultHost):
# Flush database
client.onecmd("updatecatalog " + testConfigKit["testCatalog"] + " " + testConfigKit["deploymentFile"])
statements_file = open(statements_path, "rb")
results_file = open(results_path, "wb")
while True:
try:
statement = cPickle.load(statements_file)
except EOFError:
break
try:
if submit_verbosely:
print "Submitting to backend " + name + " adhoc " + statement["SQL"]
client.onecmd("adhoc " + statement["SQL"])
except:
print >> sys.stderr, "Error occurred while executing '%s': %s" % \
(statement["SQL"], sys.exc_info()[1])
if(host == defaultHost):
# Should kill the server now
killer = subprocess.Popen("kill -9 %d" % (server.pid), shell=True)
killer.communicate()
if killer.returncode != 0:
print >> sys.stderr, \
"Failed to kill the server process %d" % (server.pid)
break
table = None
if client.response == None:
print >> sys.stderr, "No error, but an unexpected null client response (server crash?) from executing statement '%s': %s" % \
(statement["SQL"], sys.exc_info()[1])
if(host == defaultHost):
killer = subprocess.Popen("kill -9 %d" % (server.pid), shell=True)
killer.communicate()
if killer.returncode != 0:
print >> sys.stderr, \
"Failed to kill the server process %d" % (server.pid)
break
if client.response.tables:
### print "DEBUG: got table(s) from ", statement["SQL"] ,"."
if precision:
table = normalize(client.response.tables[0], statement["SQL"], precision)
else:
table = normalize(client.response.tables[0], statement["SQL"])
if len(client.response.tables) > 1:
print "WARNING: ignoring extra table(s) from result of query ?", statement["SQL"] , "?"
# else:
# print "WARNING: returned no table(s) from ?", statement["SQL"] ,"?"
cPickle.dump({"Status": client.response.status,
"Info": client.response.statusString,
"Result": table,
"Exception": str(client.response.exception)},
results_file)
results_file.close()
statements_file.close()
if(host == defaultHost):
client.onecmd("shutdown")
server.communicate()
else:
client.onecmd("disconnect")
sys.stdout.flush()
sys.stderr.flush()
if(host == defaultHost):
return server.returncode
else:
return 0
def get_max_mismatches(comparison_database, config_name):
"""Returns the maximum number of acceptable mismatches, i.e., the number of
'known' failures for VoltDB to match the results of the comparison database
(HSQL or PostgreSQL), which is normally zero; however, there are sometimes
a few exceptions, e.g., for queries that are not supported by PostgreSQL.
"""
max_mismatches = 0
# Kludge to not fail for known issues, when running against PostgreSQL
# (or the PostGIS extension of PostgreSQL)
if comparison_database.startswith('Post'):
# Known failures in the basic-joins test suite, and in the basic-index-joins,
# and basic-compoundex-joins "extended" test suites (see ENG-10775)
if (config_name == 'basic-joins' or config_name == 'basic-index-joins' or
config_name == 'basic-compoundex-joins'):
max_mismatches = 5280
# Known failures, related to the ones above, in the basic-int-joins test
# suite (see ENG-10775, ENG-11401)
elif config_name == 'basic-int-joins':
max_mismatches = 600
# Known failures in the joined-matview-* test suites ...
# Failures in joined-matview-default-full due to ENG-11086
elif config_name == 'joined-matview-default-full':
max_mismatches = 3387
# Failures in joined-matview-int due to ENG-11086
elif config_name == 'joined-matview-int':
max_mismatches = 46440
return max_mismatches
def get_config_path(basedir, config_key, config_value):
"""Returns the correct path to a specific (ddl, normalizer, schema, or
template) file, given its config 'key' and 'value'. The 'key' will be one
of 'ddl', 'normalizer', 'schema', or 'template', the last of which is the
more complicated case, requiring us to check the various subdirectories.
"""
for subdir in os.walk(os.path.join(basedir, config_key)):
filename = os.path.join(subdir[0], config_value)
if os.path.isfile(filename):
return os.path.abspath(filename)
# If you cannot find the file, leave the value unchanged
return config_value
def run_config(suite_name, config, basedir, output_dir, random_seed,
report_invalid, report_all, generate_only, subversion_generation,
submit_verbosely, ascii_only, args, testConfigKit):
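    # Overall flow: resolve config paths, generate the SQL statements for this suite, run
    # them once against the VoltDB (JNI) backend and once against the comparison database,
    # then compare the two result sets and fold per-suite statistics into the module-level
    # totals used by the summary report.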
# Store the current, initial system time (in seconds since January 1, 1970)
time0 = time.time()
precision = 0
within_minutes = 0
for key in config.iterkeys():
if key == "precision":
precision = int(config["precision"])
elif key == "within-minutes":
within_minutes = int(config["within-minutes"])
elif not os.path.isabs(config[key]):
config[key] = get_config_path(basedir, key, config[key])
print "in run_config key = '%s', config[key] = '%s'" % (key, str(config[key]))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
global comparison_database
comparison_database_lower = comparison_database.lower()
statements_path = os.path.abspath(os.path.join(output_dir, "statements.data"))
cmpdb_path = os.path.abspath(os.path.join(output_dir, comparison_database_lower + ".data"))
jni_path = os.path.abspath(os.path.join(output_dir, "jni.data"))
modified_sql_path = None
debug_transform_sql_arg = ''
global debug_transform_sql
if debug_transform_sql:
if comparison_database == 'PostgreSQL' or comparison_database == 'PostGIS':
modified_sql_path = os.path.abspath(os.path.join(output_dir, 'postgresql_transform.out'))
debug_transform_sql_arg = ' -Dsqlcoverage.transform.sql.file='+modified_sql_path
template = config["template"]
global normalize
if "normalizer" in config:
normalize = imp.load_source("normalizer", config["normalizer"]).normalize
# print "DEBUG: using normalizer ", config["normalizer"], " for ", template
self_check_safecmp = imp.load_source("normalizer", config["normalizer"]).safecmp
theNow = datetime.datetime.now()
if self_check_safecmp([theNow], [theNow]) != 0:
print >> sys.stderr, "safe_cmp fails [datetime] selfcheck"
exit(2)
if self_check_safecmp([None], [None]) != 0:
print >> sys.stderr, "safe_cmp fails [None] selfcheck"
exit(2)
if self_check_safecmp([theNow], [None]) <= 0:
print >> sys.stderr, "safe_cmp fails [datetime], [None] selfcheck"
exit(2)
theLater = datetime.datetime.now()
if self_check_safecmp([None, theNow], [None, theLater]) >= 0:
print >> sys.stderr, "safe_cmp fails [None, datetime] selfcheck"
exit(2)
else:
normalize = lambda x, y: x
# print "DEBUG: using no normalizer for ", template
command = " ".join(args[2:])
command += " schema=" + os.path.basename(config['ddl'])
if debug_transform_sql:
command = command.replace(" -server ", debug_transform_sql_arg+" -server ")
random_state = random.getstate()
if "template-jni" in config:
template = config["template-jni"]
generator = SQLGenerator(config["schema"], template, subversion_generation, ascii_only)
counter = 0
statements_file = open(statements_path, "wb")
for i in generator.generate(submit_verbosely):
cPickle.dump({"id": counter, "SQL": i}, statements_file)
counter += 1
statements_file.close()
min_statements_per_pattern = generator.min_statements_per_pattern()
max_statements_per_pattern = generator.max_statements_per_pattern()
num_inserts = generator.num_insert_statements()
num_patterns = generator.num_patterns()
num_unresolved = generator.num_unresolved_statements()
if generate_only or submit_verbosely:
print "Generated %d statements." % counter
if generate_only:
# Claim success without running servers.
return {"keyStats" : None, "mis" : 0}
# Print the elapsed time, with a message
global total_gensql_time
gensql_time = print_elapsed_seconds("for generating statements (" + suite_name + ")", time0)
total_gensql_time += gensql_time
num_crashes = 0
failed = False
try:
if run_once("jni", command, statements_path, jni_path,
submit_verbosely, testConfigKit, precision) != 0:
print >> sys.stderr, "Test with the JNI (VoltDB) backend had errors (crash?)."
failed = True
except:
print >> sys.stderr, "JNI (VoltDB) backend crashed!!!"
traceback.print_exc()
failed = True
if (failed):
print >> sys.stderr, " jni_path: %s" % (jni_path)
sys.stderr.flush()
num_crashes += 1
#exit(1)
# Print the elapsed time, with a message
global total_voltdb_time
voltdb_time = print_elapsed_seconds("for running VoltDB (JNI) statements (" + suite_name + ")")
total_voltdb_time += voltdb_time
random.seed(random_seed)
random.setstate(random_state)
failed = False
try:
if run_once(comparison_database_lower, command, statements_path, cmpdb_path,
submit_verbosely, testConfigKit, precision) != 0:
print >> sys.stderr, "Test with the " + comparison_database + " backend had errors (crash?)."
failed = True
except:
print >> sys.stderr, comparison_database + " backend crashed!!"
traceback.print_exc()
failed = True
if (failed):
print >> sys.stderr, " cmpdb_path: %s" % (cmpdb_path)
sys.stderr.flush()
num_crashes += 1
#exit(1)
# Print the elapsed time, with a message
global total_cmpdb_time
cmpdb_time = print_elapsed_seconds("for running " + comparison_database + " statements (" + suite_name + ")")
total_cmpdb_time += cmpdb_time
someStats = (get_numerical_html_table_element(min_statements_per_pattern, strong_warn_below=1) +
get_numerical_html_table_element(max_statements_per_pattern, strong_warn_below=1, warn_above=100000) +
get_numerical_html_table_element(num_inserts, warn_below=4, strong_warn_below=1, warn_above=1000) +
get_numerical_html_table_element(num_patterns, warn_below=4, strong_warn_below=1, warn_above=10000) +
get_numerical_html_table_element(num_unresolved, error_above=0) +
get_time_html_table_element(gensql_time) +
get_time_html_table_element(voltdb_time) +
get_time_html_table_element(cmpdb_time) )
extraStats = get_numerical_html_table_element(num_crashes, error_above=0) + someStats
max_mismatches = get_max_mismatches(comparison_database, suite_name)
global compare_results
try:
compare_results = imp.load_source("normalizer", config["normalizer"]).compare_results
success = compare_results(suite_name, random_seed, statements_path, cmpdb_path,
jni_path, output_dir, report_invalid, report_all, extraStats,
comparison_database, modified_sql_path, max_mismatches, within_minutes)
except:
print >> sys.stderr, "Compare (VoltDB & " + comparison_database + ") results crashed!"
traceback.print_exc()
print >> sys.stderr, " jni_path: %s" % (jni_path)
print >> sys.stderr, " cmpdb_path: %s" % (cmpdb_path)
sys.stderr.flush()
num_crashes += 1
gray_zero_html_table_element = get_numerical_html_table_element(0, use_gray=True)
errorStats = (gray_zero_html_table_element + gray_zero_html_table_element +
gray_zero_html_table_element + gray_zero_html_table_element +
gray_zero_html_table_element + gray_zero_html_table_element +
gray_zero_html_table_element + gray_zero_html_table_element +
gray_zero_html_table_element +
get_numerical_html_table_element(num_crashes, error_above=0) + someStats + '</tr>' )
success = {"keyStats": errorStats, "mis": -1}
# Print & save the elapsed time and total time, with a message
global total_compar_time
compar_time = print_elapsed_seconds("for comparing DB results (" + suite_name + ")")
total_compar_time += compar_time
suite_secs = print_elapsed_seconds("for run_config of '" + suite_name + "'", time0, "Sub-tot time: ")
sys.stdout.flush()
# Accumulate the total number of Valid, Invalid, Mismatched & Total statements
global total_statements
def next_keyStats_column_value():
prefix = "<td"
suffix = "</td>"
global keyStats_start_index
start_index = 0
end_index = 0
next_col_val = "0"
try:
start_index = success["keyStats"].index(prefix, keyStats_start_index) + len(prefix)
start_index = success["keyStats"].index('>', start_index) + 1
end_index = success["keyStats"].index(suffix, start_index)
next_col_val = success["keyStats"][start_index: end_index]
keyStats_start_index = end_index + len(suffix)
except:
print "Caught exception:\n", sys.exc_info()[0]
print "success[keyStats]:\n", success["keyStats"]
print "keyStats_start_index:", keyStats_start_index
print "start_index :", start_index
print "end_index :", end_index
print "next_col_val:", next_col_val
return next_col_val
global valid_statements
global invalid_statements
global mismatched_statements
global keyStats_start_index
global total_volt_npes
global total_cmp_npes
global total_num_crashes
global total_num_inserts
global total_num_patterns
global total_num_unresolved
global min_all_statements_per_pattern
global max_all_statements_per_pattern
keyStats_start_index = 0
valid_statements += int(next_keyStats_column_value())
next_keyStats_column_value() # ignore Valid %
invalid_statements += int(next_keyStats_column_value())
next_keyStats_column_value() # ignore Invalid %
total_statements += int(next_keyStats_column_value())
mismatched_statements += int(next_keyStats_column_value())
next_keyStats_column_value() # ignore Mismatched %
total_volt_npes += int(next_keyStats_column_value())
total_cmp_npes += int(next_keyStats_column_value())
total_num_crashes += num_crashes
total_num_inserts += num_inserts
total_num_patterns += num_patterns
total_num_unresolved += num_unresolved
min_all_statements_per_pattern = min(min_all_statements_per_pattern, min_statements_per_pattern)
max_all_statements_per_pattern = max(max_all_statements_per_pattern, max_statements_per_pattern)
finalStats = (get_time_html_table_element(compar_time) +
get_time_html_table_element(suite_secs) )
success["keyStats"] = success["keyStats"].replace('</tr>', finalStats + '</tr>')
return success
def get_html_table_element_color(value, error_below, strong_warn_below, warn_below,
error_above, strong_warn_above, warn_above, use_gray):
color = ''
if (use_gray):
color = ' bgcolor=#D3D3D3' # gray
elif (value < error_below or value > error_above):
color = ' bgcolor=#FF0000' # red
elif (value < strong_warn_below or value > strong_warn_above):
color = ' bgcolor=#FFA500' # orange
elif (value < warn_below or value > warn_above):
color = ' bgcolor=#FFFF00' # yellow
return color
def get_numerical_html_table_element(value, error_below=-1, strong_warn_below=0, warn_below=0,
error_above=1000000000, strong_warn_above=1000000, warn_above=100000, # 1 billion, 1 million, 100,000
use_gray=False):
return ('<td align=right%s>%d</td>' %
(get_html_table_element_color(value, error_below, strong_warn_below, warn_below,
error_above, strong_warn_above, warn_above, use_gray),
value) )
def get_time_html_table_element(seconds, error_below=0, strong_warn_below=0, warn_below=0,
error_above=28800, strong_warn_above=3600, warn_above=600, # 8 hours, 1 hour, 10 minutes
use_gray=False):
return ('<td align=right%s>%s</td>' %
(get_html_table_element_color(seconds, error_below, strong_warn_below, warn_below,
error_above, strong_warn_above, warn_above, use_gray),
minutes_colon_seconds(seconds)) )
def get_voltcompiler(basedir):
key = "voltdb"
(head, tail) = basedir.split(key)
voltcompiler = head + key + "/bin/voltcompiler"
if(os.access(voltcompiler, os.X_OK)):
return voltcompiler
else:
return None
def get_hostinfo(options):
if options.hostname == None:
hostname = defaultHost
else:
hostname = options.hostname
if options.hostport == None:
hostport = defaultPort
else:
if(options.hostport.isdigit()):
hostport = int(options.hostport)
else:
print "Invalid value for port number: #%s#" % options.hostport
usage()
sys.exit(3)
return (hostname, hostport)
def create_catalogFile(voltcompiler, projectFile, catalogFilename):
catalogFile = "/tmp/" + catalogFilename + ".jar"
cmd = voltcompiler + " /tmp " + projectFile + " " + catalogFile
call(cmd, shell=True)
if not os.path.exists(catalogFile):
catalogFile = None
return catalogFile
def create_projectFile(ddl, projFilename):
proj = Element('project')
db = SubElement(proj, 'database')
schemas = SubElement(db, 'schemas')
schema = SubElement(schemas, 'schema', {'path':ddl})
thisProjectFile = "/tmp/" + projFilename + "4projectFile.xml"
fo = open(thisProjectFile, "wb")
fo.write(prettify(proj))
fo.close()
if not os.path.exists(thisProjectFile):
thisProjectFile = None
return thisProjectFile
def create_deploymentFile(options):
kfactor = options.kfactor
sitesperhost = options.sitescount
hostcount = options.hostcount
deployment = Element('deployment')
cluster = SubElement(deployment, 'cluster',
{'kfactor':kfactor, 'sitesperhost':sitesperhost, 'hostcount':hostcount})
httpd = SubElement(deployment, 'httpd', {'port':"8080"})
jsonapi = SubElement(httpd, 'jsonapi', {'enabled':"true"})
deploymentFile = "/tmp/deploymentFile.xml"
fo = open(deploymentFile, "wb")
fo.write(prettify(deployment))
fo.close()
if not os.path.exists(deploymentFile):
deploymentFile = None
return deploymentFile
# To store all necessary test config info in a dictionary variable
def create_testConfigKits(options, basedir):
testConfigKits = {}
voltcompiler = get_voltcompiler(basedir)
if voltcompiler == None:
print >> sys.stderr, "Cannot find the executable voltcompiler!"
sys.exit(3)
else:
testConfigKits["voltcompiler"] = voltcompiler
deploymentFile = create_deploymentFile(options)
if deploymentFile == None:
print >> sys.stderr, "Cannot find the deployment xml file!"
sys.exit(3)
else:
testConfigKits["deploymentFile"] = deploymentFile
(hostname, hostport) = get_hostinfo(options)
testConfigKits["hostname"] = hostname
testConfigKits["hostport"] = hostport
return testConfigKits
def usage():
print sys.argv[0], "config output_dir command"
print """
config\t\tThe configuration file containing the filenames of the schema,
\t\tthe template, and the normalizer.
output_dir\tThe output directory for the HTML reports.
command\t\tThe command to launch the server.
The schema is merely a Python dictionary which describes the name of the tables
and the column names and types in those tables. The following is an example of a
schema description,
\t{
\t "T": {
\t "columns": (("DESC", FastSerializer.VOLTTYPE_STRING),
\t ("ID", FastSerializer.VOLTTYPE_INTEGER),
\t ("NUM", FastSerializer.VOLTTYPE_INTEGER)),
\t "partitions": (),
\t "indexes": ("ID")
\t }
\t }
This dictionary describes a table called "T" with three columns "DESC", "ID",
and "NUM".
The template is a .sql file containing SQL statements to run in the test. The
SQL statements are templates with place holders which will be substituted with
real values when the test is run. An example looks like this,
\tSELECT _variable FROM _table WHERE _variable _cmp _variable LIMIT _value[byte];
A possible SQL statement generated from this template based on the table
description above would be
\tSELECT ID FROM T WHERE ID < NUM LIMIT 3;
The following place holders are supported,
\t_variable[type]\tWill be replaced with a column name of the given type,
\t\t\ttype can be int,byte,int16,int32,int64,float,string,
\t\t\tdate. int is a superset of byte,int16,int32,int64.
\t\t\tType can be omitted.
\t_table\t\tWill be replaced with a table name
\t_value[type]\tWill be replaced with a random value of the given type,
\t\t\ttype can be id,byte,int16,int32,int64,float,string,date.
\t\t\tid is unique integer type incremented by 1 each time.
\t\t\tYou can also specify an integer within a range,
\t\t\te.g. _value[int:0,100]
\t_cmp\t\tWill be replaced with a comparison operator
\t_math\t\tWill be replaced with an arithmetic operator
\t_agg\t\tWill be replaced with an aggregation operator
\t_maybe\t\tWill be replaced with NOT or simply removed
\t_distinct\t\tWill be replaced with DISTINCT or simply removed
\t_like\t\tWill be replaced with LIKE or NOT LIKE
\t_set\t\tWill be replaced with a set operator
\t_logic\t\tWill be replaced with a logic operator
\t_sortorder\tWill be replaced with ASC, DESC, or 'blank' (implicitly ascending)
"""
if __name__ == "__main__":
#print the whole command line, maybe useful for debugging
#print " ".join(sys.argv)
# Print the current, initial system time
time0 = time.time()
print "Initial time: " + str(time0) + ", at start (in seconds since January 1, 1970)"
save_prev_time = time0
total_gensql_time = 0.0
total_voltdb_time = 0.0
total_cmpdb_time = 0.0
total_compar_time = 0.0
keyStats_start_index = 0
valid_statements = 0
invalid_statements = 0
mismatched_statements = 0
total_statements = 0
total_volt_npes = 0
total_cmp_npes = 0
total_num_crashes = 0
total_num_inserts = 0
total_num_patterns = 0
total_num_unresolved = 0
max_all_statements_per_pattern = 0
min_all_statements_per_pattern = sys.maxint
parser = OptionParser()
parser.add_option("-l", "--leader", dest="hostname",
help="the hostname of the leader")
parser.add_option("-n", "--number", dest="hostcount",
help="the number of total hosts used in this test")
parser.add_option("-k", "--kfactor", dest="kfactor",
help="the number of kfactor used in this test")
parser.add_option("-t", "--sitescount", dest="sitescount",
help="the number of partitions used in this test")
parser.add_option("-p", "--port", dest="hostport",
help="the port number of the leader")
parser.add_option("-s", "--seed", dest="seed",
help="seed for random number generator")
parser.add_option("-c", "--config", dest="config", default=None,
help="the name of the config to run")
parser.add_option("-S", "--subversion_generation", dest="subversion_generation",
action="store_true", default=None,
help="enable generation of additional subquery forms for select statements")
parser.add_option("-a", "--ascii-only", action="store_true",
dest="ascii_only", default=False,
help="include only ASCII values in randomly generated string constants")
parser.add_option("-i", "--report-invalid", action="store_true",
dest="report_invalid", default=False,
help="report invalid SQL statements, not just mismatches")
parser.add_option("-r", "--report-all", action="store_true",
dest="report_all", default=False,
help="report all attempted SQL statements, not just mismatches")
parser.add_option("-g", "--generate-only", action="store_true",
dest="generate_only", default=False,
help="only generate and report SQL statements, do not start any database servers")
parser.add_option("-P", "--postgresql", action="store_true",
dest="postgresql", default=False,
help="compare VoltDB results to PostgreSQL, rather than HSqlDB")
parser.add_option("-G", "--postgis", action="store_true",
dest="postgis", default=False,
help="compare VoltDB results to PostgreSQL, with the PostGIS extension")
(options, args) = parser.parse_args()
if options.seed == None:
seed = random.randint(0, 2 ** 63)
print "Random seed: %d" % seed
else:
seed = int(options.seed)
print "Using supplied seed: " + str(seed)
random.seed(seed)
if len(args) < 3:
usage()
sys.exit(3)
config_filename = args[0]
output_dir = args[1]
# Parent directory of the 'config' directory (i.e., this would
# normally be the 'sqlcoverage' directory)
basedir = os.path.dirname(os.path.dirname(config_filename))
config_list = Config(config_filename)
# print "config_list name = '" + config_list.__class__.__name__ + "'"
configs_to_run = []
if options.config != None:
if options.config not in config_list.get_configs():
print >> sys.stderr, \
"Selected config %s not present in config file %s" % (options.config, config_filename)
sys.exit(3)
else:
configs_to_run.append(options.config)
else:
configs_to_run = config_list.get_configs()
comparison_database = "HSqlDB" # default value
debug_transform_sql = False
if options.postgresql:
comparison_database = 'PostgreSQL'
debug_transform_sql = True
if options.postgis:
comparison_database = 'PostGIS'
debug_transform_sql = True
testConfigKits = {}
defaultHost = "localhost"
defaultPort = 21212
if(options.hostname != None and options.hostname != defaultHost):
# To set a dictionary with following 4 keys:
# testConfigKits["voltcompiler"]
# testConfigKits["deploymentFile"]
# testConfigKits["hostname"]
# testConfigKits["hostport"]
testConfigKits = create_testConfigKits(options, basedir)
success = True
statistics = {}
for config_name in configs_to_run:
print >> sys.stderr, "\nSQLCOVERAGE: STARTING ON CONFIG: %s\n" % config_name
report_dir = output_dir + '/' + config_name
config = config_list.get_config(config_name)
if(options.hostname != None and options.hostname != defaultHost):
testDDL = basedir + "/" + config['ddl']
testProjectFile = create_projectFile(testDDL, 'test')
testCatalog = create_catalogFile(testConfigKits['voltcompiler'], testProjectFile, 'test')
# To add one more key
testConfigKits["testCatalog"] = testCatalog
result = run_config(config_name, config, basedir, report_dir, seed,
options.report_invalid, options.report_all,
options.generate_only, options.subversion_generation,
options.report_all, options.ascii_only, args, testConfigKits)
statistics[config_name] = result["keyStats"]
statistics["seed"] = seed
# The maximum number of acceptable mismatches is normally zero, except
# for certain rare cases involving known errors in PostgreSQL
if result["mis"] > get_max_mismatches(comparison_database, config_name):
success = False
# Write the summary
time1 = time.time()
if total_statements > 0:
valid_percent = '{0:.2f}'.format(100.00 * valid_statements / total_statements)
invalid_percent = '{0:.2f}'.format(100.00 * invalid_statements / total_statements)
mismatched_percent = '{0:.2f}'.format(100.00 * mismatched_statements / total_statements)
else:
valid_percent = '0.00'
invalid_percent = '0.00'
mismatched_percent = '0.00'
statistics["totals"] = "\n<td align=right>" + str(valid_statements) + "</td>" + \
"\n<td align=right>" + valid_percent + "%</td>" + \
"\n<td align=right>" + str(invalid_statements) + "</td>" + \
"\n<td align=right>" + invalid_percent + "%</td>" + \
"\n<td align=right>" + str(total_statements) + "</td>" + \
"\n<td align=right>" + str(mismatched_statements) + "</td>" + \
"\n<td align=right>" + mismatched_percent + "%</td>" + \
"\n<td align=right>" + str(total_volt_npes) + "</td>" + \
"\n<td align=right>" + str(total_cmp_npes) + "</td>" + \
"\n<td align=right>" + str(total_num_crashes) + "</td>" + \
"\n<td align=right>" + str(min_all_statements_per_pattern) + "</td>" + \
"\n<td align=right>" + str(max_all_statements_per_pattern) + "</td>" + \
"\n<td align=right>" + str(total_num_inserts) + "</td>" + \
"\n<td align=right>" + str(total_num_patterns) + "</td>" + \
"\n<td align=right>" + str(total_num_unresolved) + "</td>" + \
"\n<td align=right>" + minutes_colon_seconds(total_gensql_time) + "</td>" + \
"\n<td align=right>" + minutes_colon_seconds(total_voltdb_time) + "</td>" + \
"\n<td align=right>" + minutes_colon_seconds(total_cmpdb_time) + "</td>" + \
"\n<td align=right>" + minutes_colon_seconds(total_compar_time) + "</td>" + \
"\n<td align=right>" + minutes_colon_seconds(time1-time0) + "</td></tr>\n"
generate_summary(output_dir, statistics, comparison_database)
# Print the total time, for each type of activity
print_seconds(total_gensql_time, "for generating ALL SQL statements")
print_seconds(total_voltdb_time, "for running ALL VoltDB (JNI) statements")
print_seconds(total_cmpdb_time, "for running ALL " + comparison_database + " statements")
print_seconds(total_compar_time, "for comparing ALL DB results")
print_elapsed_seconds("for generating the output report", time1, "Total time: ")
print_elapsed_seconds("for the entire run", time0, "Total time: ")
if total_num_unresolved > 0:
success = False
print "Total number of invalid statements with unresolved symbols: %d" % total_num_unresolved
if total_cmp_npes > 0:
print "Total number of " + comparison_database + " NullPointerExceptions (NPEs): %d" % total_cmp_npes
if total_volt_npes > 0:
success = False
print "Total number of VoltDB NullPointerExceptions (NPEs): %d" % total_volt_npes
if mismatched_statements > 0:
print "Total number of mismatched statements (i.e., test failures): %d" % mismatched_statements
if total_num_crashes > 0:
print "Total number of (VoltDB, " + comparison_database + ", or compare results) crashes: %d" % total_num_crashes
success = False
if not success:
sys.stdout.flush()
sys.stderr.flush()
print >> sys.stderr, "SQL coverage has errors."
exit(1)
| deerwalk/voltdb | tests/sqlcoverage/sql_coverage_test.py | Python | agpl-3.0 | 38,815 |
import logging
from openfisca_core.tools import assert_near
from openfisca_country_template import CountryTaxBenefitSystem
from openfisca_survey_manager.tests.test_scenario import (
create_randomly_initialized_survey_scenario
)
log = logging.getLogger(__name__)
tax_benefit_system = CountryTaxBenefitSystem()
def test_compute_marginal_tax_rate():
survey_scenario = create_randomly_initialized_survey_scenario(use_marginal_tax_rate = True)
assert survey_scenario._modified_simulation is not None
assert_near(
survey_scenario.compute_marginal_tax_rate(target_variable = 'income_tax', period = 2017),
(1 - .15),
relative_error_margin = 1e-6,
)
survey_scenario.compute_marginal_tax_rate(target_variable = 'disposable_income', period = 2017)
if __name__ == "__main__":
import sys
log = logging.getLogger(__name__)
logging.basicConfig(level = logging.DEBUG, stream = sys.stdout)
test_compute_marginal_tax_rate()
| openfisca/openfisca-survey-manager | openfisca_survey_manager/tests/test_marginal_tax_rate.py | Python | agpl-3.0 | 989 |
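# --- Editor's illustrative sketch (not part of the dataset row above): one common
# definition of a marginal tax rate is 1 - (change in net income / change in gross
# income). The figures below are arbitrary assumptions used only to show the arithmetic.
gross_before, gross_after = 10000.0, 10100.0
net_before, net_after = 8500.0, 8585.0
marginal_tax_rate = 1 - (net_after - net_before) / (gross_after - gross_before)
print(round(marginal_tax_rate, 2))  # -> 0.15 for this made-up 15% marginal wedge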
#!/usr/bin/env python
# "convertor" - converts ODF files from a YUSCII font-encoding to proper UTF-8.
# Copyright (C) 2009 Damjan Georgievski
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import setuptools
__author__ = 'Damjan Georgievski'
__version__ = '2.0'
__email__ = '[email protected]'
setuptools.setup(
name = 'convertor',
version = __version__,
author = __author__,
author_email = __email__,
description = 'converts ODF files from a YUSCII font-encoding to proper UTF-8 ODF',
license = 'AGPL 3.0',
url = 'http://github.com/gdamjan/convertor',
packages = ['convertor'],
package_data = {},
keywords = "ODF",
include_package_data = True,
classifiers = [
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.6'
],
test_suite = '',
zip_safe = False,
entry_points = {
'console_scripts':
['convertor=convertor.__main__:main']
},
install_requires = ['lxml'],
extras_require = {
"web": "Werkzeug"
}
)
| gdamjan/convertor | setup.py | Python | agpl-3.0 | 1,692 |
from django.core.management.base import BaseCommand, CommandError
from quotes_app.tasks import rank_all
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Runs reranking algorithms on the Quotes.'
def handle(self, *args, **options):
logger.info('Running {0} management task.'.format(__name__))
rank_all()
| podcastquotes/podcastquotes | django_project/quotes_app/management/commands/rank_quotes.py | Python | agpl-3.0 | 387 |
from collections import defaultdict
from django.core.files.storage import DefaultStorage
from django.core.management.base import BaseCommand, CommandError
from candidates.csv_helpers import list_to_csv, memberships_dicts_for_csv
from elections.models import Election
def safely_write(output_filename, memberships_list):
"""
Use Django's storage backend to write the CSV file to the MEDIA_ROOT.
If using S3 (via Django Storages) the file is atomically written when the
file is closed (when the context manager closes).
That is, the file can be opened and written to but nothing changes at
    the public S3 URL until the object is closed. This means it's not possible to
    have a half-written file.
If not using S3, there will be a short time where the file is empty
during write.
"""
csv = list_to_csv(memberships_list)
file_store = DefaultStorage()
with file_store.open(output_filename, "wb") as out_file:
out_file.write(csv.encode("utf-8"))
class Command(BaseCommand):
help = "Output CSV files for all elections"
def add_arguments(self, parser):
parser.add_argument(
"--site-base-url",
help="The base URL of the site (for full image URLs)",
)
parser.add_argument(
"--election",
metavar="ELECTION-SLUG",
help="Only output CSV for the election with this slug",
)
def slug_to_file_name(self, slug):
return "{}-{}.csv".format(self.output_prefix, slug)
def handle(self, **options):
if options["election"]:
try:
election = Election.objects.get(slug=options["election"])
election_slug = election.slug
except Election.DoesNotExist:
message = "Couldn't find an election with slug {election_slug}"
raise CommandError(
message.format(election_slug=options["election"])
)
else:
election_slug = None
self.options = options
self.output_prefix = "candidates"
membership_by_election, elected_by_election = memberships_dicts_for_csv(
election_slug
)
# Write a file per election, optionally adding candidates
# We still want a file to exist if there are no candidates yet,
        # as the files are linked to as soon as the election is created
election_qs = Election.objects.all()
if election_slug:
election_qs = election_qs.filter(slug=election_slug)
for election in election_qs:
safely_write(
self.slug_to_file_name(election.slug),
membership_by_election.get(election.slug, []),
)
# Make a CSV file per election date
slugs_by_date = defaultdict(list)
for slug in membership_by_election.keys():
slugs_by_date[slug.split(".")[-1]].append(slug)
for date, slugs in slugs_by_date.items():
memberships_for_date = []
for slug in slugs:
memberships_for_date += membership_by_election[slug]
safely_write(self.slug_to_file_name(date), memberships_for_date)
# If we're not outputting a single election, output all elections
if not election_slug:
sorted_elections = sorted(
membership_by_election.keys(),
key=lambda key: key.split(".")[-1],
)
all_memberships = []
all_elected = []
for slug in sorted_elections:
all_memberships += membership_by_election[slug]
all_elected += elected_by_election[slug]
safely_write(self.slug_to_file_name("all"), all_memberships)
safely_write(self.slug_to_file_name("elected-all"), all_elected)
| DemocracyClub/yournextrepresentative | ynr/apps/candidates/management/commands/candidates_create_csv.py | Python | agpl-3.0 | 3,846 |
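# --- Editor's illustrative sketch (assumption, not taken from the repository above):
# the management command defined above would normally be run via manage.py or, from
# Python, via Django's call_command; the election slug below is a made-up placeholder.
from django.core.management import call_command
# Export CSV files for every election:
call_command("candidates_create_csv")
# Or restrict the export to a single (hypothetical) election slug:
call_command("candidates_create_csv", election="parl.2024-07-04")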
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import add_days, cstr, getdate
from webnotes.model.doc import addchild
from webnotes.model.bean import getlist
from webnotes import msgprint, _
from stock.utils import get_valid_serial_nos
from utilities.transaction_base import TransactionBase, delete_events
class DocType(TransactionBase):
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
def get_item_details(self, item_code):
item = webnotes.conn.sql("""select item_name, description from `tabItem`
where name = %s""", (item_code), as_dict=1)
ret = {
'item_name': item and item[0]['item_name'] or '',
'description' : item and item[0]['description'] or ''
}
return ret
def generate_schedule(self):
self.doclist = self.doc.clear_table(self.doclist, 'maintenance_schedule_detail')
count = 0
webnotes.conn.sql("delete from `tabMaintenance Schedule Detail` where parent='%s'" %(self.doc.name))
for d in getlist(self.doclist, 'item_maintenance_detail'):
self.validate_maintenance_detail()
s_list =[]
s_list = self.create_schedule_list(d.start_date, d.end_date, d.no_of_visits)
for i in range(d.no_of_visits):
child = addchild(self.doc, 'maintenance_schedule_detail',
'Maintenance Schedule Detail', self.doclist)
child.item_code = d.item_code
child.item_name = d.item_name
child.scheduled_date = s_list[i].strftime('%Y-%m-%d')
if d.serial_no:
child.serial_no = d.serial_no
child.idx = count
count = count+1
child.incharge_name = d.incharge_name
child.save(1)
self.on_update()
def on_submit(self):
if not getlist(self.doclist, 'maintenance_schedule_detail'):
msgprint("Please click on 'Generate Schedule' to get schedule")
raise Exception
self.check_serial_no_added()
self.validate_schedule()
email_map ={}
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.validate_serial_no(serial_nos, d.start_date)
self.update_amc_date(serial_nos, d.end_date)
if d.incharge_name not in email_map:
email_map[d.incharge_name] = webnotes.bean("Sales Person",
d.incharge_name).run_method("get_email_id")
scheduled_date =webnotes.conn.sql("select scheduled_date from `tabMaintenance Schedule Detail` \
where incharge_name='%s' and item_code='%s' and parent='%s' " %(d.incharge_name, \
d.item_code, self.doc.name), as_dict=1)
for key in scheduled_date:
if email_map[d.incharge_name]:
description = "Reference: %s, Item Code: %s and Customer: %s" % \
(self.doc.name, d.item_code, self.doc.customer)
webnotes.bean({
"doctype": "Event",
"owner": email_map[d.incharge_name] or self.doc.owner,
"subject": description,
"description": description,
"starts_on": key["scheduled_date"] + " 10:00:00",
"event_type": "Private",
"ref_type": self.doc.doctype,
"ref_name": self.doc.name
}).insert()
webnotes.conn.set(self.doc, 'status', 'Submitted')
#get schedule dates
#----------------------
def create_schedule_list(self, start_date, end_date, no_of_visit):
schedule_list = []
start_date1 = start_date
date_diff = (getdate(end_date) - getdate(start_date)).days
add_by = date_diff/no_of_visit
#schedule_list.append(start_date1)
while(getdate(start_date1) < getdate(end_date)):
start_date1 = add_days(start_date1, add_by)
if len(schedule_list) < no_of_visit:
schedule_list.append(getdate(start_date1))
return schedule_list
#validate date range and periodicity selected
#-------------------------------------------------
def validate_period(self, arg):
arg1 = eval(arg)
if getdate(arg1['start_date']) >= getdate(arg1['end_date']):
msgprint("Start date should be less than end date ")
raise Exception
period = (getdate(arg1['end_date'])-getdate(arg1['start_date'])).days+1
if (arg1['periodicity']=='Yearly' or arg1['periodicity']=='Half Yearly' or arg1['periodicity']=='Quarterly') and period<365:
			msgprint(cstr(arg1['periodicity'])+ " periodicity can be set for a period of at least 1 year")
raise Exception
elif arg1['periodicity']=='Monthly' and period<30:
msgprint("Monthly periodicity can be set for period of atleast 1 month or more")
raise Exception
elif arg1['periodicity']=='Weekly' and period<7:
msgprint("Weekly periodicity can be set for period of atleast 1 week or more")
raise Exception
def get_no_of_visits(self, arg):
arg1 = eval(arg)
self.validate_period(arg)
period = (getdate(arg1['end_date'])-getdate(arg1['start_date'])).days+1
count =0
if arg1['periodicity'] == 'Weekly':
count = period/7
elif arg1['periodicity'] == 'Monthly':
count = period/30
elif arg1['periodicity'] == 'Quarterly':
count = period/91
elif arg1['periodicity'] == 'Half Yearly':
count = period/182
elif arg1['periodicity'] == 'Yearly':
count = period/365
ret = {'no_of_visits':count}
return ret
def validate_maintenance_detail(self):
if not getlist(self.doclist, 'item_maintenance_detail'):
msgprint("Please enter Maintaince Details first")
raise Exception
for d in getlist(self.doclist, 'item_maintenance_detail'):
if not d.item_code:
msgprint("Please select item code")
raise Exception
elif not d.start_date or not d.end_date:
msgprint("Please select Start Date and End Date for item "+d.item_code)
raise Exception
elif not d.no_of_visits:
msgprint("Please mention no of visits required")
raise Exception
elif not d.incharge_name:
msgprint("Please select Incharge Person's name")
raise Exception
if getdate(d.start_date) >= getdate(d.end_date):
msgprint("Start date should be less than end date for item "+d.item_code)
raise Exception
def validate_sales_order(self):
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.prevdoc_docname:
chk = webnotes.conn.sql("select t1.name from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2 where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1", d.prevdoc_docname)
if chk:
msgprint("Maintenance Schedule against "+d.prevdoc_docname+" already exist")
raise Exception
def validate(self):
self.validate_maintenance_detail()
self.validate_sales_order()
def on_update(self):
webnotes.conn.set(self.doc, 'status', 'Draft')
def update_amc_date(self, serial_nos, amc_expiry_date=None):
for serial_no in serial_nos:
serial_no_bean = webnotes.bean("Serial No", serial_no)
serial_no_bean.doc.amc_expiry_date = amc_expiry_date
serial_no_bean.save()
def validate_serial_no(self, serial_nos, amc_start_date):
for serial_no in serial_nos:
sr_details = webnotes.conn.get_value("Serial No", serial_no,
["warranty_expiry_date", "amc_expiry_date", "status", "delivery_date"], as_dict=1)
if sr_details.warranty_expiry_date and sr_details.warranty_expiry_date>=amc_start_date:
webnotes.throw("""Serial No: %s is already under warranty upto %s.
Please check AMC Start Date.""" % (serial_no, sr_details.warranty_expiry_date))
if sr_details.amc_expiry_date and sr_details.amc_expiry_date >= amc_start_date:
webnotes.throw("""Serial No: %s is already under AMC upto %s.
Please check AMC Start Date.""" % (serial_no, sr_details.amc_expiry_date))
if sr_details.status=="Delivered" and sr_details.delivery_date and \
sr_details.delivery_date >= amc_start_date:
webnotes.throw(_("Maintenance start date can not be before \
delivery date for serial no: ") + serial_no)
def validate_schedule(self):
item_lst1 =[]
item_lst2 =[]
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.item_code not in item_lst1:
item_lst1.append(d.item_code)
for m in getlist(self.doclist, 'maintenance_schedule_detail'):
if m.item_code not in item_lst2:
item_lst2.append(m.item_code)
if len(item_lst1) != len(item_lst2):
msgprint("Maintenance Schedule is not generated for all the items. Please click on 'Generate Schedule'")
raise Exception
else:
for x in item_lst1:
if x not in item_lst2:
msgprint("Maintenance Schedule is not generated for item "+x+". Please click on 'Generate Schedule'")
raise Exception
#check if serial no present in item maintenance table
#-----------------------------------------------------------
def check_serial_no_added(self):
serial_present =[]
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.serial_no:
serial_present.append(d.item_code)
for m in getlist(self.doclist, 'maintenance_schedule_detail'):
if serial_present:
if m.item_code in serial_present and not m.serial_no:
msgprint("Please click on 'Generate Schedule' to fetch serial no added for item "+m.item_code)
raise Exception
def on_cancel(self):
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.update_amc_date(serial_nos)
webnotes.conn.set(self.doc, 'status', 'Cancelled')
delete_events(self.doc.doctype, self.doc.name)
def on_trash(self):
delete_events(self.doc.doctype, self.doc.name)
@webnotes.whitelist()
def make_maintenance_visit(source_name, target_doclist=None):
from webnotes.model.mapper import get_mapped_doclist
def update_status(source, target, parent):
target.maintenance_type = "Scheduled"
doclist = get_mapped_doclist("Maintenance Schedule", source_name, {
"Maintenance Schedule": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "maintenance_schedule"
},
"validation": {
"docstatus": ["=", 1]
},
"postprocess": update_status
},
"Maintenance Schedule Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
"incharge_name": "service_person"
}
}
}, target_doclist)
	return [d.fields for d in doclist]
| gangadhar-kadam/nassimapp | support/doctype/maintenance_schedule/maintenance_schedule.py | Python | agpl-3.0 | 10,225
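# --- Editor's illustrative sketch (assumption): the visit-date spacing performed by
# create_schedule_list above, restated with plain datetime arithmetic and made-up dates.
# The original (Python 2) code uses integer division of the day span by the visit count.
from datetime import date, timedelta
def schedule_dates_sketch(start, end, visits):
    step = (end - start).days // visits  # whole days between consecutive visits
    dates, current = [], start
    while current < end and len(dates) < visits:
        current = current + timedelta(days=step)
        dates.append(current)
    return dates
print(schedule_dates_sketch(date(2024, 1, 1), date(2024, 12, 31), 4))
# -> four roughly evenly spaced dates, the last one falling just before the end date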
class Year(object):
def __init__(self, year):
self.year = year
def is_leap_year(self):
return (self._by_4() and not self._by_100()) \
or self._by_400()
def _by_4(self):
return self.year % 4 == 0
def _by_100(self):
return self.year % 100 == 0
def _by_400(self):
return self.year % 400 == 0
| mscoutermarsh/exercism_coveralls | assignments/python/leap/example.py | Python | agpl-3.0 | 368 |
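# --- Editor's illustrative sketch (not part of the dataset row above). It assumes the
# Year class from example.py is importable as shown; the sample years are arbitrary.
from example import Year
assert Year(1996).is_leap_year()      # divisible by 4 but not by 100
assert not Year(1900).is_leap_year()  # divisible by 100 but not by 400
assert Year(2000).is_leap_year()      # divisible by 400
assert not Year(2015).is_leap_year()  # not divisible by 4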
# -*- coding: utf-8 -*-
from openerp import fields, models, api
import re
class res_partner(models.Model):
_inherit = 'res.partner'
#def _get_default_tp_type(self):
# return self.env.ref('l10n_cl_invoice.res_IVARI').id
# todo: pasar los valores por defecto a un nuevo módulo
# por ejemplo "l10n_cl_res_partner_defaults
#def _get_default_doc_type(self):
# return self.env.ref('l10n_cl_invoice.dt_RUT').id
responsability_id = fields.Many2one(
'sii.responsability', 'Sale/Purchase Doc Type')
# dejamos el default pendiente para instalar en otro modulo,
# porque da problemas en instalaciones nuevas
# 'sii.responsability', 'Responsability', default = _get_default_tp_type)
document_type_id = fields.Many2one(
'sii.document_type', 'ID Type')
# 'sii.document_type', 'Document type', default = _get_default_doc_type)
document_number = fields.Char('Document number', size=64)
start_date = fields.Date('Start-up Date')
tp_sii_code = fields.Char('Tax Payer SII Code', compute='_get_tp_sii_code',
readonly=True)
@api.multi
@api.onchange('responsability_id')
def _get_tp_sii_code(self):
for record in self:
record.tp_sii_code=str(record.responsability_id.tp_sii_code)
@api.onchange('document_number', 'document_type_id')
def onchange_document(self):
mod_obj = self.env['ir.model.data']
if self.document_number and ((
'sii.document_type',
self.document_type_id.id) == mod_obj.get_object_reference(
'l10n_cl_invoice', 'dt_RUT') or ('sii.document_type',
self.document_type_id.id) == mod_obj.get_object_reference(
'l10n_cl_invoice', 'dt_RUN')):
document_number = (
re.sub('[^1234567890Kk]', '', str(
self.document_number))).zfill(9).upper()
self.vat = 'CL%s' % document_number
self.document_number = '%s.%s.%s-%s' % (
document_number[0:2], document_number[2:5],
document_number[5:8], document_number[-1])
elif self.document_number and (
'sii.document_type',
self.document_type_id.id) == mod_obj.get_object_reference(
'l10n_cl_invoice', 'dt_Sigd'):
self.document_number = ''
| odoo-chile/l10n_cl_invoice | models/partner.py | Python | agpl-3.0 | 2,394 |
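# --- Editor's illustrative sketch (assumption): the RUT normalisation performed in
# onchange_document above, extracted into a stand-alone function with a made-up input.
import re
def format_rut_sketch(raw):
    digits = re.sub('[^1234567890Kk]', '', str(raw)).zfill(9).upper()
    vat = 'CL%s' % digits
    pretty = '%s.%s.%s-%s' % (digits[0:2], digits[2:5], digits[5:8], digits[-1])
    return vat, pretty
print(format_rut_sketch('12.345.678-5'))  # -> ('CL123456785', '12.345.678-5')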
""""""
from __future__ import annotations
from flask import Flask
from .criterion import TagCriterion
from .extension import TagsExtension
__all__ = ["TagsExtension", "TagCriterion"]
def register_plugin(app: Flask):
TagsExtension(app)
| abilian/abilian-core | src/abilian/web/tags/__init__.py | Python | lgpl-2.1 | 244 |
# coding: utf-8
import os
import sys
from nxdrive.logging_config import get_logger
from nxdrive.utils import safe_long_path
from tests.common_unit_test import UnitTestCase
if sys.platform == 'win32':
import win32api
log = get_logger(__name__)
# Number of chars in path c://.../Nuxeo.. is approx 96 chars
FOLDER_A = 'A' * 90
FOLDER_B = 'B' * 90
FOLDER_C = 'C' * 90
FOLDER_D = 'D' * 50
class TestLongPath(UnitTestCase):
def setUp(self):
UnitTestCase.setUp(self)
self.local_1 = self.local_client_1
self.remote_1 = self.remote_document_client_1
log.info("Create a folder AAAA... (90 chars) in server")
self.folder_a = self.remote_1.make_folder("/", FOLDER_A)
self.folder_b = self.remote_1.make_folder(self.folder_a, FOLDER_B)
self.folder_c = self.remote_1.make_folder(self.folder_b, FOLDER_C)
self.remote_1.make_file(self.folder_c, "File1.txt", "Sample Content")
def tearDown(self):
log.info("Delete the folder AAA... in server")
self.remote_1.delete(self.folder_a, use_trash=False)
UnitTestCase.tearDown(self)
def test_long_path(self):
self.engine_1.start()
self.wait_sync(wait_for_async=True)
parent_path = os.path.join(self.local_1.abspath('/'),
FOLDER_A, FOLDER_B, FOLDER_C, FOLDER_D)
log.info("Creating folder with path: %s", parent_path)
if sys.platform == 'win32' and not os.path.exists(parent_path):
log.debug('Add \\\\?\\ prefix to path %r', parent_path)
parent_path = safe_long_path(parent_path)
os.makedirs(parent_path)
if sys.platform == 'win32':
log.info("Convert path of FOLDER_D\File2.txt to short path format")
parent_path = win32api.GetShortPathName(parent_path)
new_file = os.path.join(parent_path, "File2.txt")
log.info("Creating file with path: %s", new_file)
with open(new_file, "w") as f:
f.write("Hello world")
self.wait_sync(wait_for_async=True, timeout=45, fail_if_timeout=False)
remote_children_of_c = self.remote_1.get_children_info(self.folder_c)
children_names = [item.name for item in remote_children_of_c]
log.warn("Verify if FOLDER_D is uploaded to server")
self.assertIn(FOLDER_D, children_names)
folder_d = [item.uid for item in remote_children_of_c if item.name == FOLDER_D][0]
remote_children_of_d = self.remote_1.get_children_info(folder_d)
children_names = [item.name for item in remote_children_of_d]
log.warn("Verify if FOLDER_D\File2.txt is uploaded to server")
self.assertIn('File2.txt', children_names)
def test_setup_on_long_path(self):
""" NXDRIVE 689: Fix error when adding a new account when installation
path is greater than 245 characters.
"""
self.engine_1.stop()
self.engine_1.reinit()
# On Mac, avoid permission denied error
self.engine_1.get_local_client().clean_xattr_root()
test_folder_len = 245 - len(str(self.local_nxdrive_folder_1))
test_folder = 'A' * test_folder_len
self.local_nxdrive_folder_1 = os.path.join(self.local_nxdrive_folder_1,
test_folder)
self.assertTrue(len(self.local_nxdrive_folder_1) > 245)
self.manager_1.unbind_all()
self.engine_1 = self.manager_1.bind_server(
self.local_nxdrive_folder_1, self.nuxeo_url, self.user_2,
self.password_2, start_engine=False)
self.engine_1.start()
self.engine_1.stop()
| ssdi-drive/nuxeo-drive | nuxeo-drive-client/tests/test_long_path.py | Python | lgpl-2.1 | 3,664 |
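# --- Editor's illustrative sketch (assumption): safe_long_path as used above works by
# adding the Windows extended-length prefix so paths longer than MAX_PATH are accepted;
# a minimal stand-in could look like this (the real helper lives in nxdrive.utils).
import sys
def safe_long_path_sketch(abs_path):
    # Only Windows needs the \\?\ prefix; other platforms are returned unchanged.
    if sys.platform == 'win32' and not abs_path.startswith('\\\\?\\'):
        abs_path = '\\\\?\\' + abs_path
    return abs_path
print(safe_long_path_sketch(r'C:\very\long\path\to\Nuxeo Drive'))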
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2019 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from __future__ import absolute_import
from fife import fifechan
from fife.extensions.pychan.attrs import IntAttr, FloatAttr
from .widget import Widget
class Slider(Widget):
""" A slider widget
Use a callback to read out the slider value every time the marker
is moved.
New Attributes
==============
- orientation: 1 = horizontal, 0=vertical
- scale_start: float: default 0.0
- scale_end: float: default 1.0
- step_length: float: default scale_end/10
- marker_length: int: default 10
FIXME:
- update docstrings
"""
HORIZONTAL = fifechan.Slider.Horizontal
VERTICAL = fifechan.Slider.Vertical
ATTRIBUTES = Widget.ATTRIBUTES + [ IntAttr('orientation'),
FloatAttr('scale_start'),
FloatAttr('scale_end'),
FloatAttr('step_length'),
IntAttr('marker_length')
]
DEFAULT_HEXPAND = True
DEFAULT_VEXPAND = False
DEFAULT_SIZE = 10,10
DEFAULT_MIN_SIZE = 10,10
DEFAULT_SCALE_START = 0.0
DEFAULT_SCALE_END = 1.0
DEFAULT_STEP_LENGTH = 0.1
DEFAULT_MARKER_LENGTH = 10
DEFAULT_ORIENTATION = HORIZONTAL
def __init__(self,
parent = None,
name = None,
size = None,
min_size = None,
max_size = None,
fixed_size = None,
margins = None,
padding = None,
helptext = None,
position = None,
style = None,
hexpand = None,
vexpand = None,
font = None,
base_color = None,
background_color = None,
foreground_color = None,
selection_color = None,
border_color = None,
outline_color = None,
border_size = None,
outline_size = None,
position_technique = None,
is_focusable = None,
comment = None,
scale_start = None,
scale_end = None,
step_length = None,
marker_length = None,
orientation = None):
self.real_widget = fifechan.Slider(scale_start or self.DEFAULT_SCALE_START, scale_end or self.DEFAULT_SCALE_END)
self.orientation = self.DEFAULT_ORIENTATION
self.step_length = self.DEFAULT_STEP_LENGTH
self.marker_length = self.DEFAULT_MARKER_LENGTH
super(Slider, self).__init__(parent=parent,
name=name,
size=size,
min_size=min_size,
max_size=max_size,
fixed_size=fixed_size,
margins=margins,
padding=padding,
helptext=helptext,
position=position,
style=style,
hexpand=hexpand,
vexpand=vexpand,
font=font,
base_color=base_color,
background_color=background_color,
foreground_color=foreground_color,
selection_color=selection_color,
border_color=border_color,
outline_color=outline_color,
border_size=border_size,
outline_size=outline_size,
position_technique=position_technique,
is_focusable=is_focusable,
comment=comment)
if orientation is not None: self.orientation = orientation
if scale_start is not None: self.scale_start = scale_start
if scale_end is not None: self.scale_end = scale_end
if step_length is not None: self.step_length = step_length
if marker_length is not None: self.marker_length = marker_length
self.accepts_data = True
self._realSetData = self._setValue
self._realGetData = self._getValue
def clone(self, prefix):
sliderClone = Slider(None,
self._createNameWithPrefix(prefix),
self.size,
self.min_size,
self.max_size,
self.fixed_size,
self.margins,
self.padding,
self.helptext,
self.position,
self.style,
self.hexpand,
self.vexpand,
self.font,
self.base_color,
self.background_color,
self.foreground_color,
self.selection_color,
self.border_color,
self.outline_color,
self.border_size,
self.outline_size,
self.position_technique,
self.is_focusable,
self.comment,
self.scale_start,
self.scale_end,
self.step_length,
self.marker_length,
self.orientation)
return sliderClone
def _setScale(self, start, end):
"""setScale(self, double scaleStart, double scaleEnd)"""
if type(start) != float:
raise RuntimeError("Slider expects float for start scale")
if type(end) != float:
raise RuntimeError("Slider expects float for end scale")
self.real_widget.setScale(start, end)
def _getScaleStart(self):
"""getScaleStart(self) -> double"""
return self.real_widget.getScaleStart()
def _setScaleStart(self, start):
"""setScaleStart(self, double scaleStart)"""
if type(start) != float:
raise RuntimeError("Slider expects float for start scale")
self.real_widget.setScaleStart(start)
scale_start = property(_getScaleStart, _setScaleStart)
def _getScaleEnd(self):
"""getScaleEnd(self) -> double"""
return self.real_widget.getScaleEnd()
def _setScaleEnd(self, end):
"""setScaleEnd(self, double scaleEnd)"""
if type(end) != float:
raise RuntimeError("Slider expects float for end scale")
self.real_widget.setScaleEnd(end)
scale_end = property(_getScaleEnd, _setScaleEnd)
def _getValue(self):
"""getValue(self) -> double"""
return self.real_widget.getValue()
def _setValue(self, value):
"""setValue(self, double value)"""
if type(value) != float:
raise RuntimeError("Slider only accepts float values")
self.real_widget.setValue(value)
value = property(_getValue, _setValue)
def _setMarkerLength(self, length):
"""setMarkerLength(self, int length)"""
if type(length) != int:
raise RuntimeError("Slider only accepts int for Marker length")
self.real_widget.setMarkerLength(length)
def _getMarkerLength(self):
"""getMarkerLength(self) -> int"""
return self.real_widget.getMarkerLength()
marker_length = property(_getMarkerLength, _setMarkerLength)
def _setOrientation(self, orientation):
"""setOrientation(self, Orientation orientation)"""
self.real_widget.setOrientation(orientation)
def _getOrientation(self):
"""getOrientation(self) -> int"""
return self.real_widget.getOrientation()
orientation = property(_getOrientation, _setOrientation)
def _setStepLength(self, length):
"""setStepLength(self, double length)"""
if type(length) != float:
raise RuntimeError("Slider only accepts floats for step length")
self.real_widget.setStepLength(length)
def _getStepLength(self):
"""getStepLength(self) -> double"""
return self.real_widget.getStepLength()
step_length = property(_getStepLength, _setStepLength)
| fifengine/fifengine | engine/python/fife/extensions/pychan/widgets/slider.py | Python | lgpl-2.1 | 7,472 |
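# --- Editor's illustrative sketch (assumption, not taken from the FIFE sources): a
# pychan Slider is typically given a scale at construction time and a callback attached
# via the widget's capture() mechanism so the value can be read whenever the marker
# moves; the name and step length below are placeholders.
from fife.extensions.pychan.widgets import Slider
volume_slider = Slider(name='volume', scale_start=0.0, scale_end=1.0, step_length=0.05)
def on_volume_changed():
    print('slider value: %.2f' % volume_slider.value)
volume_slider.capture(on_volume_changed)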