repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
it-events-ro/scripts | update-from-eventbrite.py | 1 | 6855 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import sys
import utils
included_organizers = {
2491303902, # http://www.eventbrite.com/o/itcamp-2491303902
6873285549, # http://www.eventbrite.com/o/sponge-media-lab-6873285549
3001324227, # http://www.eventbrite.com/o/labview-student-ambassador-upb-3001324227
2300226659, # http://www.eventbrite.com/o/techstars-startup-programs-2300226659
5899601137, # http://www.eventbrite.com/o/oana-calugar-amp-fabio-carati-amp-cristian-dascalu-5899601137
4662547959, # http://www.eventbrite.com/o/clujhub-4662547959
4138472935, # http://www.eventbrite.com/o/yonder-4138472935
6397991619, # http://www.eventbrite.com/o/facultatea-de-inginerie-electrica-in-colaborare-cu-best-cluj-napoca-6397991619
3367422098, # http://www.eventbrite.com/o/andreea-popescu-3367422098
4206997271, # http://www.eventbrite.com/o/babele-create-together-4206997271
3168795376, # http://www.eventbrite.com/o/girls-in-tech-romania-3168795376
6671021543, # http://www.eventbrite.com/o/asociatia-ip-workshop-6671021543
2761218168, # http://www.eventbrite.com/o/ccsir-2761218168
9377817403, # http://www.eventbrite.com/o/hellojs-9377817403
7802438407, # http://www.eventbrite.com/o/innodrive-7802438407
10949312400, # http://www.eventbrite.com/o/school-of-content-10949312400
6795968089, # http://www.eventbrite.com/o/iiba-romania-chapter-6795968089
10963965257, # http://www.eventbrite.com/o/sinaptiq-edu-10963965257
4246372985, # http://www.eventbrite.com/o/hackathon-in-a-box-4246372985
8767089022, # http://www.eventbrite.com.au/o/bm-college-8767089022
6886785391, # http://www.eventbrite.com/o/sprint-consulting-6886785391
8270334915, # http://www.eventbrite.co.uk/o/msg-systems-romania-8270334915
2670928534, # http://www.eventbrite.com/o/itcamp-community-2670928534
5340605367, # http://www.eventbrite.com/o/techhub-bucharest-5340605367
8042013777, # http://www.eventbrite.com/o/owasp-foundation-8042013777
11097508562, # http://www.eventbrite.com/o/robertino-vasilescu-si-bogdan-socol-ambasadori-prestashop-11097508562
}
excluded_organizers = {
8035595159, # http://www.eventbrite.com/o/magicianul-augustin-8035595159
8193977126, # http://www.eventbrite.com/o/growth-marketing-conference-8193977126
2725971154, # http://www.eventbrite.com/o/lost-worlds-racing-2725971154
7795480037, # http://www.eventbrite.de/o/dexcar-autovermietung-ug-7795480037
10911641537, # http://www.eventbrite.com/o/johanna-house-10911641537
10950100881, # http://www.eventbrite.com/o/peace-action-training-and-research-institute-of-romania-patrir-10950100881
8349138707, # http://www.eventbrite.com/o/trix-bike-primaria-tg-jiu-consilul-local-gorj-salvamont-gorj-8349138707
5715420837, # http://www.eventbrite.com/o/mattei-events-5715420837
2087207893, # http://www.eventbrite.com/o/john-stevens-zero-in-2087207893
11050568264, # http://www.eventbrite.com/o/cristian-vasilescu-11050568264
10924487836, # http://www.eventbrite.com/o/kmdefensecom-krav-maga-scoala-bukan-10924487836
10797347037, # http://www.eventbrite.co.uk/o/story-travels-ltd-10797347037
10933030217, # http://www.eventbrite.com/o/10933030217
5570020107, # http://www.eventbrite.com/o/marius-5570020107
10948788760, # http://www.eventbrite.com/o/centrul-de-dezvoltare-personala-constanta-10948788760
10796273575, # http://www.eventbrite.com/o/summer-foundation-10796273575
10931790600, # http://www.eventbrite.com/o/ioana-amp-vali-10931790600
10024410089, # http://www.eventbrite.com/o/leagea-atractiei-in-actiune-10024410089
6837788799, # http://www.eventbrite.com/o/lost-worlds-travel-6837788799
10868911506, # http://www.eventbrite.com/o/the-city-of-green-buildings-association-10868911506
10973196426, # http://www.eventbrite.com/o/10973196426
8428263732, # http://www.eventbrite.com/o/upwork-8428263732
10967928809, # http://www.eventbrite.com/o/eastern-artisans-atelier-10967928809
1863005385, # http://www.eventbrite.com/o/sigma-3-survival-school-1863005385
8541146418, # http://www.eventbrite.com/o/modularity-8541146418
10909622502, # http://www.eventbrite.com/o/different-angle-cluster-10909622502
8384351483, # http://www.eventbrite.com/o/sciencehub-8384351483
10894747098, # http://www.eventbrite.com/o/consact-consulting-10894747098
10952849991, # http://www.eventbrite.co.uk/o/art-live-10952849991
10902884665, # http://www.eventbrite.com/o/10902884665
10942128462, # http://www.eventbrite.com/o/eurotech-assessment-and-certification-services-pvt-ltd-10942128462
9631107106, # http://www.eventbrite.com/o/de-ce-nu-eu-9631107106
11054013211, # http://www.eventbrite.co.uk/o/first-people-solutions-aviation-11054013211
10867523860, # http://www.eventbrite.com/o/igloo-media-10867523860
11063098365, # http://www.eventbrite.co.uk/o/glas-expert-11063098365
8348933279, # http://www.eventbrite.com/o/parentis-8348933279
11087510059, # http://www.eventbrite.co.uk/o/untold-ong-11087510059
11085577626, # http://www.eventbrite.com/o/11085577626
}
# TODO: make somehow API calls return historical events also
# TODO: make API calls handle paging
print('Looking for new organizations')
has_unknown_orgs = False
events = utils.eventbriteApi('events/search/?venue.country=RO&include_unavailable_events=true')
for e in events['events']:
organizer_id = int(e['organizer_id'])
if (organizer_id in included_organizers) or (organizer_id in excluded_organizers):
continue
has_unknown_orgs = True
org = utils.eventbriteApi('organizers/%d/' % organizer_id)
print('Unknown organization %d:\n- %s\n- %s' % (organizer_id, e['url'], org['url']))
if has_unknown_orgs:
print('Had unknown orgs, stopping')
sys.exit(1)
orgs, venues, events = {}, {}, []
def _getOrganizersAndEvents(org_id):
global events, orgs
org = utils.eventbriteApi('organizers/%d/' % org_id)
orgs[org_id] = org
org_events = utils.eventbriteApi(
'organizers/%d/events/?start_date.range_start=2010-01-01T00:00:00&status=all' % org_id)
events += [e for e in org_events['events'] if 'venue_id' in e and e['venue_id'] is not None]
utils.repeat(included_organizers, 'Fetching organization data for %d', _getOrganizersAndEvents)
def _getVenueInfo(venue_id):
global venues
venue = utils.eventbriteApi('venues/%d/' % venue_id)
# some organizations do events world-wide, not in RO only
if venue['address']['country'] != 'RO': return
venues[venue_id] = venue
unique_venues = frozenset(int(e['venue_id']) for e in events)
utils.repeat(unique_venues, 'Fetching venue information for %d', _getVenueInfo)
# filter out events not from RO
events = [e for e in events if int(e['venue_id']) in venues]
result = dict(orgs=orgs, venues=venues, events=events)
with open('eventbrites.json', 'w') as f:
f.write(json.dumps(result, sort_keys=True, indent=4))
| agpl-3.0 | -8,798,751,040,999,646,000 | 52.554688 | 122 | 0.756966 | false |
MangoMangoDevelopment/neptune | lib/ros_comm-1.12.0/test/test_rospy/test/rostest/test_latch.py | 2 | 3693 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
PKG = 'test_rospy'
NAME = 'test_latch'
import sys
import time
import unittest
from std_msgs.msg import String
class TestLatch(unittest.TestCase):
def setUp(self):
self.callback_invoked = {}
for i in range(0, 6):
self.callback_invoked[i] = False
def callback_args(self, msg, i):
self.assertEquals('foo', msg.data)
self.callback_invoked[i] = True
def callback(self, msg):
self.assertEquals('foo', msg.data)
self.callback_invoked[0] = True
def test_latch(self):
import rospy
# multi-part test. First check that we get latched message, then check
# that subscribers to same topic also receive latched message
# #1852
rospy.init_node(NAME)
s0 = rospy.Subscriber('s', String, self.callback)
# 20 seconds to receive first latched message
timeout_t = time.time() + 20.
print("waiting for 20 seconds")
while not self.callback_invoked[0] and \
not rospy.is_shutdown() and \
timeout_t > time.time():
time.sleep(0.2)
self.failIf(timeout_t < time.time(), "timeout exceeded")
self.failIf(rospy.is_shutdown(), "node shutdown")
self.assert_(self.callback_invoked[0], "callback not invoked")
# register three more callbacks, make sure they get invoked with message
# - callbacks are actually called inline, but in spirit of test, async callback is allowed
for i in range(1, 5):
self.failIf(self.callback_invoked[i])
s = rospy.Subscriber('s', String, self.callback_args, i)
timeout_t = time.time() + 0.5
while not self.callback_invoked[i] and \
not rospy.is_shutdown() and \
timeout_t > time.time():
time.sleep(0.1)
self.assert_(self.callback_invoked[i])
if __name__ == '__main__':
import rostest
rostest.run(PKG, NAME, TestLatch, sys.argv)
| bsd-3-clause | 7,342,764,846,024,820,000 | 39.141304 | 98 | 0.663959 | false |
ProgVal/Limnoria-test | plugins/GPG/test.py | 5 | 5812 | ###
# Copyright (c) 2015, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import supybot.utils.minisix as minisix
import supybot.gpg as gpg
PRIVATE_KEY = """
-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v1.4.12 (GNU/Linux)
lQHYBFD7GxQBBACeu7bj/wgnnv5NkfHImZJVJLaq2cwKYc3rErv7pqLXpxXZbDOI
jP+5eSmTLhPUK67aRD6gG0wQ9iAhYR03weOmyjDGh0eF7kLYhu/4Il56Y/YbB8ll
Imz/pep/Hi72ShcW8AtifDup/KeHjaWa1yF2WThHbX/0N2ghSxbJnatpBwARAQAB
AAP6Arf7le7FD3ZhGZvIBkPr25qca6i0Qxb5XpOinV7jLcoycZriJ9Xofmhda9UO
xhNVppMvs/ofI/m0umnR4GLKtRKnJSc8Edxi4YKyqLehfBTF20R/kBYPZ772FkNW
Kzo5yCpP1jpOc0+QqBuU7OmrG4QhQzTLXIUgw4XheORncEECAMGkvR47PslJqzbY
VRIzWEv297r1Jxqy6qgcuCJn3RWYJbEZ/qdTYy+MgHGmaNFQ7yhfIzkBueq0RWZp
Z4PfJn8CANHZGj6AJZcvb+VclNtc5VNfnKjYD+qQOh2IS8NhE/0umGMKz3frH1TH
yCbh2LlPR89cqNcd4QvbHKA/UmzISXkB/37MbUnxXTpS9Y4HNpQCh/6SYlB0lucV
QN0cgjfhd6nBrb6uO6+u40nBzgynWcEpPMNfN0AtQeA4Dx+WrnK6kZqfd7QMU3Vw
eWJvdCB0ZXN0iLgEEwECACIFAlD7GxQCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
AheAAAoJEMnTMjwgrwErV3AD/0kRq8UWPlkc6nyiIR6qiT3EoBNHKIi4cz68Wa1u
F2M6einrRR0HolrxonynTGsdr1u2f3egOS4fNfGhTNAowSefYR9q5kIYiYE2DL5G
YnjJKNfmnRxZM9YqmEnN50rgu2cifSRehp61fXdTtmOAR3js+9wb73dwbYzr3kIc
3WH1
=UBcd
-----END PGP PRIVATE KEY BLOCK-----
"""
WRONG_TOKEN_SIGNATURE = """
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA1
{a95dc112-780e-47f7-a83a-c6f3820d7dc3}
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.12 (GNU/Linux)
iJwEAQECAAYFAlD7Jb0ACgkQydMyPCCvASv9HgQAhQf/oFMWcKwGncH0hjXC3QYz
7ck3chgL3S1pPAvS69viz6i2bwYZYD8fhzHNJ/qtw/rx6thO6PwT4SpdhKerap+I
kdem3LjM4fAGHRunHZYP39obNKMn1xv+f26mEAAWxdv/W/BLAFqxi3RijJywRkXm
zo5GUl844kpnV+uk0Xk=
=z2Cz
-----END PGP SIGNATURE-----
"""
FINGERPRINT = '2CF3E41500218D30F0B654F5C9D3323C20AF012B'
class GPGTestCase(PluginTestCase):
plugins = ('GPG', 'User')
def setUp(self):
super(GPGTestCase, self).setUp()
gpg.loadKeyring()
if gpg.available and network:
def testGpgAddRemove(self):
self.assertNotError('register foo bar')
self.assertError('gpg key add 51E516F0B0C5CE6A pgp.mit.edu')
self.assertResponse('gpg key add EB17F1E0CEB63930 pgp.mit.edu',
'1 key imported, 0 unchanged, 0 not imported.')
self.assertNotError(
'gpg key remove F88ECDE235846FA8652DAF5FEB17F1E0CEB63930')
self.assertResponse('gpg key add EB17F1E0CEB63930 pgp.mit.edu',
'1 key imported, 0 unchanged, 0 not imported.')
self.assertResponse('gpg key add EB17F1E0CEB63930 pgp.mit.edu',
'Error: This key is already associated with your account.')
if gpg.available:
def testGpgAuth(self):
self.assertNotError('register spam egg')
gpg.keyring.import_keys(PRIVATE_KEY).__dict__
(id, user) = list(ircdb.users.items())[0]
user.gpgkeys.append(FINGERPRINT)
msg = self.getMsg('gpg signing gettoken').args[-1]
match = re.search('is: ({.*}).', msg)
assert match, repr(msg)
token = match.group(1)
def fakeGetUrlFd(*args, **kwargs):
fd.geturl = lambda :None
return fd
(utils.web.getUrlFd, realGetUrlFd) = (fakeGetUrlFd, utils.web.getUrlFd)
fd = minisix.io.StringIO()
fd.write('foo')
fd.seek(0)
self.assertResponse('gpg signing auth http://foo.bar/baz.gpg',
'Error: Signature or token not found.')
fd = minisix.io.StringIO()
fd.write(token)
fd.seek(0)
self.assertResponse('gpg signing auth http://foo.bar/baz.gpg',
'Error: Signature or token not found.')
fd = minisix.io.StringIO()
fd.write(WRONG_TOKEN_SIGNATURE)
fd.seek(0)
self.assertRegexp('gpg signing auth http://foo.bar/baz.gpg',
'Error: Unknown token.*')
fd = minisix.io.StringIO()
fd.write(str(gpg.keyring.sign(token)))
fd.seek(0)
self.assertResponse('gpg signing auth http://foo.bar/baz.gpg',
'You are now authenticated as spam.')
utils.web.getUrlFd = realGetUrlFd
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| bsd-3-clause | -8,063,978,653,867,004,000 | 40.219858 | 83 | 0.707502 | false |
alexsilva/smartbackup | smartbackup/plugins.py | 1 | 1085 | import peewee
from bakthat.plugin import Plugin
import bakthat
import backends
import models
import bakthat.models
__author__ = 'alex'
class MysqlEngineBackend(Plugin):
def activate(self):
#self.log.info("Connecting plugin '{0}'".format(backends.S3BackendPlus.name))
#bakthat.models.database_proxy.initialize(peewee.MySQLDatabase(None))
# bakthat.models.create_tables()
pass
class S3BackendPlusPlugin(Plugin):
def activate(self):
#self.log.info("Connecting plugin '{0}'".format(backends.S3BackendPlus.name))
bakthat.STORAGE_BACKEND[backends.S3BackendPlus.name] = backends.S3BackendPlus
class LocalBackendPlugin(Plugin):
def activate(self):
#self.log.info("Connecting plugin '{0}'".format(backends.LocalStorageBackend.name))
bakthat.STORAGE_BACKEND[backends.LocalStorageBackend.name] = backends.LocalStorageBackend
class BackupsModelPlugin(Plugin):
def activate(self):
#self.log.info("Connecting plugin '{0}'".format(self.__class__.__name__))
bakthat.Backups = models.Backups | mit | 2,441,257,913,264,602,000 | 26.15 | 97 | 0.716129 | false |
HengeSense/website | website/migrations/0003_auto__chg_field_userprofile_gender__chg_field_userprofile_date_of_birt.py | 1 | 4876 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UserProfile.gender'
db.alter_column('website_userprofile', 'gender', self.gf('django.db.models.fields.CharField')(max_length=1, null=True))
# Changing field 'UserProfile.date_of_birth'
db.alter_column('website_userprofile', 'date_of_birth', self.gf('django.db.models.fields.DateField')(null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'UserProfile.gender'
raise RuntimeError("Cannot reverse this migration. 'UserProfile.gender' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'UserProfile.date_of_birth'
raise RuntimeError("Cannot reverse this migration. 'UserProfile.date_of_birth' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.userprofile': {
'Meta': {'ordering': "('_order',)", 'object_name': 'UserProfile'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'receive_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['website'] | agpl-3.0 | 3,340,166,466,635,874,000 | 64.905405 | 182 | 0.576292 | false |
Atrasoftware/PyTMCL | PyTMCL/tests/test_codec.py | 1 | 7568 | #!/usr/bin/env python
import unittest
from TMCL import *
import random as rnd
MAXITER = 200
REQUEST_KEYS = codec.REQUEST_KEYS + ['value']
REPLY_KEYS = codec.REPLY_KEYS + ['value']
class CodecTestCase(unittest.TestCase):
def _gen_byte(self, min_i=0, max_i=256):
return rnd.randint(min_i, max_i-1)
def _gen_bytes(self, length=5):
return [self._gen_byte() for _ in range(length)]
def _gen_pos_bytes(self, length=5):
return [self._gen_byte(max_i=128)] + self._gen_bytes(length-1)
def _gen_neg_bytes(self, length=5):
return [self._gen_byte(min_i=128)] + self._gen_bytes(length-1)
def _gen_number(self, length=None):
if length is None:
length = rnd.randint(1, 9)
value = [rnd.randint(0, 9) for _ in range(length)]
value = (str(s) for s in value)
value = "".join(value)
return int(value)
def _gen_cmd_string(self, length=8):
values = [rnd.randint(0, 9) for _ in range(length)]
chksum = sum(values)
values.append(chksum)
string = "".join(chr(v) for v in values)
return string
def test_byte(self):
for i in range(MAXITER):
self.assertIn(codec.byte(i), range(256))
def test_checksum(self):
for i in range(MAXITER):
self.assertEqual(codec.checksum(i*[i]), codec.byte(i*i))
def test_encodeBytes(self):
value = 123456789
bytes = codec.encodeBytes(value)
self.assertEqual([7, 91, 205, 21], bytes)
new_value = codec.decodeBytes(bytes)
self.assertEqual(value, new_value)
def test_decodeBytes(self):
bytes = [1, 2, 3, 4]
value = codec.decodeBytes(bytes)
self.assertEqual(16909060, value)
new_bytes = codec.encodeBytes(value)
self.assertEqual(bytes, new_bytes)
def test_encdecBytes(self):
for _ in range(MAXITER):
value = self._gen_number()
bytes = codec.encodeBytes(value)
new_value = codec.decodeBytes(bytes)
self.assertEqual(value, new_value)
def test_decencBytes(self):
for _ in range(MAXITER):
bytes = self._gen_bytes(length=4)
value = codec.decodeBytes(bytes)
new_bytes = codec.encodeBytes(value)
self.assertEqual(bytes, new_bytes)
def test_decencNegBytes(self):
for _ in range(MAXITER):
bytes = self._gen_neg_bytes(length=4)
value = codec.decodeBytes(bytes)
new_bytes = codec.encodeBytes(value)
self.assertEqual(bytes, new_bytes)
def test_decencPosBytes(self):
for _ in range(MAXITER):
bytes = self._gen_pos_bytes(length=4)
value = codec.decodeBytes(bytes)
new_bytes = codec.encodeBytes(value)
self.assertEqual(bytes, new_bytes)
def _help_test_encodeReAllCommand(self, encoder, decoder, keys):
string = "ABCD\x00\x00\x00EO"
values = [ord(s) for s in string]
result = encoder(values[0], values[1], values[2], values[3], sum(values[4:8]))
self.assertEqual(string, result)
def _help_test_decodeReAllCommand(self, encoder, decoder, keys):
string = "ABCD\x00\x00\x00EO"
result = decoder(string)
for i, k in enumerate(keys[:4]):
self.assertEqual(ord(string[i]), result[k])
values = sum(ord(s) for s in string[4:8])
self.assertEqual(values, result['value'])
self.assertEqual(ord(string[7]), result['value'])
self.assertEqual(ord(string[8]), result['checksum'])
def test_encodeRequestCommand(self):
self._help_test_encodeReAllCommand(codec.encodeRequestCommand, codec.decodeRequestCommand, REQUEST_KEYS)
def test_decodeRequestCommand(self):
self._help_test_decodeReAllCommand(codec.encodeRequestCommand, codec.decodeRequestCommand, REQUEST_KEYS)
def test_encodeReplyCommand(self):
self._help_test_encodeReAllCommand(codec.encodeReplyCommand, codec.decodeReplyCommand, REPLY_KEYS)
def test_decodeReplyCommand(self):
self._help_test_decodeReAllCommand(codec.encodeReplyCommand, codec.decodeReplyCommand, REPLY_KEYS)
def _help_test_encdecReAllCommand(self, encoder, decoder, keys):
for _ in range(MAXITER):
values = self._gen_bytes(length=len(keys))
string = encoder(*values)
result = decoder(string)
for i, k in enumerate(keys):
self.assertEqual(values[i], result[k])
self.assertEqual(sum(values) % 256, result['checksum'])
def _help_test_decencReALLCommand(self, encoder, decoder, keys):
for _ in range(MAXITER):
string = self._gen_cmd_string()
values = decoder(string)
unpacked = (values[k] for k in keys)
new_string = encoder(*unpacked)
self.assertEqual(string, new_string)
def test_encdecRequestCommand(self):
self._help_test_encdecReAllCommand(codec.encodeRequestCommand, codec.decodeRequestCommand, REQUEST_KEYS)
def test_decencRequestCommand(self):
self._help_test_decencReALLCommand(codec.encodeRequestCommand, codec.decodeRequestCommand, REQUEST_KEYS)
def test_encdecReplyCommand(self):
self._help_test_encdecReAllCommand(codec.encodeReplyCommand, codec.decodeReplyCommand, REPLY_KEYS)
def test_decencReplyCommand(self):
self._help_test_decencReALLCommand(codec.encodeReplyCommand, codec.decodeReplyCommand, REPLY_KEYS)
def test_encodeCommand(self):
string = "ABCD\x00\x00\x00EO"
params = [ord(s) for s in string[:4]]
values = ord(string[7])
# values = sum(ord(s) for s in string[4:8])
new_string = codec.encodeCommand(params, values)
self.assertEqual(string, new_string)
def test_decodeCommand(self):
keys = range(4)
string = "ABCD\x00\x00\x00EO"
result = codec.decodeCommand(string, keys)
for i, k in enumerate(keys):
self.assertEqual(ord(string[i]), result[k])
values = sum(ord(s) for s in string[4:8])
self.assertEqual(values, result['value'])
self.assertEqual(ord(string[7]), result['value'])
self.assertEqual(ord(string[8]), result['checksum'])
def test_encdecCommand(self):
keys = range(4)
for _ in range(MAXITER):
params = self._gen_bytes(length=4)
values = self._gen_byte()
chksum = sum(params, values) % 256
string = codec.encodeCommand(params, values)
result = codec.decodeCommand(string, keys)
for i, k in enumerate(keys):
self.assertEqual(params[i], result[k])
self.assertEqual(values, result['value'])
self.assertEqual(chksum, result['checksum'])
def test_decencCommand(self):
keys = range(4)
for _ in range(MAXITER):
string = self._gen_cmd_string()
decoded = codec.decodeCommand(string, keys)
params = [decoded[k] for k in keys]
values = decoded['value']
new_string = codec.encodeCommand(params, values)
self.assertEqual(string[:4], new_string[:4]) # parameter part
self.assertEqual(string[4:8], new_string[4:8]) # value part
self.assertEqual(string[8], new_string[8]) # checksum part
self.assertEqual(string, new_string)
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | -1,073,172,985,004,600,400 | 29.516129 | 112 | 0.612711 | false |
liesbethvanherpe/NeuroM | neurom/fst/__init__.py | 1 | 6830 | # Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''' NeuroM, lightweight and fast
Examples:
Obtain some morphometrics
>>> ap_seg_len = fst.get('segment_lengths', nrn, neurite_type=neurom.APICAL_DENDRITE)
>>> ax_sec_len = fst.get('section_lengths', nrn, neurite_type=neurom.AXON)
'''
import numpy as _np
from . import _neuritefunc as _nrt
from . import _neuronfunc as _nrn
from ._core import FstNeuron
from ..core import NeuriteType as _ntype
from ..core import iter_neurites as _ineurites
from ..core.types import tree_type_checker as _is_type
from ..exceptions import NeuroMError
NEURITEFEATURES = {
'total_length': _nrt.total_length,
'total_length_per_neurite': _nrt.total_length_per_neurite,
'neurite_lengths': _nrt.total_length_per_neurite,
'terminal_path_lengths_per_neurite': _nrt.terminal_path_lengths_per_neurite,
'section_lengths': _nrt.section_lengths,
'section_term_lengths': _nrt.section_term_lengths,
'section_bif_lengths': _nrt.section_bif_lengths,
'neurite_volumes': _nrt.total_volume_per_neurite,
'neurite_volume_density': _nrt.neurite_volume_density,
'section_volumes': _nrt.section_volumes,
'section_areas': _nrt.section_areas,
'section_tortuosity': _nrt.section_tortuosity,
'section_path_distances': _nrt.section_path_lengths,
'number_of_sections': _nrt.number_of_sections,
'number_of_sections_per_neurite': _nrt.number_of_sections_per_neurite,
'number_of_neurites': _nrt.number_of_neurites,
'number_of_bifurcations': _nrt.number_of_bifurcations,
'number_of_forking_points': _nrt.number_of_forking_points,
'number_of_terminations': _nrt.number_of_terminations,
'section_branch_orders': _nrt.section_branch_orders,
'section_term_branch_orders': _nrt.section_term_branch_orders,
'section_bif_branch_orders': _nrt.section_bif_branch_orders,
'section_radial_distances': _nrt.section_radial_distances,
'local_bifurcation_angles': _nrt.local_bifurcation_angles,
'remote_bifurcation_angles': _nrt.remote_bifurcation_angles,
'partition': _nrt.bifurcation_partitions,
'partition_asymmetry': _nrt.partition_asymmetries,
'number_of_segments': _nrt.number_of_segments,
'segment_lengths': _nrt.segment_lengths,
'segment_volumes': _nrt.segment_volumes,
'segment_radii': _nrt.segment_radii,
'segment_midpoints': _nrt.segment_midpoints,
'segment_taper_rates': _nrt.segment_taper_rates,
'segment_radial_distances': _nrt.segment_radial_distances,
'segment_meander_angles': _nrt.segment_meander_angles,
'principal_direction_extents': _nrt.principal_direction_extents,
'total_area_per_neurite': _nrt.total_area_per_neurite,
}
NEURONFEATURES = {
'soma_radii': _nrn.soma_radii,
'soma_surface_areas': _nrn.soma_surface_areas,
'trunk_origin_radii': _nrn.trunk_origin_radii,
'trunk_origin_azimuths': _nrn.trunk_origin_azimuths,
'trunk_origin_elevations': _nrn.trunk_origin_elevations,
'trunk_section_lengths': _nrn.trunk_section_lengths,
'sholl_frequency': _nrn.sholl_frequency,
}
def register_neurite_feature(name, func):
'''Register a feature to be applied to neurites
Parameters:
name: name of the feature, used for access via get() function.
func: single parameter function of a neurite.
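    Example (a sketch only; the feature name is arbitrary and it assumes each
    neurite exposes a ``points`` array):
    >>> register_neurite_feature('n_points', lambda neurite: len(neurite.points))
    >>> n_points = get('n_points', nrn)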
'''
if name in NEURITEFEATURES:
raise NeuroMError('Attempt to hide registered feature %s', name)
def _fun(neurites, neurite_type=_ntype.all):
'''Wrap neurite function from outer scope and map into list'''
return list(func(n) for n in _ineurites(neurites, filt=_is_type(neurite_type)))
NEURONFEATURES[name] = _fun
def get(feature, obj, **kwargs):
'''Obtain a feature from a set of morphology objects
Parameters:
feature(string): feature to extract
obj: a neuron, population or neurite tree
**kwargs: parameters to forward to underlying worker functions
Returns:
features as a 1D or 2D numpy array.
'''
feature = (NEURITEFEATURES[feature] if feature in NEURITEFEATURES
else NEURONFEATURES[feature])
return _np.array(list(feature(obj, **kwargs)))
_INDENT = ' ' * 4
def _indent(string, count):
'''indent `string` by `count` * INDENT'''
indent = _INDENT * count
ret = indent + string.replace('\n', '\n' + indent)
return ret.rstrip()
def _get_doc():
'''Get a description of all the known available features'''
def get_docstring(func):
'''extract docstring, if possible'''
docstring = ':\n'
if func.__doc__:
docstring += _indent(func.__doc__, 2)
return docstring
ret = ['\nNeurite features (neurite, neuron, neuron population):']
ret.extend(_INDENT + '- ' + feature + get_docstring(func)
for feature, func in sorted(NEURITEFEATURES.items()))
ret.append('\nNeuron features (neuron, neuron population):')
ret.extend(_INDENT + '- ' + feature + get_docstring(func)
for feature, func in sorted(NEURONFEATURES.items()))
return '\n'.join(ret)
get.__doc__ += _indent('\nFeatures:\n', 1) + _indent(_get_doc(), 2) # pylint: disable=no-member
| bsd-3-clause | 317,015,164,251,236,800 | 39.898204 | 96 | 0.701611 | false |
denys-duchier/kivy | kivy/uix/filechooser.py | 1 | 32541 | '''
FileChooser
===========
.. versionadded:: 1.0.5
.. versionchanged:: 1.2.0
In the chooser template, the `controller` is not a direct reference anymore
but a weak-reference.
You must update all the notation `root.controller.xxx` to
`root.controller().xxx`.
Simple example
--------------
main.py
.. include:: ../../examples/RST_Editor/main.py
:literal:
editor.kv
.. highlight:: kv
.. include:: ../../examples/RST_Editor/editor.kv
:literal:
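For a quick start, here is a minimal, self-contained sketch that does not
depend on the RST_Editor files above (the class and callback names are
illustrative only)::

    from kivy.app import App
    from kivy.uix.filechooser import FileChooserListView

    class MinimalChooserApp(App):
        def build(self):
            chooser = FileChooserListView(path='.')
            # on_submit fires with (chooser, selection, touch) on double-tap
            chooser.bind(on_submit=self.submitted)
            return chooser

        def submitted(self, chooser, selection, touch):
            print(selection)

    if __name__ == '__main__':
        MinimalChooserApp().run()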
'''
__all__ = ('FileChooserListView', 'FileChooserIconView',
'FileChooserListLayout', 'FileChooserIconLayout',
'FileChooser', 'FileChooserController',
'FileChooserProgressBase', 'FileSystemAbstract',
'FileSystemLocal')
from weakref import ref
from time import time
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.logger import Logger
from kivy.utils import platform as core_platform
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import (
StringProperty, ListProperty, BooleanProperty, ObjectProperty,
NumericProperty, OptionProperty, AliasProperty)
from os import listdir
from os.path import (
basename, join, sep, normpath, expanduser, altsep,
splitdrive, realpath, getsize, isdir, abspath, pardir)
from fnmatch import fnmatch
import collections
platform = core_platform
filesize_units = ('B', 'KB', 'MB', 'GB', 'TB')
_have_win32file = False
if platform == 'win':
# Import that module here as it's not available on non-windows machines.
# See http://bit.ly/i9klJE except that the attributes are defined in
# win32file not win32com (bug on page).
# Note: For some reason this doesn't work after a os.chdir(), no matter to
# what directory you change from where. Windows weirdness.
try:
from win32file import FILE_ATTRIBUTE_HIDDEN, GetFileAttributesExW, error
_have_win32file = True
except ImportError:
Logger.error('filechooser: win32file module is missing')
Logger.error('filechooser: we cant check if a file is hidden or not')
def alphanumeric_folders_first(files, filesystem):
return (sorted(f for f in files if filesystem.is_dir(f)) +
sorted(f for f in files if not filesystem.is_dir(f)))
class FileSystemAbstract(object):
'''Class for implementing a File System view that can be used with the
:class:`FileChooser`.:attr:`~FileChooser.file_system`.
.. versionadded:: 1.8.0
'''
def listdir(self, fn):
'''Return the list of files in the directory `fn`
'''
pass
def getsize(self, fn):
'''Return the size in bytes of a file
'''
pass
def is_hidden(self, fn):
'''Return True if the file is hidden
'''
pass
def is_dir(self, fn):
'''Return True if the argument passed to this method is a directory
'''
pass
class FileSystemLocal(FileSystemAbstract):
'''Implementation of :class:`FileSystemAbstract` for local files
.. versionadded:: 1.8.0
'''
def listdir(self, fn):
return listdir(fn)
def getsize(self, fn):
return getsize(fn)
def is_hidden(self, fn):
if platform == 'win':
if not _have_win32file:
return False
try:
return GetFileAttributesExW(fn)[0] & FILE_ATTRIBUTE_HIDDEN
except error:
# This error can occur when a file is already being accessed by
# someone else. Treat the file as hidden, since chances are we won't
# be able to do anything with it anyway.
Logger.exception('unable to access to <%s>' % fn)
return True
return basename(fn).startswith('.')
def is_dir(self, fn):
return isdir(fn)
class FileChooserProgressBase(FloatLayout):
'''Base for implementing a progress view. This view is used when too many
entries need to be created and are delayed over multiple frames.
.. versionadded:: 1.2.0
'''
path = StringProperty('')
'''Current path of the FileChooser, read-only.
'''
index = NumericProperty(0)
'''Current index of :attr:`total` entries to be loaded.
'''
total = NumericProperty(1)
'''Total number of entries to load.
'''
def cancel(self, *largs):
'''Cancel any action from the FileChooserController.
'''
if self.parent:
self.parent.cancel()
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
super(FileChooserProgressBase, self).on_touch_down(touch)
return True
def on_touch_move(self, touch):
if self.collide_point(*touch.pos):
super(FileChooserProgressBase, self).on_touch_move(touch)
return True
def on_touch_up(self, touch):
if self.collide_point(*touch.pos):
super(FileChooserProgressBase, self).on_touch_up(touch)
return True
class FileChooserProgress(FileChooserProgressBase):
pass
class FileChooserLayout(FloatLayout):
'''Base class for file chooser layouts.
.. versionadded:: 1.9.0
'''
VIEWNAME = 'undefined'
__events__ = ('on_entry_added', 'on_entries_cleared',
'on_subentry_to_entry', 'on_remove_subentry', 'on_submit')
controller = ObjectProperty()
'''
Reference to the controller handling this layout.
:class:`~kivy.properties.ObjectProperty`
'''
def on_entry_added(self, node, parent=None):
pass
def on_entries_cleared(self):
pass
def on_subentry_to_entry(self, subentry, entry):
pass
def on_remove_subentry(self, subentry, entry):
pass
def on_submit(self, selected, touch=None):
pass
class FileChooserListLayout(FileChooserLayout):
'''File chooser layout using a list view.
.. versionadded:: 1.9.0
'''
VIEWNAME = 'list'
_ENTRY_TEMPLATE = 'FileListEntry'
def __init__(self, **kwargs):
super(FileChooserListLayout, self).__init__(**kwargs)
self.fast_bind('on_entries_cleared', self.scroll_to_top)
def scroll_to_top(self, *args):
self.ids.scrollview.scroll_y = 1.0
class FileChooserIconLayout(FileChooserLayout):
'''File chooser layout using an icon view.
.. versionadded:: 1.9.0
'''
VIEWNAME = 'icon'
_ENTRY_TEMPLATE = 'FileIconEntry'
def __init__(self, **kwargs):
super(FileChooserIconLayout, self).__init__(**kwargs)
self.fast_bind('on_entries_cleared', self.scroll_to_top)
def scroll_to_top(self, *args):
self.ids.scrollview.scroll_y = 1.0
class FileChooserController(RelativeLayout):
'''Base for implementing a FileChooser. Don't use this class directly, but
prefer using an implementation such as the :class:`FileChooser`,
:class:`FileChooserListView` or :class:`FileChooserIconView`.
.. versionchanged:: 1.9.0
:Events:
`on_entry_added`: entry, parent
Fired when a root-level entry is added to the file list.
`on_entries_cleared`
Fired when the entries list is cleared, usually when the
root is refreshed.
`on_subentry_to_entry`: entry, parent
Fired when a sub-entry is added to an existing entry.
`on_remove_subentry`: subentry, entry
Fired when entries are removed from an entry, usually when
a node is closed.
`on_submit`: selection, touch
Fired when a file has been selected with a double-tap.
'''
_ENTRY_TEMPLATE = None
layout = ObjectProperty(baseclass=FileChooserLayout)
'''
Reference to the layout widget instance.
layout is an :class:`~kivy.properties.ObjectProperty`.
.. versionadded:: 1.9.0
'''
path = StringProperty(u'/')
'''
:class:`~kivy.properties.StringProperty`, defaults to the current working
directory as a unicode string. It specifies the path on the filesystem that
this controller should refer to.
.. warning::
If a unicode path is specified, all the files returned will be in
unicode allowing the display of unicode files and paths. If a bytes
path is specified, only files and paths with ascii names will be
displayed properly: non-ascii filenames will be displayed and listed
with questions marks (?) instead of their unicode characters.
'''
filters = ListProperty([])
''':class:`~kivy.properties.ListProperty`, defaults to [], equal to '\*'.
Specifies the filters to be applied to the files in the directory.
The filters are not reset when the path changes. You need to do that
yourself if desired.
There are two kinds of filters: patterns and callbacks.
#. Patterns
e.g. ['\*.png'].
You can use the following patterns:
========== =================================
Pattern Meaning
========== =================================
\* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any character not in seq
========== =================================
#. Callbacks
You can specify a function that will be called for each file. The
callback will be passed the folder and file name as the first
and second parameters respectively. It should return True to
indicate a match and False otherwise.
.. versionchanged:: 1.4.0
If the filter is a callable (function or method), it will be called
with the path and the file name as arguments for each file in the
directory.
The callable should return True to indicate a match and False
otherwise.
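For example, a callback filter that keeps only Python sources larger than
1 KB could look like this (an illustrative sketch; ``my_chooser`` stands
for your FileChooser instance)::

    import os

    def big_python_files(folder, filename):
        full = os.path.join(folder, filename)
        return filename.endswith('.py') and os.path.getsize(full) > 1024

    my_chooser.filters = [big_python_files]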
'''
filter_dirs = BooleanProperty(False)
'''
:class:`~kivy.properties.BooleanProperty`, defaults to False.
Indicates whether filters should also apply to directories.
'''
sort_func = ObjectProperty(alphanumeric_folders_first)
'''
:class:`~kivy.properties.ObjectProperty`.
Provides a function to be called with a list of filenames, and the
filesystem implementation as the second argument.
Returns a list of filenames sorted for display in the view.
.. versionchanged:: 1.8.0
The signature needs now 2 arguments: first the list of files,
second the filesystem class to use.
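For instance, a sort function that lists folders first and then the biggest
files first could be written as follows (illustrative sketch, reusing the
filesystem argument described above; ``my_chooser`` is your FileChooser
instance)::

    def biggest_first(files, filesystem):
        folders = [f for f in files if filesystem.is_dir(f)]
        regular = [f for f in files if not filesystem.is_dir(f)]
        return sorted(folders) + sorted(
            regular, key=filesystem.getsize, reverse=True)

    my_chooser.sort_func = biggest_first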
'''
files = ListProperty([])
'''
Read-only :class:`~kivy.properties.ListProperty`.
The list of files in the directory specified by path after applying the
filters.
'''
show_hidden = BooleanProperty(False)
'''
:class:`~kivy.properties.BooleanProperty`, defaults to False.
Determines whether hidden files and folders should be shown.
'''
selection = ListProperty([])
'''
Read-only :class:`~kivy.properties.ListProperty`.
Contains the list of files that are currently selected.
'''
multiselect = BooleanProperty(False)
'''
:class:`~kivy.properties.BooleanProperty`, defaults to False.
Determines whether the user is able to select multiple files or not.
'''
dirselect = BooleanProperty(False)
'''
:class:`~kivy.properties.BooleanProperty`, defaults to False.
Determines whether directories are valid selections or not.
.. versionadded:: 1.1.0
'''
rootpath = StringProperty(None, allownone=True)
'''
Root path to use instead of the system root path. If set, it will not show
a ".." directory to go up to the root path. For example, if you set
rootpath to /users/foo, the user will be unable to go to /users or to any
other directory not starting with /users/foo.
.. versionadded:: 1.2.0
:class:`~kivy.properties.StringProperty`, defaults to None.
.. note::
Similar to :attr:`path`, if `rootpath` is specified, whether it's a
bytes or unicode string determines the type of the filenames and paths
read.
'''
progress_cls = ObjectProperty(FileChooserProgress)
'''Class to use for displaying a progress indicator for filechooser
loading.
.. versionadded:: 1.2.0
:class:`~kivy.properties.ObjectProperty`, defaults to
:class:`FileChooserProgress`.
.. versionchanged:: 1.8.0
If you set a string, the :class:`~kivy.factory.Factory` will be used to
resolve the class.
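A minimal custom indicator might look like this (an illustrative sketch
only; the class simply shows a counter, ``my_chooser`` is your FileChooser
instance)::

    from kivy.uix.label import Label

    class CountingProgress(FileChooserProgressBase):
        def __init__(self, **kwargs):
            super(CountingProgress, self).__init__(**kwargs)
            self.label = Label()
            self.add_widget(self.label)
            self.bind(index=self._refresh, total=self._refresh)

        def _refresh(self, *args):
            self.label.text = '%d / %d' % (self.index, self.total)

    my_chooser.progress_cls = CountingProgress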
'''
file_encodings = ListProperty(['utf-8', 'latin1', 'cp1252'])
'''Possible encodings for decoding a filename to unicode. In the case that
the user has a weird filename, undecodable without knowing its
initial encoding, we have no other choice than to guess it.
Please note that if you encounter an issue because of a missing encoding
here, we'll be glad to add it to this list.
.. versionadded:: 1.3.0
.. deprecated:: 1.8.0
This property is no longer used as the filechooser no longer decodes
the file names.
file_encodings is a :class:`~kivy.properties.ListProperty` and defaults to
['utf-8', 'latin1', 'cp1252'],
'''
file_system = ObjectProperty(FileSystemLocal(),
baseclass=FileSystemAbstract)
'''Implementation to access the file system. Must be an instance of
FileSystemAbstract.
.. versionadded:: 1.8.0
:class:`~kivy.properties.ObjectProperty`, defaults to
:class:`FileSystemLocal()`
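A custom backend can be as small as a subclass that filters what
``listdir`` returns, e.g. hiding compiled Python files (an illustrative
sketch; the class name is made up and ``my_chooser`` is your FileChooser
instance)::

    class NoPycFileSystem(FileSystemLocal):
        def listdir(self, fn):
            return [f for f in super(NoPycFileSystem, self).listdir(fn)
                    if not f.endswith('.pyc')]

    my_chooser.file_system = NoPycFileSystem()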
'''
__events__ = ('on_entry_added', 'on_entries_cleared',
'on_subentry_to_entry', 'on_remove_subentry', 'on_submit')
def __init__(self, **kwargs):
self._progress = None
super(FileChooserController, self).__init__(**kwargs)
self._items = []
fbind = self.fast_bind
fbind('selection', self._update_item_selection)
self._previous_path = [self.path]
fbind('path', self._save_previous_path)
update = self._trigger_update
fbind('path', update)
fbind('filters', update)
fbind('rootpath', update)
update()
def on_touch_down(self, touch):
# don't respond to touches outside self
if not self.collide_point(*touch.pos):
return
if self.disabled:
return True
return super(FileChooserController, self).on_touch_down(touch)
def on_touch_up(self, touch):
# don't respond to touches outside self
if not self.collide_point(*touch.pos):
return
if self.disabled:
return True
return super(FileChooserController, self).on_touch_up(touch)
def _update_item_selection(self, *args):
for item in self._items:
item.selected = item.path in self.selection
def _save_previous_path(self, instance, value):
self._previous_path.append(value)
self._previous_path = self._previous_path[-2:]
def _trigger_update(self, *args):
Clock.unschedule(self._update_files)
Clock.schedule_once(self._update_files)
def on_entry_added(self, node, parent=None):
if self.layout:
self.layout.dispatch('on_entry_added', node, parent)
def on_entries_cleared(self):
if self.layout:
self.layout.dispatch('on_entries_cleared')
def on_subentry_to_entry(self, subentry, entry):
if self.layout:
self.layout.dispatch('on_subentry_to_entry', subentry, entry)
def on_remove_subentry(self, subentry, entry):
if self.layout:
self.layout.dispatch('on_remove_subentry', subentry, entry)
def on_submit(self, selected, touch=None):
if self.layout:
self.layout.dispatch('on_submit', selected, touch)
def entry_touched(self, entry, touch):
'''(internal) This method must be called by the template when an entry
is touched by the user.
'''
if (
'button' in touch.profile and touch.button in (
'scrollup', 'scrolldown', 'scrollleft', 'scrollright')):
return False
_dir = self.file_system.is_dir(entry.path)
dirselect = self.dirselect
if _dir and dirselect and touch.is_double_tap:
self.open_entry(entry)
return
if self.multiselect:
if entry.path in self.selection:
self.selection.remove(entry.path)
else:
if _dir and not self.dirselect:
self.open_entry(entry)
return
self.selection.append(entry.path)
else:
if _dir and not self.dirselect:
self.open_entry(entry)
return
self.selection = [entry.path, ]
def entry_released(self, entry, touch):
'''(internal) This method must be called by the template when an entry
is released by the user.
.. versionadded:: 1.1.0
'''
if (
'button' in touch.profile and touch.button in (
'scrollup', 'scrolldown', 'scrollleft', 'scrollright')):
return False
if not self.multiselect:
if self.file_system.is_dir(entry.path) and not self.dirselect:
self.open_entry(entry)
elif touch.is_double_tap:
if self.dirselect and self.file_system.is_dir(entry.path):
self.open_entry(entry)
else:
self.dispatch('on_submit', self.selection, touch)
def open_entry(self, entry):
try:
# Just check if we can list the directory. This is also what
# _add_file does, so if it fails here, it would also fail later
# on. Do the check here to prevent setting path to an invalid
# directory that we cannot list.
self.file_system.listdir(entry.path)
except OSError:
entry.locked = True
else:
# If entry.path is to jump to previous directory, update path with
# parent directory
self.path = abspath(join(self.path, entry.path))
self.selection = []
def _apply_filters(self, files):
if not self.filters:
return files
filtered = []
for filt in self.filters:
if isinstance(filt, collections.Callable):
filtered.extend([fn for fn in files if filt(self.path, fn)])
else:
filtered.extend([fn for fn in files if fnmatch(fn, filt)])
if not self.filter_dirs:
dirs = [fn for fn in files if self.file_system.is_dir(fn)]
filtered.extend(dirs)
return list(set(filtered))
def get_nice_size(self, fn):
'''Pass the filepath. Returns the size in the best human readable
format or '' if it is a directory (Don't recursively calculate size.).
'''
if self.file_system.is_dir(fn):
return ''
try:
size = self.file_system.getsize(fn)
except OSError:
return '--'
for unit in filesize_units:
if size < 1024.0:
return '%1.0f %s' % (size, unit)
size /= 1024.0
def _update_files(self, *args, **kwargs):
# trigger to start gathering the files in the new directory
# we'll start a timer that will do the job, 10 times per frame
# (default)
self._gitems = []
self._gitems_parent = kwargs.get('parent', None)
self._gitems_gen = self._generate_file_entries(
path=kwargs.get('path', self.path),
parent=self._gitems_parent)
# cancel any previous clock if exist
Clock.unschedule(self._create_files_entries)
# show the progression screen
self._hide_progress()
if self._create_files_entries():
# not enough time to create all the entries; schedule a clock to continue
# start a timer for the next 100 ms
Clock.schedule_interval(self._create_files_entries, .1)
def _get_file_paths(self, items):
return [file.path for file in items]
def _create_files_entries(self, *args):
# create as many entries as possible within 50 ms, with a minimum of 10
# (on a "fast system" (core i7 2700K), we can create up to 40 entries
# in 50 ms, so a minimum of 10 is fine for slow systems).
start = time()
finished = False
index = total = count = 1
while time() - start < 0.05 or count < 10:
try:
index, total, item = next(self._gitems_gen)
self._gitems.append(item)
count += 1
except StopIteration:
finished = True
break
except TypeError: # in case _gitems_gen is None
finished = True
break
# if this wasn't enough for creating all the entries, show a progress
# bar, and report the activity to the user.
if not finished:
self._show_progress()
self._progress.total = total
self._progress.index = index
return True
# we created all the files, now push them on the view
self._items = items = self._gitems
parent = self._gitems_parent
if parent is None:
self.dispatch('on_entries_cleared')
for entry in items:
self.dispatch('on_entry_added', entry, parent)
else:
parent.entries[:] = items
for entry in items:
self.dispatch('on_subentry_to_entry', entry, parent)
self.files[:] = self._get_file_paths(items)
# stop the progression / creation
self._hide_progress()
self._gitems = None
self._gitems_gen = None
Clock.unschedule(self._create_files_entries)
return False
def cancel(self, *largs):
'''Cancel any background action started by filechooser, such as loading
a new directory.
.. versionadded:: 1.2.0
'''
Clock.unschedule(self._create_files_entries)
self._hide_progress()
if len(self._previous_path) > 1:
# if we cancel any action, the path will be set same as the
# previous one, so we can safely cancel the update of the previous
# path.
self.path = self._previous_path[-2]
Clock.unschedule(self._update_files)
def _show_progress(self):
if self._progress:
return
cls = self.progress_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
self._progress = cls(path=self.path)
self._progress.value = 0
self.add_widget(self._progress)
def _hide_progress(self):
if self._progress:
self.remove_widget(self._progress)
self._progress = None
def _generate_file_entries(self, *args, **kwargs):
# Generator that will create all the files entries.
# the generator is used via _update_files() and _create_files_entries()
# don't use it directly.
is_root = False
path = kwargs.get('path', self.path)
have_parent = kwargs.get('parent', None) is not None
# Add the components that are always needed
if self.rootpath:
rootpath = realpath(self.rootpath)
path = realpath(path)
if not path.startswith(rootpath):
self.path = rootpath
return
elif path == rootpath:
is_root = True
else:
if platform == 'win':
is_root = splitdrive(path)[1] in (sep, altsep)
elif platform in ('macosx', 'linux', 'android', 'ios'):
is_root = normpath(expanduser(path)) == sep
else:
# Unknown fs, just always add the .. entry but also log
Logger.warning('Filechooser: Unsupported OS: %r' % platform)
# generate an entry to go back to the previous directory
if not is_root and not have_parent:
back = '..' + sep
pardir = self._create_entry_widget(dict(
name=back, size='', path=back, controller=ref(self),
isdir=True, parent=None, sep=sep, get_nice_size=lambda: ''))
yield 0, 1, pardir
# generate all the entries for files
try:
for index, total, item in self._add_files(path):
yield index, total, item
except OSError:
Logger.exception('Unable to open directory <%s>' % self.path)
self.files[:] = []
def _create_entry_widget(self, ctx):
template = self.layout._ENTRY_TEMPLATE\
if self.layout else self._ENTRY_TEMPLATE
return Builder.template(template, **ctx)
def _add_files(self, path, parent=None):
path = expanduser(path)
files = []
fappend = files.append
for f in self.file_system.listdir(path):
try:
# In the following, use fully qualified filenames
fappend(normpath(join(path, f)))
except UnicodeDecodeError:
Logger.exception('unable to decode <{}>'.format(f))
except UnicodeEncodeError:
Logger.exception('unable to encode <{}>'.format(f))
# Apply filename filters
files = self._apply_filters(files)
# Sort the list of files
files = self.sort_func(files, self.file_system)
is_hidden = self.file_system.is_hidden
if not self.show_hidden:
files = [x for x in files if not is_hidden(x)]
self.files[:] = files
total = len(files)
wself = ref(self)
for index, fn in enumerate(files):
def get_nice_size():
# Use a closure for lazy-loading here
return self.get_nice_size(fn)
ctx = {'name': basename(fn),
'get_nice_size': get_nice_size,
'path': fn,
'controller': wself,
'isdir': self.file_system.is_dir(fn),
'parent': parent,
'sep': sep}
entry = self._create_entry_widget(ctx)
yield index, total, entry
def entry_subselect(self, entry):
if not self.file_system.is_dir(entry.path):
return
self._update_files(path=entry.path, parent=entry)
def close_subselection(self, entry):
for subentry in entry.entries:
self.dispatch('on_remove_subentry', subentry, entry)
class FileChooserListView(FileChooserController):
'''Implementation of :class:`FileChooserController` using a list view.
.. versionadded:: 1.9.0
'''
_ENTRY_TEMPLATE = 'FileListEntry'
class FileChooserIconView(FileChooserController):
'''Implementation of :class:`FileChooserController` using an icon view.
.. versionadded:: 1.9.0
'''
_ENTRY_TEMPLATE = 'FileIconEntry'
class FileChooser(FileChooserController):
'''Implementation of :class:`FileChooserController` which supports
switching between multiple, synced layout views.
.. versionadded:: 1.9.0
'''
manager = ObjectProperty()
'''
Reference to the :class:`~kivy.uix.screenmanager.ScreenManager` instance.
:class:`~kivy.properties.ObjectProperty`
'''
_view_list = ListProperty()
def get_view_list(self):
return self._view_list
view_list = AliasProperty(get_view_list, bind=('_view_list',))
'''
List of views added to this FileChooser.
:class:`~kivy.properties.AliasProperty` of type :class:`list`.
'''
_view_mode = StringProperty()
def get_view_mode(self):
return self._view_mode
def set_view_mode(self, mode):
if mode not in self._view_list:
raise ValueError('unknown view mode %r' % mode)
self._view_mode = mode
view_mode = AliasProperty(
get_view_mode, set_view_mode, bind=('_view_mode',))
'''
Current layout view mode.
:class:`~kivy.properties.AliasProperty` of type :class:`str`.
'''
@property
def _views(self):
return [screen.children[0] for screen in self.manager.screens]
def __init__(self, **kwargs):
super(FileChooser, self).__init__(**kwargs)
self.manager = ScreenManager()
super(FileChooser, self).add_widget(self.manager)
self.trigger_update_view = Clock.create_trigger(self.update_view)
self.fast_bind('view_mode', self.trigger_update_view)
def add_widget(self, widget, **kwargs):
if widget is self._progress:
super(FileChooser, self).add_widget(widget, **kwargs)
elif hasattr(widget, 'VIEWNAME'):
name = widget.VIEWNAME + 'view'
screen = Screen(name=name)
widget.controller = self
screen.add_widget(widget)
self.manager.add_widget(screen)
self.trigger_update_view()
else:
raise ValueError(
'widget must be a FileChooserLayout,'
' not %s' % type(widget).__name__)
def rebuild_views(self):
views = [view.VIEWNAME for view in self._views]
if views != self._view_list:
self._view_list = views
if self._view_mode not in self._view_list:
self._view_mode = self._view_list[0]
self._trigger_update()
def update_view(self, *args):
self.rebuild_views()
sm = self.manager
viewlist = self._view_list
view = self.view_mode
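        # Screen names are built as VIEWNAME + 'view' in add_widget, so strip the
        # trailing 'view' suffix to recover the currently shown view's name.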
current = sm.current[:-4]
viewindex = viewlist.index(view) if view in viewlist else 0
currentindex = viewlist.index(current) if current in viewlist else 0
direction = 'left' if currentindex < viewindex else 'right'
sm.transition.direction = direction
sm.current = view + 'view'
def _create_entry_widget(self, ctx):
return [Builder.template(view._ENTRY_TEMPLATE, **ctx)
for view in self._views]
def _get_file_paths(self, items):
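        # Each item is the list of per-view entry widgets created for one file
        # (see _create_entry_widget); the path is the same in every view, so it
        # is read from the first one.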
if self._views:
return [file[0].path for file in items]
return []
def _update_item_selection(self, *args):
for viewitem in self._items:
selected = viewitem[0].path in self.selection
for item in viewitem:
item.selected = selected
def on_entry_added(self, node, parent=None):
for index, view in enumerate(self._views):
view.dispatch(
'on_entry_added',
node[index], parent[index] if parent else None)
def on_entries_cleared(self):
for view in self._views:
view.dispatch('on_entries_cleared')
def on_subentry_to_entry(self, subentry, entry):
for index, view in enumerate(self._views):
view.dispatch('on_subentry_to_entry', subentry[index], entry)
def on_remove_subentry(self, subentry, entry):
for index, view in enumerate(self._views):
view.dispatch('on_remove_subentry', subentry[index], entry)
def on_submit(self, selected, touch=None):
view_mode = self.view_mode
for view in self._views:
if view_mode == view.VIEWNAME:
view.dispatch('on_submit', selected, touch)
return
if __name__ == '__main__':
from kivy.app import App
from pprint import pprint
import textwrap
import sys
root = Builder.load_string(textwrap.dedent('''\
BoxLayout:
orientation: 'vertical'
BoxLayout:
size_hint_y: None
height: sp(52)
Button:
text: 'Icon View'
on_press: fc.view_mode = 'icon'
Button:
text: 'List View'
on_press: fc.view_mode = 'list'
FileChooser:
id: fc
FileChooserIconLayout
FileChooserListLayout
'''))
class FileChooserApp(App):
def build(self):
v = root.ids.fc
if len(sys.argv) > 1:
v.path = sys.argv[1]
v.bind(selection=lambda *x: pprint("selection: %s" % x[1:]))
v.bind(path=lambda *x: pprint("path: %s" % x[1:]))
return root
FileChooserApp().run()
| mit | -4,341,889,288,043,636,700 | 31.476048 | 80 | 0.598414 | false |
sonofatailor/django-oscar | tests/integration/basket/test_models.py | 1 | 11161 | # -*- coding: utf-8 -*-
from decimal import Decimal as D
from django.test import TestCase
from oscar.apps.basket.models import Basket
from oscar.apps.catalogue.models import Option
from oscar.apps.partner import availability, prices, strategy
from oscar.test import factories
from oscar.test.factories import (
BasketFactory, BasketLineAttributeFactory, OptionFactory, ProductFactory)
class TestANewBasket(TestCase):
def setUp(self):
self.basket = Basket()
self.basket.strategy = strategy.Default()
def test_has_zero_lines(self):
self.assertEqual(0, self.basket.num_lines)
def test_has_zero_items(self):
self.assertEqual(0, self.basket.num_items)
def test_doesnt_contain_vouchers(self):
self.assertFalse(self.basket.contains_a_voucher)
def test_can_be_edited(self):
self.assertTrue(self.basket.can_be_edited)
def test_is_empty(self):
self.assertTrue(self.basket.is_empty)
def test_is_not_submitted(self):
self.assertFalse(self.basket.is_submitted)
def test_has_no_applied_offers(self):
self.assertEqual({}, self.basket.applied_offers())
class TestBasketLine(TestCase):
def test_description(self):
basket = BasketFactory()
product = ProductFactory(title="A product")
basket.add_product(product)
line = basket.lines.first()
self.assertEqual(line.description, "A product")
def test_description_with_attributes(self):
basket = BasketFactory()
product = ProductFactory(title="A product")
basket.add_product(product)
line = basket.lines.first()
BasketLineAttributeFactory(
line=line, value=u'\u2603', option__name='with')
self.assertEqual(line.description, u"A product (with = '\u2603')")
def test_create_line_reference(self):
basket = BasketFactory()
product = ProductFactory(title="A product")
option = OptionFactory(name="product_option", code="product_option")
        option_product = ProductFactory(title=u'Asunción')
options = [{'option': option, 'value': option_product}]
basket.add_product(product, options=options)
def test_basket_lines_queryset_is_ordered(self):
# This is needed to make sure a formset is not performing the query
# again with an order_by clause (losing all calculated discounts)
basket = BasketFactory()
product = ProductFactory(title="A product")
another_product = ProductFactory(title="Another product")
basket.add_product(product)
basket.add_product(another_product)
queryset = basket.all_lines()
self.assertTrue(queryset.ordered)
def test_line_tax_for_zero_tax_strategies(self):
basket = Basket()
basket.strategy = strategy.Default()
product = factories.create_product()
# Tax for the default strategy will be 0
factories.create_stockrecord(
product, price_excl_tax=D('75.00'), num_in_stock=10)
basket.add(product, 1)
self.assertEqual(basket.lines.first().line_tax, D('0'))
def test_line_tax_for_unknown_tax_strategies(self):
class UnknownTaxStrategy(strategy.Default):
""" A test strategy where the tax is not known """
def pricing_policy(self, product, stockrecord):
return prices.FixedPrice('GBP', stockrecord.price_excl_tax, tax=None)
basket = Basket()
basket.strategy = UnknownTaxStrategy()
product = factories.create_product()
factories.create_stockrecord(product, num_in_stock=10)
basket.add(product, 1)
self.assertEqual(basket.lines.first().line_tax, None)
class TestAddingAProductToABasket(TestCase):
def setUp(self):
self.basket = Basket()
self.basket.strategy = strategy.Default()
self.product = factories.create_product()
self.record = factories.create_stockrecord(
currency='GBP',
product=self.product, price_excl_tax=D('10.00'))
self.purchase_info = factories.create_purchase_info(self.record)
self.basket.add(self.product)
def test_creates_a_line(self):
self.assertEqual(1, self.basket.num_lines)
def test_sets_line_prices(self):
line = self.basket.lines.all()[0]
self.assertEqual(line.price_incl_tax, self.purchase_info.price.incl_tax)
self.assertEqual(line.price_excl_tax, self.purchase_info.price.excl_tax)
def test_adding_negative_quantity(self):
self.assertEqual(1, self.basket.num_lines)
self.basket.add(self.product, quantity=4)
self.assertEqual(5, self.basket.line_quantity(self.product, self.record))
self.basket.add(self.product, quantity=-10)
self.assertEqual(0, self.basket.line_quantity(self.product, self.record))
def test_means_another_currency_product_cannot_be_added(self):
product = factories.create_product()
factories.create_stockrecord(
currency='USD', product=product, price_excl_tax=D('20.00'))
with self.assertRaises(ValueError):
self.basket.add(product)
class TestANonEmptyBasket(TestCase):
def setUp(self):
self.basket = Basket()
self.basket.strategy = strategy.Default()
self.product = factories.create_product()
self.record = factories.create_stockrecord(
self.product, price_excl_tax=D('10.00'))
self.purchase_info = factories.create_purchase_info(self.record)
self.basket.add(self.product, 10)
def test_can_be_flushed(self):
self.basket.flush()
self.assertEqual(self.basket.num_items, 0)
def test_returns_correct_product_quantity(self):
self.assertEqual(10, self.basket.product_quantity(
self.product))
def test_returns_correct_line_quantity_for_existing_product_and_stockrecord(self):
self.assertEqual(10, self.basket.line_quantity(
self.product, self.record))
def test_returns_zero_line_quantity_for_alternative_stockrecord(self):
record = factories.create_stockrecord(
self.product, price_excl_tax=D('5.00'))
self.assertEqual(0, self.basket.line_quantity(
self.product, record))
def test_returns_zero_line_quantity_for_missing_product_and_stockrecord(self):
product = factories.create_product()
record = factories.create_stockrecord(
product, price_excl_tax=D('5.00'))
self.assertEqual(0, self.basket.line_quantity(
product, record))
def test_returns_correct_quantity_for_existing_product_and_stockrecord_and_options(self):
product = factories.create_product()
record = factories.create_stockrecord(
product, price_excl_tax=D('5.00'))
option = Option.objects.create(name="Message")
options = [{"option": option, "value": "2"}]
self.basket.add(product, options=options)
self.assertEqual(0, self.basket.line_quantity(
product, record))
self.assertEqual(1, self.basket.line_quantity(
product, record, options))
def test_total_sums_product_totals(self):
product = factories.create_product()
factories.create_stockrecord(
product, price_excl_tax=D('5.00'))
self.basket.add(product, 1)
self.assertEqual(self.basket.total_excl_tax, 105)
def test_totals_for_free_products(self):
basket = Basket()
basket.strategy = strategy.Default()
# Add a zero-priced product to the basket
product = factories.create_product()
factories.create_stockrecord(
product, price_excl_tax=D('0.00'), num_in_stock=10)
basket.add(product, 1)
self.assertEqual(basket.lines.count(), 1)
self.assertEqual(basket.total_excl_tax, 0)
self.assertEqual(basket.total_incl_tax, 0)
def test_basket_prices_calculation_for_unavailable_pricing(self):
new_product = factories.create_product()
factories.create_stockrecord(
new_product, price_excl_tax=D('5.00'))
self.basket.add(new_product, 1)
class UnavailableProductStrategy(strategy.Default):
""" A test strategy that makes a specific product unavailable """
def availability_policy(self, product, stockrecord):
if product == new_product:
return availability.Unavailable()
return super(UnavailableProductStrategy, self).availability_policy(product, stockrecord)
def pricing_policy(self, product, stockrecord):
if product == new_product:
return prices.Unavailable()
return super(UnavailableProductStrategy, self).pricing_policy(product, stockrecord)
self.basket.strategy = UnavailableProductStrategy()
line = self.basket.all_lines()[1]
self.assertEqual(line.get_warning(), u"'D\xf9\uff4d\u03fb\u03d2 title' is no longer available")
self.assertIsNone(line.line_price_excl_tax)
self.assertIsNone(line.line_price_incl_tax)
self.assertIsNone(line.line_price_excl_tax_incl_discounts)
self.assertIsNone(line.line_price_incl_tax_incl_discounts)
self.assertIsNone(line.line_tax)
self.assertEqual(self.basket.total_excl_tax, 100)
self.assertEqual(self.basket.total_incl_tax, 100)
self.assertEqual(self.basket.total_excl_tax_excl_discounts, 100)
self.assertEqual(self.basket.total_incl_tax_excl_discounts, 100)
class TestMergingTwoBaskets(TestCase):
def setUp(self):
self.product = factories.create_product()
self.record = factories.create_stockrecord(
self.product, price_excl_tax=D('10.00'))
self.purchase_info = factories.create_purchase_info(self.record)
self.main_basket = Basket()
self.main_basket.strategy = strategy.Default()
self.main_basket.add(self.product, quantity=2)
self.merge_basket = Basket()
self.merge_basket.strategy = strategy.Default()
self.merge_basket.add(self.product, quantity=1)
self.main_basket.merge(self.merge_basket)
def test_doesnt_sum_quantities(self):
self.assertEqual(1, self.main_basket.num_lines)
def test_changes_status_of_merge_basket(self):
self.assertEqual(Basket.MERGED, self.merge_basket.status)
class TestASubmittedBasket(TestCase):
def setUp(self):
self.basket = Basket()
self.basket.strategy = strategy.Default()
self.basket.submit()
def test_has_correct_status(self):
self.assertTrue(self.basket.is_submitted)
def test_can_be_edited(self):
self.assertFalse(self.basket.can_be_edited)
class TestMergingAVoucherBasket(TestCase):
def test_transfers_vouchers_to_new_basket(self):
baskets = [factories.BasketFactory(), factories.BasketFactory()]
voucher = factories.VoucherFactory()
baskets[0].vouchers.add(voucher)
baskets[1].merge(baskets[0])
self.assertEqual(1, baskets[1].vouchers.all().count())
| bsd-3-clause | 7,816,845,905,431,013,000 | 37.088737 | 104 | 0.661828 | false |
sam-m888/gprime | gprime/filters/rules/media/_matchesfilter.py | 1 | 1711 | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .. import MatchesFilterBase
#-------------------------------------------------------------------------
#
# MatchesFilter
#
#-------------------------------------------------------------------------
class MatchesFilter(MatchesFilterBase):
"""Rule that checks against another filter."""
name = _('Media objects matching the <filter>')
description = _("Matches media objects matched by the "
"specified filter name")
namespace = 'Media'
| gpl-2.0 | -4,651,842,400,809,518,000 | 35.404255 | 79 | 0.534191 | false |
patjouk/djangogirls | jobs/migrations/0004_auto_20150712_1803.py | 1 | 3951 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0003_auto_20150510_1707'),
]
operations = [
migrations.RemoveField(
model_name='job',
name='reviewers_comment',
),
migrations.RemoveField(
model_name='meetup',
name='reviewers_comment',
),
migrations.AddField(
model_name='job',
name='internal_comment',
field=models.TextField(null=True, blank=True, help_text="Write you comments here. They won't be sent to the company/organisation."),
),
migrations.AddField(
model_name='job',
name='message_to_organisation',
field=models.TextField(null=True, blank=True, help_text='Write your message to the company/organisation here.'),
),
migrations.AddField(
model_name='meetup',
name='internal_comment',
field=models.TextField(null=True, blank=True, help_text="Write you comments here. They won't be sent to the company/organisation."),
),
migrations.AddField(
model_name='meetup',
name='message_to_organisation',
field=models.TextField(null=True, blank=True, help_text='Write your message to the company/organisation here.'),
),
migrations.AlterField(
model_name='job',
name='expiration_date',
            field=models.DateField(null=True, blank=True, help_text='Automatically set to 60 days from posting. You can override this.'),
),
migrations.AlterField(
model_name='job',
name='review_status',
field=models.CharField(choices=[('OPN', 'Open'), ('URE', 'Under review'), ('RTP', 'Ready to publish'), ('REJ', 'Rejected'), ('PUB', 'Published')], max_length=3, default='OPN'),
),
migrations.AlterField(
model_name='job',
name='website',
field=models.URLField(null=True, blank=True, help_text='Link to your offer or company website.'),
),
migrations.AlterField(
model_name='meetup',
name='expiration_date',
            field=models.DateField(null=True, blank=True, help_text='Automatically set to 60 days from posting. You can override this.'),
),
migrations.AlterField(
model_name='meetup',
name='meetup_end_date',
field=models.DateTimeField(null=True, blank=True, help_text='Date format: YYYY-MM-DD'),
),
migrations.AlterField(
model_name='meetup',
name='meetup_start_date',
field=models.DateTimeField(null=True, help_text='If this is a recurring meetup/event, please enter a start date. Date format: YYYY-MM-DD'),
),
migrations.AlterField(
model_name='meetup',
name='meetup_type',
field=models.CharField(choices=[('MEET', 'meetup'), ('CONF', 'conference'), ('WORK', 'workshop')], max_length=4, default='MEET'),
),
migrations.AlterField(
model_name='meetup',
name='recurrence',
field=models.CharField(null=True, blank=True, max_length=255, help_text='Provide details of recurrence if applicable.'),
),
migrations.AlterField(
model_name='meetup',
name='review_status',
field=models.CharField(choices=[('OPN', 'Open'), ('URE', 'Under review'), ('RTP', 'Ready to publish'), ('REJ', 'Rejected'), ('PUB', 'Published')], max_length=3, default='OPN'),
),
migrations.AlterField(
model_name='meetup',
name='website',
field=models.URLField(null=True, blank=True, help_text='Link to your meetup or organisation website.'),
),
]
| bsd-3-clause | 2,459,570,391,821,128,700 | 41.945652 | 188 | 0.576563 | false |
Stanford-Online/edx-ora2 | openassessment/xblock/openassessmentblock.py | 1 | 42271 | """An XBlock where students can read a question and compose their response"""
import copy
import datetime as dt
import json
import logging
import os
from lazy import lazy
import pkg_resources
import pytz
from webob import Response
from xblock.core import XBlock
from xblock.fields import Boolean, Integer, List, Scope, String
from xblock.fragment import Fragment
from django.conf import settings
from django.template.loader import get_template
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.course_items_listing_mixin import CourseItemsListingMixin
from openassessment.xblock.data_conversion import create_prompts_list, create_rubric_dict, update_assessments_format
from openassessment.xblock.defaults import * # pylint: disable=wildcard-import, unused-wildcard-import
from openassessment.xblock.grade_mixin import GradeMixin
from openassessment.xblock.leaderboard_mixin import LeaderboardMixin
from openassessment.xblock.lms_mixin import LmsCompatibilityMixin
from openassessment.xblock.message_mixin import MessageMixin
from openassessment.xblock.peer_assessment_mixin import PeerAssessmentMixin
from openassessment.xblock.resolve_dates import DISTANT_FUTURE, DISTANT_PAST, parse_date_value, resolve_dates
from openassessment.xblock.self_assessment_mixin import SelfAssessmentMixin
from openassessment.xblock.staff_area_mixin import StaffAreaMixin
from openassessment.xblock.staff_assessment_mixin import StaffAssessmentMixin
from openassessment.xblock.student_training_mixin import StudentTrainingMixin
from openassessment.xblock.studio_mixin import StudioMixin
from openassessment.xblock.submission_mixin import SubmissionMixin
from openassessment.xblock.validation import validator
from openassessment.xblock.workflow_mixin import WorkflowMixin
from openassessment.xblock.xml import parse_from_xml, serialize_content_to_xml
logger = logging.getLogger(__name__)
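# Static UI metadata for each workflow step; merged with the block's configured
# assessments in _create_ui_models() before rendering.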
UI_MODELS = {
"submission": {
"name": "submission",
"class_id": "step--response",
"title": "Your Response"
},
"student-training": {
"name": "student-training",
"class_id": "step--student-training",
"title": "Learn to Assess"
},
"peer-assessment": {
"name": "peer-assessment",
"class_id": "step--peer-assessment",
"title": "Assess Peers' Responses"
},
"self-assessment": {
"name": "self-assessment",
"class_id": "step--self-assessment",
"title": "Assess Your Response"
},
"staff-assessment": {
"name": "staff-assessment",
"class_id": "step--staff-assessment",
"title": "Staff Grade"
},
"grade": {
"name": "grade",
"class_id": "step--grade",
"title": "Your Grade:"
},
"leaderboard": {
"name": "leaderboard",
"class_id": "step--leaderboard",
"title": "Leaderboard"
}
}
VALID_ASSESSMENT_TYPES = [
"student-training",
"peer-assessment",
"self-assessment",
"staff-assessment"
]
def load(path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
@XBlock.needs("i18n")
@XBlock.needs("user")
class OpenAssessmentBlock(MessageMixin,
SubmissionMixin,
PeerAssessmentMixin,
SelfAssessmentMixin,
StaffAssessmentMixin,
StudioMixin,
GradeMixin,
LeaderboardMixin,
StaffAreaMixin,
WorkflowMixin,
StudentTrainingMixin,
LmsCompatibilityMixin,
CourseItemsListingMixin,
XBlock):
"""Displays a prompt and provides an area where students can compose a response."""
public_dir = 'static'
submission_start = String(
default=DEFAULT_START, scope=Scope.settings,
help="ISO-8601 formatted string representing the submission start date."
)
submission_due = String(
default=DEFAULT_DUE, scope=Scope.settings,
help="ISO-8601 formatted string representing the submission due date."
)
text_response_raw = String(
help="Specify whether learners must include a text based response to this problem's prompt.",
default="required",
scope=Scope.settings
)
file_upload_response_raw = String(
help="Specify whether learners are able to upload files as a part of their response.",
default=None,
scope=Scope.settings
)
allow_file_upload = Boolean(
default=False,
scope=Scope.content,
help="Do not use. For backwards compatibility only."
)
file_upload_type_raw = String(
default=None,
scope=Scope.content,
help="File upload to be included with submission (can be 'image', 'pdf-and-image', or 'custom')."
)
white_listed_file_types = List(
default=[],
scope=Scope.content,
help="Custom list of file types allowed with submission."
)
allow_latex = Boolean(
default=False,
scope=Scope.settings,
help="Latex rendering allowed with submission."
)
title = String(
default="Open Response Assessment",
scope=Scope.content,
help="A title to display to a student (plain text)."
)
leaderboard_show = Integer(
default=0,
scope=Scope.content,
help="The number of leaderboard results to display (0 if none)"
)
prompt = String(
default=DEFAULT_PROMPT,
scope=Scope.content,
help="The prompts to display to a student."
)
track_changes = String(
default="",
scope=Scope.content,
help="URL to track changes library, currently ICE"
)
prompts_type = String(
default='text',
scope=Scope.content,
help="The type of prompt. html or text"
)
rubric_criteria = List(
default=DEFAULT_RUBRIC_CRITERIA,
scope=Scope.content,
help="The different parts of grading for students giving feedback."
)
rubric_feedback_prompt = String(
default=DEFAULT_RUBRIC_FEEDBACK_PROMPT,
scope=Scope.content,
help="The rubric feedback prompt displayed to the student"
)
rubric_feedback_default_text = String(
default=DEFAULT_RUBRIC_FEEDBACK_TEXT,
scope=Scope.content,
help="The default rubric feedback text displayed to the student"
)
rubric_assessments = List(
default=DEFAULT_ASSESSMENT_MODULES,
scope=Scope.content,
help="The requested set of assessments and the order in which to apply them."
)
course_id = String(
default=u"TestCourse",
scope=Scope.content,
help="The course_id associated with this prompt (until we can get it from runtime)."
)
submission_uuid = String(
default=None,
scope=Scope.user_state,
help="The student's submission that others will be assessing."
)
has_saved = Boolean(
default=False,
scope=Scope.user_state,
help="Indicates whether the user has saved a response."
)
saved_response = String(
default=u"",
scope=Scope.user_state,
help="Saved response submission for the current user."
)
saved_files_descriptions = String(
default=u"",
scope=Scope.user_state,
help="Saved descriptions for each uploaded file."
)
no_peers = Boolean(
default=False,
scope=Scope.user_state,
help="Indicates whether or not there are peers to grade."
)
@property
def course_id(self):
return self._serialize_opaque_key(self.xmodule_runtime.course_id) # pylint:disable=E1101
@property
def text_response(self):
"""
Backward compatibility for existing blocks that were created without text_response
or file_upload_response fields. These blocks will be treated as required text.
"""
if not self.file_upload_response_raw and not self.text_response_raw:
return 'required'
else:
return self.text_response_raw
@text_response.setter
def text_response(self, value):
"""
Setter for text_response_raw
"""
self.text_response_raw = value if value else None
@property
def file_upload_response(self):
"""
        Backward compatibility for existing blocks that were created without
'text_response' and 'file_upload_response_raw' fields.
"""
if not self.file_upload_response_raw and (self.file_upload_type_raw is not None or self.allow_file_upload):
return 'optional'
else:
return self.file_upload_response_raw
@file_upload_response.setter
def file_upload_response(self, value):
"""
Setter for file_upload_response_raw
"""
self.file_upload_response_raw = value if value else None
@property
def file_upload_type(self):
"""
        Backward compatibility for existing blocks created before the change from allow_file_upload to file_upload_type_raw.
This property will use new file_upload_type_raw field when available, otherwise will fall back to
allow_file_upload field for old blocks.
"""
if self.file_upload_type_raw is not None:
return self.file_upload_type_raw
if self.allow_file_upload:
return 'image'
else:
return None
@file_upload_type.setter
def file_upload_type(self, value):
"""
Setter for file_upload_type_raw
"""
self.file_upload_type_raw = value
@property
def white_listed_file_types_string(self):
"""
Join the white listed file types into comma delimited string
"""
if self.white_listed_file_types:
return ','.join(self.white_listed_file_types)
else:
return ''
@white_listed_file_types_string.setter
def white_listed_file_types_string(self, value):
"""
Convert comma delimited white list string into list with some clean up
"""
self.white_listed_file_types = [file_type.strip().strip('.').lower()
for file_type in value.split(',')] if value else None
def get_anonymous_user_id(self, username, course_id):
"""
Get the anonymous user id from Xblock user service.
Args:
username(str): user's name entered by staff to get info.
course_id(str): course id.
Returns:
A unique id for (user, course) pair
"""
return self.runtime.service(self, 'user').get_anonymous_user_id(username, course_id)
def get_student_item_dict(self, anonymous_user_id=None):
"""Create a student_item_dict from our surrounding context.
See also: submissions.api for details.
Args:
anonymous_user_id(str): A unique anonymous_user_id for (user, course) pair.
Returns:
(dict): The student item associated with this XBlock instance. This
includes the student id, item id, and course id.
"""
item_id = self._serialize_opaque_key(self.scope_ids.usage_id)
# This is not the real way course_ids should work, but this is a
# temporary expediency for LMS integration
if hasattr(self, "xmodule_runtime"):
course_id = self.course_id # pylint:disable=E1101
if anonymous_user_id:
student_id = anonymous_user_id
else:
student_id = self.xmodule_runtime.anonymous_student_id # pylint:disable=E1101
else:
course_id = "edX/Enchantment_101/April_1"
if self.scope_ids.user_id is None:
student_id = None
else:
student_id = unicode(self.scope_ids.user_id)
student_item_dict = dict(
student_id=student_id,
item_id=item_id,
course_id=course_id,
item_type='openassessment'
)
return student_item_dict
def add_javascript_files(self, fragment, item):
"""
Add all the JavaScript files from a directory to the specified fragment
"""
if pkg_resources.resource_isdir(__name__, item):
for child_item in pkg_resources.resource_listdir(__name__, item):
path = os.path.join(item, child_item)
if not pkg_resources.resource_isdir(__name__, path):
fragment.add_javascript_url(self.runtime.local_resource_url(self, path))
else:
fragment.add_javascript_url(self.runtime.local_resource_url(self, item))
def student_view(self, context=None):
"""The main view of OpenAssessmentBlock, displayed when viewing courses.
The main view which displays the general layout for Open Ended
Assessment Questions. The contents of the XBlock are determined
dynamically based on the assessment workflow configured by the author.
Args:
context: Not used for this view.
Returns:
(Fragment): The HTML Fragment for this XBlock, which determines the
general frame of the Open Ended Assessment Question.
"""
# On page load, update the workflow status.
# We need to do this here because peers may have graded us, in which
# case we may have a score available.
try:
self.update_workflow_status()
except AssessmentWorkflowError:
# Log the exception, but continue loading the page
logger.exception('An error occurred while updating the workflow on page load.')
ui_models = self._create_ui_models()
# All data we intend to pass to the front end.
context_dict = {
"title": self.title,
"prompts": self.prompts,
"prompts_type": self.prompts_type,
"rubric_assessments": ui_models,
"show_staff_area": self.is_course_staff and not self.in_studio_preview,
}
template = get_template("openassessmentblock/oa_base.html")
return self._create_fragment(template, context_dict, initialize_js_func='OpenAssessmentBlock')
def ora_blocks_listing_view(self, context=None):
"""This view is used in the Open Response Assessment tab in the LMS Instructor Dashboard
to display all available course ORA blocks.
Args:
context: contains two items:
"ora_items" - all course items with names and parents, example:
[{"parent_name": "Vertical name",
"name": "ORA Display Name",
"url_grade_available_responses": "/grade_available_responses_view",
"staff_assessment": false,
"parent_id": "vertical_block_id",
"url_base": "/student_view",
"id": "openassessment_block_id"
}, ...]
"ora_item_view_enabled" - enabled LMS API endpoint to serve XBlock view or not
Returns:
(Fragment): The HTML Fragment for this XBlock.
"""
ora_items = context.get('ora_items', []) if context else []
ora_item_view_enabled = context.get('ora_item_view_enabled', False) if context else False
context_dict = {
"ora_items": json.dumps(ora_items),
"ora_item_view_enabled": ora_item_view_enabled
}
template = get_template('openassessmentblock/instructor_dashboard/oa_listing.html')
min_postfix = '.min' if settings.DEBUG else ''
return self._create_fragment(
template,
context_dict,
initialize_js_func='CourseOpenResponsesListingBlock',
additional_css=["static/css/lib/backgrid/backgrid%s.css" % min_postfix],
additional_js=["static/js/lib/backgrid/backgrid%s.js" % min_postfix]
)
def grade_available_responses_view(self, context=None): # pylint: disable=unused-argument
"""Grade Available Responses view.
Auxiliary view which displays the staff grading area
(used in the Open Response Assessment tab in the Instructor Dashboard of LMS)
Args:
context: Not used for this view.
Returns:
(Fragment): The HTML Fragment for this XBlock.
"""
student_item = self.get_student_item_dict()
staff_assessment_required = "staff-assessment" in self.assessment_steps
context_dict = {
"title": self.title,
'staff_assessment_required': staff_assessment_required
}
if staff_assessment_required:
context_dict.update(
self.get_staff_assessment_statistics_context(student_item["course_id"], student_item["item_id"])
)
template = get_template('openassessmentblock/instructor_dashboard/oa_grade_available_responses.html')
return self._create_fragment(template, context_dict, initialize_js_func='StaffAssessmentBlock')
def _create_fragment(self, template, context_dict, initialize_js_func, additional_css=None, additional_js=None):
"""
Creates a fragment for display.
"""
fragment = Fragment(template.render(context_dict))
if additional_css is None:
additional_css = []
if additional_js is None:
additional_js = []
i18n_service = self.runtime.service(self, 'i18n')
if hasattr(i18n_service, 'get_language_bidi') and i18n_service.get_language_bidi():
css_url = "static/css/openassessment-rtl.css"
else:
css_url = "static/css/openassessment-ltr.css"
if settings.DEBUG:
for css in additional_css:
fragment.add_css_url(self.runtime.local_resource_url(self, css))
fragment.add_css_url(self.runtime.local_resource_url(self, css_url))
for js in additional_js:
self.add_javascript_files(fragment, js)
self.add_javascript_files(fragment, "static/js/src/oa_shared.js")
self.add_javascript_files(fragment, "static/js/src/oa_server.js")
self.add_javascript_files(fragment, "static/js/src/lms")
else:
# TODO: load CSS and JavaScript as URLs once they can be served by the CDN
for css in additional_css:
fragment.add_css(load(css))
fragment.add_css(load(css_url))
# minified additional_js should be already included in 'make javascript'
fragment.add_javascript(load("static/js/openassessment-lms.min.js"))
ui_models = self._create_ui_models()
track_changes_fragments = [x['track_changes'] for x in ui_models if x.get('track_changes', None)]
if track_changes_fragments:
for tr_frag in track_changes_fragments:
fragment.add_javascript_url(tr_frag) # TODO: move the URL to course advanced setting
if settings.DEBUG:
fragment.add_css_url(self.runtime.local_resource_url(self, "static/css/trackchanges.css"))
else:
fragment.add_css(load("static/css/trackchanges.css"))
js_context_dict = {
"ALLOWED_IMAGE_MIME_TYPES": self.ALLOWED_IMAGE_MIME_TYPES,
"ALLOWED_FILE_MIME_TYPES": self.ALLOWED_FILE_MIME_TYPES,
"FILE_EXT_BLACK_LIST": self.FILE_EXT_BLACK_LIST,
"FILE_TYPE_WHITE_LIST": self.white_listed_file_types,
}
fragment.initialize_js(initialize_js_func, js_context_dict)
return fragment
@property
def is_admin(self):
"""
Check whether the user has global staff permissions.
Returns:
bool
"""
if hasattr(self, 'xmodule_runtime'):
return getattr(self.xmodule_runtime, 'user_is_admin', False)
else:
return False
@property
def is_course_staff(self):
"""
Check whether the user has course staff permissions for this XBlock.
Returns:
bool
"""
if hasattr(self, 'xmodule_runtime'):
return getattr(self.xmodule_runtime, 'user_is_staff', False)
else:
return False
@property
def is_beta_tester(self):
"""
Check whether the user is a beta tester.
Returns:
bool
"""
if hasattr(self, 'xmodule_runtime'):
return getattr(self.xmodule_runtime, 'user_is_beta_tester', False)
else:
return False
@property
def in_studio_preview(self):
"""
Check whether we are in Studio preview mode.
Returns:
bool
"""
# When we're running in Studio Preview mode, the XBlock won't provide us with a user ID.
# (Note that `self.xmodule_runtime` will still provide an anonymous
# student ID, so we can't rely on that)
return self.scope_ids.user_id is None
def _create_ui_models(self):
"""Combine UI attributes and XBlock configuration into a UI model.
This method takes all configuration for this XBlock instance and appends
UI attributes to create a UI Model for rendering all assessment modules.
This allows a clean separation of static UI attributes from persistent
XBlock configuration.
"""
ui_models = [UI_MODELS["submission"]]
staff_assessment_required = False
for assessment in self.valid_assessments:
if assessment["name"] == "staff-assessment":
if not assessment["required"]:
continue
else:
staff_assessment_required = True
ui_model = UI_MODELS.get(assessment["name"])
if ui_model:
ui_models.append(dict(assessment, **ui_model))
if not staff_assessment_required and self.staff_assessment_exists(self.submission_uuid):
ui_models.append(UI_MODELS["staff-assessment"])
ui_models.append(UI_MODELS["grade"])
if self.leaderboard_show > 0:
ui_models.append(UI_MODELS["leaderboard"])
return ui_models
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench.
These scenarios are only intended to be used for Workbench XBlock
Development.
"""
return [
(
"OpenAssessmentBlock File Upload: Images",
load('static/xml/file_upload_image_only.xml')
),
(
"OpenAssessmentBlock File Upload: PDF and Images",
load('static/xml/file_upload_pdf_and_image.xml')
),
(
"OpenAssessmentBlock File Upload: Custom File Types",
load('static/xml/file_upload_custom.xml')
),
(
"OpenAssessmentBlock File Upload: allow_file_upload compatibility",
load('static/xml/file_upload_compat.xml')
),
(
"OpenAssessmentBlock Unicode",
load('static/xml/unicode.xml')
),
(
"OpenAssessmentBlock Poverty Rubric",
load('static/xml/poverty_rubric_example.xml')
),
(
"OpenAssessmentBlock Leaderboard",
load('static/xml/leaderboard.xml')
),
(
"OpenAssessmentBlock Leaderboard with Custom File Type",
load('static/xml/leaderboard_custom.xml')
),
(
"OpenAssessmentBlock (Peer Only) Rubric",
load('static/xml/poverty_peer_only_example.xml')
),
(
"OpenAssessmentBlock (Self Only) Rubric",
load('static/xml/poverty_self_only_example.xml')
),
(
"OpenAssessmentBlock Censorship Rubric",
load('static/xml/censorship_rubric_example.xml')
),
(
"OpenAssessmentBlock Promptless Rubric",
load('static/xml/promptless_rubric_example.xml')
),
(
"OpenAssessmentBlock Track Changes",
load('static/xml/track_changes_example.xml')
),
]
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
"""Instantiate XBlock object from runtime XML definition.
Inherited by XBlock core.
"""
config = parse_from_xml(node)
block = runtime.construct_xblock_from_class(cls, keys)
xblock_validator = validator(block, block._, strict_post_release=False)
xblock_validator(
create_rubric_dict(config['prompts'], config['rubric_criteria']),
config['rubric_assessments'],
submission_start=config['submission_start'],
submission_due=config['submission_due'],
leaderboard_show=config['leaderboard_show']
)
block.rubric_criteria = config['rubric_criteria']
block.rubric_feedback_prompt = config['rubric_feedback_prompt']
block.rubric_feedback_default_text = config['rubric_feedback_default_text']
block.rubric_assessments = config['rubric_assessments']
block.submission_start = config['submission_start']
block.submission_due = config['submission_due']
block.title = config['title']
block.prompts = config['prompts']
block.prompts_type = config['prompts_type']
block.text_response = config['text_response']
block.file_upload_response = config['file_upload_response']
block.allow_file_upload = config['allow_file_upload']
block.file_upload_type = config['file_upload_type']
block.white_listed_file_types_string = config['white_listed_file_types']
block.allow_latex = config['allow_latex']
block.leaderboard_show = config['leaderboard_show']
block.group_access = config['group_access']
return block
@property
def _(self):
i18nService = self.runtime.service(self, 'i18n')
return i18nService.ugettext
@property
def prompts(self):
"""
Return the prompts.
Initially a block had a single prompt which was saved as a simple
string in the prompt field. Now prompts are saved as a serialized
list of dicts in the same field. If prompt field contains valid json,
parse and return it. Otherwise, assume it is a simple string prompt
and return it in a list of dict.
Returns:
list of dict
"""
return create_prompts_list(self.prompt)
@prompts.setter
def prompts(self, value):
"""
Serialize the prompts and save to prompt field.
Args:
value (list of dict): The prompts to set.
"""
if value is None:
self.prompt = None
elif len(value) == 1:
# For backwards compatibility. To be removed after all code
# is migrated to use prompts property instead of prompt field.
self.prompt = value[0]['description']
else:
self.prompt = json.dumps(value)
@property
def valid_assessments(self):
"""
Return a list of assessment dictionaries that we recognize.
This allows us to gracefully handle situations in which unrecognized
assessment types are stored in the XBlock field (e.g. because
we roll back code after releasing a feature).
Returns:
list
"""
_valid_assessments = [
asmnt for asmnt in self.rubric_assessments
if asmnt.get('name') in VALID_ASSESSMENT_TYPES
]
return update_assessments_format(copy.deepcopy(_valid_assessments))
@property
def assessment_steps(self):
return [asmnt['name'] for asmnt in self.valid_assessments]
@lazy
def rubric_criteria_with_labels(self):
"""
Backwards compatibility: We used to treat "name" as both a user-facing label
and a unique identifier for criteria and options.
Now we treat "name" as a unique identifier, and we've added an additional "label"
field that we display to the user.
If criteria/options in the problem definition do NOT have a "label" field
(because they were created before this change),
we create a new label that has the same value as "name".
The result of this call is cached, so it should NOT be used in a runtime
that can modify the XBlock settings (in the LMS, settings are read-only).
Returns:
list of criteria dictionaries
"""
criteria = copy.deepcopy(self.rubric_criteria)
for criterion in criteria:
if 'label' not in criterion:
criterion['label'] = criterion['name']
for option in criterion['options']:
if 'label' not in option:
option['label'] = option['name']
return criteria
def render_assessment(self, path, context_dict=None):
"""Render an Assessment Module's HTML
Given the name of an assessment module, find it in the list of
configured modules, and ask for its rendered HTML.
Args:
path (str): The path to the template used to render this HTML
section.
context_dict (dict): A dictionary of context variables used to
populate this HTML section.
Returns:
(Response): A Response Object with the generated HTML fragment. This
is intended for AJAX calls to load dynamically into a larger
document.
"""
if not context_dict:
context_dict = {}
template = get_template(path)
return Response(template.render(context_dict), content_type='application/html', charset='UTF-8')
def add_xml_to_node(self, node):
"""
Serialize the XBlock to XML for exporting.
"""
serialize_content_to_xml(self, node)
def render_error(self, error_msg):
"""
Render an error message.
Args:
error_msg (unicode): The error message to display.
Returns:
Response: A response object with an HTML body.
"""
context = {'error_msg': error_msg}
template = get_template('openassessmentblock/oa_error.html')
return Response(template.render(context), content_type='application/html', charset='UTF-8')
def is_closed(self, step=None, course_staff=None):
"""
Checks if the question is closed.
Determines if the start date is in the future or the end date has
passed. Optionally limited to a particular step in the workflow.
Start/due dates do NOT apply to course staff, since course staff may need to get to
the peer grading step AFTER the submission deadline has passed.
This may not be necessary when we implement a grading interface specifically for course staff.
Keyword Arguments:
step (str): The step in the workflow to check. Options are:
None: check whether the problem as a whole is open.
"submission": check whether the submission section is open.
"peer-assessment": check whether the peer-assessment section is open.
"self-assessment": check whether the self-assessment section is open.
course_staff (bool): Whether to treat the user as course staff (disable start/due dates).
If not specified, default to the current user's status.
Returns:
tuple of the form (is_closed, reason, start_date, due_date), where
is_closed (bool): indicates whether the step is closed.
reason (str or None): specifies the reason the step is closed ("start" or "due")
start_date (datetime): is the start date of the step/problem.
due_date (datetime): is the due date of the step/problem.
Examples:
>>> is_closed()
False, None, datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
>>> is_closed(step="submission")
True, "due", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
>>> is_closed(step="self-assessment")
True, "start", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
"""
submission_range = (self.submission_start, self.submission_due)
assessment_ranges = [
(asmnt.get('start'), asmnt.get('due'))
for asmnt in self.valid_assessments
]
# Resolve unspecified dates and date strings to datetimes
start, due, date_ranges = resolve_dates(
self.start, self.due, [submission_range] + assessment_ranges, self._
)
open_range = (start, due)
assessment_steps = self.assessment_steps
if step == 'submission':
open_range = date_ranges[0]
elif step in assessment_steps:
step_index = assessment_steps.index(step)
open_range = date_ranges[1 + step_index]
# Course staff always have access to the problem
if course_staff is None:
course_staff = self.is_course_staff
if course_staff:
return False, None, DISTANT_PAST, DISTANT_FUTURE
if self.is_beta_tester:
beta_start = self._adjust_start_date_for_beta_testers(open_range[0])
open_range = (beta_start, open_range[1])
# Check if we are in the open date range
now = dt.datetime.utcnow().replace(tzinfo=pytz.utc)
if now < open_range[0]:
return True, "start", open_range[0], open_range[1]
elif now >= open_range[1]:
return True, "due", open_range[0], open_range[1]
else:
return False, None, open_range[0], open_range[1]
def get_waiting_details(self, status_details):
"""
Returns waiting status (boolean value) based on the given status_details.
Args:
status_details (dict): A dictionary containing the details of each
assessment module status. This will contain keys such as
"peer", "ai", and "staff", referring to dictionaries, which in
turn will have the key "graded". If this key has a value set,
these assessment modules have been graded.
Returns:
True if waiting for a grade from peer, ai, or staff assessment, else False.
Examples:
>>> now = dt.datetime.utcnow().replace(tzinfo=pytz.utc)
>>> status_details = {
>>> 'peer': {
>>> 'completed': None,
>>> 'graded': now
>>> },
>>> 'ai': {
>>> 'completed': now,
>>> 'graded': None
>>> }
>>> }
>>> self.get_waiting_details(status_details)
True
"""
steps = ["peer", "ai", "staff"] # These are the steps that can be submitter-complete, but lack a grade
for step in steps:
if step in status_details and not status_details[step]["graded"]:
return True
return False
def is_released(self, step=None):
"""
Check if a question has been released.
Keyword Arguments:
step (str): The step in the workflow to check.
None: check whether the problem as a whole is open.
"submission": check whether the submission section is open.
"peer-assessment": check whether the peer-assessment section is open.
"self-assessment": check whether the self-assessment section is open.
Returns:
bool
"""
# By default, assume that we're published, in case the runtime doesn't support publish date.
if hasattr(self.runtime, 'modulestore'):
is_published = self.runtime.modulestore.has_published_version(self)
else:
is_published = True
is_closed, reason, __, __ = self.is_closed(step=step)
is_released = is_published and (not is_closed or reason == 'due')
if self.start:
is_released = is_released and dt.datetime.now(pytz.UTC) > parse_date_value(self.start, self._)
return is_released
def get_assessment_module(self, mixin_name):
"""
Get a configured assessment module by name.
Args:
mixin_name (str): The name of the mixin (e.g. "self-assessment" or "peer-assessment")
Returns:
dict
Example:
>>> self.get_assessment_module('peer-assessment')
{
"name": "peer-assessment",
"start": None,
"due": None,
"must_grade": 5,
"must_be_graded_by": 3,
}
"""
for assessment in self.valid_assessments:
if assessment["name"] == mixin_name:
return assessment
def publish_assessment_event(self, event_name, assessment, **kwargs):
"""
Emit an analytics event for the peer assessment.
Args:
event_name (str): An identifier for this event type.
assessment (dict): The serialized assessment model.
Returns:
None
"""
parts_list = []
for part in assessment["parts"]:
# Some assessment parts do not include point values,
# only written feedback. In this case, the assessment
# part won't have an associated option.
option_dict = None
if part["option"] is not None:
option_dict = {
"name": part["option"]["name"],
"points": part["option"]["points"],
}
# All assessment parts are associated with criteria
criterion_dict = {
"name": part["criterion"]["name"],
"points_possible": part["criterion"]["points_possible"]
}
parts_list.append({
"option": option_dict,
"criterion": criterion_dict,
"feedback": part["feedback"]
})
event_data = {
"feedback": assessment["feedback"],
"rubric": {
"content_hash": assessment["rubric"]["content_hash"],
},
"scorer_id": assessment["scorer_id"],
"score_type": assessment["score_type"],
"scored_at": assessment["scored_at"],
"submission_uuid": assessment["submission_uuid"],
"parts": parts_list
}
for key in kwargs:
event_data[key] = kwargs[key]
self.runtime.publish(
self, event_name,
event_data
)
@XBlock.json_handler
def publish_event(self, data, suffix=''): # pylint: disable=unused-argument
"""
Publish the given data to an event.
Expects key 'event_name' to be present in the data dictionary.
"""
try:
event_name = data['event_name']
except KeyError:
logger.exception("Could not find the name of the event to be triggered.")
return {'success': False}
# Remove the name so we don't publish as part of the data.
del data['event_name']
self.runtime.publish(self, event_name, data)
return {'success': True}
def _serialize_opaque_key(self, key):
"""
Gracefully handle opaque keys, both before and after the transition.
https://github.com/edx/edx-platform/wiki/Opaque-Keys
Currently uses `to_deprecated_string()` to ensure that new keys
are backwards-compatible with keys we store in ORA2 database models.
Args:
key (unicode or OpaqueKey subclass): The key to serialize.
Returns:
unicode
"""
if hasattr(key, 'to_deprecated_string'):
return key.to_deprecated_string()
else:
return unicode(key)
def get_username(self, anonymous_user_id):
"""
Return the username of the user associated with anonymous_user_id
Args:
anonymous_user_id (str): the anonymous user id of the user
        Returns: the username if it can be identified. If the XBlock service fails to
        convert the anonymous id to a real user, returns None and logs the error.
"""
if hasattr(self, "xmodule_runtime"):
user = self.xmodule_runtime.get_real_user(anonymous_user_id)
if user:
return user.username
else:
logger.exception(
"XBlock service could not find user for anonymous_user_id '{}'".format(anonymous_user_id)
)
return None
def _adjust_start_date_for_beta_testers(self, start):
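        # Beta testers may access content early: move the start date back by the
        # runtime's days_early_for_beta, when that attribute is available.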
if hasattr(self, "xmodule_runtime"):
days_early_for_beta = getattr(self.xmodule_runtime, 'days_early_for_beta', 0)
if days_early_for_beta is not None:
delta = dt.timedelta(days_early_for_beta)
effective = start - delta
return effective
return start
def get_xblock_id(self):
"""
Returns the xblock id
"""
return self._serialize_opaque_key(self.scope_ids.usage_id)
| agpl-3.0 | -6,988,523,098,980,771,000 | 35.315292 | 116 | 0.594545 | false |
cleberzavadniak/canivett | canivett/tree.py | 1 | 6306 | import os
import errno
import logging
import imp
from fuse import Operations, FuseOSError
class Tree(Operations):
"""
Most of this class is based on the work of Stavros Korokithakis:
https://www.stavros.io/posts/python-fuse-filesystem/
;-)
"""
def __init__(self, base):
self.logger = logging.getLogger(self.__class__.__name__)
self.base = base
self.modules = {}
self.virtual_tree = {}
self.verbose = True
self.create_root()
self.load_config()
def destroy(self, path):
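        # FUSE teardown hook: give every mounted module a chance to clean up.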
for module_instance in self.virtual_tree.values():
module_instance.destroy()
# Helpers:
def create_root(self):
root_path = os.path.join(self.base, 'root')
if not os.path.exists(root_path):
os.mkdir(root_path)
def load_config(self):
config_path = os.path.join(self.base, 'canivett.cfg')
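        # Each non-comment line maps a virtual path to a plugin module name,
        # e.g. "/photos=gallery" (path and module name are hypothetical examples).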
with open(config_path) as cfg_file:
for line in cfg_file:
content, *comments = line.split('#') # NOQA
if not content:
continue
path, module_name = content.strip().split('=')
self.mkdirs(path)
module = self.get_module(module_name)
self.virtual_tree[path] = module.Module(self, path)
def mkdirs(self, path):
parts = path.split('/')
current = '/'
for part in parts:
current = os.path.join(current, part)
complete = self.get_real_path(current)
if not os.path.exists(complete):
os.mkdir(complete)
def _do_load_module(self, name):
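        # Plugin modules are looked up as installed packages named "canivett_<name>".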
real_name = 'canivett_' + name
_, path, *_ = imp.find_module(real_name) # NOQA
module = imp.load_package(real_name, path)
self.modules[name] = module
return module
def get_module(self, name):
try:
return self.modules[name]
except KeyError:
return self._do_load_module(name)
def get_real_path(self, path):
real = os.path.join(self.base, 'root', path.strip('/'))
return real
def __call__(self, op, path, *args):
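        # Route the operation to the module managing this path prefix, if any;
        # otherwise fall back to this class's plain on-disk implementation.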
verbose = self.verbose and not (op in ('statfs', 'getattr') and path == '/')
for managed_path, module in self.virtual_tree.items():
if path.startswith(managed_path):
obj = module.root
break
else:
obj = self
f = getattr(obj, op)
try:
result = f(path, *args)
except Exception as ex:
if verbose:
self.logger.info('{}({}) -> {}'.format(op, path, ex))
raise ex
else:
if verbose:
self.logger.info('{}({}) -> {}'.format(op, path, result))
return result
def raise_error(self, error):
raise FuseOSError(error)
# Init
# ==================
def init(self, path):
return 0
# Filesystem methods
# ==================
def access(self, path, mode):
full_path = self.get_real_path(path)
if not os.access(full_path, mode):
raise FuseOSError(errno.EACCES)
def chmod(self, path, mode):
full_path = self.get_real_path(path)
return os.chmod(full_path, mode)
def chown(self, path, uid, gid):
full_path = self.get_real_path(path)
return os.chown(full_path, uid, gid)
def getattr(self, path, fh=None):
full_path = self.get_real_path(path)
st = os.lstat(full_path)
keys = ('st_atime', 'st_ctime', 'st_gid', 'st_mode', 'st_mtime',
'st_nlink', 'st_size', 'st_uid')
return dict((key, getattr(st, key)) for key in keys)
def readdir(self, path, fh):
full_path = self.get_real_path(path)
dirents = ['.', '..']
if os.path.isdir(full_path):
dirents.extend(os.listdir(full_path))
for r in dirents:
yield r
def readlink(self, path):
pathname = os.readlink(self.get_real_path(path))
if pathname.startswith("/"):
# Path name is absolute, sanitize it.
return os.path.relpath(pathname, self.base)
else:
return pathname
def mknod(self, path, mode, dev):
return os.mknod(self.get_real_path(path), mode, dev)
def rmdir(self, path):
full_path = self.get_real_path(path)
return os.rmdir(full_path)
def mkdir(self, path, mode):
return os.mkdir(self.get_real_path(path), mode)
def statfs(self, path):
full_path = self.get_real_path(path)
stv = os.statvfs(full_path)
keys = ('f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail',
'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax')
return dict((key, getattr(stv, key)) for key in keys)
def unlink(self, path):
return os.unlink(self.get_real_path(path))
def symlink(self, name, target):
return os.symlink(name, self.get_real_path(target))
def rename(self, old, new):
return os.rename(self.get_real_path(old), self.get_real_path(new))
def link(self, target, name):
return os.link(self.get_real_path(target), self.get_real_path(name))
def utimens(self, path, times=None):
return os.utime(self.get_real_path(path), times)
# File methods
# ============
def open(self, path, flags):
full_path = self.get_real_path(path)
return os.open(full_path, flags)
def create(self, path, mode, fi=None):
full_path = self.get_real_path(path)
return os.open(full_path, os.O_WRONLY | os.O_CREAT, mode)
def read(self, path, length, offset, fh):
os.lseek(fh, offset, os.SEEK_SET)
return os.read(fh, length)
def write(self, path, buf, offset, fh):
os.lseek(fh, offset, os.SEEK_SET)
return os.write(fh, buf)
def truncate(self, path, length, fh=None):
full_path = self.get_real_path(path)
with open(full_path, 'r+') as f:
f.truncate(length)
def flush(self, path, fh):
return os.fsync(fh)
def release(self, path, fh):
return os.close(fh)
def fsync(self, path, fdatasync, fh):
return self.flush(path, fh)
| gpl-2.0 | -1,390,266,037,243,918,800 | 28.605634 | 84 | 0.553283 | false |
gabriel-samfira/jrunner | jrunner/jobqueue/preprocesors/__init__.py | 1 | 1097 | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import pickle as p
def process(task):
"""
    Some tasks need processing before being sent to the MQ.
    The processor will add the missing information before pushing.
"""
procesor = None
try:
procesor = importlib.import_module(
'jrunner.jobqueue.preprocesors.%s' % str(task.resource))
except Exception as err:
pass
if procesor is None:
return task
return procesor.process(task)
| apache-2.0 | -7,388,835,889,981,652,000 | 30.342857 | 77 | 0.714676 | false |
lvxejay/jmw-capstone | proteus/base_types/base_list_socket.py | 1 | 2046 | # ---------------------------------------------------------------------------------------#
# ----------------------------------------------------------------------------- HEADER --#
"""
:author:
Jared Webber
:synopsis:
:description:
:applications:
:see_also:
:license:
see license.txt and EULA.txt
"""
# ---------------------------------------------------------------------------------------#
# ---------------------------------------------------------------------------- IMPORTS --#
from .base_socket import MaterialXSocket
# ---------------------------------------------------------------------------------------#
# -------------------------------------------------------------------------- FUNCTIONS --#
# ---------------------------------------------------------------------------------------#
# ---------------------------------------------------------------------------- CLASSES --#
class MaterialXListSocket(MaterialXSocket):
_is_list = True
def draw_color(self, context, node):
pass
@classmethod
def get_default_value(cls):
raise NotImplementedError()
@classmethod
def get_from_values_code(cls):
raise NotImplementedError()
@classmethod
def get_join_lists_code(cls):
raise NotImplementedError()
@classmethod
def get_conversion_code(cls, data_type):
pass
@classmethod
def correct_value(cls, value):
pass
class PythonListSocket(MaterialXListSocket):
@classmethod
def get_default_value(cls):
return []
@classmethod
def get_default_value_code(cls):
return "[]"
@classmethod
def get_from_values_code(cls):
return "value"
@classmethod
def get_join_lists_code(cls):
return "list(itertools.chain(value))"
@classmethod
def correct_value(cls, value):
pass
@classmethod
def get_conversion_code(cls, data_type):
pass
| gpl-3.0 | -7,207,979,154,944,302,000 | 22.070588 | 90 | 0.388563 | false |
MariusWirtz/TM1py | TM1py/Services/TM1Service.py | 1 | 1569 | import pickle
from TM1py.Services import *
class TM1Service:
""" All features of TM1py are exposed through this service
Can be saved and restored from File, to avoid multiple authentication with TM1.
"""
def __init__(self, **kwargs):
self._tm1_rest = RESTService(**kwargs)
# instantiate all Services
self.chores = ChoreService(self._tm1_rest)
self.cubes = CubeService(self._tm1_rest)
self.dimensions = DimensionService(self._tm1_rest)
self.monitoring = MonitoringService(self._tm1_rest)
self.processes = ProcessService(self._tm1_rest)
self.security = SecurityService(self._tm1_rest)
self.server = ServerService(self._tm1_rest)
self.applications = ApplicationService(self._tm1_rest)
# Deprecated, use cubes.cells instead!
self.data = CellService(self._tm1_rest)
def logout(self):
self._tm1_rest.logout()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.logout()
@property
def whoami(self):
return self.security.get_current_user()
@property
def version(self):
return self._tm1_rest.version
@property
def connection(self):
return self._tm1_rest
def save_to_file(self, file_name):
with open(file_name, 'wb') as file:
pickle.dump(self, file)
@classmethod
def restore_from_file(cls, file_name):
with open(file_name, 'rb') as file:
return pickle.load(file)
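# A minimal usage sketch (keyword arguments are passed straight to RESTService,
# whose parameter names are defined elsewhere; the values below are illustrative):
#
#   with TM1Service(address='localhost', port=8001, user='admin',
#                   password='secret', ssl=True) as tm1:
#       print(tm1.version)
#       tm1.save_to_file('tm1_session.pk')
#
#   tm1 = TM1Service.restore_from_file('tm1_session.pk')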
| mit | -6,446,374,943,573,261,000 | 27.527273 | 83 | 0.631612 | false |
sridevikoushik31/nova | nova/virt/baremetal/ipmi.py | 1 | 10006 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Baremetal IPMI power manager.
"""
import os
import stat
import tempfile
from oslo.config import cfg
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova import paths
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import utils as bm_utils
opts = [
cfg.StrOpt('terminal',
default='shellinaboxd',
help='path to baremetal terminal program'),
cfg.StrOpt('terminal_cert_dir',
default=None,
help='path to baremetal terminal SSL cert(PEM)'),
cfg.StrOpt('terminal_pid_dir',
default=paths.state_path_def('baremetal/console'),
help='path to directory stores pidfiles of baremetal_terminal'),
cfg.IntOpt('ipmi_power_retry',
default=5,
help='maximal number of retries for IPMI operations'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
LOG = logging.getLogger(__name__)
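# ipmitool is invoked with "-f <password file>" (see _exec_ipmitool below) so the
# IPMI password never appears on the process command line; the temporary file is
# created with owner-only read/write permissions and removed after the call.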
def _make_password_file(password):
fd, path = tempfile.mkstemp()
os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR)
with os.fdopen(fd, "w") as f:
f.write(password)
return path
def _get_console_pid_path(node_id):
name = "%s.pid" % node_id
path = os.path.join(CONF.baremetal.terminal_pid_dir, name)
return path
def _get_console_pid(node_id):
pid_path = _get_console_pid_path(node_id)
if os.path.exists(pid_path):
with open(pid_path, 'r') as f:
pid_str = f.read()
try:
return int(pid_str)
except ValueError:
LOG.warn(_("pid file %s does not contain any pid"), pid_path)
return None
class IPMI(base.PowerManager):
"""IPMI Power Driver for Baremetal Nova Compute
    This PowerManager class provides a mechanism for controlling the power state
of physical hardware via IPMI calls. It also provides serial console access
where available.
"""
def __init__(self, node, **kwargs):
self.state = None
self.retries = None
self.node_id = node['id']
self.address = node['pm_address']
self.user = node['pm_user']
self.password = node['pm_password']
self.port = node['terminal_port']
        if self.node_id is None:
raise exception.InvalidParameterValue(_("Node id not supplied "
"to IPMI"))
        if self.address is None:
raise exception.InvalidParameterValue(_("Address not supplied "
"to IPMI"))
        if self.user is None:
raise exception.InvalidParameterValue(_("User not supplied "
"to IPMI"))
        if self.password is None:
raise exception.InvalidParameterValue(_("Password not supplied "
"to IPMI"))
def _exec_ipmitool(self, command):
args = ['ipmitool',
'-I',
'lanplus',
'-H',
self.address,
'-U',
self.user,
'-f']
pwfile = _make_password_file(self.password)
try:
args.append(pwfile)
args.extend(command.split(" "))
out, err = utils.execute(*args, attempts=3)
LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)s'"),
locals())
return out, err
finally:
bm_utils.unlink_without_raise(pwfile)
def _power_on(self):
"""Turn the power to this node ON."""
def _wait_for_power_on():
"""Called at an interval until the node's power is on."""
if self.is_power_on():
self.state = baremetal_states.ACTIVE
raise loopingcall.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
self.state = baremetal_states.ERROR
raise loopingcall.LoopingCallDone()
try:
self.retries += 1
self._exec_ipmitool("power on")
except Exception:
LOG.exception(_("IPMI power on failed"))
self.retries = 0
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_on)
timer.start(interval=0.5).wait()
def _power_off(self):
"""Turn the power to this node OFF."""
def _wait_for_power_off():
"""Called at an interval until the node's power is off."""
if self.is_power_on() is False:
self.state = baremetal_states.DELETED
raise loopingcall.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
self.state = baremetal_states.ERROR
raise loopingcall.LoopingCallDone()
try:
self.retries += 1
self._exec_ipmitool("power off")
except Exception:
LOG.exception(_("IPMI power off failed"))
self.retries = 0
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_off)
timer.start(interval=0.5).wait()
def _set_pxe_for_next_boot(self):
try:
self._exec_ipmitool("chassis bootdev pxe")
except Exception:
LOG.exception(_("IPMI set next bootdev failed"))
def activate_node(self):
"""Turns the power to node ON.
Sets node next-boot to PXE and turns the power on,
waiting up to ipmi_power_retry/2 seconds for confirmation
that the power is on.
:returns: One of baremetal_states.py, representing the new state.
"""
if self.is_power_on() and self.state == baremetal_states.ACTIVE:
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def reboot_node(self):
"""Cycles the power to a node.
Turns the power off, sets next-boot to PXE, and turns the power on.
Each action waits up to ipmi_power_retry/2 seconds for confirmation
that the power state has changed.
:returns: One of baremetal_states.py, representing the new state.
"""
self._power_off()
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def deactivate_node(self):
"""Turns the power to node OFF.
Turns the power off, and waits up to ipmi_power_retry/2 seconds
for confirmation that the power is off.
:returns: One of baremetal_states.py, representing the new state.
"""
self._power_off()
return self.state
def is_power_on(self):
"""Check if the power is currently on.
:returns: True if on; False if off; None if unable to determine.
"""
# NOTE(deva): string matching based on
# http://ipmitool.cvs.sourceforge.net/
# viewvc/ipmitool/ipmitool/lib/ipmi_chassis.c
res = self._exec_ipmitool("power status")[0]
if res == ("Chassis Power is on\n"):
return True
elif res == ("Chassis Power is off\n"):
return False
return None
def start_console(self):
if not self.port:
return
args = []
args.append(CONF.baremetal.terminal)
if CONF.baremetal.terminal_cert_dir:
args.append("-c")
args.append(CONF.baremetal.terminal_cert_dir)
else:
args.append("-t")
args.append("-p")
args.append(str(self.port))
args.append("--background=%s" % _get_console_pid_path(self.node_id))
args.append("-s")
try:
pwfile = _make_password_file(self.password)
ipmi_args = "/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s" \
" -I lanplus -U %(user)s -f %(pwfile)s sol activate" \
% {'uid': os.getuid(),
'gid': os.getgid(),
'address': self.address,
'user': self.user,
'pwfile': pwfile,
}
args.append(ipmi_args)
# Run shellinaboxd without pipes. Otherwise utils.execute() waits
# infinitely since shellinaboxd does not close passed fds.
x = ["'" + arg.replace("'", "'\\''") + "'" for arg in args]
x.append('</dev/null')
x.append('>/dev/null')
x.append('2>&1')
utils.execute(' '.join(x), shell=True)
finally:
bm_utils.unlink_without_raise(pwfile)
def stop_console(self):
console_pid = _get_console_pid(self.node_id)
if console_pid:
# Allow exitcode 99 (RC_UNAUTHORIZED)
utils.execute('kill', '-TERM', str(console_pid),
run_as_root=True,
check_exit_code=[0, 99])
bm_utils.unlink_without_raise(_get_console_pid_path(self.node_id))
| apache-2.0 | 7,198,640,514,716,515,000 | 33.38488 | 79 | 0.573056 | false |
Arthraim/douban2mongo | book2mongo.py | 1 | 2157 | # coding= utf-8
from bs4 import BeautifulSoup
import codecs
from mongoengine import *
from book import Book
connect('mydouban')
import os
os.chdir("book")
for filename in os.listdir("."):
with codecs.open(filename, "r", "utf-8") as html_file:
soup = BeautifulSoup(html_file.read())
for item in soup.find_all("li", "subject-item"):
# <a href="http://book.douban.com/subject/5992037/" onclick=""moreurl(this,{i:'14'})"" title="ไธบไปๅๅค็่ฐๆ">ไธบไปๅๅค็่ฐๆ</a>
a_tag = item.find_all("a")[1]
link = a_tag.get('href').encode('UTF-8')
title = a_tag.get('title').encode('UTF-8')
# <div class="pub">่ๅณฐ / ไธญไฟกๅบ็็คพ / 2011-4 / 29.00ๅ
</div>
pub = item.find("div", "pub").string.strip().encode('UTF-8')
# <div class="short-note">
# <div>
# <span class="rating4-t"></span>
# <span class="date">2013-12-27 ่ฏป่ฟ</span>
# <span class="tags">ๆ ็ญพ: ้ฉฌไผฏๅบธ ๅฐ่ฏด ๅๅฒ ไธญๅฝ ็ฅฅ็ๅพกๅ
</span>
# </div>
# <p class="comment">blabla</p>
# </div>
short_note = item.find("div", "short-note")
spans = short_note.div.find_all("span")
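            # a span class such as "rating4-t" encodes the star rating; strip
            # the "rating" prefix and "-t" suffix to keep only the digit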
rating = spans[0]['class'][0].replace("rating","").replace("-t","")
date = spans[1].string.encode("UTF-8").replace("่ฏป่ฟ","").strip()
            tags = []
            if len(spans) > 2:
                tags = spans[2].string.encode("UTF-8").replace("标签:","").strip().split(" ")
comment = short_note.p.string.encode("UTF-8").strip()
print ""
print title, pub, link
print rating, date, tags
print comment
book = Book()
book.title = title
book.pub = pub
book.link = link
book.rating = rating
book.date = date
book.tags = tags
book.comment = comment
try:
book.save()
except NotUniqueError as e:
print e
continue
| mit | 4,743,349,884,267,529,000 | 35.982143 | 136 | 0.494447 | false |
dahlia/wikidata | wikidata/quantity.py | 1 | 1837 | """:mod:`wikidata.quantity` --- Quantity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.7.0
"""
from typing import Optional
from .entity import Entity
__all__ = 'Quantity',
class Quantity:
"""A Quantity value represents a decimal number, together with information
about the uncertainty interval of this number, and a unit of measurement.
"""
amount = None # type: float
lower_bound = None # type: Optional[float]
upper_bound = None # type: Optional[float]
unit = None # type: Optional[Entity]
def __init__(self,
amount: float,
lower_bound: Optional[float],
upper_bound: Optional[float],
unit: Optional[Entity]) -> None:
self.amount = amount
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.unit = unit
def __eq__(self, other) -> bool:
if not isinstance(other, type(self)):
raise TypeError(
'expected an instance of {0.__module__}.{0.__qualname__}, '
'not {1!r}'.format(type(self), other)
)
return (other.amount == self.amount and
other.lower_bound == self.lower_bound and
other.upper_bound == self.upper_bound and
other.unit == self.unit)
def __hash__(self):
return hash((self.amount,
self.lower_bound,
self.upper_bound,
self.unit))
def __repr__(self) -> str:
return ('{0.__module__}.{0.__qualname__}({1!r}, '
'{2!r}, {3!r}, {4!r})').format(
type(self),
self.amount,
self.lower_bound,
self.upper_bound,
self.unit
)
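# A minimal usage sketch (illustrative values; unit may be an Entity or None):
#
#   q = Quantity(amount=9.8, lower_bound=9.7, upper_bound=9.9, unit=None)
#   q == Quantity(9.8, 9.7, 9.9, None)   # True: equality compares all four fields
#   repr(q)   # 'wikidata.quantity.Quantity(9.8, 9.7, 9.9, None)'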
| gpl-3.0 | 4,169,294,263,064,760,000 | 30.135593 | 78 | 0.494284 | false |
jeroanan/Gyroscope | gyroscope.py | 1 | 1371 | from collections import ChainMap
import logging
import time
import sys
import GetArgs
import GetSite
from Config import Defaults
from Config import LoadSites
from Config import LoadConfig
from Config import Settings
def work():
def init_config():
args = GetArgs.get_args()
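        # ChainMap resolves lookups left to right: command-line arguments take
        # precedence over the config file, which takes precedence over defaults.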
return ChainMap(args, LoadConfig.load_config(args.get("config"), args.get("no_config")), Defaults.get_defaults())
def init_logging():
logfile_location = Settings.get_logfile_location(config)
if logfile_location == "" or config.get("no_logfile", False):
logging.basicConfig(level=config["log_level"], format="%(asctime)s %(message)s")
else:
logging.basicConfig(filename=logfile_location, level=config["log_level"],
filemode=config["logfile_mode"], format="%(asctime)s %(message)s")
def get_site(site):
def site_disabled():
return site.get("disabled", False)
if not site_disabled():
GetSite.get_site(site, config)
config = init_config()
init_logging()
logging.info("Start")
start_time = time.time()
list(map(get_site, LoadSites.load_sites(config["sites_file"])))
logging.info("End (total time: %d seconds)" % (time.time() - start_time))
try:
work()
except KeyboardInterrupt:
logging.shutdown()
sys.exit(0)
| mit | -2,587,680,111,265,204,000 | 28.170213 | 121 | 0.646244 | false |
ooici/marine-integrations | mi/dataset/driver/dosta_abcdjm/dcl/driver.py | 1 | 6591 | """
@package mi.dataset.driver.dosta_abcdjm.dcl.driver
@file marine-integrations/mi/dataset/driver/dosta_abcdjm/dcl/driver.py
@author Steve Myerson
@brief Driver for the dosta_abcdjm_dcl
Release notes:
Initial Release
"""
__author__ = 'Steve Myerson'
__license__ = 'Apache 2.0'
from mi.core.common import BaseEnum
from mi.core.exceptions import ConfigurationException
from mi.core.log import get_logger; log = get_logger()
from mi.dataset.dataset_driver import \
DataSetDriverConfigKeys, \
HarvesterType, \
MultipleHarvesterDataSetDriver
from mi.dataset.harvester import \
SingleDirectoryHarvester
from mi.dataset.parser.dosta_abcdjm_dcl import \
DostaAbcdjmDclRecoveredParser, \
DostaAbcdjmDclTelemeteredParser, \
DostaAbcdjmDclRecoveredInstrumentDataParticle, \
DostaAbcdjmDclTelemeteredInstrumentDataParticle
class DataTypeKey(BaseEnum):
"""
These are the possible harvester/parser pairs for this driver
"""
DOSTA_ABCDJM_RECOVERED = 'dosta_abcdjm_dcl_recovered'
DOSTA_ABCDJM_TELEMETERED = 'dosta_abcdjm_dcl_telemetered'
class DostaAbcdjmDclDataSetDriver(MultipleHarvesterDataSetDriver):
def __init__(self, config, memento, data_callback, state_callback,
event_callback, exception_callback):
# Initialize the possible types of harvester/parser pairs
# for this driver.
data_keys = DataTypeKey.list()
# Link the data keys to the harvester type.
# Recovered harvester is single directory.
# Telemetered harvester is single directory.
harvester_type = {
DataTypeKey.DOSTA_ABCDJM_RECOVERED: HarvesterType.SINGLE_DIRECTORY,
DataTypeKey.DOSTA_ABCDJM_TELEMETERED: HarvesterType.SINGLE_DIRECTORY,
}
super(DostaAbcdjmDclDataSetDriver, self).__init__(config, memento,
data_callback, state_callback, event_callback,
exception_callback, data_keys, harvester_type=harvester_type)
@classmethod
def stream_config(cls):
return [
DostaAbcdjmDclRecoveredInstrumentDataParticle.type(),
DostaAbcdjmDclTelemeteredInstrumentDataParticle.type()
]
def _build_harvester(self, driver_state):
"""
Build the harvesters.
Verify correctness of data keys.
Display warnings if error detected in data keys or in the
creation of the harvesters.
@param driver_state The starting driver state
"""
harvesters = []
# Verify that the Recovered harvester has been configured.
# If so, build the harvester and add it to the list of harvesters.
if DataTypeKey.DOSTA_ABCDJM_RECOVERED in self._harvester_config:
rec_harvester = SingleDirectoryHarvester(
self._harvester_config.get(DataTypeKey.DOSTA_ABCDJM_RECOVERED),
driver_state[DataTypeKey.DOSTA_ABCDJM_RECOVERED],
lambda filename:
self._new_file_callback(filename,
DataTypeKey.DOSTA_ABCDJM_RECOVERED),
lambda modified:
self._modified_file_callback(modified,
DataTypeKey.DOSTA_ABCDJM_RECOVERED),
self._exception_callback)
harvesters.append(rec_harvester)
else:
log.warn('No configuration for dosta_abcdjm_dcl recovered harvester, not building')
# Verify that the Telemetered harvester has been configured.
# If so, build the harvester and add it to the list of harvesters.
if DataTypeKey.DOSTA_ABCDJM_TELEMETERED in self._harvester_config:
tel_harvester = SingleDirectoryHarvester(
self._harvester_config.get(DataTypeKey.DOSTA_ABCDJM_TELEMETERED),
driver_state[DataTypeKey.DOSTA_ABCDJM_TELEMETERED],
lambda filename:
self._new_file_callback(filename,
DataTypeKey.DOSTA_ABCDJM_TELEMETERED),
lambda modified:
self._modified_file_callback(modified,
DataTypeKey.DOSTA_ABCDJM_TELEMETERED),
self._exception_callback)
harvesters.append(tel_harvester)
else:
log.warn('No configuration for dosta_abcdjm_dcl telemetered harvester, not building')
return harvesters
def _build_parser(self, parser_state, stream_in, data_key):
"""
Build the requested parser based on the data key
@param parser_state starting parser state to pass to parser
        @param stream_in Handle of open file to pass to parser
@param data_key Key to determine which parser type is built
"""
# Build the recovered parser if requested.
if data_key == DataTypeKey.DOSTA_ABCDJM_RECOVERED:
config = self._parser_config[data_key]
config.update({
DataSetDriverConfigKeys.PARTICLE_MODULE:
'mi.dataset.parser.dosta_abcdjm_dcl',
DataSetDriverConfigKeys.PARTICLE_CLASS:
None
})
parser = DostaAbcdjmDclRecoveredParser(
config,
stream_in,
parser_state,
lambda state, ingested:
self._save_parser_state(state, data_key, ingested),
self._data_callback,
self._sample_exception_callback)
# Build the telemetered parser if requested.
elif data_key == DataTypeKey.DOSTA_ABCDJM_TELEMETERED:
config = self._parser_config[data_key]
config.update({
DataSetDriverConfigKeys.PARTICLE_MODULE:
'mi.dataset.parser.dosta_abcdjm_dcl',
DataSetDriverConfigKeys.PARTICLE_CLASS:
None
})
parser = DostaAbcdjmDclTelemeteredParser(
config,
stream_in,
parser_state,
lambda state, ingested:
self._save_parser_state(state, data_key, ingested),
self._data_callback,
self._sample_exception_callback)
# Not one of the keys we recognize?
# No parser for you!
else:
raise ConfigurationException('Dosta_abcdjm Parser configuration incorrect %s',
data_key)
return parser
| bsd-2-clause | -5,689,769,519,623,224,000 | 35.821229 | 97 | 0.617812 | false |
rouge8/hitsearch | threadtest/maker.py | 1 | 1288 | from time import sleep
import time
import threading
class BoxFiller(threading.Thread):
def __init__(self,parent):
threading.Thread.__init__(self)
self.parent = parent
def run(self):
count = 0
for i in range(30):
sleep(.5)
count += 1
self.parent._box_lock.acquire()
self.parent._box.append(count)
self.parent._box_lock.release()
class Maker:
def __init__(self):
self._box = []
self._boring = range(10)
self._box_lock = threading.Lock()
self.filler = BoxFiller(self)
def go(self):
self.filler.start()
@property
def box(self):
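        # Generator view of the box: spins (with a short sleep) while the box
        # is empty, yields items as the filler thread appends them, and stops
        # once the filler has finished and the box has been drained.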
while True:
if len(self._box) == 0 and not self.filler.is_alive():
raise StopIteration
if len(self._box) == 0:
sleep(.05)
continue
self._box_lock.acquire()
tmp = self._box.pop(0)
self._box_lock.release()
yield tmp
@property
def boring(self):
        while len(self._boring) != 0:
#self._box_lock.acquire()
tmp = self._boring.pop(0)
#self._box_lock.release()
yield tmp
raise StopIteration
| mit | 5,738,367,370,088,499,000 | 24.76 | 66 | 0.505435 | false |
sassoftware/jobslave | jobslave/slave.py | 1 | 1471 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import sys
from conary.lib.cfg import ConfigFile
from conary.lib.cfgtypes import CfgBool, CfgString, CfgPath
from jobslave import jobhandler
from jobslave.util import setupLogging
class SlaveConfig(ConfigFile):
debugMode = (CfgBool, False)
masterUrl = (CfgString, None)
conaryProxy = (CfgString, None)
jobDataPath = (CfgPath, '/tmp/jobData')
templateCache = (CfgPath, '/mnt/anaconda-templates')
binPath = (CfgPath, '/usr/bin')
def main(args):
if len(args) > 1:
sys.exit("Usage: %s [config]" % sys.argv[0])
elif args:
configPath = args.pop(0)
else:
configPath = '/srv/jobslave/config'
setupLogging(logLevel=logging.DEBUG)
cfg = SlaveConfig()
cfg.read(configPath)
jobData = json.load(open(cfg.jobDataPath))
handler = jobhandler.getHandler(cfg, jobData)
handler.run()
| apache-2.0 | 1,234,899,746,340,290,800 | 27.288462 | 74 | 0.711081 | false |
eigoshimizu/Genomon | scripts/genomon_pipeline/dna_pipeline.py | 1 | 57868 | import os
import shutil
import glob
from ruffus import *
from genomon_pipeline.config.run_conf import *
from genomon_pipeline.config.genomon_conf import *
from genomon_pipeline.config.sample_conf import *
from genomon_pipeline.dna_resource.bamtofastq import *
from genomon_pipeline.dna_resource.fastq_splitter import *
from genomon_pipeline.dna_resource.bwa_align import *
from genomon_pipeline.dna_resource.markduplicates import *
from genomon_pipeline.dna_resource.mutation_call import *
from genomon_pipeline.dna_resource.mutation_merge import *
from genomon_pipeline.dna_resource.sv_parse import *
from genomon_pipeline.dna_resource.sv_merge import *
from genomon_pipeline.dna_resource.sv_filt import *
from genomon_pipeline.dna_resource.qc_bamstats import *
from genomon_pipeline.dna_resource.qc_coverage import *
from genomon_pipeline.dna_resource.qc_merge import *
from genomon_pipeline.dna_resource.post_analysis import *
from genomon_pipeline.dna_resource.pre_pmsignature import *
from genomon_pipeline.dna_resource.pmsignature import *
from genomon_pipeline.dna_resource.paplot import *
# set task classes
bamtofastq = Bam2Fastq(genomon_conf.get("bam2fastq", "qsub_option"), run_conf.drmaa)
fastq_splitter = Fastq_splitter(genomon_conf.get("split_fastq", "qsub_option"), run_conf.drmaa)
bwa_align = Bwa_align(genomon_conf.get("bwa_mem", "qsub_option"), run_conf.drmaa)
markduplicates = Markduplicates(genomon_conf.get("markduplicates", "qsub_option"), run_conf.drmaa)
mutation_call = Mutation_call(genomon_conf.get("mutation_call", "qsub_option"), run_conf.drmaa)
mutation_merge = Mutation_merge(genomon_conf.get("mutation_merge", "qsub_option"), run_conf.drmaa)
sv_parse = SV_parse(genomon_conf.get("sv_parse", "qsub_option"), run_conf.drmaa)
sv_merge = SV_merge(genomon_conf.get("sv_merge", "qsub_option"), run_conf.drmaa)
sv_filt = SV_filt(genomon_conf.get("sv_filt", "qsub_option"), run_conf.drmaa)
r_qc_bamstats = Res_QC_Bamstats(genomon_conf.get("qc_bamstats", "qsub_option"), run_conf.drmaa)
r_qc_coverage = Res_QC_Coverage(genomon_conf.get("qc_coverage", "qsub_option"), run_conf.drmaa)
r_qc_merge = Res_QC_Merge(genomon_conf.get("qc_merge", "qsub_option"), run_conf.drmaa)
r_paplot = Res_PA_Plot(genomon_conf.get("paplot", "qsub_option"), run_conf.drmaa)
r_post_analysis = Res_PostAnalysis(genomon_conf.get("post_analysis", "qsub_option"), run_conf.drmaa)
r_pre_pmsignature = Res_PrePmsignature(genomon_conf.get("pre_pmsignature", "qsub_option"), run_conf.drmaa)
r_pmsignature_ind = Res_Pmsignature(genomon_conf.get("pmsignature_ind", "qsub_option"), run_conf.drmaa)
r_pmsignature_full = Res_Pmsignature(genomon_conf.get("pmsignature_full", "qsub_option"), run_conf.drmaa)
_debug = False
if genomon_conf.has_section("develop"):
if genomon_conf.has_option("develop", "debug") == True:
_debug = genomon_conf.getboolean("develop", "debug")
# generate output list of 'linked fastq'
linked_fastq_list = []
for sample in sample_conf.fastq:
if os.path.exists(run_conf.project_root + '/bam/' + sample + '/1.sorted.bam'): continue
if os.path.exists(run_conf.project_root + '/bam/' + sample + '/' + sample + '.markdup.bam'): continue
link_fastq_arr1 = []
link_fastq_arr2 = []
for (count, fastq_file) in enumerate(sample_conf.fastq[sample][0]):
fastq_prefix, ext = os.path.splitext(fastq_file)
link_fastq_arr1.append(run_conf.project_root + '/fastq/' + sample + '/' + str(count+1) + '_1' + ext)
link_fastq_arr2.append(run_conf.project_root + '/fastq/' + sample + '/' + str(count+1) + '_2' + ext)
linked_fastq_list.append([link_fastq_arr1,link_fastq_arr2])
# generate output list of 'bam2fastq'
bam2fastq_output_list = []
for sample in sample_conf.bam_tofastq:
if os.path.exists(run_conf.project_root + '/bam/' + sample + '/1.sorted.bam'): continue
if os.path.exists(run_conf.project_root + '/bam/' + sample + '/' + sample + '.markdup.bam'): continue
bam2fastq_arr1 = []
bam2fastq_arr2 = []
bam2fastq_arr1.append(run_conf.project_root + '/fastq/' + sample + '/1_1.fastq')
bam2fastq_arr2.append(run_conf.project_root + '/fastq/' + sample + '/1_2.fastq')
bam2fastq_output_list.append([bam2fastq_arr1,bam2fastq_arr2])
# generate input list of 'mutation call'
markdup_bam_list = []
merge_mutation_list = []
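# each markdup_bam_list entry: [tumor bam, matched normal bam (or None), control panel file (or None)]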
for complist in sample_conf.mutation_call:
if os.path.exists(run_conf.project_root + '/mutation/' + complist[0] + '/' + complist[0] + '.genomon_mutation.result.filt.txt'): continue
tumor_bam = run_conf.project_root + '/bam/' + complist[0] + '/' + complist[0] + '.markdup.bam'
normal_bam = run_conf.project_root + '/bam/' + complist[1] + '/' + complist[1] + '.markdup.bam' if complist[1] != None else None
panel = run_conf.project_root + '/mutation/control_panel/' + complist[2] + ".control_panel.txt" if complist[2] != None else None
markdup_bam_list.append([tumor_bam, normal_bam, panel])
# generate input list of 'SV parse'
parse_sv_bam_list = []
all_target_bams = []
unique_bams = []
for complist in sample_conf.sv_detection:
tumor_sample = complist[0]
if tumor_sample != None:
all_target_bams.append(run_conf.project_root + '/bam/' + tumor_sample + '/' + tumor_sample + '.markdup.bam')
normal_sample = complist[1]
if normal_sample != None:
all_target_bams.append(run_conf.project_root + '/bam/' + normal_sample + '/' + normal_sample + '.markdup.bam')
panel_name = complist[2]
if panel_name != None:
for panel_sample in sample_conf.control_panel[panel_name]:
all_target_bams.append(run_conf.project_root + '/bam/' + panel_sample + '/' + panel_sample + '.markdup.bam')
unique_bams = list(set(all_target_bams))
for bam in unique_bams:
dir_name = os.path.dirname(bam)
sample_name = os.path.basename(dir_name)
if os.path.exists(run_conf.project_root + '/sv/' + sample_name + '/' + sample_name + '.junction.clustered.bedpe.gz') and os.path.exists(run_conf.project_root + '/sv/' + sample_name + '/' + sample_name + '.junction.clustered.bedpe.gz.tbi'): continue
parse_sv_bam_list.append(bam)
# generate input list of 'SV merge'
unique_complist = []
merge_bedpe_list = []
for complist in sample_conf.sv_detection:
control_panel_name = complist[2]
if control_panel_name != None and control_panel_name not in unique_complist:
unique_complist.append(control_panel_name)
for control_panel_name in unique_complist:
if os.path.exists(run_conf.project_root + '/sv/non_matched_control_panel/' + control_panel_name + '.merged.junction.control.bedpe.gz') and os.path.exists(run_conf.project_root + '/sv/non_matched_control_panel/' + control_panel_name + '.merged.junction.control.bedpe.gz.tbi'): continue
tmp_list = []
tmp_list.append(run_conf.project_root + '/sv/control_panel/' + control_panel_name + ".control_info.txt")
for sample in sample_conf.control_panel[control_panel_name]:
tmp_list.append(run_conf.project_root+ "/sv/"+ sample +"/"+ sample +".junction.clustered.bedpe.gz")
merge_bedpe_list.append(tmp_list)
# generate input list of 'SV filt'
filt_bedpe_list = []
for complist in sample_conf.sv_detection:
if os.path.exists(run_conf.project_root + '/sv/' + complist[0] +'/'+ complist[0] +'.genomonSV.result.filt.txt'): continue
filt_bedpe_list.append(run_conf.project_root+ "/sv/"+ complist[0] +"/"+ complist[0] +".junction.clustered.bedpe.gz")
# generate input list of 'qc'
qc_bamstats_list = []
qc_coverage_list = []
qc_merge_list = []
for sample in sample_conf.qc:
if os.path.exists(run_conf.project_root + '/qc/' + sample + '/' + sample + '.genomonQC.result.txt'): continue
qc_merge_list.append(
[run_conf.project_root + '/qc/' + sample + '/' + sample + '.bamstats',
run_conf.project_root + '/qc/' + sample + '/' + sample + '.coverage'])
if not os.path.exists(run_conf.project_root + '/qc/' + sample + '/' + sample + '.bamstats'):
qc_bamstats_list.append(run_conf.project_root + '/bam/' + sample +'/'+ sample +'.markdup.bam')
if not os.path.exists(run_conf.project_root + '/qc/' + sample + '/' + sample + '.coverage'):
qc_coverage_list.append(run_conf.project_root + '/bam/' + sample +'/'+ sample +'.markdup.bam')
###
# input/output lists for post-analysis
###
genomon_conf_name, genomon_conf_ext = os.path.splitext(os.path.basename(run_conf.genomon_conf_file))
sample_conf_name, sample_conf_ext = os.path.splitext(os.path.basename(run_conf.sample_conf_file))
# generate input list of 'post analysis for mutation'
pa_outputs_mutation = r_post_analysis.output_files("mutation", sample_conf.mutation_call, run_conf.project_root, sample_conf_name, genomon_conf)
pa_inputs_mutation = []
if pa_outputs_mutation["run_pa"] == True:
for complist in sample_conf.mutation_call:
pa_inputs_mutation.append(run_conf.project_root + '/mutation/' + complist[0] +'/'+ complist[0] +'.genomon_mutation.result.filt.txt')
# generate input list of 'post analysis for SV'
pa_outputs_sv = r_post_analysis.output_files("sv", sample_conf.sv_detection, run_conf.project_root, sample_conf_name, genomon_conf)
pa_inputs_sv = []
if pa_outputs_sv["run_pa"] == True:
for complist in sample_conf.sv_detection:
pa_inputs_sv.append(run_conf.project_root + '/sv/' + complist[0] +'/'+ complist[0] +'.genomonSV.result.filt.txt')
# generate input list of 'post analysis for qc'
pa_outputs_qc = r_post_analysis.output_files("qc", sample_conf.qc, run_conf.project_root, sample_conf_name, genomon_conf)
pa_inputs_qc = []
if pa_outputs_qc["run_pa"] == True:
for sample in sample_conf.qc:
pa_inputs_qc.append(run_conf.project_root + '/qc/' + sample + '/' + sample + '.genomonQC.result.txt')
###
# input/output lists for paplot
###
paplot_output = run_conf.project_root + '/paplot/' + sample_conf_name + '/index.html'
## mutation
use_mutations = []
if pa_outputs_mutation["case1"]["output_filt"] != "":
use_mutations.append(pa_outputs_mutation["case1"]["output_filt"])
if pa_outputs_mutation["case2"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpanel"):
use_mutations.append(pa_outputs_mutation["case2"]["output_filt"])
if pa_outputs_mutation["case3"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpair"):
use_mutations.append(pa_outputs_mutation["case3"]["output_filt"])
if pa_outputs_mutation["case4"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpanel") and genomon_conf.getboolean("paplot", "include_unpair"):
use_mutations.append(pa_outputs_mutation["case4"]["output_filt"])
paplot_inputs_mutation = []
if os.path.exists(paplot_output) == False or pa_outputs_mutation["run_pa"] == True:
paplot_inputs_mutation.extend(use_mutations)
## pmsignature
# ind
ind_outputs = []
ind_exists = True
for i in range(genomon_conf.getint("pmsignature_ind", "signum_min"), genomon_conf.getint("pmsignature_ind", "signum_max") + 1):
fname = run_conf.project_root + '/pmsignature/' + sample_conf_name + '/pmsignature.ind.result.%d.json' % i
ind_outputs.append(fname)
if not os.path.exists(fname): ind_exists = False
run_ind = False
paplot_inputs_ind = []
if len(sample_conf.mutation_call) > 0 and genomon_conf.getboolean("pmsignature_ind", "enable") and len(use_mutations) > 0:
if ind_exists == False: run_ind = True
elif pa_outputs_mutation["run_pa"] == True: run_ind = True
elif not os.path.exists(run_conf.project_root + '/pmsignature/' + sample_conf_name + '/mutation.cut.txt'): run_ind = True
if os.path.exists(paplot_output) == False or run_ind == True:
paplot_inputs_ind.extend(ind_outputs)
# full
full_outputs = []
full_exists = True
for i in range(genomon_conf.getint("pmsignature_full", "signum_min"), genomon_conf.getint("pmsignature_full", "signum_max") + 1):
fname = run_conf.project_root + '/pmsignature/' + sample_conf_name + '/pmsignature.full.result.%d.json' % i
full_outputs.append(fname)
if not os.path.exists(fname): full_exists = False
run_full = False
paplot_inputs_full = []
if len(sample_conf.mutation_call) > 0 and genomon_conf.getboolean("pmsignature_full", "enable") and len(use_mutations) > 0:
if full_exists == False: run_full = True
elif pa_outputs_mutation["run_pa"] == True: run_full = True
elif not os.path.exists(run_conf.project_root + '/pmsignature/' + sample_conf_name + '/mutation.cut.txt'): run_full = True
if os.path.exists(paplot_output) == False or run_full == True:
paplot_inputs_full.extend(full_outputs)
pmsignature_inputs = []
if run_ind == True or run_full == True:
pmsignature_inputs.extend(use_mutations)
## sv
paplot_inputs_sv = []
if os.path.exists(paplot_output) == False or pa_outputs_sv["run_pa"] == True:
if pa_outputs_sv["case1"]["output_filt"] != "":
paplot_inputs_sv.append(pa_outputs_sv["case1"]["output_filt"])
if pa_outputs_sv["case2"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpanel"):
paplot_inputs_sv.append(pa_outputs_sv["case2"]["output_filt"])
if pa_outputs_sv["case3"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpair"):
paplot_inputs_sv.append(pa_outputs_sv["case3"]["output_filt"])
if pa_outputs_sv["case4"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpanel") and genomon_conf.getboolean("paplot", "include_unpair"):
paplot_inputs_sv.append(pa_outputs_sv["case4"]["output_filt"])
## qc
paplot_inputs_qc = []
if os.path.exists(paplot_output) == False or pa_outputs_qc["run_pa"] == True:
paplot_inputs_qc.extend(pa_outputs_qc["outputs"])
paplot_inputs = []
paplot_inputs.extend(paplot_inputs_qc)
paplot_inputs.extend(paplot_inputs_sv)
paplot_inputs.extend(paplot_inputs_mutation)
paplot_inputs.extend(paplot_inputs_ind)
paplot_inputs.extend(paplot_inputs_full)
if _debug:
from pprint import pprint
print ("post-analysis-mutation"); pprint (pa_outputs_mutation); print ("post-analysis-sv"); pprint (pa_outputs_sv); print ("post-analysis-qc"); pprint (pa_outputs_qc)
print ("paplot"); pprint (paplot_inputs)
print ("pmsignature"); pprint (pmsignature_inputs)
# prepare output directories
if not os.path.isdir(run_conf.project_root): os.mkdir(run_conf.project_root)
if not os.path.isdir(run_conf.project_root + '/script'): os.mkdir(run_conf.project_root + '/script')
if not os.path.isdir(run_conf.project_root + '/script/sv_merge'): os.mkdir(run_conf.project_root + '/script/sv_merge')
if not os.path.isdir(run_conf.project_root + '/log'): os.mkdir(run_conf.project_root + '/log')
if not os.path.isdir(run_conf.project_root + '/log/sv_merge'): os.mkdir(run_conf.project_root + '/log/sv_merge')
if not os.path.isdir(run_conf.project_root + '/fastq'): os.mkdir(run_conf.project_root + '/fastq')
if not os.path.isdir(run_conf.project_root + '/bam'): os.mkdir(run_conf.project_root + '/bam')
if not os.path.isdir(run_conf.project_root + '/mutation'): os.mkdir(run_conf.project_root + '/mutation')
if not os.path.isdir(run_conf.project_root + '/mutation/control_panel'): os.mkdir(run_conf.project_root + '/mutation/control_panel')
if not os.path.isdir(run_conf.project_root + '/mutation/hotspot'): os.mkdir(run_conf.project_root + '/mutation/hotspot')
if not os.path.isdir(run_conf.project_root + '/sv'): os.mkdir(run_conf.project_root + '/sv')
if not os.path.isdir(run_conf.project_root + '/sv/non_matched_control_panel'): os.mkdir(run_conf.project_root + '/sv/non_matched_control_panel')
if not os.path.isdir(run_conf.project_root + '/sv/control_panel'): os.mkdir(run_conf.project_root + '/sv/control_panel')
if not os.path.isdir(run_conf.project_root + '/qc'): os.mkdir(run_conf.project_root + '/qc')
for sample in sample_conf.qc:
if not os.path.isdir(run_conf.project_root + '/qc/' + sample): os.mkdir(run_conf.project_root + '/qc/' + sample)
if (genomon_conf.getboolean("post_analysis", "enable") == True):
if not os.path.exists(run_conf.project_root + '/post_analysis'): os.mkdir(run_conf.project_root + '/post_analysis')
if not os.path.exists(run_conf.project_root + '/post_analysis/' + sample_conf_name): os.mkdir(run_conf.project_root + '/post_analysis/' + sample_conf_name)
if not os.path.isdir(run_conf.project_root + '/script/post_analysis'): os.mkdir(run_conf.project_root + '/script/post_analysis')
if not os.path.isdir(run_conf.project_root + '/log/post_analysis'): os.mkdir(run_conf.project_root + '/log/post_analysis')
if (genomon_conf.getboolean("paplot", "enable") == True):
if not os.path.isdir(run_conf.project_root + '/paplot/'): os.mkdir(run_conf.project_root + '/paplot/')
if not os.path.isdir(run_conf.project_root + '/paplot/' + sample_conf_name): os.mkdir(run_conf.project_root + '/paplot/' + sample_conf_name)
if not os.path.isdir(run_conf.project_root + '/script/paplot'): os.mkdir(run_conf.project_root + '/script/paplot')
if not os.path.isdir(run_conf.project_root + '/log/paplot'): os.mkdir(run_conf.project_root + '/log/paplot')
if (genomon_conf.getboolean("pmsignature_ind", "enable") == True) or (genomon_conf.getboolean("pmsignature_full", "enable") == True):
if not os.path.isdir(run_conf.project_root + '/pmsignature/'): os.mkdir(run_conf.project_root + '/pmsignature/')
if not os.path.isdir(run_conf.project_root + '/pmsignature/' + sample_conf_name): os.mkdir(run_conf.project_root + '/pmsignature/' + sample_conf_name)
if not os.path.isdir(run_conf.project_root + '/script/pmsignature'): os.mkdir(run_conf.project_root + '/script/pmsignature')
if not os.path.isdir(run_conf.project_root + '/log/pmsignature'): os.mkdir(run_conf.project_root + '/log/pmsignature')
if not os.path.isdir(run_conf.project_root + '/config'): os.mkdir(run_conf.project_root + '/config')
for outputfiles in (bam2fastq_output_list, linked_fastq_list):
for outputfile in outputfiles:
sample = os.path.basename(os.path.dirname(outputfile[0][0]))
fastq_dir = run_conf.project_root + '/fastq/' + sample
bam_dir = run_conf.project_root + '/bam/' + sample
if not os.path.isdir(fastq_dir): os.mkdir(fastq_dir)
if not os.path.isdir(bam_dir): os.mkdir(bam_dir)
for target_sample_dict in (sample_conf.bam_import, sample_conf.fastq, sample_conf.bam_tofastq):
for sample in target_sample_dict:
script_dir = run_conf.project_root + '/script/' + sample
log_dir = run_conf.project_root + '/log/' + sample
if not os.path.isdir(script_dir): os.mkdir(script_dir)
if not os.path.isdir(log_dir): os.mkdir(log_dir)
shutil.copyfile(run_conf.genomon_conf_file, run_conf.project_root + '/config/' + genomon_conf_name +'_'+ run_conf.analysis_timestamp + genomon_conf_ext)
shutil.copyfile(run_conf.sample_conf_file, run_conf.project_root + '/config/' + sample_conf_name +'_'+ run_conf.analysis_timestamp + sample_conf_ext)
# prepare output directory for each sample and make mutation control panel file
for complist in sample_conf.mutation_call:
# make dir
mutation_dir = run_conf.project_root + '/mutation/' + complist[0]
if not os.path.isdir(mutation_dir): os.mkdir(mutation_dir)
# make the control panel text
control_panel_name = complist[2]
if control_panel_name != None:
control_panel_file = run_conf.project_root + '/mutation/control_panel/' + control_panel_name + ".control_panel.txt"
with open(control_panel_file, "w") as out_handle:
for panel_sample in sample_conf.control_panel[control_panel_name]:
out_handle.write(run_conf.project_root + '/bam/' + panel_sample + '/' + panel_sample + '.markdup.bam' + "\n")
# make SV configuration file
for complist in sample_conf.sv_detection:
# make the control yaml file
control_panel_name = complist[2]
if control_panel_name != None:
control_conf = run_conf.project_root + '/sv/control_panel/' + control_panel_name + ".control_info.txt"
with open(control_conf, "w") as out_handle:
for sample in sample_conf.control_panel[control_panel_name]:
out_handle.write(sample+ "\t"+ run_conf.project_root+ "/sv/"+ sample +"/"+ sample+ "\n")
# link the import bam to project directory
@originate(sample_conf.bam_import.keys())
def link_import_bam(sample):
bam = sample_conf.bam_import[sample]
link_dir = run_conf.project_root + '/bam/' + sample
bam_prefix, ext = os.path.splitext(bam)
if not os.path.isdir(link_dir): os.mkdir(link_dir)
if (not os.path.exists(link_dir +'/'+ sample +'.markdup.bam')) and (not os.path.exists(link_dir +'/'+ sample +'.markdup.bam.bai')):
os.symlink(bam, link_dir +'/'+ sample +'.markdup.bam')
if (os.path.exists(bam +'.bai')):
os.symlink(bam +'.bai', link_dir +'/'+ sample +'.markdup.bam.bai')
elif (os.path.exists(bam_prefix +'.bai')):
os.symlink(bam_prefix +'.bai', link_dir +'/'+ sample +'.markdup.bam.bai')
# convert bam to fastq
@originate(bam2fastq_output_list)
def bam2fastq(outputfiles):
sample = os.path.basename(os.path.dirname(outputfiles[0][0]))
output_dir = run_conf.project_root + '/fastq/' + sample
arguments = {"biobambam": genomon_conf.get("SOFTWARE", "biobambam"),
"param": genomon_conf.get("bam2fastq", "params"),
"input_bam": sample_conf.bam_tofastq[sample],
"f1_name": outputfiles[0][0],
"f2_name": outputfiles[1][0],
"o1_name": output_dir + '/unmatched_first_output.txt',
"o2_name": output_dir + '/unmatched_second_output.txt',
"t": output_dir + '/temp.txt',
"s": output_dir + '/single_end_output.txt'}
bamtofastq.task_exec(arguments, run_conf.project_root + '/log/' + sample, run_conf.project_root + '/script/'+ sample)
# link the input fastq to project directory
@originate(linked_fastq_list)
def link_input_fastq(output_file):
sample = os.path.basename(os.path.dirname(output_file[0][0]))
fastq_dir = run_conf.project_root + '/fastq/' + sample
fastq_prefix, ext = os.path.splitext(sample_conf.fastq[sample][0][0])
# Todo
# 1. should compare the timestamps between input and linked file
# 2. check md5sum ?
for (count, fastq_files) in enumerate(sample_conf.fastq[sample][0]):
fastq_prefix, ext = os.path.splitext(fastq_files)
if not os.path.exists(fastq_dir + '/'+str(count+1)+'_1'+ ext): os.symlink(sample_conf.fastq[sample][0][count], fastq_dir + '/'+str(count+1)+'_1'+ ext)
if not os.path.exists(fastq_dir + '/'+str(count+1)+'_2'+ ext): os.symlink(sample_conf.fastq[sample][1][count], fastq_dir + '/'+str(count+1)+'_2'+ ext)
# split fastq
@subdivide([bam2fastq, link_input_fastq], formatter(), "{path[0]}/*_*.fastq_split", "{path[0]}")
def split_files(input_files, output_files, target_dir):
sample_name = os.path.basename(target_dir)
for oo in output_files:
os.unlink(oo)
split_lines = genomon_conf.get("split_fastq", "split_fastq_line_number")
input_prefix, ext = os.path.splitext(input_files[0][0])
arguments = {"lines": split_lines,
"fastq_filter": genomon_conf.get("split_fastq", "fastq_filter"),
"target_dir": target_dir,
"ext": ext}
fastq_splitter.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/'+ sample_name, 2)
file_list = glob.glob(target_dir + '/1_*.fastq_split')
file_list.sort()
last_file_lines = sum(1 for line in open(file_list[-1]))
all_line_num = ((len(file_list)-1)*int(split_lines)) + last_file_lines
with open(target_dir + "/fastq_line_num.txt", "w") as out_handle:
out_handle.write(str(all_line_num)+"\n")
for input_fastq in input_files[0]:
os.unlink(input_fastq)
for input_fastq in input_files[1]:
os.unlink(input_fastq)
#bwa
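# one array job per sample: each pair of split fastq chunks (1_XXXX/2_XXXX) is
# aligned with bwa mem and written as a per-chunk sorted bam, merged in markdup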
@subdivide(split_files, formatter(".+/(.+)/1_0000.fastq_split"), add_inputs("{subpath[0][2]}/fastq/{subdir[0][0]}/2_0000.fastq_split"), "{subpath[0][2]}/bam/{subdir[0][0]}/{subdir[0][0]}_*.sorted.bam", "{subpath[0][2]}/fastq/{subdir[0][0]}", "{subpath[0][2]}/bam/{subdir[0][0]}")
def map_dna_sequence(input_files, output_files, input_dir, output_dir):
sample_name = os.path.basename(output_dir)
all_line_num = 0
with open(input_dir + "/fastq_line_num.txt") as in_handle:
tmp_num = in_handle.read()
all_line_num = int(tmp_num)
split_lines = genomon_conf.get("split_fastq", "split_fastq_line_number")
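    # ceiling division: number of fastq chunks (= bwa array tasks) needed to
    # cover all_line_num lines at split_lines lines per chunk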
    ans_quotient = all_line_num // int(split_lines)
ans_remainder = all_line_num % int(split_lines)
max_task_id = ans_quotient if ans_remainder == 0 else ans_quotient + 1
arguments = {"input_dir": input_dir,
"output_dir": output_dir,
"sample_name": sample_name,
"bwa": genomon_conf.get("SOFTWARE", "bwa"),
"bwa_params": genomon_conf.get("bwa_mem", "bwa_params"),
"ref_fa":genomon_conf.get("REFERENCE", "ref_fasta"),
"biobambam": genomon_conf.get("SOFTWARE", "biobambam")}
bwa_align.task_exec(arguments, run_conf.project_root + '/log/' + sample_name , run_conf.project_root + '/script/' + sample_name, max_task_id)
for task_id in range(max_task_id):
num = str(task_id).zfill(4)
os.unlink(input_dir +'/1_'+str(num)+'.fastq_split')
os.unlink(input_dir +'/2_'+str(num)+'.fastq_split')
os.unlink(output_dir+'/'+sample_name+'_'+str(num)+'.bwa.sam')
# merge sorted bams into one and mark duplicate reads with biobambam
@collate(map_dna_sequence, formatter(), "{subpath[0][2]}/bam/{subdir[0][0]}/{subdir[0][0]}.markdup.bam", "{subpath[0][2]}/bam/{subdir[0][0]}")
def markdup(input_files, output_file, output_dir):
sample_name = os.path.basename(output_dir)
output_prefix, ext = os.path.splitext(output_file)
input_bam_files = ""
for input_file in input_files:
input_bam_files = input_bam_files + " I=" + input_file
arguments = {"biobambam": genomon_conf.get("SOFTWARE", "biobambam"),
"out_prefix": output_prefix,
"input_bam_files": input_bam_files,
"out_bam": output_file}
markduplicates.task_exec(arguments, run_conf.project_root + '/log/' + sample_name , run_conf.project_root + '/script/'+ sample_name)
for input_file in input_files:
os.unlink(input_file)
os.unlink(input_file + ".bai")
# identify mutations
@follows( markdup )
@follows( link_import_bam )
@subdivide(markdup_bam_list, formatter(), "{subpath[0][2]}/mutation/{subdir[0][0]}/{subdir[0][0]}.genomon_mutation.result.filt.txt", "{subpath[0][2]}/mutation/{subdir[0][0]}")
def identify_mutations(input_file, output_file, output_dir):
sample_name = os.path.basename(output_dir)
active_inhouse_normal_flag = False
if genomon_conf.has_option("annotation", "active_inhouse_normal_flag"):
active_inhouse_normal_flag = genomon_conf.get("annotation", "active_inhouse_normal_flag")
inhouse_normal_tabix_db = ""
if genomon_conf.has_option("REFERENCE", "inhouse_normal_tabix_db"):
inhouse_normal_tabix_db = genomon_conf.get("REFERENCE", "inhouse_normal_tabix_db")
active_inhouse_tumor_flag = False
if genomon_conf.has_option("annotation", "active_inhouse_tumor_flag"):
active_inhouse_tumor_flag = genomon_conf.get("annotation", "active_inhouse_tumor_flag")
inhouse_tumor_tabix_db = ""
if genomon_conf.has_option("REFERENCE", "inhouse_tumor_tabix_db"):
inhouse_tumor_tabix_db = genomon_conf.get("REFERENCE", "inhouse_tumor_tabix_db")
active_HGMD_flag = False
if genomon_conf.has_option("annotation", "active_HGMD_flag"):
active_HGMD_flag = genomon_conf.get("annotation", "active_HGMD_flag")
HGMD_tabix_db = ""
if genomon_conf.has_option("REFERENCE", "HGMD_tabix_db"):
HGMD_tabix_db = genomon_conf.get("REFERENCE", "HGMD_tabix_db")
arguments = {
# fisher mutation
"fisher": genomon_conf.get("SOFTWARE", "fisher"),
"fisher_pair_params": genomon_conf.get("fisher_mutation_call", "pair_params"),
"fisher_single_params": genomon_conf.get("fisher_mutation_call", "single_params"),
# realignment filter
"mutfilter": genomon_conf.get("SOFTWARE", "mutfilter"),
"realignment_params": genomon_conf.get("realignment_filter","params"),
# indel filter
"indel_params": genomon_conf.get("indel_filter", "params"),
# breakpoint filter
"breakpoint_params": genomon_conf.get("breakpoint_filter","params"),
# simplerepeat filter
"simple_repeat_db":genomon_conf.get("REFERENCE", "simple_repeat_tabix_db"),
# EB filter
"EBFilter": genomon_conf.get("SOFTWARE", "ebfilter"),
"eb_map_quality": genomon_conf.get("eb_filter","map_quality"),
"eb_base_quality": genomon_conf.get("eb_filter","base_quality"),
"filter_flags": genomon_conf.get("eb_filter","filter_flags"),
"control_bam_list": input_file[2],
# hotspot mutation caller
"hotspot": genomon_conf.get("SOFTWARE","hotspot"),
"hotspot_database":genomon_conf.get("REFERENCE","hotspot_db"),
"active_hotspot_flag":genomon_conf.get("hotspot","active_hotspot_flag"),
"hotspot_params": genomon_conf.get("hotspot","params"),
"mutil": genomon_conf.get("SOFTWARE", "mutil"),
# original_annotations
"mutanno": genomon_conf.get("SOFTWARE", "mutanno"),
"active_inhouse_normal_flag": active_inhouse_normal_flag,
"inhouse_normal_database":inhouse_normal_tabix_db,
"active_inhouse_tumor_flag": active_inhouse_tumor_flag,
"inhouse_tumor_database":inhouse_tumor_tabix_db,
"active_HGVD_2013_flag": genomon_conf.get("annotation", "active_HGVD_2013_flag"),
"HGVD_2013_database":genomon_conf.get("REFERENCE", "HGVD_2013_tabix_db"),
"active_HGVD_2016_flag": genomon_conf.get("annotation", "active_HGVD_2016_flag"),
"HGVD_2016_database":genomon_conf.get("REFERENCE", "HGVD_2016_tabix_db"),
"active_ExAC_flag": genomon_conf.get("annotation", "active_ExAC_flag"),
"ExAC_database":genomon_conf.get("REFERENCE", "ExAC_tabix_db"),
"active_HGMD_flag": active_HGMD_flag,
"HGMD_database": HGMD_tabix_db,
# annovar
"active_annovar_flag": genomon_conf.get("annotation", "active_annovar_flag"),
"annovar": genomon_conf.get("SOFTWARE", "annovar"),
"annovar_database": genomon_conf.get("annotation", "annovar_database"),
"table_annovar_params": genomon_conf.get("annotation", "table_annovar_params"),
"annovar_buildver": genomon_conf.get("annotation", "annovar_buildver"),
# commmon
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"ref_fa":genomon_conf.get("REFERENCE", "ref_fasta"),
"interval_list": genomon_conf.get("REFERENCE", "interval_list"),
"disease_bam": input_file[0],
"control_bam": input_file[1],
"out_prefix": output_dir + '/' + sample_name,
"samtools": genomon_conf.get("SOFTWARE", "samtools"),
"blat": genomon_conf.get("SOFTWARE", "blat")}
interval_list = genomon_conf.get("REFERENCE", "interval_list")
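    # one mutation-call array task is launched per line (region) of the interval list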
max_task_id = sum(1 for line in open(interval_list))
mutation_call.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/' + sample_name, max_task_id)
arguments = {
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"control_bam": input_file[1],
"control_bam_list": input_file[2],
"active_annovar_flag": genomon_conf.get("annotation", "active_annovar_flag"),
"annovar_buildver": genomon_conf.get("annotation", "annovar_buildver"),
"active_HGVD_2013_flag": genomon_conf.get("annotation", "active_HGVD_2013_flag"),
"active_HGVD_2016_flag": genomon_conf.get("annotation", "active_HGVD_2016_flag"),
"active_ExAC_flag": genomon_conf.get("annotation", "active_ExAC_flag"),
"active_HGMD_flag": active_HGMD_flag,
"active_inhouse_normal_flag": active_inhouse_normal_flag,
"active_inhouse_tumor_flag": active_inhouse_tumor_flag,
"filecount": max_task_id,
"mutil": genomon_conf.get("SOFTWARE", "mutil"),
"pair_params": genomon_conf.get("mutation_util","pair_params"),
"single_params": genomon_conf.get("mutation_util","single_params"),
"active_hotspot_flag":genomon_conf.get("hotspot","active_hotspot_flag"),
"hotspot_database":genomon_conf.get("REFERENCE","hotspot_db"),
"meta_info_em": get_meta_info(["fisher", "mutfilter", "ebfilter", "mutil", "mutanno"]),
"meta_info_m": get_meta_info(["fisher", "mutfilter", "mutil", "mutanno"]),
"meta_info_ema": get_meta_info(["fisher", "mutfilter", "ebfilter", "mutil", "mutanno", "hotspot"]),
"meta_info_ma": get_meta_info(["fisher", "mutfilter", "mutil", "mutanno", "hotspot"]),
"out_prefix": output_dir + '/' + sample_name}
mutation_merge.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/' + sample_name)
    annovar_buildver = genomon_conf.get("annotation", "annovar_buildver")
    for task_id in range(1,(max_task_id + 1)):
        input_file = output_dir+'/'+sample_name+'_mutations_candidate.'+str(task_id)+'.'+annovar_buildver+'_multianno.txt'
os.unlink(input_file)
    for task_id in range(1,(max_task_id + 1)):
        # remove the per-task intermediate files; this loop is equivalent to the
        # original unrolled os.path.exists()/os.unlink() checks for each suffix
        for suffix in ('fisher_mutations', 'hotspot_mutations', 'fisher_hotspot_mutations',
                       'realignment_mutations', 'indel_mutations', 'breakpoint_mutations',
                       'simplerepeat_mutations', 'ebfilter_mutations', 'inhouse_normal',
                       'inhouse_tumor', 'HGVD_2013', 'HGVD_2016', 'ExAC', 'HGMD'):
            intermediate_file = output_dir+'/'+sample_name+'.'+suffix+'.'+str(task_id)+'.txt'
            if os.path.exists(intermediate_file):
                os.unlink(intermediate_file)
# parse SV
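# Note added for readability: the stages below are chained with ruffus decorators.
# @follows declares ordering between tasks, and @transform/@collate with formatter()
# derive each task's output path from pieces of its input path (the "{subpath[...]}"
# and "{subdir[...]}" placeholders). This is only an explanatory comment and does not
# change the pipeline's behaviour.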
@follows( link_import_bam )
@follows( markdup )
@transform(parse_sv_bam_list, formatter(), "{subpath[0][2]}/sv/{subdir[0][0]}/{subdir[0][0]}.junction.clustered.bedpe.gz")
def parse_sv(input_file, output_file):
dir_name = os.path.dirname(output_file)
if not os.path.isdir(dir_name): os.mkdir(dir_name)
sample_name = os.path.basename(dir_name)
arguments = {"genomon_sv": genomon_conf.get("SOFTWARE", "genomon_sv"),
"input_bam": input_file,
"output_prefix": output_file.replace(".junction.clustered.bedpe.gz", ""),
"param": genomon_conf.get("sv_parse", "params"),
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"htslib": genomon_conf.get("SOFTWARE", "htslib")}
sv_parse.task_exec(arguments, run_conf.project_root + '/log/' + sample_name , run_conf.project_root + '/script/' + sample_name)
# merge SV
@follows( parse_sv )
@transform(merge_bedpe_list, formatter(".+/(?P<NAME>.+).control_info.txt"), "{subpath[0][2]}/sv/non_matched_control_panel/{NAME[0]}.merged.junction.control.bedpe.gz")
def merge_sv(input_files, output_file):
arguments = {"genomon_sv": genomon_conf.get("SOFTWARE", "genomon_sv"),
"control_info": input_files[0],
"merge_output_file": output_file,
"param": genomon_conf.get("sv_merge", "params"),
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"htslib": genomon_conf.get("SOFTWARE", "htslib")}
sv_merge.task_exec(arguments, run_conf.project_root + '/log/sv_merge', run_conf.project_root + '/script/sv_merge')
# filt SV
@follows( merge_sv )
@transform(filt_bedpe_list, formatter(), "{subpath[0][2]}/sv/{subdir[0][0]}/{subdir[0][0]}.genomonSV.result.filt.txt")
def filt_sv(input_files, output_file):
dir_name = os.path.dirname(output_file)
sample_name = os.path.basename(dir_name)
#sample_yaml = run_conf.project_root + "/sv/config/" + sample_name + ".yaml"
filt_param = ""
for complist in sample_conf.sv_detection:
if sample_name == complist[0]:
if complist[1] != None:
filt_param = filt_param + " --matched_control_bam " + run_conf.project_root + "/bam/" + complist[1] + '/' + complist[1] + ".markdup.bam"
if complist[2] != None:
filt_param = filt_param + " --non_matched_control_junction " + run_conf.project_root +"/sv/non_matched_control_panel/"+ complist[2] +".merged.junction.control.bedpe.gz"
if complist[1] != None:
filt_param = filt_param + " --matched_control_label " + complist[1]
break
filt_param = filt_param.lstrip(' ') + ' ' + genomon_conf.get("sv_filt", "params")
arguments = {"genomon_sv": genomon_conf.get("SOFTWARE", "genomon_sv"),
"input_bam": run_conf.project_root + "/bam/" + sample_name + '/' + sample_name + ".markdup.bam",
"output_prefix": run_conf.project_root + "/sv/" + sample_name + '/' + sample_name,
"reference_genome": genomon_conf.get("REFERENCE", "ref_fasta"),
"annotation_dir": genomon_conf.get("sv_filt", "annotation_dir"),
"param": filt_param,
"meta_info": get_meta_info(["genomon_sv", "sv_utils"]),
"sv_utils": genomon_conf.get("SOFTWARE", "sv_utils"),
"sv_utils_annotation_dir": genomon_conf.get("sv_filt", "sv_utils_annotation_dir"),
"sv_utils_param": genomon_conf.get("sv_filt", "sv_utils_params"),
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"htslib": genomon_conf.get("SOFTWARE", "htslib"),
"blat": genomon_conf.get("SOFTWARE", "blat")}
sv_filt.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/' + sample_name)
# qc
@follows( link_import_bam )
@follows( markdup )
@follows( filt_sv )
@follows( identify_mutations )
@transform(qc_bamstats_list, formatter(), "{subpath[0][2]}/qc/{subdir[0][0]}/{subdir[0][0]}.bamstats")
def bam_stats(input_file, output_file):
dir_name = os.path.dirname(output_file)
sample_name = os.path.basename(dir_name)
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_qc": genomon_conf.get("SOFTWARE", "genomon_qc"),
"bamstats": genomon_conf.get("SOFTWARE", "bamstats"),
"perl5lib": genomon_conf.get("ENV", "PERL5LIB"),
"input_file": input_file,
"output_file": output_file}
r_qc_bamstats.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/' + sample_name)
@follows( link_import_bam )
@follows( markdup )
@follows( filt_sv )
@follows( identify_mutations )
@transform(qc_coverage_list, formatter(), "{subpath[0][2]}/qc/{subdir[0][0]}/{subdir[0][0]}.coverage")
def coverage(input_file, output_file):
dir_name = os.path.dirname(output_file)
sample_name = os.path.basename(dir_name)
data_type = "exome"
if genomon_conf.get("qc_coverage", "wgs_flag") == "True":
data_type = "wgs"
arguments = {"data_type": data_type,
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_qc": genomon_conf.get("SOFTWARE", "genomon_qc"),
"coverage_text": genomon_conf.get("qc_coverage", "coverage"),
"i_bed_lines": genomon_conf.get("qc_coverage", "wgs_i_bed_lines"),
"i_bed_width": genomon_conf.get("qc_coverage", "wgs_i_bed_width"),
"incl_bed_width":genomon_conf.get("qc_coverage", "wgs_incl_bed_width"),
"genome_size_file": genomon_conf.get("REFERENCE", "genome_size"),
"gaptxt": genomon_conf.get("REFERENCE", "gaptxt"),
"bait_file": genomon_conf.get("REFERENCE", "bait_file"),
"samtools_params": genomon_conf.get("qc_coverage", "samtools_params"),
"bedtools": genomon_conf.get("SOFTWARE", "bedtools"),
"samtools": genomon_conf.get("SOFTWARE", "samtools"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"input_file": input_file,
"output_file": output_file}
r_qc_coverage.task_exec(arguments, run_conf.project_root + '/log/' + sample_name , run_conf.project_root + '/script/' + sample_name)
@follows( bam_stats )
@follows( coverage )
@collate(qc_merge_list, formatter(), "{subpath[0][2]}/qc/{subdir[0][0]}/{subdir[0][0]}.genomonQC.result.txt")
def merge_qc(input_files, output_file):
dir_name = os.path.dirname(output_file)
sample_name = os.path.basename(dir_name)
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_qc": genomon_conf.get("SOFTWARE", "genomon_qc"),
"bamstats_file": input_files[0][0],
"coverage_file": input_files[0][1],
"output_file": output_file,
"meta": get_meta_info(["genomon_pipeline"]),
"fastq_line_num_file": run_conf.project_root +'/fastq/'+ sample_name +'/fastq_line_num.txt'}
r_qc_merge.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/' + sample_name)
#####################
# post analysis stage
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(len(pa_inputs_mutation) > 0)
@follows(filt_sv)
@follows(identify_mutations)
@collate(pa_inputs_mutation, formatter(), pa_outputs_mutation["outputs"])
def post_analysis_mutation(input_files, output_file):
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_pa": genomon_conf.get("SOFTWARE", "genomon_pa"),
"mode": "mutation",
"genomon_root": run_conf.project_root,
"output_dir": run_conf.project_root + "/post_analysis/" + sample_conf_name,
"sample_sheet": os.path.abspath(run_conf.sample_conf_file),
"config_file": genomon_conf.get("post_analysis", "config_file"),
"samtools": genomon_conf.get("SOFTWARE", "samtools"),
"bedtools": genomon_conf.get("SOFTWARE", "bedtools"),
"input_file_case1": ",".join(pa_outputs_mutation["case1"]["samples"]),
"input_file_case2": ",".join(pa_outputs_mutation["case2"]["samples"]),
"input_file_case3": ",".join(pa_outputs_mutation["case3"]["samples"]),
"input_file_case4": ",".join(pa_outputs_mutation["case4"]["samples"]),
}
r_post_analysis.task_exec(arguments, run_conf.project_root + '/log/post_analysis', run_conf.project_root + '/script/post_analysis')
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(len(pa_inputs_sv) > 0)
@follows(filt_sv)
@follows(identify_mutations)
@collate(pa_inputs_sv, formatter(), pa_outputs_sv["outputs"])
def post_analysis_sv(input_files, output_file):
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_pa": genomon_conf.get("SOFTWARE", "genomon_pa"),
"mode": "sv",
"genomon_root": run_conf.project_root,
"output_dir": run_conf.project_root + "/post_analysis/" + sample_conf_name,
"sample_sheet": os.path.abspath(run_conf.sample_conf_file),
"config_file": genomon_conf.get("post_analysis", "config_file"),
"samtools": genomon_conf.get("SOFTWARE", "samtools"),
"bedtools": genomon_conf.get("SOFTWARE", "bedtools"),
"input_file_case1": ",".join(pa_outputs_sv["case1"]["samples"]),
"input_file_case2": ",".join(pa_outputs_sv["case2"]["samples"]),
"input_file_case3": ",".join(pa_outputs_sv["case3"]["samples"]),
"input_file_case4": ",".join(pa_outputs_sv["case4"]["samples"]),
}
r_post_analysis.task_exec(arguments, run_conf.project_root + '/log/post_analysis', run_conf.project_root + '/script/post_analysis')
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(len(pa_inputs_qc) > 0)
@follows(merge_qc)
@collate(pa_inputs_qc, formatter(), pa_outputs_qc["outputs"])
def post_analysis_qc(input_files, output_file):
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_pa": genomon_conf.get("SOFTWARE", "genomon_pa"),
"mode": "qc",
"genomon_root": run_conf.project_root,
"output_dir": run_conf.project_root + "/post_analysis/" + sample_conf_name,
"sample_sheet": os.path.abspath(run_conf.sample_conf_file),
"config_file": genomon_conf.get("post_analysis", "config_file"),
"samtools": genomon_conf.get("SOFTWARE", "samtools"),
"bedtools": genomon_conf.get("SOFTWARE", "bedtools"),
"input_file_case1": ",".join(sample_conf.qc),
"input_file_case2": "",
"input_file_case3": "",
"input_file_case4": "",
}
r_post_analysis.task_exec(arguments, run_conf.project_root + '/log/post_analysis', run_conf.project_root + '/script/post_analysis')
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(genomon_conf.getboolean("pmsignature_ind", "enable") or genomon_conf.getboolean("pmsignature_full", "enable"))
@active_if(len(pmsignature_inputs) > 0)
@follows(post_analysis_mutation)
@collate(pmsignature_inputs, formatter(), run_conf.project_root + '/pmsignature/' + sample_conf_name + "/mutation.cut.txt")
def pre_pmsignature(input_files, output_file):
arguments = {"input_files" : " ".join(input_files),
"output_file" : run_conf.project_root + '/pmsignature/' + sample_conf_name + "/mutation.cut.txt"
}
r_pre_pmsignature.task_exec(arguments, run_conf.project_root + '/log/pmsignature', run_conf.project_root + '/script/pmsignature')
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(genomon_conf.getboolean("pmsignature_ind", "enable"))
@active_if(run_ind)
@follows(pre_pmsignature)
@transform(run_conf.project_root + '/pmsignature/' + sample_conf_name + "/mutation.cut.txt", formatter(), ind_outputs[0])
def pmsignature_ind(input_file, output_file):
command = r_pmsignature_ind.ind_template.format(
inputfile = input_file,
outputdir = run_conf.project_root + '/pmsignature/' + sample_conf_name,
trdirflag = genomon_conf.get("pmsignature_ind", "trdirflag").upper(),
trialnum = genomon_conf.getint("pmsignature_ind", "trialnum"),
bs_genome = genomon_conf.get("pmsignature_ind", "bs_genome"),
bgflag = genomon_conf.get("pmsignature_ind", "bgflag"),
txdb_transcript = genomon_conf.get("pmsignature_ind", "txdb_transcript"),
script_path = genomon_conf.get("SOFTWARE", "r_scripts"))
sig_nums = range(genomon_conf.getint("pmsignature_ind", "signum_min"), genomon_conf.getint("pmsignature_ind", "signum_max") + 1)
sig_num_text = ""
for i in sig_nums: sig_num_text += "%d " % i
arguments = {"r_path": genomon_conf.get("ENV", "R_PATH"),
"r_ld_library_path": genomon_conf.get("ENV", "R_LD_LIBRARY_PATH"),
"r_libs": genomon_conf.get("ENV", "R_LIBS"),
"command": command,
"sig_list": sig_num_text
}
max_task_id = len(sig_nums)
r_pmsignature_ind.task_exec(arguments, run_conf.project_root + '/log/pmsignature', run_conf.project_root + '/script/pmsignature', max_task_id)
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(genomon_conf.getboolean("pmsignature_full", "enable"))
@active_if(run_full)
@follows(pre_pmsignature)
@transform(run_conf.project_root + '/pmsignature/' + sample_conf_name + "/mutation.cut.txt", formatter(), full_outputs[0])
def pmsignature_full(input_file, output_file):
command = r_pmsignature_full.full_template.format(
inputfile = input_file,
outputdir = run_conf.project_root + '/pmsignature/' + sample_conf_name,
trdirflag = genomon_conf.get("pmsignature_full", "trdirflag").upper(),
trialnum = genomon_conf.getint("pmsignature_full", "trialnum"),
bgflag = genomon_conf.get("pmsignature_full", "bgflag"),
bs_genome = genomon_conf.get("pmsignature_full", "bs_genome"),
txdb_transcript = genomon_conf.get("pmsignature_full", "txdb_transcript"),
script_path = genomon_conf.get("SOFTWARE", "r_scripts"))
sig_nums = range(genomon_conf.getint("pmsignature_full", "signum_min"), genomon_conf.getint("pmsignature_full", "signum_max") + 1)
sig_num_text = ""
for i in sig_nums: sig_num_text += "%d " % i
arguments = {"r_path": genomon_conf.get("ENV", "R_PATH"),
"r_ld_library_path": genomon_conf.get("ENV", "R_LD_LIBRARY_PATH"),
"r_libs": genomon_conf.get("ENV", "R_LIBS"),
"command": command,
"sig_list": sig_num_text
}
max_task_id = len(sig_nums)
r_pmsignature_full.task_exec(arguments, run_conf.project_root + '/log/pmsignature', run_conf.project_root + '/script/pmsignature', max_task_id)
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(genomon_conf.getboolean("paplot", "enable"))
@active_if(len(paplot_inputs) > 0)
@follows(post_analysis_sv)
@follows(post_analysis_qc)
@follows(pmsignature_ind)
@follows(pmsignature_full)
@collate(paplot_inputs, formatter(), run_conf.project_root + '/paplot/' + sample_conf_name + '/index.html')
def paplot(input_file, output_file):
if not os.path.exists(paplot_output) and os.path.exists(run_conf.project_root + '/paplot/' + sample_conf_name + '/.meta.json'):
os.unlink(run_conf.project_root + '/paplot/' + sample_conf_name + '/.meta.json')
command = ""
if len(paplot_inputs_qc) > 0:
command += r_paplot.qc_template.format(
paplot = genomon_conf.get("SOFTWARE", "paplot"),
inputs = ",".join(paplot_inputs_qc),
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
title = genomon_conf.get("paplot", "title"),
config_file = genomon_conf.get("paplot", "config_file"))
if len(paplot_inputs_sv) > 0:
command += r_paplot.sv_template.format(
paplot = genomon_conf.get("SOFTWARE", "paplot"),
inputs = ",".join(paplot_inputs_sv),
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
title = genomon_conf.get("paplot", "title"),
config_file = genomon_conf.get("paplot", "config_file"))
if len(paplot_inputs_mutation) > 0:
command += r_paplot.mutation_template.format(
paplot = genomon_conf.get("SOFTWARE", "paplot"),
inputs = ",".join(paplot_inputs_mutation),
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
title = genomon_conf.get("paplot", "title"),
config_file = genomon_conf.get("paplot", "config_file"),
annovar = genomon_conf.getboolean("annotation", "active_annovar_flag"))
if genomon_conf.getboolean("pmsignature_ind", "enable"):
for i in range(len(paplot_inputs_ind)):
command += r_paplot.ind_template.format(
paplot = genomon_conf.get("SOFTWARE", "paplot"),
input = paplot_inputs_ind[i],
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
title = genomon_conf.get("paplot", "title"),
config_file = genomon_conf.get("paplot", "config_file"))
if genomon_conf.getboolean("pmsignature_full", "enable"):
for i in range(len(paplot_inputs_full)):
command += r_paplot.full_template.format(
paplot =genomon_conf.get("SOFTWARE", "paplot"),
input = paplot_inputs_full[i],
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
title = genomon_conf.get("paplot", "title"),
config_file = genomon_conf.get("paplot", "config_file"))
remark = genomon_conf.get("paplot", "remarks")
remark += "<ul>"
for item in genomon_conf.get("paplot", "software").split(","):
key = item.split(":")[0].strip(" ").rstrip(" ")
name = item.split(":")[1].strip(" ").rstrip(" ")
try:
version = get_version(key).split("-")
except Exception:
print ("[WARNING] paplot: %s is not defined." % (key))
continue
remark += "<li>" + name + " " + version[-1] + "</li>"
remark += "</ul>"
command += r_paplot.index_template.format(
paplot = genomon_conf.get("SOFTWARE", "paplot"),
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
remarks = remark,
config_file = genomon_conf.get("paplot", "config_file"))
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"paplot": genomon_conf.get("SOFTWARE", "paplot"),
"command": command
}
r_paplot.task_exec(arguments, run_conf.project_root + '/log/paplot', run_conf.project_root + '/script/paplot')
| gpl-2.0 | 2,453,567,865,520,221,000 | 54.965184 | 288 | 0.628586 | false |
twicki/dawn | docs/_extension/cmake.py | 1 | 4098 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
## _
## | |
## __| | __ ___ ___ ___
## / _` |/ _` \ \ /\ / / '_ |
## | (_| | (_| |\ V V /| | | |
## \__,_|\__,_| \_/\_/ |_| |_| - Compiler Toolchain
##
##
## This file is distributed under the MIT License (MIT).
## See LICENSE.txt for details.
##
##===------------------------------------------------------------------------------------------===##
import os
import re
from docutils.parsers.rst import Directive, directives
from docutils.transforms import Transform
from docutils.utils.error_reporting import SafeString, ErrorString
from docutils import io, nodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
from sphinx import addnodes
class CMakeModule(Directive):
""" Declare the cmake-module directive
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'encoding': directives.encoding}
def __init__(self, *args, **keys):
self.re_start = re.compile(r'^#\[(?P<eq>=*)\[\.rst:$')
Directive.__init__(self, *args, **keys)
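    # Illustrative note (not from the original source): the directive extracts embedded
    # reStructuredText from CMake modules written in either bracket or line form, e.g.
    #
    #   #[[.rst:
    #   MyModule
    #   --------
    #   Documentation in reStructuredText.
    #   #]]
    #
    # or
    #
    #   #.rst:
    #   # Documentation lines prefixed with "# ".
    #
    # "MyModule" is a made-up name used only for this example.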
def run(self):
settings = self.state.document.settings
if not settings.file_insertion_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
env = self.state.document.settings.env
rel_path, path = env.relfn2path(self.arguments[0])
path = os.path.normpath(path)
encoding = self.options.get('encoding', settings.input_encoding)
e_handler = settings.input_encoding_error_handler
try:
settings.record_dependencies.add(path)
f = io.FileInput(source_path=path, encoding=encoding,
error_handler=e_handler)
except UnicodeEncodeError as error:
raise self.severe('Problems with "%s" directive path:\n'
'Cannot encode input file path "%s" '
'(wrong locale?).' %
(self.name, SafeString(path)))
except IOError as error:
raise self.severe('Problems with "%s" directive path:\n%s.' %
(self.name, ErrorString(error)))
raw_lines = f.read().splitlines()
f.close()
rst = None
lines = []
for line in raw_lines:
if rst is not None and rst != '#':
# Bracket mode: check for end bracket
pos = line.find(rst)
if pos >= 0:
if line[0] == '#':
line = ''
else:
line = line[0:pos]
rst = None
else:
# Line mode: check for .rst start (bracket or line)
m = self.re_start.match(line)
if m:
rst = ']%s]' % m.group('eq')
line = ''
elif line == '#.rst:':
rst = '#'
line = ''
elif rst == '#':
if line == '#' or line[:2] == '# ':
line = line[2:]
else:
rst = None
line = ''
elif rst is None:
line = ''
lines.append(line)
if rst is not None and rst != '#':
raise self.warning('"%s" found unclosed bracket "#[%s[.rst:" in %s' %
(self.name, rst[1:-1], path))
self.state_machine.insert_input(lines, path)
return []
def setup(app):
app.add_directive('cmake-module', CMakeModule)
# app.add_transform(CMakeTransform)
# app.add_transform(CMakeXRefTransform)
# app.add_domain(CMakeDomain) | mit | 2,626,606,504,850,924,000 | 36.263636 | 100 | 0.464129 | false |
masoodking/LinkPrediction | tsne_python/tsne.py | 1 | 5760 | #
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.5.1, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
# The example can be run by executing: ipython tsne.py -pylab
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
import numpy as Math
import pylab as Plot
def Hbeta(D = Math.array([]), beta = 1.0):
"""Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
# Compute P-row and corresponding perplexity
P = Math.exp(-D.copy() * beta);
sumP = sum(P);
H = Math.log(sumP) + beta * Math.sum(D * P) / sumP;
P = P / sumP;
return H, P;
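# Added sanity check (not part of the original script): when all pairwise distances in
# D are equal, the returned P-row is uniform and the entropy equals log(n), so exp(H)
# recovers the perplexity. For example:
#   H, P = Hbeta(Math.array([1., 1., 1., 1.]), beta=0.5)
#   # P is approximately [0.25, 0.25, 0.25, 0.25] and H is approximately log(4)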
def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):
"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
# Initialize some variables
print "Computing pairwise distances..."
(n, d) = X.shape;
sum_X = Math.sum(Math.square(X), 1);
D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);
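    # Added note: this line is the vectorised form of the identity
    # ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 * x_i . x_j, which yields the full
    # n x n matrix of squared Euclidean distances without an explicit double loop.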
P = Math.zeros((n, n));
beta = Math.ones((n, 1));
logU = Math.log(perplexity);
# Loop over all datapoints
for i in range(n):
# Print progress
if i % 500 == 0:
print "Computing P-values for point ", i, " of ", n, "..."
# Compute the Gaussian kernel and entropy for the current precision
betamin = -Math.inf;
betamax = Math.inf;
Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];
(H, thisP) = Hbeta(Di, beta[i]);
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU;
tries = 0;
while Math.abs(Hdiff) > tol and tries < 50:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i].copy();
if betamax == Math.inf or betamax == -Math.inf:
beta[i] = beta[i] * 2;
else:
beta[i] = (beta[i] + betamax) / 2;
else:
betamax = beta[i].copy();
if betamin == Math.inf or betamin == -Math.inf:
beta[i] = beta[i] / 2;
else:
beta[i] = (beta[i] + betamin) / 2;
# Recompute the values
(H, thisP) = Hbeta(Di, beta[i]);
Hdiff = H - logU;
tries = tries + 1;
# Set the final row of P
P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;
# Return final P-matrix
print "Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta))
return P;
def pca(X = Math.array([]), no_dims = 50):
"""Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
print "Preprocessing the data using PCA..."
(n, d) = X.shape;
X = X - Math.tile(Math.mean(X, 0), (n, 1));
(l, M) = Math.linalg.eig(Math.dot(X.T, X));
Y = Math.dot(X, M[:,0:no_dims]);
return Y;
def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0):
"""Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
The syntaxis of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
# Check inputs
if X.dtype != "float64":
print "Error: array X should have type float64.";
return -1;
#if no_dims.__class__ != "<type 'int'>": # doesn't work yet!
# print "Error: number of dimensions should be an integer.";
# return -1;
# Initialize variables
X = pca(X, initial_dims).real;
(n, d) = X.shape;
max_iter = 200;
initial_momentum = 0.5;
final_momentum = 0.8;
eta = 500;
min_gain = 0.01;
Y = Math.random.randn(n, no_dims);
dY = Math.zeros((n, no_dims));
iY = Math.zeros((n, no_dims));
gains = Math.ones((n, no_dims));
# Compute P-values
P = x2p(X, 1e-5, perplexity);
P = P + Math.transpose(P);
P = P / Math.sum(P);
P = P * 4; # early exaggeration
P = Math.maximum(P, 1e-12);
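    # Added note: scaling P by 4 here is the "early exaggeration" trick from the t-SNE
    # paper; it is reverted below (P = P / 4 when iter == 100) so clusters are pushed
    # apart only during the first iterations.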
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = Math.sum(Math.square(Y), 1);
num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y));
num[range(n), range(n)] = 0;
Q = num / Math.sum(num);
Q = Math.maximum(Q, 1e-12);
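        # Added note: "num" is the unnormalised Student-t kernel with one degree of
        # freedom, i.e. q_ij is proportional to (1 + ||y_i - y_j||^2)^-1; this
        # heavy-tailed kernel in the low-dimensional map is what distinguishes t-SNE
        # from plain SNE.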
# Compute gradient
PQ = P - Q;
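        # Added note: the loop below computes the t-SNE gradient
        # dC/dy_i = 4 * sum_j (p_ij - q_ij) * (1 + ||y_i - y_j||^2)^-1 * (y_i - y_j);
        # the constant factor 4 is omitted here and is effectively absorbed into the
        # learning rate eta.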
for i in range(n):
dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0);
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = momentum * iY - eta * (gains * dY);
Y = Y + iY;
Y = Y - Math.tile(Math.mean(Y, 0), (n, 1));
# Compute current value of cost function
if (iter + 1) % 10 == 0:
C = Math.sum(P * Math.log(P / Q));
print "Iteration ", (iter + 1), ": error is ", C
# Stop lying about P-values
if iter == 100:
P = P / 4;
# Return solution
return Y;
if __name__ == "__main__":
print "Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset."
print "Running example on 2,500 MNIST digits..."
X = Math.loadtxt("d500.txt");
#labels = Math.loadtxt("labels.txt");
text_file = open("l500.txt", "r")
labels = text_file.readlines()
Y = tsne(X, 2, 50, 20.0);
#Plot.scatter(Y[:,0], Y[:,1], 20, labels)
Plot.scatter(
Y[:, 0], Y[:, 1], marker = 'o', c = Y[:, 1],
cmap = Plot.get_cmap('Spectral'))
'''
for label, x, y in zip(labels, Y[:, 0], Y[:, 1]):
Plot.annotate(label, xy = (x, y), xytext = (-20, 20),
textcoords = 'offset points', ha = 'right', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
'''
Plot.show()
| bsd-3-clause | 3,899,504,067,676,658,000 | 29.967742 | 117 | 0.602257 | false |
convexopt/gpkit | gpkit/tests/t_vars.py | 1 | 7635 | """Test VarKey, Variable, VectorVariable, and ArrayVariable classes"""
import unittest
import numpy as np
from gpkit import (Monomial, NomialArray, Variable, VarKey,
VectorVariable, ArrayVariable)
import gpkit
from gpkit.nomials import Variable as PlainVariable
class TestVarKey(unittest.TestCase):
"""TestCase for the VarKey class"""
def test_init(self):
"""Test VarKey initialization"""
# test type
x = VarKey('x')
self.assertEqual(type(x), VarKey)
# test no args
x = VarKey()
self.assertEqual(type(x), VarKey)
y = VarKey(x)
self.assertEqual(x, y)
# test special 'name' keyword overwriting behavior
x = VarKey('x', flavour='vanilla')
self.assertEqual(x.name, 'x')
x = VarKey(name='x')
self.assertEqual(x.name, 'x')
# pylint: disable=redundant-keyword-arg
self.assertRaises(TypeError, lambda: VarKey('x', name='y'))
# pylint: disable=redundant-keyword-arg
self.assertRaises(TypeError, lambda: VarKey(x, name='y'))
def test_eq_neq(self):
"""Test boolean equality operators"""
# no args
vk1 = VarKey()
vk2 = VarKey()
self.assertTrue(vk1 != vk2)
self.assertFalse(vk1 == vk2)
self.assertEqual(vk1, vk1)
V = VarKey('V')
vel = VarKey('V')
self.assertTrue(V == vel)
self.assertFalse(V != vel)
self.assertEqual(vel, vel)
x1 = Variable("x", 3, "m")
x2 = Variable("x", 2, "ft")
x3 = Variable("x", 2, "m")
if gpkit.units:
self.assertNotEqual(x2.key, x3.key)
else: # units don't distinguish variables when they're disabled
self.assertEqual(x2.key, x3.key)
self.assertEqual(x1.key, x3.key)
def test_repr(self):
"""Test __repr__ method"""
for k in ('x', '$x$', 'var_name', 'var name', r"\theta", r'$\pi_{10}$'):
var = VarKey(k)
self.assertEqual(repr(var), k)
# not sure what this means, but I want to know if it changes
for num in (2, 2.0):
v = VarKey(num)
self.assertEqual(v, VarKey(str(num)))
def test_dict_key(self):
"""make sure variables are well-behaved dict keys"""
v = VarKey()
x = VarKey('$x$')
d = {v: 1273, x: 'foo'}
self.assertEqual(d[v], 1273)
self.assertEqual(d[x], 'foo')
d = {VarKey(): None, VarKey(): 12}
self.assertEqual(len(d), 2)
def test_units_attr(self):
"""Make sure VarKey objects have a units attribute"""
x = VarKey('x')
for vk in (VarKey(), x, VarKey(x), VarKey(units='m')):
self.assertTrue(hasattr(vk, 'units'))
class TestVariable(unittest.TestCase):
"""TestCase for the Variable class"""
def test_init(self):
"""Test Variable initialization"""
v = Variable('v')
self.assertTrue(isinstance(v, PlainVariable))
self.assertTrue(isinstance(v, Monomial))
# test that operations on Variable cast to Monomial
self.assertTrue(isinstance(3*v, Monomial))
self.assertFalse(isinstance(3*v, PlainVariable))
def test_value(self):
"""Detailed tests for value kwarg of __init__"""
a = Variable('a')
b = Variable('b', value=4)
c = a**2 + b
self.assertEqual(b.value, 4)
self.assertTrue(isinstance(b.value, float))
p1 = c.value
p2 = a**2 + 4
self.assertEqual(p1, p2)
self.assertEqual(a.value, a)
def test_hash(self):
x1 = Variable("x", "-", "first x")
x2 = Variable("x", "-", "second x")
self.assertEqual(hash(x1), hash(x2))
p1 = Variable("p", "psi", "first pressure")
p2 = Variable("p", "psi", "second pressure")
self.assertEqual(hash(p1), hash(p2))
xu = Variable("x", "m", "x with units")
if gpkit.units:
self.assertNotEqual(hash(x1), hash(xu))
else: # units don't distinguish variables when they're disabled
self.assertEqual(hash(x1), hash(xu))
def test_unit_parsing(self):
x = Variable("x", "s^0.5/m^0.5")
y = Variable("y", "(m/s)^-0.5")
self.assertEqual(x.units, y.units)
def test_to(self):
if gpkit.units:
x = Variable("x", "ft")
self.assertEqual(x.to("inch").c.magnitude, 12)
def test_eq_ne(self):
# test for #1138
W = Variable("W", 5, "lbf", "weight of 1 bag of sugar")
self.assertTrue(W != W.key)
self.assertTrue(W.key != W)
self.assertFalse(W == W.key)
self.assertFalse(W.key == W)
class TestVectorVariable(unittest.TestCase):
"""TestCase for the VectorVariable class.
Note: more relevant tests in t_posy_array."""
def test_init(self):
"""Test VectorVariable initialization"""
# test 1
n = 3
v = VectorVariable(n, 'v', label='dummy variable')
self.assertTrue(isinstance(v, NomialArray))
v_mult = 3*v
for i in range(n):
self.assertTrue(isinstance(v[i], PlainVariable))
self.assertTrue(isinstance(v[i], Monomial))
# test that operations on Variable cast to Monomial
self.assertTrue(isinstance(v_mult[i], Monomial))
self.assertFalse(isinstance(v_mult[i], PlainVariable))
# test 2
x = VectorVariable(3, 'x', label='dummy variable')
x_0 = Monomial('x', idx=(0,), shape=(3,), label='dummy variable')
x_1 = Monomial('x', idx=(1,), shape=(3,), label='dummy variable')
x_2 = Monomial('x', idx=(2,), shape=(3,), label='dummy variable')
x2 = NomialArray([x_0, x_1, x_2])
self.assertEqual(x, x2)
# test inspired by issue 137
N = 20
x_arr = np.arange(0, 5., 5./N) + 1e-6
x = VectorVariable(N, 'x', x_arr, 'm', "Beam Location")
def test_constraint_creation_units(self):
v = VectorVariable(2, "v", "m/s")
c = (v >= 40*gpkit.units("ft/s"))
c2 = (v >= np.array([20, 30])*gpkit.units("ft/s"))
if gpkit.units:
self.assertTrue(c.right.units)
self.assertTrue(NomialArray(c2.right).units)
else:
self.assertEqual(type(c.right), int)
self.assertEqual(type(c2.right), np.ndarray)
class TestArrayVariable(unittest.TestCase):
"""TestCase for the ArrayVariable class"""
def test_is_vector_variable(self):
"""
Make sure ArrayVariable is a shortcut to VectorVariable
(we want to know if this changes).
"""
self.assertTrue(ArrayVariable is VectorVariable)
def test_str(self):
"""Make sure string looks something like a numpy array"""
x = ArrayVariable((2, 4), 'x')
strx = str(x)
self.assertEqual(strx.count("["), 3)
self.assertEqual(strx.count("]"), 3)
class TestVectorize(unittest.TestCase):
"""TestCase for gpkit.vectorize"""
def test_shapes(self):
with gpkit.Vectorize(3):
with gpkit.Vectorize(5):
y = gpkit.Variable("y")
x = gpkit.VectorVariable(2, "x")
z = gpkit.VectorVariable(7, "z")
self.assertEqual(y.shape, (5, 3))
self.assertEqual(x.shape, (2, 5, 3))
self.assertEqual(z.shape, (7, 3))
TESTS = [TestVarKey, TestVariable, TestVectorVariable, TestArrayVariable,
TestVectorize]
if __name__ == '__main__':
# pylint: disable=wrong-import-position
from gpkit.tests.helpers import run_tests
run_tests(TESTS)
| mit | 2,720,635,913,569,846,000 | 33.863014 | 80 | 0.568173 | false |
GaussDing/django | tests/admin_changelist/tests.py | 1 | 33998 | from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils import formats, six
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, CustomPaginationAdmin,
CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, FilteredChildAdmin, GroupAdmin,
InvitationAdmin, NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin,
SwallowAdmin, site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, CustomIdUser, Event, Genre, Group,
Invitation, Membership, Musician, OrderedObject, Parent, Quartet, Swallow,
UnorderedObject,
)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, admin.site)
request = self.factory.get('/child/')
list_select_related = m.get_list_select_related(request)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
self.assertEqual(cl.queryset.query.select_related, {
'parent': {'name': {}}
})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, admin.site)
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, admin.site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
def get_list_select_related(self, request):
return ('band', 'player')
ia = GetListSelectRelatedAdmin(Invitation, admin.site)
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">-</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
        Verifies that the inclusion tag result_list generates a table with
        default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
        Also a regression test for #13599: verifies that hidden fields are
        rendered in a div outside the table when list_editable is enabled.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
# make sure that list editable fields are rendered in divs correctly
editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
self.assertInHTML('<td class="field-name">%s</td>' % editable_name_field, table_output, msg_prefix='Failed to find "name" list_editable field')
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
self.assertRaises(IncorrectLookupParameters, lambda:
ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m))
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, admin.site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = ChangeList(request, Band, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, admin.site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. The model managed in the
        admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, admin.site)
request = self.factory.get('/quartet/', data={'members': lead.pk})
cl = ChangeList(request, Quartet, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, admin.site)
request = self.factory.get('/chordsband/', data={'members': lead.pk})
cl = ChangeList(request, ChordsBand, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
"""
        Regression tests for #15819: If a field listed in list_filter
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
# Two children with the same name
Child.objects.create(parent=parent, name='Daniel')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
"""
        Regression tests for #15819: If a field listed in search_fields
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
Child.objects.create(parent=parent, name='Danielle')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
"""
        Regression tests for #12893: Pagination in the admin changelist doesn't
        use the queryset set by the ModelAdmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = self.factory.get('/child/')
# Test default queryset
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
"""
Regression test for #13196: output of functions should be localized
in the changelist.
"""
User.objects.create_superuser(
username='super', email='super@localhost', password='secret')
self.client.login(username='super', password='secret')
event = Event.objects.create(date=datetime.date.today())
response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
self.assertContains(response, formats.localize(event.date))
self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, 'Parent object')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ['name', 'age'])
self.assertEqual(list_display_links, ['name'])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['parent'])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
def test_show_all(self):
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get('/child/', data={ALL_VAR: ''})
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, admin.site)
# 200 is the max we'll pass to ChangeList
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 200, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, admin.site)
# 30 is the max we'll pass to ChangeList for this test
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 30, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name='parent')
for i in range(1, 10):
Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/child/', superuser)
response = m.changelist_view(request)
for i in range(1, 10):
link = reverse('admin:admin_changelist_child_change', args=(i,))
self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
"""#15185 -- Allow no links from the 'change list' view grid."""
p = Parent.objects.create(name='parent')
m = NoListDisplayLinksParentAdmin(Parent, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/parent/', superuser)
response = m.changelist_view(request)
link = reverse('admin:admin_changelist_parent_change', args=(p.pk,))
self.assertNotContains(response, '<a href="%s">' % link)
def test_tuple_list_display(self):
"""
Regression test for #17128
(ChangeList failing under Python 2.5 after r16319)
"""
swallow = Swallow.objects.create(
origin='Africa', load='12.34', speed='22.2')
model_admin = SwallowAdmin(Swallow, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
response = model_admin.changelist_view(request)
# just want to ensure it doesn't blow up during rendering
self.assertContains(response, six.text_type(swallow.origin))
self.assertContains(response, six.text_type(swallow.load))
self.assertContains(response, six.text_type(swallow.speed))
def test_deterministic_order_for_unordered_model(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model doesn't have any default ordering defined.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
class UnorderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
admin.site.register(UnorderedObject, UnorderedObjectAdmin)
model_admin = UnorderedObjectAdmin(UnorderedObject, admin.site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
admin.site.unregister(UnorderedObject)
# When no order is defined at all, everything is ordered by '-pk'.
check_results_order()
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
UnorderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
UnorderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
UnorderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
UnorderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
UnorderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model has a manager that defines a default ordering.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
OrderedObject.objects.create(id=counter, bool=True, number=counter)
class OrderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
admin.site.register(OrderedObject, OrderedObjectAdmin)
model_admin = OrderedObjectAdmin(OrderedObject, admin.site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
admin.site.unregister(OrderedObject)
# When no order is defined at all, use the model's default ordering (i.e. 'number')
check_results_order(ascending=True)
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
OrderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
OrderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
OrderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
OrderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
OrderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
child = self._create_superuser('child')
m = DynamicSearchFieldsChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', child)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
"""
Regression tests for ticket #15653: ensure the number of pages
generated for changelist views are correct.
"""
# instantiating and setting up ChangeList object
m = GroupAdmin(Group, admin.site)
request = self.factory.get('/group/')
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
per_page = cl.list_per_page = 10
for page_num, objects_count, expected_page_range in [
(0, per_page, []),
(0, per_page * 2, list(range(2))),
(5, per_page * 11, list(range(11))),
(5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
(6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
(6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
]:
# assuming we have exactly `objects_count` objects
Group.objects.all().delete()
for i in range(objects_count):
Group.objects.create(name='test band')
# setting page number and calculating page range
cl.page_num = page_num
cl.get_results(request)
real_page_range = pagination(cl)['page_range']
self.assertListEqual(
expected_page_range,
list(real_page_range),
)
class AdminLogNodeTestCase(TestCase):
def test_get_admin_log_templatetag_custom_user(self):
"""
Regression test for ticket #20088: admin log depends on User model
having id field as primary key.
The old implementation raised an AttributeError when trying to use
the id field.
"""
context = Context({'user': CustomIdUser()})
template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
template = Template(template_string)
# Rendering should be u'' since this templatetag just logs,
# it doesn't render any string.
self.assertEqual(template.render(context), '')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_changelist.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_changelist'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def test_add_row_selection(self):
"""
        Ensure that the status line for selected rows gets updated correctly (#22038)
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:auth_user_changelist')))
form_id = '#changelist-form'
# Test amount of rows in the Changelist
rows = self.selenium.find_elements_by_css_selector(
'%s #result_list tbody tr' % form_id)
self.assertEqual(len(rows), 1)
# Test current selection
selection_indicator = self.selenium.find_element_by_css_selector(
'%s .action-counter' % form_id)
self.assertEqual(selection_indicator.text, "0 of 1 selected")
# Select a row and check again
row_selector = self.selenium.find_element_by_css_selector(
'%s #result_list tbody tr:first-child .action-select' % form_id)
row_selector.click()
self.assertEqual(selection_indicator.text, "1 of 1 selected")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| bsd-3-clause | -5,703,059,772,062,413,000 | 45.130258 | 164 | 0.629978 | false |
plang85/rough_surfaces | rough_surfaces/plot.py | 1 | 2368 | import numpy as np
from matplotlib import rcParams
import scipy.stats as scst
# TODO get rid of these and we won't need matplotlib in the setup, only for examples
rcParams['font.size'] = 14
rcParams['legend.fontsize'] = 10
rcParams['savefig.dpi'] = 300
rcParams['legend.loc'] = 'upper right'
rcParams['image.cmap'] = 'hot'
def roughness_spectrum(ax, q, C, length_unit, onedim=False):
    ax.loglog(q, C)
    ax.set_xlabel('q (' + length_unit + '$^{-1}$' + ')')
    ax.set_ylabel('C (' + length_unit + '$^{4}$' + ')')
    if onedim:
        ax.set_ylabel('C (' + length_unit + '$^{3}$' + ')')
def roughness(ax, h, dxy, length_unit):
N = h.shape[0]
L = dxy * N
x = np.linspace(-L / 2.0, L / 2.0, N)
XX, YY = np.meshgrid(x, x)
ax.pcolor(XX, YY, h)
ax.axis('equal')
unit_den = ''.join(['(', length_unit, ')'])
ax.set_xlabel('x ' + unit_den)
ax.set_ylabel('y ' + unit_den)
def roughness_histogram(ax, h, length_unit):
bins = 30
ax.hist(h.flatten(), bins, normed=1, color='gray', ec='white')
hsigma = np.std(h)
hspace = np.linspace(h.min(), h.max(), 100)
ax.plot(hspace, scst.norm.pdf(hspace, np.mean(h), hsigma), lw=3, alpha=0.5)
unit_den = ''.join(['(', length_unit, ')'])
ax.set_xlabel('Surface Height ' + unit_den)
ax.set_ylabel('Relative Probability')
def trace(surface, index, axis=0):
if axis == 0:
return surface[index, :]
elif axis == 1:
return surface[:, index]
else:
raise ValueError('axis must be 0(x) or 1(y)')
def traces(ax, surface, displacements=[], index=None, axis=0):
if not index:
index = int(surface.shape[axis] / 2)
surface_trace = trace(surface, index, axis)
ax.plot(surface_trace, label='rigid surface')
if displacements:
for displacement in displacements:
shifted_displacement = displacement - (np.max(displacement) - np.max(surface))
ax.plot(trace(shifted_displacement, index, axis), label='elastic body')
def slope_histogram(ax, h):
bins = 30
g = np.gradient(h)
ax.hist(np.ravel(g), bins, normed=1, color='gray', ec='white')
# hsigma = np.std(h)
# hspace = np.linspace(h.min(), h.max(), 100)
# ax.plot(hspace, scst.norm.pdf(hspace, np.mean(h), hsigma), lw=3, alpha=0.5)
ax.set_xlabel('Surface Slope (-)')
ax.set_ylabel('Relative Probability')
| mit | 3,680,067,757,627,451,400 | 32.352113 | 90 | 0.602618 | false |
vergecurrency/VERGE | test/functional/interface_http.py | 1 | 4779 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import VergeTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (VergeTestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must still be open because verged uses keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| mit | 6,686,118,359,302,574,000 | 43.25 | 106 | 0.626282 | false |
bluesky4485/qiniu4blog | qiniu4blog/qiniu4blog.py | 1 | 3873 | #!/usr/bin/env python3
import os, time, datetime, platform, urllib, hashlib
import qiniu
from mimetypes import MimeTypes
import pyperclip
from os.path import expanduser
import configparser
homedir = expanduser("~")
config = configparser.ConfigParser()
config.read(homedir+'/qiniu.cfg')
mime = MimeTypes()
now = datetime.datetime.now()
try:
bucket = config.get('config', 'bucket')
accessKey = config.get('config', 'accessKey')
secretKey = config.get('config', 'secretKey')
path_to_watch = config.get('config', 'path_to_watch')
enable = config.get('custom_url','enable')
if enable == 'false':
print('custom_url not set')
else:
addr = config.get('custom_url','addr')
except configparser.NoSectionError as err:
    print('Error in config file: ' + str(err))
def setcodeingbyos():
'''่ทๅ็ณป็ปๅนณๅฐ,่ฎพ็ฝฎ็ผ่งฃ็ '''
if 'cygwin' in platform.system().lower():
code = 'GBK'
elif os.name == 'nt' or platform.system() == 'Windows':
code = 'GBK'
elif os.name == 'mac' or platform.system() == 'Darwin':
code = 'utf-8'
elif os.name == 'posix' or platform.system() == 'Linux':
code = 'utf-8'
return code
def set_clipboard(url_list):
for url in url_list:
pyperclip.copy(url)
spam = pyperclip.paste()
def parseRet(retData, respInfo):
'''ๅค็ไธไผ ็ปๆ'''
if retData != None:
print("Upload file success!")
print("Hash: " + retData["hash"])
print("Key: " + retData["key"])
for k, v in retData.items():
if k[:2] == "x:":
print(k + ":" + v)
for k, v in retData.items():
if k[:2] == "x:" or k == "hash" or k == "key":
continue
else:
print(k + ":" + str(v))
else:
print("Upload file failed!")
print("Error: " + respInfo.text_body)
def upload_without_key(bucket, filePath, uploadname):
'''ไธไผ ๆไปถ'''
auth = qiniu.Auth(accessKey, secretKey)
upToken = auth.upload_token(bucket, key=None)
key = uploadname
retData, respInfo = qiniu.put_file(upToken, key, filePath, mime_type=mime.guess_type(filePath)[0])
parseRet(retData, respInfo)
def getkey(filename):
ext = filename[filename.rfind('.'):]
file_path = path_to_watch + '/' + filename
md5 = hashlib.md5(open(file_path, 'rb').read()).hexdigest()
# remote url: filetype/year/month/md5.filetype
remote = ext[1:] + '/' + str(now.year) + '/' + str(now.month) + '/' + md5 + ext
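    # e.g. an uploaded "photo.png" in May 2016 would become
    # "png/2016/5/<md5-of-file>.png" (illustrative values)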
return remote
def main():
print("running ... ...")
before = dict([(f, None) for f in os.listdir(path_to_watch)])
while 1:
time.sleep(1)
after = dict([(f, None) for f in os.listdir(path_to_watch)])
added = [f for f in after if not f in before]
removed = [f for f in before if not f in after]
if added:
print("Added Files: " + ", ".join(added))
# print(added)
url_list = []
for i in added:
filekey = getkey(i)
upload_without_key(bucket, os.path.join(path_to_watch, i), filekey)
if enable == 'true':
url = addr + urllib.parse.quote(filekey)
else:
url = 'http://' + bucket + '.qiniudn.com/' + urllib.parse.quote(filekey)
url_list.append(url)
with open('image_markdown.txt', 'a') as f:
for url in url_list:
image = '![' + added[0] + ']' + '(' + url + ')' + '\n'
f.write(image)
print("image url [markdown] is save in image_markdwon.txt")
set_clipboard(url_list)
if removed:
print("Removed Files: " + ", ".join(removed))
print(removed)
before = after
if __name__ == "__main__":
main()
| mit | -7,276,391,584,932,604,000 | 28.469231 | 102 | 0.549465 | false |
shiroyuki/gallium | gallium/obj/encoder.py | 1 | 1878 | import datetime
from abc import ABC
from dataclasses import asdict, is_dataclass
from enum import Enum
from typing import Any, List, Optional
class PlugIn(ABC):
def can_handle(self, obj: Any) -> bool:
...
def encode(self, obj: Any) -> Any:
...
class DataClassPlugIn(PlugIn):
def can_handle(self, obj: Any) -> bool:
return is_dataclass(obj)
def encode(self, obj: Any) -> Any:
return asdict(obj)
class DateTimePlugIn(PlugIn):
def __init__(self, time_format: Optional[str] = None):
self.__time_format = time_format
def can_handle(self, obj: Any) -> bool:
return isinstance(obj, datetime.datetime)
def encode(self, obj: datetime.datetime) -> Any:
return obj.strftime(self.__time_format) if self.__time_format else obj.isoformat()
class EnumPlugIn(PlugIn):
def can_handle(self, obj: Any) -> bool:
return isinstance(obj, Enum)
def encode(self, obj: Enum) -> Any:
return obj.value
class ObjectEncoder:
def __init__(self):
self.__plug_ins: List[PlugIn] = []
def register(self, plug_in: PlugIn):
self.__plug_ins.append(plug_in)
return self
def encode(self, obj: Any):
for plug_in in self.__plug_ins:
if plug_in.can_handle(obj):
return self.encode(plug_in.encode(obj))
if isinstance(obj, dict):
return {
k: self.encode(v)
for k, v in obj.items()
}
if not isinstance(obj, str) and hasattr(obj, '__iter__'):
return [
self.encode(item)
for item in obj
]
return obj
@staticmethod
def build():
return ObjectEncoder() \
.register(DataClassPlugIn()) \
.register(DateTimePlugIn()) \
.register(EnumPlugIn())
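# Illustrative usage (the data below is hypothetical, not part of this module):
#
#   encoder = ObjectEncoder.build()
#   encoder.encode({"created": datetime.datetime(2021, 1, 1), "status": SomeEnum.ACTIVE})
#
# Registered plug-ins convert dataclasses, datetimes and enums; dicts and
# other iterables are walked recursively by ObjectEncoder.encode().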
| mit | -1,853,982,818,953,678,800 | 24.04 | 90 | 0.571885 | false |
GovReady/govready-q | guidedmodules/module_logic.py | 1 | 95357 | import uuid
from itertools import groupby
from urllib.parse import urlunparse
from django.conf import settings
from jinja2.sandbox import SandboxedEnvironment
from controls.enums.statements import StatementTypeEnum
from controls.oscal import Catalogs, Catalog
from siteapp.settings import GOVREADY_URL
def get_jinja2_template_vars(template):
from jinja2 import meta, TemplateSyntaxError
env = SandboxedEnvironment()
try:
expr = env.parse(template)
except TemplateSyntaxError as e:
raise Exception("expression {} is invalid: {}".format(template, e))
return set(meta.find_undeclared_variables(expr))
class Jinja2Environment(SandboxedEnvironment):
# A Jinja2 Environment for template and expression execution.
intercepted_binops = frozenset(['+'])
def call_binop(self, context, operator, left, right):
# If the operands are RenderedAnswer instances, then unwrap them
# to the raw Python value before executing the operator.
def unwrap(operand):
if isinstance(operand, RenderedAnswer):
operand = operand.answer
return operand
left = unwrap(left)
right = unwrap(right)
# Example from Jinja2 docs about overriding an operator.
#if operator == '+':
# return self.undefined('the power operator is unavailable')
# Call default operator logic.
return SandboxedEnvironment.call_binop(self, context, operator, left, right)
def walk_module_questions(module, callback):
# Walks the questions in depth-first order following the dependency
# tree connecting questions. If a question is a dependency of multiple
# questions, it is walked only once.
#
# callback is called for each question in the module with these arguments:
#
# 1) A ModuleQuestion, after the callback has been called for all
# ModuleQuestions that the question depends on.
# 2) A dictionary that has been merged from the return value of all
# of the callback calls on its dependencies.
# 3) A set of ModuleQuestion instances that this question depends on,
# so that the callback doesn't have to compute it (again) itself.
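    # A minimal sketch of a callback (the name below is illustrative only):
    #
    #     def mark_seen(question, state, deps):
    #         state[question.key] = True
    #         return state
    #
    #     walk_module_questions(module, mark_seen)
    #
    # mark_seen runs only after the callbacks of all of a question's
    # dependencies have run, and the dict it returns is the state that
    # dependent questions will see.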
# Remember each question that is processed so we only process each
# question at most once. Cache the state that it gives.
processed_questions = { }
# Pre-load all of the dependencies between questions in this module
# and get the questions that are not depended on by any question,
# which is where the dependency chains start.
dependencies, root_questions = get_all_question_dependencies(module)
# Local function that processes a single question.
def walk_question(q, stack):
# If we've seen this question already as a dependency of another
# question, then return its state dict from last time.
if q.key in processed_questions:
return processed_questions[q.key]
# Prevent infinite recursion.
if q.key in stack:
raise ValueError("Cyclical dependency in questions: " + "->".join(stack + [q.key]))
# Execute recursively on the questions it depends on,
# in module definition order rather than in a random
# order.
state = { }
deps = list(dependencies[q])
deps.sort(key = lambda q : q.definition_order)
for qq in deps:
state.update(walk_question(qq, stack+[q.key]))
# Run the callback and get its state.
state = callback(q, state, dependencies[q])
# Remember the state in case we encounter it later.
processed_questions[q.key] = dict(state) # clone
# Return the state to the caller.
return state
# Walk the dependency root questions in document order.
root_questions = list(root_questions)
root_questions.sort(key = lambda q : q.definition_order)
for q in root_questions:
walk_question(q, [])
def evaluate_module_state(current_answers, parent_context=None):
# Compute the next question to ask the user, given the user's
# answers to questions so far, and all imputed answers up to
# that point.
#
# To figure this out, we walk the dependency tree of questions
# until we arrive at questions that have no unanswered dependencies.
# Such questions can be put forth to the user.
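    # The ModuleAnswers object returned at the end of this function exposes
    # the results (illustrative use):
    #   ret.unanswered  # questions still needing an answer, in definition order
    #   ret.can_answer  # unanswered questions the user could answer right now
    #   ret.answerable  # questions not imputed/unavailable (answered or not)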
    # Build a set of ModuleQuestions that are answerable by the user, i.e.
    # not rendered unanswerable because they are imputed or unavailable.
    # These questions may have no answer yet or may be updated with a new answer.
answerable = set()
    # Build a set of ModuleQuestions that the user may answer now,
    # excluding questions that have already been answered.
can_answer = set()
# Build a list of ModuleQuestions that still need an answer,
# including can_answer and unanswered ModuleQuestions that
# have dependencies that are unanswered and need to be answered
# first before the questions in this list can be answered.
unanswered = set()
# Build a new array of answer values.
from collections import OrderedDict
answertuples = OrderedDict()
# Build a list of questions whose answers were imputed.
was_imputed = set()
# Create some reusable context for evaluating impute conditions --- really only
# so that we can pass down project and organization values. Everything else is
# cleared from the context's cache for each question because each question sees
# a different set of dependencies.
impute_context_parent = TemplateContext(
ModuleAnswers(current_answers.module, current_answers.task, {}), lambda _0, _1, _2, _3, value : str(value), # escapefunc
parent_context=parent_context)
# Visitor function.
def walker(q, state, deps):
# If any of the dependencies don't have answers yet, then this question
# cannot be processed yet.
for qq in deps:
if qq.key not in state:
unanswered.add(q)
answertuples[q.key] = (q, False, None, None)
return { }
# Can this question's answer be imputed from answers that
# it depends on? If the user answered this question (during
# a state in which it wasn't imputed, but now it is), the
# user's answer is overridden with the imputed value for
# consistency with the Module's logic.
# Before running impute conditions below, we need a TemplateContext
# which provides the functionality of resolving variables mentioned
# in the impute condition. The TemplateContext that we use here is
# different from the one we normally use to render output documents
# because an impute condition in a question should not be able to see
# the answers to questions that come later in the module. The purpose
# of evaluate_module_state is, in part, to get the answers to questions,
        # including imputed answers, and so the answers to later questions are not
        # yet known. Therefore, we construct a TemplateContext that only includes
# the answers to questions that we've computed so far.
impute_context = TemplateContext(
ModuleAnswers(current_answers.module, current_answers.task, state),
impute_context_parent.escapefunc, parent_context=impute_context_parent, root=True)
v = run_impute_conditions(q.spec.get("impute", []), impute_context)
if v:
# An impute condition matched. Unwrap to get the value.
answerobj = None
v = v[0]
was_imputed.add(q.key)
elif q.key in current_answers.as_dict():
# The user has provided an answer to this question. Question can be updated.
answerobj = current_answers.get(q.key)
v = current_answers.as_dict()[q.key]
answerable.add(q)
elif current_answers.module.spec.get("type") == "project" and q.key == "_introduction":
# Projects have an introduction but it isn't displayed as a question.
# It's not explicitly answered, but treat it as answered so that questions
# that implicitly depend on it can be evaluated.
# TODO: Is this still necessary?
answerobj = None
v = None
else:
# This question does not have an answer yet. We don't set
# anything in the state that we return, which flags that
# the question is not answered.
#
# But we can remember that this question *can* be answered
# by the user, and that it's not answered yet.
answerable.add(q)
can_answer.add(q)
unanswered.add(q)
answertuples[q.key] = (q, False, None, None)
return state
# Update the state that's passed to questions that depend on this
# and also the global state of all answered questions.
state[q.key] = (q, True, answerobj, v)
answertuples[q.key] = (q, True, answerobj, v)
return state
# Walk the dependency tree.
walk_module_questions(current_answers.module, walker)
# There may be multiple routes through the tree of questions,
# so we'll prefer the question that is defined first in the spec.
can_answer = sorted(can_answer, key = lambda q : q.definition_order)
# The list of unanswered questions should be in the same order as
# can_answer so that as the user goes through the questions they
# are following the same order as the list of upcoming questions.
# Ideally we'd form both can_answer and unanswered in the same way
# in the right order without needing to sort later, but until then
# we'll just sort both.
unanswered = sorted(unanswered, key = lambda q : q.definition_order)
# Create a new ModuleAnswers object that holds the user answers,
# imputed answers (which override user answers), and next-question
# information.
ret = ModuleAnswers(current_answers.module, current_answers.task, answertuples)
ret.was_imputed = was_imputed
ret.unanswered = unanswered
ret.can_answer = can_answer
ret.answerable = answerable
return ret
def get_question_context(answers, question):
# What is the context of questions around the given question so show
# the user their progress through the questions?
# Create an object to lazy-render values, since we only use it on
# the module-finished page and not to display context on question
# pages.
from guidedmodules.module_logic import TemplateContext, RenderedAnswer, HtmlAnswerRenderer
class LazyRenderedAnswer:
def __init__(self, q, is_answered, answer_obj, answer_value):
self.q = q
self.is_answered = is_answered
self.answer_obj = answer_obj
self.answer_value = answer_value
def __call__(self):
if not self.is_answered:
return "<i>not answered</i>"
if self.q.spec["type"] == "interstitial":
return "<i>seen</i>"
if self.answer_value is None:
return "<i>skipped</i>"
if not hasattr(LazyRenderedAnswer, 'tc'):
LazyRenderedAnswer.tc = TemplateContext(answers, HtmlAnswerRenderer(show_metadata=False))
return RenderedAnswer(answers.task, self.q, self.is_answered, self.answer_obj, self.answer_value, LazyRenderedAnswer.tc).__html__()
answers.as_dict() # force lazy-load
context = []
for q, is_answered, answer_obj, answer_value in answers.answertuples.values():
# Sometimes we want to skip imputed questions, but for the sake
# of the authoring tool we need to keep imputed questions so
# the user can navigate to them.
context.append({
"key": q.key,
"title": q.spec['title'],
"link": answers.task.get_absolute_url_to_question(q),
# Any question that has been answered or can be answered next can be linked to,
"can_link": (answer_obj or q in answers.can_answer),
"imputed": is_answered and answer_obj is None,
"skipped": (answer_obj is not None and answer_value is None) and (q.spec["type"] != "interstitial"),
"answered": answer_obj is not None,
"reviewed": answer_obj.reviewed if answer_obj is not None else None,
"is_this_question": (question is not None) and (q.key == question.key),
"value": LazyRenderedAnswer(q, is_answered, answer_obj, answer_value),
"definition_order": q.definition_order
})
# Sort list of questions by definition_order
from operator import itemgetter
context_sorted = sorted(context, key=itemgetter('definition_order'))
return context_sorted
def oscal_context(answers):
"""
Generate a dictionary of values useful for rendering OSCAL.
Lots of work in progress here!
"""
# sometimes we run into answers w/o a task, in which case
# there is not much we can do
if not hasattr(answers, 'task'):
return dict()
project = answers.task.project
system = project.system
# TODO: where do we get the catalog key from?
catalog_key = Catalogs.NIST_SP_800_53_rev4
catalog = Catalog.GetInstance(catalog_key)
# build a component from an Element
def _component(e):
return {
'uuid': e.uuid,
'title': e.name,
'description': e.description,
'state': e.component_state,
'type': e.component_type
}
components = [_component(e) for e in system.producer_elements]
# collect all the control implementation statements
statements = system.root_element.statements_consumed \
.filter(statement_type=StatementTypeEnum.CONTROL_IMPLEMENTATION.name) \
.order_by('sid')
# and all the project's organizational parameters
params = project.get_parameter_values(catalog_key)
# loop over all statements, grouped by control id and
# build a list of implemented_requirements
implemented_requirements = []
for control_id, group in groupby(statements, lambda s: s.sid):
ir = {
"control_id": control_id,
"uuid": str(uuid.uuid4()),
"statements": []
}
param_ids = catalog.get_parameter_ids_for_control(control_id)
ir["parameter_settings"] = [
dict(param_id=param_id, value=params.get(param_id))
for param_id in param_ids
if params.get(param_id)
]
# loop over all the statements for this control, grouped by
# "part id". I.e., "ac-1.a", "ac-1.b", etc.
for pid, group in groupby(sorted(group, key=lambda s: s.pid),
lambda s: s.pid):
# useful to extract the statement id from the first statement
# (should be the same for all the statements in this group)
group = list(group)
first_statement = group[0]
statement = {
"id": first_statement.oscal_statement_id,
"uuid": str(uuid.uuid4()),
"by_components": []
}
# assumption: at this point, each statement in the group
# has been contributed by a different component. if
# assumption is not valid, we'll have to fix this code a
            # bit, since OSCAL doesn't obviously support multiple
# contributions to a statement from the same component
for s in group:
by_component = {
"uuid": str(s.uuid),
"component_uuid": s.producer_element.uuid,
"description": s.body
}
statement["by_components"].append(by_component)
ir['statements'].append(statement)
implemented_requirements.append(ir)
# TODO: placeholder for information types -- should be able to pull this out
# from questionnaire
security_body = project.system.get_security_impact_level
    confidentiality = security_body.get("security_objective_confidentiality", "UNKNOWN")
    integrity = security_body.get("security_objective_integrity", "UNKNOWN")
    availability = security_body.get("security_objective_availability", "UNKNOWN")
information_types = [
{
"uuid": str(uuid.uuid4()),
"title": "UNKNOWN information type title",
# "categorizations": [], # TODO https://doi.org/10.6028/NIST.SP.800-60v2r1
"description": "information type description",
"confidentiality_impact": confidentiality,
"integrity_impact": integrity,
"availability_impact": availability
}
]
# generate a URL to reference this system's OSCAL profile (baseline)
# TODO: fix url pattern matching for backward compatibility, figure out profile usage
# profile_path = reverse('profile_oscal_json', kwargs=dict(system_id=system.id))
profile = urlunparse((GOVREADY_URL.scheme, GOVREADY_URL.netloc,
"profile_path",
None, None, None))
return {
"uuid": str(uuid.uuid4()), # SSP UUID
"make_uuid": uuid.uuid4, # so we can gen UUIDS if needed in the templates
"version": float(project.version),
"profile": profile,
"oscal_version": "1.0.0rc1",
"last_modified": str(project.updated),
"system_id": f"govready-{system.id}",
"system_authorization_boundary": "System authorization boundary, TBD", # TODO
"system_security_impact_level_confidentiality":confidentiality,
"system_security_impact_level_integrity": integrity,
"system_security_impact_level_availability": availability,
"system_operating_status": "operational", # TODO: need from questionnaire, but wrong format
"components": components,
"implemented_requirements": implemented_requirements,
"information_types": information_types
}
def render_content(content, answers, output_format, source,
additional_context={}, demote_headings=True,
show_answer_metadata=False, use_data_urls=False,
is_computing_title=False):
# Renders content (which is a dict with keys "format" and "template")
# into the requested output format, using the ModuleAnswers in answers
# to provide the template context.
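    # A minimal illustrative call (the argument values are hypothetical):
    #
    #     render_content(
    #         {"format": "markdown", "template": "# Hello"},
    #         None,                 # no ModuleAnswers available
    #         "html",
    #         "example template")
    #
    # returns an HTML string; when answers are supplied, {{variable}}
    # substitutions are resolved against the rendered question answers.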
# Get the template.
template_format = content["format"]
template_body = content["template"]
# Markdown cannot be used with Jinja2 because auto-escaping is highly
# context dependent. For instance, Markdown can have HTML literal blocks
# and in those blocks the usual backslash-escaping is replaced with
# HTML's usual &-escaping. Also, when a template value is substituted
# inside a Markdown block like a blockquote, newlines in the substituted
# value will break the logical structure of the template without some
# very complex handling of adding line-initial whitespace and block
# markers ("*", ">").
#
# So we must convert Markdown to another format prior to running templates.
#
# If the output format is HTML, convert the Markdown to HTML.
#
# If the output format is plain-text, treat the Markdown as if it is plain text.
#
# No other output formats are supported.
if template_format == "markdown":
if output_format == "html" or output_format == "PARSE_ONLY":
# Convert the template first to HTML using CommonMark.
if not isinstance(template_body, str): raise ValueError("Template %s has incorrect type: %s" % (source, type(template_body)))
# We don't want CommonMark to mess up template tags, however. If
# there are symbols which have meaning both to Jinaj2 and CommonMark,
# then they may get ruined by CommonMark because they may be escaped.
# For instance:
#
# {% hello "*my friend*" %}
#
# would become
#
# {% hello "<em>my friend</em>" %}
#
# and
#
# [my link]({{variable_holding_url}})
#
# would become a link whose target is
#
# %7B%7Bvariable_holding_url%7D%7D
#
# And that's not good!
#
# Do a simple lexical pass over the template and replace template
# tags with special codes that CommonMark will ignore. Then we'll
# put back the strings after the CommonMark has been rendered into
# HTML, so that the template tags end up in their appropriate place.
#
# Since CommonMark will clean up Unicode in URLs, e.g. in link and
# image URLs, by %-encoding non-URL-safe characters, we have to
# also override CommonMark's URL escaping function at
# https://github.com/rtfd/CommonMark-py/blob/master/CommonMark/common.py#L71
# to not %-encode our special codes. Unfortunately urllib.parse.quote's
# "safe" argument does not handle non-ASCII characters.
from commonmark import inlines
def urlencode_special(uri):
import urllib.parse
return "".join(
urllib.parse.quote(c, safe="/@:+?=&()%#*,") # this is what CommonMark does
if c not in "\uE000\uE001" else c # but keep our special codes
for c in uri)
inlines.normalize_uri = urlencode_special
substitutions = []
import re
def replace(m):
# Record the substitution.
index = len(substitutions)
substitutions.append(m.group(0))
return "\uE000%d\uE001" % index # use Unicode private use area code points
template_body = re.sub(r"{%[\w\W]*?%}|{{.*?}}", replace, template_body)
# Use our CommonMark Tables parser & renderer.
from commonmark_extensions.tables import \
ParserWithTables as CommonMarkParser, \
RendererWithTables as CommonMarkHtmlRenderer
# Subclass the renderer to control the output a bit.
class q_renderer(CommonMarkHtmlRenderer):
def __init__(self):
# Our module templates are currently trusted, so we can keep
# safe mode off, and we're making use of that. Safe mode is
# off by default, but I'm making it explicit. If we ever
# have untrusted template content, we will need to turn
# safe mode on.
super().__init__(options={ "safe": False })
def heading(self, node, entering):
# Generate <h#> tags with one level down from
# what would be normal since they should not
# conflict with the page <h1>.
if entering and demote_headings:
node.level += 1
super().heading(node, entering)
def code_block(self, node, entering):
# Suppress info strings because with variable substitution
# untrusted content could land in the <code> class attribute
# without a language- prefix.
node.info = None
super().code_block(node, entering)
def make_table_node(self, node):
return "<table class='table'>"
template_format = "html"
template_body = q_renderer().render(CommonMarkParser().parse(template_body))
# Put the Jinja2 template tags back that we removed prior to running
# the CommonMark renderer.
def replace(m):
return substitutions[int(m.group(1))]
template_body = re.sub("\uE000(\d+)\uE001", replace, template_body)
elif output_format in ("text", "markdown"):
# Pass through the markdown markup unchanged.
pass
else:
raise ValueError("Cannot render a markdown template to %s in %s." % (output_format, source))
# Execute the template.
if template_format in ("json", "yaml"):
# The json and yaml template types are not rendered in the usual
# way. The template itself is a Python data structure (not a string).
# We will replace all string values in the data structure (except
# dict keys) with what we get by calling render_content recursively
# on the string value, assuming it is a template of plain-text type.
import re
from collections import OrderedDict
import jinja2
env = Jinja2Environment(
autoescape=True,
            undefined=jinja2.StrictUndefined) # see below - we define any undefined variables
context = dict(additional_context) # clone
if answers:
def escapefunc(question, task, has_answer, answerobj, value):
# Don't perform any escaping. The caller will wrap the
# result in jinja2.Markup().
return str(value)
def errorfunc(message, short_message, long_message, **format_vars):
# Wrap in jinja2.Markup to prevent auto-escaping.
return jinja2.Markup("<" + message.format(**format_vars) + ">")
tc = TemplateContext(answers, escapefunc,
root=True,
errorfunc=errorfunc,
source=source,
show_answer_metadata=show_answer_metadata,
is_computing_title=is_computing_title)
context.update(tc)
def walk(value, path, additional_context_2 = {}):
# Render string values through the templating logic.
if isinstance(value, str):
return render_content(
{
"format": "text",
"template": value
},
answers,
"text",
source + " " + "->".join(path),
{ **additional_context, **additional_context_2 }
)
# Process objects with a special "%___" key specially.
# If it has a %for key with a string value, then interpret the string value as
# an expression in Jinja2 which we assume evaluates to a sequence-like object
# and loop over the items in the sequence. For each item, the "%loop" key
# of this object is rendered with the context amended with variable name
# assigned the sequence item.
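            # For example (an illustrative template):
            #   {
            #     "titles": {
            #       "%for": "book in books",
            #       "%loop": "{{ book.title }}"
            #     }
            #   }
            # renders "titles" as a list with one rendered string per item of
            # the "books" sequence in the template context.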
elif isinstance(value, dict) and isinstance(value.get("%for"), str):
# The value of the "%for" key is "variable in expression". Parse that
# first.
m = re.match(r"^(\w+) in (.*)", value.get("%for"), re.I)
if not m:
raise ValueError("%for directive needs 'variable in expression' value")
varname = m.group(1)
expr = m.group(2)
# print(print"%for: expr = ", expr)
condition_func = compile_jinja2_expression(expr)
if output_format == "PARSE_ONLY":
return value
# Evaluate the expression.
context.update(additional_context_2)
seq = condition_func(context)
# print("%for: seq = ", seq)
# Render the %loop key for each item in sequence.
return [
walk(
value.get("%loop"),
path+[str(i)],
{ **additional_context_2, **{ varname: item } })
for i, item in enumerate(seq)
]
# For a %dict key, we will add a dictionary for each element in the
# sequence. The key for the dictionary is specified by value of %key
# item, and the value of the item itself is specified by the %value
elif isinstance(value, dict) and isinstance(value.get("%dict"), str):
# The value of the "%dict" key is "variable in expression". Parse that
# first.
m = re.match(r"^(\w+) in (.*)", value.get("%dict"), re.I)
if not m:
raise ValueError("%dict directive needs 'variable in expression' value")
varname = m.group(1)
expr = m.group(2)
condition_func = compile_jinja2_expression(expr)
if output_format == "PARSE_ONLY":
return value
# Evaluate the expression.
context.update(additional_context_2)
seq = condition_func(context)
# Render the %value key for each item in sequence,
# producing a dict of dicts. Each rendered dict
# must contain a special item with the key "%key".
# The value of "%key" is used to key a dictionary
# containing the remainder of the rendered items.
# E.g.,
# {
# "books": {
# "%dict": "book in books",
# "%value": {
# "%key": "{{ book.id }}",
# "title": "{{ book.title }}",
# "author": "{{ book.author }}"
# }
# }
# }
# will render to:
# {
# "books": {
# "100": {
# "title": "Harry Potter and the Chamber of Secrets",
# "author": "JK"
# },
# "101": {
# "title": "Harry Potter and the Goblet of Fire",
# "author": "JK"
# }
# }
# }
retval = dict()
if "%value" not in value:
raise ValueError("%dict directive missing %value")
item_value = value["%value"]
for i, item in enumerate(seq):
obj = walk(
item_value,
path+[str(i)],
{ **additional_context_2, **{ varname: item } })
if not isinstance(obj, dict):
raise ValueError("%value did not produce a dict")
if "%key" not in obj:
raise ValueError("dict returned by %value had no %key")
dict_key = obj.pop('%key')
retval[dict_key] = obj
return retval
elif isinstance(value, dict) and isinstance(value.get("%if"), str):
# The value of the "%if" key is an expression.
condition_func = compile_jinja2_expression(value["%if"])
if output_format == "PARSE_ONLY":
return value
# Evaluate the expression.
context.update(additional_context_2)
test = condition_func(context)
# If the expression is true, then we render the "%then" key.
if test:
return walk(
value.get("%then"),
path+["%then"],
additional_context_2)
else:
return None
# All other JSON data passes through unchanged.
elif isinstance(value, list):
# Recursively enter each value in the list and re-assemble a new list with
# the return value of this function on each item in the list.
return [
walk(i, path+[str(i)], additional_context_2)
for i in value]
elif isinstance(value, dict):
# Recursively enter each value in each key-value pair in the JSON object.
# Return a new JSON object with the same keys but with the return value
# of this function applied to the value.
return OrderedDict([
( k,
walk(v, path+[k], additional_context_2)
)
for k, v in value.items()
])
else:
# Leave unchanged.
return value
# Render the template. Recursively walk the JSON data structure and apply the walk()
# function to each value in it.
oscal = oscal_context(answers)
value = walk(template_body, [], dict(oscal=oscal) if oscal else {})
# If we're just testing parsing the template, return
# any output now. Since the inner templates may have
# returned a value of any type, we can't serialize back to
# JSON --- pyyaml's safe dumper will raise an error if
# it gets a non-safe value type.
if output_format == "PARSE_ONLY":
return value
# Render to JSON or YAML depending on what was specified on the
# template.
if template_format == "json":
import json
output = json.dumps(value, indent=True)
elif template_format == "yaml":
import rtyaml
output = rtyaml.dump(value)
if output_format == "html":
# Convert to HTML.
import html
return "<pre>" + html.escape(output) + "</pre>"
elif output_format == "text":
# Treat as plain text.
return output
elif output_format == "PARSE_ONLY":
# For tests, callers can use the "PARSE_ONLY" output format to
# stop after the template is prepared.
return output
else:
raise ValueError("Cannot render %s to %s in %s." % (template_format, output_format, source))
elif template_format in ("text", "markdown", "html", "xml"):
# The plain-text and HTML template types are rendered using Jinja2.
#
# The only difference is in how escaping of substituted variables works.
# For plain-text, there is no escaping. For HTML, we render 'longtext'
# anwers as if the user was typing Markdown. That makes sure that
# paragraphs aren't collapsed in HTML, and gives us other benefits.
# For other values we perform standard HTML escaping.
import jinja2
if template_format in ("text", "markdown", "xml"):
def escapefunc(question, task, has_answer, answerobj, value):
# Don't perform any escaping. The caller will wrap the
# result in jinja2.Markup().
return str(value)
def errorfunc(message, short_message, long_message, **format_vars):
# Wrap in jinja2.Markup to prevent auto-escaping.
return jinja2.Markup("<" + message.format(**format_vars) + ">")
elif template_format == "html":
escapefunc = HtmlAnswerRenderer(show_metadata=show_answer_metadata, use_data_urls=use_data_urls)
def errorfunc(message, short_message, long_message, **format_vars):
if output_format == "html" and show_answer_metadata:
                    # In HTML outputs with popovers for answer metadata, use a popover.
                    # TODO: Display detailed error info more explicitly; task-finished.html
                    # renders the popovers in templates.
return jinja2.Markup("""
<span class="text-danger"
data-toggle="popover" data-content="{}">
<{}>
</span>
""".format(jinja2.escape(long_message.format(**format_vars)),
jinja2.escape(short_message.format(**format_vars))))
else:
# Simple error message for HTML output when popovers are not
# being used. Auto-escaping will take care of escaping.
return "<{}>".format(message.format(**format_vars))
# Execute the template.
# Evaluate the template. Ensure autoescaping is turned on. Even though
# we handle it ourselves, we do so using the __html__ method on
# RenderedAnswer, which relies on autoescaping logic. This also lets
# the template writer disable autoescaping with "|safe".
env = Jinja2Environment(
autoescape=True,
            undefined=jinja2.StrictUndefined) # see below - we define any undefined variables
try:
template = env.from_string(template_body)
except jinja2.TemplateSyntaxError as e:
raise ValueError("There was an error loading the Jinja2 template %s: %s, line %d" % (source, str(e), e.lineno))
# For tests, callers can use the "PARSE_ONLY" output format to
# stop after the template is compiled.
if output_format == "PARSE_ONLY":
return template
# Create an intial context dict with the additional_context provided
# by the caller, add additional context variables and functions, and
# add rendered answers into it.
context = dict(additional_context) # clone
if answers and answers.task:
context['static_asset_path_for'] = answers.task.get_static_asset_url
# Render.
try:
# context.update will immediately load all top-level values, which
            # unfortunately might throw an error if something goes wrong
if answers:
tc = TemplateContext(answers, escapefunc,
root=True,
errorfunc=errorfunc,
source=source,
show_answer_metadata=show_answer_metadata,
is_computing_title=is_computing_title)
context.update(tc)
# Define undefined variables. Jinja2 will normally raise an exception
# when an undefined variable is accessed. It can also be set to not
# raise an exception and treat the variables as nulls. As a middle
# ground, we'll render these variables as error messages. This isn't
# great because an undefined variable indicates an incorrectly authored
# template, and rendering the variable might mean no one will notice
# the template is incorrect. But it's probably better UX than having
# a big error message for the output as a whole or silently ignoring it.
for varname in get_jinja2_template_vars(template_body):
context.setdefault(varname, UndefinedReference(varname, errorfunc, [source]))
# Now really render.
output = template.render(context)
except Exception as e:
raise ValueError("There was an error executing the template %s: %s" % (source, str(e)))
# Convert the output to the desired output format.
if template_format == "text":
if output_format == "text":
# text => text (nothing to do)
return output
# TODO: text => markdown
elif output_format == "html":
# convert text to HTML by ecaping and wrapping in a <pre> tag
import html
return "<pre>" + html.escape(output) + "</pre>"
elif template_format == "markdown":
if output_format == "text":
# TODO: markdown => text, for now just return the Markdown markup
return output
elif output_format == "markdown":
# markdown => markdown -- nothing to do
return output
elif template_format == "xml":
if output_format == "text":
                # TODO: xml => text, for now just return the XML markup
return output
elif output_format == "markdown":
                # xml => markdown -- nothing to do
return output
# markdown => html never occurs because we convert the Markdown to
# HTML earlier and then we see it as html => html.
elif template_format == "html":
if output_format == "html":
# html => html
#
# There is no data transformation, but we must check that no
# unsafe content was inserted by variable substitution ---
# in particular, unsafe URLs like javascript: and data: URLs.
# When the content comes from a Markdown template, unsafe content
# can only end up in <a> href's and <img> src's. If the template
# has unsafe content like raw HTML, then it is up to the template
# writer to ensure that variable substitution does not create
# a vulnerability.
#
# We also rewrite non-absolute URLs in <a> href's and <img> src
# to allow for linking to module-defined static content.
#
# This also fixes the nested <p>'s within <p>'s when a longtext
# field is rendered.
def rewrite_url(url, allow_dataurl=False):
# Rewrite for static assets.
if answers and answers.task:
url = answers.task.get_static_asset_url(url, use_data_urls=use_data_urls)
# Check final URL.
import urllib.parse
u = urllib.parse.urlparse(url)
# Allow data URLs in some cases.
if use_data_urls and allow_dataurl and u.scheme == "data":
return url
if u.scheme not in ("", "http", "https", "mailto"):
return "javascript:alert('Invalid link.');"
return url
import html5lib
dom = html5lib.HTMLParser().parseFragment(output)
for node in dom.iter():
if node.get("href"):
node.set("href", rewrite_url(node.get("href")))
if node.get("src"):
node.set("src", rewrite_url(node.get("src"), allow_dataurl=(node.tag == "{http://www.w3.org/1999/xhtml}img")))
output = html5lib.serialize(dom, quote_attr_values="always", omit_optional_tags=False, alphabetical_attributes=True)
# But the p's within p's fix gives us a lot of empty p's.
output = output.replace("<p></p>", "")
return output
raise ValueError("Cannot render %s to %s." % (template_format, output_format))
else:
raise ValueError("Invalid template format encountered: %s." % template_format)
class HtmlAnswerRenderer:
def __init__(self, show_metadata, use_data_urls=False):
self.show_metadata = show_metadata
self.use_data_urls = use_data_urls
def __call__(self, question, task, has_answer, answerobj, value):
import html
if question is not None and question.spec["type"] == "longtext":
# longtext fields are rendered into the output
# using CommonMark. Escape initial <'s so they
# are not treated as the start of HTML tags,
# which are not permitted in safe mode, but
# <'s appear tag-like in certain cases like
            # when we say <not answered>.
if value.startswith("<"): value = "\\" + value
from commonmark_extensions.tables import \
ParserWithTables as CommonMarkParser, \
RendererWithTables as CommonMarkHtmlRenderer
parsed = CommonMarkParser().parse(value)
value = CommonMarkHtmlRenderer({ "safe": True }).render(parsed)
wrappertag = "div"
elif question is not None and question.spec["type"] == "file" \
and hasattr(value, "file_data"):
# Files turn into link tags, possibly containing a thumbnail
# or the uploaded image itself.
img_url = None
if self.use_data_urls and value.file_data.get("thumbnail_dataurl"):
img_url = value.file_data["thumbnail_dataurl"]
elif self.use_data_urls and value.file_data.get("content_dataurl"):
img_url = value.file_data["content_dataurl"]
elif self.use_data_urls:
img_url = "data:"
elif value.file_data.get("thumbnail_url"):
img_url = value.file_data["thumbnail_url"]
elif question.spec.get("file-type") == "image":
img_url = value.file_data['url']
from jinja2.filters import do_filesizeformat
label = "Download attachment ({format}; {size}; {date})".format(
format=value.file_data["type_display"],
size=do_filesizeformat(value.file_data['size']),
date=answerobj.created.strftime("%x") if answerobj else "",
)
if not img_url:
# no thumbnail
value = """<p><a href="%s">%s</a></p>""" % (
html.escape(value.file_data['url']),
label,
)
else:
# has a thumbnail
# used to have max-height: 100vh; here but wkhtmltopdf understands it as 0px
value = """
<p>
<a href="%s" class="user-media">
<img src="%s" class="img-responsive" style=" border: 1px solid #333; margin-bottom: .25em;">
<div style='font-size: 90%%;'>%s</a></div>
</a>
</p>""" % (
html.escape(value.file_data['url']),
html.escape(img_url or ""),
label,
)
wrappertag = "div"
elif question is not None and question.spec["type"] == "datagrid":
            # Assuming that RenderedAnswer gives us a string version of the stored
            # datagrid object, which is an array of dictionaries.
import ast
try:
# Get datagrid data if datagrid question has been answered with information
datagrid_rows = ast.literal_eval(value)
except:
if value == "<nothing chosen>":
# Datagrid question has been visited and instantiated but no answer given
# No data was entered into data grid
datagrid_rows = []
else:
# Datagrid question has not been visited and not yet instantiated
# `value` is set to "<Software Inventory (datagrid)>"
datagrid_rows = []
if "render" in question.spec and question.spec["render"] == "vertical":
# Build a vertical table to display datagrid information
value = ""
for item in datagrid_rows:
# Start a new table
value += "<table class=\"table\">\n"
# Create a row for each field
for field in question.spec["fields"]:
value += "<tr><td class=\"td_datagrid_vertical\">{}</td><td>{}</td></tr>".format(html.escape(str(field["text"])), html.escape(str(item[field["key"]])))
value += "\n</table>"
else:
# Build a standard table to display datagrid information
value = "<table class=\"table\">\n"
value += "<thead>\n<tr>"
# To get the correct order, get keys from question specification fields
for field in question.spec["fields"]:
value += "<th>{}</th>".format(html.escape(str(field["text"])))
value += "</tr></thead>\n"
for item in datagrid_rows:
value += "<tr>"
# To get the correct order, get keys from question specification fields
for field in question.spec["fields"]:
value += "<td>{}</td>".format(html.escape(str(item[field["key"]])))
value += "</tr>\n"
# value = html.escape(str(datagrid_rows))
value += "\n</table>"
wrappertag = "div"
else:
# Regular text fields just get escaped.
value = html.escape(str(value))
wrappertag = "span"
if (not self.show_metadata) or (question is None):
return value
# Wrap the output in a tag that holds metadata.
# If the question is imputed...
if has_answer and not answerobj:
return """<{tag} class='question-answer'
data-module='{module}'
data-question='{question}'
data-answer-type='{answer_type}'
{edit_link}
>{value}</{tag}>""".format(
tag=wrappertag,
module=html.escape(question.module.spec['title']),
question=html.escape(question.spec["title"]),
answer_type="skipped" if not has_answer else "imputed",
edit_link="",
value=value,
)
# If the question is unanswered...
if not answerobj:
return """<{tag} class='question-answer'
data-module='{module}'
data-question='{question}'
data-answer-type='{answer_type}'
{edit_link}
>{value}</{tag}>""".format(
tag=wrappertag,
module=html.escape(question.module.spec['title']),
question=html.escape(question.spec["title"]),
answer_type="skipped" if not has_answer else "imputed",
edit_link=("data-edit-link='" + task.get_absolute_url_to_question(question) + "'") if task else "",
value=value,
)
# If the question is answered (by a user).
return """<{tag} class='question-answer'
data-module='{module}'
data-question='{question}'
data-answer-type='user-answer'
data-edit-link='{edit_link}'
data-answered-by='{answered_by}'
data-answered-on='{answered_on}'
data-reviewed='{reviewed}'
>{value}</{tag}>""".format(
tag=wrappertag,
module=html.escape(question.module.spec['title']),
question=html.escape(question.spec["title"]),
edit_link=answerobj.taskanswer.get_absolute_url(),
answered_by=html.escape(str(answerobj.answered_by)),
answered_on=html.escape(answerobj.created.strftime("%c")),
reviewed=str(answerobj.reviewed),
value=value,
)
def clear_module_question_cache():
if hasattr(get_all_question_dependencies, 'cache'):
del get_all_question_dependencies.cache
def get_all_question_dependencies(module):
# Initialize cache, query cache.
if not hasattr(get_all_question_dependencies, 'cache'):
get_all_question_dependencies.cache = { }
if module.id in get_all_question_dependencies.cache:
return get_all_question_dependencies.cache[module.id]
# Pre-load all of the questions by their key so that the dependency
# evaluation is fast.
all_questions = { }
for q in module.questions.all():
all_questions[q.key] = q
# Compute all of the dependencies of all of the questions.
dependencies = {
q: get_question_dependencies(q, get_from_question_id=all_questions)
for q in all_questions.values()
}
# Find the questions that are at the root of the dependency tree.
is_dependency_of_something = set()
for deps in dependencies.values():
is_dependency_of_something |= deps
root_questions = { q for q in dependencies if q not in is_dependency_of_something }
ret = (dependencies, root_questions)
# Save to in-memory (in-process) cache. Never in debugging.
if not settings.DEBUG:
get_all_question_dependencies.cache[module.id] = ret
return ret
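# Illustrative usage sketch (not part of the original source; `module` stands for
# any Module instance with a `questions` relation):
#
#   dependencies, root_questions = get_all_question_dependencies(module)
#   for question, deps in dependencies.items():
#       print(question.key, "depends on", sorted(d.key for d in deps))
#
# `dependencies` maps each ModuleQuestion to the set of questions it depends on;
# `root_questions` holds the questions that nothing else depends on.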
def get_question_dependencies(question, get_from_question_id=None):
return set(edge[1] for edge in get_question_dependencies_with_type(question, get_from_question_id))
def get_question_dependencies_with_type(question, get_from_question_id=None):
if get_from_question_id is None:
# dict-like interface
class GetFromQuestionId:
def __getitem__(self, qid):
return question.module.questions.filter(key=qid).get()
def __contains__(self, qid):
return question.module.questions.filter(key=qid).exists()
get_from_question_id = GetFromQuestionId()
# Returns a set of ModuleQuestion instances that this question is dependent on
# as a list of edges that are tuples of (edge_type, question obj).
ret = []
# All questions mentioned in prompt text become dependencies.
for qid in get_jinja2_template_vars(question.spec.get("prompt", "")):
ret.append(("prompt", qid))
# All questions mentioned in the impute conditions become dependencies.
# And when impute values are expressions, then similarly for those.
for rule in question.spec.get("impute", []):
if "condition" in rule:
for qid in get_jinja2_template_vars(
r"{% if (" + rule["condition"] + r") %}...{% endif %}"
):
ret.append(("impute-condition", qid))
if rule.get("value-mode") == "expression":
for qid in get_jinja2_template_vars(
r"{% if (" + rule["value"] + r") %}...{% endif %}"
):
ret.append(("impute-value", qid))
if rule.get("value-mode") == "template":
for qid in get_jinja2_template_vars(rule["value"]):
ret.append(("impute-value", qid))
# Other dependencies can just be listed.
for qid in question.spec.get("ask-first", []):
ret.append(("ask-first", qid))
# Turn IDs into ModuleQuestion instances.
return [ (edge_type, get_from_question_id[qid])
for (edge_type, qid) in ret
if qid in get_from_question_id
]
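# Illustrative example (an assumption, not from the original source): for a question
# whose prompt references {{q_os}} and which imputes a value from q_arch, the edge
# list returned above would look roughly like
#   [("prompt", <ModuleQuestion q_os>), ("impute-condition", <ModuleQuestion q_arch>)]
# with any IDs not present in get_from_question_id silently dropped by the filter.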
jinja2_expression_compile_cache = { }
def compile_jinja2_expression(expr):
# If the expression has already been compiled and is in the cache,
# return the compiled expression.
if expr in jinja2_expression_compile_cache:
return jinja2_expression_compile_cache[expr]
# The expression is not in the cache. Compile it.
env = Jinja2Environment()
compiled = env.compile_expression(expr)
# Save it to the cache.
jinja2_expression_compile_cache[expr] = compiled
# Return it.
return compiled
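# Illustrative usage sketch (not part of the original source):
#
#   func = compile_jinja2_expression("q_severity == 'high'")
#   func({"q_severity": "high"})   # => True
#
# Repeated calls with the same expression string reuse the cached compiled
# expression rather than recompiling it.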
def run_impute_conditions(conditions, context):
# Check if any of the impute conditions are met based on
# the questions that have been answered so far and return
# the imputed value. Be careful about values like 0 that
# are false-y --- must check for "is None" to know if
# something was imputed or not.
env = Jinja2Environment()
for rule in conditions:
if "condition" in rule:
condition_func = compile_jinja2_expression(rule["condition"])
try:
value = condition_func(context)
except:
value = None
else:
value = True
if value:
# The condition is met. Compute the imputed value.
if rule.get("value-mode", "raw") == "raw":
# Imputed value is the raw YAML value.
value = rule["value"]
elif rule.get("value-mode", "raw") == "expression":
value = compile_jinja2_expression(rule["value"])(context)
if isinstance(value, RenderedAnswer):
# Unwrap.
value = value.answer
elif hasattr(value, "__html__"):
# some things might return something that safely wraps a string,
# like our SafeString instance
value = value.__html__()
elif hasattr(value, "as_raw_value"):
# RenderedProject, RenderedOrganization
value = value.as_raw_value()
elif rule.get("value-mode", "raw") == "template":
env = Jinja2Environment(autoescape=True)
try:
template = env.from_string(rule["value"])
except jinja2.TemplateSyntaxError as e:
raise ValueError("There was an error loading the template %s: %s" % (rule["value"], str(e)))
value = template.render(context)
else:
raise ValueError("Invalid impute condition value-mode.")
# Since the imputed value may be None, return
# the whole thing in a tuple to distinguish from
# a None indicating the lack of an imputed value.
return (value,)
return None
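# Illustrative usage sketch (not from the original source): because a matched rule's
# value is wrapped in a 1-tuple, callers must distinguish "no rule matched" from
# "a rule imputed None":
#
#   v = run_impute_conditions(question.spec.get("impute", []), context)
#   if v is None:
#       pass              # no impute rule applied; the question must be asked
#   else:
#       answer = v[0]     # imputed value, which may itself be None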
def get_question_choice(question, key):
for choice in question.spec["choices"]:
if choice["key"] == key:
return choice
raise KeyError(repr(key) + " is not a choice")
class ModuleAnswers(object):
"""Represents a set of answers to a Task."""
def __init__(self, module, task, answertuples):
self.module = module
self.task = task
self.answertuples = answertuples
self.answers_dict = None
def __str__(self):
return "<ModuleAnswers for %s - %s>" % (self.module, self.task)
def as_dict(self):
if self.answertuples is None:
# Lazy-load by calling the task's get_answers function
# and copying its answers dictionary.
if self.task is None:
self.answertuples = { q.key: (q, False, None, None) for q in sorted(self.module.questions.all(), key = lambda q : q.definition_order) }
else:
self.answertuples = self.task.get_answers().answertuples
if self.answers_dict is None:
self.answers_dict = { q.key: value for q, is_ans, ansobj, value in self.answertuples.values() if is_ans }
return self.answers_dict
def with_extended_info(self, parent_context=None):
# Return a new ModuleAnswers instance that has imputed values added
# and information about the next question(s) and unanswered questions.
return evaluate_module_state(self, parent_context=parent_context)
def get(self, question_key):
return self.answertuples[question_key][2]
def get_questions(self):
self.as_dict() # lazy load if necessary
return [v[0] for v in self.answertuples.values()]
def render_answers(self, show_unanswered=True, show_imputed=True, show_imputed_nulls=True, show_metadata=False):
# Return a generator that provides tuples of
# (question, answerobj, answerhtml) where
# * question is a ModuleQuestion instance
# * answerobj is a TaskAnswerHistory instance (e.g. holding user and review state), or None if the answer was skipped or imputed
# * answerhtml is a str of rendered HTML
tc = TemplateContext(self, HtmlAnswerRenderer(show_metadata=show_metadata))
for q, is_answered, a, value in self.answertuples.values():
if not is_answered and not show_unanswered: continue # skip questions that have no answers
if not a and not show_imputed: continue # skip imputed answers
if not a and value is None and not show_imputed_nulls: continue # skip questions whose imputed value is null
if q.spec["type"] == "interstitial": continue # skip question types that display awkwardly
if value is None:
# Question is skipped.
if a.skipped_reason:
value_display = "<i>{}</i>".format( a.get_skipped_reason_display() )
else:
value_display = "<i>skipped</i>"
else:
# Use the template rendering system to produce a human-readable
# HTML rendering of the value.
value_display = RenderedAnswer(self.task, q, is_answered, a, value, tc)
# For question types whose primary value is machine-readable,
            # show a nice display form using the .text attribute, if possible.
            # It probably returns a SafeString which needs __html__()
# to be called on it. "file" questions render nicer without .text.
if q.spec["type"] not in ("file",):
try:
value_display = value_display.text
except AttributeError:
pass
# Whether or not we called .text, call __html__() to get
# a rendered form.
if hasattr(value_display, "__html__"):
value_display = value_display.__html__()
yield (q, a, value_display)
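    # Illustrative consumption sketch (not part of the original source):
    #
    #   for question, answerobj, html_value in module_answers.render_answers():
    #       print(question.spec["title"], html_value)
    #
    # `answerobj` is None for skipped or imputed answers, so callers should guard
    # attribute access on it.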
def render_output(self, use_data_urls=False):
# Now that all questions have been answered, generate this
# module's output. The output is a set of documents. The
# documents are lazy-rendered because not all of them may
# be used by the caller.
class LazyRenderedDocument(object):
output_formats = ("html", "text", "markdown")
def __init__(self, module_answers, document, index, use_data_urls):
self.module_answers = module_answers
self.document = document
self.index = index
self.rendered_content = { }
self.use_data_urls = use_data_urls
def __iter__(self):
                # Yield all of the keys (entries) that appear in the output document
                # specification, plus the output formats, which are exposed as keys
                # of this dict-like object and are rendered lazily on access.
for entry, value in self.document.items():
if entry not in self.output_formats:
yield entry
for entry in self.output_formats:
yield entry
def __getitem__(self, entry):
if entry in self.output_formats:
# entry is an output format -> lazy render.
if entry not in self.rendered_content:
# Cache miss.
# For errors, what is the name of this document?
if "id" in self.document:
doc_name = self.document["id"]
else:
doc_name = "at index " + str(self.index)
if "title" in self.document:
doc_name = repr(self.document["title"]) + " (" + doc_name + ")"
doc_name = "'%s' output document '%s'" % (self.module_answers.module.module_name, doc_name)
# Try to render it.
task_cache_entry = "output_r1_{}_{}_{}".format(
self.index,
entry,
1 if self.use_data_urls else 0,
)
def do_render():
try:
return render_content(self.document, self.module_answers, entry, doc_name, show_answer_metadata=True, use_data_urls=self.use_data_urls)
except Exception as e:
# Put errors into the output. Errors should not occur if the
# template is designed correctly.
ret = str(e)
if entry == "html":
import html
ret = "<p class=text-danger>" + html.escape(ret) + "</p>"
return ret
self.rendered_content[entry] = self.module_answers.task._get_cached_state(task_cache_entry, do_render)
return self.rendered_content[entry]
elif entry in self.document:
                    # entry is an entry in the specification for the document.
# Return it unchanged.
return self.document[entry]
raise KeyError(entry)
def get(self, entry, default=None):
if entry in self.output_formats or entry in self.document:
return self[entry]
return [ LazyRenderedDocument(self, d, i, use_data_urls) for i, d in enumerate(self.module.spec.get("output", [])) ]
class UndefinedReference:
def __init__(self, varname, errorfunc, path=[]):
self.varname = varname
self.errorfunc = errorfunc
self.path = path
def __html__(self):
return self.errorfunc(
"invalid reference to '{varname}' in {source}",
"invalid reference",
"Invalid reference to variable '{varname}' in {source}.",
varname=self.varname,
source=" -> ".join(self.path),
)
def __getitem__(self, item):
return UndefinedReference(item, self.errorfunc, self.path+[self.varname])
from collections.abc import Mapping
class TemplateContext(Mapping):
"""A Jinja2 execution context that wraps the Pythonic answers to questions
of a ModuleAnswers instance in RenderedAnswer instances that provide
template and expression functionality like the '.' accessor to get to
the answers of a sub-task."""
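    # Illustrative example (an assumption, not from the original source): given a
    # module question with id "host", templates can write {{host}}, {{host.text}}
    # or, for module-type answers, {{host.sub_question}}; impute-condition
    # expressions resolve the same names through __getitem__ below.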
def __init__(self, module_answers, escapefunc, parent_context=None, root=False, errorfunc=None, source=None, show_answer_metadata=None, is_computing_title=False):
self.module_answers = module_answers
self.escapefunc = escapefunc
self.root = root
self.errorfunc = parent_context.errorfunc if parent_context else errorfunc
self.source = (parent_context.source if parent_context else []) + ([source] if source else [])
self.show_answer_metadata = parent_context.show_answer_metadata if parent_context else (show_answer_metadata or False)
self.is_computing_title = parent_context.is_computing_title if parent_context else is_computing_title
self._cache = { }
self.parent_context = parent_context
def __str__(self):
return "<TemplateContext for %s>" % (self.module_answers)
def __getitem__(self, item):
# Cache every context variable's value, since some items are expensive.
if item not in self._cache:
self._cache[item] = self.getitem(item)
return self._cache[item]
def _execute_lazy_module_answers(self):
if self.module_answers is None:
# This is a TemplateContext for an unanswered question with an unknown
# module type. We treat this as if it were a Task that had no questions but
# also is not finished.
self._module_questions = { }
return
if callable(self.module_answers):
self.module_answers = self.module_answers()
self._module_questions = { q.key: q for q in self.module_answers.get_questions() }
def getitem(self, item):
self._execute_lazy_module_answers()
# If 'item' matches a question ID, wrap the internal Pythonic/JSON-able value
# with a RenderedAnswer instance which take care of converting raw data values
# into how they are rendered in templates (escaping, iteration, property accessors)
# and evaluated in expressions.
question = self._module_questions.get(item)
if question:
# The question might or might not be answered. If not, its value is None.
self.module_answers.as_dict() # trigger lazy-loading
_, is_answered, answerobj, answervalue = self.module_answers.answertuples.get(item, (None, None, None, None))
return RenderedAnswer(self.module_answers.task, question, is_answered, answerobj, answervalue, self)
# The context also provides the project and organization that the Task belongs to,
# and other task attributes, assuming the keys are not overridden by question IDs.
if self.module_answers and self.module_answers.task:
if item == "title" and (not self.is_computing_title or not self.root):
return self.module_answers.task.title
if item == "task_link":
return self.module_answers.task.get_absolute_url()
if item == "project":
if self.parent_context is not None: # use parent's cache
return self.parent_context[item]
return RenderedProject(self.module_answers.task.project, parent_context=self)
if item == "organization":
if self.parent_context is not None: # use parent's cache
return self.parent_context[item]
return RenderedOrganization(self.module_answers.task, parent_context=self)
if item == "control_catalog":
# Retrieve control catalog(s) for project
# Temporarily retrieve a single catalog
# TODO: Retrieve multiple catalogs because we could have catalogs plus overlays
# Will need a better way to determine the catalogs on a system so we can retrieve at once
# Maybe get the catalogs as a property of the system
# Retrieve a Django dictionary of dictionaries object of full control catalog
from controls.oscal import Catalog
try:
all_keys = list(set([controls.oscal_catalog_key for controls in
self.module_answers.task.project.system.root_element.controls.all()]))
except:
all_keys = []
# Need default if there are no control catalogs present
control_catalog = []
# If there are multiple catalogs
if len(all_keys) > 1:
for idx, key in enumerate(all_keys):
# Detect single control catalog from first control
try:
parameter_values = self.module_answers.task.project.get_parameter_values(key)
sca = Catalog.GetInstance(catalog_key=key,
parameter_values=parameter_values)
control_catalog.append(sca.flattened_controls_all_as_dict_list)
except:
control_catalog = None
# If there is one catalog
elif len(all_keys) == 1:
try:
parameter_values = self.module_answers.task.project.get_parameter_values(all_keys[0])
sca = Catalog.GetInstance(catalog_key=all_keys[0],
parameter_values=parameter_values)
control_catalog = sca.flattened_controls_all_as_dict
except:
control_catalog = None
return control_catalog
if item == "system":
# Retrieve the system object associated with this project
# Returned value must be a python dictionary
return self.module_answers.task.project.system
if item == "oscal":
return oscal_context(self.module_answers.task.project.system)
if item in ("is_started", "is_finished"):
# These are methods on the Task instance. Don't
# call the method here because that leads to infinite
# recursion. Figuring out if a module is finished
# requires imputing all question answers, which calls
# into templates, and we can end up back here.
return getattr(self.module_answers.task, item)
else:
# If there is no Task associated with this context, then we're
# faking the attributes.
if item in ("is_started", "is_finished"):
return (lambda : False) # the attribute normally returns a bound function
# The 'questions' key returns (question, answer) pairs.
if item == "questions":
if self.module_answers is None:
return []
self.module_answers.as_dict() # trigger lazy-loading
ret = []
for question, is_answered, answerobj, answervalue in self.module_answers.answertuples.values():
ret.append((
question.spec,
RenderedAnswer(self.module_answers.task, question, is_answered, answerobj, answervalue, self)
))
return ret
# The output_documents key returns the output documents as a dict-like mapping
# from IDs to rendered content.
if item == "output_documents":
return TemplateContext.LazyOutputDocuments(self)
# The item is not something found in the context.
error_message = "'{item}' is not a question or property of '{object}'."
error_message_vars = { "item": item, "object": (self.module_answers.task.title if self.module_answers and self.module_answers.task else self.module_answers.module.spec["title"]) }
if self.errorfunc:
return UndefinedReference(item, self.errorfunc, self.source + ["(" + error_message_vars["object"] + ")"])
raise AttributeError(error_message.format(**error_message_vars))
def __iter__(self):
self._execute_lazy_module_answers()
seen_keys = set()
# question names
for q in self._module_questions.values():
seen_keys.add(q.key)
yield q.key
# special values
# List the name of variables that are available in the templatecontext `getitem`
if self.module_answers and self.module_answers.task:
# Attributes that are only available if there is a task.
if not self.is_computing_title or not self.root:
# 'title' isn't available if we're in the process of
# computing it
yield "title"
for attribute in ("task_link", "project", "organization", "control_catalog", "system"):
if attribute not in seen_keys:
yield attribute
# Attributes that are available even when peering into unanswered module-type questions.
for attribute in ("is_started", "is_finished", "questions", "output_documents"):
if attribute not in seen_keys:
yield attribute
def __len__(self):
return len([x for x in self])
# Class that lazy-renders output documents on request.
class LazyOutputDocuments:
def __init__(self, context):
self.context = context
def __getattr__(self, item):
try:
# Find the requested output document in the module.
for doc in self.context.module_answers.module.spec.get("output", []):
if doc.get("id") == item:
# Render it.
content = render_content(doc, self.context.module_answers, "html",
"'%s' output document '%s'" % (repr(self.context.module_answers.module), item),
{}, show_answer_metadata=self.context.show_answer_metadata)
# Mark it as safe.
from jinja2 import Markup
return Markup(content)
else:
raise ValueError("%s is not the id of an output document in %s." % (item, self.context.module_answers.module))
except Exception as e:
return str(e)
def __contains__(self, item):
for doc in self.context.module_answers.module.spec.get("output", []):
if doc.get("id") == item:
return True
return False
def __iter__(self):
for doc in self.context.module_answers.module.spec.get("output", []):
if doc.get("id"):
yield doc["id"]
class RenderedProject(TemplateContext):
def __init__(self, project, parent_context=None):
self.project = project
def _lazy_load():
if self.project.root_task:
return self.project.root_task.get_answers()
super().__init__(_lazy_load, parent_context.escapefunc, parent_context=parent_context)
self.source = self.source + ["project variable"]
def __str__(self):
return "<TemplateContext for %s - %s>" % (self.project, self.module_answers)
def as_raw_value(self):
if self.is_computing_title:
# When we're computing the title for "instance-name", prevent
# infinite recursion.
return self.project.root_task.module.spec['title']
return self.project.title
def __html__(self):
return self.escapefunc(None, None, None, None, self.as_raw_value())
class RenderedOrganization(TemplateContext):
def __init__(self, task, parent_context=None):
        self.task = task
def _lazy_load():
project = self.organization.get_organization_project()
if project.root_task:
return project.root_task.get_answers()
super().__init__(_lazy_load, parent_context.escapefunc, parent_context=parent_context)
self.source = self.source + ["organization variable"]
@property
def organization(self):
if not hasattr(self, "_org"):
self._org = self.task.project.organization
return self._org
def __str__(self):
return "<TemplateContext for %s - %s>" % (self.organization, self.module_answers)
def as_raw_value(self):
return self.organization.name
def __html__(self):
return self.escapefunc(None, None, None, None, self.as_raw_value())
class RenderedAnswer:
def __init__(self, task, question, is_answered, answerobj, answer, parent_context):
self.task = task
self.question = question
self.is_answered = is_answered
self.answerobj = answerobj
self.answer = answer
self.parent_context = parent_context
self.escapefunc = parent_context.escapefunc
self.question_type = self.question.spec["type"]
self.cached_tc = None
def __html__(self):
# This method name is a Jinja2 convention. See http://jinja.pocoo.org/docs/2.10/api/#jinja2.Markup.
# Jinja2 calls this method to get the string to put into the template when this value
# appears in template in a {{variable}} directive.
#
# So this method returns how the templates render a question's answer when used as in e.g. {{q0}}.
if self.answer is None:
# Render a non-answer answer.
if self.parent_context.is_computing_title:
# When computing an instance-name title,
# raise an exception (caught higher up) if
# an unanswered question is rendered.
raise ValueError("Attempt to render unanswered question {}.".format(self.question.key))
value = "<%s>" % self.question.spec['title']
elif self.question_type == "multiple-choice":
# Render multiple-choice as a comma+space-separated list of the choice keys.
value = ", ".join(self.answer)
elif self.question_type == "datagrid":
# Render datagrid as an array of dictionaries
value = str(self.answer)
elif self.question_type == "file":
# Pass something to the escapefunc that HTML rendering can
# recognize as a file but non-HTML rendering sees as a string.
class FileValueWrapper:
def __init__(self, answer):
self.file_data = answer
def __str__(self):
return "<uploaded file: " + self.file_data['url'] + ">"
value = FileValueWrapper(self.answer)
elif self.question_type in ("module", "module-set"):
ans = self.answer # ModuleAnswers or list of ModuleAnswers
            if self.question_type == "module": ans = [ans] # make it a list
def get_title(task):
if self.parent_context.is_computing_title:
# When we're computing the title for "instance-name", prevent
# infinite recursion.
return task.module.spec['title']
else:
# Get the computed title.
return task.title
value = ", ".join(get_title(a.task) for a in ans)
else:
# For all other question types, just call Python str().
value = str(self.answer)
# And in all cases, escape the result.
return self.escapefunc(self.question, self.task, self.answer is not None, self.answerobj, value)
@property
def text(self):
# How the template renders {{q0.text}} to get a nice display form of the answer.
if self.answer is None:
if self.parent_context.is_computing_title:
# When computing an instance-name title,
# raise an exception (caught higher up) if
# an unanswered question is rendered.
raise ValueError("Attempt to render unanswered question {}.".format(self.question.key))
value = "<not answered>"
elif self.question_type == "date":
# Format the ISO date for display.
value = str(self.answer) # fall-back
import re, datetime
m = re.match("(\d\d\d\d)-(\d\d)-(\d\d)$", self.answer)
if m:
try:
year, month, date = [int(x) for x in m.groups()]
value = datetime.date(year, month, date).strftime("%x")
except ValueError:
pass
elif self.question_type == "yesno":
value = ("Yes" if self.answer == "yes" else "No")
elif self.question_type == "choice":
value = get_question_choice(self.question, self.answer)["text"]
elif self.question_type == "multiple-choice":
if len(self.answer) == 0:
value = "<nothing chosen>"
else:
choices = [get_question_choice(self.question, c)["text"] for c in self.answer] # get choice text
delim = "," if ("," not in "".join(choices)) else ";" # separate choices by commas unless there are commas in the choices, then use semicolons
value = (delim+" ").join(choices)
elif self.question_type == "datagrid":
if len(self.answer) == 0:
value = "<nothing chosen>"
else:
value = str(self.answer)
elif self.question_type in ("integer", "real"):
# Use a locale to generate nice human-readable numbers.
# The locale is set on app startup using locale.setlocale in settings.py.
import locale
value = locale.format(
"%d" if self.question_type == "integer" else "%g",
self.answer,
grouping=True)
elif self.question_type == "file":
value = "<uploaded file: " + self.answer['url'] + ">"
elif self.question_type in ("module", "module-set"):
# This field is not present for module-type questions because
# the keys are attributes exposed by the answer.
raise AttributeError()
else:
# For all other question types, just call Python str().
value = str(self.answer)
# Wrap the value in something that provides a __html__
# method to override Jinja2 escaping so we can use our
# own function.
class SafeString:
def __init__(self, value, ra):
self.value = value
self.ra = ra
def __html__(self):
return self.ra.escapefunc(self.ra.question, self.ra.task, self.ra.answer is not None, self.ra.answerobj, self.value)
return SafeString(value, self)
@property
def edit_link(self):
# Return a link to edit this question.
return self.task.get_absolute_url_to_question(self.question)
@property
def choices_selected(self):
# Return the dicts for each choice that is a part of the answer.
if self.question_type == "multiple-choice":
return [
choice
for choice in self.question.spec["choices"]
if self.answer is not None and choice["key"] in self.answer
]
raise AttributeError
@property
def choices_not_selected(self):
# Return the dicts for each choice that is not a part of the answer.
if self.question_type == "multiple-choice":
return [
choice
for choice in self.question.spec["choices"]
if choice["key"] not in self.answer or self.answer is None
]
raise AttributeError
@property
def not_yet_answered(self):
return not self.is_answered
@property
def imputed(self):
# The answer was imputed if it's considered 'answered'
# but there is no TaskAnswerHistory record in the database
# for it, which means the user didn't provide the answer.
return self.is_answered and (self.answerobj is None)
@property
def skipped(self):
# The question has a null answer either because it was imputed null
# or the user skipped it.
return self.is_answered and (self.answer is None)
@property
def skipped_by_user(self):
# The question has a null answer but it wasn't imputed null.
return self.is_answered and (self.answerobj is not None) and (self.answer is None)
@property
def answered(self):
# This question has an answer, either because it was imputed or it was
# answered by the user, but not if it was imputed null or answered null
# because those are skipped states above.
return self.is_answered and (self.answer is not None)
@property
def skipped_reason(self):
if self.answerobj is None:
return self.answerobj
return self.answerobj.skipped_reason
@property
def unsure(self):
# If the question was answered by a user, return its unsure flag.
if not self.answerobj:
return None
return self.answerobj.unsure
@property
def date_answered(self):
# Date question was answered.
if not self.answerobj:
return None
return self.answerobj.created
@property
def reviewed_state(self):
# Question reviewed value.
if not self.answerobj:
return None
return self.answerobj.reviewed
def __bool__(self):
        # How the template converts a question variable to a boolean
        # within an expression (i.e. within an {% if ... %} block).
if self.question_type == "yesno":
# yesno questions are true if they are answered as yes.
return self.answer == "yes"
else:
# Other question types are true if they are answered.
# (It would be bad to use Python bool() because it might
# give unexpected results for e.g. integer/real zero.)
return self.answer is not None
def __iter__(self):
if self.answer is None:
# If the question was skipped, return a generator that
# yields nothing --- there is nothing to iterate over.
return (None for _ in [])
if self.question_type == "multiple-choice":
# Iterate by creating a RenderedAnswer for each selected choice,
# with a made-up temporary Question instance that has the same
# properties as the actual multiple-choice choice but whose
# type is a single "choice".
from .models import ModuleQuestion
return (
RenderedAnswer(
self.task,
ModuleQuestion(
module=self.question.module,
key=self.question.key,
spec={
"type": "choice",
"title": self.question.spec['title'],
"prompt": self.question.spec['prompt'],
"choices": self.question.spec["choices"],
}),
self.is_answered,
self.answerobj,
ans, self.parent_context)
for ans in self.answer)
elif self.question_type == "datagrid":
# Iterate by creating a RenderedAnswer for each selected field,
# with a made-up temporary Question instance that has the same
# properties as the actual datagrid field but whose
# type is a single "datagrid".
from .models import ModuleQuestion
return (
RenderedAnswer(
self.task,
ModuleQuestion(
module=self.question.module,
key=self.question.key,
spec={
"type": "datagrid",
"title": self.question.spec['title'],
"prompt": self.question.spec['prompt'],
"fields": self.question.spec["fields"],
}),
self.is_answered,
self.answerobj,
ans, self.parent_context)
for ans in self.answer)
elif self.question_type == "module-set":
# Iterate over the sub-tasks' answers. Load each's answers + imputed answers.
return (TemplateContext(
v.with_extended_info(parent_context=self.parent_context if not v.task or not self.task or v.task.project_id==self.task.project_id else None),
self.escapefunc, parent_context=self.parent_context)
for v in self.answer)
raise TypeError("Answer of type %s is not iterable." % self.question_type)
def __len__(self):
if self.question_type in ("multiple-choice", "module-set"):
if self.answer is None: return 0
return len(self.answer)
if self.question_type in ("datagrid"):
if self.answer is None: return 0
return len(self.answer)
raise TypeError("Answer of type %s has no length." % self.question_type)
def __getattr__(self, item):
# For module-type questions, provide the answers of the
# sub-task as properties of this context variable.
if self.question_type == "module":
# Pass through via a temporary TemplateContext.
if self.answer is not None:
# If the question was not skipped, then we have the ModuleAnswers for it.
# Load its answers + evaluate impute conditions.
if not self.cached_tc:
self.cached_tc = TemplateContext(
lambda : self.answer.with_extended_info(parent_context=self.parent_context if not self.answer.task or not self.task or self.answer.task.project_id==self.task.project_id else None),
self.escapefunc,
parent_context=self.parent_context)
tc = self.cached_tc
else:
# The question was skipped -- i.e. we have no ModuleAnswers for
# the question that this RenderedAnswer represents. But we want
# to gracefully represent the inner item attribute as skipped too.
# If self.question.answer_type_module is set, then we know the
# inner Module type, so we can create a dummy instance that
# represents an unanswered instance of the Module.
if self.question.answer_type_module is not None:
ans = ModuleAnswers(self.question.answer_type_module, None, None)
else:
ans = None
tc = TemplateContext(ans, self.escapefunc, parent_context=self.parent_context)
return tc[item]
# For the "raw" question type, the answer value is any
# JSONable Python data structure. Forward the getattr
# request onto the value.
# Similarly for file questions which have their own structure.
elif self.question_type in ("raw", "file"):
if self.answer is not None:
return self.answer[item]
else:
# Avoid attribute errors.
return None
# For other types of questions, or items that are not question
# IDs of the subtask, just do normal Python behavior.
        raise AttributeError(item)
def __eq__(self, other):
if isinstance(other, RenderedAnswer):
other = other.answer
return self.answer == other
def __gt__(self, other):
if isinstance(other, RenderedAnswer):
other = other.answer
if self.answer is None or other is None:
# if either represents a skipped/imputed-null question,
# prevent a TypeError by just returning false
return False
try:
return self.answer > other
except TypeError:
# If one tries to compare a string to an integer, just
# say false.
return False
def __lt__(self, other):
if isinstance(other, RenderedAnswer):
other = other.answer
if self.answer is None or other is None:
# if either represents a skipped/imputed-null question,
# prevent a TypeError by just returning false
return False
try:
return self.answer < other
except TypeError:
# If one tries to compare a string to an integer, just
# say false.
return False
| gpl-3.0 | 5,768,756,161,847,980,000 | 44.321768 | 204 | 0.57162 | false |
ellmo/rogue-python-engine | rpe/player.py | 1 | 1367 | import rpe_map
import camera
class Player(object):
def __init__(self, rpe_map, direction_vector):
self._rpe_map = rpe_map
self._x = rpe_map.start_position[0]
self._y = rpe_map.start_position[1]
self._camera = camera.Camera(rpe_map.start_position, direction_vector)
@property
def rpe_map(self):
return self._rpe_map
@property
def camera(self):
return self._camera
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def position(self):
return (self._x, self._y)
@property
def dirx(self):
return self._camera.dirx
@property
def diry(self):
return self._camera.diry
def move(self, forward, left):
        if left == 0:
new_x = self._x + self.dirx * forward
new_y = self._y + self.diry * forward
else:
new_x = self._x + self.diry * left
new_y = self._y - self.dirx * left
_new_tile = self._rpe_map.tiles[int(new_y)][int(new_x)]
if not (_new_tile.solid or (_new_tile.thing and _new_tile.thing.blocking)):
self._x = new_x
self._y = new_y
self._camera.x = new_x
self._camera.y = new_y
def rotate(self, direction):
self._camera.rotate(direction)
| gpl-3.0 | -1,962,575,099,425,045,200 | 23.854545 | 83 | 0.548647 | false |
qedsoftware/commcare-hq | custom/ilsgateway/tanzania/handlers/generic_stock_report_handler.py | 1 | 2041 | from django.conf import settings
from corehq.apps.commtrack.exceptions import NotAUserClassError
from corehq.apps.commtrack.sms import process
from corehq.apps.sms.api import send_sms_to_verified_number
from custom.ilsgateway.tanzania.handlers.ils_stock_report_parser import ILSStockReportParser
from custom.ilsgateway.tanzania.handlers.keyword import KeywordHandler
from dimagi.utils.decorators.memoized import memoized
class GenericStockReportHandler(KeywordHandler):
formatter = None
status_type = None
status_value = None
@property
@memoized
def data(self):
return ILSStockReportParser(
self.domain_object,
self.verified_contact,
self.formatter()
).parse(self.msg.text)
def get_message(self, data):
        raise NotImplementedError()
def on_success(self):
        raise NotImplementedError()
def on_error(self, data):
        raise NotImplementedError()
def handle(self):
location = self.user.location
domain = self.domain_object
location_id = self.location_id
if not location_id:
return False
if location.location_type_name == 'FACILITY':
try:
data = self.data
if not data:
return True
if not data.get('transactions'):
self.on_error(data)
return True
process(domain.name, data)
if not data['errors']:
self.on_success()
else:
self.on_error(data)
return True
self.respond(self.get_message(data))
except NotAUserClassError:
return True
except Exception, e: # todo: should we only trap SMSErrors?
if settings.UNIT_TESTING or settings.DEBUG:
raise
send_sms_to_verified_number(self.verified_contact, 'problem with stock report: %s' % str(e))
return True
| bsd-3-clause | 5,092,202,569,598,729,000 | 29.924242 | 108 | 0.593827 | false |
qedsoftware/commcare-hq | corehq/apps/case_search/views.py | 1 | 1603 | import json
from corehq.apps.domain.decorators import cls_require_superuser_or_developer
from corehq.apps.domain.views import DomainViewMixin
from django.http import Http404
from dimagi.utils.web import json_response
from django.views.generic import TemplateView
from corehq.apps.case_search.models import case_search_enabled_for_domain
from corehq.util.view_utils import json_error, BadRequest
class CaseSearchView(DomainViewMixin, TemplateView):
template_name = 'case_search/case_search.html'
urlname = 'case_search'
@cls_require_superuser_or_developer
def get(self, request, *args, **kwargs):
if not case_search_enabled_for_domain(self.domain):
raise Http404("Domain does not have case search enabled")
return self.render_to_response(self.get_context_data())
@json_error
@cls_require_superuser_or_developer
def post(self, request, *args, **kwargs):
from corehq.apps.es.case_search import CaseSearchES
if not case_search_enabled_for_domain(self.domain):
raise BadRequest("Domain does not have case search enabled")
query = json.loads(request.POST.get('q'))
case_type = query.get('type')
search_params = query.get('parameters', [])
search = CaseSearchES()
search = search.domain(self.domain).is_closed(False)
if case_type:
search = search.case_type(case_type)
for param in search_params:
search = search.case_property_query(**param)
search_results = search.values()
return json_response({'values': search_results})
| bsd-3-clause | -1,550,747,963,873,273,300 | 39.075 | 76 | 0.696818 | false |
googleapis/python-aiplatform | tests/unit/enhanced_library/test_enhanced_types.py | 1 | 2007 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from google.cloud.aiplatform.v1.schema.trainingjob import definition
from google.cloud.aiplatform.v1beta1.schema.trainingjob import (
definition as definition_v1beta1,
)
ModelType = definition.AutoMlImageClassificationInputs().ModelType
test_training_input = definition.AutoMlImageClassificationInputs(
multi_label=True,
model_type=ModelType.CLOUD,
budget_milli_node_hours=8000,
disable_early_stopping=False,
)
ModelType_v1beta1 = definition_v1beta1.AutoMlImageClassificationInputs().ModelType
test_training_input_v1beta1 = definition_v1beta1.AutoMlImageClassificationInputs(
multi_label=True,
model_type=ModelType_v1beta1.CLOUD,
budget_milli_node_hours=8000,
disable_early_stopping=False,
)
# Test the v1 enhanced types.
def test_exposes_to_value_method_v1():
assert hasattr(test_training_input, "to_value")
def test_exposes_from_value_method_v1():
assert hasattr(test_training_input, "from_value")
def test_exposes_from_map_method_v1():
assert hasattr(test_training_input, "from_map")
# Test the v1beta1 enhanced types.
def test_exposes_to_value_method_v1beta1():
assert hasattr(test_training_input_v1beta1, "to_value")
def test_exposes_from_value_method_v1beta1():
assert hasattr(test_training_input_v1beta1, "from_value")
def test_exposes_from_map_method_v1beta1():
assert hasattr(test_training_input_v1beta1, "from_map")
| apache-2.0 | 1,963,648,343,854,830,300 | 31.901639 | 82 | 0.765321 | false |
praekelt/wsgi-ua-mapper | ua_mapper/updatewurfl.py | 1 | 1900 | import os
import sys
from optparse import OptionParser
from urllib import urlopen
from ua_mapper.wurfl2python import WurflPythonWriter, DeviceSerializer
OUTPUT_PATH = os.path.abspath(os.path.dirname(__file__))
WURFL_ARCHIVE_PATH = os.path.join(OUTPUT_PATH, "wurfl.zip")
WURFL_XML_PATH = os.path.join(OUTPUT_PATH, "wurfl.xml")
WURFL_PY_PATH = os.path.join(OUTPUT_PATH, "wurfl.py")
WURFL_DOWNLOAD_URL = 'http://downloads.sourceforge.net/project/wurfl/WURFL/latest/wurfl-latest.zip'
class Updater(object):
help = 'Updates Wurfl devices database.'
def write_archive(self, filename, data):
f = open(WURFL_ARCHIVE_PATH, "w")
f.write(data)
f.close()
def fetch_latest_wurfl(self):
print "Downloading Wurfl..."
data = urlopen(WURFL_DOWNLOAD_URL).read()
self.write_archive(WURFL_ARCHIVE_PATH, data)
os.system("unzip -o %s -d %s" % (WURFL_ARCHIVE_PATH, OUTPUT_PATH))
return True
def wurfl_to_python(self):
print "Compiling device list..."
# Setup options.
op = OptionParser()
op.add_option("-l", "--logfile", dest="logfile", default=sys.stderr,
help="where to write log messages")
# Cleanup args for converter to play nicely.
if '-f' in sys.argv:
sys.argv.remove('-f')
if '--force' in sys.argv:
sys.argv.remove('--force')
options, args = op.parse_args()
options = options.__dict__
options.update({"outfile": WURFL_PY_PATH})
# Perform conversion.
wurfl = WurflPythonWriter(WURFL_XML_PATH, device_handler=DeviceSerializer, options=options)
wurfl.process()
def handle(self, *args, **options):
self.fetch_latest_wurfl()
self.wurfl_to_python()
from ua_mapper.wurfl import devices
print "Done."
Updater().handle()
| bsd-3-clause | 2,836,465,306,440,326,000 | 31.758621 | 99 | 0.623684 | false |
Clarity-89/clarityv2 | src/clarityv2/work_entries/admin.py | 1 | 1978 | import datetime
from datetime import timedelta
from django.contrib import admin
from django.contrib.admin.filters import DateFieldListFilter
from django.db.models import Sum
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from import_export.admin import ImportExportActionModelAdmin
from .models import WorkEntry
class CustomDateTimeFilter(DateFieldListFilter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
now = timezone.now()
# When time zone support is enabled, convert "now" to the user's time
# zone so Django's definition of "Today" matches what the user expects.
if timezone.is_aware(now):
now = timezone.localtime(now)
today = now.date()
last_year_begin = today.replace(year=today.year - 1, month=1, day=1)
last_year_end = today.replace(year=today.year, month=1, day=1)
self.links += ((
(_('Last year'), {
self.lookup_kwarg_since: str(last_year_begin),
self.lookup_kwarg_until: str(last_year_end),
}),
))
@admin.register(WorkEntry)
class WorkEntryAdmin(ImportExportActionModelAdmin):
list_display = ('date', 'duration', 'project', 'notes')
list_filter = ('project__client', 'project', ('date', CustomDateTimeFilter))
search_fields = ('notes',)
change_list_template = 'admin/work_entries/workentry/change_list.html'
def changelist_view(self, request, extra_context=None):
response = super().changelist_view(request, extra_context=None)
if hasattr(response, 'context_data'):
cl = response.context_data.get('cl')
if cl:
queryset = cl.get_queryset(request)
duration = (queryset.aggregate(Sum('duration'))['duration__sum']) or timedelta()
response.context_data['total_duration'] = duration.total_seconds() / 3600
return response
| mit | 394,790,853,219,527,550 | 37.038462 | 96 | 0.650152 | false |
amilan/dev-maxiv-pynutaq | src/pynutaq/perseus/perseusutils.py | 1 | 7717 | #!/usr/bin/env python
###############################################################################
# NutaqDiags device server.
#
# Copyright (C) 2013 Max IV Laboratory, Lund Sweden
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
###############################################################################
"""This module contains useful functions to be used in the devices.
"""
__author__ = 'antmil'
__docformat__ = 'restructuredtext'
import math
from pynutaq.perseus.perseusdefs import *
def get_offset(type, cavity):
if type == 'read':
if cavity == 'A':
return SETTINGS_READ_OFFSET_A
elif cavity == 'B':
return SETTINGS_READ_OFFSET_B
else:
            raise ValueError('Unknown cavity. Must be A or B.')
elif type == 'write':
if cavity == 'A':
return SETTINGS_WRITE_OFFSET_A
elif cavity == 'B':
return SETTINGS_WRITE_OFFSET_B
else:
            raise ValueError('Unknown cavity. Must be A or B.')
elif type == 'diag':
if cavity == 'A':
return DIAGNOSTICS_OFFSET_A
elif cavity == 'B':
return DIAGNOSTICS_OFFSET_B
else:
            raise ValueError('Unknown cavity. Must be A or B.')
else:
        raise ValueError('Wrong type of offset!')
def read_angle(perseus, address, cavity):
# =IF(P6>32767;(P6-65536)/32767*180;P6/32767*180)
offset = get_offset('read', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
if value > 32767:
angle = (value - 65536) * 180.0 / 32767
else:
angle = (value * 180.0) / 32767
return angle
def write_angle(perseus, value, address, cavity):
"""=ROUND(IF(
E6<0; E6/180*32767+65536;
IF(E6<=180; E6/180*32767;
(E6-360)/180*32767+65536)
);0
)
"""
if value < 0:
angle = (value * 32767 / 180.0) + 65536
elif value <= 180.0:
angle = (value * 32767) / 180.0
else:
angle = ((value - 360) * 32767 / 180.0) + 65536
value = address << 17 | int(angle)
offset = get_offset('write', cavity)
perseus.write(offset, value)
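# Worked example (illustrative, not part of the original source): following the
# spreadsheet formula quoted above, write_angle(..., -90.0, ...) stores
# int(-90/180*32767 + 65536) = 49152 in the register, and read_angle maps a raw
# value of 49152 back to (49152 - 65536) * 180 / 32767, i.e. approximately -90 degrees.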
def read_milivolts(perseus, address, cavity):
"""
    This method converts the value read from a register to millivolts using the following formula:
VALUE = ROUND(P23*1000/32767*1,6467602581;0)
:param value: value read from a register.
:return: value converted in milivolts
"""
offset = get_offset('read', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
milis = value * 1000.0 / 32767 * 1.6467602581
return milis
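# Worked example (illustrative, not part of the original source): a raw register
# reading of 16384 becomes 16384 * 1000 / 32767 * 1.6467602581, i.e. roughly
# 823.4 mV, matching the spreadsheet formula quoted in the docstring above.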
def write_milivolts(perseus, milivolts, address, cavity):
"""
    This method converts the value from millivolts to bits to be written in the register using the following
formula:
VALUE =ROUND(E23/1000*32767/1,6467602581;0)
:param value: value to be converted.
:return: value to write in the register.
"""
value = (milivolts * 32767 / 1.6467602581) / 1000.0
value = address << 17 | int(value)
offset = get_offset('write', cavity)
perseus.write(offset, value)
def read_settings_diag_milivolts(perseus, address, cavity):
"""
    This method converts the value read from a register to millivolts using the following formula:
VALUE = ROUND(P23*1000/32767*1,6467602581;0)
:param value: value read from a register.
:return: value converted in milivolts
"""
offset = get_offset('read', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
milis = value * 1000.0 / 32767
return milis
def write_settings_diag_milivolts(perseus, milivolts, address, cavity):
"""
    This method converts the value from millivolts to bits to be written in the register using the following
formula:
VALUE =ROUND(E23/1000*32767/1,6467602581;0)
:param value: value to be converted.
:return: value to write in the register.
"""
value = (milivolts / 1000.0) * 32767
value = address << 17 | int(value)
offset = get_offset('write', cavity)
perseus.write(offset, value)
def read_settings_diag_percentage(perseus, address, cavity):
"""
    This method converts the value read from a register to a percentage using the following formula:
VALUE = ROUND(P23*1000/32767*1,6467602581;0)
:param value: value read from a register.
    :return: value converted to a percentage
"""
offset = get_offset('read', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
percentage = value * 100.0 / 32767
return percentage
def write_settings_diag_percentage(perseus, percentage, address, cavity):
"""
    This method converts the value from a percentage to bits to be written in the register using the following
formula:
VALUE =ROUND(E23/1000*32767/1,6467602581;0)
:param value: value to be converted.
:return: value to write in the register.
"""
value = (percentage / 100.0) * 32767
value = address << 17 | int(value)
offset = get_offset('write', cavity)
perseus.write(offset, value)
def read_direct(perseus, address, cavity):
offset = get_offset('read', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
return value
def write_direct(perseus, value, address, cavity):
value = address << 17 | int(value)
offset = get_offset('write', cavity)
perseus.write(offset, value)
def read_diag_angle(perseus, address, cavity):
offset = get_offset('diag', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
# =IF(D49>32767;
# (D49-65536)/32767*180;
# D49/32767*180)
if value > 32767:
angle = (value - (1 << 16)) * 180.0 / 32767
else:
angle = value * 180.0 / 32767
return angle
def read_diag_direct(perseus, address, cavity):
offset = get_offset('diag', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
return value
def read_diag_milivolts(perseus, address, cavity):
offset = get_offset('diag', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
#and now convert the value
#=IF(D9<32768;
# D9/32767*1000;
# (D9-2^16)/32767*1000)
if value < 32768:
milis = value * 1000.0 / 32767
else:
milis = ((value - (1 << 16)) * 1000.0) / 32767
return milis
def calc_amplitude(perseus, ivalue, qvalue):
amplitude = math.sqrt((ivalue**2) + (qvalue**2))
return amplitude
def calc_phase(perseus, ivalue, qvalue):
phase = math.atan2(qvalue, ivalue)
return phase
def start_reading_diagnostics(perseus, cavity):
offset = get_offset('diag', cavity)
value = 1 << 16
perseus.write(offset, value)
#@warning: I know ... this is not needed
value = 0 << 16
#lets continue
perseus.write(offset, value)
def end_reading_diagnostics(perseus, cavity):
offset = get_offset('diag', cavity)
value = 1 << 16
perseus.write(offset, value)
| gpl-3.0 | -5,668,996,451,132,105,000 | 28.795367 | 110 | 0.618116 | false |
nick41496/Beatnik | slackbot/migrations/0001_initial.py | 1 | 1031 | # Generated by Django 2.2.9 on 2020-04-26 23:18
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Install',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_id', models.TextField(unique=True, verbose_name='Install-specific app ID')),
('authed_user_id', models.TextField(verbose_name='Installing user ID')),
('scope', models.TextField(verbose_name='OAuth scopes granted')),
('access_token', models.TextField(verbose_name='OAuth access token')),
('bot_user_id', models.TextField(verbose_name='Install-specific bot ID')),
('team_name', models.TextField(verbose_name='Workspace name')),
('team_id', models.TextField(verbose_name='Workspace ID')),
],
),
]
| gpl-3.0 | 4,604,570,838,081,418,000 | 37.185185 | 114 | 0.588749 | false |
markgw/jazzparser | lib/nltk/classify/megam.py | 1 | 5817 | # Natural Language Toolkit: Interface to Megam Classifier
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
# $Id: naivebayes.py 2063 2004-07-17 21:02:24Z edloper $
"""
A set of functions used to interface with the external U{megam
<http://www.cs.utah.edu/~hal/megam/>} maxent optimization package.
Before C{megam} can be used, you should tell NLTK where it can find
the C{megam} binary, using the L{config_megam()} function. Typical
usage:
>>> import nltk
>>> nltk.config_megam('.../path/to/megam')
>>> classifier = nltk.MaxentClassifier.train(corpus, 'megam')
"""
__docformat__ = 'epytext en'
import os
import os.path
import subprocess
from nltk.internals import find_binary
try:
import numpy
except ImportError:
numpy = None
######################################################################
#{ Configuration
######################################################################
_megam_bin = None
def config_megam(bin=None):
"""
Configure NLTK's interface to the C{megam} maxent optimization
package.
@param bin: The full path to the C{megam} binary. If not specified,
then nltk will search the system for a C{megam} binary; and if
one is not found, it will raise a C{LookupError} exception.
@type bin: C{string}
"""
global _megam_bin
_megam_bin = find_binary(
'megam', bin,
env_vars=['MEGAM', 'MEGAMHOME'],
binary_names=['megam.opt', 'megam', 'megam_686', 'megam_i686.opt'],
url='http://www.cs.utah.edu/~hal/megam/')
######################################################################
#{ Megam Interface Functions
######################################################################
def write_megam_file(train_toks, encoding, stream,
bernoulli=True, explicit=True):
"""
Generate an input file for C{megam} based on the given corpus of
classified tokens.
@type train_toks: C{list} of C{tuples} of (C{dict}, C{str})
@param train_toks: Training data, represented as a list of
pairs, the first member of which is a feature dictionary,
and the second of which is a classification label.
@type encoding: L{MaxentFeatureEncodingI}
@param encoding: A feature encoding, used to convert featuresets
into feature vectors.
@type stream: C{stream}
@param stream: The stream to which the megam input file should be
written.
@param bernoulli: If true, then use the 'bernoulli' format. I.e.,
all joint features have binary values, and are listed iff they
are true. Otherwise, list feature values explicitly. If
C{bernoulli=False}, then you must call C{megam} with the
C{-fvals} option.
@param explicit: If true, then use the 'explicit' format. I.e.,
list the features that would fire for any of the possible
labels, for each token. If C{explicit=True}, then you must
call C{megam} with the C{-explicit} option.
"""
# Look up the set of labels.
labels = encoding.labels()
labelnum = dict([(label, i) for (i, label) in enumerate(labels)])
# Write the file, which contains one line per instance.
for featureset, label in train_toks:
# First, the instance number.
stream.write('%d' % labelnum[label])
# For implicit file formats, just list the features that fire
# for this instance's actual label.
if not explicit:
_write_megam_features(encoding.encode(featureset, label),
stream, bernoulli)
# For explicit formats, list the features that would fire for
# any of the possible labels.
else:
for l in labels:
stream.write(' #')
_write_megam_features(encoding.encode(featureset, l),
stream, bernoulli)
        # End of the instance.
stream.write('\n')
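# Illustrative example (an assumption, not from the original source): with the
# default bernoulli/implicit settings, an instance whose label has index 2 and whose
# encoded vector fires features 7 and 12 (both with value 1) is written as the line
#
#   2 7 12
#
# In explicit mode, each candidate label's firing features follow a ' #' separator
# on the same line.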
def parse_megam_weights(s, features_count, explicit=True):
"""
Given the stdout output generated by C{megam} when training a
model, return a C{numpy} array containing the corresponding weight
vector. This function does not currently handle bias features.
"""
if numpy is None:
raise ValueError('This function requires that numpy be installed')
assert explicit, 'non-explicit not supported yet'
lines = s.strip().split('\n')
weights = numpy.zeros(features_count, 'd')
for line in lines:
if line.strip():
fid, weight = line.split()
weights[int(fid)] = float(weight)
return weights
def _write_megam_features(vector, stream, bernoulli):
if not vector:
raise ValueError('MEGAM classifier requires the use of an '
'always-on feature.')
for (fid, fval) in vector:
if bernoulli:
if fval == 1:
stream.write(' %s' % fid)
elif fval != 0:
raise ValueError('If bernoulli=True, then all'
'features must be binary.')
else:
stream.write(' %s %s' % (fid, fval))
def call_megam(args):
"""
Call the C{megam} binary with the given arguments.
"""
if isinstance(args, basestring):
raise TypeError('args should be a list of strings')
if _megam_bin is None:
config_megam()
# Call megam via a subprocess
cmd = [_megam_bin] + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
# Check the return code.
if p.returncode != 0:
print
print stderr
raise OSError('megam command failed!')
return stdout
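# Illustrative usage sketch (not part of the original source; the exact megam flags
# are an assumption and depend on the installed megam build):
#
#   stdout = call_megam(['-quiet', '-nobias', 'multiclass', '/tmp/megam_input.txt'])
#   weights = parse_megam_weights(stdout, features_count)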
| gpl-3.0 | 2,395,200,553,374,821,000 | 33.625 | 75 | 0.596527 | false |
datsideofthemoon/openbts-webui | urls.py | 1 | 1973 | # Copyright (C) 2012 Daniil Egorov <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import patterns, include, url
from django.conf import settings
#OpenBTS:
from webgui.views import main, advanced, status, actions, addparam
#Smqueue
from webgui.views import smqadvanced, smqactions
#SubscriberRegistry
from webgui.views import sbrdialdata, sbractions, sbradvanced
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'mysite.views.home', name='home'),
# url(r'^mysite/', include('mysite.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
#Media static files:
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
url(r'^$', main),
#OpenBTS:
url(r'^openbts/main/', main),
url(r'^openbts/advanced/', advanced),
url(r'^openbts/status/', status),
url(r'^openbts/actions/', actions),
url(r'^openbts/addparam/', addparam),
#Smqueue:
url(r'^smqueue/actions/', smqactions),
url(r'^smqueue/configuration/', smqadvanced),
#SubscriberRegistry
url(r'^subscriberregistry/actions/', sbractions),
url(r'^subscriberregistry/configuration/', sbradvanced),
url(r'^subscriberregistry/subscribers/', sbrdialdata),
)
| gpl-3.0 | 4,428,396,824,841,071,600 | 37.686275 | 102 | 0.716168 | false |
sachabest/cis599 | web/dashboard/uploader.py | 1 | 2445 | import csv, logging
from .models import Student, Project
from django.contrib.auth.models import User
logger = logging.getLogger(__name__)
def parse_input_csv(csv_file_wrapper, project_file_wrapper):
'''
    Parses the uploaded CSV files and returns a tuple of (projects,
    mapping of team number to student users) for group information.
Expected format of project_file:
Name / Number / PM PennKey / Customer PennKey
Expected format of csv_file:
Name / Class / PennKey / Major / Team #
'''
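    # Usage sketch (illustrative; the rows are invented but follow the column
    # layouts documented above):
    #
    #   from io import BytesIO
    #   students = BytesIO(b"Ada Lovelace,Senior,adal,CIS,12\n")
    #   projects = BytesIO(b"Lost and Found,12,pmuser,custuser\n")
    #   new_projects, mapping = parse_input_csv(students, projects)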
new_projects = {}
new_students = []
data = csv.reader(project_file_wrapper.read().decode(encoding='UTF-8').splitlines())
for row in data:
project_number = int(row[1])
username = row[2] + "@upenn.edu"
customer_username = row[3] + "@upenn.edu"
try:
pm_user = User.objects.get(username=username)
except:
pm_user = User(username=username)
try:
customer_user = User.objects.get(username=customer_username)
except:
customer_user = User(username=customer_username)
pm_user.save()
customer_user.save()
try:
            new_project = Project.objects.get(number=project_number)
except:
new_project = Project(name=row[0], number=project_number, pm_user=pm_user, \
client_user=customer_user)
new_project.save()
# set pm_user and customer_user later
new_projects[project_number] = new_project
data = csv.reader(csv_file_wrapper.read().decode(encoding='UTF-8').splitlines())
project_mapping = {}
for row in data:
username = row[2] + "@upenn.edu"
try:
student = User.objects.get(username=username)
except:
student = User(username=username)
student.first_name = "Not"
student.last_name = "Registered"
student.save()
student.student = Student()
student.student.year = row[1]
student.student.major = row[3]
student.student.save()
student.save()
# add code here to find if the PM user exists
project_number = int(row[4])
new_project = new_projects[project_number]
student.student.project = new_project
student.student.save()
if project_number not in project_mapping:
project_mapping[project_number] = []
project_mapping[project_number].append(student)
return (new_projects.values(), project_mapping) | mit | -322,003,598,205,245,440 | 34.970588 | 88 | 0.608589 | false |
ZTH1970/alcide | scripts/import_users.py | 1 | 5758 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import os
import csv
import codecs
import string
import random
from datetime import datetime, time
import django.core.management
import alcide.settings
django.core.management.setup_environ(alcide.settings)
from django.contrib.auth.models import User
from alcide.actes.models import EventAct
from alcide.agenda.models import Event, EventType
from alcide.dossiers.models import PatientRecord, Status, FileState
from alcide.ressources.models import Service
from alcide.personnes.models import Worker, Holiday, UserWorker
from alcide.ressources.models import WorkerType
wt="./scripts/worker_type.csv"
access_worker_enabled="./scripts/access_worker.csv"
worker_only_disabled="./scripts/worker_only.csv"
db_path = "./scripts/20121219-212026"
dbs = ["F_ST_ETIENNE_SESSAD_TED", "F_ST_ETIENNE_CMPP", "F_ST_ETIENNE_CAMSP", "F_ST_ETIENNE_SESSAD"]
def _to_date(str_date):
if not str_date:
return None
return datetime.strptime(str_date[:-13], "%Y-%m-%d")
def _to_int(str_int):
if not str_int:
return None
return int(str_int)
def discipline_mapper(tables_data, service):
for line in tables_data['discipline']:
# Insert workertype
if not WorkerType.objects.filter(name=line['libelle']):
WorkerType.objects.create(name=line['libelle'])
def intervenants_mapper(tables_data, service):
for line in tables_data['intervenants']:
# Insert workers
for disp in tables_data['discipline']:
if disp['id'] == line['discipline']:
type = WorkerType.objects.get(name=disp['libelle'])
# TODO : import actif or not
worker, created = Worker.objects.get_or_create(
type=type,
last_name=line['nom'],
first_name=line['prenom'],
email=line['email'],
phone=line['tel'],
gender=int(line['titre']),
)
worker.services.add(service)
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="iso8859-15", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
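# Usage sketch (illustrative, file name invented): UTF8Recoder and
# UnicodeReader implement the classic Python 2 csv-with-unicode recipe used by
# main() below.
#
#   with open('workers.csv', 'rb') as fh:
#       for row in UnicodeReader(fh, delimiter=';', encoding='utf-8'):
#           print row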
def main():
'''User and worker'''
cmpp = Service.objects.get(name="CMPP")
camsp = Service.objects.get(name="CAMSP")
sessad_ted = Service.objects.get(name="SESSAD TED")
sessad_dys = Service.objects.get(name="SESSAD DYS")
csvfile = open(access_worker_enabled, 'rb')
csvlines = UnicodeReader(csvfile, delimiter=';', quotechar='|',encoding='utf-8')
csvlines.next()
for line in csvlines:
user = User(username=line[2])
user.set_password(line[3])
user.save()
last_name = line[0]
first_name = line[1]
gender = 1
if line[14] == 'femme':
gender = 2
email = line[13]
type = WorkerType.objects.get(pk=int(line[8]))
enabled = True
old_camsp_id = None
if line[9] != '':
old_camsp_id = line[9]
old_cmpp_id = None
if line[10] != '':
old_cmpp_id = line[10]
old_sessad_dys_id = None
if line[11] != '':
old_sessad_dys_id = line[11]
old_sessad_ted_id = None
if line[12] != '':
old_sessad_ted_id = line[12]
worker = Worker(last_name=last_name, first_name=first_name,
gender=gender, email=email, type=type,
old_camsp_id=old_camsp_id, old_cmpp_id=old_cmpp_id,
old_sessad_dys_id=old_sessad_dys_id, old_sessad_ted_id=old_sessad_ted_id,
enabled=enabled)
worker.save()
if line[4] != '':
worker.services.add(camsp)
if line[5] != '':
worker.services.add(cmpp)
if line[6] != '':
worker.services.add(sessad_dys)
if line[7] != '':
worker.services.add(sessad_ted)
worker.save()
UserWorker(user=user,worker=worker).save()
'''Worker only'''
csvfile = open(worker_only_disabled, 'rb')
csvlines = UnicodeReader(csvfile, delimiter=';', quotechar='|',encoding='utf-8')
csvlines.next()
for line in csvlines:
old_camsp_id = None
old_cmpp_id = None
old_sessad_dys_id = None
old_sessad_ted_id = None
service = line[5]
if service == 'CAMSP':
old_camsp_id = line[0]
elif service == 'CMPP':
old_cmpp_id = line[0]
elif service == 'SESSAD DYS':
old_sessad_dys_id = line[0]
else:
old_sessad_ted_id = line[0]
last_name = line[1]
first_name = line[2]
gender = 1
if line[3] == 'Femme':
gender = 2
type = WorkerType.objects.get(pk=int(line[4]))
enabled = False
worker = Worker(last_name=last_name, first_name=first_name,
gender=gender, email=None, type=type,
old_camsp_id=old_camsp_id, old_cmpp_id=old_cmpp_id,
old_sessad_dys_id=old_sessad_dys_id, old_sessad_ted_id=old_sessad_ted_id,
enabled=enabled)
worker.save()
if __name__ == "__main__":
main()
| agpl-3.0 | -7,221,619,280,167,472,000 | 30.464481 | 99 | 0.588051 | false |
sheqi/TVpgGLM | test/practice6_pystan_hmc_Qi_loop_test.py | 1 | 1347 | import pickle
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
from pyglm.utils.utils import expand_scalar, compute_optimal_rotation
dim = 2
N = 20
r = 1 + np.arange(N) // (N/2.)
th = np.linspace(0, 4 * np.pi, N, endpoint=False)
x = r * np.cos(th)
y = r * np.sin(th)
L = np.hstack((x[:, None], y[:, None]))
L1 = np.random.randn(N, dim)
W = np.zeros((N, N))
# Distance matrix
D = ((L[:, None, :] - L[None, :, :]) ** 2).sum(2)
sig = np.exp(-D/2)
Sig = np.tile(sig[:, :, None, None], (1, 1, 1, 1))
Mu = expand_scalar(0, (N, N, 1))
for n in range(N):
for m in range(N):
W[n, m] = npr.multivariate_normal(Mu[n, m], Sig[n, m])
aa = 1.0
bb = 1.0
cc = 1.0
sm = pickle.load(open('/Users/pillowlab/Dropbox/pyglm-master/Practices/model.pkl', 'rb'))
new_data = dict(N=N, W=W, B=dim)
for i in range(100):
fit = sm.sampling(data=new_data, iter=100, warmup=50, chains=1, init=[dict(l=L1, sigma=aa)],
control=dict(stepsize=0.001))
samples = fit.extract(permuted=True)
aa = np.mean(samples['sigma'])
#aa = samples['sigma'][-1]
#bb = np.mean(samples['eta'])
#cc = np.mean(samples['rho'])
L1 = np.mean(samples['l'], 0)
#L1 = samples['l'][-1]
R = compute_optimal_rotation(L1, L)
L1 = np.dot(L1, R)
plt.scatter(L1[:,0],L1[:,1])
plt.scatter(L[:,0],L[:,1]) | mit | 6,740,229,076,920,193,000 | 24.923077 | 96 | 0.582777 | false |
googleads/googleads-python-lib | examples/ad_manager/v202105/activity_group_service/get_all_activity_groups.py | 1 | 1854 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all activity groups.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
activity_group_service = client.GetService(
'ActivityGroupService', version='v202105')
# Create a statement to select activity groups.
statement = ad_manager.StatementBuilder(version='v202105')
# Retrieve a small amount of activity groups at a time, paging
# through until all activity groups have been retrieved.
while True:
response = activity_group_service.getActivityGroupsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
for activity_group in response['results']:
# Print out some information for each activity group.
print('Activity group with ID "%d" and name "%s" was found.\n' %
(activity_group['id'], activity_group['name']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| apache-2.0 | 4,992,633,580,900,890,000 | 35.352941 | 74 | 0.721143 | false |
vxsx/djangocms-text-ckeditor | djangocms_text_ckeditor/utils.py | 1 | 6174 | # -*- coding: utf-8 -*-
import os
import re
from collections import OrderedDict
from functools import wraps
from classytags.utils import flatten_context
from cms.models import CMSPlugin
from django.core.files.storage import get_storage_class
from django.template.defaultfilters import force_escape
from django.template.loader import render_to_string
from django.utils.decorators import available_attrs
from django.utils.functional import LazyObject
OBJ_ADMIN_RE_PATTERN = r'<cms-plugin .*?\bid="(?P<pk>\d+)".*?>.*?</cms-plugin>'
OBJ_ADMIN_WITH_CONTENT_RE_PATTERN = r'<cms-plugin .*?\bid="(?P<pk>\d+)".*?>(?P<content>.*?)</cms-plugin>'
OBJ_ADMIN_RE = re.compile(OBJ_ADMIN_RE_PATTERN, flags=re.DOTALL)
def _render_cms_plugin(plugin, context):
context = flatten_context(context)
context['plugin'] = plugin
    # This, my fellow ckeditor enthusiasts, is a hack..
# If I let djangoCMS render the plugin using {% render_plugin %}
# it will wrap the output in the toolbar markup which we don't want.
# If I render the plugin without rendering a template first, then context processors
# are not called and so plugins that rely on these like those using sekizai will error out.
# The compromise is to render a template so that Django binds the context to it
# and thus calls context processors AND render the plugin manually with the context
# after it's been bound to a template.
response = render_to_string(
'cms/plugins/render_plugin_preview.html',
context,
request=context['request'],
)
return response
def random_comment_exempt(view_func):
# Borrowed from
# https://github.com/lpomfrey/django-debreach/blob/f778d77ffc417/debreach/decorators.py#L21
# This is a no-op if django-debreach is not installed
def wrapped_view(*args, **kwargs):
response = view_func(*args, **kwargs)
response._random_comment_exempt = True
return response
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
def plugin_to_tag(obj, content='', admin=False):
plugin_attrs = OrderedDict(
id=obj.pk,
icon_alt=force_escape(obj.get_instance_icon_alt()),
content=content,
)
if admin:
# Include extra attributes when rendering on the admin
plugin_class = obj.get_plugin_class()
preview = getattr(plugin_class, 'text_editor_preview', True)
plugin_tag = (
u'<cms-plugin render-plugin=%(preview)s alt="%(icon_alt)s "'
u'title="%(icon_alt)s" id="%(id)d">%(content)s</cms-plugin>'
)
plugin_attrs['preview'] = 'true' if preview else 'false'
else:
plugin_tag = (
u'<cms-plugin alt="%(icon_alt)s "'
u'title="%(icon_alt)s" id="%(id)d">%(content)s</cms-plugin>'
)
return plugin_tag % plugin_attrs
def plugin_tags_to_id_list(text, regex=OBJ_ADMIN_RE):
def _find_plugins():
for tag in regex.finditer(text):
plugin_id = tag.groupdict().get('pk')
if plugin_id:
yield plugin_id
return [int(id) for id in _find_plugins()]
def _plugin_tags_to_html(text, output_func):
"""
Convert plugin object 'tags' into the form for public site.
context is the template context to use, placeholder is the placeholder name
"""
plugins_by_id = get_plugins_from_text(text)
def _render_tag(m):
try:
plugin_id = int(m.groupdict()['pk'])
obj = plugins_by_id[plugin_id]
except KeyError:
# Object must have been deleted. It cannot be rendered to
# end user so just remove it from the HTML altogether
return u''
else:
obj._render_meta.text_enabled = True
return output_func(obj, m)
return OBJ_ADMIN_RE.sub(_render_tag, text)
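# Illustrative sketch (id and alt text invented): given
#
#   text = '<cms-plugin alt="Image " title="Image" id="42"></cms-plugin>'
#   html = plugin_tags_to_user_html(text, context)
#
# the tag is replaced with plugin 42's rendered markup, while a tag whose
# plugin no longer exists is replaced with an empty string.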
def plugin_tags_to_user_html(text, context):
def _render_plugin(obj, match):
return _render_cms_plugin(obj, context)
return _plugin_tags_to_html(text, output_func=_render_plugin)
def plugin_tags_to_admin_html(text, context):
def _render_plugin(obj, match):
plugin_content = _render_cms_plugin(obj, context)
return plugin_to_tag(obj, content=plugin_content, admin=True)
return _plugin_tags_to_html(text, output_func=_render_plugin)
def plugin_tags_to_db(text):
def _strip_plugin_content(obj, match):
return plugin_to_tag(obj)
return _plugin_tags_to_html(text, output_func=_strip_plugin_content)
def replace_plugin_tags(text, id_dict, regex=OBJ_ADMIN_RE):
plugins_by_id = CMSPlugin.objects.in_bulk(id_dict.values())
def _replace_tag(m):
try:
plugin_id = int(m.groupdict()['pk'])
new_id = id_dict[plugin_id]
plugin = plugins_by_id[new_id]
except KeyError:
# Object must have been deleted. It cannot be rendered to
# end user, or edited, so just remove it from the HTML
# altogether
return u''
return plugin_to_tag(plugin)
return regex.sub(_replace_tag, text)
def get_plugins_from_text(text, regex=OBJ_ADMIN_RE):
from cms.utils.plugins import downcast_plugins
plugin_ids = plugin_tags_to_id_list(text, regex)
plugins = CMSPlugin.objects.filter(pk__in=plugin_ids).select_related('placeholder')
plugin_list = downcast_plugins(plugins, select_placeholder=True)
return dict((plugin.pk, plugin) for plugin in plugin_list)
"""
The following class is taken from https://github.com/jezdez/django/compare/feature/staticfiles-templatetag
and should be removed and replaced by the django-core version in 1.4
"""
default_storage = 'django.contrib.staticfiles.storage.StaticFilesStorage'
class ConfiguredStorage(LazyObject):
def _setup(self):
from django.conf import settings
self._wrapped = get_storage_class(getattr(settings, 'STATICFILES_STORAGE', default_storage))()
configured_storage = ConfiguredStorage()
def static_url(path):
'''
Helper that prefixes a URL with STATIC_URL and cms
'''
if not path:
return ''
return configured_storage.url(os.path.join('', path))
| bsd-3-clause | -6,806,602,773,429,382,000 | 33.49162 | 106 | 0.660026 | false |
uwcirg/true_nth_usa_portal | portal/views/portal.py | 1 | 47136 | """Portal view functions (i.e. not part of the API or auth)"""
from datetime import datetime
from pprint import pformat
from time import strftime
from urllib.parse import urlencode
from celery.exceptions import TimeoutError
from celery.result import AsyncResult
from flask import (
Blueprint,
Response,
abort,
current_app,
g,
jsonify,
make_response,
redirect,
render_template,
render_template_string,
request,
session,
url_for,
)
from flask_babel import gettext as _
from flask_sqlalchemy import get_debug_queries
from flask_swagger import swagger
from flask_user import roles_required
from flask_wtf import FlaskForm
import requests
from sqlalchemy import and_
from sqlalchemy.orm.exc import NoResultFound
from wtforms import (
BooleanField,
HiddenField,
IntegerField,
StringField,
validators,
)
from ..audit import auditable_event
from ..database import db
from ..date_tools import FHIR_datetime
from ..extensions import oauth
from ..factories.celery import create_celery
from ..models.app_text import (
AppText,
InitialConsent_ATMA,
MailResource,
UndefinedAppText,
UserInviteEmail_ATMA,
UserReminderEmail_ATMA,
VersionedResource,
app_text,
get_terms,
)
from ..models.client import validate_origin
from ..models.communication import Communication, load_template_args
from ..models.coredata import Coredata
from ..models.fhir import bundle_results
from ..models.i18n import get_locale
from ..models.identifier import Identifier
from ..models.login import login_user
from ..models.message import EmailMessage
from ..models.next_step import NextStep
from ..models.organization import (
Organization,
OrganizationIdentifier,
OrgTree,
UserOrganization,
)
from ..models.qb_timeline import invalidate_users_QBT
from ..models.questionnaire import Questionnaire
from ..models.questionnaire_response import QuestionnaireResponse
from ..models.role import ALL_BUT_WRITE_ONLY, ROLE
from ..models.table_preference import TablePreference
from ..models.url_token import BadSignature, SignatureExpired, verify_token
from ..models.user import User, current_user, get_user_or_abort
from ..system_uri import SHORTCUT_ALIAS
from ..trace import dump_trace, establish_trace, trace
from ..type_tools import check_int
from .auth import logout, next_after_login
from .crossdomain import crossdomain
portal = Blueprint('portal', __name__)
@portal.route('/favicon.ico')
def favicon():
return redirect(url_for('static', filename='img/favicon.ico'), code=302)
@portal.route('/no-script')
def no_script():
return make_response(_("This application requires Javascript enabled."
" Please check your browser settings."))
@portal.before_app_request
def assert_locale_selector():
# Confirm import & use of custom babel localeselector function.
# Necessary to import get_locale to bring into the request scope to
# prevent the default babel locale selector from being used.
locale_code = get_locale()
# assign locale code as global for easy access in template
if locale_code:
g.locale_code = locale_code
@portal.before_app_request
def debug_request_dump():
if current_app.config.get("DEBUG_DUMP_HEADERS"):
current_app.logger.debug(
"{0.remote_addr} {0.method} {0.path} {0.headers}".format(request))
if current_app.config.get("DEBUG_DUMP_REQUEST"):
output = "{0.remote_addr} {0.method} {0.path}"
if request.data:
output += " {data}"
if request.args:
output += " {0.args}"
if request.form:
output += " {0.form}"
current_app.logger.debug(output.format(
request,
data=request.get_data(as_text=True),
))
@portal.after_app_request
def report_slow_queries(response):
"""Log slow database queries
This will only function if BOTH values are set in the config:
DATABASE_QUERY_TIMEOUT = 0.5 # threshold in seconds
SQLALCHEMY_RECORD_QUERIES = True
"""
threshold = current_app.config.get('DATABASE_QUERY_TIMEOUT')
if threshold:
for query in get_debug_queries():
if query.duration >= threshold:
current_app.logger.warning(
"SLOW QUERY: {0.statement}\n"
"Duration: {0.duration:.4f} seconds\n"
"Parameters: {0.parameters}\n"
"Context: {0.context}".format(query))
return response
@portal.route('/report-error')
@oauth.require_oauth()
def report_error():
"""Useful from front end, client-side to raise attention to problems
    On occasion, the front end code generates an exception worthy of server
    side attention.  A GET request here logs the details as a server side
    error message (handled as configured, such as by producing error email)
    without raising an actual server error.
OAuth protected to prevent abuse.
Any of the following query string arguments (and their values) will be
included in the exception text, to better capture the context. None are
required.
:subject_id: User on which action is being attempted
:message: Details of the error event
:page_url: The page requested resulting in the error
actor_id need not be sent, and will always be included - the OAuth
protection guarentees and defines a valid current user.
"""
message = {'actor': "{}".format(current_user())}
accepted = ('subject_id', 'page_url', 'message')
for attr in accepted:
value = request.args.get(attr)
if value:
message[attr] = value
# log as an error message - but don't raise a server error
# for the front end to manage.
current_app.logger.error("Received error {}".format(pformat(message)))
return jsonify(error='received')
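# Usage sketch (illustrative, values invented): a front end client holding a
# valid bearer token might call
#
#   GET /report-error?subject_id=101&page_url=/profile&message=render+failed
#
# which logs the details (plus the acting user) server side and returns
# {"error": "received"}.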
class ShortcutAliasForm(FlaskForm):
shortcut_alias = StringField(
'Code', validators=[validators.DataRequired()])
@staticmethod
def validate_shortcut_alias(field):
"""Custom validation to confirm an alias match"""
if len(field.data.strip()):
try:
Identifier.query.filter_by(
system=SHORTCUT_ALIAS, _value=field.data).one()
except NoResultFound:
raise validators.ValidationError("Code not found")
@portal.route('/go', methods=['GET', 'POST'])
def specific_clinic_entry():
"""Entry point with form to insert a coded clinic shortcut
Invited users may start here to obtain a specific clinic assignment,
by entering the code or shortcut alias they were given.
Store the clinic in the session for association with the user once
registered and redirect to the standard landing page.
NB if already logged in - this will bounce user to home
"""
if current_user():
return redirect(url_for('portal.home'))
form = ShortcutAliasForm(request.form)
if not form.validate_on_submit():
return render_template('shortcut_alias.html', form=form)
return specific_clinic_landing(form.shortcut_alias.data)
@portal.route('/go/<string:clinic_alias>')
def specific_clinic_landing(clinic_alias):
"""Invited users start here to obtain a specific clinic assignment
Store the clinic in the session for association with the user once
registered and redirect to the standard landing page.
"""
# Shortcut aliases are registered with the organization as identifiers.
# Confirm the requested alias exists or 404
identifier = Identifier.query.filter_by(system=SHORTCUT_ALIAS,
_value=clinic_alias).first()
if not identifier:
current_app.logger.debug("Clinic alias not found: %s", clinic_alias)
abort(404)
# Expecting exactly one organization for this alias, save ID in session
results = OrganizationIdentifier.query.filter_by(
identifier_id=identifier.id).one()
# Top-level orgs with child orgs won't work, as the UI only lists
# the clinic level
org = Organization.query.get(results.organization_id)
if org.partOf_id is None:
orgs = OrgTree().here_and_below_id(results.organization_id)
for childOrg in orgs:
# the org tree contains an org other than the alias org itself
if childOrg != results.organization_id:
abort(400, "alias points to top-level organization")
session['associate_clinic_id'] = results.organization_id
current_app.logger.debug(
"Storing session['associate_clinic_id']{}".format(
session['associate_clinic_id']))
return redirect(url_for('user.register'))
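# Usage sketch (illustrative, alias invented): an invited user following
#
#   GET /go/valley-clinic
#
# has the matching organization id stored in session['associate_clinic_id']
# and is redirected to registration; an unknown alias answers 404.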
@portal.route('/require_cookies')
def require_cookies():
"""give front end opportunity to verify cookies
Renders HTML including cookie check, then redirects back to `target`
NB - query string 'cookies_tested=True' added to target for client
to confirm this process happened.
"""
mutable_args = request.args.copy()
target = mutable_args.pop('target')
if not target:
raise ValueError("require cookies needs a `target`")
mutable_args['cookies_tested'] = True
query_string = urlencode(mutable_args)
delimiter = '&' if '?' in target else '?'
target = "{}{}{}".format(target, delimiter, query_string)
return render_template('require_cookies.html', target=target)
@portal.route('/access/<string:token>', defaults={'next_step': None})
@portal.route('/access/<string:token>/<string:next_step>')
def access_via_token(token, next_step=None):
"""Limited access users enter here with special token as auth
Tokens contain encrypted data including the user_id and timestamp
from when it was generated.
If the token is found to be valid, and the user_id isn't associated
with a *privilidged* account, the behavior depends on the roles assigned
to the token's user_id:
* WRITE_ONLY users will be directly logged into the weak auth account
* others will be given a chance to prove their identity
:param next_step: if the user is to be redirected following validation
and intial queries, include a value. These come from a controlled
vocabulary - see `NextStep`
"""
# logout current user if one is logged in.
if current_user():
logout(prevent_redirect=True, reason="forced from /access_via_token")
assert (not current_user())
# Confirm the token is valid, and not expired.
valid_seconds = current_app.config.get(
'TOKEN_LIFE_IN_DAYS') * 24 * 3600
try:
user_id = verify_token(token, valid_seconds)
except SignatureExpired:
current_app.logger.info("token access failed: "
"expired token {}".format(token))
abort(404, "URL token has expired")
except BadSignature:
abort(404, "URL token is invalid")
# Valid token - confirm user id looks legit
user = get_user_or_abort(user_id)
not_allowed = {
ROLE.ADMIN.value,
ROLE.APPLICATION_DEVELOPER.value,
ROLE.SERVICE.value}
has = {role.name for role in user.roles}
if not has.isdisjoint(not_allowed):
abort(400, "Access URL not allowed for privileged accounts")
# if provided, validate and store target in session
if next_step:
NextStep.validate(next_step)
target_url = getattr(NextStep, next_step)(user)
if not target_url:
# Due to access strategies, the next step may not (yet) apply
abort(400,
"Patient doesn't qualify for '{}', can't continue".format(
next_step))
session['next'] = target_url
current_app.logger.debug(
"/access with next_step, storing in session['next']: {}".format(
session['next']))
# as this is the entry point for many pre-registered or not-yet-logged-in
# users, capture their locale_code in the session for template rendering
# prior to logging in. (Post log-in, the current_user().locale_code is
# always available
session['locale_code'] = user.locale_code
if {ROLE.WRITE_ONLY.value, ROLE.ACCESS_ON_VERIFY.value}.intersection(has):
# write only users with special role skip the challenge protocol
if ROLE.PROMOTE_WITHOUT_IDENTITY_CHALLENGE.value in has:
# access_on_verify users are REQUIRED to verify
if ROLE.ACCESS_ON_VERIFY.value in has:
current_app.logger.error(
"ACCESS_ON_VERIFY {} has disallowed role "
"PROMOTE_WITHOUT_IDENTITY_CHALLENGE".format(user))
abort(400, "Invalid state - access denied")
# only give such tokens 5 minutes - recheck validity
verify_token(token, valid_seconds=5 * 60)
auditable_event("promoting user without challenge via token, "
"pending registration", user_id=user.id,
subject_id=user.id, context='account')
user.mask_email()
db.session.commit()
session['invited_verified_user_id'] = user.id
return redirect(url_for('user.register', email=user.email))
# If user does not have PROMOTE_WITHOUT_IDENTITY_CHALLENGE,
# challenge the user identity, followed by a redirect to the
# appropriate page.
session['challenge.user_id'] = user.id
if not all((user.birthdate, user.first_name, user.last_name)):
current_app.logger.error(
"{} w/o all (birthdate, first_name, last_name); can't "
"verify".format(user))
abort(400, "invalid state - can't continue")
if ROLE.ACCESS_ON_VERIFY.value in has:
# Send user to verify, and then follow post login flow
session['challenge.access_on_verify'] = True
session['challenge.next_url'] = url_for('auth.next_after_login')
else:
# Still here implies a WRITE_ONLY user in process of registration.
# Preserve the invited user id, should we need to
# merge associated details after user proves themselves and logs in
auditable_event(
"invited user entered using token, pending registration",
user_id=user.id, subject_id=user.id, context='account')
session['challenge.next_url'] = url_for(
'user.register', email=user.email)
session['challenge.merging_accounts'] = True
return redirect(
url_for('portal.challenge_identity', request_path=request.url))
# If not WRITE_ONLY user, redirect to login page
# Email field is auto-populated unless using alt auth (fb/google/etc)
if user.email and user.password:
return redirect(url_for('user.login', email=user.email))
return redirect(url_for('user.login'))
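# Illustrative shape of the emailed access URLs handled above (token and step
# values are placeholders):
#
#   https://<host>/access/<signed token>
#   https://<host>/access/<signed token>/<next_step>
#
# where <next_step> must pass NextStep.validate(); expired or malformed tokens
# answer 404 and privileged accounts are refused with 400.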
class ChallengeIdForm(FlaskForm):
retry_count = HiddenField('retry count', default=0)
next_url = HiddenField('next')
user_id = HiddenField('user')
merging_accounts = HiddenField('merging_accounts')
access_on_verify = HiddenField('access_on_verify')
first_name = StringField(
'First Name', validators=[validators.input_required()])
last_name = StringField(
'Last Name', validators=[validators.input_required()])
birthdate = StringField(
'Birthdate', validators=[validators.input_required()])
@portal.route('/challenge', methods=['GET', 'POST'])
def challenge_identity(
user_id=None, next_url=None, merging_accounts=False,
access_on_verify=False, request_path=None):
"""Challenge the user to verify themselves
Can't expose the parameters for security reasons - use the session,
namespace each variable i.e. session['challenge.user_id'] unless
calling as a function.
:param user_id: the user_id to verify - invited user or the like
:param next_url: destination url on successful challenge completion
:param merging_accounts: boolean value, set true IFF on success, the
user account will be merged into a new account, say from a weak
      authenticated WRITE_ONLY invite account
:param access_on_verify: boolean value, set true IFF on success, the
user should be logged in once validated, i.e. w/o a password
:param request_path: the requested url prior to redirection to here
necessary in no cookie situations, to redirect user back
"""
# At this point, we can expect a session, or the user likely
# doesn't have cookies enabled. (ignore misleading `_fresh`
# and `_permanent` keys)
session_keys = [k for k in session if k not in ('_fresh', '_permanent')]
if not session_keys:
request_path = request.args.get('request_path', request_path)
current_app.logger.warning(
"failed request due to lack of cookies: {}".format(request_path))
return redirect(url_for(
'portal.require_cookies', target=request_path))
if request.method == 'GET':
# Pull parameters from session if not defined
if not (user_id and next_url):
user_id = session.get('challenge.user_id')
next_url = session.get('challenge.next_url')
merging_accounts = session.get(
'challenge.merging_accounts', False)
access_on_verify = session.get(
'challenge.access_on_verify', False)
if request.method == 'POST':
form = ChallengeIdForm(request.form)
if form.next_url.data:
validate_origin(form.next_url.data)
if not form.user_id.data:
abort(400, "missing user in identity challenge")
user = get_user_or_abort(form.user_id.data)
else:
user = get_user_or_abort(user_id)
form = ChallengeIdForm(
next_url=next_url, user_id=user.id,
merging_accounts=merging_accounts,
access_on_verify=access_on_verify)
error = ""
if not form.validate_on_submit():
return render_template(
'challenge_identity.html', form=form, errorMessage=error)
first_name = form.first_name.data
last_name = form.last_name.data
try:
birthdate = datetime.strptime(form.birthdate.data, '%m-%d-%Y')
except ValueError as ve:
current_app.logger.warning(
"failed challenge birthdate format, {}".format(ve))
birthdate = None
score = user.fuzzy_match(first_name=first_name,
last_name=last_name,
birthdate=birthdate)
if score < current_app.config.get('IDENTITY_CHALLENGE_THRESHOLD', 85):
auditable_event(
"Failed identity challenge tests with values:"
"(first_name={}, last_name={}, birthdate={})".format(
first_name, last_name, birthdate),
user_id=user.id, subject_id=user.id,
context='authentication')
# very modest brute force test
form.retry_count.data = int(form.retry_count.data) + 1
if form.retry_count.data >= 1:
error = _("Unable to match identity")
if form.retry_count.data > 3:
abort(404, _("User Not Found"))
return render_template(
'challenge_identity.html', form=form, errorMessage=error)
# identity confirmed
session['challenge_verified_user_id'] = user.id
if form.merging_accounts.data == 'True':
user.mask_email()
db.session.commit()
session['invited_verified_user_id'] = user.id
if form.access_on_verify.data == 'True':
# Log user in as they have now verified
login_user(user=user, auth_method='url_authenticated_and_verified')
return redirect(form.next_url.data)
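# Usage sketch: callers stage the challenge through the session before
# redirecting here, mirroring access_via_token above.
#
#   session['challenge.user_id'] = user.id
#   session['challenge.next_url'] = url_for('auth.next_after_login')
#   session['challenge.access_on_verify'] = True
#   return redirect(url_for('portal.challenge_identity',
#                           request_path=request.url))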
@portal.route('/initial-queries', methods=['GET', 'POST'])
def initial_queries():
"""Initial consent terms, initial queries view function"""
user = current_user()
if not user:
# Shouldn't happen, unless user came in on a bookmark
current_app.logger.debug("initial_queries (no user!) -> landing")
return redirect('/')
if user.deleted:
abort(400, "deleted user - operation not permitted")
if request.method == 'POST':
"""
data submission all handled via ajax calls from initial_queries
template. assume POST can only be sent when valid.
"""
current_app.logger.debug("POST initial_queries -> next_after_login")
return next_after_login()
elif len(Coredata().still_needed(user)) == 0:
# also handle the situations that resulted from:
# 1. user refreshing the browser or
# 2. exiting browser and resuming session thereafter
# In both cases, the request method is GET, hence a redirect back to
# initial-queries page won't ever reach the above check specifically
# for next_after_login based on the request method of POST
current_app.logger.debug("GET initial_queries -> next_after_login")
return next_after_login()
org = user.first_top_organization()
role = None
for r in (ROLE.STAFF_ADMIN.value, ROLE.STAFF.value, ROLE.PATIENT.value):
if user.has_role(r):
# treat staff_admins as staff for this lookup
r = ROLE.STAFF.value if r == ROLE.STAFF_ADMIN.value else r
role = r
terms = get_terms(user.locale_code, org, role)
# need this at all time now for ui
consent_agreements = Organization.consent_agreements(
locale_code=user.locale_code)
return render_template(
'initial_queries.html', user=user, terms=terms,
consent_agreements=consent_agreements)
@portal.route('/admin')
@roles_required(ROLE.ADMIN.value)
@oauth.require_oauth()
def admin():
"""user admin view function"""
# can't do list comprehension in template - prepopulate a 'rolelist'
user = current_user()
pref_org_list = None
# check user table preference for organization filters
pref = TablePreference.query.filter_by(table_name='adminList',
user_id=user.id).first()
if pref and pref.filters:
pref_org_list = pref.filters.get('orgs_filter_control')
if pref_org_list:
org_list = set()
# for selected filtered orgs, we also need to get the children
# of each, if any
for orgId in pref_org_list:
check_int(orgId)
if orgId == 0: # None of the above doesn't count
continue
org_list.update(OrgTree().here_and_below_id(orgId))
users = User.query.join(UserOrganization).filter(and_(
User.deleted_id.is_(None),
UserOrganization.user_id == User.id,
UserOrganization.organization_id != 0,
UserOrganization.organization_id.in_(org_list)))
else:
org_list = Organization.query.all()
users = User.query.filter_by(deleted=None).all()
return render_template(
'admin/admin.html', users=users, wide_container="true",
org_list=list(org_list), user=user)
@portal.route('/invite', methods=('GET', 'POST'))
@oauth.require_oauth()
def invite():
"""invite other users via form data
see also /api/user/{user_id}/invite
"""
if request.method == 'GET':
return render_template('invite.html')
subject = request.form.get('subject')
body = request.form.get('body')
recipients = request.form.get('recipients')
user = current_user()
if not user.email:
abort(400, "Users without an email address can't send email")
email = EmailMessage(
subject=subject, body=body, recipients=recipients,
sender=user.email, user_id=user.id)
email.send_message()
db.session.add(email)
db.session.commit()
return invite_sent(message_id=email.id)
@portal.route('/invite/<int:message_id>')
@oauth.require_oauth()
def invite_sent(message_id):
"""show invite sent"""
message = EmailMessage.query.get(message_id)
if not message:
abort(404, "Message not found")
current_user().check_role('view', other_id=message.user_id)
return render_template('invite_sent.html', message=message)
@portal.route('/profile', defaults={'user_id': None})
@portal.route('/profile/<int:user_id>')
@roles_required(ALL_BUT_WRITE_ONLY)
@oauth.require_oauth()
def profile(user_id):
"""profile view function"""
user = current_user()
# template file for user self's profile
template_file = 'profile/my_profile.html'
if user_id and user_id != user.id:
user.check_role("edit", other_id=user_id)
user = get_user_or_abort(user_id)
# template file for view of other user's profile
template_file = 'profile/user_profile.html'
consent_agreements = Organization.consent_agreements(
locale_code=user.locale_code)
terms = VersionedResource(
app_text(InitialConsent_ATMA.name_key()),
locale_code=user.locale_code)
return render_template(template_file, user=user, terms=terms,
current_user=current_user(),
consent_agreements=consent_agreements)
@portal.route('/patient-invite-email/<int:user_id>')
@roles_required([ROLE.ADMIN.value, ROLE.STAFF_ADMIN.value, ROLE.STAFF.value])
@oauth.require_oauth()
def patient_invite_email(user_id):
"""Patient Invite Email Content"""
if user_id:
user = get_user_or_abort(user_id)
else:
user = current_user()
try:
top_org = user.first_top_organization()
if top_org:
name_key = UserInviteEmail_ATMA.name_key(org=top_org.name)
else:
name_key = UserInviteEmail_ATMA.name_key()
args = load_template_args(user=user)
item = MailResource(
app_text(name_key), locale_code=user.locale_code, variables=args)
except UndefinedAppText:
"""return no content and 204 no content status"""
return '', 204
return jsonify(subject=item.subject, body=item.body)
@portal.route('/patient-reminder-email/<int:user_id>')
@roles_required([ROLE.ADMIN.value, ROLE.STAFF_ADMIN.value, ROLE.STAFF.value])
@oauth.require_oauth()
def patient_reminder_email(user_id):
"""Patient Reminder Email Content"""
from ..models.qb_status import QB_Status
if user_id:
user = get_user_or_abort(user_id)
else:
user = current_user()
try:
top_org = user.first_top_organization()
if top_org:
name_key = UserReminderEmail_ATMA.name_key(org=top_org.name)
else:
name_key = UserReminderEmail_ATMA.name_key()
# If the user has a pending questionnaire bank, include for due date
qstats = QB_Status(user, as_of_date=datetime.utcnow())
qbd = qstats.current_qbd()
if qbd:
qb_id, qb_iteration = qbd.qb_id, qbd.iteration
else:
qb_id, qb_iteration = None, None
args = load_template_args(
user=user, questionnaire_bank_id=qb_id, qb_iteration=qb_iteration)
item = MailResource(
app_text(name_key), locale_code=user.locale_code, variables=args)
except UndefinedAppText:
"""return no content and 204 no content status"""
return '', 204
return jsonify(subject=item.subject, body=item.body)
@portal.route('/explore')
def explore():
user = current_user()
"""Explore TrueNTH page"""
return render_template('explore.html', user=user)
@portal.route('/share-your-story')
@portal.route('/shareyourstory')
@portal.route('/shareYourStory')
def share_story():
return redirect(
url_for('static', filename='files/LivedExperienceVideo.pdf'))
@portal.route('/robots.txt')
def robots():
if current_app.config["SYSTEM_TYPE"].lower() == "production":
return "User-agent: * \nAllow: /"
return "User-agent: * \nDisallow: /"
@portal.route('/contact/<int:message_id>')
def contact_sent(message_id):
"""show invite sent"""
message = EmailMessage.query.get(message_id)
if not message:
abort(404, "Message not found")
return render_template('contact_sent.html', message=message)
@portal.route('/psa-tracker')
def psa_tracker():
user = current_user()
if user:
user.check_role("edit", other_id=user.id)
return render_template('psa_tracker.html', user=current_user())
class SettingsForm(FlaskForm):
timeout = IntegerField(
'Session Timeout for This Web Browser (in seconds)',
validators=[validators.DataRequired()])
patient_id = IntegerField(
'Patient to edit', validators=[validators.optional()])
timestamp = StringField(
"Datetime string for patient's questionnaire_responses, "
"format YYYY-MM-DD")
import_orgs = BooleanField('Import Organizations from Site Persistence')
@portal.route('/settings', methods=['GET', 'POST'])
@roles_required(ROLE.ADMIN.value)
@oauth.require_oauth()
def settings():
"""settings panel for admins"""
# load all top level orgs and consent agreements
user = current_user()
organization_consents = Organization.consent_agreements(
locale_code=user.locale_code)
# load all app text values - expand when possible
apptext = {}
for a in AppText.query.all():
try:
# expand strings with just config values, such as LR
apptext[a.name] = app_text(a.name)
except ValueError:
# lack context to expand, show with format strings
apptext[a.name] = a.custom_text
default_timeout = current_app.config['DEFAULT_INACTIVITY_TIMEOUT']
current_timeout = request.cookies.get("SS_TIMEOUT", default_timeout)
form = SettingsForm(request.form, timeout=current_timeout)
if not form.validate_on_submit():
return render_template(
'settings.html',
form=form,
apptext=apptext,
organization_consents=organization_consents,
wide_container="true")
if form.import_orgs.data:
from ..config.model_persistence import ModelPersistence
establish_trace("Initiate import...")
try:
org_persistence = ModelPersistence(
model_class=Organization, sequence_name='organizations_id_seq',
lookup_field='id')
org_persistence.import_(keep_unmentioned=False, target_dir=None)
except ValueError as e:
trace("IMPORT ERROR: {}".format(e))
# Purge cached data and reload.
OrgTree().invalidate_cache()
organization_consents = Organization.consent_agreements(
locale_code=user.locale_code)
if form.patient_id.data and form.timestamp.data:
patient = get_user_or_abort(form.patient_id.data)
try:
dt = FHIR_datetime.parse(form.timestamp.data)
for qnr in QuestionnaireResponse.query.filter_by(
subject_id=patient.id):
qnr.authored = dt
document = qnr.document
document['authored'] = FHIR_datetime.as_fhir(dt)
# Due to the infancy of JSON support in POSTGRES and SQLAlchemy
# one must force the update to get a JSON field change to stick
db.session.query(QuestionnaireResponse).filter(
QuestionnaireResponse.id == qnr.id
).update({"document": document})
db.session.commit()
invalidate_users_QBT(patient.id)
except ValueError as e:
trace("Invalid date format {}".format(form.timestamp.data))
trace("ERROR: {}".format(e))
response = make_response(render_template(
'settings.html',
form=form,
apptext=apptext,
organization_consents=organization_consents,
trace_data=dump_trace(),
wide_container="true"))
# Only retain custom timeout if set different from default
if form.timeout.data != default_timeout:
if form.timeout.data > current_app.config.get(
'PERMANENT_SESSION_LIFETIME'):
abort(400, "Inactivity timeout value can't exceed"
" PERMANENT_SESSION_LIFETIME")
# set cookie max_age to 5 years for config retention
max_age = 60 * 60 * 24 * 365 * 5
response.set_cookie(
'SS_TIMEOUT', str(form.timeout.data), max_age=max_age)
return response
@portal.route('/api/settings', defaults={'config_key': None})
@portal.route('/api/settings/<string:config_key>')
def config_settings(config_key):
    # return selective keys - not all can be viewed by users, e.g. secret key
config_prefix_whitelist = (
'ACCEPT_TERMS_ON_NEXT_ORG',
'CONSENT',
'COPYRIGHT',
'GIL',
'LOCALIZED_AFFILIATE_ORG',
'LR_',
'MAINTENANCE_',
'PROTECTED_FIELDS',
'PROTECTED_ORG',
'PATIENT_LIST_ADDL_FIELDS',
'REQUIRED_CORE_DATA',
'PRE_REGISTERED_ROLES',
'SHOW_PROFILE_MACROS',
'SYSTEM',
)
if config_key:
key = config_key.upper()
if not any(
key.startswith(prefix) for prefix in config_prefix_whitelist
):
abort(400, "Configuration key '{}' not available".format(key))
return jsonify({key: current_app.config.get(key)})
config_settings = {}
    # return selective keys - not all can be viewed by users, e.g. secret key
for key in current_app.config:
if any(key.startswith(prefix) for prefix in config_prefix_whitelist):
config_settings[key] = current_app.config.get(key)
return jsonify(config_settings)
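# Usage sketch (illustrative; the CONSENT key name is a guess at a
# whitelisted value):
#
#   GET /api/settings                    -> all whitelisted key/value pairs
#   GET /api/settings/CONSENT_AGREEMENT  -> just that key's value
#   GET /api/settings/SECRET_KEY         -> 400, key not whitelisted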
@portal.route('/research')
@roles_required([ROLE.RESEARCHER.value])
@oauth.require_oauth()
def research_dashboard():
"""Research Dashboard
Only accessible to those with the Researcher role.
"""
return render_template('research.html', user=current_user())
@portal.route('/spec')
@crossdomain(origin='*')
def spec():
"""generate swagger friendly docs from code and comments
View function to generate swagger formatted JSON for API
documentation. Pulls in a few high level values from the
package data (see setup.py) and via flask-swagger, makes
use of any yaml comment syntax found in application docstrings.
Point Swagger-UI to this view for rendering
"""
swag = swagger(current_app)
metadata = current_app.config.metadata
swag.update({
"info": {
"version": metadata['version'],
"title": metadata['summary'],
"termsOfService": metadata['home-page'],
"contact": {
"name": metadata['author'],
"email": metadata['author-email'],
"url": metadata['home-page'],
},
},
"schemes": (current_app.config['PREFERRED_URL_SCHEME'],),
"securityDefinitions": {
"ServiceToken": {
"type": "apiKey",
"name": "Authorization",
"in": "header",
},
"OAuth2AuthzFlow": {
"type": "oauth2",
"authorizationUrl": url_for('auth.authorize', _external=True),
"tokenUrl": url_for('auth.access_token', _external=True),
"flow": "accessCode",
"scopes": {},
}
},
})
# Todo: figure out why description isn't always set
if metadata.get('description'):
swag["info"]["description"] = metadata.get('description').strip()
# Fix swagger docs for paths with duplicate operationIds
# Dict of offending routes (path and method), grouped by operationId
operations = {}
for path, path_options in swag['paths'].items():
for method, route in path_options.items():
if 'operationId' not in route:
continue
operation_id = route['operationId']
operations.setdefault(operation_id, [])
operations[operation_id].append({'path': path, 'method': method})
# Alter route-specific swagger info (using operations dict) to prevent
# non-unique operationId
for operation_id, routes in operations.items():
if len(routes) == 1:
continue
for route_info in routes:
path = route_info['path']
method = route_info['method']
route = swag['paths'][path][method]
parameters = []
# Remove swagger path parameters from routes where it is optional
for parameter in route.pop('parameters', ()):
if parameter['in'] == 'path' and (
"{%s}" % parameter['name']) not in path:
# Prevent duplicate operationIds by adding suffix
# Assume "simple" version of API route if path parameter
# included but not in path
swag['paths'][path][method][
'operationId'] = "{}-simple".format(operation_id)
continue
parameters.append(parameter)
# Overwrite old parameter list
if parameters:
swag['paths'][path][method]['parameters'] = parameters
# Add method as suffix to prevent duplicate operationIds on
# synonymous routes
if method == 'put' or method == 'post':
swag['paths'][path][method]['operationId'] = "{}-{}".format(
operation_id, method)
return jsonify(swag)
@portal.route("/celery-test")
def celery_test(x=16, y=16):
"""Simple view to test asynchronous tasks via celery"""
from ..tasks import add
x = int(request.args.get("x", x))
y = int(request.args.get("y", y))
# Don't queue up a bunch of test tasks; expire if not responsive
res = add.apply_async((x, y), expires=2.0)
return jsonify(result=res.get(), task_id=res.task_id)
@portal.route("/celery-info")
def celery_info():
from ..tasks import info
res = info.apply_async()
return jsonify(result=res.get(), task_id=res.task_id)
@portal.route("/task/<task_id>")
@oauth.require_oauth()
def task_result(task_id):
"""Present result from any given (celery) task
NB actual formatting and permission checks handled by
``format_task_output`` - see for details.
:param task_id: original celery task identifier
:return: formatted as defined by the job results
"""
celery = create_celery(current_app)
task = AsyncResult(task_id, app=celery)
try:
retval = task.get(timeout=2)
except TimeoutError:
retval = "Operation timed out; task incomplete"
except Exception as e:
retval = e
if isinstance(retval, Exception):
return jsonify(
status=task.status, traceback=task.traceback, error=str(retval))
return format_task_output(retval)
def format_task_output(result):
"""Format background task result
Present result from a background task as defined within the serialized
data.
NB to protect any task result that requires protection include
    ``required_user_id`` or ``required_roles`` in result dictionary.
:param result: dictionary defining what and how to present job output,
as generated by the background task.
Expected dictionary keys include::
:required_user_id: if defined, *ONLY* said user can view the result
:required_roles: if defined (list of role_names), *ONLY* users with
one of the given role names can view the result
:response_format: with values such as ``csv`` or ``json``
:data: actual data to be included
:return: HTTP Response appropriate for given job result.
"""
if not isinstance(result, dict):
# basic task w/o any security or formatting details
# return simple result representation
return repr(result)
def check_permission(user, required_user_id, required_roles):
"""If required_user or required_roles are defined, confirm match
:raises Unauthorized: if check fails
"""
if required_user_id and user.id != required_user_id:
abort(401, "protected task result not available")
required_role_found = False
for role_name in required_roles:
if user.has_role(role_name):
required_role_found = True
break
if required_roles and not required_role_found:
abort(401, "protected task result not available")
response_format = result.get('response_format')
data = result.get('data', [])
check_permission(
user=current_user(),
required_user_id=result.get('required_user_id'),
required_roles=result.get('required_roles', []))
if response_format == 'csv':
def gen(items):
yield ','.join(column_headers) + '\n' # header row
for i in items:
yield ','.join(
['"{}"'.format(i.get(k, "")) for k in column_headers]
) + '\n'
column_headers = result.get('column_headers', [])
filename_prefix = result.get('filename_prefix', 'report')
filename = '{}-{}.csv'.format(
filename_prefix, strftime('%Y_%m_%d-%H_%M'))
return Response(gen(data), headers={
'Content-Disposition': 'attachment;filename={}'.format(filename),
'Content-type': "text/csv"})
elif response_format == 'json':
if hasattr(data, 'get') and data.get(
'resourceType', None) == 'Bundle':
return jsonify(data)
return jsonify(bundle_results(elements=data))
else:
abort(400, "unsupported response_format: '{}'".format(
response_format))
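# Illustrative example (all values invented): a background task wanting its
# output offered as a CSV download could return a dictionary such as
#
#   {
#       'required_roles': ['admin'],
#       'response_format': 'csv',
#       'filename_prefix': 'adherence',
#       'column_headers': ['user_id', 'status'],
#       'data': [{'user_id': 101, 'status': 'completed'}],
#   }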
@portal.route("/task/<task_id>/status")
def task_status(task_id):
"""Present known status details for any given celery task
See also ``task_result`` to obtain task output tailored to original
request parameters.
:param task_id: Original task identifier
:return: formatted details in JSON
"""
celery = create_celery(current_app)
task = AsyncResult(task_id, app=celery)
response = {'state': task.state}
if getattr(task, 'info') and hasattr(task.info, 'items'):
response.update(task.info.items())
# Don't include 'data' if available, as only the result
# function includes vital security checks
response.pop('data', None)
return jsonify(response)
@portal.route('/communicate/preview/<int:comm_id>')
@roles_required([ROLE.ADMIN.value])
@oauth.require_oauth()
def preview_communication(comm_id):
"""Communication message preview"""
comm = Communication.query.get(comm_id)
if not comm:
abort(404, "no communication found for id `{}`".format(comm_id))
preview = comm.preview()
return jsonify(subject=preview.subject, body=preview.body,
recipients=preview.recipients)
@portal.route("/communicate/<email_or_id>")
@roles_required(ROLE.ADMIN.value)
@oauth.require_oauth()
def communicate(email_or_id):
"""Direct call to trigger communications to given user.
Typically handled by scheduled jobs, this API enables testing of
communications without the wait.
Include a `force=True` query string parameter to first invalidate the cache
and look for fresh messages before triggering the send.
Include a `purge=True` query string parameter to throw out existing
communications for the user first, thus forcing a resend (implies a force)
Include a `trace=True` query string parameter to get details found during
processing - like a debug trace.
"""
from ..tasks import send_user_messages
try:
uid = int(email_or_id)
u = User.query.get(uid)
except ValueError:
u = User.query.filter(User.email == email_or_id).first()
if not u:
message = 'no such user'
elif u.deleted_id:
        message = 'deleted user - not allowed'
else:
purge = request.args.get('purge', False)
if purge in ('', '0', 'false', 'False'):
purge = False
force = request.args.get('force', purge)
if force in ('', '0', 'false', 'False'):
force = False
trace = request.args.get('trace', False)
if trace:
establish_trace("BEGIN trace for communicate on {}".format(u))
if purge:
Communication.query.filter_by(user_id=u.id).delete()
try:
message = send_user_messages(u, force)
except ValueError as ve:
message = "ERROR {}".format(ve)
if trace:
message = dump_trace(message)
return jsonify(message=message)
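# Usage sketch (illustrative, address invented):
#
#   GET /communicate/[email protected]?purge=True&trace=True
#
# drops the patient's existing Communication rows, regenerates and sends the
# pending messages, and returns the debug trace with the outcome message.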
@portal.route("/post-result/<task_id>")
def post_result(task_id):
celery = create_celery(current_app)
r = AsyncResult(task_id, app=celery).get(timeout=1.0)
return jsonify(status_code=r.status_code, url=r.url, text=r.text)
@portal.route("/legal/stock-org-consent/<org_name>")
def stock_consent(org_name):
"""Simple view to render default consent with named organization
We generally store the unique URL pointing to the content of the agreement
to which the user consents. Special case for organizations without a
custom consent agreement on file.
:param org_name: the org_name to include in the agreement text
"""
body = _("I consent to sharing information with %(org_name)s",
org_name=_(org_name))
return render_template_string(
"""<!doctype html>
<html>
<head>
</head>
<body>
<p>{{ body }}</p>
</body>
</html>""",
body=body)
def get_asset(uuid):
url = "{}/c/portal/truenth/asset/detailed".format(
current_app.config["LR_ORIGIN"])
return requests.get(url, params={'uuid': uuid}).json()['asset']
def get_any_tag_data(*anyTags):
""" query LR based on any tags
this is an OR condition; will match any tag specified
:param anyTag: a variable number of tags to be queried,
e.g., 'tag1', 'tag2'
"""
# NOTE: need to convert tags to format: anyTags=tag1&anyTags=tag2...
liferay_qs_params = {
'anyTags': anyTags,
'sort': 'true',
'sortType': 'DESC'
}
url = "{}/c/portal/truenth/asset/query".format(
current_app.config["LR_ORIGIN"])
return requests.get(url, params=liferay_qs_params).json()
def get_all_tag_data(*allTags):
""" query LR based on all required tags
this is an AND condition; all required tags must be present
:param allTags: variable number of tags to be queried,
e.g., 'tag1', 'tag2'
"""
# NOTE: need to convert tags to format: allTags=tag1&allTags=tag2...
liferay_qs_params = {
'allTags': allTags,
'sort': 'true',
'sortType': 'DESC'
}
url = "{}/c/portal/truenth/asset/query".format(
current_app.config["LR_ORIGIN"])
return requests.get(url, params=liferay_qs_params).json()
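# Usage sketch (illustrative, tag names invented):
#
#   get_any_tag_data('exercise', 'diet')   # OR  - assets carrying either tag
#   get_all_tag_data('exercise', 'diet')   # AND - assets carrying both tags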
| bsd-3-clause | -3,423,425,466,594,004,000 | 34.954233 | 79 | 0.634229 | false |
datapythonista/pandas | pandas/tests/frame/methods/test_set_index.py | 2 | 25979 | """
See also: test_reindex.py:TestReindexSetIndex
"""
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
date_range,
period_range,
to_datetime,
)
import pandas._testing as tm
class TestSetIndex:
def test_set_index_multiindex(self):
# segfault in GH#3308
d = {"t1": [2, 2.5, 3], "t2": [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df["tuples"] = tuples
index = MultiIndex.from_tuples(df["tuples"])
# it works!
df.set_index(index)
def test_set_index_empty_column(self):
# GH#1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=["a", "m", "p", "x"],
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_index_empty_dataframe(self):
# GH#38419
df1 = DataFrame(
{"a": Series(dtype="datetime64[ns]"), "b": Series(dtype="int64"), "c": []}
)
df2 = df1.set_index(["a", "b"])
result = df2.index.to_frame().dtypes
expected = df1[["a", "b"]].dtypes
tm.assert_series_equal(result, expected)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_timezone(self):
# GH#12358
# tz-aware Series should retain the tz
idx = DatetimeIndex(["2014-01-01 10:10:10"], tz="UTC").tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]},
index=Index(di, name="index"),
)
exp.index = exp.index._with_freq(None)
tm.assert_frame_equal(res, exp)
# GH#12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = "name"
assert df.set_index(df.index).index.names == ["name"]
mi = MultiIndex.from_arrays(df[["A", "B"]].T.values, names=["A", "B"])
mi2 = MultiIndex.from_arrays(
df[["A", "B", "A", "B"]].T.values, names=["A", "B", "C", "D"]
)
df = df.set_index(["A", "B"])
assert df.set_index(df.index).index.names == ["A", "B"]
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
idx2 = df.index.rename(["C", "D"])
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
return_value = result.set_index(keys, drop=drop, inplace=True)
assert return_value is None
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH#1590
df = DataFrame({"val": [0, 1, 2], "key": ["a", "b", "c"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# plain == would give ambiguous Boolean error for containers
first_drop = (
False
if (
isinstance(keys[0], str)
and keys[0] == "A"
and isinstance(keys[1], str)
and keys[1] == "A"
)
else drop
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_preserve_categorical_dtype(self):
# GH#13743, GH#13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
for cols in ["C1", "C2", ["A", "C1"], ["A", "C2"], ["C1", "C2"]]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
def test_set_index_datetime(self):
# GH#3950
df = DataFrame(
{
"label": ["a", "a", "a", "b", "b", "b"],
"datetime": [
"2011-07-19 07:00:00",
"2011-07-19 08:00:00",
"2011-07-19 09:00:00",
"2011-07-19 07:00:00",
"2011-07-19 08:00:00",
"2011-07-19 09:00:00",
],
"value": range(6),
}
)
df.index = to_datetime(df.pop("datetime"), utc=True)
df.index = df.index.tz_convert("US/Pacific")
expected = DatetimeIndex(
["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"],
name="datetime",
)
expected = expected.tz_localize("UTC").tz_convert("US/Pacific")
df = df.set_index("label", append=True)
tm.assert_index_equal(df.index.levels[0], expected)
tm.assert_index_equal(df.index.levels[1], Index(["a", "b"], name="label"))
assert df.index.names == ["datetime", "label"]
df = df.swaplevel(0, 1)
tm.assert_index_equal(df.index.levels[0], Index(["a", "b"], name="label"))
tm.assert_index_equal(df.index.levels[1], expected)
assert df.index.names == ["label", "datetime"]
df = DataFrame(np.random.random(6))
idx1 = DatetimeIndex(
[
"2011-07-19 07:00:00",
"2011-07-19 08:00:00",
"2011-07-19 09:00:00",
"2011-07-19 07:00:00",
"2011-07-19 08:00:00",
"2011-07-19 09:00:00",
],
tz="US/Eastern",
)
idx2 = DatetimeIndex(
[
"2012-04-01 09:00",
"2012-04-01 09:00",
"2012-04-01 09:00",
"2012-04-02 09:00",
"2012-04-02 09:00",
"2012-04-02 09:00",
],
tz="US/Eastern",
)
idx3 = date_range("2011-01-01 09:00", periods=6, tz="Asia/Tokyo")
idx3 = idx3._with_freq(None)
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = DatetimeIndex(
["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"],
tz="US/Eastern",
)
expected2 = DatetimeIndex(
["2012-04-01 09:00", "2012-04-02 09:00"], tz="US/Eastern"
)
tm.assert_index_equal(df.index.levels[0], expected1)
tm.assert_index_equal(df.index.levels[1], expected2)
tm.assert_index_equal(df.index.levels[2], idx3)
# GH#7092
tm.assert_index_equal(df.index.get_level_values(0), idx1)
tm.assert_index_equal(df.index.get_level_values(1), idx2)
tm.assert_index_equal(df.index.get_level_values(2), idx3)
def test_set_index_period(self):
# GH#6631
df = DataFrame(np.random.random(6))
idx1 = period_range("2011-01-01", periods=3, freq="M")
idx1 = idx1.append(idx1)
idx2 = period_range("2013-01-01 09:00", periods=2, freq="H")
idx2 = idx2.append(idx2).append(idx2)
idx3 = period_range("2005", periods=6, freq="A")
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = period_range("2011-01-01", periods=3, freq="M")
expected2 = period_range("2013-01-01 09:00", periods=2, freq="H")
tm.assert_index_equal(df.index.levels[0], expected1)
tm.assert_index_equal(df.index.levels[1], expected2)
tm.assert_index_equal(df.index.levels[2], idx3)
tm.assert_index_equal(df.index.get_level_values(0), idx1)
tm.assert_index_equal(df.index.get_level_values(1), idx2)
tm.assert_index_equal(df.index.get_level_values(2), idx3)
class TestSetIndexInvalid:
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
class TestSetIndexCustomLabelType:
def test_set_index_custom_label_type(self):
# GH#24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self) -> str:
return f"<Thing {repr(self.name)}>"
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH#24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self) -> str:
tmp = sorted(self)
joined_reprs = ", ".join(map(repr, tmp))
# double curly brace prints one brace in format string
return f"frozenset({{{joined_reprs}}})"
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH#24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self) -> str:
return f"<Thing {repr(self.name)}>"
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_set_index_periodindex(self):
# GH#6631
df = DataFrame(np.random.random(6))
idx1 = period_range("2011/01/01", periods=6, freq="M")
idx2 = period_range("2013", periods=6, freq="A")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.set_index(idx2)
tm.assert_index_equal(df.index, idx2)
def test_drop_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
df = DataFrame({"a": [1, 2, 3]})
msg = (
r"In a future version of pandas all arguments of DataFrame\.set_index "
r"except for the argument 'keys' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = df.set_index("a", True)
expected = DataFrame(index=Index([1, 2, 3], name="a"))
tm.assert_frame_equal(result, expected)
| bsd-3-clause | -3,370,677,243,398,780,400 | 35.182451 | 88 | 0.549752 | false |
carpyncho/feets | feets/extractors/ext_q31.py | 1 | 3876 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
""""""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from .core import Extractor
# =============================================================================
# EXTRACTOR CLASS
# =============================================================================
class Q31(Extractor):
r"""
**Q31** (:math:`Q_{3-1}`)
:math:`Q_{3-1}` is the difference between the third quartile, :math:`Q_3`,
and the first quartile, :math:`Q_1`, of a raw light curve.
:math:`Q_1` is a split between the lowest 25% and the highest 75% of data.
:math:`Q_3` is a split between the lowest 75% and the highest 25% of data.
.. code-block:: pycon
>>> fs = feets.FeatureSpace(only=['Q31'])
>>> features, values = fs.extract(**lc_normal)
>>> dict(zip(features, values))
{'Q31': 1.3320376563134508}
References
----------
.. [kim2014epoch] Kim, D. W., Protopapas, P., Bailer-Jones, C. A.,
Byun, Y. I., Chang, S. W., Marquette, J. B., & Shin, M. S. (2014).
The EPOCH Project: I. Periodic Variable Stars in the EROS-2 LMC
Database. arXiv preprint Doi:10.1051/0004-6361/201323252.
"""
data = ["magnitude"]
features = ["Q31"]
def fit(self, magnitude):
q31 = np.percentile(magnitude, 75) - np.percentile(magnitude, 25)
return {"Q31": q31}
class Q31Color(Extractor):
r"""
**Q31_color** (:math:`Q_{3-1|B-R}`)
:math:`Q_{3-1}` applied to the difference between both bands of a light
curve (B-R).
.. code-block:: pycon
>>> fs = feets.FeatureSpace(only=['Q31_color'])
>>> features, values = fs.extract(**lc_normal)
>>> dict(zip(features, values))
{'Q31_color': 1.8840489594535512}
References
----------
.. [kim2014epoch] Kim, D. W., Protopapas, P., Bailer-Jones, C. A.,
Byun, Y. I., Chang, S. W., Marquette, J. B., & Shin, M. S. (2014).
The EPOCH Project: I. Periodic Variable Stars in the EROS-2 LMC
Database. arXiv preprint Doi:10.1051/0004-6361/201323252.
"""
data = ["aligned_magnitude", "aligned_magnitude2"]
features = ["Q31_color"]
def fit(self, aligned_magnitude, aligned_magnitude2):
N = len(aligned_magnitude)
b_r = aligned_magnitude[:N] - aligned_magnitude2[:N]
q31_color = np.percentile(b_r, 75) - np.percentile(b_r, 25)
return {"Q31_color": q31_color}
| mit | -5,410,018,966,500,149,000 | 33.300885 | 79 | 0.566563 | false |
danylaksono/inasafe | safe_qgis/impact_statistics/test/test_postprocessor_manager.py | 1 | 5425 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **GUI Test Cases.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '19/05/2013'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import unittest
import sys
import os
import logging
from os.path import join
# Add PARENT directory to path to make test aware of other modules
pardir = os.path.abspath(join(os.path.dirname(__file__), '..'))
sys.path.append(pardir)
#for p in sys.path:
# print p + '\n'
# this import required to enable PyQt API v2
import qgis # pylint: disable=W0611
#from qgis.core import QgsMapLayerRegistry
from safe_qgis.utilities.utilities_for_testing import (
get_qgis_app,
set_canvas_crs,
set_jakarta_extent,
GEOCRS)
from safe_qgis.widgets.dock import Dock
from safe_qgis.utilities.utilities_for_testing import (
load_standard_layers,
setup_scenario,
canvas_list)
QGISAPP, CANVAS, IFACE, PARENT = get_qgis_app()
DOCK = Dock(IFACE)
LOGGER = logging.getLogger('InaSAFE')
#noinspection PyArgumentList
class PostprocessorManagerTest(unittest.TestCase):
"""Test the postprocessor manager"""
def setUp(self):
"""Fixture run before all tests"""
os.environ['LANG'] = 'en'
DOCK.showOnlyVisibleLayersFlag = True
load_standard_layers()
DOCK.cboHazard.setCurrentIndex(0)
DOCK.cboExposure.setCurrentIndex(0)
DOCK.cboFunction.setCurrentIndex(0)
DOCK.runInThreadFlag = False
DOCK.showOnlyVisibleLayersFlag = False
DOCK.setLayerNameFromTitleFlag = False
DOCK.zoomToImpactFlag = False
DOCK.hideExposureFlag = False
DOCK.showIntermediateLayers = False
set_jakarta_extent()
def test_checkPostProcessingLayersVisibility(self):
"""Generated layers are not added to the map registry."""
# Explicitly disable showing intermediate layers
DOCK.showIntermediateLayers = False
# with KAB_NAME aggregation attribute defined in .keyword using
# kabupaten_jakarta_singlepart.shp
myResult, myMessage = setup_scenario(
DOCK,
hazard='A flood in Jakarta like in 2007',
exposure='People',
function='Need evacuation',
function_id='Flood Evacuation Function',
aggregation_layer='kabupaten jakarta singlepart')
assert myResult, myMessage
#LOGGER.info("Registry list before:\n%s" %
# QgsMapLayerRegistry.instance().mapLayers())
#one layer (the impact) should have been added
myExpectedCount = len(CANVAS.layers()) + 1
#
# # Press RUN
DOCK.accept()
        # no KW dialog will pop up due to complete keywords
myAfterCount = len(CANVAS.layers())
#LOGGER.info("Registry list after:\n%s" %
# QgsMapLayerRegistry.instance().mapLayers())
myMessage = ('Expected %s items in canvas, got %s' %
(myExpectedCount, myAfterCount))
assert myExpectedCount == myAfterCount, myMessage
# Now run again showing intermediate layers
DOCK.showIntermediateLayers = True
# Press RUN
DOCK.accept()
        # no KW dialog will pop up due to complete keywords
#one layer (the impact) should have been added
myExpectedCount += 2
myAfterCount = len(CANVAS.layers())
LOGGER.info("Canvas list after:\n %s" % canvas_list())
myMessage = ('Expected %s items in canvas, got %s' %
(myExpectedCount, myAfterCount))
        # We expect two more since we enabled showing intermediate layers
assert myExpectedCount == myAfterCount, myMessage
def test_postProcessorOutput(self):
"""Check that the post processor does not add spurious report rows."""
# with KAB_NAME aggregation attribute defined in .keyword using
# kabupaten_jakarta_singlepart.shp
myResult, myMessage = setup_scenario(
DOCK,
hazard='A flood in Jakarta like in 2007',
exposure='People',
function='Need evacuation',
function_id='Flood Evacuation Function')
# Enable on-the-fly reprojection
set_canvas_crs(GEOCRS, True)
set_jakarta_extent()
assert myResult, myMessage
# Press RUN
DOCK.accept()
myMessage = 'Spurious 0 filled rows added to post processing report.'
myResult = DOCK.wvResults.page().currentFrame().toPlainText()
for line in myResult.split('\n'):
if 'Entire area' in line:
myTokens = str(line).split('\t')
myTokens = myTokens[1:]
mySum = 0
for myToken in myTokens:
mySum += float(myToken.replace(',', '.'))
assert mySum != 0, myMessage
if __name__ == '__main__':
suite = unittest.makeSuite(PostprocessorManagerTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| gpl-3.0 | 8,593,382,577,701,894,000 | 33.119497 | 78 | 0.640922 | false |
xinghalo/DMInAction | src/tensorflow/recommend/ops.py | 1 | 2022 | import tensorflow as tf
def inference_svd(user_batch, item_batch, user_num, item_num, dim=5, device="/cpu:0"):
with tf.device("/cpu:0"):
bias_global = tf.get_variable("bias_global", shape=[])
w_bias_user = tf.get_variable("embd_bias_user", shape=[user_num])
w_bias_item = tf.get_variable("embd_bias_item", shape=[item_num])
        # embedding_lookup fetches, from w_bias_user, the entries indexed by user_batch
bias_user = tf.nn.embedding_lookup(w_bias_user, user_batch, name="bias_user")
bias_item = tf.nn.embedding_lookup(w_bias_item, item_batch, name="bias_item")
w_user = tf.get_variable("embd_user", shape=[user_num, dim],
initializer=tf.truncated_normal_initializer(stddev=0.02))
w_item = tf.get_variable("embd_item", shape=[item_num, dim],
initializer=tf.truncated_normal_initializer(stddev=0.02))
embd_user = tf.nn.embedding_lookup(w_user, user_batch, name="embedding_user")
embd_item = tf.nn.embedding_lookup(w_item, item_batch, name="embedding_item")
with tf.device(device):
infer = tf.reduce_sum(tf.multiply(embd_user, embd_item), 1)
infer = tf.add(infer, bias_global)
infer = tf.add(infer, bias_user)
infer = tf.add(infer, bias_item, name="svd_inference")
regularizer = tf.add(tf.nn.l2_loss(embd_user), tf.nn.l2_loss(embd_item), name="svd_regularizer")
return infer, regularizer
def optimization(infer, regularizer, rate_batch, learning_rate=0.001, reg=0.1, device="/cpu:0"):
global_step = tf.train.get_global_step()
assert global_step is not None
with tf.device(device):
cost_l2 = tf.nn.l2_loss(tf.subtract(infer, rate_batch))
penalty = tf.constant(reg, dtype=tf.float32, shape=[], name="l2")
cost = tf.add(cost_l2, tf.multiply(regularizer, penalty))
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost, global_step=global_step)
return cost, train_op
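# Rough end-to-end wiring of the two helpers above (illustrative only; the
# placeholder tensors, vocabulary sizes and hyper-parameters are hypothetical):
#   user_batch = tf.placeholder(tf.int32, shape=[None], name="user_id")
#   item_batch = tf.placeholder(tf.int32, shape=[None], name="item_id")
#   rate_batch = tf.placeholder(tf.float32, shape=[None], name="rating")
#   infer, regularizer = inference_svd(user_batch, item_batch,
#                                      user_num=6040, item_num=3952, dim=15)
#   tf.train.get_or_create_global_step()  # optimization() asserts a global step exists
#   cost, train_op = optimization(infer, regularizer, rate_batch,
#                                 learning_rate=0.001, reg=0.05)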
| apache-2.0 | -2,948,722,271,632,747,000 | 56.142857 | 104 | 0.643 | false |
robin900/gspread-dataframe | tests/mock_worksheet.py | 1 | 1986 | import os.path
import json
import re
from gspread.models import Cell
from gspread_dataframe import _cellrepr
def contents_of_file(filename, et_parse=True):
with open(os.path.join(os.path.dirname(__file__), filename), "r") as f:
return json.load(f)
SHEET_CONTENTS_FORMULAS = contents_of_file("sheet_contents_formulas.json")
SHEET_CONTENTS_EVALUATED = contents_of_file("sheet_contents_evaluated.json")
CELL_LIST = [
Cell(row=i + 1, col=j + 1, value=value)
for i, row in enumerate(contents_of_file("cell_list.json"))
for j, value in enumerate(row)
]
CELL_LIST_STRINGIFIED = [
Cell(
row=i + 1,
col=j + 1,
value=_cellrepr(
value,
allow_formulas=True,
string_escaping=re.compile(r"3e50").match,
),
)
for i, row in enumerate(contents_of_file("cell_list.json"))
for j, value in enumerate(row)
]
_without_index = contents_of_file("cell_list.json")
for _r in _without_index:
del _r[0]
CELL_LIST_STRINGIFIED_NO_THINGY = [
Cell(
row=i + 1,
col=j + 1,
value=_cellrepr(
value,
allow_formulas=True,
string_escaping=re.compile(r"3e50").match,
),
)
for i, row in enumerate(_without_index)
for j, value in enumerate(row)
]
class MockWorksheet(object):
def __init__(self):
self.row_count = 10
self.col_count = 10
self.id = "fooby"
self.title = "gspread dataframe test"
self.spreadsheet = MockSpreadsheet()
class MockSpreadsheet(object):
def values_get(self, *args, **kwargs):
if (
kwargs.get("params", {}).get("valueRenderOption")
== "UNFORMATTED_VALUE"
):
return SHEET_CONTENTS_EVALUATED
if kwargs.get("params", {}).get("valueRenderOption") == "FORMULA":
return SHEET_CONTENTS_FORMULAS
if __name__ == "__main__":
from gspread_dataframe import *
ws = MockWorksheet()
| mit | -4,786,977,959,139,163,000 | 24.792208 | 76 | 0.598691 | false |
theonaun/theo_site | app_surgeo/services/rest_api.py | 1 | 2418 | import json
from django.http import HttpResponse
from .calculations import surgeo_model
from .calculations import surname_model
from .calculations import geocode_model
from .calculations import forename_model
from .hmac_utility import verify_message
class RestAPI(object):
'''Takes queries and gets results.'''
@classmethod
def input_query(cls, request):
query_dict = request.GET
# Come back to this and do HMAC
function_dict = {'forename_query': cls.forename_query,
'surgeo_query': cls.surgeo_query,
'geocode_query': cls.geocode_query,
'surname_query': cls.surname_query}
try:
''' TODO DEBUG ONLY... LEAVING HMAC verification off.
query_string = query_dict.urlencode()
truncated_query_string = query_string.partition('&hmac=')[0]
hmac_string = query_dict['hmac']
message_verified = verify_message(request.user,
truncated_query_string,
hmac_string)
if not message_verified:
return HttpResponse('Unauthorized', status=401)
'''
function_string = query_dict['function']
function = function_dict[function_string]
result = function(query_dict)
return result
except Exception:
return False
@classmethod
def forename_query(cls, argument_dict):
forename = argument_dict['forename_input']
result = forename_model(forename)
json_string = json.dumps(dict(result))
return json_string
@classmethod
def surgeo_query(cls, argument_dict):
surname = argument_dict['surname_input']
zcta = argument_dict['zcta_input']
result = surgeo_model(surname, zcta)
json_string = json.dumps(dict(result))
return json_string
@classmethod
def geocode_query(cls, argument_dict):
zcta = argument_dict['zcta_input']
result = geocode_model(zcta)
json_string = json.dumps(dict(result))
return json_string
@classmethod
def surname_query(cls, argument_dict):
surname = argument_dict['surname_input']
result = surname_model(surname)
json_string = json.dumps(dict(result))
return json_string
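# Illustrative query strings dispatched by RestAPI.input_query (values are
# hypothetical; only the parameter names come from the handlers above):
#   ?function=surname_query&surname_input=GARCIA
#   ?function=surgeo_query&surname_input=GARCIA&zcta_input=63144
#   ?function=geocode_query&zcta_input=63144
#   ?function=forename_query&forename_input=MARIA
# Each handler returns a JSON string; input_query returns False if dispatch fails.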
| mit | -262,319,776,951,719,100 | 33.056338 | 72 | 0.593879 | false |
google-research/google-research | gfsa/model/edge_supervision_models_test.py | 1 | 8547 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for gfsa.model.edge_supervision_models."""
import functools
import textwrap
from absl.testing import absltest
from absl.testing import parameterized
import dataclasses
import flax
import gin
import jax
import jax.numpy as jnp
import numpy as np
from gfsa import automaton_builder
from gfsa import sparse_operator
from gfsa.datasets import graph_bundle
from gfsa.model import edge_supervision_models
class EdgeSupervisionModelsTest(parameterized.TestCase):
def setUp(self):
super().setUp()
gin.clear_config()
def test_variants_from_edges(self):
example = graph_bundle.zeros_like_padded_example(
graph_bundle.PaddingConfig(
static_max_metadata=automaton_builder.EncodedGraphMetadata(
num_nodes=5, num_input_tagged_nodes=0),
max_initial_transitions=0,
max_in_tagged_transitions=0,
max_edges=8))
example = dataclasses.replace(
example,
graph_metadata=automaton_builder.EncodedGraphMetadata(
num_nodes=4, num_input_tagged_nodes=0),
edges=sparse_operator.SparseCoordOperator(
input_indices=jnp.array([[0], [0], [0], [1], [1], [2], [0], [0]]),
output_indices=jnp.array([[1, 2], [2, 3], [3, 0], [2, 0], [0, 2],
[0, 3], [0, 0], [0, 0]]),
values=jnp.array([1, 1, 1, 1, 1, 1, 0, 0])))
weights = edge_supervision_models.variants_from_edges(
example,
automaton_builder.EncodedGraphMetadata(
num_nodes=5, num_input_tagged_nodes=0),
variant_edge_type_indices=[2, 0],
num_edge_types=3)
expected = np.array([
[[1, 0, 0], [1, 0, 0], [1, 0, 0], [0, 1, 0]],
[[1, 0, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0]],
[[1, 0, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]],
[[0, 0, 1], [1, 0, 0], [1, 0, 0], [1, 0, 0]],
], np.float32)
# Only assert on the non-padded part.
np.testing.assert_allclose(weights[:4, :4], expected)
def test_ggtnn_steps(self):
gin.parse_config(
textwrap.dedent("""\
edge_supervision_models.ggnn_steps.iterations = 10
graph_layers.LinearMessagePassing.message_dim = 5
"""))
_, params = edge_supervision_models.ggnn_steps.init(
jax.random.PRNGKey(0),
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32))
# This component should only contain one step block, with two sublayers.
self.assertEqual(set(params.keys()), {"step"})
self.assertLen(params["step"], 2)
# Gradients should work.
outs, vjpfun = jax.vjp(
functools.partial(
edge_supervision_models.ggnn_steps.call,
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32)),
params,
)
vjpfun(outs)
@parameterized.named_parameters(
{
"testcase_name":
"shared",
"expected_block_count":
1,
"config":
textwrap.dedent("""\
transformer_steps.layers = 3
transformer_steps.share_weights = True
transformer_steps.mask_to_neighbors = False
NodeSelfAttention.heads = 2
NodeSelfAttention.query_key_dim = 3
NodeSelfAttention.value_dim = 4
"""),
}, {
"testcase_name":
"unshared",
"expected_block_count":
3,
"config":
textwrap.dedent("""\
transformer_steps.layers = 3
transformer_steps.share_weights = False
transformer_steps.mask_to_neighbors = False
NodeSelfAttention.heads = 2
NodeSelfAttention.query_key_dim = 3
NodeSelfAttention.value_dim = 4
"""),
}, {
"testcase_name":
"shared_masked",
"expected_block_count":
1,
"config":
textwrap.dedent("""\
transformer_steps.layers = 3
transformer_steps.share_weights = True
transformer_steps.mask_to_neighbors = True
NodeSelfAttention.heads = 2
NodeSelfAttention.query_key_dim = 3
NodeSelfAttention.value_dim = 4
"""),
})
def test_transformer_steps(self, config, expected_block_count):
gin.parse_config(config)
_, params = edge_supervision_models.transformer_steps.init(
jax.random.PRNGKey(0),
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32),
neighbor_mask=jnp.zeros((5, 5), jnp.float32),
num_real_nodes_per_graph=4)
# This component should contain the right number of blocks.
self.assertLen(params, expected_block_count)
for block in params.values():
# Each block contains 4 sublayers.
self.assertLen(block, 4)
# Gradients should work.
outs, vjpfun = jax.vjp(
functools.partial(
edge_supervision_models.transformer_steps.call,
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32),
neighbor_mask=jnp.zeros((5, 5), jnp.float32),
num_real_nodes_per_graph=4),
params,
)
vjpfun(outs)
def test_transformer_steps_masking(self):
"""Transformer should mask out padding even if not masked to neigbors."""
gin.parse_config(
textwrap.dedent("""\
transformer_steps.layers = 1
transformer_steps.share_weights = False
transformer_steps.mask_to_neighbors = False
NodeSelfAttention.heads = 2
NodeSelfAttention.query_key_dim = 3
NodeSelfAttention.value_dim = 4
"""))
with flax.nn.capture_module_outputs() as outputs:
edge_supervision_models.transformer_steps.init(
jax.random.PRNGKey(0),
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32),
neighbor_mask=jnp.zeros((5, 5), jnp.float32),
num_real_nodes_per_graph=4)
attention_weights, = (v[0]
for k, v in outputs.as_dict().items()
if k.endswith("attend/attention_weights"))
expected = np.array([[[0.25, 0.25, 0.25, 0.25, 0.0]] * 5] * 2)
np.testing.assert_allclose(attention_weights, expected)
def test_nri_steps(self):
gin.parse_config(
textwrap.dedent("""\
graph_layers.NRIEdgeLayer.allow_non_adjacent = True
graph_layers.NRIEdgeLayer.mlp_vtoe_dims = [4, 4]
nri_steps.mlp_etov_dims = [8, 8]
nri_steps.with_residual_layer_norm = True
nri_steps.layers = 3
"""))
_, params = edge_supervision_models.nri_steps.init(
jax.random.PRNGKey(0),
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32),
num_real_nodes_per_graph=4)
# This component should contain the right number of blocks.
self.assertLen(params, 3)
for block in params.values():
# Each block contains 5 sublayers:
# - NRI message pass
# - Three dense layers (from mlp_etov_dims, then back to embedding space)
# - Layer norm
self.assertLen(block, 5)
# Gradients should work.
outs, vjpfun = jax.vjp(
functools.partial(
edge_supervision_models.nri_steps.call,
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32),
num_real_nodes_per_graph=4),
params,
)
vjpfun(outs)
if __name__ == "__main__":
absltest.main()
| apache-2.0 | 8,043,755,052,644,149,000 | 35.216102 | 79 | 0.583246 | false |
kodi-czsk/plugin.video.online-files | resources/lib/fastshare.py | 1 | 5924 | # -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2012 Libor Zoubek
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,urllib,urllib2,cookielib,random,util,sys,os,traceback
from provider import ContentProvider
from provider import ResolveException
class FastshareContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None,tmp_dir='.'):
ContentProvider.__init__(self,'fastshare.cz','http://www.fastshare.cz/',username,password,filter,tmp_dir)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['search','resolve']
def search(self,keyword):
return self.list('?term='+urllib.quote(keyword))
def list(self,url):
result = []
page = util.request(self._url(url))
data = util.substr(page,'<div class=\"search','<footer')
for m in re.finditer('<div class=\"search-result-box(.+?)</a>',data,re.IGNORECASE | re.DOTALL ):
it = m.group(1)
link = re.search('<a href=([^ ]+)',it,re.IGNORECASE | re.DOTALL)
name = re.search('title=\"([^\"]+)',it,re.IGNORECASE | re.DOTALL)
img = re.search('<img src=\"([^\"]+)',it,re.IGNORECASE | re.DOTALL)
size = re.search('<div class=\"fs\">([^<]+)',it,re.IGNORECASE | re.DOTALL)
time = re.search('<div class=\"vd\">([^<]+)',it,re.IGNORECASE | re.DOTALL)
if name and link:
item = self.video_item()
item['title'] = name.group(1)
if size:
item['size'] = size.group(1).strip()
if time:
item['length'] = time.group(1).strip()
item['url'] = self._url(link.group(1))
item['img'] = self._url(img.group(1))
self._filter(result,item)
next = re.search('<a href=\"(?P<url>[^\"]+)[^>]+>dal',data,re.IGNORECASE | re.DOTALL)
if next:
item = self.dir_item()
item['type'] = 'next'
item['url'] = next.group('url')
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
util.init_urllib()
url = self._url(item['url'])
page = ''
try:
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.UnknownHandler())
urllib2.install_opener(opener)
request = urllib2.Request(url)
request.add_header('User-Agent',util.UA)
response= urllib2.urlopen(request)
page = response.read()
response.close()
except urllib2.HTTPError, e:
traceback.print_exc()
return
data = util.substr(page,'<form method=post target=\"iframe_dwn\"','</form>')
action = re.search('action=(?P<url>[^>]+)',data,re.IGNORECASE | re.DOTALL)
img = re.search('<img src=\"(?P<url>[^\"]+)',data,re.IGNORECASE | re.DOTALL)
if img and action:
sessid=[]
for cookie in re.finditer('(PHPSESSID=[^\;]+)',response.headers.get('Set-Cookie'),re.IGNORECASE | re.DOTALL):
sessid.append(cookie.group(1))
# we have to download image ourselves
image = util.request(self._url(img.group('url')),headers={'Referer':url,'Cookie':sessid[-1]})
img_file = os.path.join(self.tmp_dir,'captcha.png')
util.save_data_to_file(image,img_file)
code = None
if captcha_cb:
code = captcha_cb({'id':'0','img':img_file})
if not code:
self.info('No captcha received, exit')
return
request = urllib.urlencode({'code':code})
req = urllib2.Request(self._url(action.group('url')),request)
req.add_header('User-Agent',util.UA)
req.add_header('Referer',url)
req.add_header('Cookie',sessid[-1])
try:
resp = urllib2.urlopen(req)
if resp.code == 302:
file_url = resp.headers.get('location')
else:
file_url = resp.geturl()
if file_url.find(action.group('url')) > 0:
msg = resp.read()
resp.close()
js_msg = re.search('alert\(\'(?P<msg>[^\']+)',msg,re.IGNORECASE | re.DOTALL)
if js_msg:
raise ResolveException(js_msg.group('msg'))
self.error(msg)
raise ResolveException('Nelze ziskat soubor, zkuste to znovu')
resp.close()
if file_url.find('data') >=0 or file_url.find('download_free') > 0:
item['url'] = file_url
return item
self.error('wrong captcha, retrying')
return self.resolve(item,captcha_cb,select_cb)
except urllib2.HTTPError:
traceback.print_exc()
return
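# Minimal usage sketch (illustrative; the keyword, temp directory and captcha
# handling are hypothetical -- in practice the add-on shows the captcha to the user):
#   provider = FastshareContentProvider(tmp_dir='/tmp')
#   items = provider.search('dokument')
#   def captcha_cb(data):
#       # data['img'] is the path of the downloaded captcha image
#       return raw_input('captcha code: ')
#   resolved = provider.resolve(items[0], captcha_cb=captcha_cb)
#   # on success, resolved['url'] points at the downloadable file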
| gpl-2.0 | 3,914,445,946,651,833,000 | 43.878788 | 121 | 0.549629 | false |
ngageoint/scale | scale/recipe/seed/recipe_definition.py | 1 | 21647 | """Defines the class for managing a recipe definition"""
from __future__ import unicode_literals
import json
import os
from django.db.models import Q
from job.configuration.data.exceptions import InvalidConnection
from job.configuration.interface.scale_file import ScaleFileDescription
from job.deprecation import JobConnectionSunset
from job.handlers.inputs.file import FileInput
from job.handlers.inputs.files import FilesInput
from job.handlers.inputs.property import PropertyInput
from job.models import JobType
from job.seed.types import SeedInputFiles, SeedInputJson
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from recipe.configuration.data.exceptions import InvalidRecipeConnection
from recipe.configuration.definition.exceptions import InvalidDefinition
from recipe.handlers.graph import RecipeGraph
DEFAULT_VERSION = '2.0'
SCHEMA_FILENAME = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'schema/recipe_definition_2_0.json')
with open(SCHEMA_FILENAME) as schema_file:
RECIPE_DEFINITION_SCHEMA = json.load(schema_file)
class RecipeDefinition(object):
"""Represents the definition for a recipe. The definition includes the recipe inputs, the jobs that make up the
recipe, and how the inputs and outputs of those jobs are connected together.
"""
def __init__(self, definition):
"""Creates a recipe definition object from the given dictionary. The general format is checked for correctness,
but the actual job details are not checked for correctness.
:param definition: The recipe definition
:type definition: dict
:raises InvalidDefinition: If the given definition is invalid
"""
self._definition = definition
self._input_files_by_name = {} # Name -> `job.seed.types.SeedInputFiles`
self._input_json_by_name = {} # Name -> `job.seed.types.SeedInputJson`
self._jobs_by_name = {} # Name -> job dict
self._property_validation_dict = {} # Property Input name -> required
self._input_file_validation_dict = {} # File Input name -> (required, multiple, file description)
try:
validate(definition, RECIPE_DEFINITION_SCHEMA)
except ValidationError as ex:
raise InvalidDefinition('Invalid recipe definition: %s' % unicode(ex))
self._populate_default_values()
if not self._definition['version'] == DEFAULT_VERSION:
raise InvalidDefinition('%s is an unsupported version number' % self._definition['version'])
for input_file in self._get_input_files():
name = input_file['name']
if name in self._input_files_by_name:
raise InvalidDefinition('Invalid recipe definition: %s is a duplicate input data name' % name)
self._input_files_by_name[name] = SeedInputFiles(input_file)
for input_json in self._get_input_json():
name = input_json['name']
if name in self._input_json_by_name or name in self._input_files_by_name:
raise InvalidDefinition('Invalid recipe definition: %s is a duplicate input data name' % name)
self._input_json_by_name[name] = SeedInputJson(input_json)
for job_dict in self._definition['jobs']:
name = job_dict['name']
if name in self._jobs_by_name:
raise InvalidDefinition('Invalid recipe definition: %s is a duplicate job name' % name)
self._jobs_by_name[name] = job_dict
self._create_validation_dicts()
self._validate_job_dependencies()
self._validate_no_dup_job_inputs()
self._validate_recipe_inputs()
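    # For reference, a minimal definition accepted by this class looks roughly like
    # the sketch below (the job type name/version and input names are hypothetical;
    # the authoritative shape is given by recipe_definition_2_0.json):
    #   {
    #     "version": "2.0",
    #     "inputs": {
    #       "files": [{"name": "INPUT_IMAGE", "mediaTypes": ["image/tiff"], "required": True}],
    #       "json": [{"name": "THRESHOLD", "required": False}]
    #     },
    #     "jobs": [{
    #       "name": "detector",
    #       "job_type": {"name": "my-detector", "version": "1.0.0"},
    #       "recipe_inputs": [{"recipe_input": "INPUT_IMAGE", "job_input": "IMAGE"}],
    #       "dependencies": []
    #     }]
    #   }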
def _get_inputs(self):
return self._definition.get('inputs', {})
def _get_input_files(self):
return self._get_inputs().get('files', {})
def _get_seed_input_files(self):
"""
:return: typed instance of Input Files
:rtype: [:class:`job.seed.types.SeedInputFiles`]
"""
return [SeedInputFiles(x) for x in self._get_input_files()]
def _get_input_json(self):
return self._get_inputs().get('json', {})
def _get_seed_input_json(self):
"""
:return: typed instance of Input JSON
:rtype: [:class:`job.seed.types.SeedInputJson`]
"""
return [SeedInputJson(x) for x in self._get_input_json()]
def get_dict(self):
"""Returns the internal dictionary that represents this recipe definition
:returns: The internal dictionary
:rtype: dict
"""
return self._definition
def get_graph(self):
"""Returns the recipe graph for this definition
:returns: The recipe graph
:rtype: :class:`recipe.handlers.graph.RecipeGraph`
"""
graph = RecipeGraph()
for input_file in self._get_seed_input_files():
if input_file.multiple:
graph_input = FilesInput(input_file.name, input_file.required)
else:
graph_input = FileInput(input_file.name, input_file.required)
graph.add_input(graph_input)
for input_json in self._get_seed_input_json():
graph.add_input(PropertyInput(input_json.name, input_json.required))
for job_name in self._jobs_by_name:
job_dict = self._jobs_by_name[job_name]
job_type = job_dict['job_type']
job_type_name = job_type['name']
job_type_version = job_type['version']
graph.add_job(job_name, job_type_name, job_type_version)
for recipe_input_dict in job_dict['recipe_inputs']:
recipe_input_name = recipe_input_dict['recipe_input']
job_input_name = recipe_input_dict['job_input']
graph.add_recipe_input_connection(recipe_input_name, job_name, job_input_name)
for job_name in self._jobs_by_name:
job_dict = self._jobs_by_name[job_name]
for dependency_dict in job_dict['dependencies']:
dependency_name = dependency_dict['name']
dependency_connections = []
for conn_dict in dependency_dict['connections']:
conn_input = conn_dict['input']
job_output = conn_dict['output']
dependency_connections.append((job_output, conn_input))
graph.add_dependency(dependency_name, job_name, dependency_connections)
return graph
def get_job_types(self, lock=False):
"""Returns a set of job types for each job in the recipe
:param lock: Whether to obtain select_for_update() locks on the job type models
:type lock: bool
:returns: Set of referenced job types
:rtype: set[:class:`job.models.JobType`]
"""
filters = []
for job_type_key in self.get_job_type_keys():
job_type_filter = Q(name=job_type_key[0], version=job_type_key[1])
filters = filters | job_type_filter if filters else job_type_filter
if filters:
job_type_query = JobType.objects.all()
if lock:
job_type_query = job_type_query.select_for_update().order_by('id')
return {job_type for job_type in job_type_query.filter(filters)}
return set()
def get_job_type_keys(self):
"""Returns a set of tuples that represent keys for each job in the recipe
:returns: Set of referenced job types as a tuple of (name, version)
:rtype: set[(str, str)]
"""
job_type_keys = set()
for job_dict in self._jobs_by_name.itervalues():
if 'job_type' in job_dict:
job_type = job_dict['job_type']
if 'name' in job_type and 'version' in job_type:
job_type_keys.add((job_type['name'], job_type['version']))
return job_type_keys
def get_job_type_map(self):
"""Returns a mapping of job name to job type for each job in the recipe
:returns: Dictionary with the recipe job name of each job mapping to its job type
:rtype: dict of str -> :class:`job.models.JobType`
"""
results = {}
job_types = self.get_job_types()
job_type_map = {(job_type.name, job_type.version): job_type for job_type in job_types}
for job_name, job_dict in self._jobs_by_name.iteritems():
if 'job_type' in job_dict:
job_type = job_dict['job_type']
if 'name' in job_type and 'version' in job_type:
job_type_key = (job_type['name'], job_type['version'])
if job_type_key in job_type_map:
results[job_name] = job_type_map[job_type_key]
return results
def get_jobs_to_create(self):
"""Returns the list of job names and types to create for the recipe, in the order that they should be created
:returns: List of tuples with each job's name and type
:rtype: [(str, :class:`job.models.JobType`)]
"""
results = []
job_type_map = self.get_job_type_map()
ordering = self.get_graph().get_topological_order()
for job_name in ordering:
job_tuple = (job_name, job_type_map[job_name])
results.append(job_tuple)
return results
def validate_connection(self, recipe_conn):
"""Validates the given recipe connection to ensure that the connection will provide sufficient data to run a
recipe with this definition
:param recipe_conn: The recipe definition
:type recipe_conn: :class:`recipe.configuration.data.recipe_connection.LegacyRecipeConnection`
:returns: A list of warnings discovered during validation
:rtype: list[:class:`recipe.configuration.data.recipe_data.ValidationWarning`]
:raises :class:`recipe.configuration.data.exceptions.InvalidRecipeConnection`: If there is a configuration
problem
"""
warnings = []
warnings.extend(recipe_conn.validate_input_files(self._input_file_validation_dict))
warnings.extend(recipe_conn.validate_properties(self._property_validation_dict))
# Check all recipe jobs for any file outputs
file_outputs = False
for job_type in self.get_job_types():
if job_type.get_job_interface().get_file_output_names():
file_outputs = True
break
# Make sure connection has a workspace if the recipe has any output files
if file_outputs and not recipe_conn.has_workspace():
raise InvalidRecipeConnection('No workspace provided for output files')
return warnings
def validate_data(self, recipe_data):
"""Validates the given data against the recipe definition
:param recipe_data: The recipe data
:type recipe_data: :class:`recipe.seed.recipe_data.RecipeData`
:returns: A list of warnings discovered during validation.
:rtype: list[:class:`recipe.configuration.data.recipe_data.ValidationWarning`]
:raises :class:`recipe.configuration.data.exceptions.InvalidRecipeData`: If there is a configuration problem
"""
warnings = []
warnings.extend(recipe_data.validate_input_files(self._input_file_validation_dict))
warnings.extend(recipe_data.validate_input_json(self._property_validation_dict))
# Check all recipe jobs for any file outputs
file_outputs = False
for job_type in self.get_job_types():
if job_type.get_job_interface().get_file_output_names():
file_outputs = True
break
# If there is at least one file output, we must have a workspace to store the output(s)
if file_outputs:
warnings.extend(recipe_data.validate_workspace())
return warnings
def validate_job_interfaces(self):
"""Validates the interfaces of the recipe jobs in the definition to ensure that all of the input and output
connections are valid
:returns: A list of warnings discovered during validation.
:rtype: list[:class:`job.configuration.data.job_data.ValidationWarning`]
:raises :class:`recipe.configuration.definition.exceptions.InvalidDefinition`:
If there are any invalid job connections in the definition
"""
# Query for job types
job_types_by_name = self.get_job_type_map() # Job name in recipe -> job type model
for job_name, job_data in self._jobs_by_name.iteritems():
if job_name not in job_types_by_name:
if 'job_type' in job_data:
job_type = job_data['job_type']
if 'name' in job_type and 'version' in job_type:
raise InvalidDefinition('Unknown job type: (%s, %s)' % (job_type['name'], job_type['version']))
else:
raise InvalidDefinition('Missing job type name or version: %s' % job_name)
else:
raise InvalidDefinition('Missing job type declaration: %s' % job_name)
warnings = []
for job_name in self._jobs_by_name:
job_dict = self._jobs_by_name[job_name]
warnings.extend(self._validate_job_interface(job_dict, job_types_by_name))
return warnings
def _add_recipe_inputs_to_conn(self, job_conn, recipe_inputs):
"""Populates the given connection for a job with its recipe inputs
:param job_conn: The job's connection
:type job_conn: :class:`job.configuration.data.job_connection.JobConnection` or
:class:`job.data.job_connection.SeedJobConnection`
:param recipe_inputs: List of recipe inputs used for the job
:type recipe_inputs: list of dict
"""
for recipe_dict in recipe_inputs:
recipe_input = recipe_dict['recipe_input']
job_input = recipe_dict['job_input']
if recipe_input in self._input_json_by_name:
job_conn.add_property(job_input)
elif recipe_input in self._input_files_by_name:
input_file = self._input_files_by_name[recipe_input]
job_conn.add_input_file(job_input, input_file.multiple, input_file.media_types, not input_file.required,
input_file.partial)
def _create_validation_dicts(self):
"""Creates the validation dicts required by recipe_data to perform its validation"""
for input in self._get_seed_input_json():
self._property_validation_dict[input.name] = input.required
for input in self._get_seed_input_files():
file_desc = ScaleFileDescription()
for media_type in input.media_types:
file_desc.add_allowed_media_type(media_type)
self._input_file_validation_dict[input.name] = (input.required,
True if input.multiple else False,
file_desc)
def _populate_default_values(self):
"""Goes through the definition and populates any missing values with defaults
"""
for input_file in self._get_input_files():
if 'required' not in input_file:
input_file['required'] = True
if 'multiple' not in input_file:
input_file['multiple'] = False
if 'partial' not in input_file:
input_file['partial'] = False
if 'mediaTypes' not in input_file:
input_file['mediaTypes'] = []
for input_json in self._get_input_json():
if 'required' not in input_json:
input_json['required'] = True
for job_dict in self._definition['jobs']:
if not 'recipe_inputs' in job_dict:
job_dict['recipe_inputs'] = []
if not 'dependencies' in job_dict:
job_dict['dependencies'] = []
for dependency_dict in job_dict['dependencies']:
if not 'connections' in dependency_dict:
dependency_dict['connections'] = []
def _validate_job_interface(self, job_dict, job_types_by_name):
"""Validates the input connections for the given job in the recipe definition
:param job_dict: The job dictionary
:type job_dict: dict
:param job_types_by_name: Dict mapping all job names in the recipe to their job type models
:type job_types_by_name: dict
:returns: A list of warnings discovered during validation.
:rtype: list[:class:`job.configuration.data.job_data.ValidationWarning`]
:raises :class:`recipe.configuration.definition.exceptions.InvalidDefinition`:
If there are any invalid job connections in the definition
"""
job_type = job_types_by_name[job_dict['name']]
# Job connection will represent data to be passed to the job to validate
job_conn = JobConnectionSunset.create(job_type.get_job_interface())
# Assume a workspace is provided, this will be verified when validating the recipe data
job_conn.add_workspace()
# Populate connection with data that will come from recipe inputs
self._add_recipe_inputs_to_conn(job_conn, job_dict['recipe_inputs'])
# Populate connection with data that will come from job dependencies
warnings = []
for dependency_dict in job_dict['dependencies']:
dependency_name = dependency_dict['name']
job_type = job_types_by_name[dependency_name]
for conn_dict in dependency_dict['connections']:
conn_input = conn_dict['input']
job_output = conn_dict['output']
job_type.get_job_interface().add_output_to_connection(job_output, job_conn, conn_input)
job_type = job_types_by_name[job_dict['name']]
try:
warnings.extend(job_type.get_job_interface().validate_connection(job_conn))
except InvalidConnection as ex:
raise InvalidDefinition(unicode(ex))
return warnings
def _validate_job_dependencies(self):
"""Validates that every job dependency is listed in jobs and that there are no cyclic dependencies
:raises InvalidDefinition: If there is an undefined job or a cyclic dependency
"""
# Make sure all dependencies are defined
for job_dict in self._definition['jobs']:
job_name = job_dict['name']
for dependency_dict in job_dict['dependencies']:
dependency_name = dependency_dict['name']
if dependency_name not in self._jobs_by_name:
msg = 'Invalid recipe definition: Job %s has undefined dependency %s' % (job_name, dependency_name)
raise InvalidDefinition(msg)
# Ensure no cyclic dependencies
for job_dict in self._definition['jobs']:
job_name = job_dict['name']
dependencies_to_check = set()
dependencies_to_check.add(job_name)
while dependencies_to_check:
next_layer = set()
for dependency in dependencies_to_check:
job_dict = self._jobs_by_name[dependency]
for dependency_dict in job_dict['dependencies']:
dependency_name = dependency_dict['name']
if dependency_name == job_name:
msg = 'Invalid recipe definition: Job %s has a circular dependency' % job_name
raise InvalidDefinition(msg)
next_layer.add(dependency_name)
dependencies_to_check = next_layer
def _validate_no_dup_job_inputs(self):
"""Validates that there are no duplicate inputs for any job
:raises InvalidDefinition: If there is a duplicate input
"""
for job_dict in self._definition['jobs']:
job_name = job_dict['name']
input_names = set()
for recipe_dict in job_dict['recipe_inputs']:
name = recipe_dict['job_input']
if name in input_names:
msg = 'Invalid recipe definition: Job %s has duplicate input %s' % (job_name, name)
raise InvalidDefinition(msg)
input_names.add(name)
for dependency_dict in job_dict['dependencies']:
for conn_dict in dependency_dict['connections']:
name = conn_dict['input']
if name in input_names:
msg = 'Invalid recipe definition: Job %s has duplicate input %s' % (job_name, name)
raise InvalidDefinition(msg)
input_names.add(name)
def _validate_recipe_inputs(self):
"""Validates that the recipe inputs used when listing the jobs are defined in the input data section
:raises InvalidDefinition: If there is an undefined recipe input
"""
for job_dict in self._definition['jobs']:
job_name = job_dict['name']
for recipe_dict in job_dict['recipe_inputs']:
recipe_input = recipe_dict['recipe_input']
if recipe_input not in self._input_files_by_name and recipe_input not in self._input_json_by_name:
msg = 'Invalid recipe definition: Job %s has undefined recipe input %s' % (job_name, recipe_input)
raise InvalidDefinition(msg)
| apache-2.0 | -336,845,178,525,268,600 | 43.541152 | 120 | 0.612464 | false |
alanrodas/fa6i-kgt | kgt/__init__.py | 1 | 1408 | # Import required libraries
from flask import Flask, render_template
from flask_login import LoginManager
from ConfigParser import SafeConfigParser
from pony.orm import *
# Load the configuration file of the application
config = SafeConfigParser()
config.read('kgt/config.ini')
# Create the flask application to run
app = Flask(__name__)
app.secret_key = config.get('auth', 'secretkey')
# Create the login manager
login_manager = LoginManager()
login_manager.init_app(app)
from kgt.auth import auth
# Initialize database
db = Database()
db.bind(config.get('database', 'provider'), host=config.get('database', 'host'), user=config.get('database', 'user'), passwd=config.get('database', 'passwd'), db=config.get('database', 'db'))
# Make the configuration and authentication objects available in templates
app.jinja_env.globals.update(config=config)
app.jinja_env.globals.update(auth=auth)
# Add error handlers to default pages so there is no fuss
@app.errorhandler(404)
def page_not_found(e):
return render_template('error/404.html'), 404
@app.errorhandler(403)
def forbidden(e):
return render_template('error/403.html'), 403
@app.errorhandler(500)
def internal_server_error(e):
return render_template('error/500.html'), 500
# Load the models and routes of the application
import kgt.auth
import kgt.auth.routes
import kgt.models
import kgt.routes
db.generate_mapping(create_tables=True)
| gpl-3.0 | 6,765,289,706,268,825,000 | 29.608696 | 191 | 0.761364 | false |
dnlcrl/TensorFlow-Playground | 1.tutorials/2.Deep MNIST for Experts/mnist.py | 1 | 3226 |
import tensorflow as tf
# download and install the data automatically
import input_data
# download dataset or open
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# launch interactive session
sess = tf.InteractiveSession()
# placeholders
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
# variables
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# initialize variables
sess.run(tf.initialize_all_variables())
# implement regression model
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
# train the model
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
for i in range(1000):
batch = mnist.train.next_batch(50)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
# evaluate the model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})
# weight and bias initialization
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# convolution and pooling
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# first conv layer
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# second layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# densely connected layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# dropout
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# readout layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# train and evaluate
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.initialize_all_variables())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(
feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
print "step %d, training accuracy %g" % (i, train_accuracy)
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print "test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
| mit | 4,093,565,697,744,812,000 | 28.87037 | 113 | 0.676689 | false |
p473lr/i-urge-mafia-gear | HP Code Wars Documents/2014/Solutions/prob02_CheckDigit.py | 1 | 2339 | #!/usr/bin/env python
#CodeWars 2014
#
#Check Digits
#
# There are many situations where we exchange a number with someone. In some cases we need
# to be sure that the number we gave them was received correctly. This is especially
# important for credit cards, serial numbers, and product bar code numbers.
# A check digit is used to ensure that a sequence of numbers was transmitted or
# entered correctly without human error. This extra digit helps verify that a tired
# programmer didn't switch numbers (ex. 12 -> 15), reverse a pair of numbers
# (ex. 34 -> 43) or otherwise alter the sequence. The different algorithms used
# to calculate a check digit determine what types of errors it will catch.
#
# For UPC there's a specific algorithm that's used to catch 100% of single digit errors
# and 89% of transposition errors. Your task is to calculate the missing check digit for
# the given list of UPCs.
#
# First, add all the digits in the odd-numbered positions together and multiply the
# result by three. Then add the digits in the even-numbered positions to the result.
# Next, find the modulo 10 of the sum. The modulo operation calculates the remainder
# after dividing the sum by 10. Finally subtract if from 10 to obtain the check digit.
#
# The first line of the input will contain the number of partial UPCs that follow.
# Each UPC will be on it's own line with spaces between all the digits.
#
# 7
# 0 3 6 0 0 0 2 9 1 4 5
# 0 7 3 8 5 2 0 0 9 3 8
# 0 4 1 2 2 0 1 8 9 0 4
# 0 3 7 0 0 0 2 0 2 1 4
# 7 6 5 6 6 8 2 0 2 0 2
# 0 4 1 2 2 0 6 7 0 4 0
# 0 4 1 2 2 0 6 7 0 0 0
#
#
# 0 3 6 0 0 0 2 9 1 4 5 2
# 0 7 3 8 5 2 0 0 9 3 8 5
# 0 4 1 2 2 0 1 8 9 0 4 5
# 0 3 7 0 0 0 2 0 2 1 4 1
# 7 6 5 6 6 8 2 0 2 0 2 8
# 0 4 1 2 2 0 6 7 0 4 0 6
# 0 4 1 2 2 0 6 7 0 0 0 0
#
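# A worked example (added for illustration; it follows directly from the rule above):
# for the first partial UPC, 0 3 6 0 0 0 2 9 1 4 5:
#   odd positions  (1st, 3rd, ... 11th): 0+6+0+2+1+5 = 14, and 14 * 3 = 42
#   even positions (2nd, 4th, ... 10th): 3+0+0+9+4   = 16
#   42 + 16 = 58, 58 mod 10 = 8, check digit = (10 - 8) mod 10 = 2
# which matches the expected output line "0 3 6 0 0 0 2 9 1 4 5 2".
#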
import sys
print ("Enter number of lines. Then 11 digits for each line.")
count = int(sys.stdin.readline())
while (count > 0):
count -= 1
line = sys.stdin.readline().rstrip('\n')
currentDigit=1
checkDigit=0
for c in line:
if (c.isdigit()):
value = int(c)
checkDigit += value
if (currentDigit % 2 == 1):
checkDigit += value+value # Add odd positions a total of 3 times.
currentDigit += 1
checkDigit = checkDigit % 10
print (line, (10-checkDigit)%10)
| apache-2.0 | -2,976,268,853,443,760,600 | 34.984615 | 91 | 0.666524 | false |
toddpalino/kafka-tools | tests/tools/models/test_cluster.py | 1 | 5206 | import json
import unittest
from testfixtures import LogCapture
from kafka.tools.models.broker import Broker
from kafka.tools.models.topic import Topic
from kafka.tools.models.cluster import Cluster
class SimpleClusterTests(unittest.TestCase):
def setUp(self):
self.cluster = Cluster()
def add_brokers(self, num):
for i in range(1, num + 1):
broker = Broker("brokerhost{0}.example.com".format(i), id=i)
self.cluster.add_broker(broker)
def add_topics(self, num, partition_count=2):
for i in range(1, num + 1):
topic = Topic("testTopic{0}".format(i), partition_count)
self.cluster.add_topic(topic)
def add_partitions_to_broker(self, broker_id, pos, num):
topic = Topic('testTopic', num)
self.cluster.brokers[broker_id].partitions[pos] = []
for i in range(num):
self.cluster.brokers[broker_id].partitions[pos].append(topic.partitions[i])
def test_cluster_create(self):
assert self.cluster.brokers == {}
assert self.cluster.topics == {}
def test_cluster_add_broker(self):
self.add_brokers(1)
assert len(self.cluster.brokers) == 1
for bid in self.cluster.brokers.keys():
assert self.cluster.brokers[bid].cluster is self.cluster
def test_cluster_num_brokers(self):
self.add_brokers(2)
assert self.cluster.num_brokers() == 2
def test_cluster_add_topic(self):
self.add_topics(1)
assert len(self.cluster.topics) == 1
for tname in self.cluster.topics.keys():
assert self.cluster.topics[tname].cluster is self.cluster
def test_cluster_num_topics(self):
self.add_topics(2)
assert self.cluster.num_topics() == 2
def test_cluster_partition_iterator(self):
self.add_topics(2)
seen_partitions = {}
for partition in self.cluster.partitions([]):
seen_partitions["{0}:{1}".format(partition.topic.name, partition.num)] = 1
assert seen_partitions == {'testTopic1:0': 1, 'testTopic1:1': 1, 'testTopic2:0': 1, 'testTopic2:1': 1}
def test_cluster_partition_iterator_with_exclude(self):
self.add_topics(2)
seen_partitions = {}
for partition in self.cluster.partitions(['testTopic1']):
seen_partitions["{0}:{1}".format(partition.topic.name, partition.num)] = 1
assert seen_partitions == {'testTopic2:0': 1, 'testTopic2:1': 1}
def test_cluster_max_replication_factor(self):
self.add_brokers(2)
self.add_partitions_to_broker(1, 0, 1)
self.add_partitions_to_broker(1, 1, 1)
self.add_partitions_to_broker(2, 2, 2)
assert self.cluster.max_replication_factor() == 3
def test_cluster_log_info(self):
self.add_brokers(2)
with LogCapture() as line:
self.cluster.log_broker_summary()
line.check(('kafka-tools', 'INFO', 'Broker 1: partitions=0/0 (0.00%), size=0'),
('kafka-tools', 'INFO', 'Broker 2: partitions=0/0 (0.00%), size=0'))
def test_cluster_output_json(self):
self.add_topics(2)
self.add_brokers(2)
self.cluster.topics['testTopic1'].partitions[0].add_replica(self.cluster.brokers[1], 0)
self.cluster.topics['testTopic1'].partitions[0].add_replica(self.cluster.brokers[2], 1)
self.cluster.topics['testTopic1'].partitions[1].add_replica(self.cluster.brokers[1], 1)
self.cluster.topics['testTopic1'].partitions[1].add_replica(self.cluster.brokers[2], 0)
cluster = self.cluster.to_dict()
cluster_json = json.dumps(cluster, sort_keys=True)
assert cluster_json == json.dumps({
'brokers': {
'1': {
'hostname': 'brokerhost1.example.com',
'id': 1,
'jmx_port': -1,
'port': 9092,
'rack': None,
'version': None
},
'2': {
'hostname': 'brokerhost2.example.com',
'id': 2,
'jmx_port': -1,
'port': 9092,
'rack': None,
'version': None
}
},
'topics': {
'testTopic1': {
'partitions': {
'0': {
'replicas': [1, 2],
'size': 0
},
'1': {
'replicas': [2, 1],
'size': 0
}
},
'retention': 1
},
'testTopic2': {
'partitions': {
'0': {
'replicas': [],
'size': 0
},
'1': {
'replicas': [],
'size': 0
}
},
'retention': 1
}
}
}, sort_keys=True)
| apache-2.0 | 2,682,005,011,271,960,000 | 36.724638 | 110 | 0.50461 | false |
shpakoo/YAP | StepsLibrary.py | 1 | 60173 | ########################################################################################
## This file is a part of YAP package of scripts. https://github.com/shpakoo/YAP
## Distributed under the MIT license: http://www.opensource.org/licenses/mit-license.php
## Copyright (c) 2011-2013 Sebastian Szpakowski
########################################################################################
#################################################
## A library of "steps" or program wrappers to construct pipelines
## Pipeline steps orchestration, grid management and output handling.
#################################################
import sys, tempfile, shlex, glob, os, stat, hashlib, time, datetime, re, curses
from threading import *
from subprocess import *
from MothurCommandInfoWrapper import *
from collections import defaultdict
from collections import deque
from random import *
from Queue import *
import smtplib
from email.mime.text import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
_author="Sebastian Szpakowski"
_date="2012/09/20"
_version="Version 2"
#################################################
## Classes
##
class BufferedOutputHandler(Thread):
def __init__(self, usecurses=False):
Thread.__init__(self)
self.shutdown=False
self.cache = deque()
self.registered=0
self.ids = list()
self.wrap = 140
self.starttime = time.time()
#### init log
try:
self.otptfile = open("logfile.txt", 'a')
self.toPrint("-----", "GLOBAL", "Appending to a logfile.txt...")
except:
self.otptfile = open("logfile.txt", 'w')
self.toPrint("-----", "GLOBAL", "Creating a new logfile.txt...")
command = " ".join(sys.argv)
self.otptfile.write("command: %s\n" % command)
#### init output (curses)
self.usecurses = usecurses
if (self.usecurses):
self.stdscr=curses.initscr()
curses.savetty()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
self.textbuffer= list()
self.stdscr.clear()
self.stdscr.refresh()
self.cursestrackbuffer = 100
self.scrollpad = curses.newpad(self.cursestrackbuffer*2, self.wrap*2)
self.spacerpad = curses.newpad(1,1000)
self.updatepad = curses.newpad(10,1000)
self.rows, self.cols = self.stdscr.getmaxyx()
else:
self.stdscr=None
self.start()
def run(self):
self.toPrint("-----", "GLOBAL", "Setting up the pipeline...")
self.flush()
time.sleep(5)
while activeCount()>3 or self.registered>0 or len(self.cache) > 0:
self.flush()
time.sleep(1)
self.flush()
endtime = time.time()
text = "+%s [fin]" % (str(datetime.timedelta(seconds=round(endtime-self.starttime,0))).rjust(17))
self.toPrint("-----", "GLOBAL", text)
command = "%spython %straverser.py" % (binpath, scriptspath)
p = Popen(shlex.split(command), stdout = PIPE, stderr = PIPE, close_fds=True)
dot, err = p.communicate()
p.wait()
x = open("workflow.dot", "w")
x.write(dot)
x.write("\n")
x.close()
for format in ["svg", "svgz", "png", "pdf"]:
command = "%sdot -T%s -o workflow.%s" % (dotpath, format, format)
p = Popen(shlex.split(command), stdin = PIPE, stdout = PIPE, stderr = PIPE, close_fds=True)
out, err = p.communicate(dot)
p.wait()
self.toPrint("-----", "GLOBAL", "Check out workflow.{svg,png,jpg} for an overview of what happened.")
self.flush()
self.otptfile.close()
self.closeDisplay()
self.mailLog()
def register(self, id):
self.registered+=1
self.ids.append(id)
def deregister(self):
self.registered-=1
def collapseIDs(self, text ):
for id in self.ids:
if len(id)>5:
text = re.sub(id, "[{0}~]".format(id[:5]), text)
return (text)
def flush(self):
while len(self.cache) > 0:
id, name, line = self.cache.popleft()
tag = "[{0}] {1:<20} > ".format( id[:5], name)
line = "{0!s}".format(line)
line = self.collapseIDs(line)
otpt = "{0}{1}".format(tag, line[:self.wrap])
self.otptfile.write("{0}{1}\n".format(tag, line))
line = line[self.wrap:]
self.outputScroll(otpt)
while len(line)>=self.wrap:
otpt = "{0}\t{1}".format(tag, line[:self.wrap])
line = line[self.wrap:]
self.outputScroll(otpt)
if len(line)>0:
otpt = "{0:<30}\t\t{1}".format("", line)
line = line
self.outputScroll(otpt)
self.redrawScreen()
def mailLog(self):
log = loadLines("logfile.txt")
log.reverse()
paths = os.getcwd()
paths = "%s/" % (paths)
dirs = glob.glob("*OUTPUT*")
dirs.sort()
for d in dirs:
paths = "%s\n\t%s/*" % (paths, d)
header = "Hi,\nYAP has just finished. Most, if not all, of your data should be in:\n\n%s\n\n-see the log below just to make sure...\nThe attached work-flow graph can be opened in your browser.\nYours,\n\n~YAP" % (paths)
log = "".join(log)
msgtext = "%s\n\n<LOG>\n\n%s\n</LOG>\n\n" % (header, log)
try:
me = __email__
toaddr = [me]
msg = MIMEMultipart()
msg['To'] = COMMASPACE.join(toaddr)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = '[AUTOMATED] YAP is done.'
if me != __admin__:
ccaddr = [__admin__]
msg['BCC'] = COMMASPACE.join(ccaddr)
toaddr = toaddr + ccaddr
msg.attach(MIMEText(msgtext))
files = ["workflow.pdf"]
for f in files:
try:
part = MIMEBase('application', "octet-stream")
part.set_payload( open(f,"rb").read() )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
msg.attach(part)
except:
pass
s = smtplib.SMTP('mail.jcvi.org')
s.sendmail(me, toaddr , msg.as_string())
s.quit()
except:
pass
def redrawScreen(self):
try:
y,x = self.stdscr.getmaxyx()
### enough screen to print:
if y>20 and x>20:
if len(self.textbuffer) < (y-10):
self.scrollpad.refresh(0, 0, 0, 0, y-10, x-5)
else:
self.scrollpad.refresh(self.cursestrackbuffer-y+10 , 0, 0, 0, y-10, x-5)
self.updatepad.refresh(0, 0, y-8, 10 , y-3, x-5)
### when screen too small
else:
self.scrollpad.refresh(0,0,0,0,0,0)
self.updatepad.refresh(0,0,0,0,0,0)
except:
self.closeDisplay()
self.usecurses=False
#
def toPrint(self, id, name, line):
self.cache.append((id, name, line))
def outputScroll(self, k):
if self.usecurses:
self.textbuffer.append("%s\n" %(k))
self.scrollpad.clear()
for k in self.textbuffer[-self.cursestrackbuffer:]:
self.scrollpad.addstr(k)
else:
print k
def outputUpdate(self,k):
if self.usecurses:
self.updatepad.clear()
for k in k.strip().split("\n"):
self.updatepad.addstr("%s\n" % k)
def closeDisplay(self):
if self.usecurses:
self.stdscr.clear()
self.stdscr.refresh()
curses.curs_set(1)
curses.nocbreak()
curses.echo()
curses.resetty()
curses.endwin()
class TaskQueueStatus(Thread):
def __init__(self, update=1, maxnodes=10):
Thread.__init__(self)
self.active=True
self.maxnodes = maxnodes
self.available = self.maxnodes
self.update = update
#### queue of grid jobs to run
self.scheduled = Queue()
#### to keep track of things popped off the queue
self.processing = dict()
#### inventory of what ran
#### tuple (jid, status) indexed by command
#### status: new/running/done/remove
#### new upon registering
#### running when submitted to the grid
#### done when completed
self.registered = dict()
#### inventory of completed jobs
self.bestqueue = "default.q"
self.pollqueues()
self.running=0
self.stats=dict()
self.previous =""
self.start()
def run(self):
BOH.outputUpdate("Setting up the grid...")
print "Setting up grid..."
time.sleep(5)
while activeCount()>3 or self.running>0 or self.scheduled.qsize()>0:
self.pollfinished()
self.pollqueues()
self.pollrunning()
self.dispatch()
self.cleanup()
BOH.outputUpdate("%s" % (self))
#print self
time.sleep(self.update)
BOH.outputUpdate("%s\nGrid Offline." % (self))
print self
print "Queue status shutting down."
def cleanup(self):
toremove = set()
for key, tup in self.registered.items():
id, status = tup
if status == "remove":
toremove.add(key)
for key in toremove:
del self.registered[key]
def flagRemoval(self, task):
id, status = self.registered[task.getUniqueID()]
if status =="done":
self.registered[task.getUniqueID()] = [id, "remove"]
else:
print "cannot flag yet:", id, status
def pollfinished(self):
# donejobs = set()
#
# ### only 100 recent jobs shown, which could be a problem ;-)
# p = Popen(shlex.split("qstat -s z"), stdout=PIPE, stderr=PIPE, close_fds=True)
# p.wait()
# out,err = p.communicate()
#
# lines = out.split("\n")
# tmp = set()
# if len(lines)>2:
# for line in lines[2:]:
# line = line.strip().split()
# if len(line)>0:
# donejobs.add(line[0])
#
#if len(donejobs)>0:
for key, tup in self.registered.items():
id, status = tup
#if (status == "running") and (id in donejobs):
if (status == "running") and (self.isJobDone(id)):
tmp = self.registered[key][1]= "done"
self.processing[key].setCompleted()
self.available += 1
del self.processing[key]
def isJobDone(self, jid):
p = Popen(shlex.split("qstat -j %s" % jid), stdout=PIPE, stderr=PIPE, close_fds=True)
p.wait()
out,err = p.communicate()
return err.find("jobs do not exist")>-1
def pollqueues(self):
command="qstat -g c"
p = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE, close_fds=True )
p.wait()
out,err = p.communicate()
if err.find("neither submit nor admin host")==-1:
queues = defaultdict(float)
out = out.strip().split("\n")
fullqueues = set()
#cache queue information
for q in out[2:]:
queue, cqload, used, res, avail, total, acds, cdsu = q.split()
avail = float(avail)
total = float(total)
if total>0:
queues[queue] = avail
if avail==0:
fullqueues.add(queue)
# determine which queue is the best
#for k in ("default.q", "medium.q", "fast.q", "himem.q"):
#for k in ("fast.q", "medium.q", "default.q"):
#for k in ("himem.q", "medium.q", "default.q"):
if ("medium.q" in fullqueues) and ("default.q" in fullqueues) and "himem" in queues.keys() :
if queues["himem.q"]>0:
self.bestqueue = "himem.q"
else:
self.bestqueue = "medium.q"
else:
for k in ("medium.q", "default.q"):
if queues[k] >= queues[self.bestqueue]:
self.bestqueue = k
### sanity check, this should match the counters
def pollrunning(self):
tmp=defaultdict(int)
for jid, value in self.registered.values():
tmp[value]+=1
self.stats = tmp
self.running = self.stats["running"]
def dispatch(self):
while self.nodesAvailable():
if not self.scheduled.empty():
tmp = self.scheduled.get()
self.processing[tmp.getUniqueID()]=tmp
#print "submitting", tmp.getUniqueID()
jid = tmp.submit()
#print jid
if jid==-1:
print "???", tmp
self.registered[tmp.getUniqueID()] = [tmp.getGridId(), "running"]
self.available-=1
else:
break
def pickQ(self):
return self.bestqueue
def register(self, task):
self.scheduled.put(task)
self.registered[task.getUniqueID()]=[-1, "new"]
def shutdown(self):
self.active=False
print "Queue status shutting down..."
def nodesAvailable(self):
return (self.available > 0)
def __str__(self):
otpt ="Currently running/waiting: %s/%s\n" % (self.running, self.scheduled.qsize())
otpt ="%savailable/total: %s/%s" % (otpt, self.available, self.maxnodes)
# for key, tup in self.registered.items():
# id, status = tup
# if id != -1:
# otpt = "%s\n\t%s\t%s\t%s" % (otpt, id, status, key[0:10])
for key, val in self.stats.items():
otpt = "%s\n\t%s\t%s" % (otpt, key, val)
otpt = "%s\n\nbest queue: %s" % (otpt, self.bestqueue)
return (otpt)
#################################################
### a thread that will track of a qsub job
### templates adapted to JCVIs grid
###
class GridTask():
def __init__(self, template="default.q", command = "", name="default", cpu="1", dependson=list(), cwd=".", debug=False):
self.gridjobid=-1
self.completed=False
self.queue=template
self.inputcommand = command
self.cwd=cwd
self.project = __projectid__
self.email = __email__
### remove *e##, *pe## *o## *po##
self.retainstreams=" -o /dev/null -e /dev/null "
### debug flag
self.debugflag = debug
### the only queue that has more than 4 CPUs...
if int(cpu)>4:
self.queue = "himem.q"
if len(dependson)>0:
holdfor = "-hold_jid "
for k in dependson:
holdfor = "%s%s," % (holdfor, k.getJobid())
holdfor=holdfor.strip(",")
else:
holdfor = ""
### keep po pe o e streams for debugging purposes
if self.debugflag:
self.retainstreams=""
### to avoid long command problems, create a script with the command, and invoke that instead of the command directly.
px = "tmp.%s.%s.%s.%s." % (randrange(1,100),randrange(1,100),randrange(1,100),randrange(1,100))
sx = ".%s.%s.%s.%s.sh" % (randrange(1,100),randrange(1,100),randrange(1,100),randrange(1,100))
##### to avoid a 'too many open files' OSError
pool_open_files.acquire()
### the bounded semaphore throttles file opening for tasks created around the same time
scriptfile, scriptfilepath = tempfile.mkstemp(suffix=sx, prefix=px, dir=self.cwd, text=True)
os.close(scriptfile)
self.scriptfilepath = scriptfilepath
os.chmod(self.scriptfilepath, 0777 )
input= "%s\n" % (self.inputcommand)
scriptfile = open(self.scriptfilepath, "w")
scriptfile.write(input)
scriptfile.close()
pool_open_files.release()
####
self.templates=dict()
self.templates["himem.q"] = 'qsub %s -P %s -N jh.%s -cwd -pe threaded %s -l "himem" -M %s -m a %s "%s" ' % (self.retainstreams, self.project, name, cpu, self.email, holdfor, self.scriptfilepath)
self.templates["default.q"] = 'qsub %s -P %s -N jd.%s -cwd -pe threaded %s -M %s -m a %s "%s" ' % (self.retainstreams, self.project, name, cpu, self.email, holdfor, self.scriptfilepath)
self.templates["fast.q"] = 'qsub %s -P %s -N jf.%s -cwd -pe threaded %s -l "fast" -M %s -m a %s "%s" ' % (self.retainstreams, self.project, name,cpu, self.email, holdfor, self.scriptfilepath)
self.templates["medium.q"] = 'qsub %s -P %s -N jm.%s -cwd -pe threaded %s -l "medium" -M %s -m a %s "%s" ' % (self.retainstreams, self.project, name, cpu, self.email, holdfor, self.scriptfilepath)
self.templates["himemCHEAT"] = 'qsub %s -P %s -N jH.%s -cwd -pe threaded %s -l "himem" -M %s -m a %s "%s" ' % (self.retainstreams, self.project, name, 1, self.email, holdfor, self.scriptfilepath)
self.templates["mpi"] = 'qsub %s -P %s -N jP.%s -cwd -pe orte %s -M %s -m a %s mpirun -np %s "%s" ' % (self.retainstreams, self.project, name, cpu, cpu, self.email, holdfor, self.scriptfilepath )
self.command = ""
QS.register(self);
def submit(self):
if not self.queue in self.templates.keys():
self.queue = QS.pickQ()
self.command = self.templates[self.queue]
#print self.command
p = Popen(shlex.split(self.command), stdout=PIPE, stderr=PIPE, cwd=self.cwd, close_fds=True)
p.wait()
out, err = p.communicate()
err = err.strip()
out = out.strip()
if err!="":
print err
if out.endswith("has been submitted"):
self.gridjobid = out.split(" ")[2]
else:
print ">>>", out
print "#FAIL"
return (self.getGridId())
def getGridId(self):
return self.gridjobid
def getUniqueID(self):
return "%s_%s_%s" % (id(self), self.cwd, self.inputcommand)
def setCompleted(self):
self.completed=True
try:
if not self.debugflag:
os.remove(self.scriptfilepath)
except OSError, error:
print( "%s already gone" % self.scriptfilepath)
QS.flagRemoval(self)
def isCompleted(self):
return (self.completed)
def wait(self):
while not self.isCompleted():
time.sleep(0.1)
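# A minimal usage sketch (illustration only; the step name, command and working
# directory below are placeholders - the steps further down build these the same way):
#
#   task = GridTask(template="pick", name="example", command="echo hello",
#                   cpu=1, dependson=list(), cwd=".")
#   task.wait()
#
# "pick" is not one of the defined templates, so submit() defers to
# TaskQueueStatus.pickQ(), which chooses a queue based on current availability.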
#################################################
### Iterator over input fasta file.
### Only reading when requested
### Useful for very large FASTA files
### with many sequences
class FastaParser:
def __init__ (self, x):
self.filename = x
self.fp = open(x, "r")
self.currline = ""
self.currentFastaName = ""
self.currentFastaSequence = ""
self.lastitem=False
def __iter__(self):
return(self)
#####
def next(self):
for self.currline in self.fp:
if self.currline.startswith(">"):
self.currline = self.currline[1:]
if self.currentFastaName == "":
self.currentFastaName = self.currline
else:
otpt = (self.currentFastaName.strip(), self.currentFastaSequence.strip())
self.currentFastaName = self.currline
self.currentFastaSequence = ""
self.previoustell = self.fp.tell()
return (otpt)
else:
self.addSequence(self.currline)
if not self.lastitem:
self.lastitem=True
return (self.currentFastaName.strip(), self.currentFastaSequence.strip())
else:
raise StopIteration
def addSequence(self, x):
self.currentFastaSequence = "%s%s" % (self.currentFastaSequence, x.strip())
def __str__(self):
return ("reading file: %s" %self.filename)
#################################################
### Iterator over input file.
### every line is split on the separator into a list of fields
class GeneralPurposeParser:
def __init__(self, file, skip=0, sep="\t"):
self.filename = file
self.fp = open(self.filename, "rU")
self.sep = sep
self.skip = skip
self.linecounter = 0
self.currline=""
while self.skip>0:
self.next()
self.skip-=1
def __iter__(self):
return (self)
def next(self):
for currline in self.fp:
currline = currline.strip().split(self.sep)
self.currline = currline
self.linecounter = self.linecounter + 1
return(currline)
raise StopIteration
def __str__(self):
return "%s [%s]\n\t%s" % (self.filename, self.linecounter, self.currline)
#################################################
### The mother of all Steps:
###
class DefaultStep(Thread):
def __init__(self):
#### thread init
Thread.__init__(self)
self.random = uniform(0, 10000)
self.name = ("%s[%s]" % (self.name, self.random))
#### hash of the current step-path (hash digest of previous steps + current inputs + arguments?)
self.workpathid = ""
#### path where the step stores its files
self.stepdir = ""
#### what needs to be completed for this step to proceed
#### a list of steps
self.previous = list()
#### mapping type - path for files
self.inputs = defaultdict(set)
#### mapping type - name for files
self.outputs = defaultdict(set)
#### mapping arg val for program's arguments
self.arguments= dict()
#### ID of the step...
self.stepname = ""
#### flag for completion
self.completed = False
self.completedpreviously=False
self.failed = False
#### keep track of time elapsed
self.starttime = 0
self.endtime = 0
#### special flag, some steps might not want to delete the inputs (argcheck)
self.removeinputs = True
####
def setInputs(self, x):
for k,v in x.items():
for elem in v:
self.inputs[k].add(elem)
def setArguments(self, x):
for k,v in x.items():
if v=="":
v=" "
self.arguments[k] = v
def setPrevious(self, x):
if not type(x) is list:
self.previous.append(x)
else:
for elem in x:
self.previous.append(elem)
def setName(self, x):
self.stepname=x
def run(self):
self.init()
if self.failed:
#self.message("Error detected... ")
BOH.deregister()
self.completed=True
elif not self.isDone():
try:
self.performStep()
self.finalize()
except Exception, inst:
self.message("...")
self.message( type(inst))
self.message( inst)
BOH.deregister()
self.completed=True
self.failed=True
else:
self.message("Completed (previously).")
BOH.deregister()
self.completed=True
self.completedpreviously=True
def performStep():
self.message("in a step...")
def init(self):
redo=False
### wait for previous steps to finish
for k in self.previous:
while not k.isDone():
#self.message( "waiting" )
time.sleep(1)
if k.hasFailed():
self.failed=True
redo=redo or (not k.isDonePreviously())
#self.message("needs a redo %s" % (redo))
if not self.failed:
### time stamp
self.starttime = time.time()
#### hash of the current step-path (hash digest of previous steps + current inputs + arguments?)
self.workpathid = self.makeWorkPathId()
####
### output handler
BOH.register(self.workpathid)
###
#self.message("Initializing %s %s" % (self.workpathid, self.name))
#### create directories if necessary
self.stepdir =""
self.prepareDir(redo=redo)
def makeWorkPathId(self):
tmp = list()
tmp.append(self.stepname)
if self.previous!=None:
for k in self.previous:
while k.getWorkPathId()==-1:
time.sleep(1)
tmp.extend([k.getWorkPathId()])
for k,v in self.inputs.items():
tmp.extend(["%s=%s" % (k, ",".join(v) ) ] )
for k,v in self.arguments.items():
tmp.extend(["%s=%s" % (k, v) ] )
tmp.sort()
tmp = "\n".join(tmp)
workpathid = hashlib.sha224(tmp).hexdigest()[0:5]
return (workpathid)
def getWorkPathId(self):
return (self.workpathid)
def prepareDir(self, redo=False):
### make step's directory
self.stepdir = "Step_%s_%s" % (self.stepname, self.workpathid)
flush_old = False
try:
os.mkdir(self.stepdir)
except OSError, error:
self.message( "Step directory already exists...")
flush_old=True
if redo:
if flush_old:
self.message("Updating...")
k = "rm -r *"
task = GridTask(template="pick", name="redo_clean", command=k, cpu=1, cwd = self.stepdir)
task.wait()
else:
###supposedly no old step-data to flush
pass
else:
### has analysis been done already?
try:
self.parseManifest()
self.completed=True
self.completedpreviously=True
self.message("Using data generated previously...")
except IOError, inst:
#self.message("Will make new manifest...")
pass
except Exception, inst:
self.message("****ERROR***")
self.message(type(inst))
self.message(inst.args)
self.message(inst)
self.message("************")
def finalize(self):
if not self.failed:
self.categorizeAndTagOutputs()
self.makeManifest()
self.endtime = time.time()
self.message( "+%s\t[Done]" % (str(datetime.timedelta(seconds=round(self.endtime-self.starttime,0))).rjust(17)) )
else:
self.endtime = time.time()
self.message( "+%s\t[Fail]" % (str(datetime.timedelta(seconds=round(self.endtime-self.starttime,0))).rjust(17)) )
self.completed=True
BOH.deregister()
def makeManifest(self):
m = open("%s/%s.manifest" % (self.stepdir, self.workpathid), "w")
for type, files in self.inputs.items():
if len(files)>0:
m.write("input\t%s\t%s\n" % (type, ",".join(files)) )
for arg, val in self.arguments.items():
m.write("argument\t%s\t%s\n" % (arg, val ) )
for type, files in self.outputs.items():
if len(files)>0:
m.write("output\t%s\t%s\n" % (type, ",".join(files)) )
m.close()
def determineType(self, filename):
filename = filename.strip().split(".")
extension = filename[-1]
preextension = filename[-2]
if preextension == "scrap":
return "scrap"
elif preextension == "align" and extension == "report":
return "alignreport"
elif extension == "dist" and preextension == "phylip":
return "phylip"
elif extension == "dist":
return "column"
elif preextension == "tax" and extension =="summary":
return "taxsummary"
elif preextension == "cdhit" and extension =="clstr":
return "cdhitclstr"
elif preextension == "bak" and extension =="clstr":
return "cdhitbak"
elif extension == "cdhit":
return "fasta"
elif extension in ["align", "fna", "fa", "seq", "aln"]:
return "fasta"
elif extension == "qual":
return "qfile"
elif extension == "tax":
return "taxonomy"
elif extension == "names":
return "name"
elif extension == "groups":
return "group"
elif extension == "files":
return "file"
elif extension in ["tre", "tree", "dnd"]:
return "tre"
### sge job files
elif re.search("po\d{3}", extension) != None:
return "po"
elif re.search("pe\d{3}", extension) != None:
return "pe"
elif re.search("o\d{3}", extension) != None:
return "o"
elif re.search("e\d{3}", extension) != None:
return "e"
else:
return extension
def categorizeAndTagOutputs(self):
inputs = [x.split("/")[-1] for x in unlist( self.inputs.values()) ]
for file in glob.glob("%s/*" % self.stepdir):
file = file.split("/")[-1]
if file in inputs:
if self.removeinputs:
command = "unlink %s" % (file)
p = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE, cwd=self.stepdir, close_fds=True)
out,err = p.communicate()
p.wait()
else:
self.message("kept %s" % file)
#pass
elif not file.endswith("manifest"):
#self.message( "output: %s" % (file))
### make sure that every output file except for the manifest starts with the workpathID
file = file.split(".")
if len(file[0]) == len(self.workpathid):
newfilename = "%s.%s" % (self.workpathid, ".".join(file[1:]))
else:
newfilename = "%s.%s" % (self.workpathid, ".".join(file[0:]))
if ".".join(file) != newfilename:
k="mv %s %s" % (".".join(file), newfilename)
p = Popen(shlex.split(k), stdout=PIPE, stderr=PIPE, cwd=self.stepdir, close_fds=True)
out,err = p.communicate()
p.wait()
self.outputs[self.determineType(newfilename)].add(newfilename)
def find(self, arg, ln=True, original=False):
files=list()
if not original:
if len(self.inputs[arg])==0:
tmp = {arg: self.getOutputs(arg)}
self.setInputs(tmp)
else:
tmp = {arg: self.getOriginal(arg)}
self.setInputs(tmp)
files = self.inputs[arg]
toreturn=list()
for file in files:
if self.isVar(file):
toreturn.append(file[5:])
else:
tmp = file.strip().split("/")[-1]
if (ln):
command = "cp -s %s %s" % (file, tmp )
else:
command = "cp %s %s" % (file, tmp )
p = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE, cwd=self.stepdir, close_fds=True )
out,err = p.communicate()
p.wait()
toreturn.append(tmp)
#unique
toreturn = set(toreturn)
return list(toreturn)
def isVar(self,x):
return x.startswith("[var]")
def getOutputs(self, arg):
if self.outputs.has_key(arg):
otpt = list()
for x in unlist(self.outputs[arg]):
if self.isVar(x):
otpt.append(x)
else:
otpt.append("../%s/%s" % (self.stepdir, x))
return otpt
elif self.previous!=None:
otpt = list()
for k in self.previous:
otpt.extend(k.getOutputs(arg))
return otpt
else:
return list()
def getOriginal(self, arg):
if self.previous == None:
return self.getOutputs(arg)
else:
current = self.getOutputs(arg)
otpt = list()
for k in self.previous:
otpt.extend(k.getOriginal(arg))
if len(otpt)>0:
return otpt
else:
return current
def parseManifest(self):
fp = open("%s/%s.manifest" % (self.stepdir, self.workpathid), "r")
lines=fp.readlines()
fp.close()
for line in lines:
line = line.strip("\n").split("\t")
if line[0] == "output":
type = line[1]
files = line[2].split(",")
for file in files:
self.outputs[type].add(file)
elif line[0] == "input":
type = line[1]
files = line[2].split(",")
for file in files:
self.inputs[type].add(file)
elif line[0] == "argument":
if len(line)==2:
self.arguments[line[1]] = " "
else:
self.arguments[line[1]]=line[2]
def message(self, text):
if type(text) == list:
for line in text:
BOH.toPrint(self.workpathid, self.stepname, line)
else:
BOH.toPrint(self.workpathid, self.stepname, text)
def isDone(self):
return self.completed
def isDonePreviously(self):
return self.completedpreviously
def hasFailed(self):
return self.failed
def getInputValue(self, arg):
if self.arguments.has_key(arg):
return self.arguments[arg]
else:
return None
def setOutputValue(self, arg, val):
self.outputs[arg] = ["[var]%s" % (val)]
def __str__(self):
otpt = "%s\t%s" % (self.stepname, self.name)
for val in self.previous:
otpt += "%s\n%s" % (otpt, val.__str__())
#otpt = "\n".join(set(otpt.strip().split("\n")))
return otpt
class FileImport(DefaultStep):
def __init__(self, INS):
DefaultStep.__init__(self)
self.setInputs(INS)
#self.setArguments(ARGS)
#self.setPrevious(PREV)
self.setName("FILE_input")
self.start()
def performStep(self):
for type in self.inputs.keys():
files = self.inputs[type]
for file in files:
pool_open_files.acquire()
file = file.split("~")
if len(file)>1:
file, newname = file
tmp = file.strip().split("/")[-1]
k = "cp %s %s.%s" % (file, newname, type)
else:
file = file[0]
tmp = file.strip().split("/")[-1]
k ="cp %s imported.%s" % (file, tmp)
p = Popen(shlex.split(k), stdout=PIPE, stderr=PIPE, cwd=self.stepdir, close_fds=True)
self.message(k)
out,err = p.communicate()
p.wait()
pool_open_files.release()
class ArgumentCheck(DefaultStep):
def __init__(self, SHOW, PREV):
ARGS = {"show":SHOW}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("ArgCheck")
#self.nodeCPUs=nodeCPUs
self.removeinputs=False
self.start()
def performStep(self):
x = self.getInputValue("show")
if x!=None:
for type in x.split(","):
for file in self.find(type):
self.message("%s: %s" % (type,file))
class OutputStep(DefaultStep):
def __init__(self, NAME, SHOW, PREV):
ARGS = {"show":SHOW}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("OUTPUT_%s" % (NAME))
#self.nodeCPUs=nodeCPUs
self.removeinputs=False
self.start()
def performStep(self):
x = self.getInputValue("show")
if x!=None:
for type in x.split(","):
for file in self.find(type.strip(), ln = False):
self.message("%s: %s" % (type,file))
class SFFInfoStep(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("sffinfo")
self.start()
def performStep(self):
steps = list()
for sff in self.find("sff"):
k = "/usr/local/bin/sffinfo -s %s > %s.fasta" % (sff, sff)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
steps.append(task)
k = "/usr/local/bin/sffinfo -q %s > %s.qual" % (sff, sff)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
steps.append(task)
k = "/usr/local/bin/sffinfo -f %s > %s.flow" % (sff, sff)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
steps.append(task)
for s in steps:
s.wait()
class MothurStep(DefaultStep):
def __init__(self, NM, nodeCPUs, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName(NM)
self.nodeCPUs=nodeCPUs
self.start()
def makeCall(self):
FORCE = self.getInputValue("force")
x = MOTHUR.getCommandInfo(self.stepname)
#self.message(self.inputs)
if FORCE != None :
TYPES = FORCE.strip().split(",")
else:
TYPES = x.getInputs()
mothurargs = list()
for TYPE in TYPES:
#### on occasion, mothur overwrites the original file - namely names file
#### FALSE creates a copy
#### TRUE creates a link
if TYPE=="name":
tmp = self.find(TYPE, False)
else:
tmp = self.find(TYPE, True)
if len(tmp)>0:
mothurargs.append ("%s=%s" % (TYPE, "-".join(tmp)))
else:
if x.isRequired(TYPE):
self.message("Required argument '%s' not found!" % (TYPE))
raise Exception
else:
self.message("Optional argument '%s' not found, skipping" % (TYPE))
for arg, val in self.arguments.items():
if x.isAnArgument(arg):
mothurargs.append("%s=%s" % (arg, val))
elif arg=="find":
for a in val.strip().split(","):
self.message(a)
valstoinsert = self.find(a)
self.message(valstoinsert)
if len(valstoinsert)>0:
mothurargs.append("%s=%s" % (a, "-".join(valstoinsert)))
else:
self.message("skipping '%s' - not found" % (a))
else:
self.message("skipping '%s', as it is not an argument for %s" % (arg, self.stepname))
### method is parallelizable,
if x.isAnArgument("processors") and "processors" not in self.arguments.keys():
mothurargs.append("%s=%s" % ("processors", self.nodeCPUs ))
self.message("Will run on %s processors" % (self.nodeCPUs))
himemflag=False
### steps requiring lots of memory
if self.stepname in ("clearcut", "align.seq"):
himemflag=True
self.message("Needs lots of memory")
command = "%s(%s)" % (self.stepname, ", ".join(mothurargs))
return (command, x.isAnArgument("processors"), himemflag)
def performStep(self):
call, parallel, himem = self.makeCall()
k = "%smothur \"#%s\"" % (mothurpath, call)
if self.stepname =="remove.groups" and k.find("groups=)")>-1:
self.message("no groups to remove.")
else:
self.message(k)
if (parallel and self.nodeCPUs>1):
#task = GridTask(template=defaulttemplate, name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir)
#elif (himem):
# task = GridTask(template="himem.q", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir)
else:
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
task.wait()
self.parseLogfile()
def parseLogfile(self):
for f in glob.glob("%s/*.logfile" % (self.stepdir)):
line = ""
for line in loadLines(f):
### UCHIME throws an error when it does not find chimeras, even though it completes.
if line.find ("ERROR")>-1 and line.find("uchime")==-1:
self.failed=True
### last line
if line.find("quit()")==-1:
self.failed=True
class MothurSHHH(DefaultStep):
def __init__(self, PREV, nodeCPUs):
DefaultStep.__init__(self)
#self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("MPyro")
####
####self.nodeCPUs=nodeCPUs
self.nodeCPUs=4
self.start()
def performStep(self):
tasks = list()
TOC = self.find("file")
flows = self.find("flow")
TOC = loadLines("%s/%s" % (self.stepdir, TOC[0]))
TOC = [ ".".join(x.strip().split(".")[1:]) for x in TOC]
# for f in flows:
# tmp = ".".join(f.split(".")[1:])
#
# if tmp in TOC:
#
# ### split tmp into 10,000 lines chunks
# k = "split -l 7000 -a 3 %s %s.split." % (f, f)
# task = GridTask(template="pick", name="MPyroSplit", command=k, cpu=1, cwd = self.stepdir, debug=False)
# tasks.append(task)
# else:
# self.message("skipping %s" % (f))
#
# self.message("splitting %s file(s)" % len(tasks))
#
# for task in tasks:
# task.wait()
################################################
tasks = list()
#for chunk in glob.glob("%s/*.split.*" % (self.stepdir)):
# chunk = chunk.split("/")[-1]
#self.message(chunk)
# call = "shhh.flows(flow=%s, processors=%s, maxiter=100, large=10000)" % (chunk, self.nodeCPUs)
for f in flows:
tmp = ".".join(f.split(".")[1:])
if tmp in TOC:
call = "shhh.flows(flow=%s, processors=%s, maxiter=100, large=10000)" % (f, self.nodeCPUs)
k = "%smothur \"#%s\"" % (mothurpath, call)
self.message(k)
task = GridTask(template="pick", name="Mpyro", command=k, cpu=self.nodeCPUs, cwd = self.stepdir, debug=True)
tasks.append(task)
if len(tasks)==0:
self.failed=True
self.message("processing %s file(s)" % len(tasks))
for task in tasks:
task.wait()
class LUCYcheck(DefaultStep):
def __init__(self, nodeCPUs, PREV):
DefaultStep.__init__(self)
self.nodeCPUs=nodeCPUs
#self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("LUCY_check")
self.nodeCPUs=nodeCPUs
if self.nodeCPUs>32:
self.nodeCPUs=30
self.start()
def performStep(self):
f = self.find("fasta")[0]
q = self.find("qfile")[0]
statinfo = os.stat("%s/%s" % (self.stepdir, f))
#self.message(statinfo.st_size)
if statinfo.st_size==0:
self.message("%s is empty." % f)
self.failed=True
else:
k ="%s/lucy -error 0.002 0.002 -bracket 20 0.002 -debug -xtra %s -output %s.fastalucy %s.qfilelucy %s %s" % (binpath, self.nodeCPUs, f,q, f,q)
self.message(k)
if self.nodeCPUs>2:
task = GridTask(template=defaulttemplate, name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir)
else:
task = GridTask(template="pick", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir)
task.wait()
class LUCYtrim(DefaultStep):
def __init__(self, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("LUCY_trim")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
f = self.find("fastalucy")[0]
q = self.find("qfilelucy")[0]
k = "%spython %s/fastAtrimmer.py -l %s %s %s " % (binpath, scriptspath, f.split(".")[0], f, q)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
task.wait()
class MatchGroupsToFasta(DefaultStep):
def __init__(self, INS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("MatchGroups")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
f = self.find("fasta")
f = f[0]
g = self.find("group")
g = g[0]
n = self.find("name")
if len(n)>0:
n = "-n %s" % (n[0])
else:
n = ""
k = "%spython %s/MatchGroupsToFasta.py %s -f %s -g %s -o %s.matched.group" % (binpath, scriptspath, n, f, g, ".".join(g.split(".")[:-1]))
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
task.wait()
class MatchGroupsToList(DefaultStep):
def __init__(self, INS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("MatchGroups")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
f = self.find("list")
f = f[0]
g = self.find("group")
g = g[0]
k = "%spython %s/MatchGroupsToFasta.py -l %s -g %s -o %s.matched.group" % (binpath, scriptspath, f, g, ".".join(g.split(".")[:-1]))
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
task.wait()
class FileMerger(DefaultStep):
def __init__(self, TYPES, PREV, prefix="files"):
ARGS = {"types": TYPES}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_cat")
#self.nodeCPUs=nodeCPUs
self.prefix = prefix
self.start()
def performStep(self):
tasks = list()
for t in self.getInputValue("types").strip().split(","):
files = self.find(t)
if len(files)>0 and len(files)<25:
k = "cat %s > %s.x%s.merged.%s" % (" ".join(files), self.prefix, len(files), t)
self.message(k)
task = GridTask(template="pick", name="cat", command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
elif len(files)>=25:
k = "cat *.%s* > %s.x%s.merged.%s" % (t, self.prefix, len(files), t)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
#else:
# self.failed=True
for task in tasks:
task.wait()
time.sleep(1)
class FileSort(DefaultStep):
def __init__(self, TYPES, PREV):
ARGS = {"types": TYPES}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_sort")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
for t in self.getInputValue("types").strip().split(","):
files = self.find(t)
if len(files)>0:
k = "sort -n %s > files_x%s.sorted.%s" % (" ".join(files), len(files), t)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
time.sleep(1)
class FileType(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_type")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
for input, output in self.arguments.items():
files = self.find(input)
for file in files:
outname = "%s.%s" % (file, output)
k = "cp %s %s" % (file, outname)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
time.sleep(1)
class CleanFasta(DefaultStep):
def __init__(self, INS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CleanFasta")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
f = self.find("fasta")
f = f[0]
k = "%spython %s/CleanFasta.py -i %s -o %s.dash_stripped.fasta" % (binpath, scriptspath,f, ".".join(f.split(".")[:-1]))
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir, debug=False)
task.wait()
class MakeNamesFile(DefaultStep):
def __init__(self, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_names")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
files = self.find("fasta")
for f in files:
self.message("Creating 'names' file for sequences in {0}".format( f))
newname = f.strip().split(".")[:-1]
newname = "%s.names" % (".".join(newname))
otpt = open("%s/%s" % (self.stepdir,newname ), 'w')
for head, seq in FastaParser("%s/%s" % (self.stepdir, f)):
head = head.strip().split()[0]
otpt.write("%s\t%s\n" % (head, head))
otpt.close()
if len(files)==0:
self.message("No files to generate NAMES...")
class MakeGroupsFile(DefaultStep):
def __init__(self, PREV, id):
ARGS = {"groupid": id}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_groups")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
files = self.find("fasta")
for f in files:
id = self.getInputValue("groupid")
self.message("Creating 'groups' file; '{0}' for sequences in {1}".format(id, f))
newname = f.strip().split(".")[:-1]
newname = "%s.groups" % (".".join(newname))
otpt = open("%s/%s" % (self.stepdir, newname ), 'w')
for head, seq in FastaParser("%s/%s" % (self.stepdir, f)):
head = head.strip().split()[0]
otpt.write("%s\t%s\n" % (head, id))
otpt.close()
if len(files)==0:
self.message("No files to generate GROUPS...")
class MakeQualFile(DefaultStep):
def __init__(self, PREV, q):
ARGS = {"qual": q}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_qfile")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
f = self.find("fasta")[0]
q = self.getInputValue("qual")
self.message("Creating 'qual' file; '{0}' for sequences in {1}".format(q, f))
newname = f.strip().split(".")[:-1]
newname = "%s.qual" % (".".join(newname))
otpt = open("%s/%s" % (self.stepdir, newname ), 'w')
for head, seq in FastaParser("%s/%s" % (self.stepdir, f)):
otpt.write(">%s\n" % (head))
for k in seq:
otpt.write("%s " % (q))
otpt.write("\n")
otpt.close()
class AlignmentSummary(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("AlignmentSummary")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
self.project = __projectid__
self.mailaddress = __email__
f = self.find("fasta")[0]
ref = self.getInputValue("ref")
if ref == None:
ref="e_coli2"
th = self.getInputValue("thresh")
if th == None:
th="0.1"
self.message("summarizing an alignment in %s" % (f) )
k = "%spython %s/alignmentSummary.py -P %s -M %s -t 500 -p %s -i %s -o %s.alsum -T %s -x %s" % (binpath, scriptspath, self.project, self.mailaddress, ref, f,f, th, binpath)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir, debug=True)
task.wait()
for file in glob.glob("%s/*AlignmentSummary.o*"% (self.stepdir)):
x = loadLines(file)[-1].strip().split("\t")
self.message("Potential trimming coordinates: %s - %s [peak = %s] [thresh = %s]" % (x[1], x[3], x[5], x[7]) )
self.setOutputValue("trimstart", x[1])
self.setOutputValue("trimend", x[3])
self.setOutputValue("trimthresh", x[7])
#self.failed = True
class AlignmentPlot(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("AlignmentPlot")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
f = self.find("alsum")[0]
ref = self.getInputValue("ref")
if ref == None:
ref="e_coli"
trimstart = self.getInputValue("trimstart")
if trimstart==None:
trimstart=0
elif trimstart=="find":
trimstart = self.find("trimstart")[0]
trimend = self.getInputValue("trimend")
if trimend == None:
trimend=0
elif trimend == "find":
trimend = self.find("trimend")[0]
trimthresh = self.getInputValue("trimthresh")
if trimthresh == None:
trimthresh=0
elif trimthresh == "find":
trimthresh = self.find("trimthresh")[0]
self.message("Adding trimmig marks at: %s - %s" % (trimstart, trimend))
tmp = open("%s/alsum.r" % (self.stepdir), "w")
tmp.write("source(\"%s/alignmentSummary.R\")\n" % (scriptspath))
tmp.write("batch2(\"%s\", ref=\"%s\", trimstart=%s, trimend=%s, thresh=%s )\n" % (f, ref, trimstart, trimend, trimthresh))
tmp.close()
k = "%sR CMD BATCH alsum.r" % (binpath)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
task.wait()
class GroupRetriever(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("GroupCheck")
self.start()
def performStep(self):
minimum = self.getInputValue("mingroupmembers")
if minimum==None:
minimum=0
group = self.find("group")[0]
groups = defaultdict(int)
otpt = open("{0}/{1}.groupstats".format(self.stepdir, group), "w")
for line in loadLines("%s/%s" % (self.stepdir, group)):
x = line.strip().split("\t")[1]
groups[x]+=1
keys = sorted(groups, key=groups.get)
keys.reverse()
passinggroups=list()
failinggroups = list()
for k in keys:
v = groups[k]
if v>=minimum:
flag="ok"
passinggroups.append(k)
else:
flag="x"
failinggroups.append(k)
self.message("{0:<25}:{1:>10}:{2}".format( k, v, flag))
otpt.write("{0}\t{1}\t{2}\n".format(k,v, flag))
otpt.close()
if len(passinggroups)==0:
self.message("There are not enough reads to analyze. See documentation for -g [currently set to {0}] and -x arguments.".format(minimum))
self.failed=True
if self.getInputValue("report") in [None, "passing"]:
groupnames = "-".join(passinggroups)
else:
groupnames = "-".join(failinggroups)
self.setOutputValue("groups", groupnames)
class CDHIT_454(DefaultStep):
def __init__(self, nodeCPUs, ARGS, PREV):
DefaultStep.__init__(self)
if ARGS.has_key("T"):
self.nodeCPUs = ARGS["T"]
else:
self.nodeCPUs=nodeCPUs
ARGS["T"]=self.nodeCPUs
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CDHIT_454")
self.start()
def performStep(self):
args = ""
for arg, val in self.arguments.items():
args = "%s -%s %s" % (args, arg, val)
fs = self.find("fasta")
if len(fs)==0:
fs.extend(self.find("mate1"))
fs.extend(self.find("mate2"))
tasks=list()
for f in fs:
k ="%scd-hit-454 -i %s -o %s.cdhit %s" % (cdhitpath, f, f, args)
self.message(k)
task = GridTask(template=defaulttemplate, name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=False)
# if self.nodeCPUs>2:
# task = GridTask(template=defaulttemplate, name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=True)
# else:
# task = GridTask(template="himem.q", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=True)
tasks.append(task)
for task in tasks:
task.wait()
class CDHIT_EST(DefaultStep):
def __init__(self, nodeCPUs, ARGS, PREV):
DefaultStep.__init__(self)
if ARGS.has_key("T"):
self.nodeCPUs = ARGS["T"]
else:
self.nodeCPUs=nodeCPUs
ARGS["T"]=self.nodeCPUs
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CDHIT_EST")
self.start()
def performStep(self):
f = self.find("fasta")[0]
args = ""
dist = 1
for arg, val in self.arguments.items():
args = "%s -%s %s" % (args, arg, val)
if arg == "c":
dist = dist - (float(val))
k ="%scd-hit-est -i %s -o %s._%s_.cdhit %s" % (cdhitpath, f, f, dist, args)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=False)
# if self.nodeCPUs>2:
# task = GridTask(template="defaulttemplate", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=True)
# else:
# task = GridTask(template="himem.q", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=True)
task.wait()
class CDHIT_Perls(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CDHITperls")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
x = self.find("cdhitclstr")
tasks=list()
for cluster in x:
k = "%sclstr2tree.pl %s > %s.tre" % (cdhitpath, cluster, cluster)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir, debug=False)
tasks.append(task)
k = "%sclstr_size_histogram.pl %s > %s.hist.tab.txt " % (cdhitpath, cluster, cluster)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir, debug=False)
tasks.append(task)
k = "%sclstr_size_stat.pl %s > %s.size.tab.txt" % (cdhitpath, cluster, cluster)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir, debug=False)
tasks.append(task)
for task in tasks:
task.wait()
class CDHIT_Mothurize(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CDHIT_Mothurise")
self.start()
def performStep(self):
### mode can be "name" or None (to produce a name file)
### mode can be "#.## (to produce a labeled ra/sa/list combo)
m = self.getInputValue("mode")
if m == None:
m = "name"
modeswitch = "-o %s" % (m)
### is there an optional names file?
n = self.find("name")
if len(n)>0:
n = n[0]
nameswitch = "-n %s" % (n)
else:
nameswitch = ""
### is there a required cluster file
clst = self.find("cdhitclstr")
if len(clst)>0:
k = "%spython %sCDHIT_mothurize_clstr.py -c %s %s %s" % (binpath, scriptspath, clst[0], nameswitch, modeswitch)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, dependson=list(), cwd = self.stepdir, debug=False)
task.wait()
else:
self.failed=True
class R_defaultplots(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("R_plots")
self.start()
def performStep(self):
f = self.find("taxsummary")
anno = self.find("annotation")[0]
tasks = list()
script = open("%s/script.r" % (self.stepdir), "w")
script.write("""source("%sConsTaxonomyPlots.R")\n""" % (scriptspath))
for file in f:
dist = ".%s"% (self.getInputValue("dist"))
if file.find(dist)>-1 and file.find("seq")>-1 :
script.write("""makeDefaultBatchOfPlots("%s", "%s", fileprefix="SEQnum")\n""" % (anno, file))
elif file.find(dist)>-1 and file.find("otu")>-1 :
script.write("""makeDefaultBatchOfPlots("%s", "%s", fileprefix="OTUnum")\n""" % (anno, file))
script.close()
k = "%sR CMD BATCH script.r" % (binpath)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, dependson=list(), cwd = self.stepdir)
task.wait()
class R_OTUplots(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("R_plots_otu")
self.start()
def performStep(self):
####OTUS
f = self.find("fasta")
tasks = list()
#script = open("%s/script.r" % (self.stepdir), "w")
#script.write("""source("%sOtuReadPlots.r")\n""" % (scriptspath))
for file in f:
if file.find("annotated.fasta")>0:
k = """grep ">" %s | awk '{FS = "|"; OFS="\t"} {print $4, $5}' > %s.otustats""" % (file, file)
task = GridTask(template="pick", name=self.stepname, command=k, dependson=list(), cwd = self.stepdir, debug=False)
tasks.append(task)
#script.write("""makeBatch("%s.otustats")\n""" % (file))
####COVERAGE
f = self.find("clcassemblystats")
#for file in f:
#script.write("""makeBatchCoverage("%s")\n""" % (file))
#script.close()
### make sure all conversions are complete
for task in tasks:
task.wait()
k = "%sR CMD BATCH %sOtuReadPlots.r" % (binpath, scriptspath)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, dependson=list(), cwd = self.stepdir)
task.wait()
class R_rarefactions(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("R_rarefactions")
self.start()
def performStep(self):
for k in "r_nseqs,rarefaction,r_simpson,r_invsimpson,r_chao,r_shannon,r_shannoneven,r_coverage".strip().split(","):
f = self.find(k)
k = "%sR CMD BATCH %srarefactions.R" % (binpath, scriptspath)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, dependson=list(), cwd = self.stepdir)
task.wait()
class AlignmentTrim(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("AlignmentTrim")
self.start()
def performStep(self):
f = self.find("fasta")[0]
args = ""
for arg, val in self.arguments.items():
if val.startswith("find"):
val=self.find(val.split(":")[1])[0]
args = "%s -%s %s" % (args, arg, val)
k ="%spython %salignmentTrimmer.py %s -I %s" % (binpath, scriptspath, args, f)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
task.wait()
class AnnotateClusters(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("Annotate")
self.start()
def performStep(self):
l = self.find("list")
t = self.find("taxonomy")
f = self.find("fasta")
g = self.find("group")
self.message(l)
self.message(t)
self.message(f)
self.message(g)
if len(l)==0 or len(t)==0 or len(f)==0 or len(g) == 0:
self.failed=True
else:
tasks=list()
for fasta in f:
dist = fasta.split("_")[-2]
for tax in t:
if tax.find(dist)>-1 and tax.find("otu")==-1:
k = "%spython %sRetrieve.py %s %s %s %s %s" % (binpath, scriptspath, dist, l[0], tax, g[0], fasta)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
#################################################
## Functions
##
################################################
### Read in a file and return a list of lines
###
def loadLines(x):
try:
fp = open(x, "r")
cont=fp.readlines()
fp.close()
#print "%s line(s) loaded." % (len(cont))
except:
cont=""
#print "%s cannot be opened, does it exist? " % ( x )
return cont
def unlist(struct):
for x in struct:
if type(x) is tuple or type(x) is list or type(x) is set :
for y in unlist(x):
yield y
else:
yield x
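# Editor's note: an illustrative sketch, not part of the original pipeline --
# unlist() flattens arbitrarily nested tuples/lists/sets into a flat generator:
#
#     >>> list(unlist([1, (2, 3), [4, [5, 6]]]))
#     [1, 2, 3, 4, 5, 6]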
def init(id, e, maxnodes = 250, update=0.1):
global __projectid__
global __email__
global __admin__
global BOH
global MOTHUR
global QS
__projectid__ = id
__email__ = e
__admin__ = '[email protected]'
BOH = BufferedOutputHandler()
MOTHUR = MothurCommandInfo(path=mothurpath)
QS = TaskQueueStatus(update = update, maxnodes=maxnodes)
return (BOH)
def revComp(string):
global transtab
string=string.upper()
#reverse
string = string [::-1]
return string.translate(transtab)
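# Editor's note: illustrative example only, assuming the global transtab defined
# further below (ACGTN -> TGCAN) is in place:
#
#     >>> revComp("ACGTN")
#     'NACGT'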
#################################################
## Arguments
##
#################################################
## Begin
##
from string import maketrans
inttab= "ACGTN"
outtab = "TGCAN"
transtab = maketrans(inttab, outtab)
pool_open_files = BoundedSemaphore(value=4, verbose=False)
mothurpath = "/usr/local/devel/ANNOTATION/sszpakow/YAP/bin/mothur-current/"
cdhitpath = "/usr/local/devel/ANNOTATION/sszpakow/YAP/bin/cdhit-current/"
scriptspath = "/usr/local/devel/ANNOTATION/sszpakow/YAP/scripts/"
binpath = "/usr/local/devel/ANNOTATION/sszpakow/YAP/bin/"
dotpath = "/usr/local/packages/graphviz/bin/"
defaulttemplate = "default.q"
#################################################
## Finish
#################################################
| mit | 3,567,784,365,733,237,000 | 26.780702 | 226 | 0.617004 | false |
CuBoulder/atlas | atlas/backup_operations.py | 1 | 1820 | """
atlas.backup_operations
~~~~
Commands that run on servers to create, restore, and remove backups.
Instance methods:
Delete - Local - Remove backup files.
"""
import logging
import os
from datetime import datetime
from time import time
from atlas import utilities
from atlas.config import (ENVIRONMENT, INSTANCE_ROOT, WEB_ROOT, CORE_WEB_ROOT_SYMLINKS,
NFS_MOUNT_FILES_DIR, NFS_MOUNT_LOCATION, SAML_AUTH,
SERVICE_ACCOUNT_USERNAME, SERVICE_ACCOUNT_PASSWORD, VARNISH_CONTROL_KEY,
SMTP_PASSWORD, WEBSERVER_USER_GROUP, ATLAS_LOCATION, SITE_DOWN_PATH,
SSH_USER, BACKUP_PATH)
from atlas.config_servers import (SERVERDEFS, ATLAS_LOGGING_URLS, API_URLS,
VARNISH_CONTROL_TERMINALS, BASE_URLS)
# Setup a sub-logger. See tasks.py for longer comment.
log = logging.getLogger('atlas.backup_operations')
def backup_delete(item):
"""Remove backup files from servers
Arguments:
item {string} -- Backup item to remove
"""
log.debug('Backup | Delete | Item - %s', item)
log.info('Backup | Delete | Item - %s ', item['_id'])
instance = utilities.get_single_eve('sites', item['site'], item['site_version'])
pretty_filename = '{0}_{1}'.format(
instance['sid'], item['backup_date'].strftime("%Y-%m-%d-%H-%M-%S"))
pretty_database_filename = '{0}.sql'.format(pretty_filename)
database_path = '{0}/backups/{1}'.format(BACKUP_PATH, pretty_database_filename)
pretty_files_filename = '{0}.tar.gz'.format(pretty_filename)
files_path = '{0}/backups/{1}'.format(BACKUP_PATH, pretty_files_filename)
os.remove(files_path)
os.remove(database_path)
log.info('Backup | Delete | Complete | Item - %s', item['_id'])
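# Editor's note: for illustration only -- with a hypothetical instance sid of 'p1s123'
# and a backup_date of 2020-01-01 00:00:00, the paths removed above would be
# '<BACKUP_PATH>/backups/p1s123_2020-01-01-00-00-00.sql' and
# '<BACKUP_PATH>/backups/p1s123_2020-01-01-00-00-00.tar.gz'.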
| mit | -1,822,666,200,972,725,500 | 36.916667 | 98 | 0.639011 | false |
michaelrice/gotland | gotland/rabbit/api.py | 1 | 18344 | # Copyright 2014 Michael Rice <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
try:
from urllib import quote, quote_plus
except ImportError:
from urllib.parse import quote, quote_plus
import requests
from requests.auth import HTTPBasicAuth
class Client(object):
def __init__(self, end_point="http://localhost:15672/api/",
username="guest", password="guest"):
"""Client connection info for the rabbitmq_management API
Usage::
myapi = Client(username="sam", password="secure")
"""
self.end_point = end_point
self.auth = HTTPBasicAuth(username, password)
def _get_data(self, path, **kwargs):
"""Lots of work to do here. Literally doing the least possible
to just get something functional. Need to add error handling,
and raise proper exceptions"""
params = None
if 'params' in kwargs:
params = kwargs.get("params")
response = requests.get(path, auth=self.auth, params=params)
if response.status_code != 200:
return
return response.json()
def _send_data(self, path, data=None, request_type='PUT'):
data = json.dumps(data)
if data == 'null':
data = None
headers = {
"Content-type": "application/json",
"Accept": "application/json"
}
if request_type == 'PUT':
response = requests.put(path, data, headers=headers, auth=self.auth)
elif request_type == 'DELETE':
response = requests.delete(path, auth=self.auth, headers=headers,
data=data)
else:
response = requests.post(path, data=data, headers=headers,
auth=self.auth)
if response.status_code == 204:
return
return response.json()
def check_aliveness(self, vhost='/'):
"""Check aliveness of a given vhost. By default / will be checked.
Usage::
myapi = Client()
if not myapi.check_aliveness():
handle_down_event()
"""
path = self.end_point + "aliveness-test/" + quote_plus(vhost)
data = self._get_data(path)
if data is None:
return False
try:
if data.get("status") != "ok":
return False
return True
except KeyError:
return False
def get_overview(self):
"""Various random bits of information that describe the
whole system."""
path = self.end_point + "overview"
data = self._get_data(path)
return data
def get_nodes(self):
"""A list of nodes in the RabbitMQ cluster."""
path = self.end_point + "nodes"
data = self._get_data(path)
return data
def get_node_info(self, node_name, get_memory=False):
"""An individual node in the RabbitMQ cluster. Add "get_memory=true"
to get memory statistics."""
path = self.end_point + "nodes/" + node_name
params = None
if get_memory:
params = {"memory": "true"}
data = self._get_data(path, params=params)
return data
def get_extensions(self):
"""A list of extensions to the management plugin"""
path = self.end_point + "extensions"
data = self._get_data(path)
return data
def get_connections(self):
"""A list of all open connections."""
path = self.end_point + "connections"
data = self._get_data(path)
return data
def get_connections_name(self, name):
"""Gets info for an individual connection"""
name = quote(name)
path = self.end_point + "connections/{0}".format(name)
data = self._get_data(path)
return data
def get_channels(self):
"""List of all channels"""
path = self.end_point + "channels"
data = self._get_data(path)
return data
def get_channels_name(self, channel=None):
"""Info about a specific channel"""
channel = quote(channel)
path = self.end_point + "channels/{0}".format(channel)
data = self._get_data(path)
return data
def get_exchanges(self):
"""List of all exchanges"""
path = self.end_point + "exchanges"
data = self._get_data(path)
return data
def get_exchanges_vhost(self, vhost='/'):
"""List of all exchanges on a given vhost"""
path = self.end_point + "exchanges/{0}".format(quote_plus(vhost))
data = self._get_data(path)
return data
def get_exchanges_name_vhost(self, vhost='/', exchange_name=None):
"""Gets info about a given echange (name) on a given vhost"""
vhost = quote_plus(vhost)
path = self.end_point + "exchanges/{0}/{1}".format(vhost, exchange_name)
return self._get_data(path)
def get_bindings_for_exchange(self, vhost='/', exchange_name=None,
stype="source"):
"""A list of all bindings in which a given exchange is the source."""
path = self.end_point + "exchanges/{0}/{1}/bindings/{2}"
path = path.format(quote_plus(vhost), exchange_name, stype)
return self._get_data(path)
def get_queues(self):
"""A list of all queues on the server"""
path = self.end_point + "queues"
return self._get_data(path)
def get_queues_by_vhost(self, vhost='/'):
"""A list of all queues in a given virtual host."""
path = self.end_point + "queues/{0}".format(quote_plus(vhost))
return self._get_data(path)
def get_queue_by_name(self, queue_name=None, vhost='/'):
"""Inforation about an individual queue. Takes optional vhost param
Checks / as the default vhost"""
vhost = quote_plus(vhost)
path = self.end_point + "queues/{0}/{1}".format(vhost, queue_name)
return self._get_data(path)
def get_bindings_by_queue(self, queue_name=None, vhost='/'):
"""A list of all bindings on a given queue. Takes an optional
vhost param. The default vhost is /"""
path = self.end_point + "queues/{0}/{1}/bindings"
path = path.format(quote_plus(vhost), queue_name)
return self._get_data(path)
def get_bindings(self):
"""A list of all bindings."""
path = self.end_point + "bindings"
return self._get_data(path)
def get_bindings_by_vhost(self, vhost='/'):
"""A list of all bindings in a given virtual host."""
path = self.end_point + "bindings/{0}".format(quote_plus(vhost))
return self._get_data(path)
def get_bindings_between_exchange_and_queue(self, queue_name=None,
exchange_name=None, vhost='/'):
"""A list of all bindings between an exchange and a queue.
Remember, an exchange and a queue can be bound together many times!
"""
path = self.end_point + "bindings/{0}/e/{1}/q/{2}"
path = path.format(quote_plus(vhost), exchange_name, queue_name)
return self._get_data(path)
def update_bindings_between_exchange_and_queue(self):
"""A list of all bindings between an exchange and a queue.
Remember, an exchange and a queue can be bound together many times!
To create a new binding, POST to this URI. You will need a body looking
something like this:
{"routing_key":"my_routing_key","arguments":[]}
All keys are optional. The response will contain a Location header
telling you the URI of your new binding."""
pass
def get_binding_between_exchange_and_queue(self, queue_name=None,
exchange_name=None, vhost='/'):
"""
An individual binding between an exchange and a queue.
The props part of the URI is a "name" for the binding composed of
its routing key and a hash of its arguments.
"""
path = self.end_point + "bindings/{0}/e/{1}/q/{2}/props"
path = path.format(quote_plus(vhost), exchange_name, queue_name)
return self._get_data(path)
def get_bindings_between_exchanges(self, exchange_name_s=None,
exchange_name_d=None, stype="destination", vhost='/'):
"""A list of all bindings between two exchanges. Similar to the list
of all bindings between an exchange and a queue, above.
stype can be either "destination" or "props"
"""
path = self.end_point + "bindings/{0}/e/{1}/e/{2}/{3}"
vhost = quote_plus(vhost)
path = path.format(vhost, exchange_name_s, exchange_name_d, stype)
return self._get_data(path)
def get_vhosts(self):
"""Return a list of all vhosts"""
path = self.end_point + "vhosts"
return self._get_data(path)
def get_vhost_by_name(self, vhost='/'):
"""An individual virtual host. As a virtual host only has a name,
you do not need an HTTP body when PUTing one of these.
"""
path = self.end_point + "vhosts/{0}".format(quote_plus(vhost))
return self._get_data(path)
def get_premissions_by_vhost(self, vhost='/'):
"""A list of all permissions for a given virtual host."""
vhost = quote_plus(vhost)
path = self.end_point + "vhosts/{0}/permissions".format(vhost)
return self._get_data(path)
def get_users(self):
"""A list of all users"""
path = self.end_point + "users"
return self._get_data(path)
def get_user_by_name(self, username="guest"):
"""Info about an individual user"""
path = self.end_point + "users/{0}".format(username)
return self._get_data(path)
def get_user_permissions(self, username="guest"):
"""A list of all permissions for a given user."""
path = self.end_point + "users/{0}/permissions".format(username)
return self._get_data(path)
def whoami(self):
"""Details of the currently authenticated user."""
path = self.end_point + "whoami"
return self._get_data(path)
def get_permissions(self):
"""A list of all permissions for all users."""
path = self.end_point + "permissions"
return self._get_data(path)
def get_user_permissions_by_vhost(self, username="guest", vhost='/'):
"""An individual permission of a user and virtual host."""
vhost = quote_plus(vhost)
path = self.end_point + "permissions/{0}/{1}".format(vhost, username)
return self._get_data(path)
def get_parameters(self):
"""A list of all parameters."""
path = self.end_point + "parameters"
return self._get_data(path)
def get_parameters_by_component(self, component=None):
"""A list of all parameters for a given component."""
path = self.end_point + "parameters/{0}".format(component)
return self._get_data(path)
def get_parameters_by_component_by_vhost(self, component=None,
vhost='/'):
"""A list of all parameters for a given component and virtual host"""
vhost = quote_plus(vhost)
path = self.end_point + "parameters/{1}/{0}".format(vhost, component)
return self._get_data(path)
def get_parameter_for_vhost_by_component_name(self, component=None,
parameter_name=None, vhost='/'):
"""Get an individual parameter value from a given vhost & component"""
path = self.end_point + "parameters/{1}/{0}/{2}"
path = path.format(quote_plus(vhost), component, parameter_name)
return self._get_data(path)
def get_policies(self):
"""A list of all policies"""
path = self.end_point + "policies"
return self._get_data(path)
def get_policies_by_vhost(self, vhost='/'):
"""A list of all policies in a given virtual host."""
path = self.end_point + "policies/{0}".format(quote_plus(vhost))
return self._get_data(path)
def get_policy_for_vhost_by_name(self, name=None, vhost='/'):
"""Information about an individual policy"""
vhost = quote_plus(vhost)
path = self.end_point + "policies/{0}/{1}".format(vhost, name)
return self._get_data(path)
def create_exchange_on_vhost(self, exchange_name=None,
body={}, vhost='/'):
"""An individual exchange. To PUT an exchange, you will need a body
looking something like this:
{
"type":"direct",
"auto_delete":false,
"durable":true,
"internal":false,
"name": "mytest",
"arguments":[]
}
"""
vhost = quote_plus(vhost)
path = self.end_point + "exchanges/{0}/{1}".format(vhost, exchange_name)
return self._send_data(path, data=body)
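# Editor's note: a minimal usage sketch (not from the original source); the exchange
# name and body values below are invented for illustration:
#
#     client = Client(end_point="http://localhost:15672/api/",
#                     username="guest", password="guest")
#     client.create_exchange_on_vhost(
#         exchange_name="mytest",
#         body={"type": "direct", "auto_delete": False, "durable": True,
#               "internal": False, "arguments": []},
#         vhost="/")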
def create_queue_on_vhost(self, queue_name=None, body={}, vhost='/'):
"""An individual queue. To PUT a queue, you will need a body looking
something like this:
{
"auto_delete":false,
"durable":true,
"arguments":[],
"node":"rabbit@localnode-1"
}
"""
vhost = quote_plus(vhost)
path = self.end_point + "queues/{0}/{1}".format(vhost, queue_name)
return self._send_data(path, data=body)
def create_vhost(self, vhost):
"""An individual virtual host. As a virtual host only has a name,
you do not need an HTTP body when PUTing one of these."""
path = self.end_point + "vhosts/{0}".format(quote_plus(vhost))
return self._send_data(path)
def create_user(self, username, body={}):
"""An individual user. To PUT a user, you will need a body looking
something like this:
{
"password":"secret",
"tags":"administrator"
}
or:
{
"password_hash":"2lmoth8l4H0DViLaK9Fxi6l9ds8=",
"tags":"administrator"
}
The tags key is mandatory. Either password or password_hash must be
set. Setting password_hash to "" will ensure the user cannot use a
password to log in. tags is a comma-separated list of tags for the
user. Currently recognised tags are "administrator", "monitoring"
and "management".
"""
path = self.end_point + "users/{0}".format(username)
return self._send_data(path, data=body)
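# Editor's note: illustrative call only, reusing the hypothetical client from the
# sketch above; the credentials are placeholders:
#
#     client.create_user("sam", body={"password": "secret", "tags": "administrator"})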
def grant_permissions_on_vhost(self, body={}, username=None,
vhost='/'):
"""An individual permission of a user and virtual host. To PUT a
permission, you will need a body looking something like this:
{
"configure":".*",
"write":".*",
"read":".*"
}
All keys are mandatory.
"""
vhost = quote_plus(vhost)
path = self.end_point + "permissions/{0}/{1}".format(vhost, username)
return self._send_data(path, data=body)
def update_parameter(self, component=None, body={}, parameter_name=None,
vhost='/'):
"""An individual parameter. To PUT a parameter, you will need a body
looking something like this:
{
"vhost": "/",
"component":"federation",
"name":"local_username",
"value":"guest"
}
"""
vhost = quote_plus(vhost)
path = "parameters/{1}/{0}/{2}".format(vhost, component, parameter_name)
return self._send_data(path, data=body)
def update_policies(self, policy_name=None, body={}, vhost='/'):
"""An individual policy. To PUT a policy, you will need a body
looking something like this:
{
"pattern":"^amq.",
"definition": {
"federation-upstream-set":"all"
},
"priority":0
}
policies/vhost/name
"""
vhost = quote_plus(vhost)
path = self.end_point + "policies/{0}/{1}".format(vhost, policy_name)
return self._send_data(path, data=body)
def delete_connection(self, name=None, reason=None):
"""Removes a connection by name, with an optional reason"""
path = self.end_point + "connections/" + name
self._send_data(path, request_type='DELETE')
def delete_exchange(self, exchange_name=None, vhost='/'):
"""Delete an exchange from a vhost"""
vhost = quote_plus(vhost)
path = self.end_point + "exchanges/{0}/{1}".format(vhost, exchange_name)
self._send_data(path, request_type='DELETE')
def delete_queue(self, queue_name=None, vhost='/'):
"""Delete a queue from a vhost"""
vhost = quote_plus(vhost)
path = self.end_point + "queues/{0}/{1}".format(vhost, queue_name)
self._send_data(path, request_type='DELETE')
def delete_contents_from_queue(self, queue_name=None, vhost='/'):
"""Delete the contents of a queue. If no vhost name is given the
defult / will be used"""
path = self.end_point + "queues/{0}/{1}/contents"
path = path.format(quote_plus(vhost), queue_name)
self._send_data(path, request_type='DELETE')
#def delete_thing(self):
# """An individual binding between an exchange and a queue. The props
# part of the URI is a "name" for the binding composed of its routing
# key and a hash of its arguments."""
def delete_vhost(self, vhost):
"""Delete a given vhost"""
path = self.end_point + "vhosts/{0}".format(quote_plus(vhost))
self._send_data(path, request_type='DELETE')
def delete_user(self, user=None):
"""Delete a given user"""
path = self.end_point + "users/{0}".format(user)
self._send_data(path, request_type='DELETE')
| apache-2.0 | -7,473,669,924,650,975,000 | 37.216667 | 80 | 0.590656 | false |
clchiou/garage | py/g1/http/http2_servers/g1/http/http2_servers/parts.py | 1 | 1361 | import g1.networks.servers.parts
from g1.apps import parameters
from g1.apps import utils
from g1.bases import labels
from .. import http2_servers # pylint: disable=relative-beyond-top-level
from . import nghttp2
SERVER_LABEL_NAMES = (
# Input.
'application',
# Private.
('server', g1.networks.servers.parts.SERVER_LABEL_NAMES),
)
def define_server(module_path=None, **kwargs):
module_path = module_path or http2_servers.__name__
module_labels = labels.make_nested_labels(module_path, SERVER_LABEL_NAMES)
setup_server(
module_labels,
parameters.define(module_path, make_server_params(**kwargs)),
)
return module_labels
def setup_server(module_labels, module_params):
g1.networks.servers.parts.setup_server(module_labels.server, module_params)
utils.define_maker(
# Although this is called a server, from the perspective of
# g1.networks.servers.SocketServer, this is a handler.
http2_servers.HttpServer,
{
'server_socket': module_labels.server.socket,
'application': module_labels.application,
'return': module_labels.server.handler,
},
)
def make_server_params(**kwargs):
kwargs.setdefault('protocols', (nghttp2.NGHTTP2_PROTO_VERSION_ID, ))
return g1.networks.servers.parts.make_server_params(**kwargs)
| mit | 8,790,086,334,072,161,000 | 30.651163 | 79 | 0.686995 | false |
ajaygarg84/sugar | src/jarabe/frame/clipboardpanelwindow.py | 1 | 5219 | # Copyright (C) 2007, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from urlparse import urlparse
import hashlib
from gi.repository import Gtk
from gi.repository import Gdk
from jarabe.frame.framewindow import FrameWindow
from jarabe.frame.clipboardtray import ClipboardTray
from jarabe.frame import clipboard
class ClipboardPanelWindow(FrameWindow):
def __init__(self, frame, orientation):
FrameWindow.__init__(self, orientation)
self._frame = frame
# Listening for new clipboard objects
# NOTE: we need to keep a reference to Gtk.Clipboard in order to keep
# listening to it.
self._clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self._clipboard.connect('owner-change', self._owner_change_cb)
self._clipboard_tray = ClipboardTray()
self._clipboard_tray.show()
self.append(self._clipboard_tray)
# Receiving dnd drops
self.drag_dest_set(0, [], 0)
self.connect('drag_motion', self._clipboard_tray.drag_motion_cb)
self.connect('drag_leave', self._clipboard_tray.drag_leave_cb)
self.connect('drag_drop', self._clipboard_tray.drag_drop_cb)
self.connect('drag_data_received',
self._clipboard_tray.drag_data_received_cb)
def _owner_change_cb(self, x_clipboard, event):
logging.debug('owner_change_cb')
if self._clipboard_tray.owns_clipboard():
return
cb_service = clipboard.get_instance()
result, targets = x_clipboard.wait_for_targets()
cb_selections = []
if not result:
return
target_is_uri = False
for target in targets:
if target not in ('TIMESTAMP', 'TARGETS',
'MULTIPLE', 'SAVE_TARGETS'):
logging.debug('Asking for target %s.', target)
if target == 'text/uri-list':
target_is_uri = True
selection = x_clipboard.wait_for_contents(target)
if not selection:
logging.warning('no data for selection target %s.', target)
continue
cb_selections.append(selection)
if target_is_uri:
uri = selection.get_uris()[0]
filename = uri[len('file://'):].strip()
md5 = self._md5_for_file(filename)
data_hash = hash(md5)
else:
data_hash = hash(selection.get_data())
if len(cb_selections) > 0:
key = cb_service.add_object(name="", data_hash=data_hash)
if key is None:
return
cb_service.set_object_percent(key, percent=0)
for selection in cb_selections:
self._add_selection(key, selection)
cb_service.set_object_percent(key, percent=100)
def _md5_for_file(self, file_name):
'''Calculate md5 for file data
Calculating block wise to prevent issues with big files in memory
'''
block_size = 8192
md5 = hashlib.md5()
f = open(file_name, 'r')
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
f.close()
return md5.digest()
def _add_selection(self, key, selection):
if not selection.get_data():
logging.warning('no data for selection target %s.',
selection.get_data_type())
return
selection_type = str(selection.get_data_type())
logging.debug('adding type ' + selection_type + '.')
cb_service = clipboard.get_instance()
if selection_type == 'text/uri-list':
uris = selection.get_uris()
if len(uris) > 1:
raise NotImplementedError('Multiple uris in text/uri-list' \
' still not supported.')
uri = uris[0]
scheme, netloc_, path_, parameters_, query_, fragment_ = \
urlparse(uri)
on_disk = (scheme == 'file')
cb_service.add_object_format(key,
selection_type,
uri,
on_disk)
else:
cb_service.add_object_format(key,
selection_type,
selection.get_data(),
on_disk=False)
| gpl-2.0 | 9,051,991,144,524,601,000 | 35.496503 | 79 | 0.569266 | false |
yCanta/yCanta | convert/higherpraise.py | 1 | 2030 | import urllib2
import re
def convert(input_url):
'''return a dictionary as documented in __init__.py '''
# Example higherpraise.com content
# <div aligni="center"><!-- #BeginEditable "1" -->
# <table attrs=". . . ">
# ...
# <h1 align="center"><b><font class="default"><u>
# Title text </u></font><b></h1>
# <h4 align="center"><b><font class="default"><u>
# Author </u></font><b></h1>
# ...
# <pre><strong>Text, more text<br>More text<br><br>Next chunk
# </strong></pre>
# OR
# <pre><strong>Text, more text
# More text
#
# Next Chunk
# </strong></pre>
# ...
# </table>
# <!-- #EndEditable -->
content = urllib2.urlopen(input_url).read()
tag = re.compile(r'\<.*?\>')
try:
song_title = tag.sub('', re.split('\\<.*?h1.*?\\>', content)[1]).strip()
except:
song_title = ''
try:
song_author = tag.sub('', re.split('\\<.*?h4.*?\\>', content)[1]).strip()
except:
song_author = ''
# now the real work -- parsing content into a song
try:
song_div = content.split('<pre>')[1].split('</pre>')[0].replace(' ', ' ')
except:
song_div = content.split('<PRE>')[1].split('</PRE>')[0].replace(' ', ' ')
song_div = tag.sub('', song_div.replace('<br>','\n').replace('<BR>',''))
chunks = []
chunk_types = []
lines = []
# Split into multiple chunks
chunk_list = re.split('\n[ \t\r\f\v]*?\n(?=\s*?\S)', song_div)
for chunk in chunk_list:
if chunk.strip() in (song_title, song_author):
continue
chunks.append(chunk)
chunk_types.append('verse')
# Leave as one chunk
# chunks.append(song_div)
# chunk_types.append('verse')
return dict(title=song_title, author=song_author, chunks=chunks, chunk_type=chunk_types)
if __name__ == '__main__':
import sys
d = convert(sys.argv[1])
# write the song:
print d['title']
print '-'*len(d['title'])
print
print 'Author:', d['author']
print
for chunk in d['chunks']:
print chunk
print
| unlicense | 8,676,968,065,586,122,000 | 24.061728 | 90 | 0.550246 | false |
bolkedebruin/airflow | airflow/hooks/pig_hook.py | 1 | 1159 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.apache.pig.hooks.pig`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.apache.pig.hooks.pig import PigCliHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.pig.hooks.pig`.",
DeprecationWarning, stacklevel=2
)
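# Editor's note (migration example, not part of the original module): callers should
# import the hook from the provider package instead of this deprecated path, e.g.
#     from airflow.providers.apache.pig.hooks.pig import PigCliHook
# rather than
#     from airflow.hooks.pig_hook import PigCliHook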
| apache-2.0 | -3,636,194,131,831,471,000 | 38.965517 | 86 | 0.761001 | false |
shlomozippel/ansible | lib/ansible/playbook/play.py | 1 | 14133 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from ansible import utils
from ansible import errors
from ansible.playbook.task import Task
import shlex
import os
class Play(object):
__slots__ = [
'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
'handlers', 'remote_user', 'remote_port',
'sudo', 'sudo_user', 'transport', 'playbook',
'tags', 'gather_facts', 'serial', '_ds', '_handlers', '_tasks',
'basedir'
]
# to catch typos and so forth -- these are userland names
# and don't line up 1:1 with how they are stored
VALID_KEYS = [
'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
'tasks', 'handlers', 'user', 'port', 'include',
'sudo', 'sudo_user', 'connection', 'tags', 'gather_facts', 'serial'
]
# *************************************************
def __init__(self, playbook, ds, basedir):
''' constructor loads from a play datastructure '''
for x in ds.keys():
if not x in Play.VALID_KEYS:
raise errors.AnsibleError("%s is not a legal parameter in an Ansible Playbook" % x)
# TODO: more error handling
hosts = ds.get('hosts')
if hosts is None:
raise errors.AnsibleError('hosts declaration is required')
elif isinstance(hosts, list):
hosts = ';'.join(hosts)
self._ds = ds
self.playbook = playbook
self.basedir = basedir
self.vars = ds.get('vars', {})
self.vars_files = ds.get('vars_files', [])
self.vars_prompt = ds.get('vars_prompt', {})
self.vars = self._get_vars()
self.hosts = utils.template(basedir, hosts, self.vars)
self.name = ds.get('name', self.hosts)
self._tasks = ds.get('tasks', [])
self._handlers = ds.get('handlers', [])
self.remote_user = utils.template(basedir, ds.get('user', self.playbook.remote_user), self.vars)
self.remote_port = ds.get('port', self.playbook.remote_port)
self.sudo = ds.get('sudo', self.playbook.sudo)
self.sudo_user = utils.template(basedir, ds.get('sudo_user', self.playbook.sudo_user), self.vars)
self.transport = ds.get('connection', self.playbook.transport)
self.tags = ds.get('tags', None)
self.gather_facts = ds.get('gather_facts', None)
self.serial = int(utils.template_ds(basedir, ds.get('serial', 0), self.vars))
if isinstance(self.remote_port, basestring):
self.remote_port = utils.template(basedir, self.remote_port, self.vars)
self._update_vars_files_for_host(None)
self._tasks = self._load_tasks(self._ds.get('tasks', []))
self._handlers = self._load_tasks(self._ds.get('handlers', []))
if self.tags is None:
self.tags = []
elif type(self.tags) in [ str, unicode ]:
self.tags = [ self.tags ]
elif type(self.tags) != list:
self.tags = []
if self.sudo_user != 'root':
self.sudo = True
# *************************************************
def _load_tasks(self, tasks, vars={}, additional_conditions=[]):
''' handle task and handler include statements '''
results = []
for x in tasks:
task_vars = self.vars.copy()
task_vars.update(vars)
if 'include' in x:
tokens = shlex.split(x['include'])
items = ['']
included_additional_conditions = list(additional_conditions)
for k in x:
if k.startswith("with_"):
plugin_name = k[5:]
if plugin_name not in utils.plugins.lookup_loader:
raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
terms = utils.template_ds(self.basedir, x[k], task_vars)
items = utils.plugins.lookup_loader.get(plugin_name, basedir=self.basedir, runner=None).run(terms, inject=task_vars)
elif k.startswith("when_"):
included_additional_conditions.append(utils.compile_when_to_only_if("%s %s" % (k[5:], x[k])))
elif k in ("include", "vars", "only_if"):
pass
else:
raise errors.AnsibleError("parse error: task includes cannot be used with other directives: %s" % k)
if 'vars' in x:
task_vars.update(x['vars'])
if 'only_if' in x:
included_additional_conditions.append(x['only_if'])
for item in items:
mv = task_vars.copy()
mv['item'] = item
for t in tokens[1:]:
(k,v) = t.split("=", 1)
mv[k] = utils.template_ds(self.basedir, v, mv)
include_file = utils.template(self.basedir, tokens[0], mv)
data = utils.parse_yaml_from_file(utils.path_dwim(self.basedir, include_file))
results += self._load_tasks(data, mv, included_additional_conditions)
elif type(x) == dict:
results.append(Task(self,x,module_vars=task_vars, additional_conditions=additional_conditions))
else:
raise Exception("unexpected task type")
for x in results:
if self.tags is not None:
x.tags.extend(self.tags)
return results
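# Editor's note: a hypothetical example (not from the original source) of the include
# structures this method expands -- a task entry such as
#     {'include': 'handlers.yml user=alice', 'with_items': ['a', 'b'], 'vars': {'port': 80}}
# is expanded once per item from the 'items' lookup plugin, with 'item', 'user' and
# 'port' merged into the module vars before the included file is parsed recursively.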
# *************************************************
def tasks(self):
''' return task objects for this play '''
return self._tasks
def handlers(self):
''' return handler objects for this play '''
return self._handlers
# *************************************************
def _get_vars(self):
''' load the vars section from a play, accounting for all sorts of variable features
including loading from yaml files, prompting, and conditional includes of the first
file found in a list. '''
if self.vars is None:
self.vars = {}
if type(self.vars) not in [dict, list]:
raise errors.AnsibleError("'vars' section must contain only key/value pairs")
vars = {}
# translate a list of vars into a dict
if type(self.vars) == list:
for item in self.vars:
if getattr(item, 'items', None) is None:
raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
k, v = item.items()[0]
vars[k] = v
else:
vars.update(self.vars)
if type(self.vars_prompt) == list:
for var in self.vars_prompt:
if not 'name' in var:
raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = var.get("private", True)
confirm = var.get("confirm", False)
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
if vname not in self.playbook.extra_vars:
vars[vname] = self.playbook.callbacks.on_vars_prompt (
vname, private, prompt, encrypt, confirm, salt_size, salt, default
)
elif type(self.vars_prompt) == dict:
for (vname, prompt) in self.vars_prompt.iteritems():
prompt_msg = "%s: " % prompt
if vname not in self.playbook.extra_vars:
vars[vname] = self.playbook.callbacks.on_vars_prompt(
varname=vname, private=False, prompt=prompt_msg, default=None
)
else:
raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")
results = self.playbook.extra_vars.copy()
results.update(vars)
return results
# *************************************************
def update_vars_files(self, hosts):
''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
# now loop through all the hosts...
for h in hosts:
self._update_vars_files_for_host(h)
# *************************************************
def compare_tags(self, tags):
''' given a list of tags that the user has specified, return two lists:
matched_tags: tags were found within the current play and match those given
by the user
unmatched_tags: tags that were found within the current play but do not match
any provided by the user '''
# gather all the tags in all the tasks into one list
all_tags = []
for task in self._tasks:
all_tags.extend(task.tags)
# compare the lists of tags using sets and return the matched and unmatched
all_tags_set = set(all_tags)
tags_set = set(tags)
matched_tags = all_tags_set & tags_set
unmatched_tags = all_tags_set - tags_set
return matched_tags, unmatched_tags
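# Editor's note: illustrative only -- if the play's tasks carry tags {'setup', 'deploy'}
# and the user passes ['deploy', 'debug'], this returns ({'deploy'}, {'setup'}):
# matched_tags holds play tags the user asked for, unmatched_tags holds play tags
# the user did not request.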
# *************************************************
def _has_vars_in(self, msg):
return ((msg.find("$") != -1) or (msg.find("{{") != -1))
# *************************************************
def _update_vars_files_for_host(self, host):
if type(self.vars_files) != list:
self.vars_files = [ self.vars_files ]
if host is not None:
inject = {}
inject.update(self.playbook.inventory.get_variables(host))
inject.update(self.playbook.SETUP_CACHE[host])
for filename in self.vars_files:
if type(filename) == list:
# loop over all filenames, loading the first one, and failing if # none found
found = False
sequence = []
for real_filename in filename:
filename2 = utils.template(self.basedir, real_filename, self.vars)
filename3 = filename2
if host is not None:
filename3 = utils.template(self.basedir, filename2, inject)
filename4 = utils.path_dwim(self.basedir, filename3)
sequence.append(filename4)
if os.path.exists(filename4):
found = True
data = utils.parse_yaml_from_file(filename4)
if type(data) != dict:
raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
if host is not None:
if self._has_vars_in(filename2) and not self._has_vars_in(filename3):
# this filename has variables in it that were fact specific
# so it needs to be loaded into the per host SETUP_CACHE
self.playbook.SETUP_CACHE[host].update(data)
self.playbook.callbacks.on_import_for_host(host, filename4)
elif not self._has_vars_in(filename4):
# found a non-host specific variable, load into vars and NOT
# the setup cache
self.vars.update(data)
elif host is not None:
self.playbook.callbacks.on_not_import_for_host(host, filename4)
if found:
break
if not found and host is not None:
raise errors.AnsibleError(
"%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
)
else:
# just one filename supplied, load it!
filename2 = utils.template(self.basedir, filename, self.vars)
filename3 = filename2
if host is not None:
filename3 = utils.template(self.basedir, filename2, inject)
filename4 = utils.path_dwim(self.basedir, filename3)
if self._has_vars_in(filename4):
continue
new_vars = utils.parse_yaml_from_file(filename4)
if new_vars:
if type(new_vars) != dict:
raise errors.AnsibleError("%s must be stored as dictonary/hash: %s" % filename4)
if host is not None and self._has_vars_in(filename2) and not self._has_vars_in(filename3):
# running a host specific pass and has host specific variables
# load into setup cache
self.playbook.SETUP_CACHE[host].update(new_vars)
elif host is None:
# running a non-host specific pass and we can update the global vars instead
self.vars.update(new_vars)
| gpl-3.0 | -539,593,946,497,060,400 | 42.352761 | 141 | 0.521333 | false |
maribhez/DietasBot | fabfile.py | 1 | 1342 | # coding: utf-8
from fabric.api import *
import os
#Initial step to set up our machine.
def Instala():
#Make sure the machine starts from a clean state.
run ('sudo rm -rf DietasBot')
#Download our application from GitHub.
run('git clone https://github.com/maribhez/DietasBot.git')
#Enter the newly created folder and install the requirements.
run('cd DietasBot && pip install -r requirements.txt')
#Function to launch our application.
def Ejecutar():
with shell_env(HOST_BD=os.environ['HOST_BD'],
USER_BD=os.environ['USER_BD'],
PASS_BD=os.environ['PASS_BD'],
NAME_BD=os.environ['NAME_BD'],
TOKENBOT=os.environ['TOKENBOT']
):
run('sudo supervisorctl start botdietas')
def Recargar():
run("sudo supervisorctl reload")
def Detener():
run ('sudo supervisorctl stop botdietas')
def Borrado():
run ('sudo rm -rf DietasBot')
def Test():
with shell_env(HOST_BD=os.environ['HOST_BD'],
USER_BD=os.environ['USER_BD'],
PASS_BD=os.environ['PASS_BD'],
NAME_BD=os.environ['NAME_BD'],
TOKENBOT=os.environ['TOKENBOT']
):
run('cd DietasBot/botDietas && python test_bot.py')
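# Editor's note: a usage sketch only (the host name is a placeholder) -- these tasks
# are meant to be driven from the Fabric command line, e.g.:
#     fab -H deploy.example.com Instala Ejecutar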
| gpl-3.0 | 6,034,723,090,547,334,000 | 28.822222 | 69 | 0.590164 | false |
julienmalard/Tikon | pruebas/test_ecs/test_aprioris.py | 1 | 1353 | import unittest
import numpy as np
import numpy.testing as npt
import scipy.stats as estad
from tikon.ecs.aprioris import APrioriDens, APrioriDist
from tikon.ecs.dists import DistAnalítica
class PruebaAprioris(unittest.TestCase):
@staticmethod
def test_apriori_dens():
apr = APrioriDens((0, 1), 0.9)
trz = apr.dist((0, None)).obt_vals(10000)
npt.assert_allclose(np.mean(np.logical_and(trz < 1, trz > 0)), 0.9, atol=0.01)
def test_apriori_dens_líms_erróneas(símismo):
apr = APrioriDens((0, 1), 0.9)
with símismo.assertRaises(ValueError):
apr.dist((1, None))
@staticmethod
def test_apriori_dist_scipy():
apr = APrioriDist(estad.norm())
trz = apr.dist((None, None)).obt_vals(10000)
npt.assert_allclose(trz.mean(), 0, atol=0.05)
npt.assert_allclose(trz.std(), 1, atol=0.05)
@staticmethod
def test_apriori_dist_analítica():
dist = DistAnalítica(estad.norm())
apr = APrioriDist(dist)
trz = apr.dist((None, None)).obt_vals(10000)
npt.assert_allclose(trz.mean(), 0, atol=0.05)
npt.assert_allclose(trz.std(), 1, atol=0.05)
def test_apriori_dist_líms_erróneas(símismo):
apr = APrioriDist(estad.norm())
with símismo.assertRaises(ValueError):
apr.dist((1, None))
| agpl-3.0 | 9,142,627,457,586,882,000 | 32.55 | 86 | 0.635618 | false |
MSLNZ/msl-equipment | tests/test_connection.py | 1 | 2262 | import enum
import pytest
from msl.equipment import exceptions
from msl.equipment.connection import Connection
from msl.equipment.record_types import EquipmentRecord
class EnumTest1(enum.IntEnum):
A = 1
B = 2
C = 3
class EnumTest2(enum.IntEnum):
data_A = 11
DATA_B = 12
RES_C = 13
class EnumTest3(enum.Enum):
first = 1.1
second = 2.2
third = 3.3
class EnumTest4(enum.Enum):
START = 'the beginning'
STOP = 'the end'
def test_convert_to_enum():
assert Connection.convert_to_enum('a', EnumTest1, to_upper=True) == EnumTest1.A
assert Connection.convert_to_enum(2, EnumTest1) == EnumTest1.B
assert Connection.convert_to_enum('C', EnumTest1) == EnumTest1.C
with pytest.raises(ValueError):
Connection.convert_to_enum(4, EnumTest1)
with pytest.raises(ValueError):
Connection.convert_to_enum(None, EnumTest1)
assert Connection.convert_to_enum('A', EnumTest2, prefix='data_') == EnumTest2.data_A
assert Connection.convert_to_enum(EnumTest2.DATA_B, EnumTest2) == EnumTest2.DATA_B
assert Connection.convert_to_enum('res_c', EnumTest2, to_upper=True) == EnumTest2.RES_C
assert Connection.convert_to_enum('first', EnumTest3) == EnumTest3.first
assert Connection.convert_to_enum(2.2, EnumTest3) == EnumTest3.second
assert Connection.convert_to_enum(EnumTest3.third, EnumTest3) == EnumTest3.third
with pytest.raises(ValueError):
Connection.convert_to_enum(1.17, EnumTest3)
assert Connection.convert_to_enum('stop', EnumTest4, to_upper=True) == EnumTest4.STOP
assert Connection.convert_to_enum('STarT', EnumTest4, to_upper=True) == EnumTest4.START
assert Connection.convert_to_enum('the end', EnumTest4) == EnumTest4.STOP
def test_exception_handler():
c = Connection(EquipmentRecord())
assert c._exception_handler == exceptions.MSLConnectionError
# not a class error
with pytest.raises(TypeError, match=r'issubclass()'):
c.set_exception_class(None)
# not a subclass of MSLConnectionError
with pytest.raises(TypeError, match=r'MSLConnectionError'):
c.set_exception_class(OSError)
c.set_exception_class(exceptions.ThorlabsError)
assert c._exception_handler == exceptions.ThorlabsError
| mit | 1,085,828,139,733,513,500 | 31.314286 | 91 | 0.710433 | false |
evilhero/mylar | lib/comictaggerlib/comicapi/comet.py | 1 | 9192 | """A class to encapsulate CoMet data"""
# Copyright 2012-2014 Anthony Beville
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree as ET
#from datetime import datetime
#from pprint import pprint
#import zipfile
from genericmetadata import GenericMetadata
import utils
class CoMet:
writer_synonyms = ['writer', 'plotter', 'scripter']
penciller_synonyms = ['artist', 'penciller', 'penciler', 'breakdowns']
inker_synonyms = ['inker', 'artist', 'finishes']
colorist_synonyms = ['colorist', 'colourist', 'colorer', 'colourer']
letterer_synonyms = ['letterer']
cover_synonyms = ['cover', 'covers', 'coverartist', 'cover artist']
editor_synonyms = ['editor']
def metadataFromString(self, string):
tree = ET.ElementTree(ET.fromstring(string))
return self.convertXMLToMetadata(tree)
def stringFromMetadata(self, metadata):
header = '<?xml version="1.0" encoding="UTF-8"?>\n'
        tree = self.convertMetadataToXML(None, metadata)  # the filename argument is not used when building the tree
return header + ET.tostring(tree.getroot())
def indent(self, elem, level=0):
# for making the XML output readable
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def convertMetadataToXML(self, filename, metadata):
# shorthand for the metadata
md = metadata
# build a tree structure
root = ET.Element("comet")
root.attrib['xmlns:comet'] = "http://www.denvog.com/comet/"
root.attrib['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
root.attrib[
'xsi:schemaLocation'] = "http://www.denvog.com http://www.denvog.com/comet/comet.xsd"
# helper func
def assign(comet_entry, md_entry):
if md_entry is not None:
ET.SubElement(root, comet_entry).text = u"{0}".format(md_entry)
        # title is mandatory
if md.title is None:
md.title = ""
assign('title', md.title)
assign('series', md.series)
assign('issue', md.issue) # must be int??
assign('volume', md.volume)
assign('description', md.comments)
assign('publisher', md.publisher)
assign('pages', md.pageCount)
assign('format', md.format)
assign('language', md.language)
assign('rating', md.maturityRating)
assign('price', md.price)
assign('isVersionOf', md.isVersionOf)
assign('rights', md.rights)
assign('identifier', md.identifier)
assign('lastMark', md.lastMark)
assign('genre', md.genre) # TODO repeatable
if md.characters is not None:
char_list = [c.strip() for c in md.characters.split(',')]
for c in char_list:
assign('character', c)
if md.manga is not None and md.manga == "YesAndRightToLeft":
assign('readingDirection', "rtl")
date_str = ""
if md.year is not None:
date_str = str(md.year).zfill(4)
if md.month is not None:
date_str += "-" + str(md.month).zfill(2)
assign('date', date_str)
assign('coverImage', md.coverImage)
# need to specially process the credits, since they are structured
# differently than CIX
credit_writer_list = list()
credit_penciller_list = list()
credit_inker_list = list()
credit_colorist_list = list()
credit_letterer_list = list()
credit_cover_list = list()
credit_editor_list = list()
# loop thru credits, and build a list for each role that CoMet supports
for credit in metadata.credits:
if credit['role'].lower() in set(self.writer_synonyms):
ET.SubElement(
root,
'writer').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.penciller_synonyms):
ET.SubElement(
root,
'penciller').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.inker_synonyms):
ET.SubElement(
root,
'inker').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.colorist_synonyms):
ET.SubElement(
root,
'colorist').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.letterer_synonyms):
ET.SubElement(
root,
'letterer').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.cover_synonyms):
ET.SubElement(
root,
'coverDesigner').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.editor_synonyms):
ET.SubElement(
root,
'editor').text = u"{0}".format(
credit['person'])
# self pretty-print
self.indent(root)
# wrap it in an ElementTree instance, and save as XML
tree = ET.ElementTree(root)
return tree
def convertXMLToMetadata(self, tree):
root = tree.getroot()
if root.tag != 'comet':
            raise ValueError("XML root element is not 'comet'")
metadata = GenericMetadata()
md = metadata
# Helper function
def xlate(tag):
node = root.find(tag)
if node is not None:
return node.text
else:
return None
md.series = xlate('series')
md.title = xlate('title')
md.issue = xlate('issue')
md.volume = xlate('volume')
md.comments = xlate('description')
md.publisher = xlate('publisher')
md.language = xlate('language')
md.format = xlate('format')
md.pageCount = xlate('pages')
md.maturityRating = xlate('rating')
md.price = xlate('price')
md.isVersionOf = xlate('isVersionOf')
md.rights = xlate('rights')
md.identifier = xlate('identifier')
md.lastMark = xlate('lastMark')
md.genre = xlate('genre') # TODO - repeatable field
date = xlate('date')
if date is not None:
parts = date.split('-')
if len(parts) > 0:
md.year = parts[0]
if len(parts) > 1:
md.month = parts[1]
md.coverImage = xlate('coverImage')
readingDirection = xlate('readingDirection')
if readingDirection is not None and readingDirection == "rtl":
md.manga = "YesAndRightToLeft"
# loop for character tags
char_list = []
for n in root:
if n.tag == 'character':
char_list.append(n.text.strip())
md.characters = utils.listToString(char_list)
# Now extract the credit info
for n in root:
if (n.tag == 'writer' or
n.tag == 'penciller' or
n.tag == 'inker' or
n.tag == 'colorist' or
n.tag == 'letterer' or
n.tag == 'editor'
):
metadata.addCredit(n.text.strip(), n.tag.title())
if n.tag == 'coverDesigner':
metadata.addCredit(n.text.strip(), "Cover")
metadata.isEmpty = False
return metadata
# verify that the string actually contains CoMet data in XML format
def validateString(self, string):
try:
tree = ET.ElementTree(ET.fromstring(string))
root = tree.getroot()
if root.tag != 'comet':
raise Exception
except:
return False
return True
def writeToExternalFile(self, filename, metadata):
        tree = self.convertMetadataToXML(filename, metadata)
# ET.dump(tree)
tree.write(filename, encoding='utf-8')
def readFromExternalFile(self, filename):
tree = ET.parse(filename)
return self.convertXMLToMetadata(tree)
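# Illustrative usage sketch (not part of the original module): round-trip a
# GenericMetadata object through CoMet XML. The title, series and credit used
# here are made-up example values, not data from any real comic archive.
if __name__ == "__main__":
    example_md = GenericMetadata()
    example_md.title = "Example Issue"
    example_md.series = "Example Series"
    example_md.addCredit("Jane Doe", "Writer")
    comet_handler = CoMet()
    xml_text = comet_handler.stringFromMetadata(example_md)
    assert comet_handler.validateString(xml_text)
    round_trip = comet_handler.metadataFromString(xml_text)
    assert round_trip.series == "Example Series"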
| gpl-3.0 | 3,675,850,606,253,734,400 | 32.304348 | 97 | 0.546997 | false |
stackforge/python-openstacksdk | openstack/tests/unit/image/v2/test_proxy.py | 1 | 19139 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
from unittest import mock
import requests
from openstack import exceptions
from openstack.image.v2 import _proxy
from openstack.image.v2 import image
from openstack.image.v2 import member
from openstack.image.v2 import schema
from openstack.image.v2 import task
from openstack.image.v2 import service_info as si
from openstack.tests.unit.image.v2 import test_image as fake_image
from openstack.tests.unit import test_proxy_base
EXAMPLE = fake_image.EXAMPLE
class FakeResponse:
def __init__(self, response, status_code=200, headers=None):
self.body = response
self.status_code = status_code
headers = headers if headers else {'content-type': 'application/json'}
self.headers = requests.structures.CaseInsensitiveDict(headers)
def json(self):
return self.body
class TestImageProxy(test_proxy_base.TestProxyBase):
def setUp(self):
super(TestImageProxy, self).setUp()
self.proxy = _proxy.Proxy(self.session)
self.proxy._connection = self.cloud
def test_image_import_no_required_attrs(self):
# container_format and disk_format are required attrs of the image
existing_image = image.Image(id="id")
self.assertRaises(exceptions.InvalidRequest,
self.proxy.import_image,
existing_image)
def test_image_import(self):
original_image = image.Image(**EXAMPLE)
self._verify(
"openstack.image.v2.image.Image.import_image",
self.proxy.import_image,
method_args=[original_image, "method", "uri"],
expected_kwargs={
"method": "method",
"store": None,
"uri": "uri",
"stores": [],
"all_stores": None,
"all_stores_must_succeed": None,
})
def test_image_create_conflict(self):
self.assertRaises(
exceptions.SDKException, self.proxy.create_image,
name='fake', filename='fake', data='fake',
container='bare', disk_format='raw'
)
def test_image_create_checksum_match(self):
fake_image = image.Image(
id="fake", properties={
self.proxy._IMAGE_MD5_KEY: 'fake_md5',
self.proxy._IMAGE_SHA256_KEY: 'fake_sha256'
})
self.proxy.find_image = mock.Mock(return_value=fake_image)
self.proxy._upload_image = mock.Mock()
res = self.proxy.create_image(
name='fake',
md5='fake_md5', sha256='fake_sha256'
)
self.assertEqual(fake_image, res)
self.proxy._upload_image.assert_not_called()
def test_image_create_checksum_mismatch(self):
fake_image = image.Image(
id="fake", properties={
self.proxy._IMAGE_MD5_KEY: 'fake_md5',
self.proxy._IMAGE_SHA256_KEY: 'fake_sha256'
})
self.proxy.find_image = mock.Mock(return_value=fake_image)
self.proxy._upload_image = mock.Mock()
self.proxy.create_image(
name='fake', data=b'fake',
md5='fake2_md5', sha256='fake2_sha256'
)
self.proxy._upload_image.assert_called()
def test_image_create_allow_duplicates_find_not_called(self):
self.proxy.find_image = mock.Mock()
self.proxy._upload_image = mock.Mock()
self.proxy.create_image(
name='fake', data=b'fake', allow_duplicates=True,
)
self.proxy.find_image.assert_not_called()
def test_image_create_validate_checksum_data_binary(self):
""" Pass real data as binary"""
self.proxy.find_image = mock.Mock()
self.proxy._upload_image = mock.Mock()
self.proxy.create_image(
name='fake', data=b'fake', validate_checksum=True,
container='bare', disk_format='raw'
)
self.proxy.find_image.assert_called_with('fake')
self.proxy._upload_image.assert_called_with(
'fake', container_format='bare', disk_format='raw',
filename=None, data=b'fake', meta={},
properties={
self.proxy._IMAGE_MD5_KEY: '144c9defac04969c7bfad8efaa8ea194',
self.proxy._IMAGE_SHA256_KEY: 'b5d54c39e66671c9731b9f471e585'
'd8262cd4f54963f0c93082d8dcf33'
'4d4c78',
self.proxy._IMAGE_OBJECT_KEY: 'bare/fake'},
timeout=3600, validate_checksum=True,
use_import=False,
stores=None,
all_stores=None,
all_stores_must_succeed=None,
wait=False)
def test_image_create_validate_checksum_data_not_binary(self):
self.assertRaises(
exceptions.SDKException, self.proxy.create_image,
name='fake', data=io.StringIO(), validate_checksum=True,
container='bare', disk_format='raw'
)
def test_image_create_data_binary(self):
"""Pass binary file-like object"""
self.proxy.find_image = mock.Mock()
self.proxy._upload_image = mock.Mock()
data = io.BytesIO(b'\0\0')
self.proxy.create_image(
name='fake', data=data, validate_checksum=False,
container='bare', disk_format='raw'
)
self.proxy._upload_image.assert_called_with(
'fake', container_format='bare', disk_format='raw',
filename=None, data=data, meta={},
properties={
self.proxy._IMAGE_MD5_KEY: '',
self.proxy._IMAGE_SHA256_KEY: '',
self.proxy._IMAGE_OBJECT_KEY: 'bare/fake'},
timeout=3600, validate_checksum=False,
use_import=False,
stores=None,
all_stores=None,
all_stores_must_succeed=None,
wait=False)
def test_image_create_without_filename(self):
self.proxy._create_image = mock.Mock()
self.proxy.create_image(
allow_duplicates=True,
name='fake', disk_format="fake_dformat",
container_format="fake_cformat"
)
self.proxy._create_image.assert_called_with(
container_format='fake_cformat', disk_format='fake_dformat',
name='fake', properties=mock.ANY)
def test_image_upload_no_args(self):
# container_format and disk_format are required args
self.assertRaises(exceptions.InvalidRequest, self.proxy.upload_image)
def test_image_upload(self):
# NOTE: This doesn't use any of the base class verify methods
# because it ends up making two separate calls to complete the
# operation.
created_image = mock.Mock(spec=image.Image(id="id"))
self.proxy._create = mock.Mock()
self.proxy._create.return_value = created_image
rv = self.proxy.upload_image(data="data", container_format="x",
disk_format="y", name="z")
self.proxy._create.assert_called_with(image.Image,
container_format="x",
disk_format="y",
name="z")
created_image.upload.assert_called_with(self.proxy)
self.assertEqual(rv, created_image)
def test_image_download(self):
original_image = image.Image(**EXAMPLE)
self._verify('openstack.image.v2.image.Image.download',
self.proxy.download_image,
method_args=[original_image],
method_kwargs={
'output': 'some_output',
'chunk_size': 1,
'stream': True
},
expected_kwargs={'output': 'some_output',
'chunk_size': 1,
'stream': True})
@mock.patch("openstack.image.v2.image.Image.fetch")
def test_image_stage(self, mock_fetch):
img = image.Image(id="id", status="queued")
img.stage = mock.Mock()
self.proxy.stage_image(image=img)
mock_fetch.assert_called()
img.stage.assert_called_with(self.proxy)
@mock.patch("openstack.image.v2.image.Image.fetch")
def test_image_stage_with_data(self, mock_fetch):
img = image.Image(id="id", status="queued")
img.stage = mock.Mock()
mock_fetch.return_value = img
rv = self.proxy.stage_image(image=img, data="data")
img.stage.assert_called_with(self.proxy)
mock_fetch.assert_called()
self.assertEqual(rv.data, "data")
def test_image_stage_wrong_status(self):
img = image.Image(id="id", status="active")
img.stage = mock.Mock()
self.assertRaises(
exceptions.SDKException,
self.proxy.stage_image,
img,
"data"
)
def test_image_delete(self):
self.verify_delete(self.proxy.delete_image, image.Image, False)
def test_image_delete_ignore(self):
self.verify_delete(self.proxy.delete_image, image.Image, True)
@mock.patch("openstack.resource.Resource._translate_response")
@mock.patch("openstack.proxy.Proxy._get")
@mock.patch("openstack.image.v2.image.Image.commit")
def test_image_update(self, mock_commit_image, mock_get_image,
mock_transpose):
original_image = image.Image(**EXAMPLE)
mock_get_image.return_value = original_image
EXAMPLE['name'] = 'fake_name'
updated_image = image.Image(**EXAMPLE)
mock_commit_image.return_value = updated_image.to_dict()
result = self.proxy.update_image(original_image,
**updated_image.to_dict())
self.assertEqual('fake_name', result.get('name'))
def test_image_get(self):
self.verify_get(self.proxy.get_image, image.Image)
def test_images(self):
self.verify_list(self.proxy.images, image.Image)
def test_add_tag(self):
self._verify("openstack.image.v2.image.Image.add_tag",
self.proxy.add_tag,
method_args=["image", "tag"],
expected_args=["tag"])
def test_remove_tag(self):
self._verify("openstack.image.v2.image.Image.remove_tag",
self.proxy.remove_tag,
method_args=["image", "tag"],
expected_args=["tag"])
def test_deactivate_image(self):
self._verify("openstack.image.v2.image.Image.deactivate",
self.proxy.deactivate_image,
method_args=["image"])
def test_reactivate_image(self):
self._verify("openstack.image.v2.image.Image.reactivate",
self.proxy.reactivate_image,
method_args=["image"])
def test_member_create(self):
self.verify_create(self.proxy.add_member, member.Member,
method_kwargs={"image": "test_id"},
expected_kwargs={"image_id": "test_id"})
def test_member_delete(self):
self._verify2("openstack.proxy.Proxy._delete",
self.proxy.remove_member,
method_args=["member_id"],
method_kwargs={"image": "image_id",
"ignore_missing": False},
expected_args=[member.Member],
expected_kwargs={"member_id": "member_id",
"image_id": "image_id",
"ignore_missing": False})
def test_member_delete_ignore(self):
self._verify2("openstack.proxy.Proxy._delete",
self.proxy.remove_member,
method_args=["member_id"],
method_kwargs={"image": "image_id"},
expected_args=[member.Member],
expected_kwargs={"member_id": "member_id",
"image_id": "image_id",
"ignore_missing": True})
def test_member_update(self):
self._verify2("openstack.proxy.Proxy._update",
self.proxy.update_member,
method_args=['member_id', 'image_id'],
expected_args=[member.Member],
expected_kwargs={'member_id': 'member_id',
'image_id': 'image_id'})
def test_member_get(self):
self._verify2("openstack.proxy.Proxy._get",
self.proxy.get_member,
method_args=['member_id'],
method_kwargs={"image": "image_id"},
expected_args=[member.Member],
expected_kwargs={'member_id': 'member_id',
'image_id': 'image_id'})
def test_member_find(self):
self._verify2("openstack.proxy.Proxy._find",
self.proxy.find_member,
method_args=['member_id'],
method_kwargs={"image": "image_id"},
expected_args=[member.Member, "member_id"],
expected_kwargs={'ignore_missing': True,
'image_id': 'image_id'})
def test_members(self):
self.verify_list(self.proxy.members, member.Member,
method_args=('image_1',),
expected_kwargs={'image_id': 'image_1'})
def test_images_schema_get(self):
self._verify2("openstack.proxy.Proxy._get",
self.proxy.get_images_schema,
expected_args=[schema.Schema],
expected_kwargs={'base_path': '/schemas/images',
'requires_id': False})
def test_image_schema_get(self):
self._verify2("openstack.proxy.Proxy._get",
self.proxy.get_image_schema,
expected_args=[schema.Schema],
expected_kwargs={'base_path': '/schemas/image',
'requires_id': False})
def test_members_schema_get(self):
self._verify2("openstack.proxy.Proxy._get",
self.proxy.get_members_schema,
expected_args=[schema.Schema],
expected_kwargs={'base_path': '/schemas/members',
'requires_id': False})
def test_member_schema_get(self):
self._verify2("openstack.proxy.Proxy._get",
self.proxy.get_member_schema,
expected_args=[schema.Schema],
expected_kwargs={'base_path': '/schemas/member',
'requires_id': False})
def test_task_get(self):
self.verify_get(self.proxy.get_task, task.Task)
def test_tasks(self):
self.verify_list(self.proxy.tasks, task.Task)
def test_task_create(self):
self.verify_create(self.proxy.create_task, task.Task)
def test_wait_for_task_immediate_status(self):
status = 'success'
res = task.Task(id='1234', status=status)
result = self.proxy.wait_for_task(
res, status, "failure", 0.01, 0.1)
self.assertTrue(result, res)
def test_wait_for_task_immediate_status_case(self):
status = "SUCcess"
res = task.Task(id='1234', status=status)
result = self.proxy.wait_for_task(
res, status, "failure", 0.01, 0.1)
self.assertTrue(result, res)
def test_wait_for_task_error_396(self):
# Ensure we create a new task when we get 396 error
res = task.Task(
id='id', status='waiting',
type='some_type', input='some_input', result='some_result'
)
mock_fetch = mock.Mock()
mock_fetch.side_effect = [
task.Task(
id='id', status='failure',
type='some_type', input='some_input', result='some_result',
message=_proxy._IMAGE_ERROR_396
),
task.Task(id='fake', status='waiting'),
task.Task(id='fake', status='success'),
]
self.proxy._create = mock.Mock()
self.proxy._create.side_effect = [
task.Task(id='fake', status='success')
]
with mock.patch.object(task.Task,
'fetch', mock_fetch):
result = self.proxy.wait_for_task(
res, interval=0.01, wait=0.5)
self.assertEqual('success', result.status)
self.proxy._create.assert_called_with(
mock.ANY,
input=res.input,
type=res.type)
def test_wait_for_task_wait(self):
res = task.Task(id='id', status='waiting')
mock_fetch = mock.Mock()
mock_fetch.side_effect = [
task.Task(id='id', status='waiting'),
task.Task(id='id', status='waiting'),
task.Task(id='id', status='success'),
]
with mock.patch.object(task.Task,
'fetch', mock_fetch):
result = self.proxy.wait_for_task(
res, interval=0.01, wait=0.5)
self.assertEqual('success', result.status)
def test_tasks_schema_get(self):
self._verify2("openstack.proxy.Proxy._get",
self.proxy.get_tasks_schema,
expected_args=[schema.Schema],
expected_kwargs={'base_path': '/schemas/tasks',
'requires_id': False})
def test_task_schema_get(self):
self._verify2("openstack.proxy.Proxy._get",
self.proxy.get_task_schema,
expected_args=[schema.Schema],
expected_kwargs={'base_path': '/schemas/task',
'requires_id': False})
def test_stores(self):
self.verify_list(self.proxy.stores, si.Store)
def test_import_info(self):
self._verify2("openstack.proxy.Proxy._get",
self.proxy.get_import_info,
method_args=[],
method_kwargs={},
expected_args=[si.Import],
expected_kwargs={'require_id': False})
| apache-2.0 | -938,669,344,473,607,300 | 37.125498 | 78 | 0.541042 | false |
bplancher/odoo | openerp/addons/base/tests/test_views.py | 2 | 40915 | # -*- encoding: utf-8 -*-
from functools import partial
import itertools
import unittest
from lxml import etree as ET
from lxml.builder import E
from psycopg2 import IntegrityError
from openerp.exceptions import ValidationError
from openerp.tests import common
import openerp.tools
Field = E.field
class ViewCase(common.TransactionCase):
def setUp(self):
super(ViewCase, self).setUp()
self.addTypeEqualityFunc(ET._Element, self.assertTreesEqual)
self.Views = self.registry('ir.ui.view')
def browse(self, id, context=None):
return self.Views.browse(self.cr, self.uid, id, context=context)
def create(self, value, context=None):
return self.Views.create(self.cr, self.uid, value, context=context)
def read_combined(self, id):
return self.Views.read_combined(
self.cr, self.uid,
id, ['arch'],
context={'check_view_ids': self.Views.search(self.cr, self.uid, [])}
)
def assertTreesEqual(self, n1, n2, msg=None):
self.assertEqual(n1.tag, n2.tag, msg)
self.assertEqual((n1.text or '').strip(), (n2.text or '').strip(), msg)
self.assertEqual((n1.tail or '').strip(), (n2.tail or '').strip(), msg)
        # lxml attribute containers compare order-sensitively, so compare
        # them as plain dicts instead
self.assertEqual(dict(n1.attrib), dict(n2.attrib), msg)
for c1, c2 in itertools.izip_longest(n1, n2):
self.assertEqual(c1, c2, msg)
class TestNodeLocator(common.TransactionCase):
"""
The node locator returns None when it can not find a node, and the first
match when it finds something (no jquery-style node sets)
"""
def setUp(self):
super(TestNodeLocator, self).setUp()
self.Views = self.registry('ir.ui.view')
def test_no_match_xpath(self):
"""
xpath simply uses the provided @expr pattern to find a node
"""
node = self.Views.locate_node(
E.root(E.foo(), E.bar(), E.baz()),
E.xpath(expr="//qux"))
self.assertIsNone(node)
def test_match_xpath(self):
bar = E.bar()
node = self.Views.locate_node(
E.root(E.foo(), bar, E.baz()),
E.xpath(expr="//bar"))
self.assertIs(node, bar)
def test_no_match_field(self):
"""
A field spec will match by @name against all fields of the view
"""
node = self.Views.locate_node(
E.root(E.foo(), E.bar(), E.baz()),
Field(name="qux"))
self.assertIsNone(node)
node = self.Views.locate_node(
E.root(Field(name="foo"), Field(name="bar"), Field(name="baz")),
Field(name="qux"))
self.assertIsNone(node)
def test_match_field(self):
bar = Field(name="bar")
node = self.Views.locate_node(
E.root(Field(name="foo"), bar, Field(name="baz")),
Field(name="bar"))
self.assertIs(node, bar)
def test_no_match_other(self):
"""
Non-xpath non-fields are matched by node name first
"""
node = self.Views.locate_node(
E.root(E.foo(), E.bar(), E.baz()),
E.qux())
self.assertIsNone(node)
def test_match_other(self):
bar = E.bar()
node = self.Views.locate_node(
E.root(E.foo(), bar, E.baz()),
E.bar())
self.assertIs(bar, node)
def test_attribute_mismatch(self):
"""
        Non-xpath, non-field specs are filtered by matching attributes on the
        spec and the matched nodes
"""
node = self.Views.locate_node(
E.root(E.foo(attr='1'), E.bar(attr='2'), E.baz(attr='3')),
E.bar(attr='5'))
self.assertIsNone(node)
def test_attribute_filter(self):
match = E.bar(attr='2')
node = self.Views.locate_node(
E.root(E.bar(attr='1'), match, E.root(E.bar(attr='3'))),
E.bar(attr='2'))
self.assertIs(node, match)
def test_version_mismatch(self):
"""
A @version on the spec will be matched against the view's version
"""
node = self.Views.locate_node(
E.root(E.foo(attr='1'), version='4'),
E.foo(attr='1', version='3'))
self.assertIsNone(node)
class TestViewInheritance(ViewCase):
def arch_for(self, name, view_type='form', parent=None):
""" Generates a trivial view of the specified ``view_type``.
The generated view is empty but ``name`` is set as its root's ``@string``.
If ``parent`` is not falsy, generates an extension view (instead of
a root view) replacing the parent's ``@string`` by ``name``
:param str name: ``@string`` value for the view root
:param str view_type:
:param bool parent:
:return: generated arch
:rtype: str
"""
if not parent:
element = E(view_type, string=name)
else:
element = E(view_type,
E.attribute(name, name='string'),
position='attributes'
)
return ET.tostring(element)
def makeView(self, name, parent=None, arch=None):
""" Generates a basic ir.ui.view with the provided name, parent and arch.
If no parent is provided, the view is top-level.
If no arch is provided, generates one by calling :meth:`~.arch_for`.
:param str name:
:param int parent: id of the parent view, if any
:param str arch:
:returns: the created view's id.
:rtype: int
"""
view_id = self.View.create(self.cr, self.uid, {
'model': self.model,
'name': name,
'arch': arch or self.arch_for(name, parent=parent),
'inherit_id': parent,
'priority': 5, # higher than default views
})
self.ids[name] = view_id
return view_id
def setUp(self):
super(TestViewInheritance, self).setUp()
self.model = 'ir.ui.view.custom'
self.View = self.registry('ir.ui.view')
self._init = self.View.pool._init
self.View.pool._init = False
self.ids = {}
a = self.makeView("A")
a1 = self.makeView("A1", a)
a11 = self.makeView("A11", a1)
self.makeView("A111", a11)
self.makeView("A12", a1)
a2 = self.makeView("A2", a)
self.makeView("A21", a2)
a22 = self.makeView("A22", a2)
self.makeView("A221", a22)
b = self.makeView('B', arch=self.arch_for("B", 'tree'))
self.makeView('B1', b, arch=self.arch_for("B1", 'tree', parent=b))
c = self.makeView('C', arch=self.arch_for("C", 'tree'))
self.View.write(self.cr, self.uid, c, {'priority': 1})
def tearDown(self):
self.View.pool._init = self._init
super(TestViewInheritance, self).tearDown()
def test_get_inheriting_views_arch(self):
self.assertEqual(self.View.get_inheriting_views_arch(
self.cr, self.uid, self.ids['A'], self.model), [
(self.arch_for('A1', parent=True), self.ids['A1']),
(self.arch_for('A2', parent=True), self.ids['A2']),
])
self.assertEqual(self.View.get_inheriting_views_arch(
self.cr, self.uid, self.ids['A21'], self.model),
[])
self.assertEqual(self.View.get_inheriting_views_arch(
self.cr, self.uid, self.ids['A11'], self.model),
[(self.arch_for('A111', parent=True), self.ids['A111'])])
def test_default_view(self):
default = self.View.default_view(
self.cr, self.uid, model=self.model, view_type='form')
self.assertEqual(default, self.ids['A'])
default_tree = self.View.default_view(
self.cr, self.uid, model=self.model, view_type='tree')
self.assertEqual(default_tree, self.ids['C'])
def test_no_default_view(self):
self.assertFalse(
self.View.default_view(
self.cr, self.uid, model='does.not.exist', view_type='form'))
self.assertFalse(
self.View.default_view(
self.cr, self.uid, model=self.model, view_type='graph'))
def test_no_recursion(self):
r1 = self.makeView('R1')
with self.assertRaises(ValidationError), self.cr.savepoint():
self.View.write(self.cr, self.uid, r1, {'inherit_id': r1})
r2 = self.makeView('R2', r1)
r3 = self.makeView('R3', r2)
with self.assertRaises(ValidationError), self.cr.savepoint():
self.View.write(self.cr, self.uid, r2, {'inherit_id': r3})
with self.assertRaises(ValidationError), self.cr.savepoint():
self.View.write(self.cr, self.uid, r1, {'inherit_id': r3})
with self.assertRaises(ValidationError), self.cr.savepoint():
self.View.write(self.cr, self.uid, r1, {
'inherit_id': r1,
'arch': self.arch_for('itself', parent=True),
})
class TestApplyInheritanceSpecs(ViewCase):
""" Applies a sequence of inheritance specification nodes to a base
architecture. IO state parameters (cr, uid, model, context) are used for
error reporting
The base architecture is altered in-place.
"""
def setUp(self):
super(TestApplyInheritanceSpecs, self).setUp()
self.View = self.registry('ir.ui.view')
self.base_arch = E.form(
Field(name="target"),
string="Title")
def test_replace(self):
spec = Field(
Field(name="replacement"),
name="target", position="replace")
self.View.apply_inheritance_specs(self.cr, self.uid,
self.base_arch,
spec, None)
self.assertEqual(
self.base_arch,
E.form(Field(name="replacement"), string="Title"))
def test_delete(self):
spec = Field(name="target", position="replace")
self.View.apply_inheritance_specs(self.cr, self.uid,
self.base_arch,
spec, None)
self.assertEqual(
self.base_arch,
E.form(string="Title"))
def test_insert_after(self):
spec = Field(
Field(name="inserted"),
name="target", position="after")
self.View.apply_inheritance_specs(self.cr, self.uid,
self.base_arch,
spec, None)
self.assertEqual(
self.base_arch,
E.form(
Field(name="target"),
Field(name="inserted"),
string="Title"
))
def test_insert_before(self):
spec = Field(
Field(name="inserted"),
name="target", position="before")
self.View.apply_inheritance_specs(self.cr, self.uid,
self.base_arch,
spec, None)
self.assertEqual(
self.base_arch,
E.form(
Field(name="inserted"),
Field(name="target"),
string="Title"))
def test_insert_inside(self):
default = Field(Field(name="inserted"), name="target")
spec = Field(Field(name="inserted 2"), name="target", position='inside')
self.View.apply_inheritance_specs(self.cr, self.uid,
self.base_arch,
default, None)
self.View.apply_inheritance_specs(self.cr, self.uid,
self.base_arch,
spec, None)
self.assertEqual(
self.base_arch,
E.form(
Field(
Field(name="inserted"),
Field(name="inserted 2"),
name="target"),
string="Title"))
def test_unpack_data(self):
spec = E.data(
Field(Field(name="inserted 0"), name="target"),
Field(Field(name="inserted 1"), name="target"),
Field(Field(name="inserted 2"), name="target"),
Field(Field(name="inserted 3"), name="target"),
)
self.View.apply_inheritance_specs(self.cr, self.uid,
self.base_arch,
spec, None)
self.assertEqual(
self.base_arch,
E.form(
Field(
Field(name="inserted 0"),
Field(name="inserted 1"),
Field(name="inserted 2"),
Field(name="inserted 3"),
name="target"),
string="Title"))
@openerp.tools.mute_logger('openerp.addons.base.ir.ir_ui_view')
def test_invalid_position(self):
spec = Field(
Field(name="whoops"),
name="target", position="serious_series")
with self.assertRaises(AttributeError):
self.View.apply_inheritance_specs(self.cr, self.uid,
self.base_arch,
spec, None)
@openerp.tools.mute_logger('openerp.addons.base.ir.ir_ui_view')
def test_incorrect_version(self):
# Version ignored on //field elements, so use something else
arch = E.form(E.element(foo="42"))
spec = E.element(
Field(name="placeholder"),
foo="42", version="7.0")
with self.assertRaises(AttributeError):
self.View.apply_inheritance_specs(self.cr, self.uid,
arch,
spec, None)
@openerp.tools.mute_logger('openerp.addons.base.ir.ir_ui_view')
def test_target_not_found(self):
spec = Field(name="targut")
with self.assertRaises(AttributeError):
self.View.apply_inheritance_specs(self.cr, self.uid,
self.base_arch,
spec, None)
class TestApplyInheritedArchs(ViewCase):
""" Applies a sequence of modificator archs to a base view
"""
class TestNoModel(ViewCase):
def test_create_view_nomodel(self):
View = self.registry('ir.ui.view')
view_id = View.create(self.cr, self.uid, {
'name': 'dummy',
'arch': '<template name="foo"/>',
'inherit_id': False,
'type': 'qweb',
})
fields = ['name', 'arch', 'type', 'priority', 'inherit_id', 'model']
[view] = View.read(self.cr, self.uid, [view_id], fields)
self.assertEqual(view, {
'id': view_id,
'name': 'dummy',
'arch': '<template name="foo"/>',
'type': 'qweb',
'priority': 16,
'inherit_id': False,
'model': False,
})
text_para = E.p("", {'class': 'legalese'})
arch = E.body(
E.div(
E.h1("Title"),
id="header"),
E.p("Welcome!"),
E.div(
E.hr(),
text_para,
id="footer"),
{'class': "index"},)
def test_qweb_translation(self):
"""
Test if translations work correctly without a model
"""
self.env['res.lang'].load_lang('fr_FR')
ARCH = '<template name="foo">%s</template>'
TEXT_EN = "Copyright copyrighter"
        TEXT_FR = u"Copyrighter, tous droits réservés"
view = self.env['ir.ui.view'].create({
'name': 'dummy',
'arch': ARCH % TEXT_EN,
'inherit_id': False,
'type': 'qweb',
})
self.env['ir.translation'].create({
'type': 'model',
'name': 'ir.ui.view,arch_db',
'res_id': view.id,
'lang': 'fr_FR',
'src': TEXT_EN,
'value': TEXT_FR,
})
view = view.with_context(lang='fr_FR')
self.assertEqual(view.arch, ARCH % TEXT_FR)
class TestTemplating(ViewCase):
def setUp(self):
import openerp.modules
super(TestTemplating, self).setUp()
self._pool = openerp.modules.registry.RegistryManager.get(common.get_db_name())
self._init = self._pool._init
        # pretend the registry is fully loaded so that view validation is not deferred
self._pool._init = False
def tearDown(self):
self._pool._init = self._init
super(TestTemplating, self).tearDown()
def test_branding_inherit(self):
Views = self.registry('ir.ui.view')
id = Views.create(self.cr, self.uid, {
'name': "Base view",
'type': 'qweb',
'arch': """<root>
<item order="1"/>
</root>
"""
})
id2 = Views.create(self.cr, self.uid, {
'name': "Extension",
'type': 'qweb',
'inherit_id': id,
'arch': """<xpath expr="//item" position="before">
<item order="2"/>
</xpath>
"""
})
arch_string = Views.read_combined(
self.cr, self.uid, id, fields=['arch'],
context={'inherit_branding': True})['arch']
arch = ET.fromstring(arch_string)
Views.distribute_branding(arch)
[initial] = arch.xpath('//item[@order=1]')
self.assertEqual(
str(id),
initial.get('data-oe-id'),
"initial should come from the root view")
self.assertEqual(
'/root[1]/item[1]',
initial.get('data-oe-xpath'),
"initial's xpath should be within the root view only")
[second] = arch.xpath('//item[@order=2]')
self.assertEqual(
str(id2),
second.get('data-oe-id'),
"second should come from the extension view")
def test_branding_distribute_inner(self):
""" Checks that the branding is correctly distributed within a view
extension
"""
Views = self.registry('ir.ui.view')
id = Views.create(self.cr, self.uid, {
'name': "Base view",
'type': 'qweb',
'arch': """<root>
<item order="1"/>
</root>"""
})
id2 = Views.create(self.cr, self.uid, {
'name': "Extension",
'type': 'qweb',
'inherit_id': id,
'arch': """<xpath expr="//item" position="before">
<item order="2">
<content t-att-href="foo">bar</content>
</item>
</xpath>"""
})
arch_string = Views.read_combined(
self.cr, self.uid, id, fields=['arch'],
context={'inherit_branding': True})['arch']
arch = ET.fromstring(arch_string)
Views.distribute_branding(arch)
self.assertEqual(
arch,
E.root(
E.item(
E.content("bar", {
't-att-href': "foo",
'data-oe-model': 'ir.ui.view',
'data-oe-id': str(id2),
'data-oe-field': 'arch',
'data-oe-xpath': '/xpath/item/content[1]',
}), {
'order': '2',
}),
E.item({
'order': '1',
'data-oe-model': 'ir.ui.view',
'data-oe-id': str(id),
'data-oe-field': 'arch',
'data-oe-xpath': '/root[1]/item[1]',
})
)
)
def test_esc_no_branding(self):
Views = self.registry('ir.ui.view')
id = Views.create(self.cr, self.uid, {
'name': "Base View",
'type': 'qweb',
'arch': """<root>
<item><span t-esc="foo"/></item>
</root>""",
})
arch_string = Views.read_combined(
self.cr, self.uid, id, fields=['arch'],
context={'inherit_branding': True})['arch']
arch = ET.fromstring(arch_string)
Views.distribute_branding(arch)
self.assertEqual(arch, E.root(E.item(E.span({'t-esc': "foo"}))))
def test_ignore_unbrand(self):
Views = self.registry('ir.ui.view')
id = Views.create(self.cr, self.uid, {
'name': "Base view",
'type': 'qweb',
'arch': """<root>
<item order="1" t-ignore="true">
<t t-esc="foo"/>
</item>
</root>"""
})
id2 = Views.create(self.cr, self.uid, {
'name': "Extension",
'type': 'qweb',
'inherit_id': id,
'arch': """<xpath expr="//item[@order='1']" position="inside">
<item order="2">
<content t-att-href="foo">bar</content>
</item>
</xpath>"""
})
arch_string = Views.read_combined(
self.cr, self.uid, id, fields=['arch'],
context={'inherit_branding': True})['arch']
arch = ET.fromstring(arch_string)
Views.distribute_branding(arch)
self.assertEqual(
arch,
E.root(
E.item(
{'t-ignore': 'true', 'order': '1'},
E.t({'t-esc': 'foo'}),
E.item(
{'order': '2'},
E.content(
{'t-att-href': 'foo'},
"bar")
)
)
),
"t-ignore should apply to injected sub-view branding, not just to"
" the main view's"
)
class test_views(ViewCase):
def test_nonexistent_attribute_removal(self):
Views = self.registry('ir.ui.view')
Views.create(self.cr, self.uid, {
'name': 'Test View',
'model': 'ir.ui.view',
'inherit_id': self.browse_ref('base.view_view_tree').id,
'arch': """<?xml version="1.0"?>
<xpath expr="//field[@name='name']" position="attributes">
<attribute name="non_existing_attribute"></attribute>
</xpath>
""",
})
def _insert_view(self, **kw):
"""Insert view into database via a query to passtrough validation"""
kw.pop('id', None)
kw.setdefault('mode', 'extension' if kw.get('inherit_id') else 'primary')
kw.setdefault('active', True)
keys = sorted(kw.keys())
fields = ','.join('"%s"' % (k.replace('"', r'\"'),) for k in keys)
params = ','.join('%%(%s)s' % (k,) for k in keys)
query = 'INSERT INTO ir_ui_view(%s) VALUES(%s) RETURNING id' % (fields, params)
self.cr.execute(query, kw)
return self.cr.fetchone()[0]
def test_custom_view_validation(self):
Views = self.registry('ir.ui.view')
model = 'ir.actions.act_url'
validate = partial(Views._validate_custom_views, self.cr, self.uid, model)
# validation of a single view
vid = self._insert_view(
name='base view',
model=model,
priority=1,
arch_db="""<?xml version="1.0"?>
<tree string="view">
<field name="url"/>
</tree>
""",
)
self.assertTrue(validate()) # single view
# validation of a inherited view
self._insert_view(
name='inherited view',
model=model,
priority=1,
inherit_id=vid,
arch_db="""<?xml version="1.0"?>
<xpath expr="//field[@name='url']" position="before">
<field name="name"/>
</xpath>
""",
)
self.assertTrue(validate()) # inherited view
# validation of a second inherited view (depending on 1st)
self._insert_view(
name='inherited view 2',
model=model,
priority=5,
inherit_id=vid,
arch_db="""<?xml version="1.0"?>
<xpath expr="//field[@name='name']" position="after">
<field name="target"/>
</xpath>
""",
)
self.assertTrue(validate()) # inherited view
def test_view_inheritance(self):
Views = self.registry('ir.ui.view')
v1 = Views.create(self.cr, self.uid, {
'name': "bob",
'model': 'ir.ui.view',
'arch': """
<form string="Base title" version="7.0">
<separator name="separator" string="Separator" colspan="4"/>
<footer>
<button name="action_next" type="object" string="Next button" class="btn-primary"/>
<button string="Skip" special="cancel" class="btn-default"/>
</footer>
</form>
"""
})
v2 = Views.create(self.cr, self.uid, {
'name': "edmund",
'model': 'ir.ui.view',
'inherit_id': v1,
'arch': """
<data>
<form position="attributes" version="7.0">
<attribute name="string">Replacement title</attribute>
</form>
<footer position="replace">
<footer>
<button name="action_next" type="object" string="New button"/>
</footer>
</footer>
<separator name="separator" position="replace">
<p>Replacement data</p>
</separator>
</data>
"""
})
v3 = Views.create(self.cr, self.uid, {
'name': 'jake',
'model': 'ir.ui.view',
'inherit_id': v1,
'priority': 17,
'arch': """
<footer position="attributes">
<attribute name="thing">bob tata lolo</attribute>
<attribute name="thing" add="bibi and co" remove="tata" separator=" " />
<attribute name="otherthing">bob, tata,lolo</attribute>
<attribute name="otherthing" remove="tata, bob"/>
</footer>
"""
})
view = self.registry('ir.ui.view').fields_view_get(
self.cr, self.uid, v2, view_type='form', context={
                # make sure these inheriting views are taken into account
'check_view_ids': [v2, v3]
})
self.assertEqual(view['type'], 'form')
self.assertEqual(
ET.fromstring(
view['arch'],
parser=ET.XMLParser(remove_blank_text=True)
),
E.form(
E.p("Replacement data"),
E.footer(
E.button(name="action_next", type="object", string="New button"),
thing="bob lolo bibi and co", otherthing="lolo"
),
string="Replacement title", version="7.0"))
def test_view_inheritance_divergent_models(self):
Views = self.registry('ir.ui.view')
v1 = Views.create(self.cr, self.uid, {
'name': "bob",
'model': 'ir.ui.view.custom',
'arch': """
<form string="Base title" version="7.0">
<separator name="separator" string="Separator" colspan="4"/>
<footer>
<button name="action_next" type="object" string="Next button" class="btn-primary"/>
<button string="Skip" special="cancel" class="btn-default"/>
</footer>
</form>
"""
})
v2 = Views.create(self.cr, self.uid, {
'name': "edmund",
'model': 'ir.ui.view',
'inherit_id': v1,
'arch': """
<data>
<form position="attributes" version="7.0">
<attribute name="string">Replacement title</attribute>
</form>
<footer position="replace">
<footer>
<button name="action_next" type="object" string="New button"/>
</footer>
</footer>
<separator name="separator" position="replace">
<p>Replacement data</p>
</separator>
</data>
"""
})
v3 = Views.create(self.cr, self.uid, {
'name': 'jake',
'model': 'ir.ui.menu',
'inherit_id': v1,
'priority': 17,
'arch': """
<footer position="attributes">
<attribute name="thing">bob</attribute>
</footer>
"""
})
view = self.registry('ir.ui.view').fields_view_get(
self.cr, self.uid, v2, view_type='form', context={
                # make sure these inheriting views are taken into account
'check_view_ids': [v2, v3]
})
self.assertEqual(view['type'], 'form')
self.assertEqual(
ET.fromstring(
view['arch'],
parser=ET.XMLParser(remove_blank_text=True)
),
E.form(
E.p("Replacement data"),
E.footer(
E.button(name="action_next", type="object", string="New button")),
string="Replacement title", version="7.0"
))
class ViewModeField(ViewCase):
"""
This should probably, eventually, be folded back into other test case
classes, integrating the test (or not) of the mode field to regular cases
"""
def testModeImplicitValue(self):
""" mode is auto-generated from inherit_id:
* inherit_id -> mode=extension
* not inherit_id -> mode=primary
"""
view = self.browse(self.create({
'inherit_id': None,
'arch': '<qweb/>'
}))
self.assertEqual(view.mode, 'primary')
view2 = self.browse(self.create({
'inherit_id': view.id,
'arch': '<qweb/>'
}))
self.assertEqual(view2.mode, 'extension')
@openerp.tools.mute_logger('openerp.sql_db')
def testModeExplicit(self):
view = self.browse(self.create({
'inherit_id': None,
'arch': '<qweb/>'
}))
view2 = self.browse(self.create({
'inherit_id': view.id,
'mode': 'primary',
'arch': '<qweb/>'
}))
self.assertEqual(view.mode, 'primary')
with self.assertRaises(IntegrityError):
self.create({
'inherit_id': None,
'mode': 'extension',
'arch': '<qweb/>'
})
@openerp.tools.mute_logger('openerp.sql_db')
def testPurePrimaryToExtension(self):
"""
A primary view with inherit_id=None can't be converted to extension
"""
view_pure_primary = self.browse(self.create({
'inherit_id': None,
'arch': '<qweb/>'
}))
with self.assertRaises(IntegrityError):
view_pure_primary.write({'mode': 'extension'})
def testInheritPrimaryToExtension(self):
"""
A primary view with an inherit_id can be converted to extension
"""
base = self.create({'inherit_id': None, 'arch': '<qweb/>'})
view = self.browse(self.create({
'inherit_id': base,
'mode': 'primary',
'arch': '<qweb/>'
}))
view.write({'mode': 'extension'})
def testDefaultExtensionToPrimary(self):
"""
An extension view can be converted to primary
"""
base = self.create({'inherit_id': None, 'arch': '<qweb/>'})
view = self.browse(self.create({
'inherit_id': base,
'arch': '<qweb/>'
}))
view.write({'mode': 'primary'})
class TestDefaultView(ViewCase):
def testDefaultViewBase(self):
self.create({
'inherit_id': False,
'priority': 10,
'mode': 'primary',
'arch': '<qweb/>',
})
v2 = self.create({
'inherit_id': False,
'priority': 1,
'mode': 'primary',
'arch': '<qweb/>',
})
default = self.Views.default_view(self.cr, self.uid, False, 'qweb')
self.assertEqual(
default, v2,
"default_view should get the view with the lowest priority for "
"a (model, view_type) pair"
)
def testDefaultViewPrimary(self):
v1 = self.create({
'inherit_id': False,
'priority': 10,
'mode': 'primary',
'arch': '<qweb/>',
})
self.create({
'inherit_id': False,
'priority': 5,
'mode': 'primary',
'arch': '<qweb/>',
})
v3 = self.create({
'inherit_id': v1,
'priority': 1,
'mode': 'primary',
'arch': '<qweb/>',
})
default = self.Views.default_view(self.cr, self.uid, False, 'qweb')
self.assertEqual(
default, v3,
"default_view should get the view with the lowest priority for "
"a (model, view_type) pair in all the primary tables"
)
class TestViewCombined(ViewCase):
"""
* When asked for a view, instead of looking for the closest parent with
inherit_id=False look for mode=primary
* If root.inherit_id, resolve the arch for root.inherit_id (?using which
model?), then apply root's inheritance specs to it
* Apply inheriting views on top
"""
def setUp(self):
super(TestViewCombined, self).setUp()
self.a1 = self.create({
'model': 'a',
'arch': '<qweb><a1/></qweb>'
})
self.a2 = self.create({
'model': 'a',
'inherit_id': self.a1,
'priority': 5,
'arch': '<xpath expr="//a1" position="after"><a2/></xpath>'
})
self.a3 = self.create({
'model': 'a',
'inherit_id': self.a1,
'arch': '<xpath expr="//a1" position="after"><a3/></xpath>'
})
# mode=primary should be an inheritance boundary in both direction,
# even within a model it should not extend the parent
self.a4 = self.create({
'model': 'a',
'inherit_id': self.a1,
'mode': 'primary',
'arch': '<xpath expr="//a1" position="after"><a4/></xpath>',
})
self.b1 = self.create({
'model': 'b',
'inherit_id': self.a3,
'mode': 'primary',
'arch': '<xpath expr="//a1" position="after"><b1/></xpath>'
})
self.b2 = self.create({
'model': 'b',
'inherit_id': self.b1,
'arch': '<xpath expr="//a1" position="after"><b2/></xpath>'
})
self.c1 = self.create({
'model': 'c',
'inherit_id': self.a1,
'mode': 'primary',
'arch': '<xpath expr="//a1" position="after"><c1/></xpath>'
})
self.c2 = self.create({
'model': 'c',
'inherit_id': self.c1,
'priority': 5,
'arch': '<xpath expr="//a1" position="after"><c2/></xpath>'
})
self.c3 = self.create({
'model': 'c',
'inherit_id': self.c2,
'priority': 10,
'arch': '<xpath expr="//a1" position="after"><c3/></xpath>'
})
self.d1 = self.create({
'model': 'd',
'inherit_id': self.b1,
'mode': 'primary',
'arch': '<xpath expr="//a1" position="after"><d1/></xpath>'
})
def test_basic_read(self):
arch = self.read_combined(self.a1)['arch']
self.assertEqual(
ET.fromstring(arch),
E.qweb(
E.a1(),
E.a3(),
E.a2(),
), arch)
def test_read_from_child(self):
arch = self.read_combined(self.a3)['arch']
self.assertEqual(
ET.fromstring(arch),
E.qweb(
E.a1(),
E.a3(),
E.a2(),
), arch)
def test_read_from_child_primary(self):
arch = self.read_combined(self.a4)['arch']
self.assertEqual(
ET.fromstring(arch),
E.qweb(
E.a1(),
E.a4(),
E.a3(),
E.a2(),
), arch)
def test_cross_model_simple(self):
arch = self.read_combined(self.c2)['arch']
self.assertEqual(
ET.fromstring(arch),
E.qweb(
E.a1(),
E.c3(),
E.c2(),
E.c1(),
E.a3(),
E.a2(),
), arch)
def test_cross_model_double(self):
arch = self.read_combined(self.d1)['arch']
self.assertEqual(
ET.fromstring(arch),
E.qweb(
E.a1(),
E.d1(),
E.b2(),
E.b1(),
E.a3(),
E.a2(),
), arch)
class TestOptionalViews(ViewCase):
"""
Tests ability to enable/disable inherited views, formerly known as
inherit_option_id
"""
def setUp(self):
super(TestOptionalViews, self).setUp()
self.v0 = self.create({
'model': 'a',
'arch': '<qweb><base/></qweb>',
})
self.v1 = self.create({
'model': 'a',
'inherit_id': self.v0,
'active': True,
'priority': 10,
'arch': '<xpath expr="//base" position="after"><v1/></xpath>',
})
self.v2 = self.create({
'model': 'a',
'inherit_id': self.v0,
'active': True,
'priority': 9,
'arch': '<xpath expr="//base" position="after"><v2/></xpath>',
})
self.v3 = self.create({
'model': 'a',
'inherit_id': self.v0,
'active': False,
'priority': 8,
'arch': '<xpath expr="//base" position="after"><v3/></xpath>'
})
def test_applied(self):
""" mandatory and enabled views should be applied
"""
arch = self.read_combined(self.v0)['arch']
self.assertEqual(
ET.fromstring(arch),
E.qweb(
E.base(),
E.v1(),
E.v2(),
)
)
def test_applied_state_toggle(self):
""" Change active states of v2 and v3, check that the results
are as expected
"""
self.browse(self.v2).toggle()
arch = self.read_combined(self.v0)['arch']
self.assertEqual(
ET.fromstring(arch),
E.qweb(
E.base(),
E.v1(),
)
)
self.browse(self.v3).toggle()
arch = self.read_combined(self.v0)['arch']
self.assertEqual(
ET.fromstring(arch),
E.qweb(
E.base(),
E.v1(),
E.v3(),
)
)
self.browse(self.v2).toggle()
arch = self.read_combined(self.v0)['arch']
self.assertEqual(
ET.fromstring(arch),
E.qweb(
E.base(),
E.v1(),
E.v2(),
E.v3(),
)
)
class TestXPathExtentions(common.BaseCase):
def test_hasclass(self):
tree = E.node(
E.node({'class': 'foo bar baz'}),
E.node({'class': 'foo bar'}),
{'class': "foo"})
self.assertEqual(
len(tree.xpath('//node[hasclass("foo")]')),
3)
self.assertEqual(
len(tree.xpath('//node[hasclass("bar")]')),
2)
self.assertEqual(
len(tree.xpath('//node[hasclass("baz")]')),
1)
self.assertEqual(
len(tree.xpath('//node[hasclass("foo")][not(hasclass("bar"))]')),
1)
self.assertEqual(
len(tree.xpath('//node[hasclass("foo", "baz")]')),
1)
| agpl-3.0 | -832,309,322,385,947,100 | 32.371126 | 107 | 0.47535 | false |
ArcaniteSolutions/truffe2 | truffe2/vehicles/models.py | 2 | 13930 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.shortcuts import get_object_or_404
from generic.models import GenericModel, GenericStateModel, FalseFK, GenericGroupsModel, GenericStateRootValidable, GenericGroupsModerableModel, GenericContactableModel, SearchableModel
from rights.utils import AgepolyEditableModel, UnitEditableModel
from users.models import TruffeUser
class _Provider(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
name = models.CharField(_('Nom'), max_length=255)
description = models.TextField(_('Description'))
class MetaData:
list_display = [
('name', _(u'Nom')),
]
details_display = list_display + [
('description', _(u'Description')),
]
default_sort = "[1, 'asc']" # name
filter_fields = ('name', 'description')
base_title = _(u'Fournisseurs')
list_title = _(u'Liste des fournisseurs')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-suitcase'
menu_id = 'menu-vehicles-provider'
        help_list = _(u"""Les entreprises fournissant des services de locations de véhicules.""")
class MetaSearch(SearchableModel.MetaSearch):
        extra_text = u'mobility véhicule'
fields = [
'name',
'description',
]
class Meta:
abstract = True
def __unicode__(self):
return self.name
def get_types(self):
return self.vehicletype_set.filter(deleted=False).order_by('name')
def get_cards(self):
return self.card_set.filter(deleted=False).order_by('name')
class _VehicleType(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
provider = FalseFK('vehicles.models.Provider', verbose_name=_('Fournisseur'))
name = models.CharField(_('Nom'), max_length=255)
description = models.TextField(_('Description'))
class MetaData:
list_display = [
('name', _(u'Nom')),
('provider', _(u'Fournisseur')),
]
details_display = list_display + [
('description', _(u'Description')),
]
default_sort = "[1, 'asc']" # name
filter_fields = ('name', 'description', 'provider__name')
        base_title = _(u'Types de véhicule')
        list_title = _(u'Liste des types de véhicules')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-truck'
menu_id = 'menu-vehicles-type'
        help_list = _(u"""Les différents types de véhicules, par fournisseur""")
class MetaSearch(SearchableModel.MetaSearch):
        extra_text = u'mobility véhicule'
fields = [
'name',
'description',
'provider',
]
class Meta:
abstract = True
def __unicode__(self):
return self.name
class _Card(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
provider = FalseFK('vehicles.models.Provider', verbose_name=_('Fournisseur'))
name = models.CharField(_('Nom'), max_length=255)
    number = models.CharField(_(u'Numéro'), max_length=255)
description = models.TextField(_('Description'))
    exclusif = models.BooleanField(_('Usage exclusif'), default=True, help_text=_(u'Ne peut pas être utilisé plusieurs fois en même temps ?'))
class MetaData:
list_display = [
('name', _(u'Nom')),
('provider', _(u'Fournisseur')),
            ('number', _(u'Numéro')),
]
details_display = list_display + [
('description', _(u'Description')),
('exclusif', _(u'Usage exclusif'))
]
default_sort = "[1, 'asc']" # name
yes_or_no_fields = ['exclusif']
filter_fields = ('name', 'number', 'description', 'provider__name')
base_title = _(u'Cartes')
list_title = _(u'Liste des cartes')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-credit-card'
menu_id = 'menu-vehicles-cards'
        help_list = _(u"""Les différentes cartes utilisées pour les réservations""")
class MetaSearch(SearchableModel.MetaSearch):
        extra_text = u'mobility véhicule'
fields = [
'name',
'description',
'provider',
'number',
]
class Meta:
abstract = True
def __unicode__(self):
return u'{} ({})'.format(self.name, self.number)
class _Location(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
name = models.CharField(_('Nom'), max_length=255)
description = models.TextField(_('Description'))
url_location = models.URLField(_('URL carte lieu'), blank=True, null=True)
class MetaData:
list_display = [
('name', _(u'Nom')),
]
details_display = list_display + [
('description', _(u'Description')),
('url_location', _(u'URL carte lieu')),
]
default_sort = "[1, 'asc']" # name
filter_fields = ('name', 'description')
base_title = _(u'Lieux')
list_title = _(u'Liste des lieux')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-location-arrow'
menu_id = 'menu-vehicles-location'
        help_list = _(u"""Les lieux de récupération des locations""")
class MetaSearch(SearchableModel.MetaSearch):
        extra_text = u'mobility véhicule'
fields = [
'name',
'description',
]
class Meta:
abstract = True
def __unicode__(self):
return self.name
class _Booking(GenericModel, GenericGroupsModerableModel, GenericGroupsModel, GenericContactableModel, GenericStateRootValidable, GenericStateModel, UnitEditableModel, SearchableModel):
class MetaRightsUnit(UnitEditableModel.MetaRightsUnit):
access = 'LOGISTIQUE'
moderation_access = 'SECRETARIAT'
unit = FalseFK('units.models.Unit')
title = models.CharField(_(u'Titre'), max_length=255)
responsible = models.ForeignKey(TruffeUser, verbose_name=_(u'Responsable'))
reason = models.TextField(_(u'Motif'))
remark = models.TextField(_(u'Remarques'), blank=True, null=True)
remark_agepoly = models.TextField(_(u'Remarques AGEPoly'), blank=True, null=True)
provider = FalseFK('vehicles.models.Provider', verbose_name=_(u'Fournisseur'))
    vehicletype = FalseFK('vehicles.models.VehicleType', verbose_name=_(u'Type de véhicule'))
card = FalseFK('vehicles.models.Card', verbose_name=_(u'Carte'), blank=True, null=True)
location = FalseFK('vehicles.models.Location', verbose_name=_(u'Lieu'), blank=True, null=True)
    start_date = models.DateTimeField(_(u'Début de la réservation'))
    end_date = models.DateTimeField(_(u'Fin de la réservation'))
class MetaData:
list_display = [
('title', _('Titre')),
            ('start_date', _(u'Date début')),
('end_date', _('Date fin')),
('provider', _('Fournisseur')),
            ('vehicletype', _(u'Type de véhicule')),
('status', _('Statut')),
]
details_display = list_display + [
('responsible', _('Responsable')),
('reason', _('Motif')),
('remark', _('Remarques')),
('remark_agepoly', _('Remarques AGEPoly')),
('card', _('Carte')),
('get_location', _('Lieu')),
]
filter_fields = ('title', 'status')
        base_title = _(u'Réservations de véhicule')
        list_title = _(u'Liste de toutes les réservations de véhicules')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-ambulance'
default_sort = "[3, 'desc']" # end_date
forced_widths = {
'1': '25%',
'2': '140px', # start date
'3': '140px', # end date
}
forced_widths_related = {
'1': '15%',
'2': '25%',
'4': '150px', # start date
'5': '150px', # end date
}
menu_id = 'menu-vehicles-booking'
menu_id_calendar = 'menu-vehicles-booking-calendar'
menu_id_calendar_related = 'menu-vehicles-booking-calendar-related'
datetime_fields = ['start_date', 'end_date']
safe_fields = ['get_location']
has_unit = True
        help_list = _(u"""Les réservations de véhicules te permettent de demander la location d'un véhicule pour ton unité.
Ils sont soumis à validation par le secrétariat de l'AGEPoly. Il faut toujours faire les réservations le plus tôt possible !""")
        help_list_related = _(u"""La liste de toutes les réservations de véhicules.""")
@staticmethod
def extra_args_for_edit(request, current_unit, current_year):
from vehicles.models import Provider
return {'providers': Provider.objects.filter(deleted=False).order_by('name')}
class MetaEdit:
datetime_fields = ('start_date', 'end_date')
class MetaSearch(SearchableModel.MetaSearch):
        extra_text = u'mobility véhicule réservation'
fields = [
'title',
'card',
'provider',
'location',
'vehicletype',
'responsible',
'remark',
'reason',
'remark_agepoly',
]
class MetaState(GenericStateRootValidable.MetaState):
states_texts = {
            '0_draft': _(u'La réservation est en cours de création et n\'est pas publique.'),
            '1_asking': _(u'La réservation est en cours de modération. Elle n\'est pas éditable. Sélectionner ce statut pour demander une modération !'),
            '2_online': _(u'La réservation est validée. Elle n\'est pas éditable.'),
            '3_archive': _(u'La réservation est archivée. Elle n\'est plus modifiable.'),
            '4_deny': _(u'La modération a été refusée. Le véhicule n\'était probablement pas disponible.'),
}
def build_form_validation(request, obj):
from vehicles.models import Location
class FormValidation(forms.Form):
remark_agepoly = forms.CharField(label=_('Remarque'), widget=forms.Textarea, required=False)
card = forms.ModelChoiceField(label=_(u'Carte'), queryset=obj.provider.get_cards(), required=False)
location = forms.ModelChoiceField(label=_(u'Lieu'), queryset=Location.objects.filter(deleted=False).order_by('name'), required=False)
return FormValidation
states_bonus_form = {
'2_online': build_form_validation
}
def switch_status_signal(self, request, old_status, dest_status):
from vehicles.models import Location, Card
if dest_status == '2_online':
if request.POST.get('remark_agepoly'):
if self.remark_agepoly:
self.remark_agepoly += '\n' + request.POST.get('remark_agepoly')
else:
self.remark_agepoly = request.POST.get('remark_agepoly')
self.save()
if request.POST.get('card'):
self.card = get_object_or_404(Card, pk=request.POST.get('card'), provider=self.provider, deleted=False)
self.save()
if request.POST.get('location'):
self.location = get_object_or_404(Location, pk=request.POST.get('location'), deleted=False)
self.save()
s = super(_Booking, self)
if hasattr(s, 'switch_status_signal'):
s.switch_status_signal(request, old_status, dest_status)
class Meta:
abstract = True
def __unicode__(self):
return self.title
def get_location(self):
if self.location:
if self.location.url_location:
return u'<a href="{}">{}</a>'.format(self.location.url_location, self.location)
else:
return self.location.__unicode__()
else:
return ''
def genericFormExtraInit(self, form, current_user, *args, **kwargs):
"""Remove fields that should be edited by SECRETARIAT CDD only."""
if not self.rights_in_root_unit(current_user, 'SECRETARIAT'):
del form.fields['card']
del form.fields['location']
del form.fields['remark_agepoly']
unit_users_pk = map(lambda user: user.pk, self.unit.users_with_access())
form.fields['responsible'].queryset = TruffeUser.objects.filter(pk__in=unit_users_pk).order_by('first_name', 'last_name')
def genericFormExtraClean(self, data, form):
if 'provider' in data:
if 'card' in data and data['card']:
if data['card'].provider != data['provider']:
                    raise forms.ValidationError(_(u'La carte n\'est pas liée au fournisseur sélectionné'))
            if 'vehicletype' in data and data['vehicletype']:
                if data['vehicletype'].provider != data['provider']:
                    raise forms.ValidationError(_(u'Le type de véhicule n\'est pas lié au fournisseur sélectionné'))
def conflicting_reservation(self):
return self.__class__.objects.exclude(pk=self.pk, deleted=True).filter(status__in=['2_online'], end_date__gt=self.start_date, start_date__lt=self.end_date)
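    # Illustrative note (sketch, not part of the original model): the query above
    # is the usual interval-overlap test "other.end_date > self.start_date AND
    # other.start_date < self.end_date", restricted to validated ('2_online')
    # bookings. For example, an online booking from 10:00 to 12:00 conflicts with
    # a new request from 11:00 to 13:00 (12:00 > 11:00 and 10:00 < 13:00), but not
    # with one from 12:00 to 14:00.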
| bsd-2-clause | -4,125,093,519,237,388,300 | 33.147783 | 185 | 0.59247 | false |
jonnybazookatone/ADSimportpipeline | lib/conversions.py | 1 | 1831 | #!/usr/bin/env python
import ads
from ads.Looker import Looker
class ConvertBibcodes:
def __init__(self):
self.bib2alt = Looker(ads.alternates).look
self.bib2epr = Looker(ads.pub2arx).look
self.alt2bib = Looker(ads.altlist).look
self.epr2bib = Looker(ads.ematches).look
def getAlternates(self,bbc):
"""
Returns a list of alternate bibcodes for a record.
"""
if isinstance(bbc, list):
bibcode = bbc[0].strip()
else:
bibcode = bbc.strip()
alternates = []
res = self.bib2alt(bibcode).strip()
rez = self.bib2epr(bibcode).strip()
if res:
for line in res.split('\n'):
alternate = line.split('\t')[1]
if alternate != bibcode:
alternates.append(alternate)
if rez:
alternates.append(rez.strip().split('\n')[0].split('\t')[1])
return alternates
def Canonicalize(self,biblist,remove_matches=False):
"""
Convert a list of bibcodes into a list of canonical
bibcodes (canonical bibcodes remain unchanged).
Setting 'remove_matches' to True will remove e-print
bibcodes that have been matched
"""
if isinstance(biblist, str):
biblist = [biblist]
newlist = []
for bibcode in biblist:
res = self.alt2bib(bibcode).strip()
rez = self.epr2bib(bibcode).strip()
if res:
bibcode = res.strip().split('\n')[0].split('\t')[1]
elif rez and remove_matches:
bibcode = ''
elif rez:
bibcode = rez.strip().split('\n')[0].split('\t')[1]
if bibcode:
newlist.append(bibcode)
return list(set(newlist))
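# Usage sketch (illustrative only; the bibcode strings below are made up and the
# class relies on the 'ads' lookup tables being available):
#
#   converter = ConvertBibcodes()
#   alternates = converter.getAlternates('2015ApJ...800....1A')
#   canonical = converter.Canonicalize(['2015arXiv150100001A'],
#                                      remove_matches=True)
#
# getAlternates() returns the alternate bibcodes recorded for one record, while
# Canonicalize() maps a list of bibcodes onto their canonical forms and, with
# remove_matches=True, drops e-print bibcodes that have already been matched.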
| gpl-3.0 | -3,548,930,736,092,136,400 | 32.290909 | 72 | 0.541234 | false |
nickraptis/fidibot | src/modules/help.py | 1 | 2615 | # Author: Nick Raptis <[email protected]>
"""
Module for listing commands and help.
"""
from basemodule import BaseModule, BaseCommandContext
from alternatives import _
class HelpContext(BaseCommandContext):
def cmd_list(self, argument):
"""List commands"""
arg = argument.lower()
index = self.bot.help_index
public = "public commands -- %s" % " ".join(index['public'])
private = "private commands -- %s" % " ".join(index['private'])
if 'all' in arg or 'both' in arg:
output = "\n".join((public, private))
elif 'pub' in arg or self.target.startswith('#'):
output = public
elif 'priv' in arg or not self.target.startswith('#'):
output = private
else:
# we shouldn't be here
self.logger.error("cmd_list")
return
self.send(self.target, output)
def cmd_modules(self, argument):
"""List active modules"""
index = self.bot.help_index
output = "active modules -- %s" % " ".join(index['modules'].keys())
self.send(self.target, output)
def cmd_help(self, argument):
"""Get help on a command or module"""
arg = argument.lower()
index = self.bot.help_index
target = self.target
args = arg.split()
if not args:
s = "usage: help <command> [public|private] / help module <module>"
self.send(target, s)
elif args[0] == 'module':
args.pop(0)
if not args:
self.send(target, "usage: help module <module>")
else:
help_item = index['modules'].get(args[0])
if help_item:
self.send(target, help_item['summary'])
else:
self.send(target, _("No help for %s"), args[0])
else:
args.append("")
cmd = args.pop(0)
cmd_type = args.pop(0)
if 'pu' in cmd_type or self.target.startswith('#'):
cmd_type = 'public'
elif 'pr' in cmd_type or not self.target.startswith('#'):
cmd_type = 'private'
else:
# we shouldn't be here
                self.logger.error("cmd_help")
return
help_item = index[cmd_type].get(cmd)
if help_item:
self.send(target, index[cmd_type][cmd]['summary'])
else:
self.send(target, _("No help for %s"), cmd)
class HelpModule(BaseModule):
context_class = HelpContext
module = HelpModule
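# Illustrative note (the exact structure is an assumption inferred from the
# lookups above; it is not defined in this file): bot.help_index is expected to
# look roughly like
#
#   {'public':  {'list': {'summary': 'List commands'}, ...},
#    'private': {'help': {'summary': 'Get help on a command or module'}, ...},
#    'modules': {'help': {'summary': 'Module for listing commands and help.'}}}
#
# cmd_list() joins the command names (keys) of 'public'/'private', cmd_modules()
# lists the keys of 'modules', and cmd_help() replies with the 'summary' entry
# of a command or module.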
| bsd-2-clause | -5,116,395,268,455,257,000 | 32.961039 | 79 | 0.521606 | false |
google-research/leaf-audio | example/train.py | 1 | 2594 | # coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop using the LEAF frontend."""
import os
from typing import Optional
import gin
from leaf_audio import models
from example import data
import tensorflow as tf
import tensorflow_datasets as tfds
@gin.configurable
def train(workdir: str = '/tmp/',
dataset: str = 'speech_commands',
num_epochs: int = 10,
steps_per_epoch: Optional[int] = None,
learning_rate: float = 1e-4,
batch_size: int = 64,
**kwargs):
"""Trains a model on a dataset.
Args:
workdir: where to store the checkpoints and metrics.
    dataset: name of a tensorflow_datasets audio dataset.
    num_epochs: number of epochs to train the model for.
steps_per_epoch: number of steps that define an epoch. If None, an epoch is
a pass over the entire training set.
learning_rate: Adam's learning rate.
batch_size: size of the mini-batches.
**kwargs: arguments to the models.AudioClassifier class, namely the encoder
and the frontend models (tf.keras.Model).
"""
datasets, info = tfds.load(dataset, with_info=True)
datasets = data.prepare(datasets, batch_size=batch_size)
num_classes = info.features['label'].num_classes
model = models.AudioClassifier(num_outputs=num_classes, **kwargs)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = 'sparse_categorical_accuracy'
model.compile(loss=loss_fn,
optimizer=tf.keras.optimizers.Adam(learning_rate),
metrics=[metric])
ckpt_path = os.path.join(workdir, 'checkpoint')
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=ckpt_path,
save_weights_only=True,
monitor=f'val_{metric}',
mode='max',
save_best_only=True)
model.fit(datasets['train'],
validation_data=datasets['eval'],
batch_size=None,
epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
callbacks=[model_checkpoint_callback])
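# Usage sketch (illustrative; the gin file path below is an assumption):
# train() is gin-configurable, so a caller typically binds its arguments from a
# config file and then invokes it, e.g.
#
#   import gin
#   gin.parse_config_file('example/configs/leaf.gin')   # hypothetical path
#   train(workdir='/tmp/leaf_run', dataset='speech_commands', num_epochs=10)
#
# Any extra keyword arguments (for instance the frontend model) are forwarded
# through **kwargs to models.AudioClassifier.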
| apache-2.0 | 7,620,625,562,801,573,000 | 35.027778 | 79 | 0.692367 | false |
moagstar/xsorted | docs/conf.py | 1 | 8799 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/api")
module_dir = os.path.join(__location__, "../xsorted")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'xsorted'
copyright = u'2017, Daniel Bradburn'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from xsorted import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'xsorted-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'xsorted Documentation',
u'Daniel Bradburn', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| mit | -1,019,507,065,598,080,300 | 33.778656 | 85 | 0.690533 | false |
buckets1337/MotherMUD | Objects.py | 1 | 25773 | # Objects.py
"""
This file defines the various objects that can be found in the world
"""
#--------------------------
# This file defines the objects in the world. Best practice for naming an object follows: <region><Room><Name>, where the region is lowercase and every other word in the smashed string is capitalized
#--------------------------
import os
import World
import Globals
indexList = []
fromFileList = Globals.fromFileList
equipmentFromFile = Globals.equipmentFromFile
fileList = []
eqFileList = []
savedEqFileList = []
for region in Globals.RegionsList:
directoryFiles = os.listdir('blueprints/obj/'+str(region)+'/')
eqDirectoryFiles = os.listdir('blueprints/equip/'+str(region)+'/')
if os.path.exists('data/world/' + str(region) + '/equip/'):
savedEqFiles = os.listdir('data/world/' + str(region) + '/equip/')
else:
savedEqFiles = []
for obj in directoryFiles:
path = str(region)+'/'+obj
fileList.append(path)
for obj in eqDirectoryFiles:
path = str(region)+'/'+obj
eqFileList.append(path)
for obj in savedEqFiles:
path = str(region) + '/equip/' + obj
savedEqFileList.append(path)
def setLocation(location):
global room
room = location
def loadSavedEq():
'''
loads equipment into rooms from equipment definition files after server restart
'''
for region in Globals.regionListDict:
for room in Globals.regionListDict[region]:
path='data/world/' + region + '/equip/' + room + '/'
#shortPath='data/world' + region + '/equip/'
if os.path.exists(path):
Globals.regionListDict[region][room].equipment = {}
dirList = os.listdir(path)
for eqFile in dirList:
if not eqFile.endswith('~'):
newEq = buildEquipmentFromFile(eqFile, path)
# Globals.regionListDict[region][room].items.append(newEq)
def saveEq():
'''
handles saving all equipment in the world (but not player equipment) into unique equipment definition files when the server is shutdown
'''
for region in Globals.regionListDict:
for room in Globals.regionListDict[region]:
path='data/world/'+region+'/equip/'+room+'/'
shortPath='data/world/'+region+'/equip/'
if not os.path.exists(shortPath):
os.makedirs(shortPath)
if not os.path.exists(path):
os.makedirs(path)
dirList = os.listdir(path)
for eqFile in dirList:
print eqFile
os.remove(path+eqFile)
for eq in Globals.regionListDict[region][room].equipment:
saveEqToFile(eq, path)
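# Illustrative note on the on-disk layout implied above (the region and room
# names below are hypothetical): every room gets its own directory
#
#   data/world/<region>/equip/<room>/     e.g. data/world/town/equip/townSquare/
#
# saveEq() first deletes the files already present there, then writes one
# key=value file per piece of equipment, named after the equipment object's ID,
# via saveEqToFile() below.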
def saveEqToFile(eq, path):
'''
handles saving a single bit of equipment to a unique equipment definition file when the server is shutdown
'''
eqType = ''
if hasattr(eq.kind.equipment, 'weapon'):
if eq.kind.equipment.weapon != None:
eqType = 'weapon'
if hasattr(eq.kind.equipment, 'armor'):
if eq.kind.equipment.armor != None:
eqType = 'armor'
battleCommands = []
if eq.kind.equipment.battleCommands != [''] and eq.kind.equipment.battleCommands != []:
for command in eq.kind.equipment.battleCommands:
battleCommands.append(command)
if battleCommands == []:
battleCommands = ''
itemGrabHandler = 'False'
if hasattr(eq.kind, 'itemGrabHandler'):
if eq.kind.itemGrabHandler != None:
itemGrabHandler = 'True'
notDroppable = 'False'
if hasattr(eq.kind, 'itemGrabHandler') and eq.kind.itemGrabHandler != None:
if eq.kind.itemGrabHandler.notDroppable:
notDroppable = 'True'
objectSpawner = 'False'
if hasattr(eq.kind, 'objectSpawner'):
if eq.kind.objectSpawner != None:
objectSpawner = 'True'
filePath = path + str(eq)
with open(filePath, 'w') as f:
f.write('ID=%s\n' %str(eq))
f.write('currentRoom=%s\n' %(str(eq.currentRoom.region)+ ":" +str(eq.currentRoom.name)))
f.write('\n')
f.write('name=%s\n' %eq.name)
f.write('type=%s\n' %eqType)
f.write('slot=%s\n' %eq.kind.equipment.slot)
f.write('\n')
f.write('durability=%s\n' %eq.kind.equipment.durability)
f.write('maxDurability=%s\n' %eq.kind.equipment.maxDurability)
f.write('worth=%s\n' %eq.kind.equipment.worth)
f.write('\n')
f.write('description=%s\n' %eq.description)
f.write('\n')
f.write('longDescription=%s\n' %eq.longDescription)
f.write('\n')
f.write('isVisible=%s\n' %eq.isVisible)
f.write('\n')
f.write('hp=%s\n' %eq.kind.equipment.hp)
f.write('pp=%s\n' %eq.kind.equipment.pp)
f.write('offense=%s\n' %eq.kind.equipment.offense)
f.write('defense=%s\n' %eq.kind.equipment.defense)
f.write('speed=%s\n' %eq.kind.equipment.speed)
f.write('guts=%s\n' %eq.kind.equipment.guts)
f.write('luck=%s\n' %eq.kind.equipment.luck)
f.write('vitality=%s\n' %eq.kind.equipment.vitality)
f.write('IQ=%s\n' %eq.kind.equipment.IQ)
f.write('\n')
f.write('battleCommands=%s\n' %battleCommands)
f.write('\n')
f.write('statusEffect=%s\n' %eq.kind.equipment.statusEffect)
f.write('\n')
f.write('onUse=%s\n' %eq.kind.equipment.onUse)
f.write('\n\n')
f.write('kind.isCarryable=%s\n' %eq.kind.isCarryable)
f.write('kind.respawns=%s\n' %eq.kind.respawns)
f.write('\n')
f.write('kind.itemGrabHandler=%s\n' %itemGrabHandler)
if itemGrabHandler == 'True':
f.write('kind.itemGrabHandler.notDroppable=%s\n' %notDroppable)
f.write('\n')
f.write('kind.objectSpawner=%s\n' %objectSpawner)
if objectSpawner == 'True':
f.write('kind.objectSpawner.time=%s\n' %eq.kind.objectSpawner.time)
f.write('kind.objectSpawner.spawnOdds=%s\n' %eq.kind.objectSpawner.spawnOdds)
f.write('kind.objectSpawner.container=%s\n' %eq.kind.objectSpawner.container)
f.write('kind.objectSpawner.cycles=%s\n' %eq.kind.objectSpawner.cycles)
f.write('kind.objectSpawner.repeat=%s\n' %eq.kind.objectSpawner.repeat)
f.write('kind.objectSpawner.active=%s\n' %eq.kind.objectSpawner.active)
def buildObjectFromFile(file):
'''
creates an object by constructing it out of details in a file
'''
print file
if str(file).endswith('~'):
print "\n"
return
path = 'blueprints/obj/' + file
with open(path, 'r') as f:
fileData = f.readlines()
newObject = World.Object('none', 'none')
print fileData
kind = None
isCarryable = None
isVisible = None
isLocked = False
respawns = None
objectSpawner = None
itemGrabHandler = None
repeat = None
time = None
spawnOdds = None
container = None
cycles = None
repeat = None
active = None
notDroppable = None
objectSpawnerComponent = None
itemGrabHandlerComponent = None
itemComponent = None
mobActive = None
mobCycles = None
mobMode = None
mobSpawnOdds = None
mobTime = None
mobFile = None
mobSpawner = None
onUse = None
for Data in fileData:
if Data.startswith('name='):
newObject.name = Data[6:-2]
if Data.startswith('description='):
newObject.description = Data[13:-2]
if Data.startswith('longDescription='):
newObject.longDescription = Data[17:-2]
if Data.startswith('isVisible='):
text = Data[10:-1]
if text == 'True':
newObject.isVisible = True
elif text == 'False':
newObject.isVisible = False
if Data.startswith('kind='):
text = Data[5:-1]
#print "kind:" + text
if text == 'item':
kind = 'item'
elif text == 'container':
kind = 'container'
if Data.startswith('kind.isCarryable='):
text = Data[17:-1]
#print "isCarryable:" +text
if text == 'True':
isCarryable = True
elif text == 'False':
isCarryable = False
if Data.startswith('kind.respawns='):
text = Data[14:-1]
if text == 'True':
respawns = True
elif text == 'False':
respawns = False
if Data.startswith('kind.isLocked='):
text = Data[14:-1]
if text == 'True':
isLocked = True
if text == 'False':
isLocked = False
if Data.startswith('kind.respawnContents='):
text = Data[21:-1]
if text == 'True':
respawnContents = True
elif text == 'False':
respawnContents = False
if Data.startswith('kind.objectSpawner='):
text = Data[19:-1]
if text == 'True':
objectSpawner = True
elif text == 'False':
objectSpawner = False
if Data.startswith('kind.objectSpawner.time='):
time = int(Data[24:-1])
if Data.startswith('kind.objectSpawner.spawnOdds='):
text = Data[29:-1]
oddsList = text.split(',')
#print "oddsList:" + str(oddsList)
nestedOddsList = []
for odds in oddsList:
nestedOddsList.append(odds.split(':'))
for oddsEntry in nestedOddsList:
oddsEntry[1] = int(oddsEntry[1])
if oddsEntry[0] == 'True':
oddsEntry[0] = True
elif oddsEntry[0] == 'False':
oddsEntry[0] = False
#print nestedOddsList
spawnOdds = nestedOddsList
if Data.startswith('kind.objectSpawner.container='):
text = Data[29:-1]
if text == 'None':
container = None
else:
container = text[1:-1] # this should be a reference to another object
container = container.split(', ')
if Data.startswith('kind.objectSpawner.cycles='):
cycles = int(Data[26:-1])
if Data.startswith('kind.objectSpawner.repeat='):
text = Data[26:-1]
if text == 'True':
repeat = True
elif text == 'False':
repeat = False
if Data.startswith('kind.objectSpawner.active='):
text = Data[26:-1]
#print "***active:" + text
if text == 'True':
active = True
elif text == 'False':
active = False
if Data.startswith('kind.itemGrabHandler='):
text = Data[21:-1]
#print "itemGrabHandler:" +text
if text == 'True':
itemGrabHandler = True
elif text == 'False':
itemGrabHandler = False
if Data.startswith('kind.itemGrabHandler.notDroppable='):
text = Data[34:-1]
#print "*** notDroppabletext:" + text
if text == 'True':
notDroppable = True
elif text == 'False':
notDroppable = False
if Data.startswith('kind.onUse='):
text = Data[11:-1]
onUse = text
if Data.startswith('mobSpawner='):
text = Data[11:-1]
if text == 'True':
mobSpawner = True
elif text == 'False':
mobSpawner = False
if Data.startswith('mobSpawner.mobFile='):
text = Data[19:-1]
mobFile = text
if Data.startswith('mobSpawner.time='):
text = Data[16:-1]
mobTime = int(text)
if Data.startswith('mobSpawner.oddsList='):
text = Data[20:-1]
oddsList = text.split(',')
#print "oddsList:" + str(oddsList)
nestedOddsList = []
for odds in oddsList:
nestedOddsList.append(odds.split(':'))
for oddsEntry in nestedOddsList:
oddsEntry[1] = int(oddsEntry[1])
if oddsEntry[0] == 'True':
oddsEntry[0] = True
elif oddsEntry[0] == 'False':
oddsEntry[0] = False
#print nestedOddsList
mobSpawnOdds = nestedOddsList
if Data.startswith('mobSpawner.mode='):
text = Data[16:-1]
print "mobModeff:" + text
mobMode = text
if Data.startswith('mobSpawner.cycles='):
text = Data[18:-1]
mobCycles = int(text)
if Data.startswith('mobSpawner.active='):
text = Data[18:-1]
if text == 'True':
mobActive = True
elif text == 'False':
mobActive = False
#print kind
if kind == 'item':
# print itemGrabHandler
# print objectSpawnerComponent
# print isCarryable
itemComponent = World.item()
itemComponent.owner = newObject
if kind == 'container':
itemComponent = World.container(inventory=[])
itemComponent.owner = newObject
if objectSpawner:
objectSpawnerComponent = World.objectSpawner(itemComponent, Globals.TIMERS, time, newObject, spawnOdds, container, cycles, repeat, active)
else:
objectSpawnerComponent = None
if itemGrabHandler:
itemGrabHandlerComponent = World.itemGrabHandler(notDroppable)
else:
itemGrabHandlerComponent = None
if mobSpawner:
mobFileMod = mobFile.split("/")
# print mobFileMod
# print Globals.mobsFromFile
# for mob in Globals.mobsFromFile:
# if mob.name == mobFileMod[1]:
# mobref = mob
#print mobMode
mobSpawnerComponent = World.mobSpawner(newObject, Globals.TIMERS, mobTime, mobFileMod[1], mobSpawnOdds, mobCycles, mode=mobMode, active=mobActive)
else:
mobSpawnerComponent = None
#print kind
if kind == 'item':
# print itemGrabHandler
# print objectSpawnerComponent
#print isCarryable
itemComponent.isCarryable = isCarryable
itemComponent.respawns = respawns
itemComponent.itemGrabHandler = itemGrabHandlerComponent
itemComponent.objectSpawner = objectSpawnerComponent
itemComponent.onUse = onUse
#itemComponent = World.item(isCarryable, respawns, itemGrabHandlerComponent, objectSpawnerComponent)
if kind == 'container':
itemComponent.isLocked = isLocked
itemComponent.isCarryable = isCarryable
itemComponent.respawns = respawns
itemComponent.respawnContents = respawnContents
itemComponent.itemGrabHandler = itemGrabHandlerComponent
itemComponent.objectSpawner = objectSpawnerComponent
itemComponent.inventory = []
#itemComponent = World.container(isLocked, isCarryable, respawns, respawnContents, itemGrabHandlerComponent, objectSpawnerComponent)
#print newObject.name
if kind is not None:
newObject.kind = itemComponent
if mobSpawner:
newObject.mobSpawner = mobSpawnerComponent
else:
newObject.mobSpawner = None
#print newObject.kind
fromFileList.append(newObject)
# printing suite
print "name:" + str(newObject.name)
print "description:" + str(newObject.description)
print "currentRoom:" + str(newObject.currentRoom)
print "isVisible:" + str(newObject.isVisible)
print "spawnContainer:" + str(newObject.spawnContainer)
print "longDescription:" + str(newObject.longDescription)
print "kind:" + str(newObject.kind)
print "TIMERS:" + str(newObject.TIMERS)
if newObject.kind is not None:
if isinstance(newObject.kind, World.item):
print "kind.isCarryable:" + str(newObject.kind.isCarryable)
print "kind.respawns:" + str(newObject.kind.respawns)
print "kind.itemGrabHandler:" + str(newObject.kind.itemGrabHandler)
print "kind.objectSpawner:" + str(newObject.kind.objectSpawner)
print "kind.onUse:" + str(newObject.kind.onUse)
if isinstance(newObject.kind, World.container):
print "kind.inventory:" + str(newObject.kind.inventory)
print "kind.isLocked:" + str(newObject.kind.isLocked)
print "kind.isCarryable:" + str(newObject.kind.isCarryable)
print "kind.respawns:" + str(newObject.kind.respawns)
print "kind.respawnContents:" + str(newObject.kind.respawnContents)
print "kind.itemGrabHandler:" + str(newObject.kind.itemGrabHandler)
print "kind.objectSpawner:" + str(newObject.kind.objectSpawner)
if newObject.kind.itemGrabHandler is not None:
print "kind.itemGrabHandler.notDroppable:" + str(newObject.kind.itemGrabHandler.notDroppable)
if newObject.kind.objectSpawner is not None:
print "kind.objectSpawner.owner:" + str(newObject.kind.objectSpawner.owner)
print "kind.objectSpawner.TIMERS:" + str(newObject.kind.objectSpawner.TIMERS)
print "kind.objectSpawner.time:" + str(newObject.kind.objectSpawner.time)
print "kind.objectSpawner.obj:" + str(newObject.kind.objectSpawner.obj)
print "kind.objectSpawner.oddsList:" + str(newObject.kind.objectSpawner.oddsList)
print "kind.objectSpawner.container:" + str(newObject.kind.objectSpawner.container)
print "kind.objectSpanwer.cycles:" + str(newObject.kind.objectSpawner.cycles)
print "kind.objectSpawner.repeat:" + str(newObject.kind.objectSpawner.repeat)
print "kind.objectSpawner.active:" + str(newObject.kind.objectSpawner.active)
print "kind.objectSpawner.timer:" + str(newObject.kind.objectSpawner.timer)
print "kind.objectSpawner.startingLocation:" + str(newObject.kind.objectSpawner.startingLocation)
print "mobSpawner:" + str(newObject.mobSpawner)
if newObject.mobSpawner is not None:
#print "mobSpawner.mobFile:" + str(newObject.mobSpawner.mobFile)
print "mobSpawner.time:" + str(newObject.mobSpawner.time)
print "mobSpawner.oddsList:" + str(newObject.mobSpawner.oddsList)
print "mobSpawner.mode:" + str(newObject.mobSpawner.mode)
print "mobSpawner.cycles:" + str(newObject.mobSpawner.cycles)
print "mobSpawner.active:" + str(newObject.mobSpawner.active)
print "\n"
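# Illustrative sketch of an object blueprint file as parsed by
# buildObjectFromFile() above (all values are made up; only keys the parser
# actually looks for are shown). The quoting of the name/description/
# longDescription values is an assumption, inferred from the parser stripping
# one extra character at each end of those values:
#
#   name="rusty key"
#   description="a small rusty key"
#   longDescription="A small key, orange with rust."
#   isVisible=True
#   kind=item
#   kind.isCarryable=True
#   kind.respawns=False
#   kind.itemGrabHandler=True
#   kind.itemGrabHandler.notDroppable=False
#   kind.objectSpawner=True
#   kind.objectSpawner.time=60
#   kind.objectSpawner.spawnOdds=True:1,False:3
#   kind.objectSpawner.container=None
#   kind.objectSpawner.cycles=1
#   kind.objectSpawner.repeat=False
#   kind.objectSpawner.active=True
#
# The spawnOdds value is a comma-separated list of 'value:weight' pairs, split
# first on ',' and then on ':' by the parser.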
def buildEquipmentFromFile(file, location):
print file
if str(file).endswith('~'):
print "\n"
return
path = location + file
with open(path, 'r') as f:
fileData = f.readlines()
newWeapon = None
newArmor = None
equipmentType = None
slot = None
durability = None
maxDurability = None
worth = None
description = None
longDescription = None
hp = None
pp = None
offense = None
defense = None
speed = None
guts = None
luck = None
vitality = None
IQ = None
battleCommands = None
statusEffect = None
onUse = None
isVisible = None
spawnContainer = None
isCarryable = None
respawns = None
itemGrabHandler = None
objectSpawner = None
notDroppable = None
container = None
spawnOdds = None
time = None
active = None
repeat = None
cycles = None
for Data in fileData:
if Data.startswith('type='):
equipmentType = Data[5:-1]
if Data.startswith('ID='):
ID = Data[3:-1]
if Data.startswith('name='):
name = Data[5:-1]
if Data.startswith('slot='):
slot = Data[5:-1]
if Data.startswith('durability='):
durability = Data[11:-1]
if Data.startswith('maxDurability='):
maxDurability = Data[14:-1]
if Data.startswith('worth='):
worth = Data[6:-1]
if Data.startswith('description='):
description = Data[12:-1]
if Data.startswith('longDescription='):
longDescription = Data[16:-1]
if Data.startswith('hp='):
hp = int(Data[3:-1])
if Data.startswith('pp='):
pp = int(Data[3:-1])
if Data.startswith('offense='):
offense = int(Data[8:-1])
if Data.startswith('defense='):
defense = int(Data[8:-1])
if Data.startswith('speed='):
speed = int(Data[6:-1])
if Data.startswith('guts='):
guts = int(Data[5:-1])
if Data.startswith('luck='):
luck = int(Data[5:-1])
if Data.startswith('vitality='):
vitality = int(Data[9:-1])
if Data.startswith('IQ='):
IQ = int(Data[3:-1])
if Data.startswith('battleCommands='):
battleCommands = Data[15:-1]
battleCommands = battleCommands.split(",")
if Data.startswith('statusEffect='):
statusEffect = Data[13:-1]
if Data.startswith('onUse='):
onUse = Data[6:-1]
if Data.startswith('isVisible='):
isVisible = Data[10:-1]
if isVisible == 'True':
isVisible = True
elif isVisible == 'False':
isVisible = False
if Data.startswith('kind.isCarryable='):
isCarryable = Data[17:-1]
if isCarryable == "True":
isCarryable = True
elif isCarryable == "False":
isCarryable = False
if Data.startswith('kind.respawns='):
respawns = Data[14:-1]
if respawns == "True":
respawns = True
elif respawns == "False":
respawns = False
if Data.startswith('kind.itemGrabHandler='):
itemGrabHandler = Data[21:-1]
if itemGrabHandler == "True":
itemGrabHandler = True
elif itemGrabHandler == "False":
itemGrabHandler = False
if Data.startswith('kind.itemGrabHandler.notDroppable='):
notDroppable = Data[34:-1]
if notDroppable == "True":
notDroppable = True
elif notDroppable == "False":
notDroppable = False
if Data.startswith('kind.objectSpawner='):
objectSpawner = Data[19:-1]
if objectSpawner == 'True':
objectSpawner = True
elif objectSpawner == 'False':
objectSpawner = False
if Data.startswith('kind.objectSpawner.time='):
time = int(Data[24:-1])
if Data.startswith('kind.objectSpawner.spawnOdds='):
text = Data[29:-1]
oddsList = text.split(',')
#print "oddsList:" + str(oddsList)
nestedOddsList = []
for odds in oddsList:
nestedOddsList.append(odds.split(':'))
for oddsEntry in nestedOddsList:
oddsEntry[1] = int(oddsEntry[1])
if oddsEntry[0] == 'True':
oddsEntry[0] = True
elif oddsEntry[0] == 'False':
oddsEntry[0] = False
#print nestedOddsList
spawnOdds = nestedOddsList
if Data.startswith('kind.objectSpawner.container='):
text = Data[29:-1]
if text == 'None':
container = None
else:
container = text[1:-1] # this should be a reference to another object
container = container.split(', ')
if Data.startswith('kind.objectSpawner.cycles='):
cycles = int(Data[26:-1])
if Data.startswith('kind.objectSpawner.repeat='):
text = Data[26:-1]
if text == 'True':
repeat = True
elif text == 'False':
repeat = False
if Data.startswith('kind.objectSpawner.active='):
text = Data[26:-1]
#print "***active:" + text
if text == 'True':
active = True
elif text == 'False':
active = False
if equipmentType == 'weapon':
newWeapon = World.weapon()
elif equipmentType == 'armor':
newArmor = World.armor()
if itemGrabHandler == True:
newItemGrabHandler = World.itemGrabHandler(notDroppable=notDroppable)
else:
newItemGrabHandler = None
if objectSpawner == True:
        newObjectSpawner = World.objectSpawner(owner=None, TIMERS=Globals.TIMERS, time=time, obj=None, oddsList=spawnOdds, container=container, cycles=cycles, repeat=repeat, active=active)
else:
newObjectSpawner = None
newEquipment = World.equipment(owner=None, weapon=newWeapon, armor=newArmor, slot=slot, durability=durability, maxDurability=maxDurability, worth=worth, hp=hp, pp=pp, offense=offense, defense=defense, speed=speed, guts=guts, luck=luck, vitality=vitality, IQ=IQ, battleCommands=battleCommands, statusEffect=statusEffect, onUse=onUse)
newItem = World.item(isCarryable=isCarryable, respawns=respawns, itemGrabHandler=newItemGrabHandler, objectSpawner=newObjectSpawner, equipment=newEquipment, onUse=onUse)
if newItem.itemGrabHandler:
newItem.itemGrabHandler.owner = newItem
if newItem.objectSpawner:
newItem.objectSpawner.owner = newItem
newEquipment.owner = newItem
newObject = World.Object(name=name, description=description, isVisible=isVisible, spawnContainer=spawnContainer, longDescription=longDescription, kind=newItem)
if newObject.kind.objectSpawner:
newObject.kind.objectSpawner.obj = newObject
newObject.ID = ID
newItem.owner = newObject
equipmentFromFile.append(newObject)
print "\n"
print "name:" + str(newObject.name)
print "description:" + str(newObject.description)
print "currentRoom:" + str(newObject.currentRoom)
print "isVisible:" + str(newObject.isVisible)
print "spawnContainer:" + str(newObject.spawnContainer)
print "longDescription:" + str(newObject.longDescription)
print "kind:" + str(newObject.kind)
#print "TIMERS:" + str(newObject.TIMERS)
if newObject.kind is not None:
print "kind.owner:" + str(newObject.kind.owner)
print "kind.equipment:" + str(newObject.kind.equipment)
print "kind.equipment.owner" + str(newObject.kind.equipment.owner)
if hasattr(newObject.kind.equipment, 'weapon'):
if newObject.kind.equipment.weapon is not None:
print "weapon:" + str(newObject.kind.equipment.weapon)
if hasattr(newObject.kind.equipment, 'armor'):
if newObject.kind.equipment.armor is not None:
print "armor:" + str(newObject.kind.equipment.armor)
print "slot:" + str(newObject.kind.equipment.slot)
print "durability:" + str(newObject.kind.equipment.durability)
print "maxDurability:" + str(newObject.kind.equipment.maxDurability)
print "worth:" + str(newObject.kind.equipment.worth)
if newObject.kind.equipment.hp != 0:
print "hp:" + str(newObject.kind.equipment.hp)
if newObject.kind.equipment.pp != 0:
print "pp:" + str(newObject.kind.equipment.pp)
if newObject.kind.equipment.offense != 0:
print "offense:" + str(newObject.kind.equipment.offense)
if newObject.kind.equipment.defense != 0:
print "defense:" + str(newObject.kind.equipment.defense)
if newObject.kind.equipment.speed != 0:
print "speed:" + str(newObject.kind.equipment.speed)
if newObject.kind.equipment.guts != 0:
print "guts:" + str(newObject.kind.equipment.guts)
if newObject.kind.equipment.luck != 0:
print "luck:" + str(newObject.kind.equipment.luck)
if newObject.kind.equipment.vitality != 0:
print "vitality:" + str(newObject.kind.equipment.vitality)
if newObject.kind.equipment.IQ != 0:
print "IQ:" + str(newObject.kind.equipment.IQ)
if newObject.kind.equipment.statusEffect is not None:
if newObject.kind.equipment.statusEffect != '':
print "statusEffect:" + str(newObject.kind.equipment.statusEffect)
if newObject.kind.equipment.battleCommands is not None:
if newObject.kind.equipment.battleCommands != ['']:
print "battleCommands:" + str(newObject.kind.equipment.battleCommands)
if newObject.kind.equipment.onUse is not None:
if newObject.kind.equipment.onUse != '':
print "onUse:" + str(newObject.kind.equipment.onUse)
if newObject.kind.itemGrabHandler is not None:
print "kind.itemGrabHandler:" + str(newObject.kind.itemGrabHandler)
print "kind.itemGrabHandler.notDroppable:" + str(newObject.kind.itemGrabHandler.notDroppable)
if newObject.kind.objectSpawner is not None:
print "kind.objectSpawner:" + str(newObject.kind.objectSpawner)
print "kind.objectSpawner.owner:" + str(newObject.kind.objectSpawner.owner)
print "kind.objectSpawner.TIMERS:" + str(newObject.kind.objectSpawner.TIMERS)
print "kind.objectSpawner.time:" + str(newObject.kind.objectSpawner.time)
print "kind.objectSpawner.obj:" + str(newObject.kind.objectSpawner.obj)
print "kind.objectSpawner.oddsList:" + str(newObject.kind.objectSpawner.oddsList)
print "kind.objectSpawner.container:" + str(newObject.kind.objectSpawner.container)
print "kind.objectSpanwer.cycles:" + str(newObject.kind.objectSpawner.cycles)
print "kind.objectSpawner.repeat:" + str(newObject.kind.objectSpawner.repeat)
print "kind.objectSpawner.active:" + str(newObject.kind.objectSpawner.active)
print "kind.objectSpawner.timer:" + str(newObject.kind.objectSpawner.timer)
print "kind.objectSpawner.startingLocation:" + str(newObject.kind.objectSpawner.startingLocation)
print "\n"
return newObject
for obj in fileList:
buildObjectFromFile(obj)
print savedEqFileList
if savedEqFileList == []:
for obj in eqFileList:
buildEquipmentFromFile(obj, 'blueprints/equip/')
else:
loadSavedEq()
| apache-2.0 | 8,426,455,827,758,730,000 | 31.665399 | 333 | 0.702867 | false |
bjornaa/roppy | roppy/averator.py | 1 | 1728 | # -*- coding: utf-8 -*-
"""Generator for moving averages from ROMS file(s)"""
import numpy as np
def roms_averator(ncid, var_name, L, grd):
"""Generator for moving averages from ROMS file(s)
var_name : text string, name of NetCDF variable
ncid : an open NetCDF Dataset or MFDataset
grd : a roppy.SGrid instance
L : integer, length of averaging period (only even presently)
    """
    n_rec = len(ncid.dimensions['ocean_time'])   # Number of time records
# TODO: Make grd optional
    #   Only use of grd is to allow working on a subdomain,
# alternatively: use subgrid specification
# make attribute grd.subgrid
N = L // 2
assert 2*N == L, "Only even averaging periods allowed (presently)"
# Dimension and staggering
if var_name == 'u': # 3D u-point
I, J = grd.Iu, grd.Ju
s = (slice(None), grd.Ju, grd.Iu)
elif var_name == 'v': # 3D v-point
I, J = grd.Iv, grd.Jv
s = (slice(None), grd.Jv, grd.Iv)
elif var_name == "ocean_time": # scalar
s = ()
else: # default = 3D rho-point
I, J = grd.I, grd.J
s = (slice(None), grd.J, grd.I)
# First average
    MF = ncid.variables[var_name][(0,) + s]/(4*N)
    for t in range(1, 2*N):
        MF += ncid.variables[var_name][(t,) + s] / (2*N)
    MF += ncid.variables[var_name][(2*N,) + s]/(4*N)
yield MF
# Update the average
for t in range(N+1, n_rec - N):
        MF += ncid.variables[var_name][(t+N,) + s]/(4*N)
        MF += ncid.variables[var_name][(t+N-1,) + s]/(4*N)
        MF -= ncid.variables[var_name][(t-N,) + s]/(4*N)
        MF -= ncid.variables[var_name][(t-N-1,) + s]/(4*N)
yield MF
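# Usage sketch (illustrative; the file name and variable name are assumptions):
#
#   from netCDF4 import Dataset
#   import roppy
#
#   ncid = Dataset('ocean_his.nc')      # a ROMS history/average file
#   grd = roppy.SGrid(ncid)
#   for field_mean in roms_averator(ncid, 'temp', 4, grd):
#       print(field_mean.mean())
#
# Each iteration yields a running mean over L records (here L=4), advanced one
# record at a time, with the two end records weighted by half.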
| mit | 2,815,143,418,121,291,300 | 29.315789 | 73 | 0.545718 | false |
PrincetonML/AND4NMF | code/compute_error.py | 1 | 1397 | import numpy as np
from numpy.linalg import norm
def compute_error(A_in, Ag_in):
A = A_in
Ag = Ag_in
    # realign: match each column of Ag with its closest column of A
D = A.shape[1]
inner = np.zeros((D, D))
for i in range(D):
for j in range(D):
inner[i, j] = np.asscalar(A[:, i].transpose() * Ag[:, j] )/(norm(A[:, i]) * norm(Ag[:, j]))
max = np.argmax(inner, axis = 0)
P = np.asmatrix(np.zeros((D, D)))
for i in range(D):
P[i, max[i]] = 1
# print "normalize the rows of A and A^*"
inv_norm_A = np.asarray(1.0 / np.apply_along_axis(norm, 0, A))
A = A * np.diag(inv_norm_A)
inv_norm_Ag = np.asarray(1.0 / np.apply_along_axis(norm, 0, Ag))
Ag = Ag * np.diag(inv_norm_Ag)
u = np.asmatrix(np.ones((1, D)))
#for each A_i^* we try to find the A_i that is closest to A_i^*
error = 0
for i in range(D):
Ag_i = Ag[:, i]
inner_product = np.asmatrix(Ag_i.transpose() * A)
norm_A = np.asmatrix(np.diag(A.transpose() * A))
z = np.divide(inner_product, norm_A).transpose()
z = np.asarray(z).flatten().transpose()
scalar = np.diag(z)
As = A * scalar
diff = np.apply_along_axis(norm, 0, As - Ag_i * u)
# min_idx = np.argmin(diff)
# print 'for Ag_%d: A_%d' % (i, min_idx)
difmin = np.amin(diff)
difmin = difmin * difmin
error = error + difmin
return error
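# Minimal self-check (illustrative sketch, not part of the original module): it
# builds a random ground-truth dictionary Ag, permutes and rescales its columns
# to obtain A, and checks that compute_error() is close to zero, since the
# error is invariant to column permutation and scaling.
if __name__ == "__main__":
    Ag_demo = np.asmatrix(np.random.randn(50, 10))
    perm = np.random.permutation(10)
    A_demo = Ag_demo[:, perm] * 0.5        # permuted, rescaled copy of Ag_demo
    print(compute_error(A_demo, Ag_demo))  # expected to be close to 0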
| mit | -702,603,614,609,093,800 | 30.75 | 103 | 0.531854 | false |
Tong-Chen/scikit-learn | sklearn/ensemble/gradient_boosting.py | 1 | 44936 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
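# Typical usage of the public estimators defined below (illustrative sketch
# only; see the GradientBoostingClassifier / GradientBoostingRegressor
# docstrings for the full parameter list):
#
#   from sklearn.ensemble import GradientBoostingClassifier
#   clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
#                                    max_depth=3)
#   clf.fit(X_train, y_train)
#   proba = clf.predict_proba(X_test)
#
# Regression works the same way through GradientBoostingRegressor, with one of
# the regression losses ('ls', 'lad', 'huber', 'quantile') defined further down.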
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from warnings import warn
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, array2d, check_arrays, column_or_1d
from ..utils.extmath import logsumexp
from ..utils.fixes import unique
from ..externals import six
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import MSE, PresortBestSplitter
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0)")
self.alpha = alpha
def fit(self, X, y):
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y):
self.mean = np.mean(y)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
def fit(self, X, y):
n_pos = np.sum(y)
n_neg = y.shape[0] - n_pos
if n_neg == 0 or n_pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = np.log(n_pos / n_neg)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y):
class_counts = np.bincount(y)
self.priors = class_counts / float(y.shape[0])
def predict(self, X):
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self, X, y):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_mask, learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : np.ndarray, shape=(n, m)
The data array.
y : np.ndarray, shape=(n,)
The target labels.
residual : np.ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k])
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression")
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred):
return np.mean((y - pred.ravel()) ** 2.0)
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_mask, learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred):
return np.abs(y - pred.ravel()).mean()
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
tree.value[leaf, 0, 0] = np.median(y.take(terminal_region, axis=0) -
pred.take(terminal_region, axis=0))
class HuberLossFunction(RegressionLossFunction):
    """Huber loss function for robust regression; combines squared loss for
    small deviations with absolute loss beyond the ``alpha`` quantile. """
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
return (sq_loss + lin_loss) / y.shape[0]
def negative_gradient(self, y, pred, **kargs):
pred = pred.ravel()
diff = y - pred
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred):
        """Huber loss updates terminal regions to the median plus the mean of
        the gamma-clipped deviations from that median. """
terminal_region = np.where(terminal_regions == leaf)[0]
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = np.median(diff)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
return (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred):
        """Quantile loss updates terminal regions to the alpha-percentile of
        the residuals. """
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
val = stats.scoreatpercentile(diff, self.percentile)
tree.value[leaf, 0] = val
class BinomialDeviance(LossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - 1.0 / (1.0 + np.exp(-pred.ravel()))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(y - prob) / sum(prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
numerator = residual.sum()
denominator = np.sum((y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
class MultinomialDeviance(LossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
numerator = residual.sum()
numerator *= (self.K - 1) / self.K
denominator = np.sum((y - residual) * (1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'bdeviance': BinomialDeviance,
'mdeviance': MultinomialDeviance,
                  'deviance': None}  # for both multinomial and binomial
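# Illustrative sketch (added for this document; not part of the original module):
# how the mapping above is typically resolved, mirroring _check_params() below.
#
#   loss_class = LOSS_FUNCTIONS['huber']   # -> HuberLossFunction
#   loss_obj = loss_class(1, 0.9)          # huber/quantile also take an alpha
#   # 'deviance' maps to None and is resolved at fit time to BinomialDeviance
#   # or MultinomialDeviance, depending on the number of classes.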
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
max_features=self.max_features,
random_state=random_state)
sample_weight = None
if self.subsample < 1.0:
sample_weight = sample_mask.astype(np.float64)
tree.fit(X, residual,
sample_weight=sample_weight, check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_mask, self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0")
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0")
if (self.loss not in self._SUPPORTED_LOSS or
self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss in ('mdeviance', 'bdeviance'):
warn(("Loss '{0:s}' is deprecated as of version 0.14. "
"Use 'deviance' instead. ").format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if self.subsample <= 0.0 or self.subsample > 1:
raise ValueError("subsample must be in (0,1]")
if self.init is not None:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init must be valid estimator")
self.init_ = self.init
else:
self.init_ = self.loss_.init_estimator()
if not (0.0 < self.alpha and self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0)")
if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                # classification ("auto") defaults to sqrt(n_features),
                # regression to all features
                if self.n_classes_ > 1:
                    max_features = max(1, int(np.sqrt(self.n_features)))
                else:
                    max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
max_features = int(self.max_features * self.n_features)
self.max_features_ = max_features
def fit(self, X, y):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
Returns
-------
self : object
Returns self.
"""
# Check input
X, = check_arrays(X, dtype=DTYPE, sparse_format="dense",
check_ccontiguous=True)
y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
self.n_features = n_features
random_state = check_random_state(self.random_state)
# Check parameters
self._check_params()
        # pull frequently used parameters into local scope
subsample = self.subsample
loss_ = self.loss_
do_oob = subsample < 1.0
# allocate model state data structures
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
if do_oob:
self._oob_score_ = np.zeros((self.n_estimators), dtype=np.float64)
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
sample_mask = np.ones((n_samples,), dtype=np.bool)
n_inbag = max(1, int(subsample * n_samples))
if self.verbose:
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
if do_oob:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
verbose_fmt = ' '.join(verbose_fmt)
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
            # print verbose info each time (i + 1) % verbose_mod == 0
verbose_mod = 1
start_time = time()
# fit initial model
self.init_.fit(X, y)
# init predictions
y_pred = self.init_.predict(X)
# init criterion and splitter
criterion = MSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
random_state)
# perform boosting iterations
for i in range(self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_mask,
criterion, splitter, random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask])
self._oob_score_[i] = loss_(y[~sample_mask],
y_pred[~sample_mask])
self.oob_improvement_[i] = old_oob_score - self._oob_score_[i]
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = self.loss_(y, y_pred)
if self.verbose > 0:
if (i + 1) % verbose_mod == 0:
oob_impr = self.oob_improvement_[i] if do_oob else 0
remaining_time = ((self.n_estimators - (i + 1)) *
(time() - start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(verbose_fmt.format(iter=i + 1,
train_score=self.train_score_[i],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
verbose_mod *= 10
return self
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError("Estimator not fitted, call `fit` "
                             "before making predictions.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. Classes are
ordered by arithmetical order. Regression and binary
classification are special cases with ``k == 1``,
otherwise ``k==n_classes``.
"""
X = array2d(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. Classes are
ordered by arithmetical order. Regression and binary
classification are special cases with ``k == 1``,
otherwise ``k==n_classes``.
"""
X = array2d(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.n_estimators):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
@property
def oob_score_(self):
        warn("The oob_score_ attribute is replaced by oob_improvement_"
" as of version 0.14 and will be removed in 0.16.",
DeprecationWarning)
try:
return self._oob_score_
except AttributeError:
raise ValueError("Estimator not fitted, "
"call `fit` before `oob_score_`.")
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Parameters
----------
loss : {'deviance'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
    max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency).
If greater than 1 then it prints progress and performance for every tree.
Attributes
----------
`feature_importances_` : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_improvement_` : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
`oob_score_` : array, shape = [n_estimators]
Score of the training dataset obtained using an out-of-bag estimate.
The i-th score ``oob_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the out-of-bag sample.
Deprecated: use `oob_improvement_` instead.
`train_score_` : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
`loss_` : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'mdeviance', 'bdeviance')
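    # Usage sketch (added for illustration; not part of the original file). The
    # random data below is a hypothetical example; only the estimator API
    # documented above is assumed.
    #
    #   import numpy as np
    #   rng = np.random.RandomState(0)
    #   X = rng.rand(100, 4)
    #   y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
    #   clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1,
    #                                    max_depth=3, random_state=0)
    #   clf.fit(X, y)
    #   proba = clf.predict_proba(X)   # shape (100, 2)
    #   labels = clf.predict(X)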
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2, min_samples_leaf=1,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0):
super(GradientBoostingClassifier, self).__init__(
loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, max_depth, init, subsample, max_features,
random_state, verbose=verbose)
def fit(self, X, y):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
Returns
-------
self : object
Returns self.
"""
y = column_or_1d(y, warn=True)
self.classes_, y = unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return super(GradientBoostingClassifier, self).fit(X, y)
def _score_to_proba(self, score):
"""Compute class probability estimates from decision scores. """
proba = np.ones((score.shape[0], self.n_classes_), dtype=np.float64)
if not self.loss_.is_multi_class:
proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel()))
proba[:, 0] -= proba[:, 1]
else:
proba = np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
return proba
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
score = self.decision_function(X)
return self._score_to_proba(score)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples, n_classes]
            The class probabilities of the input samples at each stage.
"""
for score in self.staged_decision_function(X):
yield self._score_to_proba(score)
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
proba = self.predict_proba(X)
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
    def staged_predict(self, X):
        """Predict classes at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples]
            The predicted classes of the input samples at each stage.
"""
for proba in self.staged_predict_proba(X):
yield self.classes_.take(np.argmax(proba, axis=1), axis=0)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency).
If greater than 1 then it prints progress and performance for every tree.
Attributes
----------
`feature_importances_` : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_improvement_` : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
`oob_score_` : array, shape = [n_estimators]
Score of the training dataset obtained using an out-of-bag estimate.
The i-th score ``oob_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the out-of-bag sample.
Deprecated: use `oob_improvement_` instead.
`train_score_` : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
`loss_` : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
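    # Usage sketch (added for illustration; not part of the original file). The
    # synthetic data below is hypothetical; only the API documented above is assumed.
    #
    #   import numpy as np
    #   rng = np.random.RandomState(0)
    #   X = rng.rand(200, 3)
    #   y = 2.0 * X[:, 0] + np.sin(X[:, 1])
    #   est = GradientBoostingRegressor(loss='huber', alpha=0.9, n_estimators=100)
    #   est.fit(X, y)
    #   y_hat = est.predict(X)
    #   # est.staged_predict(X) yields predictions after each boosting stage,
    #   # useful for choosing n_estimators on a held-out set.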
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2, min_samples_leaf=1,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0):
super(GradientBoostingRegressor, self).__init__(
loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, max_depth, init, subsample, max_features,
random_state, alpha, verbose)
def fit(self, X, y):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
Returns
-------
self : object
Returns self.
"""
self.n_classes_ = 1
return super(GradientBoostingRegressor, self).fit(X, y)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y: array of shape = [n_samples]
The predicted values.
"""
return self.decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples]
            The predicted values of the input samples at each stage.
"""
for y in self.staged_decision_function(X):
yield y.ravel()
| bsd-3-clause | 8,973,508,359,669,950,000 | 36.260365 | 81 | 0.582228 | false |
linuxdeepin/deepin-ui | dtk/ui/tooltip.py | 1 | 22624 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Xia Bin
#
# Author: Xia Bin <[email protected]>
# Maintainer: Xia Bin <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from animation import Animation, LinerInterpolator
from gtk import gdk
from label import Label
from theme import ui_theme
from utils import propagate_expose, color_hex_to_cairo, cairo_disable_antialias
import cairo
import gobject
import gtk
__all__ = ["text", "custom", "show_tooltip", "show_delay", "hide_delay", "hide_duration",
"background", "padding", "show_now", "has_shadow", "disable", "always_update",
"disable_all"]
class ChildLocation:
def __init__(self):
self.x = 0
self.y = 0
self.child = None
self.container = None
def window_to_alloc(widget, x, y):
if widget.get_has_window() and widget.parent:
(wx, wy) = widget.window.get_position()
x += wx - widget.allocation.x
y += wy - widget.allocation.y
else:
x -= widget.allocation.x
y -= widget.allocation.y
return (x, y)
def child_location_foreach(widget, cl): #cl = child_location
if not widget.is_drawable():
return
if widget.get_realized() and not cl.child :
        #TODO: unpacking here may be invalid.
(x, y) = cl.container.translate_coordinates(widget, int(cl.x), int(cl.y))
if x >= 0 and x < widget.allocation.width and \
y >=0 and y < widget.allocation.height:
if isinstance(widget, gtk.Container):
tmp = ChildLocation()
(tmp.x, tmp.y, tmp.container) = (x, y, widget)
widget.forall(child_location_foreach, tmp)
if tmp.child:
cl.child = tmp.child
else:
cl.child = widget
else:
cl.child = widget
def coords_to_parent(window, x, y):
if window.get_window_type() == gdk.WINDOW_OFFSCREEN:
(px, py) = (-1, -1)
window.emit("to-embedder", window, x, y, px, py)
return (px, py)
else:
p = window.get_position()
return (x + p[0], y + p[1])
def find_at_coords(gdkwindow, window_x, window_y):
cl = ChildLocation()
try:
widget = gdkwindow.get_user_data()
except:
return (None, cl.x, cl.y)
cl.x = window_x
cl.y = window_y
while gdkwindow and gdkwindow != widget.window:
(cl.x, cl.y) = coords_to_parent(gdkwindow, cl.x, cl.y)
gdkwindow = gdkwindow.get_effective_parent()
if not gdkwindow:
return (None, cl.x, cl.y)
(cl.x, cl.y) = window_to_alloc(widget, cl.x, cl.y)
#find child
if isinstance(widget, gtk.Container):
cl.container = widget
cl.child = None
tmp_widget = widget
widget.forall(child_location_foreach, cl)
if cl.child and WidgetInfo.get_info(cl.child):
widget = cl.child
elif cl.container and WidgetInfo.get_info(cl.container):
widget = cl.container
(cl.x, cl.y) = tmp_widget.translate_coordinates(widget, int(cl.x), int(cl.y))
if WidgetInfo.get_info(widget):
return (widget, cl.x, cl.y)
p = widget.get_parent()
while p:
if WidgetInfo.get_info(p):
return (p, cl.x, cl.y)
else:
p = p.get_parent()
return (None, cl.x, cl.y)
def update_tooltip():
'''
    This function is invoked for every gdk event received,
    so keep it as fast as possible.
'''
if TooltipInfo.enable_count == 0:
return
try :
(window, x, y) = display.get_window_at_pointer()
except:
return True
(widget, tx, ty) = find_at_coords(window, x, y)
if widget == None:
pass
# print "nop"
if not widget \
or tx < 0 or tx >= widget.allocation.width \
or ty < 0 or ty >= widget.allocation.height:
hide_tooltip()
return True
if TooltipInfo.widget != widget:
TooltipInfo.prewidget = widget
TooltipInfo.winfo = WidgetInfo.get_info(widget)
TooltipInfo.show_delay = TooltipInfo.winfo.show_delay
TooltipInfo.tmpwidget = widget
(rx, ry) = window.get_origin()
if TooltipInfo.pos_info != (int(rx+x), int(ry+y)) and TooltipInfo.show_id != 0:
hide_tooltip()
if TooltipInfo.show_id == 0:
if TooltipInfo.in_quickshow:
show_delay = 300
else:
show_delay = TooltipInfo.winfo.show_delay
TooltipInfo.pos_info = (int(rx+x), int(ry+y))
TooltipInfo.show_id = gobject.timeout_add(show_delay, lambda : show_tooltip(*TooltipInfo.pos_info))
def show_now():
try :
(window, x, y) = display.get_window_at_pointer()
except:
return True
(widget, tx, ty) = find_at_coords(window, x, y)
if widget == None:
pass
if not widget \
or tx < 0 or tx >= widget.allocation.width \
or ty < 0 or ty >= widget.allocation.height:
hide_tooltip()
return True
if TooltipInfo.widget != widget:
TooltipInfo.prewidget = widget
TooltipInfo.winfo = WidgetInfo.get_info(widget)
TooltipInfo.show_delay = TooltipInfo.winfo.show_delay
TooltipInfo.tmpwidget = widget
(rx, ry) = window.get_origin()
if TooltipInfo.pos_info != (int(rx+x), int(ry+y)) and TooltipInfo.show_id != 0:
hide_tooltip()
show_tooltip(int(rx+x), int(ry+y))
class TooltipInfo:
widget = None
tmpwidget = None
prewidget = None
pos_info = None
window = None
alignment = None
winfo = None
offset_x = 5
offset_y = 5
on_showing = False
need_update = True
#displays = []
stamp = 0
enable_count = 0
show_id = 0
in_quickshow = False
quickshow_id = 0
quickshow_delay = 2500
def generate_tooltip_content():
''' generate child widget and update the TooltipInfo'''
if TooltipInfo.widget == TooltipInfo.prewidget and TooltipInfo.alignment.child and not TooltipInfo.need_update:
return
TooltipInfo.widget = TooltipInfo.tmpwidget
TooltipInfo.winfo = WidgetInfo.get_info(TooltipInfo.widget)
winfo = TooltipInfo.winfo
pre_child = TooltipInfo.alignment.child
if pre_child and winfo == WidgetInfo.get_info(pre_child) and not TooltipInfo.need_update:
return
if winfo.custom:
child = winfo.custom(*winfo.custom_args, **winfo.custom_kargs)
elif winfo.text:
child = Label(winfo.text, *winfo.text_args, **winfo.text_kargs)
else:
        raise Warning, "a tooltip-enabled widget must have a text or custom property"
if pre_child:
TooltipInfo.alignment.remove(pre_child)
pre_child.destroy()
TooltipInfo.alignment.set_padding(winfo.padding_t, winfo.padding_l, winfo.padding_b, winfo.padding_r)
TooltipInfo.alignment.add(child)
TooltipInfo.alignment.show_all()
allocation = gtk.gdk.Rectangle(0, 0, *TooltipInfo.alignment.child.size_request())
allocation.width += winfo.padding_l + winfo.padding_r
allocation.height += winfo.padding_t + winfo.padding_b
TooltipInfo.window.size_allocate(allocation)
TooltipInfo.window.modify_bg(gtk.STATE_NORMAL, winfo.background)
if winfo.always_update:
TooltipInfo.need_update = True
else:
TooltipInfo.need_update = False
def enable_quickshow():
def disable_q():
TooltipInfo.in_quickshow = False
if TooltipInfo.quickshow_id != 0:
gobject.source_remove(TooltipInfo.quickshow_id)
TooltipInfo.in_quickshow = True
if TooltipInfo.quickshow_id == 0:
TooltipInfo.quickshow_id = gobject.timeout_add(TooltipInfo.quickshow_delay, disable_q)
else:
gobject.source_remove(TooltipInfo.quickshow_id)
TooltipInfo.quickshow_id = gobject.timeout_add(TooltipInfo.quickshow_delay, disable_q)
def hide_tooltip():
TooltipInfo.window.hide()
TooltipInfo.on_showing = False
if TooltipInfo.show_id != 0:
gobject.source_remove(TooltipInfo.show_id)
TooltipInfo.show_id = 0
if TooltipInfo.window.get_realized():
TooltipInfo.window.animation.stop()
return False
def show_tooltip(x, y):
if TooltipInfo.enable_count == 0 or not TooltipInfo.winfo.enable:
return
generate_tooltip_content()
enable_quickshow()
#What will happen if the content widget is very big?
#----------------------------------------------
(p_w, p_h) = (10, 10) #TODO: pointer size ?
(w, h) = TooltipInfo.window.get_root_window().get_size()
(t_w, t_h) = TooltipInfo.window.size_request()
if x + p_w + t_w > w:
POS_H = 0 #left
else:
POS_H = 1 #right
if y + p_h + t_h > h:
POS_V = 2 #top
else:
        POS_V = 4 #bottom
p = POS_H + POS_V
######################################
# LEFT(0) RIGHT(1) #
#------------------------------------#
#TOP(2) 2 3 #
#------------------------------------#
#BOTTOM(4) 4 5 #
######################################
if p == 2:
TooltipInfo.window.move(x - t_w, y - t_h)
elif p == 3:
TooltipInfo.window.move(x, y - t_h)
elif p == 4:
TooltipInfo.window.move(x - t_w, y)
elif p == 5:
TooltipInfo.window.move(x + p_w, y + p_h)
else:
        assert False, "This shouldn't appear!!!!!!"
#------------------------------------------
TooltipInfo.window.show()
TooltipInfo.on_showing = True
def __init_window():
def on_realize(win):
win.swindow = gtk.gdk.Window(win.get_parent_window(),
width=0, height=0,
window_type=gtk.gdk.WINDOW_TEMP,
wclass=gtk.gdk.INPUT_OUTPUT,
event_mask=(win.get_events() | gdk.EXPOSURE_MASK),
visual=win.get_visual(),
colormap=win.get_colormap(),
)
win.swindow.set_user_data(win)
        #TODO: set duration dynamically
win.animation = Animation([win.window, win.swindow], gdk.Window.set_opacity, 1000, [0, 1],
lambda *args: 1 - LinerInterpolator(*args))
def on_map(win):
winfo = TooltipInfo.winfo
win.animation.init(1)
win.animation.start_after(winfo.hide_delay)
geo = win.window.get_geometry()
win.swindow.move_resize(geo[0]+TooltipInfo.offset_x, geo[1]+TooltipInfo.offset_y,
win.allocation.width, win.allocation.height)
win.swindow.show()
def on_expose_event(win, e):
cr = win.swindow.cairo_create()
cr.set_source_rgba(1, 1, 1, 0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
winfo = TooltipInfo.winfo
if winfo.has_shadow:
(x, y, width, height) = (0, 0, win.allocation.width, win.allocation.height)
(o_x, o_y) = (5, 5)
#right-bottom corner
radial = cairo.RadialGradient(width - o_x, height-o_y, 1, width -o_x, height-o_y, o_x)
radial.add_color_stop_rgba(0.0, 0,0,0, 0.3)
radial.add_color_stop_rgba(0.6, 0,0,0, 0.1)
radial.add_color_stop_rgba(1, 0,0,0, 0)
cr.set_source(radial)
cr.rectangle(width-o_x, height-o_y, o_x, o_y)
cr.fill()
#left-bottom corner
radial = cairo.RadialGradient(o_x, height-o_y, 1, o_x, height-o_y, o_x)
radial.add_color_stop_rgba(0.0, 0,0,0, 0.3)
radial.add_color_stop_rgba(0.6, 0,0,0, 0.1)
radial.add_color_stop_rgba(1, 0,0,0, 0)
cr.set_source(radial)
cr.rectangle(0, height-o_y, o_x, o_y)
cr.fill()
#left-top corner
radial = cairo.RadialGradient(width-o_x, o_y, 1, width-o_x, o_y, o_x)
radial.add_color_stop_rgba(0.0, 0,0,0, 0.3)
radial.add_color_stop_rgba(0.6, 0,0,0, 0.1)
radial.add_color_stop_rgba(1, 0,0,0, 0)
cr.set_source(radial)
cr.rectangle(width-o_x, 0, o_x, o_y)
cr.fill()
vradial = cairo.LinearGradient(0, height-o_y, 0, height)
vradial.add_color_stop_rgba(0.0, 0,0,0, .5)
vradial.add_color_stop_rgba(0.4, 0,0,0, 0.25)
vradial.add_color_stop_rgba(1, 0,0,0, 0.0)
cr.set_source(vradial)
cr.rectangle(o_x, height-o_x, width-2*o_x, height)
cr.fill()
hradial = cairo.LinearGradient(width-o_x, 0, width, 0)
hradial.add_color_stop_rgba(0.0, 0,0,0, .5)
hradial.add_color_stop_rgba(0.4, 0,0,0, 0.25)
hradial.add_color_stop_rgba(1, 0,0,0, 0.0)
cr.set_source(hradial)
cr.rectangle(width-o_x, o_y, width, height-2*o_y)
cr.fill()
gtk.Alignment.do_expose_event(TooltipInfo.alignment, e)
propagate_expose(win, e)
return True
def on_unmap(win):
win.swindow.hide()
def on_expose_alignment(widget, event):
'''Expose tooltip label.'''
rect = widget.allocation
cr = widget.window.cairo_create()
with cairo_disable_antialias(cr):
cr.set_line_width(1)
cr.set_source_rgba(*color_hex_to_cairo(ui_theme.get_color("tooltip_frame").get_color()))
cr.rectangle(rect.x + 1, rect.y + 1, rect.width - 1, rect.height - 1)
cr.stroke()
return True
TooltipInfo.window = gtk.Window(gtk.WINDOW_POPUP)
TooltipInfo.window.set_colormap(gtk.gdk.Screen().get_rgba_colormap())
TooltipInfo.alignment = gtk.Alignment()
TooltipInfo.window.add(TooltipInfo.alignment)
TooltipInfo.window.connect('realize', on_realize)
TooltipInfo.window.connect('map', on_map)
TooltipInfo.window.connect('unmap', on_unmap)
TooltipInfo.window.connect('expose-event', on_expose_event)
TooltipInfo.alignment.connect('expose-event', on_expose_alignment)
__init_window()
#TODO:detect display?
#FIXME:
display = None
def init_widget(widget):
TooltipInfo.enable_count += 1
w_info = WidgetInfo()
WidgetInfo.set_info(widget, w_info)
if widget.get_has_window():
widget.add_events(gdk.POINTER_MOTION_MASK|gdk.POINTER_MOTION_HINT_MASK)
else:
widget.connect('realize',
lambda w: w.window.set_events(w.window.get_events() | gdk.POINTER_MOTION_HINT_MASK | gdk.POINTER_MOTION_MASK))
if not display:
init_tooltip(widget)
return w_info
def init_tooltip(win):
global display
if not display:
display = win.get_display()
#gobject.timeout_add(100, lambda : update_tooltip(display))
#win.connect('focus-out-event', lambda w, e: hide_tooltip(True))
win.connect('leave-notify-event', lambda w, e: hide_tooltip())
#
# The interface of the dtk Tooltip; the core is the set of WidgetInfo attributes.
#
class WidgetInfo(object):
__DATA_NAME = "_deepin_tooltip_info"
@staticmethod
def get_info(widget):
return widget.get_data(WidgetInfo.__DATA_NAME)
@staticmethod
def set_info(widget, info):
return widget.set_data(WidgetInfo.__DATA_NAME, info)
def __init__(self):
object.__setattr__(self, "show_delay", 1000)
object.__setattr__(self, "hide_delay", 3000)
object.__setattr__(self, "hide_duration", 1000)
object.__setattr__(self, "text", None)
object.__setattr__(self, "text_args", None)
object.__setattr__(self, "text_kargs", None)
object.__setattr__(self, "custom", None)
object.__setattr__(self, "custom_args", None)
object.__setattr__(self, "custom_kargs", None)
object.__setattr__(self, "background", gtk.gdk.Color(ui_theme.get_color("tooltip_background").get_color()))
object.__setattr__(self, "padding_t", 5)
object.__setattr__(self, "padding_b", 5)
object.__setattr__(self, "padding_l", 5)
object.__setattr__(self, "padding_r", 5)
object.__setattr__(self, "has_shadow", True)
object.__setattr__(self, "enable", False) #don't modify the "enable" init value
object.__setattr__(self, "always_update", False)
def __setattr__(self, key, value):
if hasattr(self, key):
object.__setattr__(self, key, value)
else:
            raise Warning, "Tooltip does not support the \"%s\" property" % key
TooltipInfo.need_update = True
if key == "text" or key == "custom":
self.enable = True
all_method = {}
def chainmethod(func):
all_method[func.__name__] = func
def wrap(*args, **kargs):
return func(*args, **kargs)
wrap.__dict__ = all_method
return wrap
#
# You can write your own wrapper function using "set_value", or directly modify the WidgetInfo attributes.
#
@chainmethod
def set_value(widgets, kv):
if not isinstance(widgets, list):
widgets = [widgets]
for w in widgets:
w_info = WidgetInfo.get_info(w)
if not w_info:
w_info = init_widget(w)
for k in kv:
setattr(w_info, k, kv[k])
    return set_value
#------------------the default wrap function ---------------------------------------
@chainmethod
def text(widget, content, *args, **kargs):
'''
    Set the tooltip's text content.
    "content", "*args" and "**kargs" are passed to dtk.ui.Label,
    so you can change the text's color and other properties.
    @param widget: the widget you want to change.
    @param content: the text you want to show.
    @param args: passed to dtk.ui.Label
    @param kargs: passed to dtk.ui.Label
'''
set_value(widget, {
"text": content,
"text_args":args,
"text_kargs":kargs
})
    return text
@chainmethod
def custom(widget, cb, *args, **kargs):
'''
Set the custom tooltip content.
    @param widget: the widget you want to change.
    @param cb: the function used to generate the content widget. It should return a gtk.Widget. Be careful: if the generated content depends on runtime state, you should also use "always_update"
    to disable the internal caching mechanism.
    @param args: passed to cb
    @param kargs: passed to cb
'''
set_value(widget, {
"custom" : cb,
"custom_args" : args,
"custom_kargs" : kargs
})
    return custom
@chainmethod
def show_delay(widget, delay):
'''
    Set how long the pointer must stay on the widget before the tooltip is shown.
    @param widget: the widget you want to change.
    @param delay: the delay (in milliseconds) before the tooltip starts to show.
'''
delay = max(250, delay)
set_value(widget, {"show_delay": delay})
    return show_delay
@chainmethod
def hide_delay(widget, delay):
'''
    Set the delay before the tooltip starts to hide.
    @param widget: the widget you want to change.
    @param delay: the delay before the tooltip starts to hide.
'''
set_value(widget, {"hide_delay": delay})
    return hide_delay
@chainmethod
def hide_duration(widget, delay):
'''
    Set the duration of the tooltip's hide effect.
    @param widget: the widget you want to change.
    @param delay: the duration of the hide effect.
'''
set_value(widget, {"hide_duration": delay})
    return hide_duration
@chainmethod
def background(widget, color):
'''
    Set the background of the tooltip's content.
    @param widget: the widget you want to change.
    @param color: the gdk.Color of the background.
'''
set_value(widget, {"background": color})
    return background
@chainmethod
def padding(widget, t, l, b, r):
'''
    Set the padding of the tooltip's content.
    @param widget: the widget you want to change.
@param t: the top space
@param l: the left space
@param b: the bottom space
@param r: the right space
'''
kv = {}
if t >= 0:
kv["padding_t"] = int(t)
if b >= 0:
kv["padding_b"] = int(b)
if l >= 0:
kv["padding_l"] = int(l)
if r >= 0:
kv["padding_r"] = int(r)
set_value(widget, kv)
    return padding
@chainmethod
def has_shadow(widget, need):
'''
    Set whether this widget's tooltip needs a shadow.
    @param widget: the widget you want to change.
    @param need: whether a shadow is needed.
'''
set_value(widget, {"has_shadow": need})
    return has_shadow
@chainmethod
def disable(widget, is_disable):
'''
    Disable this widget's tooltip.
    @param widget: the widget whose tooltip you want to disable.
    @param is_disable: whether to disable the tooltip.
'''
winfo = WidgetInfo.get_info(widget)
if is_disable:
if winfo and winfo.enable:
winfo.enable = False
TooltipInfo.enable_count -= 1
else:
if winfo and not winfo.enable:
winfo.enable = True
TooltipInfo.enable_count += 1
    return disable
@chainmethod
def always_update(widget, need):
'''
    Always re-create the tooltip's content. Use this to show
    custom tooltip content generated by a function whose
    returned widget is different on every invocation.
    @param widget: Gtk.Widget instance.
    @param need: whether to always update.
'''
set_value(widget, {"always_update" : need})
    return always_update
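# Usage sketch (added for illustration; not part of the original module). The
# widget name "button" is a placeholder; only functions defined above are used.
#
#   import dtk.ui.tooltip as Tooltip
#   Tooltip.text(button, "Click me")      # plain text tooltip on a widget
#   Tooltip.show_delay(button, 600)       # wait 600 ms before showing it
#   Tooltip.has_shadow(button, False)     # drop the shadow for this widget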
#------------------------this is global effect function---------------------
def disable_all(is_disable):
    '''
    Globally disable or re-enable tooltips.
    '''
count = TooltipInfo.enable_count
if is_disable:
if count > 0:
TooltipInfo.enable_count = -count
else:
if count < 0:
TooltipInfo.enable_count = -count
def tooltip_handler(event):
gtk.main_do_event(event)
if event.type == gdk.MOTION_NOTIFY:
# print "leave", time.time()
update_tooltip()
elif event.type == gdk.LEAVE_NOTIFY:
# print "leave", time.time()
hide_tooltip()
gdk.event_handler_set(tooltip_handler)
| gpl-3.0 | -150,503,247,418,591,170 | 30.730715 | 230 | 0.594457 | false |
Ziemin/telepathy-gabble | tests/twisted/presence/decloak.py | 2 | 2634 | from twisted.words.xish import domish
from gabbletest import exec_test, make_presence
from servicetest import EventPattern, assertEquals
import ns
import constants as cs
def test(q, bus, conn, stream, should_decloak=False):
event = q.expect('stream-iq', query_ns=ns.ROSTER)
event.stanza['type'] = 'result'
stream.send(event.stanza)
# First test is to use the CM param's value
worker(q, bus, conn, stream, should_decloak)
# We can change it at runtime, so flip it to the other value and retry
should_decloak = not should_decloak
conn.Set(cs.CONN_IFACE_GABBLE_DECLOAK, 'DecloakAutomatically',
should_decloak, dbus_interface=cs.PROPERTIES_IFACE)
worker(q, bus, conn, stream, should_decloak)
# Trivial test for SendDirectedPresence()
bob_handle = conn.get_contact_handle_sync('[email protected]')
conn.SendDirectedPresence(bob_handle, False,
dbus_interface=cs.CONN_IFACE_GABBLE_DECLOAK)
q.expect('stream-presence', to='[email protected]')
def worker(q, bus, conn, stream, should_decloak):
decloak_automatically = conn.Get(cs.CONN_IFACE_GABBLE_DECLOAK,
'DecloakAutomatically', dbus_interface=cs.PROPERTIES_IFACE)
assertEquals(should_decloak, decloak_automatically)
amy_handle = conn.get_contact_handle_sync('[email protected]')
# Amy directs presence to us
presence = make_presence('[email protected]/panopticon')
decloak = presence.addElement((ns.TEMPPRES, 'temppres'))
decloak['reason'] = 'media'
stream.send(presence)
events = [
EventPattern('dbus-signal', signal='PresencesChanged',
args=[{amy_handle: (cs.PRESENCE_AVAILABLE, 'available', '')}]),
EventPattern('dbus-signal', signal='DecloakRequested',
args=[amy_handle, 'media', should_decloak]),
]
forbidden = []
if should_decloak:
events.append(EventPattern('stream-presence',
to='[email protected]/panopticon'))
else:
forbidden = [EventPattern('stream-presence')]
q.forbid_events(forbidden)
q.expect_many(*events)
presence = make_presence('[email protected]/panopticon', type='unavailable')
stream.send(presence)
q.expect('dbus-signal', signal='PresencesChanged',
args=[{amy_handle: (cs.PRESENCE_OFFLINE, 'offline', '')}])
q.unforbid_events(forbidden)
if __name__ == '__main__':
exec_test(test,
params={cs.CONN_IFACE_GABBLE_DECLOAK + '.DecloakAutomatically': False})
exec_test(lambda q, b, c, s: test(q, b, c, s, should_decloak=True),
params={cs.CONN_IFACE_GABBLE_DECLOAK + '.DecloakAutomatically': True})
| lgpl-2.1 | -1,589,560,376,580,385,300 | 36.098592 | 79 | 0.665907 | false |
jackjlynch/github-activity-mirror | gh_copy.py | 1 | 1333 | from lxml import html
import requests
import argparse
from datetime import datetime
from git import Repo, Actor
def main():
parser = argparse.ArgumentParser(description='Copy a user\'s Github commit activity')
parser.add_argument('user')
parser.add_argument('repo_dir')
parser.add_argument('name')
parser.add_argument('email')
args = parser.parse_args()
page = requests.get('http://github.com/' + args.user)
tree = html.fromstring(page.content)
days = tree.xpath('//*[@id="contributions-calendar"]/div[1]/svg/g/g/rect')
contribs = {}
for day in days:
date = datetime.strptime(day.get('data-date'), '%Y-%m-%d')
contribs[date] = int(day.get('data-count'))
repo = Repo(args.repo_dir)
assert not repo.bare
start_date = datetime.fromtimestamp(0)
#making some dangerous assumptions here
if len(repo.heads) > 0:
start_date = datetime.fromtimestamp(repo.heads.master.commit.authored_date)
index = repo.index
author = Actor(args.name, args.email)
for date in contribs:
for i in range(contribs[date]):
if date > start_date:
commit = index.commit('', author=author, committer=author, author_date=date.isoformat())
assert commit.type == 'commit'
if __name__ == '__main__':
main()
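# Example invocation (illustrative; the user, path and identity below are
# placeholders matching the argparse arguments defined in main()):
#
#   python gh_copy.py some_github_user /path/to/mirror-repo "Jane Doe" [email protected]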
| gpl-3.0 | 1,612,143,610,113,723,100 | 30 | 104 | 0.642911 | false |
AISystena/web_crawler | lib/image_cnn/ImagePredictor.py | 1 | 1517 | # coding: utf-8
import os.path
import pickle
import numpy as np
#from chainer import cuda
import chainer.functions as F
import Util
"""
CNNใซใใ็ปๅๅ้ก (posi-nega)
- 5ๅฑคใฎใใฃใผใใใฅใผใฉใซใใใ
"""
class ImagePredictor:
def __init__(self, gpu=0):
current_dir_path = os.path.dirname(__file__)
self.model_pkl = current_dir_path + '/model/image_cnn.pkl'
self.gpu = gpu
def load_model(self):
'''
modelใ่ชญใฟ่พผใ
'''
model = None
if os.path.exists(self.model_pkl):
with open(self.model_pkl, 'rb') as pkl:
model = pickle.load(pkl)
return model
def makeGpuAvailable(self, model):
# GPUใไฝฟใใใฉใใ
if self.gpu >= 0:
pass
#cuda.check_cuda_available()
#cuda.get_device(self.gpu).use()
#model.to_gpu()
#xp = np if self.gpu < 0 else cuda.cupy
xp = np
return xp
def predict(self, image_path):
# ใขใใซใฎๅฎ็พฉ
model = self.load_model()
if model is None:
print("model is empty")
exit()
xp = self.makeGpuAvailable(model)
x = Util.load_image(image_path)
x = xp.asarray(x.reshape((1,)+x.shape))
pred_y = F.softmax(model.predictor(x).data).data
for i, p in enumerate(pred_y[0]):
print("[{0:02d}]:{1:.3f}%".format(i, float(p)))
y = xp.argmax(pred_y[0])
return y
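# Usage sketch (added for illustration; the image path is a placeholder and a
# pickled model at model/image_cnn.pkl is assumed to exist):
#
#   predictor = ImagePredictor(gpu=-1)       # negative id keeps everything on the CPU
#   label = predictor.predict("sample.jpg")  # prints per-class scores, returns the argmax
#   print(label)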
| mit | 6,428,009,083,430,815,000 | 23.810345 | 66 | 0.540653 | false |
oscarpilote/Ortho4XP | src/O4_Mesh_Utils.py | 1 | 24769 | import time
import sys
import os
import pickle
import subprocess
import numpy
import requests
from math import sqrt, cos, pi
import O4_DEM_Utils as DEM
import O4_UI_Utils as UI
import O4_File_Names as FNAMES
import O4_Geo_Utils as GEO
import O4_Vector_Utils as VECT
import O4_OSM_Utils as OSM
import O4_Version
if 'dar' in sys.platform:
Triangle4XP_cmd = os.path.join(FNAMES.Utils_dir,"Triangle4XP.app ")
triangle_cmd = os.path.join(FNAMES.Utils_dir,"triangle.app ")
sort_mesh_cmd = os.path.join(FNAMES.Utils_dir,"moulinette.app ")
unzip_cmd = "7z "
elif 'win' in sys.platform:
Triangle4XP_cmd = os.path.join(FNAMES.Utils_dir,"Triangle4XP.exe ")
triangle_cmd = os.path.join(FNAMES.Utils_dir,"triangle.exe ")
sort_mesh_cmd = os.path.join(FNAMES.Utils_dir,"moulinette.exe ")
unzip_cmd = os.path.join(FNAMES.Utils_dir,"7z.exe ")
else:
Triangle4XP_cmd = os.path.join(FNAMES.Utils_dir,"Triangle4XP ")
triangle_cmd = os.path.join(FNAMES.Utils_dir,"triangle ")
sort_mesh_cmd = os.path.join(FNAMES.Utils_dir,"moulinette ")
unzip_cmd = "7z "
community_server=False
if os.path.exists(os.path.join(FNAMES.Ortho4XP_dir,"community_server.txt")):
try:
f=open(os.path.join(FNAMES.Ortho4XP_dir,"community_server.txt"),'r')
for line in f.readlines():
line=line.strip()
if not line: continue
if '#' in line:
if line[0]=='#': continue
else: line=line.split('#')[0].strip()
if not line: continue
community_server=True
community_prefix=line
break
except:
pass
def community_mesh(tile):
if not community_server:
UI.exit_message_and_bottom_line("\nERROR: No community server defined in community_server.txt")
return 0
url=community_prefix+os.path.basename(FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon))+'.7z'
timer=time.time()
UI.vprint(0,"Querying",url,"...")
try:
r=requests.get(url,timeout=30)
if '[200]' in str(r):
UI.vprint(0,"We've got something !")
f=open(FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon)+'.7z','wb')
f.write(r.content)
f.close()
if subprocess.call([unzip_cmd.strip(),'e','-y','-o'+tile.build_dir,FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon)+".7z"]):
UI.exit_message_and_bottom_line("\nERROR: Could not extract community_mesh from archive.")
return 0
os.remove(FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon)+'.7z')
UI.timings_and_bottom_line(timer)
return 1
elif '[40' in str(r):
UI.exit_message_and_bottom_line("\nSORRY: Community server does not propose that mesh: "+str(r))
return 0
elif '[50' in str(r):
UI.exit_message_and_bottom_line("\nSORRY: Community server seems to be down or struggling: "+str(r))
return 0
else:
UI.exit_message_and_bottom_line("\nSORRY: Community server seems to be down or struggling: "+str(r))
return 0
except Exception as e:
UI.exit_message_and_bottom_line("\nERROR: Network or server unreachable:\n"+str(e))
return 0
##############################################################################
def is_in_region(lat,lon,latmin,latmax,lonmin,lonmax):
return lat>=latmin and lat<=latmax and lon>=lonmin and lon<=lonmax
##############################################################################
##############################################################################
def build_curv_tol_weight_map(tile,weight_array):
if tile.apt_curv_tol!=tile.curvature_tol and tile.apt_curv_tol>0:
UI.vprint(1,"-> Modifying curv_tol weight map according to runway locations.")
try:
f=open(FNAMES.apt_file(tile),'rb')
dico_airports=pickle.load(f)
f.close()
except:
UI.vprint(1," WARNING: File",FNAMES.apt_file(tile),"is missing (erased after Step 1?), cannot check airport info for upgraded zoomlevel.")
dico_airports={}
for airport in dico_airports:
(xmin,ymin,xmax,ymax)=dico_airports[airport]['boundary'].bounds
x_shift=1000*tile.apt_curv_ext*GEO.m_to_lon(tile.lat)
y_shift=1000*tile.apt_curv_ext*GEO.m_to_lat
colmin=max(round((xmin-x_shift)*1000),0)
colmax=min(round((xmax+x_shift)*1000),1000)
rowmax=min(round(((1-ymin)+y_shift)*1000),1000)
rowmin=max(round(((1-ymax)-y_shift)*1000),0)
weight_array[rowmin:rowmax+1,colmin:colmax+1]=tile.curvature_tol/tile.apt_curv_tol
if tile.coast_curv_tol!=tile.curvature_tol:
UI.vprint(1,"-> Modifying curv_tol weight map according to coastline location.")
sea_layer=OSM.OSM_layer()
custom_coastline=FNAMES.custom_coastline(tile.lat, tile.lon)
custom_coastline_dir=FNAMES.custom_coastline_dir(tile.lat, tile.lon)
if os.path.isfile(custom_coastline):
UI.vprint(1," * User defined custom coastline data detected.")
sea_layer.update_dicosm(custom_coastline,input_tags=None,target_tags=None)
elif os.path.isdir(custom_coastline_dir):
UI.vprint(1," * User defined custom coastline data detected (multiple files).")
for osm_file in os.listdir(custom_coastline_dir):
UI.vprint(2," ",osm_file)
sea_layer.update_dicosm(os.path.join(custom_coastline_dir,osm_file),input_tags=None,target_tags=None)
sea_layer.write_to_file(custom_coastline)
else:
queries=['way["natural"="coastline"]']
tags_of_interest=[]
if not OSM.OSM_queries_to_OSM_layer(queries,sea_layer,tile.lat,tile.lon,tags_of_interest,cached_suffix='coastline'):
return 0
for nodeid in sea_layer.dicosmn:
(lonp,latp)=[float(x) for x in sea_layer.dicosmn[nodeid]]
if lonp<tile.lon or lonp>tile.lon+1 or latp<tile.lat or latp>tile.lat+1: continue
x_shift=1000*tile.coast_curv_ext*GEO.m_to_lon(tile.lat)
y_shift=tile.coast_curv_ext/(111.12)
colmin=max(round((lonp-tile.lon-x_shift)*1000),0)
colmax=min(round((lonp-tile.lon+x_shift)*1000),1000)
rowmax=min(round((tile.lat+1-latp+y_shift)*1000),1000)
rowmin=max(round((tile.lat+1-latp-y_shift)*1000),0)
weight_array[rowmin:rowmax+1,colmin:colmax+1]=numpy.maximum(weight_array[rowmin:rowmax+1,colmin:colmax+1],tile.curvature_tol/tile.coast_curv_tol)
del(sea_layer)
# It could be of interest to write the weight file as a png for user editing
#from PIL import Image
#Image.fromarray((weight_array!=1).astype(numpy.uint8)*255).save('weight.png')
return
##############################################################################
##############################################################################
def post_process_nodes_altitudes(tile):
dico_attributes=VECT.Vector_Map.dico_attributes
f_node = open(FNAMES.output_node_file(tile),'r')
init_line_f_node=f_node.readline()
nbr_pt=int(init_line_f_node.split()[0])
vertices=numpy.zeros(6*nbr_pt)
UI.vprint(1,"-> Loading of the mesh computed by Triangle4XP.")
for i in range(0,nbr_pt):
vertices[6*i:6*i+6]=[float(x) for x in f_node.readline().split()[1:7]]
end_line_f_node=f_node.readline()
f_node.close()
UI.vprint(1,"-> Post processing of altitudes according to vector data")
f_ele = open(FNAMES.output_ele_file(tile),'r')
nbr_tri= int(f_ele.readline().split()[0])
water_tris=set()
sea_tris=set()
interp_alt_tris=set()
for i in range(nbr_tri):
line = f_ele.readline()
        # triangle attributes are powers of 2, except for the dummy attribute which doesn't require post-treatment
if line[-2]=='0': continue
(v1,v2,v3,attr)=[int(x)-1 for x in line.split()[1:5]]
attr+=1
if attr >= dico_attributes['INTERP_ALT']:
interp_alt_tris.add((v1,v2,v3))
elif attr & dico_attributes['SEA']:
sea_tris.add((v1,v2,v3))
elif attr & dico_attributes['WATER'] or attr & dico_attributes['SEA_EQUIV']:
water_tris.add((v1,v2,v3))
if tile.water_smoothing:
UI.vprint(1," Smoothing inland water.")
for j in range(tile.water_smoothing):
for (v1,v2,v3) in water_tris:
zmean=(vertices[6*v1+2]+vertices[6*v2+2]+vertices[6*v3+2])/3
vertices[6*v1+2]=zmean
vertices[6*v2+2]=zmean
vertices[6*v3+2]=zmean
UI.vprint(1," Smoothing of sea water.")
for (v1,v2,v3) in sea_tris:
if tile.sea_smoothing_mode=='zero':
vertices[6*v1+2]=0
vertices[6*v2+2]=0
vertices[6*v3+2]=0
elif tile.sea_smoothing_mode=='mean':
zmean=(vertices[6*v1+2]+vertices[6*v2+2]+vertices[6*v3+2])/3
vertices[6*v1+2]=zmean
vertices[6*v2+2]=zmean
vertices[6*v3+2]=zmean
else:
vertices[6*v1+2]=max(vertices[6*v1+2],0)
vertices[6*v2+2]=max(vertices[6*v2+2],0)
vertices[6*v3+2]=max(vertices[6*v3+2],0)
UI.vprint(1," Treatment of airports, roads and patches.")
for (v1,v2,v3) in interp_alt_tris:
vertices[6*v1+2]=vertices[6*v1+5]
vertices[6*v2+2]=vertices[6*v2+5]
vertices[6*v3+2]=vertices[6*v3+5]
vertices[6*v1+3]=0
vertices[6*v2+3]=0
vertices[6*v3+3]=0
vertices[6*v1+4]=0
vertices[6*v2+4]=0
vertices[6*v3+4]=0
UI.vprint(1,"-> Writing output nodes file.")
f_node = open(FNAMES.output_node_file(tile),'w')
f_node.write(init_line_f_node)
for i in range(0,nbr_pt):
f_node.write(str(i+1)+" "+' '.join(('{:.15f}'.format(x) for x in vertices[6*i:6*i+6]))+"\n")
f_node.write(end_line_f_node)
f_node.close()
return vertices
##############################################################################
##############################################################################
def write_mesh_file(tile,vertices):
UI.vprint(1,"-> Writing final mesh to the file "+FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon))
f_ele = open(FNAMES.output_ele_file(tile),'r')
nbr_vert=len(vertices)//6
nbr_tri=int(f_ele.readline().split()[0])
f=open(FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon),"w")
f.write("MeshVersionFormatted "+O4_Version.version+"\n")
f.write("Dimension 3\n\n")
f.write("Vertices\n")
f.write(str(nbr_vert)+"\n")
for i in range(0,nbr_vert):
f.write('{:.7f}'.format(vertices[6*i]+tile.lon)+" "+\
'{:.7f}'.format(vertices[6*i+1]+tile.lat)+" "+\
'{:.7f}'.format(vertices[6*i+2]/100000)+" 0\n")
f.write("\n")
f.write("Normals\n")
f.write(str(nbr_vert)+"\n")
for i in range(0,nbr_vert):
f.write('{:.2f}'.format(vertices[6*i+3])+" "+\
'{:.2f}'.format(vertices[6*i+4])+"\n")
f.write("\n")
f.write("Triangles\n")
f.write(str(nbr_tri)+"\n")
for i in range(0,nbr_tri):
f.write(' '.join(f_ele.readline().split()[1:])+"\n")
f_ele.close()
f.close()
return
##############################################################################
##############################################################################
# Build a textured Wavefront .obj file over the extent of an orthogrid cell
##############################################################################
def extract_mesh_to_obj(mesh_file,til_x_left,til_y_top,zoomlevel,provider_code):
UI.red_flag=False
timer=time.time()
(latmax,lonmin)=GEO.gtile_to_wgs84(til_x_left,til_y_top,zoomlevel)
(latmin,lonmax)=GEO.gtile_to_wgs84(til_x_left+16,til_y_top+16,zoomlevel)
obj_file_name=FNAMES.obj_file(til_x_left,til_y_top,zoomlevel,provider_code)
mtl_file_name=FNAMES.mtl_file(til_x_left,til_y_top,zoomlevel,provider_code)
f_mesh=open(mesh_file,"r")
for i in range(4):
f_mesh.readline()
nbr_pt_in=int(f_mesh.readline())
UI.vprint(1," Reading nodes...")
pt_in=numpy.zeros(5*nbr_pt_in,'float')
for i in range(nbr_pt_in):
pt_in[5*i:5*i+3]=[float(x) for x in f_mesh.readline().split()[:3]]
for i in range(3):
f_mesh.readline()
for i in range(nbr_pt_in):
pt_in[5*i+3:5*i+5]=[float(x) for x in f_mesh.readline().split()[:2]]
for i in range(0,2): # skip 2 lines
f_mesh.readline()
if UI.red_flag: UI.exit_message_and_bottom_line(); return 0
UI.vprint(1," Reading triangles...")
nbr_tri_in=int(f_mesh.readline()) # read nbr of tris
textured_nodes={}
textured_nodes_inv={}
nodes_st_coord={}
len_textured_nodes=0
dico_new_tri={}
len_dico_new_tri=0
for i in range(0,nbr_tri_in):
(n1,n2,n3)=[int(x)-1 for x in f_mesh.readline().split()[:3]]
(lon1,lat1,z1,u1,v1)=pt_in[5*n1:5*n1+5]
(lon2,lat2,z2,u2,v2)=pt_in[5*n2:5*n2+5]
(lon3,lat3,z3,u3,v3)=pt_in[5*n3:5*n3+5]
if is_in_region((lat1+lat2+lat3)/3.0,(lon1+lon2+lon3)/3.0,latmin,latmax,lonmin,lonmax):
if n1 not in textured_nodes_inv:
len_textured_nodes+=1
textured_nodes_inv[n1]=len_textured_nodes
textured_nodes[len_textured_nodes]=n1
nodes_st_coord[len_textured_nodes]=GEO.st_coord(lat1,lon1,til_x_left,til_y_top,zoomlevel,provider_code)
n1new=textured_nodes_inv[n1]
if n2 not in textured_nodes_inv:
len_textured_nodes+=1
textured_nodes_inv[n2]=len_textured_nodes
textured_nodes[len_textured_nodes]=n2
nodes_st_coord[len_textured_nodes]=GEO.st_coord(lat2,lon2,til_x_left,til_y_top,zoomlevel,provider_code)
n2new=textured_nodes_inv[n2]
if n3 not in textured_nodes_inv:
len_textured_nodes+=1
textured_nodes_inv[n3]=len_textured_nodes
textured_nodes[len_textured_nodes]=n3
nodes_st_coord[len_textured_nodes]=GEO.st_coord(lat3,lon3,til_x_left,til_y_top,zoomlevel,provider_code)
n3new=textured_nodes_inv[n3]
dico_new_tri[len_dico_new_tri]=(n1new,n2new,n3new)
len_dico_new_tri+=1
nbr_vert=len_textured_nodes
nbr_tri=len_dico_new_tri
if UI.red_flag: UI.exit_message_and_bottom_line(); return 0
UI.vprint(1," Writing the obj file.")
# first the obj file
f=open(obj_file_name,"w")
for i in range(1,nbr_vert+1):
j=textured_nodes[i]
f.write("v "+'{:.9f}'.format(pt_in[5*j]-lonmin)+" "+\
'{:.9f}'.format(pt_in[5*j+1]-latmin)+" "+\
'{:.9f}'.format(pt_in[5*j+2])+"\n")
f.write("\n")
for i in range(1,nbr_vert+1):
j=textured_nodes[i]
f.write("vn "+'{:.9f}'.format(pt_in[5*j+3])+" "+'{:.9f}'.format(pt_in[5*j+4])+" "+'{:.9f}'.format(sqrt(max(1-pt_in[5*j+3]**2-pt_in[5*j+4]**2,0)))+"\n")
f.write("\n")
for i in range(1,nbr_vert+1):
j=textured_nodes[i]
f.write("vt "+'{:.9f}'.format(nodes_st_coord[i][0])+" "+\
'{:.9f}'.format(nodes_st_coord[i][1])+"\n")
f.write("\n")
f.write("usemtl orthophoto\n\n")
for i in range(0,nbr_tri):
(one,two,three)=dico_new_tri[i]
f.write("f "+str(one)+"/"+str(one)+"/"+str(one)+" "+str(two)+"/"+str(two)+"/"+str(two)+" "+str(three)+"/"+str(three)+"/"+str(three)+"\n")
f_mesh.close()
f.close()
# then the mtl file
f=open(mtl_file_name,'w')
f.write("newmtl orthophoto\nmap_Kd "+FNAMES.geotiff_file_name_from_attributes(til_x_left,til_y_top,zoomlevel,provider_code)+"\n")
f.close()
UI.timings_and_bottom_line(timer)
return
##############################################################################
##############################################################################
def build_mesh(tile):
if UI.is_working: return 0
UI.is_working=1
UI.red_flag=False
VECT.scalx=cos((tile.lat+0.5)*pi/180)
UI.logprint("Step 2 for tile lat=",tile.lat,", lon=",tile.lon,": starting.")
UI.vprint(0,"\nStep 2 : Building mesh for tile "+FNAMES.short_latlon(tile.lat,tile.lon)+" : \n--------\n")
UI.progress_bar(1,0)
poly_file = FNAMES.input_poly_file(tile)
node_file = FNAMES.input_node_file(tile)
alt_file = FNAMES.alt_file(tile)
weight_file = FNAMES.weight_file(tile)
if not os.path.isfile(node_file):
UI.exit_message_and_bottom_line("\nERROR: Could not find ",node_file)
return 0
if not tile.iterate and not os.path.isfile(poly_file):
UI.exit_message_and_bottom_line("\nERROR: Could not find ",poly_file)
return 0
if not tile.iterate:
if not os.path.isfile(alt_file):
UI.exit_message_and_bottom_line("\nERROR: Could not find",alt_file,". You must run Step 1 first.")
return 0
try:
fill_nodata = tile.fill_nodata or "to zero"
source= ((";" in tile.custom_dem) and tile.custom_dem.split(";")[0]) or tile.custom_dem
tile.dem=DEM.DEM(tile.lat,tile.lon,source,fill_nodata,info_only=True)
if not os.path.getsize(alt_file)==4*tile.dem.nxdem*tile.dem.nydem:
UI.exit_message_and_bottom_line("\nERROR: Cached raster elevation does not match the current custom DEM specs.\n You must run Step 1 and Step 2 with the same elevation base.")
return 0
except Exception as e:
print(e)
UI.exit_message_and_bottom_line("\nERROR: Could not determine the appropriate source. Please check your custom_dem entry.")
return 0
else:
try:
source= ((";" in tile.custom_dem) and tile.custom_dem.split(";")[tile.iterate]) or tile.custom_dem
tile.dem=DEM.DEM(tile.lat,tile.lon,source,fill_nodata=False,info_only=True)
if not os.path.isfile(alt_file) or not os.path.getsize(alt_file)==4*tile.dem.nxdem*tile.dem.nydem:
tile.dem=DEM.DEM(tile.lat,tile.lon,source,fill_nodata=False,info_only=False)
tile.dem.write_to_file(FNAMES.alt_file(tile))
except Exception as e:
print(e)
UI.exit_message_and_bottom_line("\nERROR: Could not determine the appropriate source. Please check your custom_dem entry.")
return 0
try:
f=open(node_file,'r')
input_nodes=int(f.readline().split()[0])
f.close()
except:
UI.exit_message_and_bottom_line("\nERROR: In reading ",node_file)
return 0
timer=time.time()
tri_verbosity = 'Q' if UI.verbosity<=1 else 'V'
output_poly = 'P' if UI.cleaning_level else ''
do_refine = 'r' if tile.iterate else 'A'
limit_tris = 'S'+str(max(int(tile.limit_tris/1.9-input_nodes),0)) if tile.limit_tris else ''
Tri_option = '-p'+do_refine+'uYB'+tri_verbosity+output_poly+limit_tris
weight_array=numpy.ones((1001,1001),dtype=numpy.float32)
build_curv_tol_weight_map(tile,weight_array)
weight_array.tofile(weight_file)
del(weight_array)
curv_tol_scaling=sqrt(tile.dem.nxdem/(1000*(tile.dem.x1-tile.dem.x0)))
hmin_effective=max(tile.hmin,(tile.dem.y1-tile.dem.y0)*GEO.lat_to_m/tile.dem.nydem/2)
mesh_cmd=[Triangle4XP_cmd.strip(),
Tri_option.strip(),
'{:.9g}'.format(GEO.lon_to_m(tile.lat)),
'{:.9g}'.format(GEO.lat_to_m),
'{:n}'.format(tile.dem.nxdem),
'{:n}'.format(tile.dem.nydem),
'{:.9g}'.format(tile.dem.x0),
'{:.9g}'.format(tile.dem.y0),
'{:.9g}'.format(tile.dem.x1),
'{:.9g}'.format(tile.dem.y1),
'{:.9g}'.format(tile.dem.nodata),
'{:.9g}'.format(tile.curvature_tol*curv_tol_scaling),
'{:.9g}'.format(tile.min_angle),str(hmin_effective),alt_file,weight_file,poly_file]
del(tile.dem) # we no longer need it; free the memory for machines without much RAM
tile.dem=None
UI.vprint(1,"-> Start of the mesh algorithm Triangle4XP.")
UI.vprint(2,' Mesh command:',' '.join(mesh_cmd))
fingers_crossed=subprocess.Popen(mesh_cmd,stdout=subprocess.PIPE,bufsize=0)
while True:
line = fingers_crossed.stdout.readline()
if not line:
break
else:
try:
print(line.decode("utf-8")[:-1])
except:
pass
time.sleep(0.3)
fingers_crossed.poll()
if fingers_crossed.returncode:
UI.vprint(0,"\nWARNING: Triangle4XP could not achieve the requested quality (min_angle), most probably due to an uncatched OSM error.\n"+\
"It will be tempted now with no angle constraint (i.e. min_angle=0).")
mesh_cmd[-5]='{:.9g}'.format(0)
fingers_crossed=subprocess.Popen(mesh_cmd,stdout=subprocess.PIPE,bufsize=0)
while True:
line = fingers_crossed.stdout.readline()
if not line:
break
else:
try:
print(line.decode("utf-8")[:-1])
except:
pass
time.sleep(0.3)
fingers_crossed.poll()
if fingers_crossed.returncode:
UI.exit_message_and_bottom_line("\nERROR: Triangle4XP really couldn't make it !\n\n"+\
"If the reason is not due to the limited amount of RAM please\n"+\
"file a bug including the .node and .poly files that you\n"+\
"will find in "+str(tile.build_dir)+".\n")
return 0
if UI.red_flag: UI.exit_message_and_bottom_line(); return 0
vertices=post_process_nodes_altitudes(tile)
if UI.red_flag: UI.exit_message_and_bottom_line(); return 0
write_mesh_file(tile,vertices)
#
if UI.cleaning_level:
try: os.remove(FNAMES.weight_file(tile))
except: pass
try: os.remove(FNAMES.output_node_file(tile))
except: pass
try: os.remove(FNAMES.output_ele_file(tile))
except: pass
if UI.cleaning_level>2:
try: os.remove(FNAMES.alt_file(tile))
except: pass
try: os.remove(FNAMES.input_node_file(tile))
except: pass
try: os.remove(FNAMES.input_poly_file(tile))
except: pass
UI.timings_and_bottom_line(timer)
UI.logprint("Step 2 for tile lat=",tile.lat,", lon=",tile.lon,": normal exit.")
return 1
##############################################################################
##############################################################################
def sort_mesh(tile):
if UI.is_working: return 0
UI.is_working=1
UI.red_flag=False
mesh_file = FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon)
if not os.path.isfile(mesh_file):
UI.exit_message_and_bottom_line("\nERROR: Could not find ",mesh_file)
return 0
sort_mesh_cmd_list=[sort_mesh_cmd.strip(),str(tile.default_zl),mesh_file]
UI.vprint(1,"-> Reorganizing mesh triangles.")
timer=time.time()
moulinette=subprocess.Popen(sort_mesh_cmd_list,stdout=subprocess.PIPE,bufsize=0)
while True:
line = moulinette.stdout.readline()
if not line:
break
else:
print(line.decode("utf-8")[:-1])
UI.timings_and_bottom_line(timer)
UI.logprint("Moulinette applied for tile lat=",tile.lat,", lon=",tile.lon," and ZL",tile.default_zl)
return 1
##############################################################################
##############################################################################
def triangulate(name,path_to_Ortho4XP_dir):
Tri_option = ' -pAYPQ '
mesh_cmd=[os.path.join(path_to_Ortho4XP_dir,triangle_cmd).strip(),Tri_option.strip(),name+'.poly']
fingers_crossed=subprocess.Popen(mesh_cmd,stdout=subprocess.PIPE,bufsize=0)
while True:
line = fingers_crossed.stdout.readline()
if not line:
break
else:
print(line.decode("utf-8")[:-1])
fingers_crossed.poll()
if fingers_crossed.returncode:
print("\nERROR: triangle crashed, check osm mask data.\n")
return 0
return 1
##############################################################################
| gpl-3.0 | -8,716,772,008,718,751,000 | 45.039033 | 197 | 0.559772 | false |
Treeki/NewerSMBW | Koopatlas/src/worldeditor.py | 1 | 5227 | from common import *
import re
def editableColourStr(array):
return '#%02X%02X%02X (%d)' % tuple(array)
NICE_STR_RE = re.compile('^#([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})\s*(?:\((\d+)\))?$')
def colourFromNiceStr(thing):
match = NICE_STR_RE.match(thing)
try:
if match:
r,g,b,a = match.groups()
return (int(r,16), int(g,16), int(b,16), int(a) if a is not None else 255)
except:
pass
return None
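# Illustrative round-trip examples (added comments, not in the original module):
#   editableColourStr([255, 0, 128, 64])  -> '#FF0080 (64)'
#   colourFromNiceStr('#FF0080 (64)')     -> (255, 0, 128, 64)
#   colourFromNiceStr('#FF0080')          -> (255, 0, 128, 255)   # alpha defaults to 255
# Strings that do not match NICE_STR_RE simply return None.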
class KPWorldTableModel(QtCore.QAbstractTableModel):
FIELDS = ('Name', 'World ID', 'Track ID',
'FS Text 1', 'FS Text 2',
'FS Hint 1', 'FS Hint 2',
'HUD Text 1', 'HUD Text 2',
'HUD Hue', 'HUD Saturation', 'HUD Lightness',
'Title Level')
def __init__(self, kpmap, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self.currentMap = kpmap
self.worlds = kpmap.worlds
def columnCount(self, parent):
return len(self.FIELDS)
def headerData(self, section, orientation, role):
if orientation == Qt.Horizontal:
if role == Qt.DisplayRole:
return self.FIELDS[section]
else:
if role == Qt.DisplayRole:
return str(self.worlds[section].uniqueKey)
return QtCore.QVariant()
def rowCount(self, parent):
if parent.isValid():
return 0
else:
return len(self.worlds)
def data(self, index, role):
if index.isValid():
entry = self.worlds[index.row()]
col = index.column()
if role == Qt.DisplayRole or role == Qt.EditRole:
if col == 0:
return entry.name
elif col == 1:
return entry.worldID
elif col == 2:
return entry.musicTrackID
elif col == 3 or col == 4:
return editableColourStr(entry.fsTextColours[col - 3])
elif col == 5 or col == 6:
return editableColourStr(entry.fsHintColours[col - 5])
elif col == 7 or col == 8:
return editableColourStr(entry.hudTextColours[col - 7])
elif col >= 9 and col <= 11:
return entry.hudHintTransform[col - 9]
elif col == 12:
return entry.titleScreenID
if role == Qt.DecorationRole:
if col == 3 or col == 4:
return QtGui.QColor(*entry.fsTextColours[col - 3])
elif col == 5 or col == 6:
return QtGui.QColor(*entry.fsHintColours[col - 5])
elif col == 7 or col == 8:
return QtGui.QColor(*entry.hudTextColours[col - 7])
return QtCore.QVariant()
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
def setData(self, index, value, role):
if index.isValid():
if role == Qt.EditRole:
success = False
entry = self.worlds[index.row()]
col = index.column()
if col == 0:
entry.name = str(value.toString())
success = True
elif col == 1:
entry.worldID = str(value.toString())
success = True
elif col == 2:
v,ok = value.toInt()
if ok:
entry.musicTrackID = v
success = True
elif col >= 3 and col <= 8:
newCol = colourFromNiceStr(str(value.toString()))
if newCol:
success = True
if col == 3:
entry.fsTextColours = (newCol, entry.fsTextColours[1])
elif col == 4:
entry.fsTextColours = (entry.fsTextColours[0], newCol)
elif col == 5:
entry.fsHintColours = (newCol, entry.fsHintColours[1])
elif col == 6:
entry.fsHintColours = (entry.fsHintColours[0], newCol)
elif col == 7:
entry.hudTextColours = (newCol, entry.hudTextColours[1])
elif col == 8:
entry.hudTextColours = (entry.hudTextColours[0], newCol)
elif col >= 9 and col <= 11:
v,ok = value.toInt()
if ok:
new = list(entry.hudHintTransform)
new[col - 9] = v
entry.hudHintTransform = new
success = True
elif col == 12:
entry.titleScreenID = str(value.toString())
success = True
if success:
self.dataChanged.emit(index, index)
return success
return False
def addEntryToEnd(self):
self.beginInsertRows(QtCore.QModelIndex(), len(self.worlds), len(self.worlds))
entry = KPWorldDef()
entry.uniqueKey = self.currentMap.allocateWorldDefKey()
self.worlds.append(entry)
self.endInsertRows()
def removeRows(self, row, count, parent):
if not parent.isValid():
if row >= 0 and (row + count) <= len(self.worlds):
self.beginRemoveRows(parent, row, row+count-1)
for i in xrange(count):
del self.worlds[row]
self.endRemoveRows()
class KPWorldEditor(QtGui.QWidget):
def __init__(self, kpmap, parent=None):
QtGui.QWidget.__init__(self, parent, Qt.Window)
self.setWindowTitle('World Editor')
self.dataView = QtGui.QTableView(self)
self.addButton = QtGui.QPushButton('Add', self)
self.removeButton = QtGui.QPushButton('Remove', self)
layout = QtGui.QGridLayout(self)
layout.addWidget(self.dataView, 0, 0, 1, 2)
layout.addWidget(self.addButton, 1, 0, 1, 1)
layout.addWidget(self.removeButton, 1, 1, 1, 1)
self.model = KPWorldTableModel(kpmap, self)
self.dataView.setModel(self.model)
self.addButton.clicked.connect(self.model.addEntryToEnd)
self.removeButton.clicked.connect(self.removeCurrentEntry)
def removeCurrentEntry(self):
what = self.dataView.selectionModel().currentIndex()
if what.isValid():
what = what.row()
key = self.model.worlds[what].uniqueKey
self.model.removeRows(what, 1, QtCore.QModelIndex())
| mit | 4,182,107,282,050,427,000 | 27.562842 | 96 | 0.650277 | false |
daviddrysdale/pynamo | dynamo2.py | 1 | 10913 | """Implementation of Dynamo
Second iteration, adding detection of missing responses and expansion of the preference list."""
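# Added note (not part of the original source): with the defaults used below (N=3 replicas,
# W=2 write acknowledgements, R=2 read acknowledgements) the quorum condition R + W > N holds
# (2 + 2 > 3), so any read quorum and any write quorum drawn from the same N-node preference
# list overlap in at least one node, and a successful read sees at least one copy of the
# latest successfully written value.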
import copy
import random
import logging
import logconfig
from node import Node
from framework import Framework
from hash_multiple import ConsistentHashTable
from dynamomessages import ClientPut, ClientGet, ClientPutRsp, ClientGetRsp
from dynamomessages import PutReq, GetReq, PutRsp, GetRsp
from dynamomessages import DynamoRequestMessage
logconfig.init_logging()
_logger = logging.getLogger('dynamo')
# PART dynamonode
class DynamoNode(Node):
timer_priority = 20
T = 10 # Number of repeats for nodes in consistent hash table
N = 3 # Number of nodes to replicate at
W = 2 # Number of nodes that need to reply to a write operation
R = 2 # Number of nodes that need to reply to a read operation
nodelist = []
chash = ConsistentHashTable(nodelist, T)
def __init__(self):
super(DynamoNode, self).__init__()
self.local_store = {} # key => (value, metadata)
self.pending_put_rsp = {} # seqno => set of nodes that have stored
self.pending_put_msg = {} # seqno => original client message
self.pending_get_rsp = {} # seqno => set of (node, value, metadata) tuples
self.pending_get_msg = {} # seqno => original client message
# seqno => set of requests sent to other nodes, for each message class
self.pending_req = {PutReq: {}, GetReq: {}}
self.failed_nodes = []
# Rebuild the consistent hash table
DynamoNode.nodelist.append(self)
DynamoNode.chash = ConsistentHashTable(DynamoNode.nodelist, DynamoNode.T)
# PART reset
@classmethod
def reset(cls):
cls.nodelist = []
cls.chash = ConsistentHashTable(cls.nodelist, cls.T)
# PART storage
def store(self, key, value, metadata):
self.local_store[key] = (value, metadata)
def retrieve(self, key):
if key in self.local_store:
return self.local_store[key]
else:
return (None, None)
# PART rsp_timer_pop
def rsp_timer_pop(self, reqmsg):
# no response to this request; treat the destination node as failed
_logger.info("Node %s now treating node %s as failed", self, reqmsg.to_node)
self.failed_nodes.append(reqmsg.to_node)
failed_requests = Framework.cancel_timers_to(reqmsg.to_node)
failed_requests.append(reqmsg)
for failedmsg in failed_requests:
self.retry_request(failedmsg)
def retry_request(self, reqmsg):
if not isinstance(reqmsg, DynamoRequestMessage):
return
# Send the request to an additional node by regenerating the preference list
preference_list = DynamoNode.chash.find_nodes(reqmsg.key, DynamoNode.N, self.failed_nodes)[0]
kls = reqmsg.__class__
# Check the pending-request list for this type of request message
if kls in self.pending_req and reqmsg.msg_id in self.pending_req[kls]:
for node in preference_list:
if node not in [req.to_node for req in self.pending_req[kls][reqmsg.msg_id]]:
# Found a node on the new preference list that hasn't been sent the request.
# Send it a copy
newreqmsg = copy.copy(reqmsg)
newreqmsg.to_node = node
self.pending_req[kls][reqmsg.msg_id].add(newreqmsg)
Framework.send_message(newreqmsg)
# PART rcv_clientput
def rcv_clientput(self, msg):
preference_list = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)[0]
# Determine if we are in the list
if self not in preference_list:
# Forward to the coordinator for this key
_logger.info("put(%s=%s) maps to %s", msg.key, msg.value, preference_list)
coordinator = preference_list[0]
Framework.forward_message(msg, coordinator)
else:
# Use an incrementing local sequence number to distinguish
# multiple requests for the same key
seqno = self.generate_sequence_number()
_logger.info("%s, %d: put %s=%s", self, seqno, msg.key, msg.value)
metadata = (self.name, seqno) # For now, metadata is just sequence number at coordinator
# Send out to preference list, and keep track of who has replied
self.pending_req[PutReq][seqno] = set()
self.pending_put_rsp[seqno] = set()
self.pending_put_msg[seqno] = msg
reqcount = 0
for node in preference_list:
# Send message to get node in preference list to store
putmsg = PutReq(self, node, msg.key, msg.value, metadata, msg_id=seqno)
self.pending_req[PutReq][seqno].add(putmsg)
Framework.send_message(putmsg)
reqcount = reqcount + 1
if reqcount >= DynamoNode.N:
# preference_list may have more than N entries to allow for failed nodes
break
# PART rcv_clientget
def rcv_clientget(self, msg):
preference_list = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)[0]
# Determine if we are in the list
if self not in preference_list:
# Forward to the coordinator for this key
_logger.info("get(%s=?) maps to %s", msg.key, preference_list)
coordinator = preference_list[0]
Framework.forward_message(msg, coordinator)
else:
seqno = self.generate_sequence_number()
self.pending_req[GetReq][seqno] = set()
self.pending_get_rsp[seqno] = set()
self.pending_get_msg[seqno] = msg
reqcount = 0
for node in preference_list:
getmsg = GetReq(self, node, msg.key, msg_id=seqno)
self.pending_req[GetReq][seqno].add(getmsg)
Framework.send_message(getmsg)
reqcount = reqcount + 1
if reqcount >= DynamoNode.N:
# preference_list may have more than N entries to allow for failed nodes
break
# PART rcv_put
def rcv_put(self, putmsg):
_logger.info("%s: store %s=%s", self, putmsg.key, putmsg.value)
self.store(putmsg.key, putmsg.value, putmsg.metadata)
putrsp = PutRsp(putmsg)
Framework.send_message(putrsp)
# PART rcv_putrsp
def rcv_putrsp(self, putrsp):
seqno = putrsp.msg_id
if seqno in self.pending_put_rsp:
self.pending_put_rsp[seqno].add(putrsp.from_node)
if len(self.pending_put_rsp[seqno]) >= DynamoNode.W:
_logger.info("%s: written %d copies of %s=%s so done", self, DynamoNode.W, putrsp.key, putrsp.value)
_logger.debug(" copies at %s", [node.name for node in self.pending_put_rsp[seqno]])
# Tidy up tracking data structures
original_msg = self.pending_put_msg[seqno]
del self.pending_req[PutReq][seqno]
del self.pending_put_rsp[seqno]
del self.pending_put_msg[seqno]
# Reply to the original client
client_putrsp = ClientPutRsp(original_msg)
Framework.send_message(client_putrsp)
else:
pass # Superfluous reply
# PART rcv_get
def rcv_get(self, getmsg):
_logger.info("%s: retrieve %s=?", self, getmsg.key)
(value, metadata) = self.retrieve(getmsg.key)
getrsp = GetRsp(getmsg, value, metadata)
Framework.send_message(getrsp)
# PART rcv_getrsp
def rcv_getrsp(self, getrsp):
seqno = getrsp.msg_id
if seqno in self.pending_get_rsp:
self.pending_get_rsp[seqno].add((getrsp.from_node, getrsp.value, getrsp.metadata))
if len(self.pending_get_rsp[seqno]) >= DynamoNode.R:
_logger.info("%s: read %d copies of %s=? so done", self, DynamoNode.R, getrsp.key)
_logger.debug(" copies at %s", [(node.name, value) for (node, value, _) in self.pending_get_rsp[seqno]])
# Build up all the distinct values/metadata values for the response to the original request
results = set([(value, metadata) for (node, value, metadata) in self.pending_get_rsp[seqno]])
# Tidy up tracking data structures
original_msg = self.pending_get_msg[seqno]
del self.pending_req[GetReq][seqno]
del self.pending_get_rsp[seqno]
del self.pending_get_msg[seqno]
# Reply to the original client, including all received values
client_getrsp = ClientGetRsp(original_msg,
[value for (value, metadata) in results],
[metadata for (value, metadata) in results])
Framework.send_message(client_getrsp)
else:
pass # Superfluous reply
# PART rcvmsg
def rcvmsg(self, msg):
if isinstance(msg, ClientPut):
self.rcv_clientput(msg)
elif isinstance(msg, PutReq):
self.rcv_put(msg)
elif isinstance(msg, PutRsp):
self.rcv_putrsp(msg)
elif isinstance(msg, ClientGet):
self.rcv_clientget(msg)
elif isinstance(msg, GetReq):
self.rcv_get(msg)
elif isinstance(msg, GetRsp):
self.rcv_getrsp(msg)
else:
raise TypeError("Unexpected message type %s", msg.__class__)
# PART get_contents
def get_contents(self):
results = []
for key, value in self.local_store.items():
results.append("%s:%s" % (key, value[0]))
return results
# PART clientnode
class DynamoClientNode(Node):
timer_priority = 17
def put(self, key, metadata, value, destnode=None):
if destnode is None: # Pick a random node to send the request to
destnode = random.choice(DynamoNode.nodelist)
putmsg = ClientPut(self, destnode, key, value, metadata)
Framework.send_message(putmsg)
def get(self, key, destnode=None):
if destnode is None: # Pick a random node to send the request to
destnode = random.choice(DynamoNode.nodelist)
getmsg = ClientGet(self, destnode, key)
Framework.send_message(getmsg)
def rsp_timer_pop(self, reqmsg):
if isinstance(reqmsg, ClientPut): # retry
_logger.info("Put request timed out; retrying")
self.put(reqmsg.key, reqmsg.metadata, reqmsg.value)
elif isinstance(reqmsg, ClientGet): # retry
_logger.info("Get request timed out; retrying")
self.get(reqmsg.key)
# PART clientrcvmsg
def rcvmsg(self, msg):
pass # Client does nothing with results
| gpl-2.0 | 6,073,878,917,768,870,000 | 43.004032 | 121 | 0.607441 | false |
3WiseMen/python | pystock/pystock_xingAPI/xarequest.py | 1 | 23907 | #xarequest.py
import os
import threading
import time
import win32com.client
from .abstract_component import AbstractQueryProviderComponent
from .abstract_component import AbstractSubscriptionProviderComponent
class XAQueryEvents:
def __init__(self):
self._event_object_connector = XAQueryEvents.event_object_connector
def OnReceiveData(self, szTrCode):
try:
self._event_object_connector.logger.debug("OnReceiveData("+str(szTrCode)+")")
self._event_object_connector.on_receive_data_arg = szTrCode
#Originally the outblock access code would have to run in this thread, but since the whole
#query call blocks (the query itself does not return early), it is safe even if control
#is handed over to another thread here. Subscriptions, however, are a different case.
except Exception as ex:
self._event_object_connector.logger.warn("OnReceiveData error: %s", ex)
self._event_object_connector.logger.debug(ex, exc_info=True)
return None
finally:
#? is tr_code validation needed here?
self._event_object_connector.on_receive_data_event.set()
def OnReceiveMessage(self, bIsSystemError, szMessageCode, szMessage):
try:
self._event_object_connector.logger.debug("OnReceiveMessage("+", ".join([str(bIsSystemError), str(szMessageCode), str(szMessage)])+")")
self._event_object_connector.on_receive_message_arg = (bIsSystemError, szMessageCode, szMessage)
except Exception as ex:
self._event_object_connector.logger.warn("OnReceiveMessage error: %s", ex)
self._event_object_connector.logger.debug(ex, exc_info=True)
return None
finally:
self._event_object_connector.on_receive_message_event.set()
class XARealEvents:
def __init__(self):
self._event_object_connector = XARealEvents.event_object_connector
def OnReceiveRealData(self, *args):
start_time = time.time()
try:
self._event_object_connector.logger.debug("OnReceiveRealData("+str(args)+")")
outblock = self._read_outblocks(self._event_object_connector.res_blocks)
self._event_object_connector.logger.debug(str(outblock))
self._event_object_connector.queue.put(outblock)
except Exception as ex:
self._event_object_connector.logger.warn("OnReceiveRealData error: %s", ex)
self._event_object_connector.logger.debug(ex, exc_info=True)
return None
finally:
self._event_object_connector.logger.debug('[It took %fs]', time.time() - start_time)
pass
def _read_outblocks(self, res_blocks, comp_yn_flag = False):
outblocks = filter(lambda b:not b['is_input'], res_blocks)
ret = dict()
for block in outblocks:
if not block['is_occurs']:
sub_ret = dict()
for arg in block['args']:
sub_ret[arg['code']] = self._event_object_connector.xaquery_xareal.GetFieldData(block['bname'], arg['code'])
else:
sub_ret = list()
block_count = 0
if comp_yn_flag: # if compressed?
decompressed_size = self._event_object_connector.xaquery_xareal.Decompress(block['bname'])
if decompressed_size > 0:
block_count = self._event_object_connector.xaquery_xareal.GetBlockCount(block['bname'])
else:
block_count = self._event_object_connector.xaquery_xareal.GetBlockCount(block['bname'])
for occur in range(block_count):
sub_sub_ret = dict()
for arg in block['args']:
sub_sub_ret[arg['code']] = self._event_object_connector.xaquery_xareal.GetFieldData(block['bname'], arg['code'])
sub_ret.append(sub_sub_ret)
ret[block['bname']] = sub_ret
return ret
class EventObjectConnector:
logger = None
class XARequest(AbstractQueryProviderComponent, AbstractSubscriptionProviderComponent):
def __init__(self, xasession, res_info, query_block_timeout):
self._res_info = res_info
self._xasession = xasession
self.query_block_timeout = float(query_block_timeout)
def init(self):
self.logger.info("Initializing XARequest_" + self._res_info['header']['tr_code'])
self.event_object_connector = EventObjectConnector()
self.event_object_connector.logger = self.logger.getChild(self._res_info['header']['tr_code'] + "_events")
if self._res_info['header']['is_query']:
self._init_xaquery()
else:
self._init_xareal()
self.logger.debug('self.xaquery_xareal.GetTrCode():%s', self.xaquery_xareal.GetTrCode())
def getAvailableQueryCodeSet(self):
if self._res_info['header']['is_query']:
return {'xing.'+self._res_info['header']['tr_code']}
else:
return {}
def getAvailableSubscriptionCodeSet(self):
if not self._res_info['header']['is_query']:
return {'xing.'+self._res_info['header']['tr_code']}
else:
return {}
def query(self, query_code, arg_set):
try:
if not self._res_info['header']['is_query']:
return None
if query_code.startswith('xing.'):
query_code = query_code.split('.')[1]
else:
return None
self.logger.info("query has been received(%s)", query_code)
self._verify_arguments(query_code, arg_set)
#self.logger.debug("argumentd verified")
# Actual inblock handling
comp_yn_flag = self._write_inblocks(arg_set)
self.event_object_connector.on_receive_message_event.clear()
self.event_object_connector.on_receive_data_event.clear()
continu_query = True if arg_set.get('continue_query') else False
#2015-05-07 check the period limit first, then check the per-second call limit
if self.limit_period != 0:
current_time = time.time()
#The previous approach recorded every requested TR call time in a list and checked the limit
#after dropping only the entries older than exactly 10 minutes (the commented line below):
#self.last_calls = list(filter(lambda x:x>=(current_time-self.limit_period), self.last_calls))
#2015-05-26 xingAPI's period-limit check turned out to be simpler than expected, so it was
#re-implemented to match that behaviour (the previous approach is the commented line above).
#According to eBEST support: record the time of the first TR request and check whether more than
#200 requests arrive within 10 minutes; once 10 minutes have passed, repeat using the next first TR request as the new reference.
if len(self.last_calls) > 0 and current_time - self.last_calls[0] > self.limit_period:
self.last_calls = []
if len(self.last_calls) >= self.limit_call_count:
#Limit reached; work out how long we need to sleep
tmp_sleep_time = self.last_calls[0] + self.limit_period - current_time
self.last_calls = []
if tmp_sleep_time > 0:
self.logger.debug('sleep for period limit:'+str(tmp_sleep_time))
time.sleep(tmp_sleep_time)
# Enforce the per-second request limit
tmp_sleep_time = self.last_call + self.minimum_interval_between_calls - time.time()
if tmp_sleep_time > 0: # computing the sleep time once is better than polling in a 0.01s loop - 2015-05-07
self.logger.debug('sleep:'+str(tmp_sleep_time))
time.sleep(tmp_sleep_time)
#while time.time() - self.last_call < self.minimum_interval_between_calls:
#self.logger.debug('sleep:'+str(0.01))
#time.sleep(0.01)
request_ret = self.xaquery_xareal.Request(continu_query)
# Error handling based on the return value
while request_ret < 0:
if request_ret in [-21,-15,-16,-17]:
#Errors that should be retried until the request goes through
self.logger.warn("Warnning request_ret:"+str(request_ret))
time.sleep(self.minimum_interval_between_calls + 0.01)
request_ret = self.xaquery_xareal.Request(continu_query)
elif request_ret in [-1,-2,-3,-4,-7,-13,-14,-15]:
#Errors that require restarting the xasession
self.logger.error("Error request_ret:"+str(request_ret))
self._xasession.reconnect()
break
elif request_ret in [-5,-6,-8,-9,-10,-11,-12,-18,-19,-20,-22,-23,-24,-25]:
#Errors we cannot recover from; should be reported by e-mail
self.logger.critical("Critical request_ret:"+str(request_ret))
#TODO add shutdown code
# exit()
#For now, just try to reconnect
self.logger.error("Error request_ret:"+str(request_ret))
self._xasession.reconnect()
break
self.logger.debug("request_ret:"+str(request_ret))
#TODO proper error handling needed!
if request_ret < 0:
self.logger.warn("Request return:"+str(request_ret))
return None
else:
if not self.event_object_connector.on_receive_message_event.wait(self.query_block_timeout):
#timeout
self._xasession.reconnect()
return None
self.last_call = time.time()
#2015-05-07 record the call time
if self.limit_period != 0:
self.last_calls.append(time.time())
#self.event_object_connector.on_receive_message_arg = (bIsSystemError, szMessageCode, szMessage)
if not self.event_object_connector.on_receive_data_event.wait(self.query_block_timeout):
#timeout
self._xasession.reconnect()
return None
return self._read_outblocks(self._res_info['block'], comp_yn_flag)
except Exception as ex:
self.logger.warn("XARequest_" + self._res_info['header']['tr_code'] + " query error: %s", ex)
self.logger.debug(ex, exc_info=True)
return None
finally:
pass
def subscribe(self, subscribe_code, arg_set, queue):
try:
if self._res_info['header']['is_query']:
return None
if subscribe_code.startswith('xing.'):
subscribe_code = subscribe_code.split('.')[1]
else:
return None
self.logger.info("subscribe has been received(%s)", subscribe_code)
self._verify_arguments(subscribe_code, arg_set)
#self.logger.debug("arguments verified")
if self._subscribe_key_code is not None:
key = list(arg_set.values())[0][self._subscribe_key_code]
if self.event_object_connector.queue.register(queue, key):
self._write_inblocks_for_subscription(arg_set)
self.xaquery_xareal.AdviseRealData()
self.logger.debug("Actual AdviseRealData called(key=%s)", key)
else:
self.logger.debug("Subscription add to existing queue(key=%s)", key)
else:
if self.event_object_connector.queue.register(queue):
self._write_inblocks_for_subscription(arg_set)
self.xaquery_xareal.AdviseRealData()
self.logger.debug("Actual AdviseRealData called(no key)")
else:
self.logger.debug("Subscription add to existing queue(no key)")
return True
except Exception as ex:
self.logger.warn("XARequest_" + self._res_info['header']['tr_code'] + " subscribe error: %s", ex)
self.logger.debug(ex, exc_info=True)
return None
finally:
pass
def unsubscribe(self, subscribe_code, arg_set, queue):
try:
if self._res_info['header']['is_query']:
return None
if subscribe_code.startswith('xing.'):
subscribe_code = subscribe_code.split('.')[1]
else:
return None
self.logger.info("unsubscribe has been received(%s)", subscribe_code)
self._verify_arguments(subscribe_code, arg_set)
#self.logger.debug("arguments verified")
if self._subscribe_key_code is not None:
self.logger.debug("%s has a key", subscribe_code)
key = list(arg_set.values())[0][self._subscribe_key_code]
self.logger.debug("unregister from queue")
if self.event_object_connector.queue.unregister(queue, key):
self.logger.debug("call UnadviseRealDataWithKey(%s)", key)
self.xaquery_xareal.UnadviseRealDataWithKey(key)
else:
self.logger.debug("%s has no key", subscribe_code)
if self.event_object_connector.queue.unregister(queue):
self.xaquery_xareal.AdviseRealData()
#self.logger.debug("unsubscribe finished")
return True
except Exception as ex:
self.logger.warn("XARequest_" + self._res_info['header']['tr_code'] + " unsubscribe error: %s", ex)
self.logger.warn(ex, exc_info=True)
return None
finally:
pass
# internal methods
def _write_inblocks(self, arg_set):
comp_yn_flag = False
for block_name in arg_set.keys():
if block_name == 'continue_query':
continue # it's not a real inblock
if isinstance(arg_set[block_name], dict): # non=occurs
for arg_code in arg_set[block_name].keys():
if arg_set[block_name][arg_code] is not None:
self.xaquery_xareal.SetFieldData(block_name, arg_code, 0, arg_set[block_name][arg_code])
if (not comp_yn_flag) and arg_code.lower() == 'comp_yn' and str(arg_set[block_name][arg_code]) == 'Y':
comp_yn_flag = True # compress
else: # occurs
block_count = len(arg_set[block_name])
self.xaquery_xareal.SetBlockCount(block_name, block_count)
for i, arg_set1 in enumerate(arg_set[block_name]):
for arg_code in arg_set1.keys():
self.xaquery_xareal.SetFieldData(block_name, arg_code, i, arg_set1[arg_code])
return comp_yn_flag
def _read_outblocks(self, res_blocks, comp_yn_flag = False):
outblocks = filter(lambda b:not b['is_input'], res_blocks)
ret = dict()
for block in outblocks:
if not block['is_occurs']:
sub_ret = dict()
for arg in block['args']:
sub_ret[arg['code']] = self.xaquery_xareal.GetFieldData(block['bname'], arg['code'], 0)
else:
sub_ret = list()
block_count = 0
if comp_yn_flag: # if compressed?
decompressed_size = self.xaquery_xareal.Decompress(block['bname'])
if decompressed_size > 0:
block_count = self.xaquery_xareal.GetBlockCount(block['bname'])
else:
block_count = self.xaquery_xareal.GetBlockCount(block['bname'])
for occur in range(block_count):
sub_sub_ret = dict()
for arg in block['args']:
sub_sub_ret[arg['code']] = self.xaquery_xareal.GetFieldData(block['bname'], arg['code'], occur)
sub_ret.append(sub_sub_ret)
ret[block['bname']] = sub_ret
return ret
def _verify_arguments(self, tr_code, arg_set):
# Begin validating the received arg_set
if self._res_info['header']['tr_code'] != tr_code:
raise Exception('Wrong tr-code has been received (%s)', tr_code)
inblocks = list(filter(lambda b:b['is_input'], self._res_info['block']))
arg_set_key_set = set(arg_set.keys())
inblocks_bname_set = set(map(lambda b:b['bname'], inblocks))
if arg_set_key_set-inblocks_bname_set-{'continue_query'}:
raise Exception('Unsupported inblock name has been received (%s)', arg_set_key_set-inblocks_bname_set)
for block_name in arg_set.keys():
if block_name == 'continue_query':
continue
inblock = list(filter(lambda bn:bn['bname']==block_name, inblocks))[0]
if inblock['is_occurs'] and isinstance(arg_set[block_name], dict):
raise Exception("Unexpected dict('{}') for occurs found, list('[]]') should be here instead")
if not inblock['is_occurs'] and isinstance(arg_set[block_name], list):
raise Exception("Unexpected list('[]') for non-occurs found, dict('{}') should be here instead")
if isinstance(arg_set[block_name], dict):
arg_set_keys = set(arg_set[block_name].keys())
else:
arg_set_keys = set()
for a in arg_set[block_name]:
arg_set_keys.update(set(a.keys()))
arg_code_set = set(map(lambda b:b['code'], inblock['args']))
if arg_set_keys-arg_code_set:
raise Exception('Unsupported argument code has been received (%s)', str(arg_set_keys-arg_code_set))
# End of arg_set validation
def _init_xaquery(self):
self.event_object_connector.on_receive_data_event = threading.Event()
self.event_object_connector.on_receive_message_event = threading.Event()
XAQueryEvents.event_object_connector = self.event_object_connector
self.xaquery_xareal = win32com.client.DispatchWithEvents("XA_DataSet.XAQuery",XAQueryEvents)
self.xaquery_xareal.LoadFromResFile(os.path.join(self._xasession.res_dir_path, self._res_info['header']['tr_code']+'.res'))
#print(dir(self.xaquery_xareal.GetTRCountPerSec.__self__))
count_per_sec_limit = self.xaquery_xareal.GetTRCountPerSec(self._res_info['header']['tr_code'])
self.logger.debug("self.xaquery_xareal.GetTRCountPerSec(%s)", self._res_info['header']['tr_code'])
self.logger.debug('count_per_sec_limit(%s):%s', self._res_info['header']['tr_code'], str(count_per_sec_limit))
#2015-05-26 added request limiting based on the newly introduced GetTRCountBaseSec API
count_per_sec_base = self.xaquery_xareal.GetTRCountBaseSec(self._res_info['header']['tr_code'])
self.logger.debug("self.xaquery_xareal.GetTRCountBaseSec(%s)", self._res_info['header']['tr_code'])
self.logger.debug('count_per_sec_base(%s):%s', self._res_info['header']['tr_code'], str(count_per_sec_base))
if count_per_sec_limit:
self.minimum_interval_between_calls = count_per_sec_base / count_per_sec_limit
else:
self.minimum_interval_between_calls = 0
self.logger.debug('self.minimum_interval_between_calls:%s', str(self.minimum_interval_between_calls))
self.last_call = 0
#2015-05-07 set up the variables for the period limit
self.limit_call_count, self.limit_period = self._getTrCountPerPeriod(self._res_info['header']['tr_code'])
#no more than self.limit_call_count calls are allowed within self.limit_period seconds (possible, but penalised with an extra delay)
self.last_calls = []
def _init_xareal(self):
XARealEvents.event_object_connector = self.event_object_connector
self.xaquery_xareal = win32com.client.DispatchWithEvents("XA_DataSet.XAReal",XARealEvents)
self.xaquery_xareal.LoadFromResFile(os.path.join(self._xasession.res_dir_path, self._res_info['header']['tr_code']+'.res'))
self.event_object_connector.res_blocks = self._res_info['block']
self.event_object_connector.xaquery_xareal = self.xaquery_xareal
#a subscription has only one inblock
args = list(filter(lambda b:b['is_input'], self.event_object_connector.res_blocks))[0]['args']
if len(args) > 0:
#has a key
self._subscribe_key_code = args[0]['code']
self.event_object_connector.queue = QueueConnectAndDispatcher(self._subscribe_key_code)
else:
#no key
self._subscribe_key_code = None
self.event_object_connector.queue = QueueConnectAndDispatcherWithoutKey()
# internal methods
def _write_inblocks_for_subscription(self, arg_set):
for block_name in arg_set.keys():
for arg_code in arg_set[block_name].keys():
if arg_set[block_name][arg_code] is not None:
self.xaquery_xareal.SetFieldData(block_name, arg_code, arg_set[block_name][arg_code])
def finalize_com_object(self):
if hasattr(self, 'xaquery_xareal'):
del(self.xaquery_xareal)
if hasattr(self.event_object_connector, 'xaquery_xareal'):
del(self.event_object_connector.xaquery_xareal)
#For now, period-limited TR codes are identified from the list announced on the eBEST homepage.
#Later this should be obtained through an API or similar, if one is ever provided - 2015-05-07
_tmp_periodLimitedTrCodes=["CCEAQ01100",
"CCEAQ06000",
"CCEAQ10100",
"CCEAQ50600",
"CCEBQ10500",
"CDPCQ04700",
"CDPCQ13900",
"CDPCQ14400",
"CEXAQ21100",
"CEXAQ21200",
"CEXAQ31100",
"CEXAQ31200",
"CEXAQ44200",
"CFOAQ00600",
"CFOAQ10100",
"CFOAQ50400",
"CFOBQ10300",
"CFOBQ10500",
"CFOBQ10800",
"CFOEQ11100",
"CFOEQ82600",
"CFOEQ82700",
"CFOFQ02400",
"CFXBQ03700",
"CFXBQ03900",
"CFXBQ07000",
"CFXBQ08400",
"CFXBQ08600",
"CFXBQ08700",
"CFXBQ08800",
"CFXBT03600",
"ChartExcel",
"ChartIndex",
"CIDBQ01400",
"CIDBQ01500",
"CIDBQ01800",
"CIDBQ02400",
"CIDBQ03000",
"CIDBQ05300",
"CIDEQ00800",
"CLNAQ00100",
"CSPAQ00600",
"CSPAQ02200",
"CSPAQ02300",
"CSPAQ03700",
"CSPAQ12200",
"CSPAQ12300",
"CSPAQ13700",
"CSPBQ00200",
"CSPBQ01300",
"f8301",
"f8307",
"f8309",
"f8311",
"FOCCQ33600",
"FOCCQ33700",
"MMDAQ91200",
"o3101",
"o3103",
"o3104",
"o3105",
"o3106",
"o3107",
"o3116",
"o3117",
"t1302",
"t1305",
"t1308",
"t1404",
"t1405",
"t1449",
"t1471",
"t1475",
"t1485",
"t1514",
"t1516",
"t1532",
"t1537",
"t1602",
"t1603",
"t1615",
"t1617",
"t1621",
"t1631",
"t1632",
"t1633",
"t1640",
"t1662",
"t1664",
"t1665",
"t1701",
"t1702",
"t1717",
"t1752",
"t1764",
"t1771",
"t1809",
"t1825",
"t1826",
"t1833",
"t1901",
"t1902",
"t1903",
"t1904",
"t1921",
"t1926",
"t1927",
"t1941",
"t1954",
"t2106",
"t2203",
"t2209",
"t2405",
"t2421",
"t2541",
"t2545",
"t2805",
"t2813",
"t2814",
"t2816",
"t2833",
"t3102",
"t3202",
"t3320",
"t3325",
"t3341",
"t3401",
"t3518",
"t3521",
"t4201",
"t8405",
"t8406",
"t8408",
"t8409",
"t8411",
"t8412",
"t8413",
"t8414",
"t8415",
"t8416",
"t8417",
"t8418",
"t8419",
"t8424",
"t8427",
"t8428",
"t8429",]
def _getTrCountPerPeriod(self, trcode):
if trcode in self._tmp_periodLimitedTrCodes:
return 200, 600 #up to 200 queries allowed per 600 seconds
#return 5, 10 #test values - up to 5 queries per 10 seconds
else:
return 0, 0
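# Added illustrative note (not in the original source): a period-limited TR code such as
# 't1305' is therefore allowed at most 200 requests per rolling 600-second window, while
# unlimited codes return (0, 0) and skip the window check entirely. The throttling in
# query() then boils down to (sketch of the existing logic, not new behaviour):
#   if len(self.last_calls) >= self.limit_call_count:             # window already full
#       sleep_for = self.last_calls[0] + self.limit_period - time.time()
#   and, independently, consecutive calls are spaced by at least
#   minimum_interval_between_calls = count_per_sec_base / count_per_sec_limit
#   (e.g. a hypothetical base of 1 second with a limit of 5 calls gives a 0.2 s gap).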
class QueueConnectAndDispatcher:
def __init__(self, key_code):
self._key_code = key_code
self._queues = dict()
self._lock = threading.Lock()
self._queue_count = 0
def register(self, queue, key):
new_key = False
with self._lock:
queue_set = self._queues.get(key)
if queue_set:
queue_set.add(queue)
else:
queue_set = {queue}
self._queues[key] = queue_set
new_key = True
self._queue_count += 1
return new_key
def unregister(self, queue, key):
with self._lock:
remove_key = False
queue_set = self._queues.get(key)
queue_set.remove(queue)
if len(queue_set) == 0:
self._queues.pop(key)
remove_key = True
self._queue_count -= 1
return remove_key
def getRegisteredQueuesCount():
with self._lock:
return self._queue_count
#queue part
def task_done(self):
"""์ง์์ํจ"""
raise NotImplementedError('This should not be called')
def join(self):
with self._lock:
for queue in self._queues.values():
queue.join()
def qsize(self):
return 0
def empty(self):
return True
def full(self):
return False
#def put(self, item, block=True, timeout=None):
def put(self, item):
#extract the key
key = list(item.values())[0][self._key_code]
with self._lock:
queue_set = self._queues.get(key)
if queue_set:
for queue in queue_set:
queue.put(item)
def get(self, block=True, timeout=None):
"""์ง์์ํจ"""
raise NotImplementedError('This should not be called')
def put_nowait(self, item):
"""์ง์์ํจ"""
raise NotImplementedError('This should not be called')
def get_nowait(self):
"""์ง์์ํจ"""
raise NotImplementedError('This should not be called')
class QueueConnectAndDispatcherWithoutKey:
def __init__(self):
self._queues = set()
self._lock = threading.Lock()
def register(self, queue):
with self._lock:
self._queues.add(queue)
return len(self._queues)==1
def unregister(self, queue):
with self._lock:
self._queues.remove(queue)
return len(self._queues)==0
def getRegisteredQueuesCount():
with self._lock:
return len(self._queues)
#dummy queue part
def task_done(self):
"""์ง์์ํจ"""
raise NotImplementedError('This should not be called')
def join(self):
with self._lock:
for queue in self._queues:
queue.join()
def qsize(self):
return 0
def empty(self):
return True
def full(self):
return False
#def put(self, item, block=True, timeout=None):
def put(self, item):
with self._lock:
for queue in self._queues:
queue.put(item)
pass
def get(self, block=True, timeout=None):
"""์ง์์ํจ"""
raise NotImplementedError('This should not be called')
def put_nowait(self, item):
"""์ง์์ํจ"""
raise NotImplementedError('This should not be called')
def get_nowait(self):
"""์ง์์ํจ"""
raise NotImplementedError('This should not be called') | mit | 6,547,232,836,483,975,000 | 31.366959 | 138 | 0.660839 | false |
pyannote/pyannote-algorithms | pyannote/algorithms/segmentation/__init__.py | 1 | 1228 | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS (Hervé BREDIN - http://herve.niderb.fr)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
| mit | 6,311,056,182,666,504,000 | 46.192308 | 80 | 0.770986 | false |
hosseinoliabak/learningpy | 13_51_googleApi.py | 1 | 2968 | '''
Calling a JSON API
In this assignment, you will write a Python program somewhat similar to
http://www.py4e.com/code3/geojson.py. The program will prompt for a location,
contact a web service, retrieve JSON from the web service, parse that data,
and retrieve the first place_id from the JSON. A place ID is a textual
identifier that uniquely identifies a place within Google Maps.
API End Points:
To complete this assignment, you should use this API endpoint that has a static
subset of the Google Data:
http://py4e-data.dr-chuck.net/geojson?
This API uses the same parameters (sensor and address) as the Google API.
This API also has no rate limit so you can test as often as you like. If you
visit the URL with no parameters, you get a list of all of the address values
which can be used with this API.
To call the API, you need to provide the address that you are requesting as the
address= parameter that is properly URL encoded using the urllib.urlencode()
function as shown in http://www.py4e.com/code3/geojson.py
Test Data / Sample Execution:
You can test to see if your program is working with a location of
"South Federal University" which will have a place_id of
"ChIJJ8oO7_B_bIcR2AlhC8nKlok".
+-------------Sample Execution-------------+
|$ python3 solution.py |
|Enter location: South Federal University |
|Retrieving http://... |
|Retrieved 2101 characters |
|Place id ChIJJ8oO7_B_bIcR2AlhC8nKlok |
+------------------------------------------+
Turn In:
Please run your program to find the place_id for this location:
+++ University of Malaya +++
Make sure to enter the name and case exactly as above and enter the
place_id and your Python code below. Hint: The first seven characters of the
place_id are "ChIJC9_ ..."
Make sure to retreive the data from the URL specified above and not the normal
Google API. Your program should work with the Google API - but the place_id may
not match for this assignment.
'''
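# Added illustrative comment (not part of the original solution): for the test location,
# urllib.parse.urlencode({'address': 'South Federal University'}) produces
# 'address=South+Federal+University', so the URL actually retrieved below is
# http://py4e-data.dr-chuck.net/geojson?address=South+Federal+University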
import urllib.request
import json
# Note that Google is increasingly requiring keys for this API
#sServiceUrl = 'http://maps.googleapis.com/maps/api/geocode/json?'
#To use googleapis uncomment the above line and comment the below line
sServiceUrl = 'http://py4e-data.dr-chuck.net/geojson?'
sAddress = input('Enter location [South Federal University]: ')
if not sAddress:
sAddress = 'South Federal University'
sUrl = sServiceUrl + urllib.parse.urlencode({'address': sAddress})
print('Retrieving', sUrl)
httpResponse = urllib.request.urlopen(sUrl)
sData = httpResponse.read().decode()
try:
dJsonData = json.loads(sData)
except:
dJsonData = None
sPlaceID = dJsonData['results'][0]['place_id']
print('place_id:', sPlaceID)
fLat = dJsonData['results'][0]['geometry']['location']['lat']
fLng = dJsonData["results"][0]["geometry"]["location"]["lng"]
print('lat', fLat, 'lng', fLng)
sLocation = dJsonData['results'][0]['formatted_address']
print(sLocation)
| gpl-3.0 | 1,357,545,549,673,138,400 | 37.545455 | 80 | 0.723383 | false |
titiwu/simpl | tests/test_simplDirectories.py | 1 | 3981 | from unittest import TestCase
import simpl.music_directories
class TestSimplDirectories(TestCase):
dir_list = [{'playlists': [],
'directory': '01-Test',
'files': ['Doobie Brothers - Without Love (Where would you be now).mp3',
'Frank Sinatra - New York New York.mp3', 'Al Jarreau - Boogie Down.wma',
'Curtis Mayfield - Move On Up.mp3', 'Buckshot LeFonque -Some Cow Funk (more tea, vicar).wma',
'Frank Sinatra - You make me feel so young.mp3', 'Frank Sinatra - The Lady is a Tramp.mp3',
"Frank Sinatra - I've got you under my Skin.mp3",
'Frank Sinatra - Nice work if you can get it.mp3', 'Earth Wind & Fire - Shinning Star.mp3',
'Earth Wind & Fire - Mix funk.mp3', 'Earth, Wind & Fire - Got To Get You Into My Life.mp3']},
{'playlists': [],
'directory': '02-Peter und der Wolf',
'files': ['Peter Fox - Die Affen steigen auf den Thron.mp3', 'Peter Fox - Lok auf 2 Beinen.mp3',
'Peter Fox - Der letzte Tag.mp3', 'Peter Fox - Stadtaffe.mp3', 'Peter Fox - Haus am See.mp3',
'Peter Fox - Kopf verloren.mp3', 'Peter Fox - Das zweite Gesicht.mp3',
'Peter Fox - Schwinger.mp3', 'Peter Fox - Ich Steine, Du Steine.mp3',
'Peter Fox - Fieber.mp3', 'Peter Fox - Schuttel deinen Speck.mp3',
'Peter Fox - Grosshirn RMX.mp3', 'Peter Fox - Schwarz zu Blau.mp3',
'Peter Fox - She moved in (Miss Platnum).mp3',
'Peter Fox - Marry me (feat. Miss Platnum).mp3', 'Peter Fox - Aufstehn.mp3',
'Peter Fox - Alles Neu.mp3', 'Peter Fox - Dickes Ende.mp3']},
{'playlists': [],
'directory': '03-Die_Prinzessin',
'files': ['01 dota_kehr - zeitgeist.mp3', '02 dota_kehr - sternschnuppen.mp3',
'03 dota_kehr - zauberer.mp3', '04 dota_kehr - selten_aber_manchmal.mp3',
'05 dota_kehr - friedberg.mp3', '06 dota_kehr - mediomelo.mp3',
'07 dota_kehr - kaulquappe.mp3', '08 dota_kehr - schneeknig.mp3',
'09 dota_kehr - nichts_neues.mp3', '10 dota_kehr - geheimnis.mp3',
'11 dota_kehr - erledigungszettelschreiber.mp3', '12 dota_kehr - die_drei.mp3']},
{'playlists': ['bayern_2.m3u', 'M_94_5.ogg.m3u', 'F_M_4.pls'],
'directory': '04-Radio',
'files': []}]
def setUp(self):
self.directories = simpl.music_directories.SimplDirectories(self.dir_list)
def test_folder_exists(self):
self.assertFalse(self.directories.folder_exists(5))
self.assertTrue(self.directories.folder_exists(4))
def test_get_text_for_folder(self):
self.assertEqual(self.directories.get_text_for_folder(2), "Peter und der Wolf")
def test_get_folder_uri_for_mpd(self):
self.assertEqual(self.directories.get_folder_uri_for_mpd(4), "04-Radio")
def test_get_nr_of_folder_entries(self):
pass
def test_is_radio_folder(self):
self.assertFalse(self.directories.is_radio_folder(2))
self.assertTrue(self.directories.is_radio_folder(4))
def test_get_radio_playlist_text(self):
self.assertEqual(self.directories.get_radio_playlist_text(1, 2), "Fehler")
self.assertEqual(self.directories.get_radio_playlist_text(4, 2), "F M 4")
def test_get_radio_uri_for_mpd(self):
self.assertEqual(self.directories.get_radio_uri_for_mpd(4, 0), "04-Radio/bayern_2.m3u")
def test_get_next_radio_playlist_nr(self):
pass
def test_get_previous_radio_playlist_nr(self):
pass
def test_process_directories(self):
pass
| gpl-3.0 | 1,080,743,227,735,018,400 | 53.534247 | 120 | 0.560663 | false |
mgp/sharebears | sharebears/url_decoder_github.py | 1 | 5196 | import re
import url_decoder
from url_decoder import UrlDecoder, UrlDecoderException
class _GitHubUrlDecoder(UrlDecoder):
@staticmethod
def can_decode_url(url, parsed_url):
if not parsed_url.netloc.startswith("github."):
return False
return True
class GitHubRepositoryOwnerItem:
"""The owner in a GitHubRepositoryItem."""
def __init__(self, decoded_owner):
self.login = decoded_owner["login"]
self.avatar_url = decoded_owner["avatar_url"]
self.html_url = decoded_owner["html_url"]
class GitHubRepositoryItem:
"""A GitHub repository for a RenderableItem."""
def __init__(self, decoded_url):
self.name = decoded_url["name"]
self.description = decoded_url["description"]
self.html_url = decoded_url["html_url"]
self.language = decoded_url["language"]
self.owner = GitHubRepositoryOwnerItem(decoded_url["owner"])
class GitHubRepositoryUrlDecoder(_GitHubUrlDecoder):
"""Renders a GitHub repository."""
_PATH_REGEX = re.compile("^/(?P<owner>\w+)/(?P<repo>\w+)$")
def __init__(self, github_client):
self.github_client = github_client
@staticmethod
def name():
return "github-repository"
@staticmethod
def _match_parsed_url(parsed_url):
return GitHubRepositoryUrlDecoder._PATH_REGEX.match(parsed_url.path)
@staticmethod
def can_decode_url(url, parsed_url):
if not _GitHubUrlDecoder.can_decode_url(url, parsed_url):
return False
elif not GitHubRepositoryUrlDecoder._match_parsed_url(parsed_url):
return False
return True
def _filter_json(self, json):
"""Filters the JSON from https://developer.github.com/v3/repos/#get"""
# Filter the repository owner.
owner_json = json["owner"]
filtered_owner_json = url_decoder.filter_json(owner_json,
"login", "avatar_url", "html_url")
# Filter the repository.
filtered_json = url_decoder.filter_json(json,
"name",
"description",
"html_url",
"language")
filtered_json["owner"] = filtered_owner_json
return filtered_json
def decode_url(self, url, parsed_url):
match = self._match_parsed_url(parsed_url)
if not match:
raise UrlDecoderException("URL is not decodeable: %s" % parsed_url)
owner = match.group("owner")
repo = match.group("repo")
json = self.github_client.get_repository(owner, repo)
return self._filter_json(json)
def item_for_rendering(self, decoded_url):
return GitHubRepositoryItem(decoded_url)
class GitHubCommitUserItem:
"""A user in a GitHubCommitItem."""
def __init__(self, decoded_user):
self.name = decoded_user["name"]
self.email = decoded_user["email"]
self.date = url_decoder.to_datetime(decoded_user["date"])
class GitHubCommitItem:
"""A GitHub commit for a RenderableItem."""
def __init__(self, decoded_url):
self.sha = decoded_url["sha"]
self.url = decoded_url["url"]
self.message = decoded_url["message"]
self.author = GitHubCommitUserItem(decoded_url["author"])
self.committer = GitHubCommitUserItem(decoded_url["committer"])
class GitHubCommitUrlDecoder(_GitHubUrlDecoder):
"""Renders a commit belonging to a GitHub repository."""
  _PATH_REGEX = re.compile(r"^/(?P<owner>\w+)/(?P<repo>\w+)/commit/(?P<sha>\w+)$")
def __init__(self, github_client):
self.github_client = github_client
@staticmethod
def name():
return "github-commit"
@staticmethod
def _match_parsed_url(parsed_url):
return GitHubCommitUrlDecoder._PATH_REGEX.match(parsed_url.path)
@staticmethod
def can_decode_url(url, parsed_url):
if not _GitHubUrlDecoder.can_decode_url(url, parsed_url):
return False
elif not GitHubCommitUrlDecoder._match_parsed_url(parsed_url):
return False
return True
def _filter_json(self, json):
"""Filters the JSON from https://developer.github.com/v3/git/commits/#get-a-commit"""
return url_decoder.filter_json(json,
"sha",
"url",
"author",
"committer",
"message")
def decode_url(self, url, parsed_url):
match = self._match_parsed_url(parsed_url)
if not match:
raise UrlDecoderException("URL is not decodeable: %s" % parsed_url)
owner = match.group("owner")
repo = match.group("repo")
sha = match.group("sha")
json = self.github_client.get_commit(owner, repo, sha)
return self._filter_json(json)
def item_for_rendering(self, decoded_url):
return GitHubCommitItem(decoded_url)
class GitHubGistItem:
"""A GitHub Gist for a RenderableItem."""
def __init__(self, decoded_url):
self.url = decoded_url["url"]
class GitHubGistUrlDecoder(UrlDecoder):
"""Embeds a Gist."""
  _PATH_REGEX = re.compile(r"^/\w+/\w+$")
@staticmethod
def name():
return "github-gist"
@staticmethod
def can_decode_url(url, parsed_url):
if not parsed_url.netloc.startswith("gist.github."):
return False
elif not GitHubGistUrlDecoder._PATH_REGEX.match(parsed_url.path):
return False
return True
def decode_url(self, url, parsed_url):
# Use an embedded Gist.
return { "url": url }
def item_for_rendering(self, decoded_url):
return GitHubGistItem(decoded_url)
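# Illustrative usage sketch (not part of the original module): it assumes a
# hypothetical `github_client` object exposing get_repository(owner, repo),
# which is the only call the repository decoder makes on its client. The URL
# parsing helper is urlparse (from urlparse in Python 2, urllib.parse in 3).
#
#   url = "https://github.com/mgp/sharebears"
#   parsed_url = urlparse(url)
#   if GitHubRepositoryUrlDecoder.can_decode_url(url, parsed_url):
#     decoder = GitHubRepositoryUrlDecoder(github_client)
#     decoded = decoder.decode_url(url, parsed_url)   # filtered JSON dict
#     item = decoder.item_for_rendering(decoded)      # GitHubRepositoryItem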
| apache-2.0 | 2,276,164,251,358,414,600 | 26.0625 | 89 | 0.672825 | false |
zhlinh/leetcode | 0142.Linked List Cycle II/solution.py | 1 | 1073 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: [email protected]
Version: 0.0.1
Created Time: 2016-03-17
Last_modify: 2016-03-17
******************************************
'''
'''
Given a linked list,
return the node where the cycle begins.
If there is no cycle, return null.
Note: Do not modify the linked list.
Follow up:
Can you solve it without using extra space?
'''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
fast, slow, entry = head, head, head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
if fast == slow:
while slow != entry:
entry = entry.next
slow = slow.next
return entry
return None
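# Why the second phase finds the entry: let a be the distance from the head to
# the cycle entry, b the distance from the entry to the meeting point, and L
# the cycle length. slow has walked a+b steps and fast 2*(a+b), so a+b is a
# multiple of L, and walking a further steps from the meeting point lands back
# on the entry. Advancing `entry` from the head and `slow` from the meeting
# point one step at a time therefore makes them first coincide at the entry.
#
# Minimal illustrative check (assumed usage, not part of the original file):
if __name__ == '__main__':
    nodes = [ListNode(i) for i in range(5)]
    for first, second in zip(nodes, nodes[1:]):
        first.next = second
    nodes[-1].next = nodes[2]  # create a cycle entering at node 2
    assert Solution().detectCycle(nodes[0]) is nodes[2]
    assert Solution().detectCycle(ListNode(1)) is None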
| apache-2.0 | -6,262,251,490,648,963,000 | 22.844444 | 44 | 0.50699 | false |
harkrishan/TwitterBot | TwitterBot.py | 1 | 3213 | #!/usr/bin/python
# Importing Twython
from twython import Twython, TwythonError
# Importing Wolfram
import urllib
import wolframalpha
import urllib2
from xml.etree import ElementTree as etree
import sys
import time
#Setting variables for Twitter
app_key = "*****YOUR APP KEY******"
app_secret = "****YOUR APP SECRET*****"
oauth_token = "*****YOUR OAUTH TOKEN******"
oauth_token_secret = "*****YOUR OAUTH TOKEN SECRET*****"
#Twython Object
twitter = Twython(app_key, app_secret, oauth_token, oauth_token_secret)
#Setting variables for Wolfram
app_id = "*****YOUR WOLFRAMALPHA APP ID******"
#Wolfram Object
client = wolframalpha.Client(app_id)
#File to store the last tweet's id
file = "/twitterBot/max_id.txt"
#Function to ask WolframAlpha and return the answer
def wolfram_alpha(str):
str=str.lower()
str=str.replace("@berry_bots"," ")
str=str.replace("."," ")
str=str.replace("hi berry bots"," ")
str=str.replace("hello berry bots"," ")
str=str.replace("berry bots"," ")
str=str.replace("berry bot"," ")
str=str.strip()
str=str.encode('utf-8')
res = client.query(str)
if len(res.pods)>0:
text=" "
pod = res.pods[1]
if pod.text:
text = pod.text.encode('utf-8')
else:
text = 'x'
else:
text = 'x'
return text;
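# Illustrative example (assumes the Wolfram|Alpha query succeeds and the
# result pod res.pods[1] has text): wolfram_alpha("@berry_bots what is 2+2.")
# lowercases the tweet, strips the mention and the ".", queries "what is 2+2"
# and would return something like "4"; when no usable pod comes back it
# returns the sentinel 'x', which the loop below uses to skip replying.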
#Reading file for last tweet's id
max=open(file,"r")
max.seek(0,0)
mid=max.read(18)
search_results = None
#Searching Twitter for Questions
try:
search_results = twitter.search(q="@berry_bots",lang="en",count=5,since_id=str(mid))
except TwythonError as err:
print err
max.close()
mxd = None
#Traversing the tweets searched and tweeting back the answers
try:
if search_results:
for tweet in reversed(search_results["statuses"]):
mxd = tweet["id_str"]
answer = wolfram_alpha(tweet["text"])
if answer!='x':
reply_id = tweet["id_str"]
reply_user = '@' + tweet['user']['screen_name']
reply_user = reply_user.encode('utf-8')
if len(answer)>123:
n=123
for i in range(0, len(answer), n):
ans=answer[i:i+n]
ans = reply_user + ' ' + ans
twitter.update_status(status=ans,in_reply_to_status_id=reply_id)
time.sleep(5)
else:
answer = reply_user + ' ' + answer
twitter.update_status(status=answer,in_reply_to_status_id=reply_id)
time.sleep(5)
except TwythonError as e:
print e
#Writing the id of the last replied-to tweet into the file
if mxd:
max=open(file,"w")
max.write(mxd)
max.close()
| gpl-2.0 | 5,662,756,556,183,229,000 | 29.894231 | 112 | 0.498599 | false |
redcorelinux/sisyphus | src/frontend/cli/sisyphus-cli.py | 1 | 1263 | #!/usr/bin/python3
import sys
from libsisyphus import *
checkSystemMode()
pkgList = sys.argv[2:]
if "__main__" == __name__:
if sys.argv[1:]:
if "install" in sys.argv[1:]:
startInstall(pkgList)
elif "uninstall" in sys.argv[1:]:
startUninstall(pkgList)
elif "force-uninstall" in sys.argv[1:]:
startUninstallForce(pkgList)
elif "remove-orphans" in sys.argv[1:]:
removeOrphans()
elif "update" in sys.argv[1:]:
startSync()
elif "upgrade" in sys.argv[1:]:
startUpgrade()
elif "search" in sys.argv[1:]:
startSearch(pkgList)
elif "spmsync" in sys.argv[1:]:
startSyncSPM()
elif "rescue" in sys.argv[1:]:
rescueDB()
elif "sysinfo" in sys.argv[1:]:
sysInfo()
elif "mirror" in sys.argv[1:]:
if "list" in sys.argv[2:]:
listRepo()
elif "set" in sys.argv[2:]:
if sys.argv[3:]:
setRepo(sys.argv[3:])
else:
showHelp()
else:
showHelp()
elif "help" in sys.argv[1:]:
showHelp()
else:
showHelp()
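# Example invocations (illustrative, using the command names dispatched above):
#   sisyphus-cli.py install firefox pidgin
#   sisyphus-cli.py update
#   sisyphus-cli.py mirror list
#   sisyphus-cli.py mirror set 2
# An unrecognised command falls through to showHelp().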
| gpl-2.0 | 5,561,284,598,834,534,000 | 27.066667 | 47 | 0.47981 | false |