repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
tomazc/orange-bio | orangecontrib/bio/kegg/tests/test_entry.py | 1 | 1064 | from six import StringIO
import doctest
import unittest
from orangecontrib.bio.kegg.entry import parser, fields, DBEntry, entry_decorate
TEST_ENTRY = """\
ENTRY test_id something else
NAME test
DESCRIPTION This is a test's description.
it spans
multiple lines
SUB This is a description's sub
section
///
"""
@entry_decorate
class Entry(DBEntry):
pass
class TestEntry(unittest.TestCase):
def test_entry(self):
"""
Test basic DBEntry class.
"""
entry = Entry(TEST_ENTRY)
self.assertEqual(entry.entry_key, "test_id")
self.assertEqual(entry.ENTRY.TITLE, "ENTRY")
self.assertEqual(str(entry), TEST_ENTRY[:-4])
class TestParser(unittest.TestCase):
def test_parser(self):
parse = parser.DBGETEntryParser()
stream = StringIO(TEST_ENTRY)
for event, title, text in parse.parse(stream):
pass
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(parser))
return tests
| gpl-3.0 | -5,013,785,239,307,211,000 | 20.714286 | 80 | 0.638158 | false |
blackball/an-test6 | net/migrations/0004_update_calibrations.py | 1 | 22054 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from astrometry.net.settings import *
from astrometry.util.util import Tan
import math
import os
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for calib in orm.Calibration.objects.all():
wcsfn = os.path.join(JOBDIR, '%08i' % calib.job.id)
wcsfn = os.path.join(wcsfn, 'wcs.fits')
wcs = Tan(str(wcsfn), 0)
ra,dec = wcs.radec_center()
radius = (wcs.pixel_scale() *
math.hypot(wcs.imagew, wcs.imageh)/2. / 3600.)
# Find cartesian coordinates
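# standard unit-sphere conversion, after changing ra/dec from degrees to radians:
#   x = cos(dec)*cos(ra), y = cos(dec)*sin(ra), z = sin(dec)
# the matching search radius is stored in radians as calib.r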
ra *= math.pi/180
dec *= math.pi/180
tempr = math.cos(dec)
calib.x = tempr*math.cos(ra)
calib.y = tempr*math.sin(ra)
calib.z = math.sin(dec)
calib.r = radius/180*math.pi
calib.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'net.album': {
'Meta': {'object_name': 'Album'},
'comment_receiver': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.CommentReceiver']", 'unique': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publicly_visible': ('django.db.models.fields.CharField', [], {'default': "'y'", 'max_length': '1'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'albums'", 'symmetrical': 'False', 'to': "orm['net.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'albums'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_images': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'albums'", 'symmetrical': 'False', 'to': "orm['net.UserImage']"})
},
'net.cachedfile': {
'Meta': {'object_name': 'CachedFile'},
'disk_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.DiskFile']"}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'primary_key': 'True'})
},
'net.calibration': {
'Meta': {'object_name': 'Calibration'},
'decmax': ('django.db.models.fields.FloatField', [], {}),
'decmin': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'r': ('django.db.models.fields.FloatField', [], {}),
'ramax': ('django.db.models.fields.FloatField', [], {}),
'ramin': ('django.db.models.fields.FloatField', [], {}),
'raw_tan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'calibrations_raw'", 'null': 'True', 'to': "orm['net.TanWCS']"}),
'sip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.SipWCS']", 'null': 'True'}),
'sky_location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'calibrations'", 'null': 'True', 'to': "orm['net.SkyLocation']"}),
'tweaked_tan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'calibrations_tweaked'", 'null': 'True', 'to': "orm['net.TanWCS']"}),
'x': ('django.db.models.fields.FloatField', [], {}),
'y': ('django.db.models.fields.FloatField', [], {}),
'z': ('django.db.models.fields.FloatField', [], {})
},
'net.comment': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments_left'", 'to': "orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['net.CommentReceiver']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'net.commentreceiver': {
'Meta': {'object_name': 'CommentReceiver'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'net.diskfile': {
'Meta': {'object_name': 'DiskFile'},
'file_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'primary_key': 'True'}),
'file_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'size': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'net.flag': {
'Meta': {'ordering': "['name']", 'object_name': 'Flag'},
'explanation': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '56', 'primary_key': 'True'})
},
'net.flaggeduserimage': {
'Meta': {'object_name': 'FlaggedUserImage'},
'flag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Flag']"}),
'flagged_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'user_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.UserImage']"})
},
'net.image': {
'Meta': {'object_name': 'Image'},
'disk_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.DiskFile']"}),
'display_image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'image_display_set'", 'null': 'True', 'to': "orm['net.Image']"}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thumbnail': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'image_thumbnail_set'", 'null': 'True', 'to': "orm['net.Image']"}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'})
},
'net.job': {
'Meta': {'object_name': 'Job'},
'calibration': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'job'", 'unique': 'True', 'null': 'True', 'to': "orm['net.Calibration']"}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'error_message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'queued_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'user_image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'to': "orm['net.UserImage']"})
},
'net.license': {
'Meta': {'object_name': 'License'},
'allow_commercial_use': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '1'}),
'allow_modifications': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'license_name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'license_uri': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'net.processsubmissions': {
'Meta': {'object_name': 'ProcessSubmissions'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pid': ('django.db.models.fields.IntegerField', [], {}),
'watchdog': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'net.queuedjob': {
'Meta': {'object_name': 'QueuedJob'},
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Job']"}),
'procsub': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'to': "orm['net.ProcessSubmissions']"}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'net.queuedsubmission': {
'Meta': {'object_name': 'QueuedSubmission'},
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'procsub': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subs'", 'to': "orm['net.ProcessSubmissions']"}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Submission']"}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'net.sipwcs': {
'Meta': {'object_name': 'SipWCS'},
'apterms': ('django.db.models.fields.TextField', [], {'default': "''"}),
'aterms': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bpterms': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bterms': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'tan': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.TanWCS']", 'unique': 'True'})
},
'net.skylocation': {
'Meta': {'object_name': 'SkyLocation'},
'healpix': ('django.db.models.fields.BigIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nside': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'net.skyobject': {
'Meta': {'object_name': 'SkyObject'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'primary_key': 'True'})
},
'net.sourcelist': {
'Meta': {'object_name': 'SourceList', '_ormbases': ['net.Image']},
'image_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.Image']", 'unique': 'True', 'primary_key': 'True'}),
'source_type': ('django.db.models.fields.CharField', [], {'max_length': '4'})
},
'net.submission': {
'Meta': {'object_name': 'Submission'},
'album': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Album']", 'null': 'True', 'blank': 'True'}),
'center_dec': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'center_ra': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'comment_receiver': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.CommentReceiver']", 'unique': 'True'}),
'deduplication_nonce': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'disk_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'null': 'True', 'to': "orm['net.DiskFile']"}),
'downsample_factor': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'error_message': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.License']"}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parity': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'positional_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'processing_finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'processing_retries': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'processing_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'publicly_visible': ('django.db.models.fields.CharField', [], {'default': "'y'", 'max_length': '1'}),
'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'scale_err': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'scale_est': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'scale_lower': ('django.db.models.fields.FloatField', [], {'default': '0.10000000000000001', 'null': 'True', 'blank': 'True'}),
'scale_type': ('django.db.models.fields.CharField', [], {'default': "'ul'", 'max_length': '2'}),
'scale_units': ('django.db.models.fields.CharField', [], {'default': "'degwidth'", 'max_length': '20'}),
'scale_upper': ('django.db.models.fields.FloatField', [], {'default': '180', 'null': 'True', 'blank': 'True'}),
'submitted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'null': 'True', 'to': "orm['auth.User']"})
},
'net.tag': {
'Meta': {'object_name': 'Tag'},
'text': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'primary_key': 'True'})
},
'net.taggeduserimage': {
'Meta': {'object_name': 'TaggedUserImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Tag']"}),
'tagger': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'user_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.UserImage']"})
},
'net.tanwcs': {
'Meta': {'object_name': 'TanWCS'},
'cd11': ('django.db.models.fields.FloatField', [], {}),
'cd12': ('django.db.models.fields.FloatField', [], {}),
'cd21': ('django.db.models.fields.FloatField', [], {}),
'cd22': ('django.db.models.fields.FloatField', [], {}),
'crpix1': ('django.db.models.fields.FloatField', [], {}),
'crpix2': ('django.db.models.fields.FloatField', [], {}),
'crval1': ('django.db.models.fields.FloatField', [], {}),
'crval2': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageh': ('django.db.models.fields.FloatField', [], {}),
'imagew': ('django.db.models.fields.FloatField', [], {})
},
'net.userimage': {
'Meta': {'object_name': 'UserImage'},
'comment_receiver': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.CommentReceiver']", 'unique': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'flags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_images'", 'symmetrical': 'False', 'through': "orm['net.FlaggedUserImage']", 'to': "orm['net.Flag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Image']"}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.License']"}),
'original_file_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'publicly_visible': ('django.db.models.fields.CharField', [], {'default': "'y'", 'max_length': '1'}),
'sky_objects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_images'", 'symmetrical': 'False', 'to': "orm['net.SkyObject']"}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_images'", 'to': "orm['net.Submission']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_images'", 'symmetrical': 'False', 'through': "orm['net.TaggedUserImage']", 'to': "orm['net.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_images'", 'null': 'True', 'to': "orm['auth.User']"})
},
'net.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'apikey': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'default_license': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['net.License']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['net']
| gpl-2.0 | -8,568,439,465,542,566,000 | 73.255892 | 203 | 0.540446 | false |
MSEMJEJME/Get-Dumped | renpy/statements.py | 1 | 3307 | # Copyright 2004-2012 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This module contains code to support user-defined statements.
import renpy
# The statement registry. It's a map from tuples giving the prefixes of
# statements to dictionaries giving the methods used for that statement.
registry = { }
parsers = renpy.parser.ParseTrie()
def register(name, parse=None, lint=None, execute=None, predict=None, next=None, scry=None, block=False, init=False): #@ReservedAssignment
name = tuple(name.split())
registry[name] = dict(parse=parse,
lint=lint,
execute=execute,
predict=predict,
next=next,
scry=scry)
# The function that is called to create an ast.UserStatement.
def parse_user_statement(l, loc):
renpy.exports.push_error_handler(l.error)
try:
rv = renpy.ast.UserStatement(loc, l.text, l.subblock)
if not block:
l.expect_noblock(" ".join(name) + " statement")
l.advance()
else:
l.expect_block(" ".join(name) + " statement")
l.advance()
finally:
renpy.exports.pop_error_handler()
if init and not l.init:
rv = renpy.ast.Init(loc, [ rv ], 0)
return rv
renpy.parser.statements.add(name, parse_user_statement)
# The function that is called to get our parse data.
def parse_data(l):
return (name, registry[name]["parse"](l))
parsers.add(name, parse_data)
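# Illustration only -- a typical registration, with hypothetical callback names,
# would be made from a "python early" block in a game script:
#
#   def parse_note(l):
#       return l.rest()
#   def execute_note(text):
#       print(text)
#   renpy.statements.register("note", parse=parse_note, execute=execute_note)
#
# after this call, registry[("note",)] holds the callback dictionary and the
# parse trie can recognize the new "note" statement keyword.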
def parse(node, line, subblock):
block = [ (node.filename, node.linenumber, line, subblock) ]
l = renpy.parser.Lexer(block)
l.advance()
renpy.exports.push_error_handler(l.error)
try:
pf = parsers.parse(l)
if pf is None:
l.error("Could not find user-defined statement at runtime.")
return pf(l)
finally:
renpy.exports.pop_error_handler()
def call(method, parsed, *args, **kwargs):
name, parsed = parsed
method = registry[name].get(method)
if method is None:
return None
return method(parsed, *args, **kwargs)
| gpl-2.0 | -8,841,751,182,915,426,000 | 32.07 | 138 | 0.643786 | false |
Pedram26/Humans-vs-Aliens | HumansAliens.app/Contents/Resources/lib/python2.7/pygame/tests/font_test.py | 1 | 21363 | import sys
import os
if __name__ == '__main__':
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
import unittest
import pygame
from pygame import font as pygame_font # So font can be replaced with ftfont
from pygame.compat import as_unicode, as_bytes, xrange_, filesystem_errors
from pygame.compat import PY_MAJOR_VERSION
UCS_4 = sys.maxunicode > 0xFFFF
def equal_images(s1, s2):
size = s1.get_size()
if s2.get_size() != size:
return False
w, h = size
for x in xrange_(w):
for y in xrange_(h):
if s1.get_at((x, y)) != s2.get_at((x, y)):
return False
return True
class FontModuleTest( unittest.TestCase ):
def setUp(self):
pygame_font.init()
def tearDown(self):
pygame_font.quit()
def test_SysFont(self):
# Can only check that a font object is returned.
fonts = pygame_font.get_fonts()
o = pygame_font.SysFont(fonts[0], 20)
self.failUnless(isinstance(o, pygame_font.FontType))
o = pygame_font.SysFont(fonts[0], 20, italic=True)
self.failUnless(isinstance(o, pygame_font.FontType))
o = pygame_font.SysFont(fonts[0], 20, bold=True)
self.failUnless(isinstance(o, pygame_font.FontType))
o = pygame_font.SysFont('thisisnotafont', 20)
self.failUnless(isinstance(o, pygame_font.FontType))
def test_get_default_font(self):
self.failUnlessEqual(pygame_font.get_default_font(), 'freesansbold.ttf')
def test_get_fonts_returns_something(self):
fnts = pygame_font.get_fonts()
self.failUnless(fnts)
# to test if some files exist...
#def XXtest_has_file_osx_10_5_sdk(self):
# import os
# f = "/Developer/SDKs/MacOSX10.5.sdk/usr/X11/include/ft2build.h"
# self.assertEqual(os.path.exists(f), True)
#def XXtest_has_file_osx_10_4_sdk(self):
# import os
# f = "/Developer/SDKs/MacOSX10.4u.sdk/usr/X11R6/include/ft2build.h"
# self.assertEqual(os.path.exists(f), True)
def test_get_fonts(self):
fnts = pygame_font.get_fonts()
if not fnts:
raise Exception(repr(fnts))
self.failUnless(fnts)
if (PY_MAJOR_VERSION >= 3):
# For Python 3.x, names will always be unicode strings.
name_types = (str,)
else:
# For Python 2.x, names may be either unicode or ascii strings.
name_types = (str, unicode)
for name in fnts:
# note, on ubuntu 2.6 they are all unicode strings.
self.failUnless(isinstance(name, name_types), name)
self.failUnless(name.islower(), name)
self.failUnless(name.isalnum(), name)
def test_get_init(self):
self.failUnless(pygame_font.get_init())
pygame_font.quit()
self.failIf(pygame_font.get_init())
def test_init(self):
pygame_font.init()
def test_match_font_all_exist(self):
fonts = pygame_font.get_fonts()
# Ensure all listed fonts are in fact available, and the returned file
# name is a full path.
for font in fonts:
path = pygame_font.match_font(font)
self.failIf(path is None)
self.failUnless(os.path.isabs(path))
def test_match_font_bold(self):
fonts = pygame_font.get_fonts()
# Look for a bold font.
for font in fonts:
if pygame_font.match_font(font, bold=True) is not None:
break
else:
self.fail()
def test_match_font_italic(self):
fonts = pygame_font.get_fonts()
# Look for an italic font.
for font in fonts:
if pygame_font.match_font(font, italic=True) is not None:
break
else:
self.fail()
def test_match_font_comma_separated(self):
fonts = pygame_font.get_fonts()
# Check for not found.
self.failUnless(pygame_font.match_font('thisisnotafont') is None)
# Check comma separated list.
names = ','.join(['thisisnotafont', fonts[-1], 'anothernonfont'])
self.failIf(pygame_font.match_font(names) is None)
names = ','.join(['thisisnotafont1', 'thisisnotafont2', 'thisisnotafont3'])
self.failUnless(pygame_font.match_font(names) is None)
def test_quit(self):
pygame_font.quit()
class FontTest(unittest.TestCase):
def setUp(self):
pygame_font.init()
def tearDown(self):
pygame_font.quit()
def test_render_args(self):
screen = pygame.display.set_mode((600, 400))
rect = screen.get_rect()
f = pygame_font.Font(None, 20)
screen.fill((10, 10, 10))
font_surface = f.render(" bar", True, (0, 0, 0), (255, 255, 255))
font_rect = font_surface.get_rect()
font_rect.topleft = rect.topleft
self.assertTrue(font_surface)
screen.blit(font_surface, font_rect, font_rect)
pygame.display.update()
self.assertEqual(tuple(screen.get_at((0,0)))[:3], (255, 255, 255))
self.assertEqual(tuple(screen.get_at(font_rect.topleft))[:3], (255, 255, 255))
# If we don't have a real display, don't do this test.
# Transparent background doesn't seem to work without a real video card.
if os.environ.get('SDL_VIDEODRIVER') != 'dummy':
screen.fill((10, 10, 10))
font_surface = f.render(" bar", True, (0, 0, 0), None)
font_rect = font_surface.get_rect()
font_rect.topleft = rect.topleft
self.assertTrue(font_surface)
screen.blit(font_surface, font_rect, font_rect)
pygame.display.update()
self.assertEqual(tuple(screen.get_at((0,0)))[:3], (10, 10, 10))
self.assertEqual(tuple(screen.get_at(font_rect.topleft))[:3], (10, 10, 10))
screen.fill((10, 10, 10))
font_surface = f.render(" bar", True, (0, 0, 0))
font_rect = font_surface.get_rect()
font_rect.topleft = rect.topleft
self.assertTrue(font_surface)
screen.blit(font_surface, font_rect, font_rect)
pygame.display.update(rect)
self.assertEqual(tuple(screen.get_at((0,0)))[:3], (10, 10, 10))
self.assertEqual(tuple(screen.get_at(font_rect.topleft))[:3], (10, 10, 10))
class FontTypeTest( unittest.TestCase ):
def setUp(self):
pygame_font.init()
def tearDown(self):
pygame_font.quit()
def test_get_ascent(self):
# Checking ascent would need a custom test font to do properly.
f = pygame_font.Font(None, 20)
ascent = f.get_ascent()
self.failUnless(isinstance(ascent, int))
self.failUnless(ascent > 0)
s = f.render("X", False, (255, 255, 255))
self.failUnless(s.get_size()[1] > ascent)
def test_get_descent(self):
# Checking descent would need a custom test font to do properly.
f = pygame_font.Font(None, 20)
descent = f.get_descent()
self.failUnless(isinstance(descent, int))
self.failUnless(descent < 0)
def test_get_height(self):
# Checking height would need a custom test font to do properly.
f = pygame_font.Font(None, 20)
height = f.get_height()
self.failUnless(isinstance(height, int))
self.failUnless(height > 0)
s = f.render("X", False, (255, 255, 255))
self.failUnless(s.get_size()[1] == height)
def test_get_linesize(self):
# Checking linesize would need a custom test font to do properly.
# Questions: How do linesize, height and descent relate?
f = pygame_font.Font(None, 20)
linesize = f.get_linesize()
self.failUnless(isinstance(linesize, int))
self.failUnless(linesize > 0)
def test_metrics(self):
# Ensure bytes decoding works correctly. Can only compare results
# with unicode for now.
f = pygame_font.Font(None, 20);
um = f.metrics(as_unicode("."))
bm = f.metrics(as_bytes("."))
self.assert_(len(um) == 1)
self.assert_(len(bm) == 1)
self.assert_(um[0] is not None)
self.assert_(um == bm)
u = as_unicode(r"\u212A")
b = u.encode("UTF-16")[2:] # Keep byte order consistent. [2:] skips BOM
bm = f.metrics(b)
self.assert_(len(bm) == 2)
try:
um = f.metrics(u)
except pygame.error:
pass
else:
self.assert_(len(um) == 1)
self.assert_(bm[0] != um[0])
self.assert_(bm[1] != um[0])
if UCS_4:
u = as_unicode(r"\U00013000")
bm = f.metrics(u)
self.assert_(len(bm) == 1 and bm[0] is None)
return # unfinished
# The documentation is useless here. How large a list?
# How do list positions relate to character codes?
# What about unicode characters?
# __doc__ (as of 2008-08-02) for pygame_font.Font.metrics:
# Font.metrics(text): return list
# Gets the metrics for each character in the pased string.
#
# The list contains tuples for each character, which contain the
# minimum X offset, the maximum X offset, the minimum Y offset, the
# maximum Y offset and the advance offset (bearing plus width) of the
# character. [(minx, maxx, miny, maxy, advance), (minx, maxx, miny,
# maxy, advance), ...]
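# Illustration only (not exercised by this unfinished test): iterating the
# metrics of a rendered string with a Font instance f might look like
#
#   for ch, (minx, maxx, miny, maxy, advance) in zip("Ab", f.metrics("Ab")):
#       print(ch, advance)
#
# where an entry can be None when the font has no glyph for that character,
# as the UCS-4 check above relies on.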
self.fail()
def test_render(self):
"""
"""
f = pygame_font.Font(None, 20)
s = f.render("foo", True, [0, 0, 0], [255, 255, 255])
s = f.render("xxx", True, [0, 0, 0], [255, 255, 255])
s = f.render("", True, [0, 0, 0], [255, 255, 255])
s = f.render("foo", False, [0, 0, 0], [255, 255, 255])
s = f.render("xxx", False, [0, 0, 0], [255, 255, 255])
s = f.render("xxx", False, [0, 0, 0])
s = f.render(" ", False, [0, 0, 0])
s = f.render(" ", False, [0, 0, 0], [255, 255, 255])
# null text should be 1 pixel wide.
s = f.render("", False, [0, 0, 0], [255, 255, 255])
self.assertEqual(s.get_size()[0], 1)
# None text should be 1 pixel wide.
s = f.render(None, False, [0, 0, 0], [255, 255, 255])
self.assertEqual(s.get_size()[0], 1)
# Non-text should raise a TypeError.
self.assertRaises(TypeError, f.render,
[], False, [0, 0, 0], [255, 255, 255])
self.assertRaises(TypeError, f.render,
1, False, [0, 0, 0], [255, 255, 255])
# is background transparent for antialiasing?
s = f.render(".", True, [255, 255, 255])
self.failUnlessEqual(s.get_at((0, 0))[3], 0)
# is Unicode and bytes encoding correct?
# Cannot really test if the correct characters are rendered, but
# at least can assert the encodings differ.
su = f.render(as_unicode("."), False, [0, 0, 0], [255, 255, 255])
sb = f.render(as_bytes("."), False, [0, 0, 0], [255, 255, 255])
self.assert_(equal_images(su, sb))
u = as_unicode(r"\u212A")
b = u.encode("UTF-16")[2:] # Keep byte order consistent. [2:] skips BOM
sb = f.render(b, False, [0, 0, 0], [255, 255, 255])
try:
su = f.render(u, False, [0, 0, 0], [255, 255, 255])
except pygame.error:
pass
else:
self.assert_(not equal_images(su, sb))
# If the font module is SDL_ttf based, then it can only support UCS-2;
# it will raise an exception for an out-of-range UCS-4 code point.
if UCS_4 and not hasattr(f, 'ucs4'):
ucs_2 = as_unicode(r"\uFFEE")
s = f.render(ucs_2, False, [0, 0, 0], [255, 255, 255])
ucs_4 = as_unicode(r"\U00010000")
self.assertRaises(UnicodeError, f.render,
ucs_4, False, [0, 0, 0], [255, 255, 255])
b = as_bytes("ab\x00cd")
self.assertRaises(ValueError, f.render, b, 0, [0, 0, 0])
u = as_unicode("ab\x00cd")
self.assertRaises(ValueError, f.render, b, 0, [0, 0, 0])
# __doc__ (as of 2008-08-02) for pygame_font.Font.render:
# Font.render(text, antialias, color, background=None): return Surface
# draw text on a new Surface
#
# This creates a new Surface with the specified text rendered on it.
# Pygame provides no way to directly draw text on an existing Surface:
# instead you must use Font.render() to create an image (Surface) of
# the text, then blit this image onto another Surface.
#
# The text can only be a single line: newline characters are not
# rendered. The antialias argument is a boolean: if true the
# characters will have smooth edges. The color argument is the color
# of the text [e.g.: (0,0,255) for blue]. The optional background
# argument is a color to use for the text background. If no background
# is passed the area outside the text will be transparent.
#
# The Surface returned will be of the dimensions required to hold the
# text. (the same as those returned by Font.size()). If an empty
# string is passed for the text, a blank surface will be returned that
# is one pixel wide and the height of the font.
#
# Depending on the type of background and antialiasing used, this
# returns different types of Surfaces. For performance reasons, it is
# good to know what type of image will be used. If antialiasing is not
# used, the return image will always be an 8bit image with a two color
# palette. If the background is transparent a colorkey will be set.
# Antialiased images are rendered to 24-bit RGB images. If the
# background is transparent a pixel alpha will be included.
#
# Optimization: if you know that the final destination for the text
# (on the screen) will always have a solid background, and the text is
# antialiased, you can improve performance by specifying the
# background color. This will cause the resulting image to maintain
# transparency information by colorkey rather than (much less
# efficient) alpha values.
#
# If you render '\n' an unknown char will be rendered. Usually a
# rectangle. Instead you need to handle new lines yourself.
#
# Font rendering is not thread safe: only a single thread can render
# text any time.
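# Illustration only, summarizing the documented behaviour: with a display
# Surface named screen, typical usage would be
#
#   font = pygame_font.Font(None, 24)
#   text_surf = font.render("hello", True, (255, 255, 255), (0, 0, 0))
#   screen.blit(text_surf, (10, 10))
#
# render() always returns a new Surface sized as Font.size() reports; it never
# draws onto an existing Surface directly.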
def test_set_bold(self):
f = pygame_font.Font(None, 20)
self.failIf(f.get_bold())
f.set_bold(True)
self.failUnless(f.get_bold())
f.set_bold(False)
self.failIf(f.get_bold())
def test_set_italic(self):
f = pygame_font.Font(None, 20)
self.failIf(f.get_italic())
f.set_italic(True)
self.failUnless(f.get_italic())
f.set_italic(False)
self.failIf(f.get_italic())
def test_set_underline(self):
f = pygame_font.Font(None, 20)
self.failIf(f.get_underline())
f.set_underline(True)
self.failUnless(f.get_underline())
f.set_underline(False)
self.failIf(f.get_underline())
def test_size(self):
f = pygame_font.Font(None, 20)
text = as_unicode("Xg")
size = f.size(text)
w, h = size
self.assert_(isinstance(w, int) and isinstance(h, int))
s = f.render(text, False, (255, 255, 255))
self.assert_(size == s.get_size())
btext = text.encode("ascii")
self.assert_(f.size(btext) == size)
text = as_unicode(r"\u212A")
btext = text.encode("UTF-16")[2:] # Keep the byte order consistent.
bsize = f.size(btext)
try:
size = f.size(text)
except pygame.error:
pass
else:
self.assert_(size != bsize)
def test_font_file_not_found(self):
# As per a bug reported by Bo Jangeborg on the pygame-user mailing list,
# http://www.mail-archive.com/[email protected]/msg11675.html
pygame_font.init()
self.failUnlessRaises(IOError,
pygame_font.Font,
'some-fictional-font.ttf', 20)
def test_load_from_file(self):
font_name = pygame_font.get_default_font()
font_path = os.path.join(os.path.split(pygame.__file__)[0],
pygame_font.get_default_font())
f = pygame_font.Font(font_path, 20)
def test_load_from_file_obj(self):
font_name = pygame_font.get_default_font()
font_path = os.path.join(os.path.split(pygame.__file__)[0],
pygame_font.get_default_font())
f = open(font_path, "rb")
font = pygame_font.Font(f, 20)
def test_load_default_font_filename(self):
# In font_init, a special case is when the filename argument is
# identical to the default font file name.
f = pygame_font.Font(pygame_font.get_default_font(), 20)
def test_load_from_file_unicode(self):
base_dir = os.path.dirname(pygame.__file__)
font_path = os.path.join(base_dir, pygame_font.get_default_font())
if os.path.sep == '\\':
font_path = font_path.replace('\\', '\\\\')
ufont_path = as_unicode(font_path)
f = pygame_font.Font(ufont_path, 20)
def test_load_from_file_bytes(self):
font_path = os.path.join(os.path.split(pygame.__file__)[0],
pygame_font.get_default_font())
filesystem_encoding = sys.getfilesystemencoding()
try:
font_path = font_path.decode(filesystem_encoding,
filesystem_errors)
except AttributeError:
pass
bfont_path = font_path.encode(filesystem_encoding,
filesystem_errors)
f = pygame_font.Font(bfont_path, 20)
class VisualTests( unittest.TestCase ):
__tags__ = ['interactive']
screen = None
aborted = False
def setUp(self):
if self.screen is None:
pygame.init()
self.screen = pygame.display.set_mode((600, 200))
self.screen.fill((255, 255, 255))
pygame.display.flip()
self.f = pygame_font.Font(None, 32)
def abort(self):
if self.screen is not None:
pygame.quit()
self.aborted = True
def query(self,
bold=False, italic=False, underline=False, antialiase=False):
if self.aborted:
return False
spacing = 10
offset = 20
y = spacing
f = self.f
screen = self.screen
screen.fill((255, 255, 255))
pygame.display.flip()
if not (bold or italic or underline or antialiase):
text = "normal"
else:
modes = []
if bold:
modes.append("bold")
if italic:
modes.append("italic")
if underline:
modes.append("underlined")
if antialiase:
modes.append("antialiased")
text = "%s (y/n):" % ('-'.join(modes),)
f.set_bold(bold)
f.set_italic(italic)
f.set_underline(underline)
s = f.render(text, antialiase, (0, 0, 0))
screen.blit(s, (offset, y))
y += s.get_size()[1] + spacing
f.set_bold(False)
f.set_italic(False)
f.set_underline(False)
s = f.render("(some comparison text)", False, (0, 0, 0))
screen.blit(s, (offset, y))
pygame.display.flip()
while 1:
for evt in pygame.event.get():
if evt.type == pygame.KEYDOWN:
if evt.key == pygame.K_ESCAPE:
self.abort()
return False
if evt.key == pygame.K_y:
return True
if evt.key == pygame.K_n:
return False
if evt.type == pygame.QUIT:
self.abort()
return False
def test_bold(self):
self.failUnless(self.query(bold=True))
def test_italic(self):
self.failUnless(self.query(italic=True))
def test_underline(self):
self.failUnless(self.query(underline=True))
def test_antialiase(self):
self.failUnless(self.query(antialiase=True))
def test_bold_antialiase(self):
self.failUnless(self.query(bold=True, antialiase=True))
def test_italic_underline(self):
self.failUnless(self.query(italic=True, underline=True))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 5,488,330,198,394,416,000 | 35.896373 | 95 | 0.562795 | false |
RayRuizhiLiao/ITK_4D | Modules/ThirdParty/pygccxml/src/pygccxml/parser/etree_scanner.py | 1 | 2166 | # Copyright 2014-2016 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import warnings
from . import scanner
# keep py2exe happy
import xml.etree.ElementTree
import xml.etree.cElementTree as ElementTree
class etree_saxifier_t(object):
def __init__(self, etree, handler):
"""
Deprecated since 1.8.0. Will be removed in 1.9.0.
"""
warnings.warn("etree_saxifier_t is deprecated.\n", DeprecationWarning)
self.__root_elem = etree.getroot()
self.__handler = handler
def saxify(self):
self.__handler.startDocument()
self.__recursive_saxify(self.__root_elem)
self.__handler.endDocument()
def __recursive_saxify(self, element):
self.__handler.startElement(element.tag, element.attrib)
for e in element:
self.__recursive_saxify(e)
self.__handler.endElement(element.tag)
class etree_scanner_t(scanner.scanner_t):
def __init__(self, xml_file, decl_factory, *args):
"""
Deprecated since 1.8.0. Will be removed in 1.9.0.
"""
warnings.warn(
"etree_scanner_t is deprecated.\n" +
"Please use ietree_scanner_t instead.", DeprecationWarning)
scanner.scanner_t.__init__(self, xml_file, decl_factory, *args)
def read(self):
tree = ElementTree.parse(self.xml_file)
saxifier = etree_saxifier_t(tree, self)
saxifier.saxify()
class ietree_scanner_t(scanner.scanner_t):
def __init__(self, xml_file, decl_factory, *args):
scanner.scanner_t.__init__(self, xml_file, decl_factory, *args)
def read(self):
context = ElementTree.iterparse(
self.xml_file,
events=("start", "end"))
for event, elem in context:
if event == 'start':
self.startElement(elem.tag, elem.attrib)
else:
self.endElement(elem.tag)
elem.clear()
self.endDocument()
| apache-2.0 | -8,333,672,104,491,799,000 | 28.507042 | 78 | 0.591874 | false |
shiningdesign/universal_tool_template.py | _ARCHIVE/universal_tool_template_1020.py | 1 | 84756 | # Univeral Tool Template v011.0
tpl_ver = 10.2
tpl_date = 180220
print("tpl_ver: {0}-{1}".format(tpl_ver, tpl_date))
# by ying - https://github.com/shiningdesign/universal_tool_template.py
import importlib
import sys
# ---- hostMode ----
hostMode = ''
hostModeList = [
['maya', {'mui':'maya.OpenMayaUI', 'cmds':'maya.cmds'} ],
['nuke', {'nuke':'nuke', 'nukescripts':'nukescripts'} ],
['fusion', {'fs':'fusionscript'} ],
['houdini', {'hou':'hou'} ],
['blender', {'bpy':'bpy'} ],
['npp', {'Npp':'Npp'} ],
]
for name, libs in hostModeList:
try:
for x in libs.keys():
globals()[x] = importlib.import_module(libs[x])
hostMode = name
break
except ImportError:
pass
if hostMode == '':
hostMode = 'desktop'
print('Host: {0}'.format(hostMode))
# ---- qtMode ----
qtMode = 0 # 0: PySide; 1 : PyQt, 2: PySide2, 3: PyQt5
qtModeList = ('PySide', 'PyQt4', 'PySide2', 'PyQt5')
try:
from PySide import QtGui, QtCore
import PySide.QtGui as QtWidgets
qtMode = 0
if hostMode == "maya":
import shiboken
except ImportError:
try:
from PySide2 import QtCore, QtGui, QtWidgets
qtMode = 2
if hostMode == "maya":
import shiboken2 as shiboken
except ImportError:
try:
from PyQt4 import QtGui,QtCore
import PyQt4.QtGui as QtWidgets
import sip
qtMode = 1
except ImportError:
from PyQt5 import QtGui,QtCore,QtWidgets
import sip
qtMode = 3
print('Qt: {0}'.format(qtModeList[qtMode]))
# ---- pyMode ----
# python 2,3 support unicode function
try:
UNICODE_EXISTS = bool(type(unicode))
except NameError:
# lambda s: str(s) # this works for function but not for class check
unicode = str
if sys.version_info[:3][0]>=3:
reload = importlib.reload # add reload
pyMode = '.'.join([ str(n) for n in sys.version_info[:3] ])
print("Python: {0}".format(pyMode))
# ---- osMode ----
osMode = 'other'
if sys.platform in ['win32','win64']:
osMode = 'win'
elif sys.platform == 'darwin':
osMode = 'mac'
elif sys.platform == 'linux2':
osMode = 'linux'
print("OS: {0}".format(osMode))
# ---- template module list ----
import os # for path and language code
from functools import partial # for partial function creation
import json # for ascii data output
if sys.version_info[:3][0]<3:
import cPickle # for binary data output
else:
import _pickle as cPickle
import re # for name pattern
import ctypes # for windows instance detection
import subprocess # for cmd call
#=======================================
# UniversalToolUI template class
#=======================================
class UniversalToolUI(QtWidgets.QMainWindow):
def __init__(self, parent=None, mode=0):
QtWidgets.QMainWindow.__init__(self, parent)
#------------------------------
# class variables
#------------------------------
self.version = '0.1'
self.date = '2017.01.01'
self.log = 'no version log in user class'
self.help = 'no help guide in user class'
self.uiList={} # for ui obj storage
self.memoData = {} # key based variable data storage
self.memoData['font_size_default'] = QtGui.QFont().pointSize()
self.memoData['font_size'] = self.memoData['font_size_default']
self.memoData['last_export'] = ''
self.memoData['last_import'] = ''
self.name = self.__class__.__name__
self.location = ''
if getattr(sys, 'frozen', False):
# frozen - cx_freeze
self.location = sys.executable
else:
# unfrozen
self.location = os.path.realpath(sys.modules[self.__class__.__module__].__file__)
self.iconPath = os.path.join(os.path.dirname(self.location),'icons',self.name+'.png')
self.iconPix = QtGui.QPixmap(self.iconPath)
self.icon = QtGui.QIcon(self.iconPath)
self.fileType='.{0}_EXT'.format(self.name)
#------------------------------
# core function variable
#------------------------------
self.qui_core_dict = {
'vbox': 'QVBoxLayout','hbox':'QHBoxLayout','grid':'QGridLayout', 'form':'QFormLayout',
'split': 'QSplitter', 'grp':'QGroupBox', 'tab':'QTabWidget',
'btn':'QPushButton', 'btnMsg':'QPushButton', 'label':'QLabel', 'input':'QLineEdit', 'check':'QCheckBox', 'choice':'QComboBox',
'txt': 'QTextEdit',
'list': 'QListWidget', 'tree': 'QTreeWidget', 'table': 'QTableWidget',
'space': 'QSpacerItem',
'menu' : 'QMenu', 'menubar' : 'QMenuBar',
}
self.qui_user_dict = {}
def setupStyle(self):
# global app style setting for desktop
if hostMode == "desktop":
QtWidgets.QApplication.setStyle(QtWidgets.QStyleFactory.create('Cleanlooks'))
self.setStyleSheet("QLineEdit:disabled{background-color: gray;}")
def setupMenu(self):
# global help menu
if 'help_menu' in self.uiList.keys():
# for info review
self.qui_atn('helpHostMode_atnNone','Host Mode - {}'.format(hostMode),'Host Running.')
self.qui_atn('helpPyMode_atnNone','Python Mode - {}'.format(pyMode),'Python Library Running.')
self.qui_atn('helpQtMode_atnNone','Qt Mode - {}'.format(qtModeList[qtMode]),'Qt Library Running.')
self.qui_atn('helpTemplate_atnNone','Universal Tool Template - {0}.{1}'.format(tpl_ver, tpl_date),'based on Universal Tool Template v{0} by Shining Ying - https://github.com/shiningdesign/universal{1}tool{1}template.py'.format(tpl_ver,'_'))
self.uiList['helpGuide_msg'] = self.help
self.qui_atn('helpGuide_atnMsg','Usage Guide','How to Usge Guide.')
self.uiList['helpLog_msg'] = self.log
self.qui_atn('helpLog_atnMsg','About v{0} - {1}'.format(self.version, self.date),'Vesion Log.')
self.qui_menu('helpHostMode_atnNone | helpPyMode_atnNone | helpQtMode_atnNone | helpTemplate_atnNone | _ | helpGuide_atnMsg | helpLog_atnMsg', 'help_menu')
def setupWin(self):
self.setWindowTitle(self.name + " - v" + self.version + " - host: " + hostMode)
self.setWindowIcon(self.icon)
self.drag_position=QtGui.QCursor.pos() # initial win drag position
def setupUI(self, layout='grid'):
main_widget = QtWidgets.QWidget()
self.setCentralWidget(main_widget)
main_layout = self.quickLayout(layout, 'main_layout') # grid for auto fill window size
main_widget.setLayout(main_layout)
def Establish_Connections(self):
for ui_name in self.uiList.keys():
prefix = ui_name.rsplit('_', 1)[0]
if ui_name.endswith('_btn'):
self.uiList[ui_name].clicked.connect(getattr(self, prefix+"_action", partial(self.default_action,ui_name)))
elif ui_name.endswith('_atn'):
self.uiList[ui_name].triggered.connect(getattr(self, prefix+"_action", partial(self.default_action,ui_name)))
elif ui_name.endswith('_btnMsg'):
self.uiList[ui_name].clicked.connect(getattr(self, prefix+"_message", partial(self.default_message,ui_name)))
elif ui_name.endswith('_atnMsg'):
self.uiList[ui_name].triggered.connect(getattr(self, prefix+"_message", partial(self.default_message,ui_name)))
#=======================================
# ui response functions
#=======================================
def default_action(self, ui_name, *argv):
print("No action defined for this UI element: "+ui_name)
def default_message(self, ui_name):
prefix = ui_name.rsplit('_', 1)[0]
msgName = prefix+"_msg"
msg_txt = msgName + " is not defined in uiList."
if msgName in self.uiList:
msg_txt = self.uiList[msgName]
self.quickMsg(msg_txt)
def default_menu_call(self, ui_name, point):
if ui_name in self.uiList.keys() and ui_name+'_menu' in self.uiList.keys():
self.uiList[ui_name+'_menu'].exec_(self.uiList[ui_name].mapToGlobal(point))
#=======================================
# ui feedback functions
#=======================================
def ____ui_feedback_functions____():
pass
def quickInfo(self, info):
self.statusBar().showMessage(info)
def quickMsg(self, msg, block=1):
tmpMsg = QtWidgets.QMessageBox(self) # for simple msg that needs no translation
tmpMsg.setWindowTitle("Info")
tmpMsg.setText(msg)
if block == 0:
tmpMsg.setWindowModality( QtCore.Qt.NonModal )
tmpMsg.addButton("OK",QtWidgets.QMessageBox.YesRole)
if block:
tmpMsg.exec_()
else:
tmpMsg.show()
def quickMsgAsk(self, msg, mode=0, choice=[]):
# getItem, getInteger, getDouble, getText
modeOpt = (QtWidgets.QLineEdit.Normal, QtWidgets.QLineEdit.NoEcho, QtWidgets.QLineEdit.Password, QtWidgets.QLineEdit.PasswordEchoOnEdit)
# option: QtWidgets.QInputDialog.UseListViewForComboBoxItems
if len(choice)==0:
txt, ok = QtWidgets.QInputDialog.getText(self, "Input", msg, modeOpt[mode])
return (unicode(txt), ok)
else:
txt, ok = QtWidgets.QInputDialog.getItem(self, "Input", msg, choice, 0, 0)
return (unicode(txt), ok)
def quickModKeyAsk(self):
modifiers = QtWidgets.QApplication.queryKeyboardModifiers()
clickMode = 0 # basic mode
if modifiers == QtCore.Qt.ControlModifier:
clickMode = 1 # ctrl
elif modifiers == QtCore.Qt.ShiftModifier:
clickMode = 2 # shift
elif modifiers == QtCore.Qt.AltModifier:
clickMode = 3 # alt
elif modifiers == QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier | QtCore.Qt.AltModifier:
clickMode = 4 # ctrl+shift+alt
elif modifiers == QtCore.Qt.ControlModifier | QtCore.Qt.AltModifier:
clickMode = 5 # ctrl+alt
elif modifiers == QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier:
clickMode = 6 # ctrl+shift
elif modifiers == QtCore.Qt.AltModifier | QtCore.Qt.ShiftModifier:
clickMode = 7 # alt+shift
return clickMode
def quickFileAsk(self, type, ext=None, dir=None):
if ext == None:
ext = "RAW data (*.json);;RAW binary data (*.dat);;Format Txt (*{0});;AllFiles (*.*)".format(self.fileType)
elif isinstance(ext, (str,unicode)):
if ';;' not in ext:
if ext == '':
ext = 'AllFiles (*.*)'
else:
ext = self.extFormat(ext) + ';;AllFiles (*.*)'
elif isinstance(ext, (tuple,list)):
if len(ext) > 0 and isinstance(ext[0], (tuple,list)):
tmp_list = [self.extFormat(x) for x in ext]
tmp_list.append('AllFiles (*.*)')
ext = ';;'.join(tmp_list)
else:
ext = ';;'.join([self.extFormat(x) for x in ext].append('AllFiles(*.*)'))
elif isinstance(ext, dict):
tmp_list = [self.extFormat(x) for x in ext.items()]
tmp_list.append('AllFiles (*.*)')
ext = ';;'.join(tmp_list)
else:
ext = "AllFiles (*.*)"
file = ''
if type == 'export':
if dir == None:
dir = self.memoData['last_export']
file = QtWidgets.QFileDialog.getSaveFileName(self, "Save File",dir,ext)
elif type == 'import':
if dir == None:
dir = self.memoData['last_import']
file = QtWidgets.QFileDialog.getOpenFileName(self, "Open File",dir,ext)
if isinstance(file, (list, tuple)):
file = file[0] # for deal with pyside case
else:
file = unicode(file) # for deal with pyqt case
# save last dir in memoData
if file != '':
if type == 'export':
self.memoData['last_export'] = os.path.dirname(file) #QFileInfo().path()
elif type == 'import':
self.memoData['last_import'] = os.path.dirname(file)
return file
def extFormat(self, ext):
if isinstance(ext, (tuple,list)):
ext = '{0} (*.{1})'.format(ext[1],ext[0])
else:
if ext.startswith('.'):
ext = ext[1:]
ext = '{0} (*.{0})'.format(ext)
return ext
def quickFolderAsk(self):
return unicode(QtWidgets.QFileDialog.getExistingDirectory(self, "Select Directory"))
def openFolder(self, folderPath):
if os.path.isfile(folderPath):
folderPath = os.path.dirname(folderPath)
if os.path.isdir(folderPath):
cmd_list = None
if sys.platform == 'darwin':
cmd_list = ['open', '--', folderPath]
elif sys.platform == 'linux2':
cmd_list = ['xdg-open', '--', folderPath]
elif sys.platform in ['win32','win64']:
cmd_list = ['explorer', folderPath.replace('/','\\')]
if cmd_list != None:
try:
subprocess.check_call(cmd_list)
except subprocess.CalledProcessError:
pass # handle errors in the called executable
except OSError:
pass # executable not found
def openFile(self, filePath):
if sys.platform in ['win32','win64']:
os.startfile(filePath)
elif sys.platform == 'darwin':
subprocess.call(['open', filePath]) # open with the default application on macOS
elif sys.platform == 'linux2':
subprocess.call(['xdg-open', filePath]) # open with the default application on Linux
def newFolder(self, parentPath, name=None):
created = 0
if name == None:
name, ok = self.quickMsgAsk('Enter the folder name:')
if not ok or name=='':
return
create_path = os.path.join(parentPath, name)
if os.path.isdir(create_path):
self.quickMsg('Already Exists')
else:
try:
os.makedirs(create_path)
created = 1
except OSError:
self.quickMsg('Error on creation user data folder')
return created
#=======================================
# ui info functions
#=======================================
def ____ui_info_functions____():
pass
def input_text(self, input_name, msg=None):
name = unicode(self.uiList[input_name].text())
if name == '':
print("Please define the name. {0}".format(msg))
return
return name
def input_int(self, input_name, min=None, max=None, msg=None):
input_txt = str(self.uiList[input_name].text())
result = None
# int valid
if not input_txt.isdigit():
print("Please enter a valid int. {0}".format(msg))
return
result = int(input_txt)
# min
if min != None:
if result < min:
print("Please enter a valid int number >= {0}. {1}".format(min, msg))
return
# max
if max != None:
if result > max:
print("Please enter a valid int number <= {0}. {1}".format(max, msg))
return
return result
def input_float(self, input_name, min=None, max=None, msg=None):
input_txt = str(self.uiList[input_name].text())
result = None
try:
result = float(input_txt)
except (ValueError, TypeError):
return
# min
if min != None:
if result < min:
print("Please enter a valid int number >= {0}. {1}".format(min, msg))
return
# max
if max != None:
if result > max:
print("Please enter a valid int number <= {0}. {1}".format(max, msg))
return
return result
def input_choice(self, ui_name):
if ui_name in self.uiList.keys():
return self.uiList[ui_name].currentIndex()
else:
return
def output_text(self, ui_name, text):
if ui_name in self.uiList.keys():
self.uiList[ui_name].setText(text)
#=======================================
# file data functions
#=======================================
def ____file_functions____():
pass
def readDataFile(self,file,binary=0):
with open(file) as f:
if binary == 0:
data = json.load(f)
else:
data = cPickle.load(f)
return data
def writeDataFile(self,data,file,binary=0):
with open(file, 'w') as f:
if binary == 0:
json.dump(data, f)
else:
cPickle.dump(data, f)
def readTextFile(self, file):
with open(file) as f:
txt = f.read()
return txt
def writeTextFile(self, txt, file):
with open(file, 'w') as f:
f.write(txt)
def dict_merge(self, default_dict, extra_dict, addKey=0):
# dictionary merge, with optional adding extra data from extra_dict
new_dict = {}
for key in default_dict.keys():
if not isinstance( default_dict[key], dict ):
# value case
if key in extra_dict.keys():
is_same_text_type = isinstance(extra_dict[key], (str,unicode)) and isinstance(default_dict[key], (str,unicode))
is_same_non_text_type = type(extra_dict[key]) is type(default_dict[key])
if is_same_text_type or is_same_non_text_type:
print('use config file value for key: '+key)
new_dict[key] = extra_dict[key]
else:
new_dict[key] = default_dict[key]
else:
new_dict[key] = default_dict[key]
else:
# dictionary case
if key in extra_dict.keys() and isinstance( extra_dict[key], dict ):
new_dict[key] = self.dict_merge( default_dict[key], extra_dict[key], addKey )
else:
new_dict[key] = default_dict[key]
# optional, add additional keys
if addKey == 1:
for key in [ x for x in extra_dict.keys() if x not in default_dict.keys() ]:
new_dict[key] = extra_dict[key]
return new_dict
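    # behaviour sketch for dict_merge (the values below are made up):
    #   default = {'root_name': 'root', 'opt': {'size': 1}}
    #   extra   = {'root_name': 'custom', 'opt': {'size': 2}, 'new_key': 3}
    #   dict_merge(default, extra)           -> {'root_name': 'custom', 'opt': {'size': 2}}
    #   dict_merge(default, extra, addKey=1) -> same result, plus 'new_key': 3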
#=======================================
# ui text functions
#=======================================
def ____ui_text_functions____():
pass
def fontNormal_action(self):
self.memoData['font_size'] = self.memoData['font_size_default']
self.setStyleSheet("QLabel,QPushButton { font-size: %dpt;}" % self.memoData['font_size'])
def fontUp_action(self):
self.memoData['font_size'] += 2
self.setStyleSheet("QLabel,QPushButton { font-size: %dpt;}" % self.memoData['font_size'])
def fontDown_action(self):
if self.memoData['font_size'] >= self.memoData['font_size_default']:
self.memoData['font_size'] -= 2
self.setStyleSheet("QLabel,QPushButton { font-size: %dpt;}" % self.memoData['font_size'])
def loadLang(self):
# store default language
self.memoData['lang']={}
self.memoData['lang']['default']={}
for ui_name in self.uiList.keys():
ui_element = self.uiList[ui_name]
if isinstance(ui_element, (QtWidgets.QLabel, QtWidgets.QPushButton, QtWidgets.QAction, QtWidgets.QCheckBox) ):
# uiType: QLabel, QPushButton, QAction(menuItem), QCheckBox
self.memoData['lang']['default'][ui_name] = unicode(ui_element.text())
elif isinstance(ui_element, (QtWidgets.QGroupBox, QtWidgets.QMenu) ):
# uiType: QMenu, QGroupBox
self.memoData['lang']['default'][ui_name] = unicode(ui_element.title())
elif isinstance(ui_element, QtWidgets.QTabWidget):
# uiType: QTabWidget
tabCnt = ui_element.count()
tabNameList = []
for i in range(tabCnt):
tabNameList.append(unicode(ui_element.tabText(i)))
self.memoData['lang']['default'][ui_name]=';'.join(tabNameList)
elif isinstance(ui_element, QtWidgets.QComboBox):
# uiType: QComboBox
itemCnt = ui_element.count()
itemNameList = []
for i in range(itemCnt):
itemNameList.append(unicode(ui_element.itemText(i)))
self.memoData['lang']['default'][ui_name]=';'.join(itemNameList)
elif isinstance(ui_element, QtWidgets.QTreeWidget):
# uiType: QTreeWidget
labelCnt = ui_element.headerItem().columnCount()
labelList = []
for i in range(labelCnt):
labelList.append(unicode(ui_element.headerItem().text(i)))
self.memoData['lang']['default'][ui_name]=';'.join(labelList)
elif isinstance(ui_element, QtWidgets.QTableWidget):
# uiType: QTableWidget
colCnt = ui_element.columnCount()
headerList = []
for i in range(colCnt):
if ui_element.horizontalHeaderItem(i):
headerList.append( unicode(ui_element.horizontalHeaderItem(i).text()) )
else:
headerList.append('')
self.memoData['lang']['default'][ui_name]=';'.join(headerList)
elif isinstance(ui_element, (str, unicode) ):
# uiType: string for msg
self.memoData['lang']['default'][ui_name] = self.uiList[ui_name]
# language menu
self.quickMenu(['language_menu;&Language'])
cur_menu = self.uiList['language_menu']
self.quickMenuAction('langDefault_atnLang', 'Default','','langDefault.png', cur_menu)
self.uiList['langDefault_atnLang'].triggered.connect(partial(self.setLang,'default'))
cur_menu.addSeparator()
# scan for language file
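        # e.g. for a tool saved as myTool.py, a file myTool_lang_CN.json placed next to it
        # would be picked up and listed as a "CN" menu entry (the file name is illustrative only)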
lang_path = os.path.dirname(self.location)
baseName = os.path.splitext( os.path.basename(self.location) )[0]
for file in self.getPathChild(lang_path, pattern=baseName+'_lang_[a-zA-Z]+.json', isfile=1):
langName = re.findall(baseName+'_lang_(.+)\.json', file)
if len(langName) == 1:
langName = langName[0].upper()
self.memoData['lang'][ langName ] = self.readDataFile( os.path.join(lang_path, file) )
if 'language_menu' in self.uiList:
self.quickMenuAction(langName+'_atnLang', langName,'',langName + '.png', self.uiList['language_menu'])
self.uiList[langName+'_atnLang'].triggered.connect(partial(self.setLang,langName))
# if no language file detected, add export default language option
if isinstance(self, QtWidgets.QMainWindow) and len(self.memoData['lang']) == 1:
self.quickMenuAction('langExport_atnLang', 'Export Default Language','','langExport.png', self.uiList['language_menu'])
self.uiList['langExport_atnLang'].triggered.connect(self.exportLang)
def setLang(self, langName):
lang_data = self.memoData['lang'][langName]
for ui_name in lang_data.keys():
if ui_name in self.uiList.keys() and lang_data[ui_name] != '':
ui_element = self.uiList[ui_name]
                # an empty string means no translation is available in that data file
if isinstance(ui_element, (QtWidgets.QLabel, QtWidgets.QPushButton, QtWidgets.QAction, QtWidgets.QCheckBox) ):
# uiType: QLabel, QPushButton, QAction(menuItem), QCheckBox
ui_element.setText(lang_data[ui_name])
elif isinstance(ui_element, (QtWidgets.QGroupBox, QtWidgets.QMenu) ):
# uiType: QMenu, QGroupBox
ui_element.setTitle(lang_data[ui_name])
elif isinstance(ui_element, QtWidgets.QTabWidget):
# uiType: QTabWidget
tabCnt = ui_element.count()
tabNameList = lang_data[ui_name].split(';')
if len(tabNameList) == tabCnt:
for i in range(tabCnt):
if tabNameList[i] != '':
ui_element.setTabText(i,tabNameList[i])
elif isinstance(ui_element, QtWidgets.QComboBox):
# uiType: QComboBox
itemCnt = ui_element.count()
itemNameList = lang_data[ui_name].split(';')
ui_element.clear()
ui_element.addItems(itemNameList)
elif isinstance(ui_element, QtWidgets.QTreeWidget):
# uiType: QTreeWidget
labelCnt = ui_element.headerItem().columnCount()
labelList = lang_data[ui_name].split(';')
ui_element.setHeaderLabels(labelList)
elif isinstance(ui_element, QtWidgets.QTableWidget):
# uiType: QTableWidget
colCnt = ui_element.columnCount()
headerList = lang_data[ui_name].split(';')
                    ui_element.setHorizontalHeaderLabels(headerList)
elif isinstance(ui_element, (str, unicode) ):
# uiType: string for msg
self.uiList[ui_name] = lang_data[ui_name]
def exportLang(self):
file = self.quickFileAsk('export', ext='json')
if file != '':
self.writeDataFile( self.memoData['lang']['default'], file )
self.quickMsg("Languge File created: '"+file)
#=======================================
# qui functions
#=======================================
def ____ui_creation_functions____():
pass
def qui(self, ui_list_string, parent_ui_string='', insert_opt=''):
ui_creation_list = [ x.strip() for x in ui_list_string.split('|') ]
ui_creation_quickUI_list = []
# ------------
# - ui list
# ------------
for ui_creation in ui_creation_list:
arg_list = ui_creation.split(';')
uiName = arg_list[0].split('@')[0]
# ------------
# continue if ui is already created. pass as ui reference
if uiName in self.uiList.keys():
ui_creation_quickUI_list.append(self.uiList[uiName])
continue
# ------------
# create quickUI string
# - expand short name for Class
uiClass = uiName.rsplit('_',1)[-1]
if uiClass == 'layout' and len(arg_list)>1:
uiClass = arg_list[1]
arg_list = [ arg_list[0] ]
if uiClass in self.qui_user_dict:
uiClass = self.qui_user_dict[uiClass] # first, try user dict
elif uiClass in self.qui_core_dict:
uiClass = self.qui_core_dict[uiClass] # then, try default core dict
# - check it is valid Qt class or a user class
if hasattr(QtWidgets, uiClass) or uiClass in sys.modules:
pass # uiClass is valid for Qt class, user module
else:
print("WARNING: ({0}) is not defined in self.qui_user_dict and it is not a Qt widget class or User class; Item {1} Ignored.".format(uiClass, uiName))
continue
# - set quickUI creation format
arg_list[0] = arg_list[0] +';'+uiClass
if len(arg_list)==1:
if uiClass in ('QPushButton','QLabel'):
arg_list.append(uiName) # give empty button and label a place holder name
ui_creation_quickUI_list.append(';'.join(arg_list))
# ------------
# - ui parent
# ------------
parent_creation_quickUI_input = ''
parent_arg_list = parent_ui_string.split(';')
parent_uiName = parent_arg_list[0]
# - continue if parent ui is already created. pass as ui reference
if parent_uiName in self.uiList.keys():
parent_creation_quickUI_input = self.uiList[parent_uiName]
else:
parent_uiClass = parent_uiName.rsplit('_',1)[-1]
if parent_uiClass == 'layout' and len(parent_arg_list)>1:
parent_uiClass = parent_arg_list[1]
parent_arg_list = [ parent_arg_list[0] ]
if parent_uiClass in self.qui_user_dict:
parent_uiClass = self.qui_user_dict[parent_uiClass] # first, try user dict
elif parent_uiClass in self.qui_core_dict:
parent_uiClass = self.qui_core_dict[parent_uiClass] # then, try default core dict
# - check it is valid Qt class or a user class
if hasattr(QtWidgets, parent_uiClass) or parent_uiClass in sys.modules:
pass # uiClass is valid for Qt class, user module
else:
print("WARNING: ({0}) is not defined in self.qui_user_dict and it is not a Qt widget class or User class; Item {1} Ignored.".format(parent_uiClass, parent_uiName))
return
# - set quickUI creation format
parent_arg_list[0] = parent_arg_list[0] +';'+parent_uiClass
parent_creation_quickUI_input = ';'.join(parent_arg_list)
self.quickUI(ui_creation_quickUI_list, parent_creation_quickUI_input, insert_opt)
return parent_uiName
def qui_menu(self, action_list_str, menu_str):
# qui menu creation
# syntax: self.qui_menu('right_menu_createFolder_atn;Create Folder,Ctrl+D | right_menu_openFolder_atn;Open Folder', 'right_menu')
if menu_str not in self.uiList.keys():
self.uiList[menu_str] = QtWidgets.QMenu()
create_opt_list = [ x.strip() for x in action_list_str.split('|') ]
for each_creation in create_opt_list:
ui_info = [ x.strip() for x in each_creation.split(';') ]
atn_name = ui_info[0]
atn_title = ''
atn_hotkey = ''
if len(ui_info) > 1:
options = ui_info[1].split(',')
atn_title = '' if len(options) < 1 else options[0]
atn_hotkey = '' if len(options) < 2 else options[1]
if atn_name != '':
if atn_name == '_':
self.uiList[menu_str].addSeparator()
else:
if atn_name not in self.uiList.keys():
self.uiList[atn_name] = QtWidgets.QAction(atn_title, self)
if atn_hotkey != '':
self.uiList[atn_name].setShortcut(QtGui.QKeySequence(atn_hotkey))
self.uiList[menu_str].addAction(self.uiList[atn_name])
def qui_atn(self, ui_name, title, tip=None, icon=None, parent=None, key=None):
self.uiList[ui_name] = QtWidgets.QAction(title, self)
if icon!=None:
self.uiList[ui_name].setIcon(QtGui.QIcon(icon))
if tip !=None:
self.uiList[ui_name].setStatusTip(tip)
if key != None:
self.uiList[ui_name].setShortcut(QtGui.QKeySequence(key))
if parent !=None:
if isinstance(parent, (str, unicode)) and parent in self.uiList.keys():
self.uiList[parent].addAction(self.uiList[ui_name])
elif isinstance(parent, QtWidgets.QMenu):
parent.addAction(self.uiList[ui_name])
return ui_name
def qui_key(self, key_name, key_combo, func):
self.hotkey[key_name] = QtWidgets.QShortcut(QtGui.QKeySequence(key_combo), self)
self.hotkey[key_name].activated.connect( func )
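    # usage sketch (hotkey name, key combo and callback are examples, not part of the template):
    #   self.qui_key('refresh_key', 'Ctrl+R', self.refresh_action)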
def qui_menubar(self, menu_list_str):
if not isinstance(self, QtWidgets.QMainWindow):
print("Warning: Only QMainWindow can have menu bar.")
return
menubar = self.menuBar()
create_opt_list = [ x.strip() for x in menu_list_str.split('|') ]
for each_creation in create_opt_list:
ui_info = [ x.strip() for x in each_creation.split(';') ]
menu_name = ui_info[0]
menu_title = ''
if len(ui_info) > 1:
menu_title = ui_info[1]
if menu_name not in self.uiList.keys():
self.uiList[menu_name] = QtWidgets.QMenu(menu_title)
menubar.addMenu(self.uiList[menu_name])
    # compatibility wrappers kept for older function names
def quickMenuAction(self, ui_name, title, tip, icon, menuObj):
self.qui_atn(ui_name, title, tip, icon, menuObj)
def quickMenu(self, ui_names):
if isinstance(ui_names, (list, tuple)):
self.qui_menubar('|'.join(ui_names))
else:
self.qui_menubar(ui_names)
#=======================================
# ui creation functions
#=======================================
def quickLayout(self, type, ui_name=""):
the_layout = ''
if type in ("form", "QFormLayout"):
the_layout = QtWidgets.QFormLayout()
the_layout.setLabelAlignment(QtCore.Qt.AlignLeft)
the_layout.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
elif type in ("grid", "QGridLayout"):
the_layout = QtWidgets.QGridLayout()
elif type in ("hbox", "QHBoxLayout"):
the_layout = QtWidgets.QHBoxLayout()
the_layout.setAlignment(QtCore.Qt.AlignTop)
else:
the_layout = QtWidgets.QVBoxLayout()
the_layout.setAlignment(QtCore.Qt.AlignTop)
if ui_name != "":
self.uiList[ui_name] = the_layout
return the_layout
def quickUI(self, part_list, parentObject="", insert_opt=""):
# part_list contains:
# -- 1. string (strings for widget/space, layout, container[group, tab, splitter])
# -- 2. object (widget/space, layout, container[group, tab, splitter])
# -- 3. object list
# -- 4. [object list, label_object list]
# parentObject contains:
# -- 1. string (strings for layout, container[group, tab, splitter])
# -- 2. object (layout, container[group, tab, splitter])
# insert_opt:
# -- insert into grid layout, h, v
# -- insert into tab, titles
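        # a minimal example call (all names here are illustrative only):
        #   self.quickUI(['name_input;QLineEdit', 'go_btn;QPushButton;Go'], 'main_layout;QVBoxLayout')
        # creates a line edit plus a button and adds both to a new vertical box layout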
if not isinstance(part_list, (list, tuple)):
part_list = [part_list]
# func variable
ui_list = []
ui_label_list = []
form_type = 0 # flag for store whether ui_list need a label widget list for form layout creation
# 1. convert string to object and flatten part_list
for each_part in part_list:
# 1.1 string
if isinstance(each_part, str):
# - string : get part info
partInfo = each_part.split(';')
uiNameLabel = partInfo[0].split('@')
uiName = uiNameLabel[0]
uiLabel = ''
if len(uiNameLabel) > 1:
uiLabel = uiNameLabel[1]
form_type = 1
uiType = partInfo[1] if len(partInfo) > 1 else ""
uiArgs = partInfo[2] if len(partInfo) > 2 else ""
# - string : valid info
if uiType == "":
print("Warning (QuickUI): uiType is empty for "+each_part)
else:
# - string : to object creation
ui_create_state = 0 # flag to track creation success
if not uiType[0] == 'Q':
# -- 3rd ui type, create like UI_Class.UI_Class()
self.uiList[uiName] = getattr(sys.modules[uiType], uiType)() # getattr(eval(uiType), uiType)()
ui_list.append(self.uiList[uiName])
ui_create_state = 1
else:
# -- Qt ui
if uiType in ('QVBoxLayout', 'QHBoxLayout', 'QFormLayout', 'QGridLayout'):
# --- Qt Layout creation preset func
ui_list.append(self.quickLayout(uiType, uiName))
ui_create_state = 1
elif uiType in ('QSplitter', 'QTabWidget', 'QGroupBox'):
# --- Qt container creation
if uiType == 'QSplitter':
# ---- QSplitter as element
split_type = QtCore.Qt.Horizontal
if uiArgs == 'v':
split_type = QtCore.Qt.Vertical
self.uiList[uiName]=QtWidgets.QSplitter(split_type)
ui_list.append(self.uiList[uiName])
ui_create_state = 1
elif uiType == 'QTabWidget':
# ---- QTabWidget as element, no tab label need for input
self.uiList[uiName]=QtWidgets.QTabWidget()
self.uiList[uiName].setStyleSheet("QTabWidget::tab-bar{alignment:center;}QTabBar::tab { min-width: 100px; }")
ui_list.append(self.uiList[uiName])
ui_create_state = 1
elif uiType == 'QGroupBox':
# ---- QGroupBox as element, with layout type and optional title
arg_list = [x.strip() for x in uiArgs.split(',')]
grp_layout = arg_list[0] if arg_list[0]!='' else 'vbox'
grp_title = arg_list[1] if len(arg_list)>1 else uiName
# create layout and set grp layout
grp_layout = self.quickLayout(grp_layout, uiName+"_layout" )
self.uiList[uiName] = QtWidgets.QGroupBox(grp_title)
self.uiList[uiName].setLayout(grp_layout)
ui_list.append(self.uiList[uiName])
ui_create_state = 1
else:
# --- Qt widget creation
if uiArgs == "":
# ---- widget with no uiArgs
self.uiList[uiName] = getattr(QtWidgets, uiType)()
ui_list.append(self.uiList[uiName])
ui_create_state = 1
else:
# ---- widget with uiArgs
if not ( uiArgs.startswith("(") and uiArgs.endswith(")") ):
# ----- with string arg
self.uiList[uiName] = getattr(QtWidgets, uiType)(uiArgs)
ui_list.append(self.uiList[uiName])
ui_create_state = 1
else:
# ----- with array arg
arg_list = uiArgs.replace('(','').replace(')','').split(',')
if uiType == 'QComboBox':
self.uiList[uiName] = QtWidgets.QComboBox()
self.uiList[uiName].addItems(arg_list)
ui_list.append(self.uiList[uiName])
ui_create_state = 1
elif uiType == 'QTreeWidget':
self.uiList[uiName] = QtWidgets.QTreeWidget()
self.uiList[uiName].setHeaderLabels(arg_list)
ui_list.append(self.uiList[uiName])
ui_create_state = 1
elif uiType == 'QSpacerItem':
policyList = ( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Ignored)
# 0 = fixed; 1 > min; 2 < max; 3 = prefered; 4 = <expanding>; 5 = expanding> Aggresive; 6=4 ignored size input
# factors in fighting for space: horizontalStretch
# extra space: setContentsMargins and setSpacing
# ref: http://www.cnblogs.com/alleyonline/p/4903337.html
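                                        # e.g. a creation string like 'gap_space;QSpacerItem;(10,10,4,4)' (name made up)
                                        # would give a 10x10 spacer that expands in both directions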
arg_list = [ int(x) for x in arg_list ]
self.uiList[uiName] = QtWidgets.QSpacerItem(arg_list[0],arg_list[1], policyList[arg_list[2]], policyList[arg_list[3]] )
ui_list.append(self.uiList[uiName])
ui_create_state = 1
else:
print("Warning (QuickUI): uiType don't support array arg for "+each_part)
# - string : Qt widget label for form element creation
if ui_create_state == 1:
if uiLabel != '':
ui_label_list.append((uiName,uiLabel))
else:
ui_label_list.append('')
ui_create_state = 0
else:
# 1.2 other part like: object, object list, [object, label object]
if isinstance(each_part, (QtWidgets.QWidget, QtWidgets.QLayout, QtWidgets.QSpacerItem)):
# - object
ui_list.append(each_part)
ui_label_list.append('')
elif isinstance(each_part, (tuple, list)):
# - object list, [object, label object]
if len(each_part) != 0:
if isinstance(each_part[0], (tuple, list)) and len(each_part)==2:
# -- [object, label object]
ui_list.extend(each_part[0])
ui_label_list.extend(each_part[1])
else:
# -- object list
ui_list.extend(each_part)
ui_label_list.extend(['']*len(each_part))
# 2 parentObject part
if parentObject == '':
# - if no parentObject, return object list or [object list, label_object list]
if form_type == 1:
return [ui_list, ui_label_list]
else:
return ui_list
else:
if isinstance(parentObject, str):
# - if parentObject, convert string to parentObject
parentName = ''
parentType = ''
parentArgs = ''
layout_type_list = (
'QVBoxLayout', 'QHBoxLayout', 'QFormLayout', 'QGridLayout', 'vbox', 'hbox', 'grid', 'form',
'QSplitter', 'QTabWidget', 'QGroupBox', 'split', 'tab', 'grp',
)
# get options
parentOpt = parentObject.split(';')
if len(parentOpt) == 1:
# -- only 1 arg case: strict name format, eg. conf_QHBoxLayout, config_hbox
parentName = parentOpt[0] # 1 para case: strict name endfix format
parentType = parentName.rsplit('_',1)[-1]
elif len(parentOpt)==2:
# -- only 2 arg case:
# a. flexible name format + type eg. conf_layout;QGridLayout, conf_layout;hbox
# b. strict name format, + setting eg. conf_QGridLayout;h, config_grid;h
parentName = parentOpt[0]
if parentOpt[1] in layout_type_list:
parentType = parentOpt[1] # a
else:
parentType = parentName.rsplit('_',1)[-1]
parentArgs = parentOpt[1] # b
elif len(parentOpt)>=3:
# -- 3 arg case:
# flexible name format + type + settings eg. conf_layout;QGridLayout;h
parentName = parentOpt[0]
parentType = parentOpt[1]
parentArgs = parentOpt[2]
# - validate layout options
if parentName=='' or (parentType not in layout_type_list):
print("Warning (QuickUI): quickUI not support parent layout as "+parentObject)
return
else:
# - create layout
if parentType in ('QVBoxLayout', 'QHBoxLayout', 'QFormLayout', 'QGridLayout', 'vbox', 'hbox', 'grid', 'form'):
# -- layout object case
parentObject = self.quickLayout(parentType, parentName)
elif parentType in ('QSplitter', 'QTabWidget', 'QGroupBox', 'split', 'tab', 'grp'):
# --- Qt container creation
if parentType in ('QSplitter', 'split'):
# ---- QSplitter as element
split_type = QtCore.Qt.Horizontal
if parentArgs == 'v':
split_type = QtCore.Qt.Vertical
self.uiList[parentName]=QtWidgets.QSplitter(split_type)
parentObject = self.uiList[parentName]
elif parentType in ('QTabWidget', 'tab'):
# ---- QTabWidget as element, no tab label need for input
self.uiList[parentName]=QtWidgets.QTabWidget()
self.uiList[parentName].setStyleSheet("QTabWidget::tab-bar{alignment:center;}QTabBar::tab { min-width: 100px; }")
parentObject = self.uiList[parentName]
elif parentType in ('QGroupBox', 'grp'):
# ---- QGroupBox as element, with layout type and optional title
arg_list = [x.strip() for x in parentArgs.split(',')]
grp_layout = arg_list[0] if arg_list[0]!='' else 'vbox'
grp_title = arg_list[1] if len(arg_list)>1 else parentName
# create layout and set grp layout
grp_layout = self.quickLayout(grp_layout, parentName+"_layout" )
self.uiList[parentName] = QtWidgets.QGroupBox(grp_title)
self.uiList[parentName].setLayout(grp_layout)
parentObject = self.uiList[parentName]
# 3. get parentLayout inside parentObject
parentLayout = ''
if isinstance(parentObject, QtWidgets.QLayout):
parentLayout = parentObject
elif isinstance(parentObject, QtWidgets.QGroupBox):
parentLayout = parentObject.layout()
# 3.1 insert part_list into parentLayout for layout and groupbox
if isinstance(parentLayout, QtWidgets.QBoxLayout):
for each_ui in ui_list:
if isinstance(each_ui, QtWidgets.QWidget):
parentLayout.addWidget(each_ui)
elif isinstance(each_ui, QtWidgets.QSpacerItem):
parentLayout.addItem(each_ui)
elif isinstance(each_ui, QtWidgets.QLayout):
parentLayout.addLayout(each_ui)
elif isinstance(parentLayout, QtWidgets.QGridLayout):
# one row/colume operation only
insertRow = parentLayout.rowCount()
insertCol = parentLayout.columnCount()
for i in range(len(ui_list)):
each_ui = ui_list[i]
x = insertRow if insert_opt=="h" else i
y = i if insert_opt=="h" else insertCol
if isinstance(each_ui, QtWidgets.QWidget):
parentLayout.addWidget(each_ui,x,y)
elif isinstance(each_ui, QtWidgets.QSpacerItem):
parentLayout.addItem(each_ui,x,y)
elif isinstance(each_ui, QtWidgets.QLayout):
parentLayout.addLayout(each_ui,x,y)
elif isinstance(parentLayout, QtWidgets.QFormLayout):
for i in range(len(ui_list)):
each_ui = ui_list[i]
if isinstance(each_ui, QtWidgets.QWidget) or isinstance(each_ui, QtWidgets.QLayout):
# create and add label: (uiName, uiLabel)
if ui_label_list[i] != '':
uiLabelName = ui_label_list[i][0] + "_label"
uiLabelText = ui_label_list[i][1]
self.uiList[uiLabelName] = QtWidgets.QLabel(uiLabelText)
parentLayout.addRow(self.uiList[uiLabelName], each_ui)
else:
parentLayout.addRow(each_ui)
else:
# 3.2 insert for empty parentLayout for split, and tab
if isinstance(parentObject, QtWidgets.QSplitter):
for each_ui in ui_list:
if isinstance(each_ui, QtWidgets.QWidget):
parentObject.addWidget(each_ui)
else:
tmp_holder = QtWidgets.QWidget()
tmp_holder.setLayout(each_ui)
parentObject.addWidget(tmp_holder)
elif isinstance(parentObject, QtWidgets.QTabWidget):
tab_names = insert_opt.replace('(','').replace(')','').split(',')
for i in range( len(ui_list) ):
each_tab = ui_list[i]
each_name = 'tab_'+str(i)
if i < len(tab_names):
if tab_names[i] != '':
each_name = tab_names[i]
if isinstance(each_tab, QtWidgets.QWidget):
parentObject.addTab(each_tab, each_name)
else:
tmp_holder = QtWidgets.QWidget()
tmp_holder.setLayout(each_tab)
parentObject.addTab(tmp_holder, each_name)
return parentObject
def quickSplitUI(self, name, part_list, type):
split_type = QtCore.Qt.Horizontal
if type == 'v':
split_type = QtCore.Qt.Vertical
self.uiList[name]=QtWidgets.QSplitter(split_type)
for each_part in part_list:
if isinstance(each_part, QtWidgets.QWidget):
self.uiList[name].addWidget(each_part)
else:
tmp_holder = QtWidgets.QWidget()
tmp_holder.setLayout(each_part)
self.uiList[name].addWidget(tmp_holder)
return self.uiList[name]
def quickTabUI(self, name, tab_list, tab_names):
self.uiList[name]=QtWidgets.QTabWidget()
self.uiList[name].setStyleSheet("QTabWidget::tab-bar{alignment:center;}QTabBar::tab { min-width: 100px; }")
for i in range( len(tab_list) ):
each_tab = tab_list[i]
each_name = tab_names[i]
if isinstance(each_tab, QtWidgets.QWidget):
self.uiList[name].addTab(each_tab, each_name)
else:
tmp_holder = QtWidgets.QWidget()
tmp_holder.setLayout(each_tab)
self.uiList[name].addTab(tmp_holder, each_name)
return self.uiList[name]
def quickGrpUI(self, ui_name, ui_label, ui_layout):
self.uiList[ui_name] = QtWidgets.QGroupBox(ui_label)
if isinstance(ui_layout, QtWidgets.QLayout):
self.uiList[ui_name].setLayout(ui_layout)
elif isinstance(ui_layout, str):
ui_layout = self.quickLayout(ui_name+"_layout", ui_layout)
self.uiList[ui_name].setLayout(ui_layout)
return [self.uiList[ui_name], ui_layout]
def quickPolicy(self, ui_list, w, h):
if not isinstance(ui_list, (list, tuple)):
ui_list = [ui_list]
# 0 = fixed; 1 > min; 2 < max; 3 = prefered; 4 = <expanding>; 5 = expanding> Aggresive; 6=4 ignored size input
policyList = ( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Ignored)
for each_ui in ui_list:
if isinstance(each_ui, str):
each_ui = self.uiList[each_ui]
each_ui.setSizePolicy(policyList[w],policyList[h])
def mui_to_qt(self, mui_name):
if hostMode != "maya":
return
ptr = mui.MQtUtil.findControl(mui_name)
if ptr is None:
ptr = mui.MQtUtil.findLayout(mui_name)
if ptr is None:
ptr = mui.MQtUtil.findMenuItem(mui_name)
if ptr is not None:
if qtMode in (0,2):
# ==== for pyside ====
return shiboken.wrapInstance(long(ptr), QtWidgets.QWidget)
elif qtMode in (1,3):
# ==== for PyQt====
return sip.wrapinstance(long(ptr), QtCore.QObject)
def qt_to_mui(self, qt_obj):
if hostMode != "maya":
return
ref = None
if qtMode in (0,2):
# ==== for pyside ====
ref = long(shiboken.getCppPointer(qt_obj)[0])
elif qtMode in (1,3):
# ==== for PyQt====
ref = long(sip.unwrapinstance(qt_obj))
if ref is not None:
return mui.MQtUtil.fullName(ref)
#=======================================
# widget specific functions
#=======================================
def ____TreeWidget_Process_Functions____():
pass
def path_pattern_to_task(self, path_pattern):
# break config text into section of sub-directory search task
# each task: 'sub_directory_path_to/content_list', 'content_dir_variable_name'
# also, 'content_dir_variable_name' also is the key to its filter pattern
# example: [('/VFX/assets/models', 'category'), ('', 'asset'), ('/Mesh/publish', 'model_file')])
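        # e.g. a pattern like 'VFX/assets/models/{category}/{asset}/Mesh/publish/{model_file}'
        # (illustrative, note: no leading slash) would yield the task list shown above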
part_list = path_pattern.split('/')
task_config = []
task_pattern = re.compile('{.+}') # grab variable name in path_pattern with {variable} format
sub = ''
for each in part_list:
if task_pattern.match(each):
task_config.append( (sub,each[1:-1]) )
sub = ''
else:
sub=sub+'/'+each
return task_config
def getPathChild(self, scanPath, pattern='', isfile=0):
resultList =[]
if isfile == 0:
resultList = [x for x in os.listdir(scanPath) if os.path.isdir(os.path.join(scanPath,x))]
elif isfile == 1:
resultList = [x for x in os.listdir(scanPath) if os.path.isfile(os.path.join(scanPath,x))]
else:
resultList = os.listdir(scanPath)
if pattern != '':
cur_pattern = re.compile(pattern)
resultList = [x for x in resultList if cur_pattern.match(x)]
resultList.sort()
return resultList
def DirToData(self, scanPath, task_config, pattern_config, currentTag=''):
        '''Recursively scan scanPath following task_config.
        Returns a nested tuple (node_info, node_info_child), where node_info is
        [name, tag, path] and node_info_child is a list of child tuples in the
        same format.
        '''
if not isinstance(task_config, (tuple, list)):
return ( [], [] )
else:
if len(task_config)== 0:
return ( [], [] )
task_list = task_config
# 1. get path if at least 1 task
cur_task = task_list[0]
rest_task = [] if len(task_list)==1 else task_list[1:]
scanPath = scanPath.replace('\\','/')
if cur_task[0] != '':
            scanPath = scanPath+cur_task[0] # note: os.path.join would treat a leading '/' as absolute, so concatenate instead
if not os.path.isdir(scanPath):
            print('Error: path does not exist: {}'.format(scanPath))
return ( [], [] )
# 2. get list and filter list
cur_pattern = '' if cur_task[1] not in pattern_config.keys() else pattern_config[cur_task[1]]
isfile = 0 # folder only
if cur_task[1].endswith('_file'):
isfile = 1 # file only
if cur_task[1].endswith('_all'):
isfile = 2 # folder and file
node_name = os.path.basename(scanPath)
node_info = ['', '', scanPath ] if currentTag == '' else [node_name, currentTag, scanPath ]
node_info_child = []
parentTag = currentTag
for each_name in self.getPathChild(scanPath, cur_pattern, isfile):
cur_path = os.path.join(scanPath, each_name).replace('\\','/')
cur_tag = each_name if parentTag == '' else parentTag+':'+each_name
if os.path.isdir(cur_path):
if len(rest_task) > 0:
# go next level task
node_info_child.append( self.DirToData(cur_path, rest_task, pattern_config, cur_tag) )
else:
node_info_child.append( ( [os.path.basename(cur_path), cur_tag, cur_path ], [] ) )
else:
node_info_child.append( ( [os.path.basename(cur_path), '', cur_path ], [] ) )
return (node_info, node_info_child)
def DirToTree(self, cur_tree, parentNode, scanPath, task_config, pattern_config):
if not isinstance(task_config, (tuple, list)):
return
else:
if len(task_config)== 0:
return
task_list = task_config
# 1. get path if at least 1 task
cur_task = task_list[0]
rest_task = [] if len(task_list)==1 else task_list[1:]
scanPath = scanPath.replace('\\','/')
if cur_task[0] != '':
            # note: os.path.join would treat a leading '/' as absolute, so concatenate instead
scanPath = scanPath+cur_task[0]
if not os.path.isdir(scanPath):
            print('Error: path does not exist: {}'.format(scanPath))
return
# 2. get list and filter list
cur_pattern = '' if cur_task[1] not in pattern_config.keys() else pattern_config[cur_task[1]]
isfile = 0 # folder only
if cur_task[1].endswith('_file'):
isfile = 1 # file only
if cur_task[1].endswith('_all'):
isfile = 2 # folder and file
child_list = self.getPathChild(scanPath, cur_pattern, isfile)
node_list = {}
# 3. create node in normal style
parentNode_info = unicode(parentNode.text(1))
if isfile == 2:
group_dict = {}
for each_name in child_list:
if os.path.isdir(os.path.join(scanPath, each_name)):
new_node = QtWidgets.QTreeWidgetItem()
new_node.setText(0, each_name)
new_node.setText(2, os.path.join(scanPath,each_name).replace('\\','/') )
parentNode.addChild(new_node)
node_list[each_name]=new_node
else:
prefix, ext = os.path.splitext(each_name)
# file type
fileType = ext[1:]
# file version
version_txt = ""
possible_version_list = re.findall(r'_v([\d]+)[_\.]', each_name) # last _v999.ext or _v999_xxx.ext
if len(possible_version_list) > 0:
version_txt = possible_version_list[-1]
# file prefix
if version_txt != "":
prefix = each_name.rsplit("_v"+version_txt, 1)[0]
# file group
group_name = prefix+':'+fileType
if group_name not in group_dict.keys():
group_dict[group_name] = []
group_dict[group_name].append(each_name)
# add group node first
for group_name in sorted(group_dict.keys()):
group_dict[group_name].sort(reverse=1)
group_item_list = group_dict[group_name]
fileType = group_name.split(':')[1]
group_node = QtWidgets.QTreeWidgetItem()
group_node_top_name = group_item_list[0]
cur_filePath = os.path.join(scanPath,group_node_top_name).replace("\\","/")
group_node.setText(0, group_node_top_name)
group_node.setText(1, fileType)
group_node.setText(2, cur_filePath)
parentNode.addChild(group_node)
# add sub version to the tree
if len(group_item_list) == 1:
node_list[group_node_top_name]=group_node
if len(group_item_list) > 1:
for each_name in group_item_list:
sub_node = QtWidgets.QTreeWidgetItem()
cur_filePath = os.path.join(scanPath,each_name).replace("\\","/")
sub_node.setText(0, each_name)
sub_node.setText(1, fileType)
sub_node.setText(2, cur_filePath)
group_node.addChild(sub_node)
node_list[each_name]=sub_node
elif isfile == 0:
for each_name in child_list:
new_node = QtWidgets.QTreeWidgetItem()
new_node.setText(0, each_name)
if parentNode_info == '':
new_node.setText(1, each_name)
else:
new_node.setText(1, parentNode_info+':'+each_name)
new_node.setText(2, os.path.join(scanPath,each_name).replace('\\','/') )
parentNode.addChild(new_node)
node_list[each_name]=new_node
elif isfile == 1:
# 3. create node in combine style
#-- group similar
group_dict = {}
for each_name in child_list:
prefix, ext = os.path.splitext(each_name)
# file type
fileType = ext[1:]
# file version
version_txt = ""
possible_version_list = re.findall(r'_v([\d]+)[_\.]', each_name) # last _v999.ext or _v999_xxx.ext
if len(possible_version_list) > 0:
version_txt = possible_version_list[-1]
# file prefix
if version_txt != "":
prefix = each_name.rsplit("_v"+version_txt, 1)[0]
# file group
group_name = prefix+':'+fileType
if group_name not in group_dict.keys():
group_dict[group_name] = []
group_dict[group_name].append(each_name)
# add group node first
for group_name in sorted(group_dict.keys()):
group_dict[group_name].sort(reverse=1)
group_item_list = group_dict[group_name]
fileType = group_name.split(':')[1]
group_node = QtWidgets.QTreeWidgetItem()
group_node_top_name = group_item_list[0]
cur_filePath = os.path.join(scanPath,group_node_top_name).replace("\\","/")
group_node.setText(0, group_node_top_name)
group_node.setText(1, fileType)
group_node.setText(2, cur_filePath)
parentNode.addChild(group_node)
# add sub version to the tree
if len(group_item_list) == 1:
node_list[group_node_top_name]=group_node
if len(group_item_list) > 1:
for each_name in group_item_list:
sub_node = QtWidgets.QTreeWidgetItem()
cur_filePath = os.path.join(scanPath,each_name).replace("\\","/")
sub_node.setText(0, each_name)
sub_node.setText(1, fileType)
sub_node.setText(2, cur_filePath)
group_node.addChild(sub_node)
node_list[each_name]=sub_node
# go next level task
if len(rest_task) > 0:
for each_name in child_list:
cur_parentPath = os.path.join(scanPath, each_name).replace('\\', '/')
if os.path.isdir(cur_parentPath):
self.DirToTree(cur_tree, node_list[each_name], cur_parentPath, rest_task, pattern_config)
def TreeToData(self, tree, cur_node):
        # use the item's own column count rather than the tree's column count, which may include hidden columns
child_count = cur_node.childCount()
node_info = [ unicode( cur_node.text(i) ) for i in range(cur_node.columnCount()) ]
node_info_child = []
for i in range(child_count):
node_info_child.append( self.TreeToData(tree, cur_node.child(i) ) )
return (node_info, node_info_child)
def DataToTree(self, tree, cur_node, data, filter=''):
node_info = data[0]
node_info_child = data[1]
[cur_node.setText(i, node_info[i]) for i in range(len(node_info))]
target = re.compile(filter, re.IGNORECASE)
for sub_data in node_info_child:
if filter == '':
new_node = QtWidgets.QTreeWidgetItem()
cur_node.addChild(new_node)
self.DataToTree(tree, new_node, sub_data)
else:
if not target.search(sub_data[0][0]) and not self.DataChildCheck(sub_data[1], filter):
pass
else:
new_node = QtWidgets.QTreeWidgetItem()
cur_node.addChild(new_node)
new_node.setExpanded(1)
self.DataToTree(tree, new_node, sub_data, filter)
def DataChildCheck(self, DataChild, pattern):
ok_cnt = 0
target = re.compile(pattern, re.IGNORECASE)
for sub_data in DataChild:
if target.search(sub_data[0][0]) or self.DataChildCheck(sub_data[1], pattern):
ok_cnt +=1
return ok_cnt
def TreeExport(self, tree_name, file):
# export process
ui_data = self.TreeToData(self.uiList[tree_name], self.uiList[tree_name].invisibleRootItem())
# file process
if file.endswith('.dat'):
self.writeDataFile(ui_data, file, binary=1)
else:
self.writeDataFile(ui_data, file)
self.quickInfo("File: '"+file+"' creation finished.")
def TreeImport(self, tree_name, file):
# import process
ui_data = ""
if file.endswith('.dat'):
ui_data = self.readDataFile(file, binary=1)
else:
ui_data = self.readDataFile(file)
self.uiList['dir_tree'].clear()
self.DataToTree(self.uiList['dir_tree'], self.uiList['dir_tree'].invisibleRootItem(), ui_data)
self.quickInfo("File: '"+file+"' loading finished.")
def cache_tree(self, cur_tree_name, force=1):
cur_tree = self.uiList[cur_tree_name]
if 'cache' not in self.memoData:
self.memoData['cache'] = {}
if force == 1:
self.memoData['cache'][cur_tree_name] = self.TreeToData(cur_tree, cur_tree.invisibleRootItem())
else:
if cur_tree_name not in self.memoData['cache']:
self.memoData['cache'][cur_tree_name] = self.TreeToData(cur_tree, cur_tree.invisibleRootItem())
def filter_tree(self, cur_tree_name, word):
word = unicode(word)
cur_tree = self.uiList[cur_tree_name]
parentNode = cur_tree.invisibleRootItem()
# read cache, if no cache, create cache
self.cache_tree(cur_tree_name, force = 0)
# filter and show, reset back to cache
cur_tree.clear()
if word != '':
self.DataToTree(cur_tree, parentNode, self.memoData['cache'][cur_tree_name], filter=word)
else:
self.DataToTree(cur_tree, parentNode, self.memoData['cache'][cur_tree_name])
#############################################
# User Class creation
#############################################
version = '0.1'
date = '2017.01.01'
log = '''
#------------------------------
# How to Use:
# 1. globally replace the class name "UserClassUI" with "YourToolName" in your editor,
#    - in the icons folder, the tool GUI icon should be named "YourToolName.png"
# 2. change the file name "universal_tool_template.py" to "YourPythonFileName.py",
#    - in the icons folder, the Maya shelf icon should be named "YourPythonFileName.png"; if you use the same name for both, then 1 icon is enough
# 3. load it up and run
#------------------------------
'''
help = '''
# loading template - Run in python panel
myPath='/path_to_universal_tool_or_custom_name/'
import sys;myPath in sys.path or sys.path.append(myPath);
import universal_tool_template
universal_tool_template.main()
# loading template - Run in system command console
python universal_tool_template.py
'''
# --------------------
# user module list
# --------------------
class UserClassUI(UniversalToolUI):
def __init__(self, parent=None, mode=0):
UniversalToolUI.__init__(self, parent)
# class variables
self.version= version
self.date = date
self.log = log
self.help = help
# mode: example for receive extra user input as parameter
self.mode = 0
if mode in [0,1]:
self.mode = mode # mode validator
# Custom user variable
#------------------------------
# initial data
#------------------------------
self.memoData['data']=[]
self.qui_user_dict = {} # e.g: 'edit': 'LNTextEdit',
self.setupStyle()
if isinstance(self, QtWidgets.QMainWindow):
self.setupMenu()
self.setupWin()
self.setupUI()
self.Establish_Connections()
self.loadLang()
self.loadData()
#------------------------------
# overwrite functions
#------------------------------
def setupMenu(self):
self.quickMenu('file_menu;&File | setting_menu;&Setting | help_menu;&Help')
cur_menu = self.uiList['setting_menu']
for info in ['export', 'import','user']:
title = info.title()
self.quickMenuAction('{0}Config_atn'.format(info),'{0} Config (&{1})'.format(title,title[0]),'{0} Setting and Configuration.'.format(title),'{0}Config.png'.format(info), cur_menu)
self.uiList['{0}Config_atn'.format(info)].setShortcut(QtGui.QKeySequence("Ctrl+{0}".format(title[0])))
cur_menu.addSeparator()
super(self.__class__,self).setupMenu()
def setupWin(self):
super(self.__class__,self).setupWin()
self.setGeometry(500, 300, 250, 110) # self.resize(250,250)
def setupUI(self):
super(self.__class__,self).setupUI('grid')
#------------------------------
# user ui creation part
#------------------------------
# + template: qui version since universal tool template v7
# - no extra variable name, all text based creation and reference
self.qui('box_btn;Box | sphere_btn;Sphere | ring_btn;Ring', 'my_layout;grid', 'h')
self.qui('box2_btn;Box2 | sphere2_btn;Sphere2 | ring2_btn;Ring2', 'my_layout', 'h')
self.qui('cat_btn;Cat | dog_btn;Dog | pig_btn;Pig', 'pet_layout;grid', 'v')
self.qui('cat2_btn;Cat2 | dog2_btn;Dog2 | pig2_btn;Pig2', 'pet_layout', 'v')
self.qui('name_input@Name:;John | email_input@Email:;[email protected]', 'entry_form')
self.qui('user2_btn;User2 | info2_btn;Info2', 'my_grp;vbox,Personal Data')
self.qui('source_txt | process_btn;Process and Update', 'upper_vbox')
self.qui('upper_vbox | result_txt', 'input_split;v')
self.qui('filePath_input | fileLoad_btn;Load | fileExport_btn;Export', 'fileBtn_layout;hbox')
self.qui('my_layout | my_table | input_split | entry_form | fileBtn_layout | pet_layout | my_grp', 'main_layout')
cur_table = self.uiList['my_table']
cur_table.setRowCount(0)
cur_table.setColumnCount(1)
cur_table.insertColumn(cur_table.columnCount())
cur_item = QtWidgets.QTableWidgetItem('ok') #QtWidgets.QPushButton('Cool') #
cur_table.insertRow(0)
cur_table.setItem(0,1, cur_item) #setCellWidget(0,0,cur_item)
cur_table.setHorizontalHeaderLabels(('a','b'))
'''
self.qui('source_txt | process_btn;Process and Update', 'upper_vbox')
self.qui('upper_vbox | result_txt', 'input_split;v')
self.qui('filePath_input | fileLoad_btn;Load | fileExport_btn;Export', 'fileBtn_layout;hbox')
self.qui('input_split | fileBtn_layout', 'main_layout')
'''
#------------- end ui creation --------------------
keep_margin_layout = ['main_layout']
keep_margin_layout_obj = []
# add tab layouts
for each in self.uiList.values():
if isinstance(each, QtWidgets.QTabWidget):
for i in range(each.count()):
keep_margin_layout_obj.append( each.widget(i).layout() )
for name, each in self.uiList.items():
if isinstance(each, QtWidgets.QLayout) and name not in keep_margin_layout and not name.endswith('_grp_layout') and each not in keep_margin_layout_obj:
each.setContentsMargins(0, 0, 0, 0)
self.quickInfo('Ready')
# self.statusBar().hide()
def Establish_Connections(self):
super(self.__class__,self).Establish_Connections()
# custom ui response
# shortcut connection
self.hotkey = {}
# self.hotkey['my_key'] = QtWidgets.QShortcut(QtGui.QKeySequence( "Ctrl+1" ), self)
# self.hotkey['my_key'].activated.connect(self.my_key_func)
# ---- user response list ----
def loadData(self):
print("Load data")
# load config
config = {}
config['root_name'] = 'root_default_name'
# overload config file if exists next to it
# then, save merged config into self.memoData['config']
prefix, ext = os.path.splitext(self.location)
config_file = prefix+'_config.json'
if os.path.isfile(config_file):
external_config = self.readDataFile(config_file)
print('info: External config file found.')
if isinstance( external_config, dict ):
self.memoData['config'] = self.dict_merge(config, external_config, addKey=1)
print('info: External config merged.')
else:
self.memoData['config'] = config
print('info: External config is not a dict and ignored.')
else:
self.memoData['config'] = config
# load user data
user_dirPath = os.path.join(os.path.expanduser('~'), 'Tool_Config', self.__class__.__name__)
user_setting_filePath = os.path.join(user_dirPath, 'setting.json')
if os.path.isfile(user_setting_filePath):
sizeInfo = self.readDataFile(user_setting_filePath)
self.setGeometry(*sizeInfo)
def closeEvent(self, event):
user_dirPath = os.path.join(os.path.expanduser('~'), 'Tool_Config', self.__class__.__name__)
if not os.path.isdir(user_dirPath):
try:
os.makedirs(user_dirPath)
except OSError:
                print('Error creating user data folder')
if not os.path.isdir(user_dirPath):
                print('Failed to create user data folder.')
return
# save setting
geoInfo = self.geometry()
sizeInfo = [geoInfo.x(), geoInfo.y(), geoInfo.width(), geoInfo.height()]
user_setting_filePath = os.path.join(user_dirPath, 'setting.json')
self.writeDataFile(sizeInfo, user_setting_filePath)
# - example button functions
def process_action(self): # (optional)
config = self.memoData['config']
print("Process ....")
source_txt = unicode(self.uiList['source_txt'].toPlainText())
# 2: update memory
self.memoData['data'] = [row.strip() for row in source_txt.split('\n')]
print("Update Result")
txt=config['root_name']+'\n'+'\n'.join([('>>: '+row) for row in self.memoData['data']])
self.uiList['result_txt'].setText(txt)
# - example file io function
def exportConfig_action(self):
file= self.quickFileAsk('export', {'json':'JSON data file', 'xdat':'Pickle binary file'})
if file == "":
return
# export process
ui_data = self.memoData['config']
# file process
if file.endswith('.xdat'):
self.writeDataFile(ui_data, file, binary=1)
else:
self.writeDataFile(ui_data, file)
self.quickInfo("File: '"+file+"' creation finished.")
def importConfig_action(self):
file= self.quickFileAsk('import',{'json':'JSON data file', 'xdat':'Pickle binary file'})
if file == "":
return
# import process
ui_data = ""
if file.endswith('.xdat'):
ui_data = self.readDataFile(file, binary=1)
else:
ui_data = self.readDataFile(file)
self.memoData['config'] = ui_data
self.quickInfo("File: '"+file+"' loading finished.")
def userConfig_action(self):
user_dirPath = os.path.join(os.path.expanduser('~'), 'Tool_Config', self.__class__.__name__)
self.openFolder(user_dirPath)
#=======================================
# window instance creation
#=======================================
single_UserClassUI = None
app_UserClassUI = None
def main(mode=0):
# get parent window in Maya
parentWin = None
if hostMode == "maya":
if qtMode in (0,2): # pyside
parentWin = shiboken.wrapInstance(long(mui.MQtUtil.mainWindow()), QtWidgets.QWidget)
elif qtMode in (1,3): # PyQt
parentWin = sip.wrapinstance(long(mui.MQtUtil.mainWindow()), QtCore.QObject)
# create app object for certain host
global app_UserClassUI
if hostMode in ('desktop', 'blender', 'npp', 'fusion'):
# single instance app mode on windows
if osMode == 'win':
# check if already open for single desktop instance
from ctypes import wintypes
order_list = []
result_list = []
top = ctypes.windll.user32.GetTopWindow(None)
if top:
length = ctypes.windll.user32.GetWindowTextLengthW(top)
buff = ctypes.create_unicode_buffer(length + 1)
ctypes.windll.user32.GetWindowTextW(top, buff, length + 1)
class_name = ctypes.create_string_buffer(200)
ctypes.windll.user32.GetClassNameA(top, ctypes.byref(class_name), 200)
result_list.append( [buff.value, class_name.value, top ])
order_list.append(top)
while True:
next = ctypes.windll.user32.GetWindow(order_list[-1], 2) # win32con.GW_HWNDNEXT
if not next:
break
length = ctypes.windll.user32.GetWindowTextLengthW(next)
buff = ctypes.create_unicode_buffer(length + 1)
ctypes.windll.user32.GetWindowTextW(next, buff, length + 1)
class_name = ctypes.create_string_buffer(200)
ctypes.windll.user32.GetClassNameA(next, ctypes.byref(class_name), 200)
result_list.append( [buff.value, class_name.value, next] )
order_list.append(next)
# result_list: [(title, class, hwnd int)]
winTitle = 'UserClassUI' # os.path.basename(os.path.dirname(__file__))
is_opened = 0
for each in result_list:
if re.match(winTitle+' - v[0-9.]* - host: desktop',each[0]) and each[1] == 'QWidget':
is_opened += 1
if is_opened == 1:
ctypes.windll.user32.SetForegroundWindow(each[2])
return
if hostMode in ('npp','fusion'):
app_UserClassUI = QtWidgets.QApplication([])
    elif hostMode == 'houdini':
pass
else:
app_UserClassUI = QtWidgets.QApplication(sys.argv)
#--------------------------
# ui instance
#--------------------------
# template 1 - Keep only one copy of windows ui in Maya
global single_UserClassUI
if single_UserClassUI is None:
if hostMode == 'maya':
single_UserClassUI = UserClassUI(parentWin, mode)
elif hostMode == 'nuke':
single_UserClassUI = UserClassUI(QtWidgets.QApplication.activeWindow(), mode)
else:
single_UserClassUI = UserClassUI()
# extra note: in Maya () for no parent; (parentWin,0) for extra mode input
single_UserClassUI.show()
ui = single_UserClassUI
if hostMode != 'desktop':
ui.activateWindow()
# template 2 - allow loading multiple windows of same UI in Maya
'''
if hostMode == "maya":
ui = UserClassUI(parentWin)
ui.show()
else:
pass
# extra note: in Maya () for no parent; (parentWin,0) for extra mode input
'''
# loop app object for certain host
    if hostMode == 'desktop':
sys.exit(app_UserClassUI.exec_())
elif hostMode in ('npp','fusion'):
app_UserClassUI.exec_()
return ui
if __name__ == "__main__":
main()
| mit | -6,143,926,066,668,047,000 | 47.105446 | 282 | 0.514335 | false |
dnlcrl/PyFunt | tools/cythonize.py | 1 | 6618 | #!/usr/bin/env python
""" cythonize
SOURCE: https://github.com/scipy/scipy/blob/master/setup.py
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'pyfunt'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script decides whether a pyx file has changed relative to its C file
by comparing hashes stored in a database file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'pyfunt'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
try:
from Cython.Compiler.Version import version as cython_version
from distutils.version import LooseVersion
if LooseVersion(cython_version) < LooseVersion('0.22'):
raise Exception('Building PyFunt requires Cython >= 0.22')
except ImportError:
pass
flags = ['--fast-fail']
if tofile.endswith('.cxx'):
flags += ['--cplus']
try:
try:
# if fromfile == 'im2col_cython.pyx':
# print('compiling im2col_cython')
# r = subprocess.call(
# ['python', 'pyfunt/layers/setup.py', 'build_ext', '--inplace'])
# else:
r = subprocess.call(
['cython'] + flags + ["-o", tofile, fromfile])
if r != 0:
raise Exception('Cython failed')
except OSError:
# There are ways of installing Cython that don't result in a cython
# executable on the path, see gh-2397.
r = subprocess.call([sys.executable, '-c',
'import sys; from Cython.Compiler.Main import '
'setuptools_main as main; sys.exit(main())'] + flags +
["-o", tofile, fromfile])
if r != 0:
raise Exception("Cython either isn't installed or it failed.")
except OSError:
raise OSError('Cython needs to be installed')
def process_tempita_pyx(fromfile, tofile):
try:
try:
from Cython import Tempita as tempita
except ImportError:
import tempita
except ImportError:
raise Exception('Building PyFunt requires Tempita: '
'pip install --user Tempita')
from_filename = tempita.Template.from_filename
template = from_filename(fromfile, encoding=sys.getdefaultencoding())
pyxcontent = template.substitute()
assert fromfile.endswith('.pyx.in')
pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
with open(pyxfile, "w") as f:
f.write(pyxcontent)
process_pyx(pyxfile, tofile)
rules = {
# fromext : function
'.pyx': process_pyx,
'.pyx.in': process_tempita_pyx
}
#
# Hash db
#
def load_hashes(filename):
# Return { filename : (sha1 of input, sha1 of output) }
if os.path.isfile(filename):
hashes = {}
with open(filename, 'r') as f:
for line in f:
filename, inhash, outhash = line.split()
hashes[filename] = (inhash, outhash)
else:
hashes = {}
return hashes
def save_hashes(hash_db, filename):
with open(filename, 'w') as f:
for key, value in sorted(hash_db.items()):
f.write("%s %s %s\n" % (key, value[0], value[1]))
def sha1_of_file(filename):
h = hashlib.sha1()
with open(filename, "rb") as f:
h.update(f.read())
return h.hexdigest()
#
# Main program
#
def normpath(path):
path = path.replace(os.sep, '/')
if path.startswith('./'):
path = path[2:]
return path
def get_hash(frompath, topath):
from_hash = sha1_of_file(frompath)
to_hash = sha1_of_file(topath) if os.path.exists(topath) else None
return (from_hash, to_hash)
def process(path, fromfile, tofile, processor_function, hash_db):
fullfrompath = os.path.join(path, fromfile)
fulltopath = os.path.join(path, tofile)
current_hash = get_hash(fullfrompath, fulltopath)
if current_hash == hash_db.get(normpath(fullfrompath), None):
print('%s has not changed' % fullfrompath)
return
orig_cwd = os.getcwd()
try:
os.chdir(path)
print('Processing %s to %s' % (fullfrompath, fulltopath))
processor_function(fromfile, tofile)
finally:
os.chdir(orig_cwd)
# changed target file, recompute hash
current_hash = get_hash(fullfrompath, fulltopath)
# store hash in db
hash_db[normpath(fullfrompath)] = current_hash
def find_process_files(root_dir):
hash_db = load_hashes(HASH_FILE)
for cur_dir, dirs, files in os.walk(root_dir):
for filename in files:
in_file = os.path.join(cur_dir, filename + ".in")
if filename.endswith('.pyx') and os.path.isfile(in_file):
continue
for fromext, function in rules.items():
if filename.endswith(fromext):
toext = ".c"
with open(os.path.join(cur_dir, filename), 'rb') as f:
data = f.read()
m = re.search(
br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I | re.M)
if m:
toext = ".cxx"
fromfile = filename
tofile = filename[:-len(fromext)] + toext
process(cur_dir, fromfile, tofile, function, hash_db)
save_hashes(hash_db, HASH_FILE)
def main():
try:
root_dir = sys.argv[1]
except IndexError:
root_dir = DEFAULT_ROOT
find_process_files(root_dir)
if __name__ == '__main__':
main()
| mit | -7,944,039,619,602,638,000 | 29.219178 | 97 | 0.599577 | false |
deepmind/interval-bound-propagation | interval_bound_propagation/src/simplex_bounds.py | 1 | 7609 | # coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Naive bound calculation for common neural network layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from interval_bound_propagation.src import bounds as basic_bounds
from interval_bound_propagation.src import relative_bounds
import sonnet as snt
import tensorflow.compat.v1 as tf
class SimplexBounds(basic_bounds.AbstractBounds):
"""Specifies a bounding simplex within an embedding space."""
def __init__(self, vertices, nominal, r):
"""Initialises the simplex bounds.
Args:
vertices: Tensor of shape (num_vertices, *input_shape)
or of shape (batch_size, num_vertices, *input_shape)
containing the vertices in embedding space.
nominal: Tensor of shape (batch_size, *input_shape) specifying
the unperturbed inputs in embedding space, where `*input_shape`
denotes either (embedding_size,) for flat input (e.g. bag-of-words)
or (input_length, embedding_channels) for sequence input.
r: Scalar specifying the dilation factor of the simplex. The dilated
simplex will have vertices `nominal + r * (vertices-nominal)`.
"""
super(SimplexBounds, self).__init__()
self._vertices = vertices
self._nominal = nominal
self._r = r
@property
def vertices(self):
return self._vertices
@property
def nominal(self):
return self._nominal
@property
def r(self):
return self._r
@property
def shape(self):
return self.nominal.shape.as_list()
@classmethod
def convert(cls, bounds):
if not isinstance(bounds, cls):
raise ValueError('Cannot convert "{}" to "{}"'.format(bounds,
cls.__name__))
return bounds
def apply_batch_reshape(self, wrapper, shape):
reshape = snt.BatchReshape(shape)
if self.vertices.shape.ndims == self.nominal.shape.ndims:
reshape_vertices = reshape
else:
reshape_vertices = snt.BatchReshape(shape, preserve_dims=2)
return SimplexBounds(reshape_vertices(self.vertices),
reshape(self.nominal),
self.r)
def apply_linear(self, wrapper, w, b):
mapped_centres = tf.matmul(self.nominal, w)
mapped_vertices = tf.tensordot(self.vertices, w, axes=1)
lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -2)
nominal_out = tf.matmul(self.nominal, w)
if b is not None:
nominal_out += b
return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out)
def apply_conv1d(self, wrapper, w, b, padding, stride):
mapped_centres = tf.nn.conv1d(self.nominal, w,
padding=padding, stride=stride)
if self.vertices.shape.ndims == 3:
# `self.vertices` has no batch dimension; its shape is
# (num_vertices, input_length, embedding_channels).
mapped_vertices = tf.nn.conv1d(self.vertices, w,
padding=padding, stride=stride)
elif self.vertices.shape.ndims == 4:
# `self.vertices` has shape
# (batch_size, num_vertices, input_length, embedding_channels).
# Vertices are different for each example in the batch,
# e.g. for word perturbations.
mapped_vertices = snt.BatchApply(
lambda x: tf.nn.conv1d(x, w, padding=padding, stride=stride))(
self.vertices)
else:
raise ValueError('"vertices" must have either 3 or 4 dimensions.')
lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -3)
nominal_out = tf.nn.conv1d(self.nominal, w,
padding=padding, stride=stride)
if b is not None:
nominal_out += b
return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out)
def apply_conv2d(self, wrapper, w, b, padding, strides):
mapped_centres = tf.nn.convolution(self.nominal, w,
padding=padding, strides=strides)
if self.vertices.shape.ndims == 4:
# `self.vertices` has no batch dimension; its shape is
# (num_vertices, input_height, input_width, input_channels).
mapped_vertices = tf.nn.convolution(self.vertices, w,
padding=padding, strides=strides)
elif self.vertices.shape.ndims == 5:
# `self.vertices` has shape
# (batch_size, num_vertices, input_height, input_width, input_channels).
# Vertices are different for each example in the batch.
mapped_vertices = snt.BatchApply(
lambda x: tf.nn.convolution(x, w, padding=padding, strides=strides))(
self.vertices)
else:
raise ValueError('"vertices" must have either 4 or 5 dimensions.')
lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -4)
nominal_out = tf.nn.convolution(self.nominal, w,
padding=padding, strides=strides)
if b is not None:
nominal_out += b
return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out)
def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters):
if fn.__name__ in ('add', 'reduce_mean', 'reduce_sum', 'avg_pool'):
if self.vertices.shape.ndims == self.nominal.shape.ndims:
vertices_fn = fn
else:
vertices_fn = snt.BatchApply(fn, n_dims=2)
return SimplexBounds(
vertices_fn(self.vertices, *[bounds.vertices for bounds in args]),
fn(self.nominal, *[bounds.nominal for bounds in args]),
self.r)
elif fn.__name__ == 'quotient':
return SimplexBounds(
self.vertices / tf.expand_dims(parameters['denom'], axis=1),
fn(self.nominal),
self.r)
else:
return super(SimplexBounds, self).apply_increasing_monotonic_fn(
wrapper, fn, *args, **parameters)
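# Construction sketch (the shapes, values and calls below are illustrative, not
# taken from the library's own tests): the dilation factor `r` scales the
# simplex about the nominal point, so every effective vertex becomes
# `nominal + r * (vertices - nominal)`.
#
#   vertices = tf.random.normal([50, 300])   # (num_vertices, embedding_size)
#   nominal = tf.random.normal([8, 300])     # (batch_size, embedding_size)
#   bounds = SimplexBounds(vertices, nominal, r=0.1)
#   # `apply_linear` ignores its `wrapper` argument, so None suffices here;
#   # w: (300, num_outputs), b: (num_outputs,)
#   out_bounds = bounds.apply_linear(None, w, b)  # -> RelativeIntervalBounds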
def _simplex_bounds(mapped_vertices, mapped_centres, r, axis):
"""Calculates naive bounds on the given layer-mapped vertices.
Args:
mapped_vertices: Tensor of shape (num_vertices, *output_shape)
or of shape (batch_size, num_vertices, *output_shape)
containing the vertices in the layer's output space.
mapped_centres: Tensor of shape (batch_size, *output_shape)
containing the layer's nominal outputs.
r: Scalar in [0, 1) specifying the radius (in vocab space) of the simplex.
axis: Index of the `num_vertices` dimension of `mapped_vertices`.
Returns:
lb_out: Tensor of shape (batch_size, *output_shape) with lower bounds
on the outputs of the affine layer.
ub_out: Tensor of shape (batch_size, *output_shape) with upper bounds
on the outputs of the affine layer.
"""
# Use the negative of r, instead of the complement of r, as
# we're shifting the input domain to be centred at the origin.
lb_out = -r * mapped_centres + r * tf.reduce_min(mapped_vertices, axis=axis)
ub_out = -r * mapped_centres + r * tf.reduce_max(mapped_vertices, axis=axis)
return lb_out, ub_out
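# Worked numeric sketch of the formula above (values are made up): with
# r = 0.5, a nominal (centre) output of 1.0 and vertex outputs {0.0, 2.0, 4.0}
# along `axis`,
#   lb_out = -0.5 * 1.0 + 0.5 * 0.0 = -0.5
#   ub_out = -0.5 * 1.0 + 0.5 * 4.0 =  1.5
# i.e. the bounds are offsets relative to the nominal output, which is why the
# callers pair them with `nominal_out` in RelativeIntervalBounds.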
| apache-2.0 | 3,811,566,715,278,191,600 | 38.020513 | 79 | 0.655539 | false |
sigurdga/nidarholm | organization/templatetags/grouplistings.py | 1 | 2466 | from django import template
from django.contrib.auth.models import Group
from organization.models import GroupCategory, Role
import re
register = template.Library()
def roles_for_user_in_group(user, group):
return Role.objects.filter(membership__user=user, membership__group=group)
def phone_number_format(number):
if number:
m = re.search(r'^((?:4|9)\d{2})(\d{2})(\d{3})$', number)
if m:
return "%s %s %s" % (m.group(1), m.group(2), m.group(3))
else:
n = re.search(r'^(\d{2})(\d{2})(\d{2})(\d{2})$', number)
if n:
return "%s %s %s %s" % (n.group(1), n.group(2), n.group(3), n.group(4))
else:
return number
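# Illustrative behaviour (numbers made up): 8-digit numbers starting with 4 or 9
# are grouped 3-2-3, other 8-digit numbers 2-2-2-2, and anything else is
# returned unchanged:
#   phone_number_format('91234567')  ->  '912 34 567'
#   phone_number_format('23456789')  ->  '23 45 67 89'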
@register.simple_tag
def list_groups(request, group_name, groupcategory_name):
"""Give a group and a not related group category.
Lists all groups in groupcategory, filtered on users in the given group.
"""
group = Group.objects.get(name__iexact=group_name)
groupcategory = GroupCategory.objects.get(name=groupcategory_name)
#TODO: Add 404 on exceptions
ret = '<ul class="reset">'
for groupprofile in groupcategory.groupprofile_set.all():
ret += "<li>"
ret += "<h2>" + groupprofile.group.name + "</h2>"
ret += "<table>"
for u in groupprofile.group.user_set.all():
# groupprofile.group.user_set.filter(groups=group) is too eager
#if u.groups.filter(id=group.id).exists():
if u.userprofile_set.filter(status__lt=4):
ret += "<tr>"
if request.organization.group in request.user.groups.all():
ret += "<td class=\"col4\"><a href=\"" + u.get_absolute_url() +"\">" + u.get_full_name() + "</a></td>"
else:
ret += "<td class=\"col4\">" + u.get_full_name() + "</td>"
ret += "<td>" + ", ".join([ role.name for role in roles_for_user_in_group(u, group) ]) + "</td>"
if request.user.groups.filter(id=group.id):
ret += "<td class=\"col2\">%s</td>" % (phone_number_format(u.get_profile().cellphone) or "",)
ret += "<td class=\"col5\">%s</td>" % (u.email,)
ret += "<td>" + ", ".join([ role.name for role in roles_for_user_in_group(u, groupprofile.group) ]) + "</td>"
ret += "</tr>"
ret += "</table>"
ret += "</li>"
ret += "</ul>"
return ret
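# Usage sketch in a template (the group and category names are made up):
#
#   {% load grouplistings %}
#   {% list_groups request "Members" "Instrument groups" %}
#
# This renders one <h2>/<table> block per group in the "Instrument groups"
# category, listing each group's users (profile status < 4) and their roles in
# the "Members" group.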
| agpl-3.0 | -7,402,660,224,905,612,000 | 41.517241 | 125 | 0.539335 | false |
sidnarayanan/BAdNet | train/gen/baseline/models/shallow_models/train_v3_nopt.py | 1 | 2793 | #!/usr/local/bin/python2.7
from sys import exit, stdout, argv
from os import environ, system
environ['KERAS_BACKEND'] = 'tensorflow'
environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
environ["CUDA_VISIBLE_DEVICES"] = ""
import numpy as np
import signal
from keras.layers import Input, Dense, Dropout, concatenate, LSTM, BatchNormalization, Conv1D, concatenate
from keras.models import Model
from keras.callbacks import ModelCheckpoint, LambdaCallback, TensorBoard
from keras.optimizers import Adam, SGD
from keras.utils import np_utils
from keras import backend as K
K.set_image_data_format('channels_last')
from subtlenet import config
from subtlenet.generators.gen_singletons import make_coll, generate
'''
some global definitions
'''
NEPOCH = 20
APOSTLE = 'v3_nopt'
system('cp %s shallow_models/train_%s.py'%(argv[0], APOSTLE))
'''
instantiate data loaders
'''
basedir = '/data/t3serv014/snarayan/deep/v_deepgen_3/'
top = make_coll(basedir + '/PARTITION/Top_*_CATEGORY.npy')
qcd = make_coll(basedir + '/PARTITION/QCD_*_CATEGORY.npy')
data = [top, qcd]
'''
first build the classifier!
'''
# set up data
classifier_train_gen = generate(data, partition='train', batch=1000)
classifier_validation_gen = generate(data, partition='validate', batch=10000)
classifier_test_gen = generate(data, partition='test', batch=10)
test_i, test_o, test_w = next(classifier_test_gen)
#print test_i
inputs = Input(shape=(len(config.gen_default_variables),), name='input')
dense = Dense(32, activation='tanh',name='dense1',kernel_initializer='lecun_uniform') (inputs)
dense = Dense(32, activation='tanh',name='dense2',kernel_initializer='lecun_uniform') (dense)
dense = Dense(32, activation='tanh',name='dense3',kernel_initializer='lecun_uniform') (dense)
y_hat = Dense(config.n_truth, activation='softmax') (dense)
classifier = Model(inputs=inputs, outputs=[y_hat])
classifier.compile(optimizer=Adam(lr=0.0005),
loss='categorical_crossentropy',
metrics=['accuracy'])
print '########### CLASSIFIER ############'
classifier.summary()
print '###################################'
# ctrl+C now triggers a graceful exit
def save_classifier(name='shallow', model=classifier):
model.save('shallow_models/%s_%s.h5'%(name, APOSTLE))
def save_and_exit(signal=None, frame=None, name='shallow', model=classifier):
save_classifier(name, model)
exit(1)
signal.signal(signal.SIGINT, save_and_exit)
classifier.fit_generator(classifier_train_gen,
steps_per_epoch=5000,
epochs=NEPOCH,
validation_data=classifier_validation_gen,
validation_steps=10,
)
save_classifier()
| mit | 6,679,780,554,708,395,000 | 31.103448 | 106 | 0.672037 | false |
lorensen/VTKExamples | src/Python/GeometricObjects/Polygon.py | 1 | 1535 | #!/usr/bin/env python
import vtk
def main():
colors = vtk.vtkNamedColors()
# Setup four points
points = vtk.vtkPoints()
points.InsertNextPoint(0.0, 0.0, 0.0)
points.InsertNextPoint(1.0, 0.0, 0.0)
points.InsertNextPoint(1.0, 1.0, 0.0)
points.InsertNextPoint(0.0, 1.0, 0.0)
# Create the polygon
polygon = vtk.vtkPolygon()
polygon.GetPointIds().SetNumberOfIds(4) # make a quad
polygon.GetPointIds().SetId(0, 0)
polygon.GetPointIds().SetId(1, 1)
polygon.GetPointIds().SetId(2, 2)
polygon.GetPointIds().SetId(3, 3)
# Add the polygon to a list of polygons
polygons = vtk.vtkCellArray()
polygons.InsertNextCell(polygon)
# Create a PolyData
polygonPolyData = vtk.vtkPolyData()
polygonPolyData.SetPoints(points)
polygonPolyData.SetPolys(polygons)
# Create a mapper and actor
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polygonPolyData)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(colors.GetColor3d("Silver"))
# Visualize
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("Polygon")
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d("Salmon"))
renderWindow.Render()
renderWindowInteractor.Start()
if __name__ == '__main__':
main()
| apache-2.0 | -4,295,740,643,100,989,000 | 26.909091 | 61 | 0.695765 | false |
angelicadly/prog-script | tekton-master/backend/appengine/routes/rotas/rest.py | 1 | 1044 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from tekton.gae.middleware.json_middleware import JsonResponse
from rota_app import facade
def index():
cmd = facade.list_rotas_cmd()
rota_list = cmd()
short_form=facade.rota_short_form()
rota_short = [short_form.fill_with_model(m) for m in rota_list]
return JsonResponse(rota_short)
def save(**rota_properties):
cmd = facade.save_rota_cmd(**rota_properties)
return _save_or_update_json_response(cmd)
def update(rota_id, **rota_properties):
cmd = facade.update_rota_cmd(rota_id, **rota_properties)
return _save_or_update_json_response(cmd)
def delete(rota_id):
facade.delete_rota_cmd(rota_id)()
def _save_or_update_json_response(cmd):
try:
rota = cmd()
except CommandExecutionException:
return JsonResponse({'errors': cmd.errors})
short_form=facade.rota_short_form()
return JsonResponse(short_form.fill_with_model(rota))
| mit | -1,036,920,398,554,202,000 | 27.216216 | 67 | 0.704981 | false |
nedbat/django_coverage_plugin | tests/test_settings.py | 1 | 1672 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/django_coverage_plugin/blob/master/NOTICE.txt
"""Settings tests for django_coverage_plugin."""
from django.test.utils import override_settings
from .plugin_test import DjangoPluginTestCase, test_settings
# Make settings overrides for tests below.
NON_DJANGO_BACKEND = 'django.template.backends.dummy.TemplateStrings'
DEBUG_FALSE_OVERRIDES = test_settings()
DEBUG_FALSE_OVERRIDES['TEMPLATES'][0]['OPTIONS']['debug'] = False
NO_OPTIONS_OVERRIDES = test_settings()
del NO_OPTIONS_OVERRIDES['TEMPLATES'][0]['OPTIONS']
OTHER_ENGINE_OVERRIDES = test_settings()
OTHER_ENGINE_OVERRIDES['TEMPLATES'][0]['BACKEND'] = NON_DJANGO_BACKEND
OTHER_ENGINE_OVERRIDES['TEMPLATES'][0]['OPTIONS'] = {}
class SettingsTest(DjangoPluginTestCase):
"""Tests of detecting that the settings need to be right for the plugin to work."""
@override_settings(**DEBUG_FALSE_OVERRIDES)
def test_debug_false(self):
self.make_template('Hello')
with self.assert_plugin_disabled("Template debugging must be enabled in settings."):
self.run_django_coverage()
@override_settings(**NO_OPTIONS_OVERRIDES)
def test_no_options(self):
self.make_template('Hello')
with self.assert_plugin_disabled("Template debugging must be enabled in settings."):
self.run_django_coverage()
@override_settings(**OTHER_ENGINE_OVERRIDES)
def test_other_engine(self):
self.make_template('Hello')
with self.assert_plugin_disabled("Can't use non-Django templates."):
self.run_django_coverage()
| apache-2.0 | -1,464,384,054,192,477,200 | 37.883721 | 92 | 0.718301 | false |
akshayka/edxclassify | edxclassify/classifiers/feature_generation.py | 1 | 3989 | from edxclassify.feature_spec import FEATURE_COLUMNS
from edxclassify.classifiers.word_lists import *
from edxclassify.data_cleaners.dc_util import compress_likert
import re
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
def to_int(value, aux=None):
if value == '':
return 0
return int(value)
def to_float(value, aux=None):
if value == '':
return 0
return 1 if float(value) > 0.94 else 0
def is_anonymous(value, aux=None):
return 1 if value.lower() == 'true' else 0
def is_comment_thread(value, aux=None):
return 1 if value.lower() == 'commentthread' else 0
def count_question_marks(document, aux=None):
count = 0
for c in document:
if c == '?':
count = count + 1
return count
# TODO: How do these play with logistic regression?
# TODO: Idea -- feature for sentiment ~ 1 iff #pos > #neg
def count_negative_words(document, token_patrn):
words = re.findall(token_patrn, document)
count = 0
for w in words:
if w in NEGATIVE_WORDS:
count = count + 1
return count
def count_urgent_words(document, token_patrn):
words = re.findall(token_patrn, document)
count = 0
for w in words:
if w in URGENT_WORDS:
return 1
return 0
def count_opinion_words(document, token_patrn):
words = re.findall(token_patrn, document)
count = 0
for w in words:
if w in OPINION_WORDS:
count = count + 1
return count
def count_nouns(document, aux=None):
tagged_words = []
for s in sent_tokenize(document.decode('utf-8')):
tagged_words.extend(nltk.pos_tag(word_tokenize(s)))
count = 0
for word, tag in tagged_words:
if tag == 'NN':
count = count + 1
return count
# TODO: We might want to discretize the grades and number of attempts
class FeatureExtractor:
def __init__(self, feature_name):
self.feature_name = feature_name
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
idx = FEATURE_COLUMNS[self.feature_name]
return [row[idx] for row in X]
class FeatureCurator:
def __init__(self, feature_name, curate_function, aux=None):
self.feature_name = feature_name
self.curate = curate_function
self.aux=aux
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return [{self.feature_name + ' feature': self.curate(value, self.aux)}
for value in X]
def fit_transform(self, X, y=None):
return self.transform(X)
class ChainedClassifier:
def __init__(self, clf, column, guess):
self.clf = clf
self.column = column
self.y_chain = None
self.guess = guess
def fit(self, X, y=None):
# Note that the extracted values will be in
# [0, 2] for non-binary variables (confusion,
# sentiment, urgency), {0, 1} otherwise.
if self.column == 'confusion' or\
self.column == 'sentiment' or\
self.column == 'urgency':
self.y_chain = [compress_likert(
record[FEATURE_COLUMNS[self.column]],
binary=False)\
for record in X]
else:
self.y_chain = [int(record[FEATURE_COLUMNS[self.column]])\
for record in X]
self.clf.train(X, self.y_chain)
def transform(self, X, y=None):
if self.y_chain is not None and not self.guess:
predictions = self.y_chain
# This is critical -- it ensures
# that we don't use the gold set values when
# predicting.
self.y_chain = None
else:
predictions = self.clf.test(X)
return [{self.column + ' prediction': value} for value in predictions]
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
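# The dict-valued transforms above are shaped for something like scikit-learn's
# DictVectorizer; one plausible composition (a sketch, not code from this
# repository, and 'body' is an assumed feature column name) is:
#
#   from sklearn.pipeline import Pipeline
#   from sklearn.feature_extraction import DictVectorizer
#   pipeline = Pipeline([
#       ('extract', FeatureExtractor('body')),
#       ('curate', FeatureCurator('questions', count_question_marks)),
#       ('vectorize', DictVectorizer()),
#   ])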
| gpl-2.0 | 2,360,410,365,754,236,000 | 28.992481 | 78 | 0.591376 | false |
srmagura/goodnight-lead | gl_site/statistics/views.py | 1 | 6287 | # View imports
from django.http import JsonResponse, HttpResponse
from django.shortcuts import render
from gl_site.custom_auth import login_required
# Forms
from gl_site.statistics.statistics_form import statistics_request_form, statistics_download_form
# Data
from .data_generation import format_graph_data, format_file_data, generate_data_from_sessions, get_queryset, validate_sessions
from gl_site.statistics import inventory_keys
# IO
from django.core.files.base import ContentFile
from io import BytesIO
# JSON
import json
# Excel
import xlsxwriter
# Response statuses
BAD_REQUEST = 400
FORBIDDEN = 403
METHOD_NOT_ALLOWED = 405
# Error messages
METHOD_NOT_ALLOWED_MESSAGE = "Method not allowed."
INVALID_DATA_SELECTION = "Invalid data selection."
@login_required
def view_statistics(request):
""" View responsable for initially loading the statistics page """
# Get the proper queryset and generate the form
querysets = get_queryset(request.user)
form = statistics_request_form(
querysets['organizations'],
querysets['sessions']
)
downloads = statistics_download_form(
querysets['organizations'],
querysets['sessions'],
auto_id='id_downloads_%s'
)
return render(request, 'statistics/statistics.html', {
'form': form,
'downloads': downloads,
'statistics_active': True,
})
@login_required
def load_data(request):
""" Returns a JSON respons containing statistics data """
# Deny non GET requests
if (request.method != 'GET'):
return JsonResponse([METHOD_NOT_ALLOWED_MESSAGE], status=METHOD_NOT_ALLOWED, safe=False)
    # Get the querysets accessible by the user
querysets = get_queryset(request.user)
# Build the submitted form from request data
form = statistics_request_form(
querysets['organizations'],
querysets['sessions'],
request.GET
)
# Validate the form
if (not form.is_valid()):
return JsonResponse([INVALID_DATA_SELECTION], status=FORBIDDEN, safe=False)
try:
# Validate sessions
sessions = validate_sessions(
form.cleaned_data['organization'],
form.cleaned_data['session'],
request.user
)
# Generate and format the data
data = generate_data_from_sessions(sessions, request.user)
data = format_graph_data(data)
# Return the JSON encoded response
return JsonResponse(data, safe=False)
except LookupError as e:
return JsonResponse([str(e)], status=BAD_REQUEST, safe=False)
def download_data(request):
    # Get the querysets accessible by the user
querysets = get_queryset(request.user)
# Get the selected downloads
downloads = statistics_download_form(
querysets['organizations'],
querysets['sessions'],
request.GET,
auto_id='id_downloads_%s'
)
# If it is a valid choice
if (downloads.is_valid()):
data = []
try:
# Validate sessions
sessions = validate_sessions(
downloads.cleaned_data['organization'],
downloads.cleaned_data['session'],
request.user
)
# Generate the data
data = generate_data_from_sessions(sessions, request.user)
data = format_file_data(data)
except LookupError:
pass
data_file = ContentFile('')
# Finalize the output
if (downloads.cleaned_data['file_type'] == 'application/xlsx'):
# Create an excel workbook wrapped around python byte io.
# Use in memory to prevent the use of temp files.
output = BytesIO()
workbook = xlsxwriter.Workbook(output, {'in_memory': True})
# Create a worksheet.
worksheet = workbook.add_worksheet()
# Set ID, Organization, and Session headers
worksheet.write('A1', 'User ID')
worksheet.write('B1', 'Organization')
worksheet.write('C1', 'Session')
# Add all user IDs (row number), organization, and session information
row = 2
for user in data:
worksheet.write('A{}'.format(row), row - 1)
worksheet.write('B{}'.format(row), user['organization'])
worksheet.write('C{}'.format(row), user['session'])
row += 1
# Print inventory data starting at column D
prefix = ''
column = ord('D')
for inventory in inventory_keys:
# Print all metrics within the inventory
for key in inventory['keys']:
# If column is greater than 'Z' move to 'AA'
if (column > ord('Z')):
prefix = 'A'
column = ord('A')
# Write the column header: Inventory - Metric
worksheet.write(prefix + chr(column) + '1', inventory['name'] + ' - ' + key)
# Print metric data for each user
row = 2
for user in data:
inventory_name = inventory['name']
# Only print if the user has data for this inventory
if (inventory_name in user and key in user[inventory_name]):
cell = (prefix + chr(column) + '{}').format(row)
worksheet.write(cell, user[inventory['name']][key])
# Move on to the next row
row += 1
# Move on to the next column
column += 1
# Close the workbook
workbook.close()
# Get the output bytes for creating a django file
output = output.getvalue()
# Set the appropriate application extension
extension = '.xlsx'
else:
# Generate the JSON output string
output = json.dumps(data)
# Set the appropriate application extension
extension = '.json'
# Generate the data file
data_file = ContentFile(output)
# Create the response containing the file
response = HttpResponse(
data_file,
content_type=downloads.cleaned_data['file_type']
)
response['Content-Disposition'] = 'attachment; filename=statistics{}'.format(extension)
return response
| gpl-3.0 | -8,336,534,110,600,097,000 | 30.435 | 126 | 0.606171 | false |
jhunkeler/hstcal | tests/wfc3/test_uvis_32single.py | 1 | 1252 | import subprocess
import pytest
from ..helpers import BaseWFC3
@pytest.mark.xfail(reason="Temporary xfail. New input/truth files on Artifactory, but branch not merged.")
class TestUVIS32Single(BaseWFC3):
"""
Test pos UVIS2 subarray data with CTE correction
"""
detector = 'uvis'
def _single_raw_calib(self, rootname):
raw_file = '{}_raw.fits'.format(rootname)
# Prepare input file.
self.get_input_file(raw_file)
# Run CALWF3
subprocess.call(['calwf3.e', raw_file, '-vts'])
# Compare results
outputs = [('{}_flt.fits'.format(rootname), '{}_flt_ref.fits'.format(rootname)),
('{}_flc.fits'.format(rootname), '{}_flc_ref.fits'.format(rootname)),
('{}_rac_tmp.fits'.format(rootname), '{}_rac_tmp_ref.fits'.format(rootname))]
self.compare_outputs(outputs)
# Ported from ``calwf3_uv_32``.
@pytest.mark.parametrize(
'rootname', ['ib3805v0q'])
# 'rootname', ['ib3805v0q',
# 'ib2kabmaq',
# 'ib3503wwq',
# 'ibde04msq',
# 'icoc14hcq'])
def test_uvis_32single(self, rootname):
self._single_raw_calib(rootname)
| bsd-3-clause | 4,627,072,372,218,348,000 | 31.102564 | 106 | 0.567093 | false |
adamfast/faadata | faadata/aircraft/parser.py | 1 | 3304 | import datetime
class AircraftManufacturerCode(object):
def __init__(self, record):
self.code = record[:7].strip()
self.manufacturer = record[8:38].strip()
self.model = record[39:59].strip()
self.aircraft_type = record[60].strip()
self.engine_type = record[62].strip()
self.category = record[64].strip()
self.builder_certification_code = record[66].strip()
self.number_of_engines = record[68:70].strip()
self.number_of_seats = record[71:74].strip()
self.aircraft_weight = record[75:82].strip()
self.cruising_speed = record[83:87].strip()
class AircraftRegistration(object):
def __init__(self, record):
# first parse the fixed-width
self.n_number = record[:5].strip()
self.serial_number = record[6:36].strip()
self.aircraft_mfr_model_code = record[37:44].strip()
self.engine_mfr_model_code = record[45:50].strip()
self.year_mfg = record[51:55].strip()
if record[56].strip():
self.type_registrant = record[56].strip()
else:
self.type_registrant = None
self.registrant_name = record[58:108].strip()
self.street1 = record[109:142].strip()
self.street2 = record[143:176].strip()
self.city = record[177:195].strip()
self.state = record[196:198].strip()
self.zip_code = record[199:209].strip()
self.region = record[210].strip()
self.county = record[212:215].strip()
self.country = record[216:218].strip()
if record[219:227].strip():
self.last_activity_date = datetime.datetime.strptime(record[219:227], "%Y%m%d").date()
else:
self.last_activity_date = None
if record[228:236].strip():
self.certificate_issue_date = datetime.datetime.strptime(record[228:236], "%Y%m%d").date()
else:
self.certificate_issue_date = None
self.airworthiness_classification_code = record[237:238].strip()
if record[248].strip():
self.aircraft_type = record[248].strip()
else:
self.aircraft_type = None
if record[250:252].strip():
self.engine_type = record[250:252].strip()
else:
self.engine_type = None
self.status_code = record[253:255].strip()
self.mode_s_code = record[256:264].strip()
self.fractional_ownership = record[265].strip()
if record[267:275].strip():
self.airworthiness_date = datetime.datetime.strptime(record[267:275], "%Y%m%d").date()
else:
self.airworthiness_date = None
self.other_name_1 = record[276:326].strip()
self.other_name_2 = record[327:377].strip()
self.other_name_3 = record[378:428].strip()
self.other_name_4 = record[429:479].strip()
self.other_name_5 = record[480:530].strip()
if record[531:539].strip():
self.expiration_date = datetime.datetime.strptime(record[531:539], "%Y%m%d").date()
else:
self.expiration_date = None
self.unique_id = record[540:548].strip()
self.kit_manufacturer = record[549:579].strip()
self.kit_model = record[580:600].strip()
self.mode_s_code_hex = record[601:611].strip()
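# Minimal usage sketch (the filename is an assumption; these character offsets
# target the fixed-width MASTER file from the FAA aircraft registry download):
#
#   with open('MASTER.txt') as fh:
#       for line in fh:
#           reg = AircraftRegistration(line)
#           print(reg.n_number, reg.registrant_name, reg.expiration_date)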
| bsd-3-clause | -8,092,445,956,844,940,000 | 43.648649 | 102 | 0.592918 | false |
Aravinthu/odoo | odoo/fields.py | 1 | 104967 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" High-level objects for fields. """
from collections import OrderedDict, defaultdict
from datetime import date, datetime
from functools import partial
from operator import attrgetter
import itertools
import logging
import pytz
try:
from xmlrpc.client import MAXINT
except ImportError:
#pylint: disable=bad-python3-import
from xmlrpclib import MAXINT
import psycopg2
from .sql_db import LazyCursor
from .tools import float_repr, float_round, frozendict, html_sanitize, human_size, pg_varchar, ustr, OrderedSet, pycompat, sql
from .tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from .tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
from .tools.translate import html_translate, _
DATE_LENGTH = len(date.today().strftime(DATE_FORMAT))
DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT))
EMPTY_DICT = frozendict()
RENAMED_ATTRS = [('select', 'index'), ('digits_compute', 'digits')]
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__[:-7] + '.schema')
Default = object() # default value for __init__() methods
def copy_cache(records, env):
""" Recursively copy the cache of ``records`` to the environment ``env``. """
src, dst = records.env.cache, env.cache
todo, done = set(records), set()
while todo:
record = todo.pop()
if record not in done:
done.add(record)
target = record.with_env(env)
for field in src.get_fields(record):
value = src.get(record, field)
dst.set(target, field, value)
if value and field.type in ('many2one', 'one2many', 'many2many', 'reference'):
todo.update(field.convert_to_record(value, record))
def resolve_mro(model, name, predicate):
""" Return the list of successively overridden values of attribute ``name``
in mro order on ``model`` that satisfy ``predicate``.
"""
result = []
for cls in type(model).__mro__:
if name in cls.__dict__:
value = cls.__dict__[name]
if not predicate(value):
break
result.append(value)
return result
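# For instance, resolve_mro(model, 'total', callable) walks type(model).__mro__
# and collects every callable 'total' attribute it finds, stopping at the first
# non-callable value ('total' is only an illustrative attribute name);
# _setup_regular_base() uses this to gather the @api.depends declarations of
# every override of a compute method.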
class MetaField(type):
""" Metaclass for field classes. """
by_type = {}
def __new__(meta, name, bases, attrs):
""" Combine the ``_slots`` dict from parent classes, and determine
``__slots__`` for them on the new class.
"""
base_slots = {}
for base in reversed(bases):
base_slots.update(getattr(base, '_slots', ()))
slots = dict(base_slots)
slots.update(attrs.get('_slots', ()))
attrs['__slots__'] = set(slots) - set(base_slots)
attrs['_slots'] = slots
return type.__new__(meta, name, bases, attrs)
def __init__(cls, name, bases, attrs):
super(MetaField, cls).__init__(name, bases, attrs)
if not hasattr(cls, 'type'):
return
if cls.type and cls.type not in MetaField.by_type:
MetaField.by_type[cls.type] = cls
# compute class attributes to avoid calling dir() on fields
cls.related_attrs = []
cls.description_attrs = []
for attr in dir(cls):
if attr.startswith('_related_'):
cls.related_attrs.append((attr[9:], attr))
elif attr.startswith('_description_'):
cls.description_attrs.append((attr[13:], attr))
_global_seq = iter(itertools.count())
class Field(MetaField('DummyField', (object,), {})):
""" The field descriptor contains the field definition, and manages accesses
and assignments of the corresponding field on records. The following
attributes may be provided when instanciating a field:
:param string: the label of the field seen by users (string); if not
set, the ORM takes the field name in the class (capitalized).
:param help: the tooltip of the field seen by users (string)
:param readonly: whether the field is readonly (boolean, by default ``False``)
:param required: whether the value of the field is required (boolean, by
default ``False``)
:param index: whether the field is indexed in database (boolean, by
default ``False``)
:param default: the default value for the field; this is either a static
value, or a function taking a recordset and returning a value; use
``default=None`` to discard default values for the field
:param states: a dictionary mapping state values to lists of UI attribute-value
pairs; possible attributes are: 'readonly', 'required', 'invisible'.
Note: Any state-based condition requires the ``state`` field value to be
available on the client-side UI. This is typically done by including it in
the relevant views, possibly made invisible if not relevant for the
end-user.
:param groups: comma-separated list of group xml ids (string); this
restricts the field access to the users of the given groups only
:param bool copy: whether the field value should be copied when the record
is duplicated (default: ``True`` for normal fields, ``False`` for
``one2many`` and computed fields, including property fields and
related fields)
    :param string oldname: the previous name of this field, so that the ORM can
        rename it automatically during migration
.. _field-computed:
.. rubric:: Computed fields
One can define a field whose value is computed instead of simply being
read from the database. The attributes that are specific to computed
fields are given below. To define such a field, simply provide a value
for the attribute ``compute``.
:param compute: name of a method that computes the field
:param inverse: name of a method that inverses the field (optional)
:param search: name of a method that implement search on the field (optional)
:param store: whether the field is stored in database (boolean, by
default ``False`` on computed fields)
:param compute_sudo: whether the field should be recomputed as superuser
to bypass access rights (boolean, by default ``False``)
The methods given for ``compute``, ``inverse`` and ``search`` are model
methods. Their signature is shown in the following example::
upper = fields.Char(compute='_compute_upper',
inverse='_inverse_upper',
search='_search_upper')
@api.depends('name')
def _compute_upper(self):
for rec in self:
rec.upper = rec.name.upper() if rec.name else False
def _inverse_upper(self):
for rec in self:
rec.name = rec.upper.lower() if rec.upper else False
def _search_upper(self, operator, value):
if operator == 'like':
operator = 'ilike'
return [('name', operator, value)]
The compute method has to assign the field on all records of the invoked
recordset. The decorator :meth:`odoo.api.depends` must be applied on
the compute method to specify the field dependencies; those dependencies
are used to determine when to recompute the field; recomputation is
automatic and guarantees cache/database consistency. Note that the same
method can be used for several fields, you simply have to assign all the
given fields in the method; the method will be invoked once for all
those fields.
By default, a computed field is not stored to the database, and is
computed on-the-fly. Adding the attribute ``store=True`` will store the
field's values in the database. The advantage of a stored field is that
searching on that field is done by the database itself. The disadvantage
is that it requires database updates when the field must be recomputed.
The inverse method, as its name says, does the inverse of the compute
method: the invoked records have a value for the field, and you must
apply the necessary changes on the field dependencies such that the
computation gives the expected value. Note that a computed field without
an inverse method is readonly by default.
The search method is invoked when processing domains before doing an
actual search on the model. It must return a domain equivalent to the
condition: ``field operator value``.
.. _field-related:
.. rubric:: Related fields
The value of a related field is given by following a sequence of
relational fields and reading a field on the reached model. The complete
sequence of fields to traverse is specified by the attribute
:param related: sequence of field names
Some field attributes are automatically copied from the source field if
they are not redefined: ``string``, ``help``, ``readonly``, ``required`` (only
if all fields in the sequence are required), ``groups``, ``digits``, ``size``,
``translate``, ``sanitize``, ``selection``, ``comodel_name``, ``domain``,
``context``. All semantic-free attributes are copied from the source
field.
By default, the values of related fields are not stored to the database.
Add the attribute ``store=True`` to make it stored, just like computed
fields. Related fields are automatically recomputed when their
dependencies are modified.
.. _field-company-dependent:
.. rubric:: Company-dependent fields
Formerly known as 'property' fields, the value of those fields depends
on the company. In other words, users that belong to different companies
may see different values for the field on a given record.
:param company_dependent: whether the field is company-dependent (boolean)
.. _field-incremental-definition:
.. rubric:: Incremental definition
A field is defined as class attribute on a model class. If the model
is extended (see :class:`~odoo.models.Model`), one can also extend
the field definition by redefining a field with the same name and same
type on the subclass. In that case, the attributes of the field are
taken from the parent class and overridden by the ones given in
subclasses.
For instance, the second class below only adds a tooltip on the field
``state``::
class First(models.Model):
_name = 'foo'
state = fields.Selection([...], required=True)
class Second(models.Model):
_inherit = 'foo'
state = fields.Selection(help="Blah blah blah")
"""
type = None # type of the field (string)
relational = False # whether the field is a relational one
translate = False # whether the field is translated
column_type = None # database column type (ident, spec)
column_format = '%s' # placeholder for value in queries
column_cast_from = () # column types that may be cast to this
_slots = {
'args': EMPTY_DICT, # the parameters given to __init__()
'_attrs': EMPTY_DICT, # the field's non-slot attributes
'_module': None, # the field's module name
'_setup_done': None, # the field's setup state: None, 'base' or 'full'
'_sequence': None, # absolute ordering of the field
'automatic': False, # whether the field is automatically created ("magic" field)
'inherited': False, # whether the field is inherited (_inherits)
'name': None, # name of the field
'model_name': None, # name of the model of this field
'comodel_name': None, # name of the model of values (if relational)
'store': True, # whether the field is stored in database
'index': False, # whether the field is indexed in database
'manual': False, # whether the field is a custom field
'copy': True, # whether the field is copied over by BaseModel.copy()
'depends': (), # collection of field dependencies
'recursive': False, # whether self depends on itself
'compute': None, # compute(recs) computes field on recs
'compute_sudo': False, # whether field should be recomputed as admin
'inverse': None, # inverse(recs) inverses field on recs
'search': None, # search(recs, operator, value) searches on self
'related': None, # sequence of field names, for related fields
'related_sudo': True, # whether related fields should be read as admin
'company_dependent': False, # whether ``self`` is company-dependent (property field)
'default': None, # default(recs) returns the default value
'string': None, # field label
'help': None, # field tooltip
'readonly': False, # whether the field is readonly
'required': False, # whether the field is required
'states': None, # set readonly and required depending on state
'groups': None, # csv list of group xml ids
'change_default': False, # whether the field may trigger a "user-onchange"
'deprecated': None, # whether the field is deprecated
'related_field': None, # corresponding related field
'group_operator': None, # operator for aggregating values
'group_expand': None, # name of method to expand groups in read_group()
'prefetch': True, # whether the field is prefetched
'context_dependent': False, # whether the field's value depends on context
}
def __init__(self, string=Default, **kwargs):
kwargs['string'] = string
self._sequence = kwargs['_sequence'] = next(_global_seq)
args = {key: val for key, val in kwargs.items() if val is not Default}
self.args = args or EMPTY_DICT
self._setup_done = None
def new(self, **kwargs):
""" Return a field of the same type as ``self``, with its own parameters. """
return type(self)(**kwargs)
def __getattr__(self, name):
""" Access non-slot field attribute. """
try:
return self._attrs[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
""" Set slot or non-slot field attribute. """
try:
object.__setattr__(self, name, value)
except AttributeError:
if self._attrs:
self._attrs[name] = value
else:
self._attrs = {name: value} # replace EMPTY_DICT
def set_all_attrs(self, attrs):
""" Set all field attributes at once (with slot defaults). """
# optimization: we assign slots only
assign = object.__setattr__
for key, val in self._slots.items():
assign(self, key, attrs.pop(key, val))
if attrs:
assign(self, '_attrs', attrs)
def __delattr__(self, name):
""" Remove non-slot field attribute. """
try:
del self._attrs[name]
except KeyError:
raise AttributeError(name)
def __str__(self):
return "%s.%s" % (self.model_name, self.name)
def __repr__(self):
return "%s.%s" % (self.model_name, self.name)
############################################################################
#
# Base field setup: things that do not depend on other models/fields
#
def setup_base(self, model, name):
""" Base setup: things that do not depend on other models/fields. """
if self._setup_done and not self.related:
# optimization for regular fields: keep the base setup
self._setup_done = 'base'
else:
# do the base setup from scratch
self._setup_attrs(model, name)
if not self.related:
self._setup_regular_base(model)
self._setup_done = 'base'
#
# Setup field parameter attributes
#
def _can_setup_from(self, field):
""" Return whether ``self`` can retrieve parameters from ``field``. """
return isinstance(field, type(self))
def _get_attrs(self, model, name):
""" Return the field parameter attributes as a dictionary. """
# determine all inherited field attributes
attrs = {}
if not (self.args.get('automatic') or self.args.get('manual')):
# magic and custom fields do not inherit from parent classes
for field in reversed(resolve_mro(model, name, self._can_setup_from)):
attrs.update(field.args)
attrs.update(self.args) # necessary in case self is not in class
attrs['args'] = self.args
attrs['model_name'] = model._name
attrs['name'] = name
# initialize ``self`` with ``attrs``
if attrs.get('compute'):
# by default, computed fields are not stored, not copied and readonly
attrs['store'] = attrs.get('store', False)
attrs['copy'] = attrs.get('copy', False)
attrs['readonly'] = attrs.get('readonly', not attrs.get('inverse'))
attrs['context_dependent'] = attrs.get('context_dependent', True)
if attrs.get('related'):
# by default, related fields are not stored and not copied
attrs['store'] = attrs.get('store', False)
attrs['copy'] = attrs.get('copy', False)
if attrs.get('company_dependent'):
# by default, company-dependent fields are not stored and not copied
attrs['store'] = False
attrs['copy'] = attrs.get('copy', False)
attrs['default'] = self._default_company_dependent
attrs['compute'] = self._compute_company_dependent
if not attrs.get('readonly'):
attrs['inverse'] = self._inverse_company_dependent
attrs['search'] = self._search_company_dependent
attrs['context_dependent'] = attrs.get('context_dependent', True)
if attrs.get('translate'):
# by default, translatable fields are context-dependent
attrs['context_dependent'] = attrs.get('context_dependent', True)
return attrs
def _setup_attrs(self, model, name):
""" Initialize the field parameter attributes. """
attrs = self._get_attrs(model, name)
self.set_all_attrs(attrs)
# check for renamed attributes (conversion errors)
for key1, key2 in RENAMED_ATTRS:
if key1 in attrs:
_logger.warning("Field %s: parameter %r is no longer supported; use %r instead.",
self, key1, key2)
# prefetch only stored, column, non-manual and non-deprecated fields
if not (self.store and self.column_type) or self.manual or self.deprecated:
self.prefetch = False
if not self.string and not self.related:
# related fields get their string from their parent field
self.string = (
name[:-4] if name.endswith('_ids') else
name[:-3] if name.endswith('_id') else name
).replace('_', ' ').title()
# self.default must be a callable
if self.default is not None:
value = self.default
self.default = value if callable(value) else lambda model: value
############################################################################
#
# Full field setup: everything else, except recomputation triggers
#
def setup_full(self, model):
""" Full setup: everything else, except recomputation triggers. """
if self._setup_done != 'full':
if not self.related:
self._setup_regular_full(model)
else:
self._setup_related_full(model)
self._setup_done = 'full'
#
# Setup of non-related fields
#
def _setup_regular_base(self, model):
""" Setup the attributes of a non-related field. """
def make_depends(deps):
return tuple(deps(model) if callable(deps) else deps)
if isinstance(self.compute, pycompat.string_types):
# if the compute method has been overridden, concatenate all their _depends
self.depends = ()
for method in resolve_mro(model, self.compute, callable):
self.depends += make_depends(getattr(method, '_depends', ()))
else:
self.depends = make_depends(getattr(self.compute, '_depends', ()))
def _setup_regular_full(self, model):
""" Setup the inverse field(s) of ``self``. """
pass
#
# Setup of related fields
#
def _setup_related_full(self, model):
""" Setup the attributes of a related field. """
# fix the type of self.related if necessary
if isinstance(self.related, pycompat.string_types):
self.related = tuple(self.related.split('.'))
# determine the chain of fields, and make sure they are all set up
target = model
for name in self.related:
field = target._fields[name]
field.setup_full(target)
target = target[name]
self.related_field = field
# check type consistency
if self.type != field.type:
raise TypeError("Type of related field %s is inconsistent with %s" % (self, field))
# determine dependencies, compute, inverse, and search
self.depends = ('.'.join(self.related),)
self.compute = self._compute_related
if not (self.readonly or field.readonly):
self.inverse = self._inverse_related
if field._description_searchable:
# allow searching on self only if the related field is searchable
self.search = self._search_related
# copy attributes from field to self (string, help, etc.)
for attr, prop in self.related_attrs:
if not getattr(self, attr):
setattr(self, attr, getattr(field, prop))
for attr, value in field._attrs.items():
if attr not in self._attrs:
setattr(self, attr, value)
# special case for states: copy it only for inherited fields
if not self.states and self.inherited:
self.states = field.states
# special case for inherited required fields
if self.inherited and field.required:
self.required = True
def traverse_related(self, record):
""" Traverse the fields of the related field `self` except for the last
one, and return it as a pair `(last_record, last_field)`. """
for name in self.related[:-1]:
record = record[name][:1].with_prefetch(record._prefetch)
return record, self.related_field
def _compute_related(self, records):
""" Compute the related field ``self`` on ``records``. """
# when related_sudo, bypass access rights checks when reading values
others = records.sudo() if self.related_sudo else records
for record, other in pycompat.izip(records, others):
if not record.id and record.env != other.env:
# draft records: copy record's cache to other's cache first
copy_cache(record, other.env)
other, field = self.traverse_related(other)
record[self.name] = other[field.name]
def _inverse_related(self, records):
""" Inverse the related field ``self`` on ``records``. """
# store record values, otherwise they may be lost by cache invalidation!
record_value = {record: record[self.name] for record in records}
for record in records:
other, field = self.traverse_related(record)
if other:
other[field.name] = record_value[record]
def _search_related(self, records, operator, value):
""" Determine the domain to search on field ``self``. """
return [('.'.join(self.related), operator, value)]
# properties used by _setup_related_full() to copy values from related field
_related_comodel_name = property(attrgetter('comodel_name'))
_related_string = property(attrgetter('string'))
_related_help = property(attrgetter('help'))
_related_readonly = property(attrgetter('readonly'))
_related_groups = property(attrgetter('groups'))
_related_group_operator = property(attrgetter('group_operator'))
@property
def base_field(self):
""" Return the base field of an inherited field, or ``self``. """
return self.related_field.base_field if self.inherited else self
#
# Company-dependent fields
#
def _default_company_dependent(self, model):
return model.env['ir.property'].get(self.name, self.model_name)
def _compute_company_dependent(self, records):
Property = records.env['ir.property']
values = Property.get_multi(self.name, self.model_name, records.ids)
for record in records:
record[self.name] = values.get(record.id)
def _inverse_company_dependent(self, records):
Property = records.env['ir.property']
values = {
record.id: self.convert_to_write(record[self.name], record)
for record in records
}
Property.set_multi(self.name, self.model_name, values)
def _search_company_dependent(self, records, operator, value):
Property = records.env['ir.property']
return Property.search_multi(self.name, self.model_name, operator, value)
#
# Setup of field triggers
#
# The triggers of ``self`` are a collection of pairs ``(field, path)`` of
# fields that depend on ``self``. When ``self`` is modified, it invalidates
# the cache of each ``field``, and determines the records to recompute based
# on ``path``. See method ``modified`` below for details.
#
def resolve_deps(self, model):
""" Return the dependencies of ``self`` as tuples ``(model, field, path)``,
where ``path`` is an optional list of field names.
"""
model0 = model
result = []
# add self's own dependencies
for dotnames in self.depends:
if dotnames == self.name:
_logger.warning("Field %s depends on itself; please fix its decorator @api.depends().", self)
model, path = model0, dotnames.split('.')
for i, fname in enumerate(path):
field = model._fields[fname]
result.append((model, field, path[:i]))
model = model0.env.get(field.comodel_name)
# add self's model dependencies
for mname, fnames in model0._depends.items():
model = model0.env[mname]
for fname in fnames:
field = model._fields[fname]
result.append((model, field, None))
# add indirect dependencies from the dependencies found above
for model, field, path in list(result):
for inv_field in model._field_inverses[field]:
inv_model = model0.env[inv_field.model_name]
inv_path = None if path is None else path + [field.name]
result.append((inv_model, inv_field, inv_path))
return result
def setup_triggers(self, model):
""" Add the necessary triggers to invalidate/recompute ``self``. """
for model, field, path in self.resolve_deps(model):
if field is not self:
path_str = None if path is None else ('.'.join(path) or 'id')
model._field_triggers.add(field, (self, path_str))
elif path:
self.recursive = True
model._field_triggers.add(field, (self, '.'.join(path)))
############################################################################
#
# Field description
#
def get_description(self, env):
""" Return a dictionary that describes the field ``self``. """
desc = {'type': self.type}
for attr, prop in self.description_attrs:
value = getattr(self, prop)
if callable(value):
value = value(env)
if value is not None:
desc[attr] = value
return desc
# properties used by get_description()
_description_store = property(attrgetter('store'))
_description_manual = property(attrgetter('manual'))
_description_depends = property(attrgetter('depends'))
_description_related = property(attrgetter('related'))
_description_company_dependent = property(attrgetter('company_dependent'))
_description_readonly = property(attrgetter('readonly'))
_description_required = property(attrgetter('required'))
_description_states = property(attrgetter('states'))
_description_groups = property(attrgetter('groups'))
_description_change_default = property(attrgetter('change_default'))
_description_deprecated = property(attrgetter('deprecated'))
@property
def _description_searchable(self):
return bool(self.store or self.search)
@property
def _description_sortable(self):
return self.store or (self.inherited and self.related_field._description_sortable)
def _description_string(self, env):
if self.string and env.lang:
model_name = self.base_field.model_name
field_string = env['ir.translation'].get_field_string(model_name)
return field_string.get(self.name) or self.string
return self.string
def _description_help(self, env):
if self.help and env.lang:
model_name = self.base_field.model_name
field_help = env['ir.translation'].get_field_help(model_name)
return field_help.get(self.name) or self.help
return self.help
############################################################################
#
# Conversion of values
#
def cache_key(self, record):
""" Return the key to get/set the value of ``self`` on ``record`` in
cache, the full cache key being ``(self, record.id, key)``.
"""
env = record.env
return env if self.context_dependent else (env.cr, env.uid)
def null(self, record):
""" Return the null value for this field in the record format. """
return False
def convert_to_column(self, value, record, values=None):
""" Convert ``value`` from the ``write`` format to the SQL format. """
if value is None or value is False:
return None
return pycompat.to_native(value)
def convert_to_cache(self, value, record, validate=True):
""" Convert ``value`` to the cache format; ``value`` may come from an
assignment, or have the format of methods :meth:`BaseModel.read` or
:meth:`BaseModel.write`. If the value represents a recordset, it should
be added for prefetching on ``record``.
:param bool validate: when True, field-specific validation of ``value``
will be performed
"""
return value
def convert_to_record(self, value, record):
""" Convert ``value`` from the cache format to the record format.
If the value represents a recordset, it should share the prefetching of
``record``.
"""
return value
def convert_to_read(self, value, record, use_name_get=True):
""" Convert ``value`` from the record format to the format returned by
method :meth:`BaseModel.read`.
:param bool use_name_get: when True, the value's display name will be
computed using :meth:`BaseModel.name_get`, if relevant for the field
"""
return False if value is None else value
def convert_to_write(self, value, record):
""" Convert ``value`` from the record format to the format of method
:meth:`BaseModel.write`.
"""
return self.convert_to_read(value, record)
def convert_to_onchange(self, value, record, names):
""" Convert ``value`` from the record format to the format returned by
method :meth:`BaseModel.onchange`.
:param names: a tree of field names (for relational fields only)
"""
return self.convert_to_read(value, record)
def convert_to_export(self, value, record):
""" Convert ``value`` from the record format to the export format. """
if not value:
return ''
return value if record._context.get('export_raw_data') else ustr(value)
def convert_to_display_name(self, value, record):
""" Convert ``value`` from the record format to a suitable display name. """
return ustr(value)
############################################################################
#
# Update database schema
#
def update_db(self, model, columns):
""" Update the database schema to implement this field.
:param model: an instance of the field's model
:param columns: a dict mapping column names to their configuration in database
:return: ``True`` if the field must be recomputed on existing rows
"""
if not self.column_type:
return
column = columns.get(self.name)
if not column and hasattr(self, 'oldname'):
# column not found; check whether it exists under its old name
column = columns.get(self.oldname)
if column:
sql.rename_column(model._cr, model._table, self.oldname, self.name)
# create/update the column, not null constraint, indexes
self.update_db_column(model, column)
self.update_db_notnull(model, column)
self.update_db_index(model, column)
return not column
def update_db_column(self, model, column):
""" Create/update the column corresponding to ``self``.
:param model: an instance of the field's model
:param column: the column's configuration (dict) if it exists, or ``None``
"""
if not column:
# the column does not exist, create it
sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string)
return
if column['udt_name'] == self.column_type[0]:
return
if column['udt_name'] in self.column_cast_from:
sql.convert_column(model._cr, model._table, self.name, self.column_type[1])
else:
newname = (self.name + '_moved{}').format
i = 0
while sql.column_exists(model._cr, model._table, newname(i)):
i += 1
if column['is_nullable'] == 'NO':
sql.drop_not_null(model._cr, model._table, self.name)
sql.rename_column(model._cr, model._table, self.name, newname(i))
sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string)
def update_db_notnull(self, model, column):
""" Add or remove the NOT NULL constraint on ``self``.
:param model: an instance of the field's model
:param column: the column's configuration (dict) if it exists, or ``None``
"""
has_notnull = column and column['is_nullable'] == 'NO'
if not column or (self.required and not has_notnull):
# the column is new or it becomes required; initialize its values
if model._table_has_rows():
model._init_column(self.name)
if self.required and not has_notnull:
sql.set_not_null(model._cr, model._table, self.name)
elif not self.required and has_notnull:
sql.drop_not_null(model._cr, model._table, self.name)
def update_db_index(self, model, column):
""" Add or remove the index corresponding to ``self``.
:param model: an instance of the field's model
:param column: the column's configuration (dict) if it exists, or ``None``
"""
indexname = '%s_%s_index' % (model._table, self.name)
if self.index:
sql.create_index(model._cr, indexname, model._table, ['"%s"' % self.name])
else:
sql.drop_index(model._cr, indexname, model._table)
############################################################################
#
# Read from/write to database
#
def read(self, records):
""" Read the value of ``self`` on ``records``, and store it in cache. """
return NotImplementedError("Method read() undefined on %s" % self)
def write(self, records, value, create=False):
""" Write the value of ``self`` on ``records``. The ``value`` must be in
the format of method :meth:`BaseModel.write`.
:param create: whether ``records`` have just been created (to enable
some optimizations)
"""
return NotImplementedError("Method write() undefined on %s" % self)
############################################################################
#
# Descriptor methods
#
def __get__(self, record, owner):
""" return the value of field ``self`` on ``record`` """
if record is None:
return self # the field is accessed through the owner class
if record:
# only a single record may be accessed
record.ensure_one()
try:
value = record.env.cache.get(record, self)
except KeyError:
# cache miss, determine value and retrieve it
if record.id:
self.determine_value(record)
else:
self.determine_draft_value(record)
value = record.env.cache.get(record, self)
else:
# null record -> return the null value for this field
value = self.convert_to_cache(False, record, validate=False)
return self.convert_to_record(value, record)
def __set__(self, record, value):
""" set the value of field ``self`` on ``record`` """
env = record.env
# only a single record may be updated
record.ensure_one()
# adapt value to the cache level
value = self.convert_to_cache(value, record)
if env.in_draft or not record.id:
# determine dependent fields
spec = self.modified_draft(record)
# set value in cache, inverse field, and mark record as dirty
record.env.cache.set(record, self, value)
if env.in_onchange:
for invf in record._field_inverses[self]:
invf._update(record[self.name], record)
record._set_dirty(self.name)
# determine more dependent fields, and invalidate them
if self.relational:
spec += self.modified_draft(record)
env.cache.invalidate(spec)
else:
# Write to database
write_value = self.convert_to_write(self.convert_to_record(value, record), record)
record.write({self.name: write_value})
# Update the cache unless value contains a new record
if not (self.relational and not all(value)):
record.env.cache.set(record, self, value)
############################################################################
#
# Computation of field values
#
def _compute_value(self, records):
""" Invoke the compute method on ``records``. """
# initialize the fields to their corresponding null value in cache
fields = records._field_computed[self]
cache = records.env.cache
for field in fields:
for record in records:
cache.set(record, field, field.convert_to_cache(False, record, validate=False))
if isinstance(self.compute, pycompat.string_types):
getattr(records, self.compute)()
else:
self.compute(records)
def compute_value(self, records):
""" Invoke the compute method on ``records``; the results are in cache. """
fields = records._field_computed[self]
with records.env.do_in_draft(), records.env.protecting(fields, records):
try:
self._compute_value(records)
except (AccessError, MissingError):
# some record is forbidden or missing, retry record by record
for record in records:
try:
self._compute_value(record)
except Exception as exc:
record.env.cache.set_failed(record, [self], exc)
def determine_value(self, record):
""" Determine the value of ``self`` for ``record``. """
env = record.env
if self.store and not (self.compute and env.in_onchange):
# this is a stored field or an old-style function field
if self.compute:
# this is a stored computed field, check for recomputation
recs = record._recompute_check(self)
if recs:
# recompute the value (only in cache)
self.compute_value(recs)
# HACK: if result is in the wrong cache, copy values
if recs.env != env:
computed = record._field_computed[self]
for source, target in pycompat.izip(recs, recs.with_env(env)):
try:
values = {f.name: source[f.name] for f in computed}
target._cache.update(target._convert_to_cache(values, validate=False))
except MissingError as exc:
target._cache.set_failed(target._fields, exc)
# the result is saved to database by BaseModel.recompute()
return
# read the field from database
record._prefetch_field(self)
elif self.compute:
# this is either a non-stored computed field, or a stored computed
# field in onchange mode
if self.recursive:
self.compute_value(record)
else:
recs = record._in_cache_without(self)
recs = recs.with_prefetch(record._prefetch)
self.compute_value(recs)
else:
# this is a non-stored non-computed field
record.env.cache.set(record, self, self.convert_to_cache(False, record, validate=False))
def determine_draft_value(self, record):
""" Determine the value of ``self`` for the given draft ``record``. """
if self.compute:
fields = record._field_computed[self]
with record.env.protecting(fields, record):
self._compute_value(record)
else:
null = self.convert_to_cache(False, record, validate=False)
record.env.cache.set_special(record, self, lambda: null)
def determine_inverse(self, records):
""" Given the value of ``self`` on ``records``, inverse the computation. """
if isinstance(self.inverse, pycompat.string_types):
getattr(records, self.inverse)()
else:
self.inverse(records)
def determine_domain(self, records, operator, value):
""" Return a domain representing a condition on ``self``. """
if isinstance(self.search, pycompat.string_types):
return getattr(records, self.search)(operator, value)
else:
return self.search(records, operator, value)
############################################################################
#
# Notification when fields are modified
#
def modified_draft(self, records):
""" Same as :meth:`modified`, but in draft mode. """
env = records.env
# invalidate the fields on the records in cache that depend on
# ``records``, except fields currently being computed
spec = []
for field, path in records._field_triggers[self]:
if not field.compute:
# Note: do not invalidate non-computed fields. Such fields may
# require invalidation in general (like *2many fields with
# domains) but should not be invalidated in this case, because
# we would simply lose their values during an onchange!
continue
target = env[field.model_name]
protected = env.protected(field)
if path == 'id' and field.model_name == records._name:
target = records - protected
elif path and env.in_onchange:
target = (env.cache.get_records(target, field) - protected).filtered(
lambda rec: rec if path == 'id' else rec._mapped_cache(path) & records
)
else:
target = env.cache.get_records(target, field) - protected
if target:
spec.append((field, target._ids))
return spec
class Boolean(Field):
type = 'boolean'
column_type = ('bool', 'bool')
def convert_to_column(self, value, record, values=None):
return bool(value)
def convert_to_cache(self, value, record, validate=True):
return bool(value)
def convert_to_export(self, value, record):
if record._context.get('export_raw_data'):
return value
return ustr(value)
class Integer(Field):
type = 'integer'
column_type = ('int4', 'int4')
_slots = {
'group_operator': 'sum',
}
_description_group_operator = property(attrgetter('group_operator'))
def convert_to_column(self, value, record, values=None):
return int(value or 0)
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, dict):
# special case, when an integer field is used as inverse for a one2many
return value.get('id', False)
return int(value or 0)
def convert_to_read(self, value, record, use_name_get=True):
# Integer values greater than 2^31-1 are not supported in pure XMLRPC,
# so we have to pass them as floats :-(
if value and value > MAXINT:
return float(value)
return value
def _update(self, records, value):
# special case, when an integer field is used as inverse for a one2many
cache = records.env.cache
for record in records:
cache.set(record, self, value.id or 0)
def convert_to_export(self, value, record):
if value or value == 0:
return value if record._context.get('export_raw_data') else ustr(value)
return ''
class Float(Field):
""" The precision digits are given by the attribute
:param digits: a pair (total, decimal), or a function taking a database
cursor and returning a pair (total, decimal)
"""
type = 'float'
column_cast_from = ('int4', 'numeric', 'float8')
_slots = {
'_digits': None, # digits argument passed to class initializer
'group_operator': 'sum',
}
def __init__(self, string=Default, digits=Default, **kwargs):
super(Float, self).__init__(string=string, _digits=digits, **kwargs)
@property
def column_type(self):
# Explicit support for "falsy" digits (0, False) to indicate a NUMERIC
# field with no fixed precision. The values are saved in the database
# with all significant digits.
# FLOAT8 type is still the default when there is no precision because it
# is faster for most operations (sums, etc.)
return ('numeric', 'numeric') if self.digits is not None else \
('float8', 'double precision')
@property
def digits(self):
if callable(self._digits):
with LazyCursor() as cr:
return self._digits(cr)
else:
return self._digits
_related__digits = property(attrgetter('_digits'))
_description_digits = property(attrgetter('digits'))
_description_group_operator = property(attrgetter('group_operator'))
def convert_to_column(self, value, record, values=None):
result = float(value or 0.0)
digits = self.digits
if digits:
precision, scale = digits
result = float_repr(float_round(result, precision_digits=scale), precision_digits=scale)
return result
def convert_to_cache(self, value, record, validate=True):
# apply rounding here, otherwise value in cache may be wrong!
value = float(value or 0.0)
if not validate:
return value
digits = self.digits
return float_round(value, precision_digits=digits[1]) if digits else value
def convert_to_export(self, value, record):
if value or value == 0.0:
return value if record._context.get('export_raw_data') else ustr(value)
return ''
class Monetary(Field):
""" The decimal precision and currency symbol are taken from the attribute
:param currency_field: name of the field holding the currency this monetary
field is expressed in (default: `currency_id`)
"""
type = 'monetary'
column_type = ('numeric', 'numeric')
column_cast_from = ('float8',)
_slots = {
'currency_field': None,
'group_operator': 'sum',
}
def __init__(self, string=Default, currency_field=Default, **kwargs):
super(Monetary, self).__init__(string=string, currency_field=currency_field, **kwargs)
_related_currency_field = property(attrgetter('currency_field'))
_description_currency_field = property(attrgetter('currency_field'))
_description_group_operator = property(attrgetter('group_operator'))
def _setup_regular_full(self, model):
super(Monetary, self)._setup_regular_full(model)
if not self.currency_field:
# pick a default, trying in order: 'currency_id', 'x_currency_id'
if 'currency_id' in model._fields:
self.currency_field = 'currency_id'
elif 'x_currency_id' in model._fields:
self.currency_field = 'x_currency_id'
assert self.currency_field in model._fields, \
"Field %s with unknown currency_field %r" % (self, self.currency_field)
def convert_to_column(self, value, record, values=None):
# retrieve currency from values or record
if values and self.currency_field in values:
field = record._fields[self.currency_field]
currency = field.convert_to_cache(values[self.currency_field], record)
currency = field.convert_to_record(currency, record)
else:
# Note: this is wrong if 'record' is several records with different
# currencies, which is functional nonsense and should not happen
currency = record[:1][self.currency_field]
value = float(value or 0.0)
if currency:
return float_repr(currency.round(value), currency.decimal_places)
return value
def convert_to_cache(self, value, record, validate=True):
# cache format: float
value = float(value or 0.0)
if validate and record[self.currency_field]:
# FIXME @rco-odoo: currency may not be already initialized if it is
# a function or related field!
value = record[self.currency_field].round(value)
return value
def convert_to_read(self, value, record, use_name_get=True):
return value
def convert_to_write(self, value, record):
return value
class _String(Field):
""" Abstract class for string fields. """
_slots = {
'translate': False, # whether the field is translated
}
def __init__(self, string=Default, **kwargs):
# translate is either True, False, or a callable
if 'translate' in kwargs and not callable(kwargs['translate']):
kwargs['translate'] = bool(kwargs['translate'])
super(_String, self).__init__(string=string, **kwargs)
_related_translate = property(attrgetter('translate'))
def _description_translate(self, env):
return bool(self.translate)
def get_trans_terms(self, value):
""" Return the sequence of terms to translate found in `value`. """
if not callable(self.translate):
return [value] if value else []
terms = []
self.translate(terms.append, value)
return terms
def get_trans_func(self, records):
""" Return a translation function `translate` for `self` on the given
records; the function call `translate(record_id, value)` translates the
field value to the language given by the environment of `records`.
"""
if callable(self.translate):
rec_src_trans = records.env['ir.translation']._get_terms_translations(self, records)
def translate(record_id, value):
src_trans = rec_src_trans[record_id]
return self.translate(src_trans.get, value)
else:
rec_trans = records.env['ir.translation']._get_ids(
'%s,%s' % (self.model_name, self.name), 'model', records.env.lang, records.ids)
def translate(record_id, value):
return rec_trans.get(record_id) or value
return translate
def check_trans_value(self, value):
""" Check and possibly sanitize the translated term `value`. """
if callable(self.translate):
# do a "no-translation" to sanitize the value
callback = lambda term: None
return self.translate(callback, value)
else:
return value
class Char(_String):
""" Basic string field, can be length-limited, usually displayed as a
single-line string in clients.
:param int size: the maximum size of values stored for that field
:param translate: enable the translation of the field's values; use
``translate=True`` to translate field values as a whole; ``translate``
may also be a callable such that ``translate(callback, value)``
translates ``value`` by using ``callback(term)`` to retrieve the
translation of terms.
"""
type = 'char'
column_cast_from = ('text',)
_slots = {
'size': None, # maximum size of values (deprecated)
}
@property
def column_type(self):
return ('varchar', pg_varchar(self.size))
def update_db_column(self, model, column):
if (
column and column['udt_name'] == 'varchar' and column['character_maximum_length'] and
(self.size is None or column['character_maximum_length'] < self.size)
):
# the column's varchar size does not match self.size; convert it
sql.convert_column(model._cr, model._table, self.name, self.column_type[1])
super(Char, self).update_db_column(model, column)
_related_size = property(attrgetter('size'))
_description_size = property(attrgetter('size'))
def _setup_regular_base(self, model):
super(Char, self)._setup_regular_base(model)
assert self.size is None or isinstance(self.size, int), \
"Char field %s with non-integer size %r" % (self, self.size)
def convert_to_column(self, value, record, values=None):
if value is None or value is False:
return None
# we need to convert the string to a unicode object to be able
# to evaluate its length (and possibly truncate it) reliably
return pycompat.to_text(value)[:self.size]
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return False
return pycompat.to_text(value)[:self.size]
class Text(_String):
""" Very similar to :class:`~.Char` but used for longer contents, does not
have a size and usually displayed as a multiline text box.
:param translate: enable the translation of the field's values; use
``translate=True`` to translate field values as a whole; ``translate``
may also be a callable such that ``translate(callback, value)``
translates ``value`` by using ``callback(term)`` to retrieve the
translation of terms.
"""
type = 'text'
column_type = ('text', 'text')
column_cast_from = ('varchar',)
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return False
return ustr(value)
class Html(_String):
type = 'html'
column_type = ('text', 'text')
_slots = {
'sanitize': True, # whether value must be sanitized
'sanitize_tags': True, # whether to sanitize tags (only a white list of attributes is accepted)
'sanitize_attributes': True, # whether to sanitize attributes (only a white list of attributes is accepted)
'sanitize_style': False, # whether to sanitize style attributes
'strip_style': False, # whether to strip style attributes (removed and therefore not sanitized)
'strip_classes': False, # whether to strip classes attributes
}
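    # Illustrative usage sketch (not part of the original source; the field
    # name and attribute values are assumed):
    #     description = fields.Html(sanitize_attributes=False, strip_classes=True)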
def _setup_attrs(self, model, name):
super(Html, self)._setup_attrs(model, name)
# Translated sanitized html fields must use html_translate or a callable.
if self.translate is True and self.sanitize:
self.translate = html_translate
_related_sanitize = property(attrgetter('sanitize'))
_related_sanitize_tags = property(attrgetter('sanitize_tags'))
_related_sanitize_attributes = property(attrgetter('sanitize_attributes'))
_related_sanitize_style = property(attrgetter('sanitize_style'))
_related_strip_style = property(attrgetter('strip_style'))
_related_strip_classes = property(attrgetter('strip_classes'))
_description_sanitize = property(attrgetter('sanitize'))
_description_sanitize_tags = property(attrgetter('sanitize_tags'))
_description_sanitize_attributes = property(attrgetter('sanitize_attributes'))
_description_sanitize_style = property(attrgetter('sanitize_style'))
_description_strip_style = property(attrgetter('strip_style'))
_description_strip_classes = property(attrgetter('strip_classes'))
def convert_to_column(self, value, record, values=None):
if value is None or value is False:
return None
if self.sanitize:
return html_sanitize(
value, silent=True,
sanitize_tags=self.sanitize_tags,
sanitize_attributes=self.sanitize_attributes,
sanitize_style=self.sanitize_style,
strip_style=self.strip_style,
strip_classes=self.strip_classes)
return value
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return False
if validate and self.sanitize:
return html_sanitize(
value, silent=True,
sanitize_tags=self.sanitize_tags,
sanitize_attributes=self.sanitize_attributes,
sanitize_style=self.sanitize_style,
strip_style=self.strip_style,
strip_classes=self.strip_classes)
return value
class Date(Field):
type = 'date'
column_type = ('date', 'date')
column_cast_from = ('timestamp',)
@staticmethod
def today(*args):
""" Return the current day in the format expected by the ORM.
This function may be used to compute default values.
"""
return date.today().strftime(DATE_FORMAT)
@staticmethod
def context_today(record, timestamp=None):
""" Return the current date as seen in the client's timezone in a format
fit for date fields. This method may be used to compute default
values.
:param datetime timestamp: optional datetime value to use instead of
the current date and time (must be a datetime, regular dates
can't be converted between timezones.)
:rtype: str
"""
today = timestamp or datetime.now()
context_today = None
tz_name = record._context.get('tz') or record.env.user.tz
if tz_name:
try:
today_utc = pytz.timezone('UTC').localize(today, is_dst=False) # UTC = no DST
context_today = today_utc.astimezone(pytz.timezone(tz_name))
except Exception:
_logger.debug("failed to compute context/client-specific today date, using UTC value for `today`",
exc_info=True)
return (context_today or today).strftime(DATE_FORMAT)
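    # Illustrative usage sketch (not part of the original source; the field
    # name is assumed): both helpers above are commonly used as defaults, e.g.
    #     date_order = fields.Date(default=fields.Date.context_today)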
@staticmethod
def from_string(value):
""" Convert an ORM ``value`` into a :class:`date` value. """
if not value:
return None
value = value[:DATE_LENGTH]
return datetime.strptime(value, DATE_FORMAT).date()
@staticmethod
def to_string(value):
""" Convert a :class:`date` value into the format expected by the ORM. """
return value.strftime(DATE_FORMAT) if value else False
def convert_to_cache(self, value, record, validate=True):
if not value:
return False
if isinstance(value, pycompat.string_types):
if validate:
# force parsing for validation
self.from_string(value)
return value[:DATE_LENGTH]
return self.to_string(value)
def convert_to_export(self, value, record):
if not value:
return ''
return self.from_string(value) if record._context.get('export_raw_data') else ustr(value)
class Datetime(Field):
type = 'datetime'
column_type = ('timestamp', 'timestamp')
column_cast_from = ('date',)
@staticmethod
def now(*args):
""" Return the current day and time in the format expected by the ORM.
This function may be used to compute default values.
"""
return datetime.now().strftime(DATETIME_FORMAT)
@staticmethod
def context_timestamp(record, timestamp):
"""Returns the given timestamp converted to the client's timezone.
This method is *not* meant for use as a default initializer,
because datetime fields are automatically converted upon
display on client side. For default values :meth:`fields.datetime.now`
should be used instead.
:param datetime timestamp: naive datetime value (expressed in UTC)
to be converted to the client timezone
:rtype: datetime
:return: timestamp converted to timezone-aware datetime in context
timezone
"""
assert isinstance(timestamp, datetime), 'Datetime instance expected'
tz_name = record._context.get('tz') or record.env.user.tz
utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
if tz_name:
try:
context_tz = pytz.timezone(tz_name)
return utc_timestamp.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific timestamp, "
"using the UTC value",
exc_info=True)
return utc_timestamp
@staticmethod
def from_string(value):
""" Convert an ORM ``value`` into a :class:`datetime` value. """
if not value:
return None
value = value[:DATETIME_LENGTH]
if len(value) == DATE_LENGTH:
value += " 00:00:00"
return datetime.strptime(value, DATETIME_FORMAT)
@staticmethod
def to_string(value):
""" Convert a :class:`datetime` value into the format expected by the ORM. """
return value.strftime(DATETIME_FORMAT) if value else False
def convert_to_cache(self, value, record, validate=True):
if not value:
return False
if isinstance(value, pycompat.string_types):
if validate:
# force parsing for validation
self.from_string(value)
value = value[:DATETIME_LENGTH]
if len(value) == DATE_LENGTH:
value += " 00:00:00"
return value
return self.to_string(value)
def convert_to_export(self, value, record):
if not value:
return ''
return self.from_string(value) if record._context.get('export_raw_data') else ustr(value)
def convert_to_display_name(self, value, record):
assert record, 'Record expected'
return Datetime.to_string(Datetime.context_timestamp(record, Datetime.from_string(value)))
# http://initd.org/psycopg/docs/usage.html#binary-adaptation
# Received data is returned as buffer (in Python 2) or memoryview (in Python 3).
_BINARY = memoryview
if pycompat.PY2:
_BINARY = buffer #pylint: disable=buffer-builtin
class Binary(Field):
type = 'binary'
_slots = {
'prefetch': False, # not prefetched by default
'context_dependent': True, # depends on context (content or size)
'attachment': False, # whether value is stored in attachment
}
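    # Illustrative usage sketch (not part of the original source; the field
    # name is assumed): storing the value in an attachment rather than in a
    # database column is enabled with
    #     datas = fields.Binary(attachment=True)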
@property
def column_type(self):
return None if self.attachment else ('bytea', 'bytea')
_description_attachment = property(attrgetter('attachment'))
def convert_to_column(self, value, record, values=None):
# Binary values may be byte strings (python 2.6 byte array), but
# the legacy OpenERP convention is to transfer and store binaries
# as base64-encoded strings. The base64 string may be provided as a
# unicode in some circumstances, hence the str() cast here.
# This str() coercion will only work for pure ASCII unicode strings,
        # on purpose - non-base64 data must be passed as 8-bit byte strings.
if not value:
return None
if isinstance(value, bytes):
return psycopg2.Binary(value)
return psycopg2.Binary(pycompat.text_type(value).encode('ascii'))
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, _BINARY):
return bytes(value)
if isinstance(value, pycompat.integer_types) and \
(record._context.get('bin_size') or
record._context.get('bin_size_' + self.name)):
# If the client requests only the size of the field, we return that
# instead of the content. Presumably a separate request will be done
# to read the actual content, if necessary.
return human_size(value)
return value
def read(self, records):
# values are stored in attachments, retrieve them
assert self.attachment
domain = [
('res_model', '=', records._name),
('res_field', '=', self.name),
('res_id', 'in', records.ids),
]
# Note: the 'bin_size' flag is handled by the field 'datas' itself
data = {att.res_id: att.datas
for att in records.env['ir.attachment'].sudo().search(domain)}
cache = records.env.cache
for record in records:
cache.set(record, self, data.get(record.id, False))
def write(self, records, value, create=False):
# retrieve the attachments that stores the value, and adapt them
assert self.attachment
if create:
atts = records.env['ir.attachment'].sudo()
else:
atts = records.env['ir.attachment'].sudo().search([
('res_model', '=', records._name),
('res_field', '=', self.name),
('res_id', 'in', records.ids),
])
with records.env.norecompute():
if value:
# update the existing attachments
atts.write({'datas': value})
# create the missing attachments
for record in (records - records.browse(atts.mapped('res_id'))):
atts.create({
'name': self.name,
'res_model': record._name,
'res_field': self.name,
'res_id': record.id,
'type': 'binary',
'datas': value,
})
else:
atts.unlink()
class Selection(Field):
"""
:param selection: specifies the possible values for this field.
It is given as either a list of pairs (``value``, ``string``), or a
model method, or a method name.
:param selection_add: provides an extension of the selection in the case
of an overridden field. It is a list of pairs (``value``, ``string``).
The attribute ``selection`` is mandatory except in the case of
:ref:`related fields <field-related>` or :ref:`field extensions
<field-incremental-definition>`.
"""
type = 'selection'
_slots = {
'selection': None, # [(value, string), ...], function or method name
}
def __init__(self, selection=Default, string=Default, **kwargs):
super(Selection, self).__init__(selection=selection, string=string, **kwargs)
@property
def column_type(self):
if (self.selection and
isinstance(self.selection, list) and
isinstance(self.selection[0][0], int)):
return ('int4', 'integer')
else:
return ('varchar', pg_varchar())
def _setup_regular_base(self, model):
super(Selection, self)._setup_regular_base(model)
assert self.selection is not None, "Field %s without selection" % self
def _setup_related_full(self, model):
super(Selection, self)._setup_related_full(model)
# selection must be computed on related field
field = self.related_field
self.selection = lambda model: field._description_selection(model.env)
def _setup_attrs(self, model, name):
super(Selection, self)._setup_attrs(model, name)
# determine selection (applying 'selection_add' extensions)
for field in reversed(resolve_mro(model, name, self._can_setup_from)):
# We cannot use field.selection or field.selection_add here
# because those attributes are overridden by ``_setup_attrs``.
if 'selection' in field.args:
self.selection = field.args['selection']
if 'selection_add' in field.args:
# use an OrderedDict to update existing values
selection_add = field.args['selection_add']
self.selection = list(OrderedDict(self.selection + selection_add).items())
def _description_selection(self, env):
""" return the selection list (pairs (value, label)); labels are
translated according to context language
"""
selection = self.selection
if isinstance(selection, pycompat.string_types):
return getattr(env[self.model_name], selection)()
if callable(selection):
return selection(env[self.model_name])
# translate selection labels
if env.lang:
name = "%s,%s" % (self.model_name, self.name)
translate = partial(
env['ir.translation']._get_source, name, 'selection', env.lang)
return [(value, translate(label) if label else label) for value, label in selection]
else:
return selection
def get_values(self, env):
""" return a list of the possible values """
selection = self.selection
if isinstance(selection, pycompat.string_types):
selection = getattr(env[self.model_name], selection)()
elif callable(selection):
selection = selection(env[self.model_name])
return [value for value, _ in selection]
def convert_to_cache(self, value, record, validate=True):
if not validate:
return value or False
if value in self.get_values(record.env):
return value
elif not value:
return False
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_export(self, value, record):
if not isinstance(self.selection, list):
# FIXME: this reproduces an existing buggy behavior!
return value if value else ''
for item in self._description_selection(record.env):
if item[0] == value:
return item[1]
return False
class Reference(Selection):
type = 'reference'
@property
def column_type(self):
return ('varchar', pg_varchar())
def convert_to_cache(self, value, record, validate=True):
# cache format: (res_model, res_id) or False
def process(res_model, res_id):
record._prefetch[res_model].add(res_id)
return (res_model, res_id)
if isinstance(value, BaseModel):
if not validate or (value._name in self.get_values(record.env) and len(value) <= 1):
return process(value._name, value.id) if value else False
elif isinstance(value, pycompat.string_types):
res_model, res_id = value.split(',')
if record.env[res_model].browse(int(res_id)).exists():
return process(res_model, int(res_id))
else:
return False
elif not value:
return False
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_record(self, value, record):
return value and record.env[value[0]].browse([value[1]], record._prefetch)
def convert_to_read(self, value, record, use_name_get=True):
return "%s,%s" % (value._name, value.id) if value else False
def convert_to_export(self, value, record):
return value.name_get()[0][1] if value else ''
def convert_to_display_name(self, value, record):
return ustr(value and value.display_name)
class _Relational(Field):
""" Abstract class for relational fields. """
relational = True
_slots = {
'domain': [], # domain for searching values
'context': {}, # context for searching values
}
def _setup_regular_base(self, model):
super(_Relational, self)._setup_regular_base(model)
if self.comodel_name not in model.pool:
_logger.warning("Field %s with unknown comodel_name %r", self, self.comodel_name)
self.comodel_name = '_unknown'
@property
def _related_domain(self):
if callable(self.domain):
# will be called with another model than self's
return lambda recs: self.domain(recs.env[self.model_name])
else:
# maybe not correct if domain is a string...
return self.domain
_related_context = property(attrgetter('context'))
_description_relation = property(attrgetter('comodel_name'))
_description_context = property(attrgetter('context'))
def _description_domain(self, env):
return self.domain(env[self.model_name]) if callable(self.domain) else self.domain
def null(self, record):
return record.env[self.comodel_name]
class Many2one(_Relational):
""" The value of such a field is a recordset of size 0 (no
record) or 1 (a single record).
:param comodel_name: name of the target model (string)
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param context: an optional context to use on the client side when
handling that field (dictionary)
:param ondelete: what to do when the referred record is deleted;
possible values are: ``'set null'``, ``'restrict'``, ``'cascade'``
:param auto_join: whether JOINs are generated upon search through that
field (boolean, by default ``False``)
:param delegate: set it to ``True`` to make fields of the target model
accessible from the current model (corresponds to ``_inherits``)
The attribute ``comodel_name`` is mandatory except in the case of related
fields or field extensions.
"""
type = 'many2one'
column_type = ('int4', 'int4')
_slots = {
'ondelete': 'set null', # what to do when value is deleted
'auto_join': False, # whether joins are generated upon search
'delegate': False, # whether self implements delegation
}
def __init__(self, comodel_name=Default, string=Default, **kwargs):
super(Many2one, self).__init__(comodel_name=comodel_name, string=string, **kwargs)
def _setup_attrs(self, model, name):
super(Many2one, self)._setup_attrs(model, name)
# determine self.delegate
if not self.delegate:
self.delegate = name in model._inherits.values()
def update_db(self, model, columns):
comodel = model.env[self.comodel_name]
if not model.is_transient() and comodel.is_transient():
raise ValueError('Many2one %s from Model to TransientModel is forbidden' % self)
if model.is_transient() and not comodel.is_transient():
            # Many2one relations from TransientModel to Model are annoying because
# they can block deletion due to foreign keys. So unless stated
# otherwise, we default them to ondelete='cascade'.
self.ondelete = self.ondelete or 'cascade'
return super(Many2one, self).update_db(model, columns)
def update_db_column(self, model, column):
super(Many2one, self).update_db_column(model, column)
model.pool.post_init(self.update_db_foreign_key, model, column)
def update_db_foreign_key(self, model, column):
comodel = model.env[self.comodel_name]
# ir_actions is inherited, so foreign key doesn't work on it
if not comodel._auto or comodel._table == 'ir_actions':
return
# create/update the foreign key, and reflect it in 'ir.model.constraint'
process = sql.fix_foreign_key if column else sql.add_foreign_key
new = process(model._cr, model._table, self.name, comodel._table, 'id', self.ondelete or 'set null')
if new:
conname = '%s_%s_fkey' % (model._table, self.name)
model.env['ir.model.constraint']._reflect_constraint(model, conname, 'f', None, self._module)
def _update(self, records, value):
""" Update the cached value of ``self`` for ``records`` with ``value``. """
cache = records.env.cache
for record in records:
cache.set(record, self, self.convert_to_cache(value, record, validate=False))
def convert_to_column(self, value, record, values=None):
return value or None
def convert_to_cache(self, value, record, validate=True):
# cache format: tuple(ids)
def process(ids):
return record._prefetch[self.comodel_name].update(ids) or ids
if type(value) in IdType:
return process((value,))
elif isinstance(value, BaseModel):
if not validate or (value._name == self.comodel_name and len(value) <= 1):
return process(value._ids)
raise ValueError("Wrong value for %s: %r" % (self, value))
elif isinstance(value, tuple):
# value is either a pair (id, name), or a tuple of ids
return process(value[:1])
elif isinstance(value, dict):
return process(record.env[self.comodel_name].new(value)._ids)
else:
return ()
def convert_to_record(self, value, record):
return record.env[self.comodel_name]._browse(value, record.env, record._prefetch)
def convert_to_read(self, value, record, use_name_get=True):
if use_name_get and value:
# evaluate name_get() as superuser, because the visibility of a
# many2one field value (id and name) depends on the current record's
# access rights, and not the value's access rights.
try:
# performance: value.sudo() prefetches the same records as value
return value.sudo().name_get()[0]
except MissingError:
# Should not happen, unless the foreign key is missing.
return False
else:
return value.id
def convert_to_write(self, value, record):
return value.id
def convert_to_export(self, value, record):
return value.name_get()[0][1] if value else ''
def convert_to_display_name(self, value, record):
return ustr(value.display_name)
def convert_to_onchange(self, value, record, names):
if not value.id:
return False
return super(Many2one, self).convert_to_onchange(value, record, names)
class _RelationalMulti(_Relational):
""" Abstract class for relational fields *2many. """
_slots = {
'context_dependent': True, # depends on context (active_test)
}
def _update(self, records, value):
""" Update the cached value of ``self`` for ``records`` with ``value``. """
cache = records.env.cache
for record in records:
if cache.contains(record, self):
val = self.convert_to_cache(record[self.name] | value, record, validate=False)
cache.set(record, self, val)
else:
cache.set_special(record, self, self._update_getter(record, value))
def _update_getter(self, record, value):
def getter():
# determine the current field's value, and update it in cache only
cache = record.env.cache
cache.remove(record, self)
val = self.convert_to_cache(record[self.name] | value, record, validate=False)
cache.set(record, self, val)
return val
return getter
def convert_to_cache(self, value, record, validate=True):
# cache format: tuple(ids)
def process(ids):
return record._prefetch[self.comodel_name].update(ids) or ids
if isinstance(value, BaseModel):
if not validate or (value._name == self.comodel_name):
return process(value._ids)
elif isinstance(value, (list, tuple)):
# value is a list/tuple of commands, dicts or record ids
comodel = record.env[self.comodel_name]
# determine the value ids; by convention empty on new records
ids = OrderedSet(record[self.name].ids if record.id else ())
# modify ids with the commands
for command in value:
if isinstance(command, (tuple, list)):
if command[0] == 0:
ids.add(comodel.new(command[2], command[1]).id)
elif command[0] == 1:
comodel.browse(command[1]).update(command[2])
ids.add(command[1])
elif command[0] == 2:
# note: the record will be deleted by write()
ids.discard(command[1])
elif command[0] == 3:
ids.discard(command[1])
elif command[0] == 4:
ids.add(command[1])
elif command[0] == 5:
ids.clear()
elif command[0] == 6:
ids = OrderedSet(command[2])
elif isinstance(command, dict):
ids.add(comodel.new(command).id)
else:
ids.add(command)
# return result as a tuple
return process(tuple(ids))
elif not value:
return ()
raise ValueError("Wrong value for %s: %s" % (self, value))
def convert_to_record(self, value, record):
return record.env[self.comodel_name]._browse(value, record.env, record._prefetch)
def convert_to_read(self, value, record, use_name_get=True):
return value.ids
def convert_to_write(self, value, record):
# make result with new and existing records
result = [(6, 0, [])]
for record in value:
if not record.id:
values = {name: record[name] for name in record._cache}
values = record._convert_to_write(values)
result.append((0, 0, values))
elif record._is_dirty():
values = {name: record[name] for name in record._get_dirty()}
values = record._convert_to_write(values)
result.append((1, record.id, values))
else:
result[0][2].append(record.id)
return result
def convert_to_onchange(self, value, record, names):
# return the recordset value as a list of commands; the commands may
# give all fields values, the client is responsible for figuring out
# which fields are actually dirty
result = [(5,)]
for record in value:
vals = {
name: value._fields[name].convert_to_onchange(record[name], record, subnames)
for name, subnames in names.items()
if name != 'id'
}
if not record.id:
result.append((0, record.id.ref or 0, vals))
elif vals:
result.append((1, record.id, vals))
else:
result.append((4, record.id))
return result
def convert_to_export(self, value, record):
return ','.join(name for id, name in value.name_get()) if value else ''
def convert_to_display_name(self, value, record):
raise NotImplementedError()
def _compute_related(self, records):
""" Compute the related field ``self`` on ``records``. """
super(_RelationalMulti, self)._compute_related(records)
if self.related_sudo:
# determine which records in the relation are actually accessible
target = records.mapped(self.name)
target_ids = set(target.search([('id', 'in', target.ids)]).ids)
accessible = lambda target: target.id in target_ids
# filter values to keep the accessible records only
for record in records:
record[self.name] = record[self.name].filtered(accessible)
def _setup_regular_base(self, model):
super(_RelationalMulti, self)._setup_regular_base(model)
if isinstance(self.domain, list):
self.depends += tuple(
self.name + '.' + arg[0]
for arg in self.domain
if isinstance(arg, (tuple, list)) and isinstance(arg[0], pycompat.string_types)
)
class One2many(_RelationalMulti):
""" One2many field; the value of such a field is the recordset of all the
records in ``comodel_name`` such that the field ``inverse_name`` is equal to
the current record.
:param comodel_name: name of the target model (string)
:param inverse_name: name of the inverse ``Many2one`` field in
``comodel_name`` (string)
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param context: an optional context to use on the client side when
handling that field (dictionary)
:param auto_join: whether JOINs are generated upon search through that
field (boolean, by default ``False``)
:param limit: optional limit to use upon read (integer)
The attributes ``comodel_name`` and ``inverse_name`` are mandatory except in
the case of related fields or field extensions.
"""
type = 'one2many'
_slots = {
'inverse_name': None, # name of the inverse field
'auto_join': False, # whether joins are generated upon search
'limit': None, # optional limit to use upon read
'copy': False, # o2m are not copied by default
}
def __init__(self, comodel_name=Default, inverse_name=Default, string=Default, **kwargs):
super(One2many, self).__init__(
comodel_name=comodel_name,
inverse_name=inverse_name,
string=string,
**kwargs
)
def _setup_regular_full(self, model):
super(One2many, self)._setup_regular_full(model)
if self.inverse_name:
# link self to its inverse field and vice-versa
comodel = model.env[self.comodel_name]
invf = comodel._fields[self.inverse_name]
            # In some rare cases, a ``One2many`` field can link to an ``Integer`` field
# (res_model/res_id pattern). Only inverse the field if this is
# a ``Many2one`` field.
if isinstance(invf, Many2one):
model._field_inverses.add(self, invf)
comodel._field_inverses.add(invf, self)
_description_relation_field = property(attrgetter('inverse_name'))
def convert_to_onchange(self, value, record, names):
names = names.copy()
names.pop(self.inverse_name, None)
return super(One2many, self).convert_to_onchange(value, record, names)
def update_db(self, model, columns):
if self.comodel_name in model.env:
comodel = model.env[self.comodel_name]
if self.inverse_name not in comodel._fields:
raise UserError(_("No inverse field %r found for %r") % (self.inverse_name, self.comodel_name))
def read(self, records):
# retrieve the lines in the comodel
comodel = records.env[self.comodel_name].with_context(**self.context)
inverse = self.inverse_name
get_id = (lambda rec: rec.id) if comodel._fields[inverse].type == 'many2one' else int
domain = self.domain(records) if callable(self.domain) else self.domain
domain = domain + [(inverse, 'in', records.ids)]
lines = comodel.search(domain, limit=self.limit)
# group lines by inverse field (without prefetching other fields)
group = defaultdict(list)
for line in lines.with_context(prefetch_fields=False):
# line[inverse] may be a record or an integer
group[get_id(line[inverse])].append(line.id)
# store result in cache
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(group[record.id]))
def write(self, records, value, create=False):
comodel = records.env[self.comodel_name].with_context(**self.context)
inverse = self.inverse_name
with records.env.norecompute():
for act in (value or []):
if act[0] == 0:
for record in records:
act[2][inverse] = record.id
comodel.create(act[2])
elif act[0] == 1:
comodel.browse(act[1]).write(act[2])
elif act[0] == 2:
comodel.browse(act[1]).unlink()
elif act[0] == 3:
inverse_field = comodel._fields[inverse]
if inverse_field.ondelete == 'cascade':
comodel.browse(act[1]).unlink()
else:
comodel.browse(act[1]).write({inverse: False})
elif act[0] == 4:
record = records[-1]
line = comodel.browse(act[1])
line_sudo = line.sudo().with_context(prefetch_fields=False)
if int(line_sudo[inverse]) != record.id:
line.write({inverse: record.id})
elif act[0] == 5:
domain = self.domain(records) if callable(self.domain) else self.domain
domain = domain + [(inverse, 'in', records.ids)]
inverse_field = comodel._fields[inverse]
if inverse_field.ondelete == 'cascade':
comodel.search(domain).unlink()
else:
comodel.search(domain).write({inverse: False})
elif act[0] == 6:
record = records[-1]
comodel.browse(act[2]).write({inverse: record.id})
query = "SELECT id FROM %s WHERE %s=%%s AND id <> ALL(%%s)" % (comodel._table, inverse)
comodel._cr.execute(query, (record.id, act[2] or [0]))
lines = comodel.browse([row[0] for row in comodel._cr.fetchall()])
inverse_field = comodel._fields[inverse]
if inverse_field.ondelete == 'cascade':
lines.unlink()
else:
lines.write({inverse: False})
class Many2many(_RelationalMulti):
""" Many2many field; the value of such a field is the recordset.
:param comodel_name: name of the target model (string)
The attribute ``comodel_name`` is mandatory except in the case of related
fields or field extensions.
:param relation: optional name of the table that stores the relation in
the database (string)
:param column1: optional name of the column referring to "these" records
in the table ``relation`` (string)
:param column2: optional name of the column referring to "those" records
in the table ``relation`` (string)
The attributes ``relation``, ``column1`` and ``column2`` are optional. If not
given, names are automatically generated from model names, provided
``model_name`` and ``comodel_name`` are different!
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param context: an optional context to use on the client side when
handling that field (dictionary)
:param limit: optional limit to use upon read (integer)
"""
type = 'many2many'
_slots = {
'relation': None, # name of table
'column1': None, # column of table referring to model
'column2': None, # column of table referring to comodel
'auto_join': False, # whether joins are generated upon search
'limit': None, # optional limit to use upon read
}
def __init__(self, comodel_name=Default, relation=Default, column1=Default,
column2=Default, string=Default, **kwargs):
super(Many2many, self).__init__(
comodel_name=comodel_name,
relation=relation,
column1=column1,
column2=column2,
string=string,
**kwargs
)
def _setup_regular_base(self, model):
super(Many2many, self)._setup_regular_base(model)
if self.store:
if not (self.relation and self.column1 and self.column2):
# table name is based on the stable alphabetical order of tables
comodel = model.env[self.comodel_name]
if not self.relation:
tables = sorted([model._table, comodel._table])
assert tables[0] != tables[1], \
"%s: Implicit/canonical naming of many2many relationship " \
"table is not possible when source and destination models " \
"are the same" % self
self.relation = '%s_%s_rel' % tuple(tables)
if not self.column1:
self.column1 = '%s_id' % model._table
if not self.column2:
self.column2 = '%s_id' % comodel._table
# check validity of table name
check_pg_name(self.relation)
def _setup_regular_full(self, model):
super(Many2many, self)._setup_regular_full(model)
if self.relation:
m2m = model.pool._m2m
# if inverse field has already been setup, it is present in m2m
invf = m2m.get((self.relation, self.column2, self.column1))
if invf:
comodel = model.env[self.comodel_name]
model._field_inverses.add(self, invf)
comodel._field_inverses.add(invf, self)
else:
# add self in m2m, so that its inverse field can find it
m2m[(self.relation, self.column1, self.column2)] = self
def update_db(self, model, columns):
cr = model._cr
# Do not reflect relations for custom fields, as they do not belong to a
# module. They are automatically removed when dropping the corresponding
# 'ir.model.field'.
if not self.manual:
model.pool.post_init(model.env['ir.model.relation']._reflect_relation,
model, self.relation, self._module)
if not sql.table_exists(cr, self.relation):
comodel = model.env[self.comodel_name]
query = """
CREATE TABLE "{rel}" ("{id1}" INTEGER NOT NULL,
"{id2}" INTEGER NOT NULL,
UNIQUE("{id1}","{id2}"));
COMMENT ON TABLE "{rel}" IS %s;
CREATE INDEX ON "{rel}" ("{id1}");
CREATE INDEX ON "{rel}" ("{id2}")
""".format(rel=self.relation, id1=self.column1, id2=self.column2)
cr.execute(query, ['RELATION BETWEEN %s AND %s' % (model._table, comodel._table)])
_schema.debug("Create table %r: m2m relation between %r and %r", self.relation, model._table, comodel._table)
model.pool.post_init(self.update_db_foreign_keys, model)
return True
def update_db_foreign_keys(self, model):
""" Add the foreign keys corresponding to the field's relation table. """
cr = model._cr
comodel = model.env[self.comodel_name]
reflect = model.env['ir.model.constraint']._reflect_constraint
# create foreign key references with ondelete=cascade, unless the targets are SQL views
if sql.table_kind(cr, model._table) != 'v':
sql.add_foreign_key(cr, self.relation, self.column1, model._table, 'id', 'cascade')
reflect(model, '%s_%s_fkey' % (self.relation, self.column1), 'f', None, self._module)
if sql.table_kind(cr, comodel._table) != 'v':
sql.add_foreign_key(cr, self.relation, self.column2, comodel._table, 'id', 'cascade')
reflect(model, '%s_%s_fkey' % (self.relation, self.column2), 'f', None, self._module)
def read(self, records):
comodel = records.env[self.comodel_name]
# String domains are supposed to be dynamic and evaluated on client-side
# only (thus ignored here).
domain = self.domain if isinstance(self.domain, list) else []
wquery = comodel._where_calc(domain)
comodel._apply_ir_rules(wquery, 'read')
order_by = comodel._generate_order_by(None, wquery)
from_c, where_c, where_params = wquery.get_sql()
query = """ SELECT {rel}.{id1}, {rel}.{id2} FROM {rel}, {from_c}
WHERE {where_c} AND {rel}.{id1} IN %s AND {rel}.{id2} = {tbl}.id
{order_by} {limit} OFFSET {offset}
""".format(rel=self.relation, id1=self.column1, id2=self.column2,
tbl=comodel._table, from_c=from_c, where_c=where_c or '1=1',
limit=(' LIMIT %d' % self.limit) if self.limit else '',
offset=0, order_by=order_by)
where_params.append(tuple(records.ids))
# retrieve lines and group them by record
group = defaultdict(list)
records._cr.execute(query, where_params)
for row in records._cr.fetchall():
group[row[0]].append(row[1])
# store result in cache
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(group[record.id]))
def write(self, records, value, create=False):
cr = records._cr
comodel = records.env[self.comodel_name]
parts = dict(rel=self.relation, id1=self.column1, id2=self.column2)
clear = False # whether the relation should be cleared
links = {} # {id: True (link it) or False (unlink it)}
for act in (value or []):
if not isinstance(act, (list, tuple)) or not act:
continue
if act[0] == 0:
for record in records:
links[comodel.create(act[2]).id] = True
elif act[0] == 1:
comodel.browse(act[1]).write(act[2])
elif act[0] == 2:
comodel.browse(act[1]).unlink()
elif act[0] == 3:
links[act[1]] = False
elif act[0] == 4:
links[act[1]] = True
elif act[0] == 5:
clear = True
links.clear()
elif act[0] == 6:
clear = True
links = dict.fromkeys(act[2], True)
if clear and not create:
# remove all records for which user has access rights
clauses, params, tables = comodel.env['ir.rule'].domain_get(comodel._name)
cond = " AND ".join(clauses) if clauses else "1=1"
query = """ DELETE FROM {rel} USING {tables}
WHERE {rel}.{id1} IN %s AND {rel}.{id2}={table}.id AND {cond}
""".format(table=comodel._table, tables=','.join(tables), cond=cond, **parts)
cr.execute(query, [tuple(records.ids)] + params)
# link records to the ids such that links[id] = True
if any(links.values()):
# beware of duplicates when inserting
query = """ INSERT INTO {rel} ({id1}, {id2})
(SELECT a, b FROM unnest(%s) AS a, unnest(%s) AS b)
EXCEPT (SELECT {id1}, {id2} FROM {rel} WHERE {id1} IN %s)
""".format(**parts)
ids = [id for id, flag in links.items() if flag]
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute(query, (records.ids, list(sub_ids), tuple(records.ids)))
# unlink records from the ids such that links[id] = False
if not all(links.values()):
query = """ DELETE FROM {rel}
WHERE {id1} IN %s AND {id2} IN %s
""".format(**parts)
ids = [id for id, flag in links.items() if not flag]
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute(query, (tuple(records.ids), sub_ids))
class Id(Field):
""" Special case for field 'id'. """
type = 'integer'
column_type = ('int4', 'int4')
_slots = {
'string': 'ID',
'store': True,
'readonly': True,
}
def update_db(self, model, columns):
pass # this column is created with the table
def __get__(self, record, owner):
if record is None:
return self # the field is accessed through the class owner
if not record:
return False
return record.ensure_one()._ids[0]
def __set__(self, record, value):
raise TypeError("field 'id' cannot be assigned")
# imported here to avoid dependency cycle issues
from odoo import SUPERUSER_ID
from .exceptions import AccessError, MissingError, UserError
from .models import check_pg_name, BaseModel, IdType
| agpl-3.0 | 8,913,938,050,853,976,000 | 41.087811 | 126 | 0.585813 | false |
RyadElssalihine/RyadElssalihine | user_manager/views.py | 1 | 1508 | # Create your views here.
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from models import Profile,Application,Tab,Page,Footer
from serializer import ProfileSerializer,TabSerializer,FooterSerializer
from django.shortcuts import render
from rest_framework.parsers import JSONParser
from django.contrib.auth import authenticate, login ,logout
from django.shortcuts import redirect
from django.http import HttpResponse,HttpRequest
from forms import ConnexionForm
from django.core.urlresolvers import reverse
import RyadEssalihine
from django.contrib.auth.decorators import login_required
@api_view(['POST'])
def register(request):
pass
@api_view(['GET'])
def user_list(request):
profiles=Profile.objects.all()
serializer=ProfileSerializer(profiles,many=True)
return Response(serializer.data)
@api_view(['GET'])
def user_get(request):
try:
profiles = Profile.objects.get(user_id=request.user.id)
except Profile.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer=ProfileSerializer(profiles)
return Response(serializer.data)
@api_view(['GET'])
def tabs_list(request):
tabs=Tab.objects.all()
serializer= TabSerializer(tabs,many=True)
return Response(serializer.data)
@api_view(['GET'])
def footers_list(request):
footers=Footer.objects.all()
serializer=FooterSerializer(footers,many=True)
return Response(serializer.data)
| gpl-2.0 | -4,443,966,142,931,526,700 | 27.45283 | 71 | 0.772546 | false |
mareknetusil/twist | cbc/twist/kinematics.py | 1 | 4324 | __author__ = "Harish Narayanan"
__copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from dolfin import *
#from cbc.twist.coordinate_system import CartesianSystem
# Renaming grad to Grad because it looks nicer in the reference
# configuration
from ufl import grad as ufl_grad
# Deformation gradient
def DeformationGradient(u):
I = SecondOrderIdentity(u)
return variable(I + Grad(u))
def Grad(v):
return ufl_grad(v)
# Infinitesimal strain tensor
def InfinitesimalStrain(u):
return variable(0.5*(Grad(u) + Grad(u).T))
# Second order identity tensor
def SecondOrderIdentity(u):
return variable(Identity(u.geometric_dimension()))
# Determinant of the deformation gradient
def Jacobian(u):
F = DeformationGradient(u)
return variable(det(F))
# Right Cauchy-Green tensor
def RightCauchyGreen(u):
F = DeformationGradient(u)
return variable(F.T*F)
# Green-Lagrange strain tensor
def GreenLagrangeStrain(u):
I = SecondOrderIdentity(u)
C = RightCauchyGreen(u)
return variable(0.5*(C - I))
# Left Cauchy-Green tensor
def LeftCauchyGreen(u):
F = DeformationGradient(u)
return variable(F*F.T)
# Euler-Almansi strain tensor
def EulerAlmansiStrain(u):
I = SecondOrderIdentity(u)
b = LeftCauchyGreen(u)
return variable(0.5*(I - inv(b)))
# Invariants of an arbitrary tensor, A
def Invariants(A):
I1 = tr(A)
I2 = 0.5*(tr(A)**2 - tr(A*A))
I3 = det(A)
return [I1, I2, I3]
# Invariants of the (right/left) Cauchy-Green tensor
#TODO: NEEDS TESTING
def CauchyGreenInvariants(u):
C = RightCauchyGreen(u)
[I1, I2, I3] = Invariants(C)
return [variable(I1), variable(I2), variable(I3)]
# Isochoric part of the deformation gradient
#TODO: NEEDS TESTING
def IsochoricDeformationGradient(u):
F = DeformationGradient(u)
J = Jacobian(u)
return variable(J**(-1.0/3.0)*F)
# Isochoric part of the right Cauchy-Green tensor
#TODO: NEEDS TESTING
def IsochoricRightCauchyGreen(u):
C = RightCauchyGreen(u)
J = Jacobian(u)
return variable(J**(-2.0/3.0)*C)
# Invariants of the isochoric part of the (right/left) Cauchy-Green
# tensor. Note that I3bar = 1 by definition.
#TODO: NEEDS TESTING
def IsochoricCauchyGreenInvariants(u):
Cbar = IsochoricRightCauchyGreen(u)
[I1bar, I2bar, I3bar] = Invariants(Cbar)
return [variable(I1bar), variable(I2bar)]
# Principal stretches
#TODO: NEEDS TESTING
def PrincipalStretches(u):
C = RightCauchyGreen(u)
S = FunctionSpace(u.function_space().mesh(), "CG", 1)
if (u.cell().geometric_dimension() == 2):
D = sqrt(tr(C)*tr(C) - 4.0*det(C))
eig1 = sqrt(0.5*(tr(C) + D))
eig2 = sqrt(0.5*(tr(C) - D))
return [variable(eig1), variable(eig2)]
if (u.cell().geometric_dimension() == 3):
c = (1.0/3.0)*tr(C)
D = C - c*SecondOrderIdentity(u)
q = (1.0/2.0)*det(D)
p = (1.0/6.0)*inner(D, D)
ph = project(p, S)
if (norm(ph) < DOLFIN_EPS):
eig1 = sqrt(c)
eig2 = sqrt(c)
eig3 = sqrt(c)
else:
phi = (1.0/3.0)*atan(sqrt(p**3.0 - q**2.0)/q)
            if (phi < 0.0):
                phi = phi + DOLFIN_PI/3.0
eig1 = sqrt(c + 2*sqrt(p)*cos(phi))
eig2 = sqrt(c - sqrt(p)*(cos(phi) + sqrt(3)*sin(phi)))
eig3 = sqrt(c - sqrt(p)*(cos(phi) - sqrt(3)*sin(phi)))
return [variable(eig1), variable(eig2), variable(eig3)]
# Pull-back of a two-tensor from the current to the reference
# configuration
#TODO: NEEDS TESTING
def PiolaTransform(A, u):
J = Jacobian(u)
F = DeformationGradient(u)
B = J*A*inv(F).T
return B
# Push-forward of a two-tensor from the reference to the current
# configuration
#TODO: NEEDS TESTING
def InversePiolaTransform(A, u):
J = Jacobian(u)
F = DeformationGradient(u)
B = (1/J)*A*F.T
return B
# Computes M*C^n*M;
# for n = 1 this equals the square of the stretch in the direction M
#TODO: NEEDS TESTING
def DirectionalStretch(u, M, degree = 1):
C = RightCauchyGreen(u)
Cpow = SecondOrderIdentity(u)
if degree >= 1:
for i in range(degree):
Cpow = C*Cpow
directionalstretch = inner(M,Cpow*M)
return variable(directionalstretch)
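# Minimal usage sketch (illustrative addition, not part of the original module):
# builds a St. Venant-Kirchhoff stress from the kinematic helpers above. Assumes
# a working DOLFIN/FEniCS installation with UnitCubeMesh available; mesh size and
# material constants are arbitrary placeholder values.
if __name__ == "__main__":
    mesh = UnitCubeMesh(4, 4, 4)
    V = VectorFunctionSpace(mesh, "CG", 1)
    u = Function(V)
    E = GreenLagrangeStrain(u)
    mu, lmbda = Constant(6.0), Constant(4.0)
    # Second Piola-Kirchhoff stress for a St. Venant-Kirchhoff material
    S = 2.0*mu*E + lmbda*tr(E)*SecondOrderIdentity(u)
    # First Piola-Kirchhoff stress, pushed forward with the deformation gradient
    P = DeformationGradient(u)*S
    print(P)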
| gpl-3.0 | 1,692,405,160,494,801,000 | 26.896774 | 83 | 0.639685 | false |
snarfed/webmention-tools | bin/demo.py | 1 | 1241 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from webmentiontools.urlinfo import UrlInfo
from webmentiontools.webmentionio import WebmentionIO
# If you have an access token from webmention.io,
# set it here. Some calls require it.
webmention_io_token = None
wio = WebmentionIO(webmention_io_token)
# Get all links "mentioning" http://indiewebcamp.com/webmention
target_url = 'http://indiewebcamp.com/webmention'
ret = wio.linksToURL(target_url)
if not ret:
print wio.error
else:
for link in ret['links']:
print
print 'Webmention.io ID: %s' % link['id']
print ' Source: %s' % link['source']
print ' Verification Date: %s' % link['verified_date']
# Now use UrlInfo to get some more information about the source.
        # Most web apps showing webmentions will probably do something
        # like this.
info = UrlInfo(link['source'])
print ' Source URL info:'
print ' Title: %s' % info.title()
print ' Pub Date: %s' % info.pubDate()
print ' in-reply-to: %s' % info.inReplyTo()
print ' Author image: %s' % info.image()
print ' Snippet: %s' % info.snippetWithLink(target_url)
| mit | 8,639,230,686,331,052,000 | 33.472222 | 72 | 0.617244 | false |
XiaochenCui/algorithm_submit | app/auth/views.py | 1 | 5804 | from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm, \
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
if current_user.is_authenticated:
current_user.ping()
if not current_user.confirmed \
and request.endpoint[:5] != 'auth.' \
and request.endpoint != 'static':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid account or password')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
    flash('You have been logged out')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
        send_email(user.email, 'Confirm your account',
'auth/email/confirm', user=user, token=token)
        flash('A confirmation email has been sent to you')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
        flash('Your account has been confirmed')
else:
        flash('The confirmation link is invalid or has expired')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
    send_email(current_user.email, 'Confirm your account',
'auth/email/confirm', user=current_user, token=token)
    flash('A new confirmation email has been sent to your email address')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
            flash('Your password has been updated')
return redirect(url_for('main.index'))
else:
            flash('Invalid password')
return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
            send_email(user.email, 'Reset your password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
        flash('An email with instructions to reset your password has been sent to you')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash('Your password has been updated.')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
            send_email(new_email, 'Confirm your email address',
'auth/email/change_email',
user=current_user, token=token)
            flash('A confirmation email has been sent to your new address')
return redirect(url_for('main.index'))
else:
            flash('Invalid email or password')
return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
if current_user.change_email(token):
        flash('Your email address has been updated')
else:
        flash('Invalid request')
return redirect(url_for('main.index'))
| mit | 5,879,881,981,366,820,000 | 32.95092 | 78 | 0.630105 | false |
kangwonlee/ECA | lab_07_linear_algebra/gauss_jordan.py | 1 | 1766 | # -*- coding: utf8 -*-
from pprint import pprint
import linear_algebra as la
def gauss_jordan(A):
    # Size of the matrix
n_row = len(A)
n_column = len(A[0])
    # Build the augmented matrix [A | I] by appending the identity matrix
AI = []
for i_row in xrange(n_row):
AI_row = [0.0] * (n_column * 2)
for j_column in xrange(n_column):
AI_row[j_column] = A[i_row][j_column]
for j_column in xrange(n_column, n_column * 2):
AI_row[j_column] = 0.0
AI_row[n_column + i_row] = 1.0
AI.append(AI_row)
print "Augmented matrix"
print '1234567890' * 7
pprint(AI, width=30)
    # Loop over pivots
for i_pivot in xrange(n_row):
        # Divide the pivot row by the pivot element.
        # The pivot element becomes 1
ratio = 1.0 / float(AI[i_pivot][i_pivot])
for k_column in xrange(n_column * 2):
AI[i_pivot][k_column] *= ratio
        # Loop over rows
for j_row in xrange(0, n_row):
if j_row != i_pivot:
ratio = -AI[j_row][i_pivot]
                # Loop over columns
for k_column in xrange(n_column * 2):
AI[j_row][k_column] += ratio * AI[i_pivot][k_column]
    # After this loop finishes, every element off the main diagonal is 0
print "After Gauss Jordan"
pprint(AI)
    # Split off the right-hand half (the inverse matrix)
result = []
for i_row in xrange(n_row):
result.append(AI[i_row][n_column:])
return result
if "__main__" == __name__:
A = [[3, 2, 1],
[2, 3, 2],
[1, 2, 3]]
A_inverse = gauss_jordan(A)
print "A inverse"
pprint(A_inverse)
I_expected = la.multiply_matrix_matrix(A, A_inverse)
print "I expected"
pprint(I_expected)
| apache-2.0 | 1,296,910,529,811,983,000 | 23.666667 | 72 | 0.514742 | false |
Chris7/django-djangui | djangui/models/mixins.py | 1 | 1697 | from __future__ import absolute_import
__author__ = 'chris'
from django.forms.models import model_to_dict
import six
class UpdateScriptsMixin(object):
def save(self, **kwargs):
super(UpdateScriptsMixin, self).save(**kwargs)
from ..backend.utils import load_scripts
load_scripts()
class DjanguiPy2Mixin(object):
def __unicode__(self):
return unicode(self.__str__())
# from
# http://stackoverflow.com/questions/1355150/django-when-saving-how-can-you-check-if-a-field-has-changed
class ModelDiffMixin(object):
"""
    A model mixin that tracks model fields' values and provides a useful API
    to know which fields have been changed.
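
    Usage sketch (illustrative; assumes a concrete model inheriting this mixin):

        class Script(ModelDiffMixin, models.Model):
            name = models.CharField(max_length=255)

        obj = Script.objects.first()
        obj.name = 'new name'
        obj.has_changed             # True
        obj.changed_fields          # ['name']
        obj.get_field_diff('name')  # ('old name', 'new name')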
"""
def __init__(self, *args, **kwargs):
super(ModelDiffMixin, self).__init__(*args, **kwargs)
self.__initial = self._dict
@property
def diff(self):
d1 = self.__initial
d2 = self._dict
diffs = [(k, (v, d2[k])) for k, v in d1.items() if v != d2[k]]
return dict(diffs)
@property
def has_changed(self):
return bool(self.diff)
@property
def changed_fields(self):
return self.diff.keys()
def get_field_diff(self, field_name):
"""
Returns a diff for field if it's changed and None otherwise.
"""
return self.diff.get(field_name, None)
def save(self, *args, **kwargs):
"""
Saves model and set initial state.
"""
super(ModelDiffMixin, self).save(*args, **kwargs)
self.__initial = self._dict
@property
def _dict(self):
return model_to_dict(self, fields=[field.name for field in
self._meta.fields]) | gpl-3.0 | 4,384,404,271,976,613,400 | 27.3 | 104 | 0.602829 | false |
mosen/commandment | tests/mdm/test_checkin.py | 1 | 1404 | import pytest
from flask import Response
from tests.client import MDMClient
class TestCheckin:
def test_authenticate(self, client: MDMClient, authenticate_request: str):
"""Basic test: Authenticate"""
response: Response = client.put('/checkin', data=authenticate_request, content_type='text/xml')
assert response.status_code != 410
assert response.status_code == 200
def test_tokenupdate(self, client: MDMClient, tokenupdate_request: str):
"""Test a client attempting to update its token after being unenrolled."""
response: Response = client.put('/checkin', data=tokenupdate_request, content_type='text/xml')
assert response.status_code != 410
assert response.status_code == 200
def test_user_tokenupdate(self, client: MDMClient, tokenupdate_user_request: str):
"""Test a TokenUpdate message on the user channel."""
response: Response = client.put('/checkin', data=tokenupdate_user_request, content_type='text/xml')
assert response.status_code != 410
assert response.status_code == 200
def test_checkout(self, client: MDMClient, checkout_request: str):
"""Test a CheckOut message"""
response: Response = client.put('/checkin', data=checkout_request, content_type='text/xml')
assert response.status_code != 410
assert response.status_code == 200
| mit | -347,361,245,787,031,400 | 45.8 | 107 | 0.683761 | false |
manaris/jythonMusic | 15. simpleCircleInstrument.py | 1 | 2279 | # simpleCircleInstrument.py
#
# Demonstrates how to use mouse and keyboard events to build a simple
# drawing musical instrument.
#
from gui import *
from music import *
from math import sqrt
### initialize variables ######################
minPitch = C1 # instrument pitch range
maxPitch = C8
# create display
d = Display("Circle Instrument") # default dimensions (600 x 400)
d.setColor( Color(51, 204, 255) ) # set background to turquoise
beginX = 0 # holds starting x coordinate for next circle
beginY = 0 # holds starting y coordinate
# maximum circle diameter - same as diagonal of display
maxDiameter = sqrt(d.getWidth()**2 + d.getHeight()**2) # calculate it
### define callback functions ######################
def beginCircle(x, y): # for when mouse is pressed
global beginX, beginY
beginX = x # remember new circle's coordinates
beginY = y
def endCircleAndPlayNote(endX, endY): # for when mouse is released
global beginX, beginY, d, maxDiameter, minPitch, maxPitch
# calculate circle parameters
# first, calculate distance between begin and end points
diameter = sqrt( (beginX-endX)**2 + (beginY-endY)**2 )
diameter = int(diameter) # in pixels - make it an integer
radius = diameter/2 # get radius
centerX = (beginX + endX)/2 # circle center is halfway between...
centerY = (beginY + endY)/2 # ...begin and end points
# draw circle with yellow color, unfilled, 3 pixels thick
d.drawCircle(centerX, centerY, radius, Color.YELLOW, False, 3)
# create note
pitch = mapScale(diameter, 0, maxDiameter, minPitch, maxPitch,
MAJOR_SCALE)
# invert pitch (larger diameter, lower pitch)
pitch = maxPitch - pitch
# and play note
Play.note(pitch, 0, 5000) # start immediately, hold for 5 secs
def clearOnSpacebar(key): # for when a key is pressed
global d
# if they pressed space, clear display and stop the music
if key == VK_SPACE:
d.removeAll() # remove all shapes
Play.allNotesOff() # stop all notes
### assign callback functions to display event handlers #############
d.onMouseDown( beginCircle )
d.onMouseUp( endCircleAndPlayNote )
d.onKeyDown( clearOnSpacebar ) | gpl-3.0 | 2,095,986,917,681,406,200 | 31.571429 | 70 | 0.660816 | false |
openstack/vitrage | doc/source/conf.py | 1 | 2952 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT)
sys.path.insert(0, BASE_DIR)
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
# 'sphinx.ext.intersphinx',
'openstackdocstheme',
'oslo_config.sphinxconfiggen',
]
config_generator_config_file = os.path.join(ROOT,
'etc/vitrage/vitrage-config-generator.conf')
sample_config_basename = '_static/vitrage'
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'vitrage'
copyright = '2017, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
html_static_path = ['_static']
html_theme = 'openstackdocs'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/vitrage'
openstackdocs_auto_name = False
openstackdocs_use_storyboard = True
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
'%s Documentation' % project,
'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
| apache-2.0 | -1,014,818,334,832,758,900 | 31.8 | 88 | 0.692073 | false |
gilles-duboscq/jvb | scripts/gen-idea-configs.py | 1 | 3766 | #!/usr/bin/env python
# <configuration default="false" name="pong" type="Application" factoryName="Application">
# <option name="MAIN_CLASS_NAME" value="gd.twohundred.jvb.Main" />
# <option name="VM_PARAMETERS" value="" />
# <option name="PROGRAM_PARAMETERS" value=""$PROJECT_DIR$/../vb-roms/all-roms/pong.vb"" />
# <option name="WORKING_DIRECTORY" value="file://$PROJECT_DIR$" />
# <option name="ALTERNATIVE_JRE_PATH_ENABLED" value="false" />
# <option name="ALTERNATIVE_JRE_PATH" />
# <option name="ENABLE_SWING_INSPECTOR" value="false" />
# <option name="ENV_VARIABLES" />
# <option name="PASS_PARENT_ENVS" value="true" />
# <module name="jvb" />
# <envs />
# <method />
# </configuration>
from __future__ import print_function
from xml.etree import ElementTree
from argparse import ArgumentParser
from os.path import dirname, basename, exists, join, realpath, relpath
from glob import glob
import shlex
scripts = realpath(dirname(__file__))
name = basename(__file__)
root = dirname(scripts)
default_idea_dir = None
if exists(join(root, '.idea')):
default_idea_dir = join(root, '.idea')
parser = ArgumentParser(name, description="Creates run configurations for Intellij for *.vb files in a directory")
parser.add_argument("-p", "--project", default=default_idea_dir, help="Location of .idea project", required=default_idea_dir is None)
parser.add_argument("directory", help="directory where to look for *.vb files")
args = parser.parse_args()
workspace_file = join(args.project, 'workspace.xml')
if not exists(workspace_file):
print("No workspace.xml file, creating one")
    project = ElementTree.Element('project', attrib={'version': '4'})  # ElementTree attribute values must be strings
doc = ElementTree.ElementTree(project)
else:
doc = ElementTree.parse(workspace_file)
project = doc.getroot()
if project.tag != 'project':
print("Strange workspace.xml file, exiting")
exit(1)
search_dir = realpath(args.directory)
vb_roms = set(glob(join(search_dir, "*.vb")))
run_manager = project.find("./component[@name='RunManager']")
if run_manager is None:
run_manager = ElementTree.SubElement(project, 'component')
run_manager.set('name', 'RunManager')
project_dir = dirname(realpath(args.project))
for config in run_manager.iterfind("./configuration[@type='Application']"):
params_node = config.find("./option[@name='PROGRAM_PARAMETERS']")
if params_node is None or params_node.get('value') is None:
continue
params = shlex.split(params_node.get('value'))
for param in params:
param = param.replace('$PROJECT_DIR$', project_dir)
if not param.endswith('.vb'):
continue
param = realpath(param)
if param in vb_roms:
vb_roms.remove(param)
print("Skipping '" + relpath(param, search_dir) + "'")
for vb_rom in vb_roms:
print("Adding '" + relpath(vb_rom, search_dir) + "'")
config = ElementTree.SubElement(run_manager, 'configuration', attrib={"name": relpath(vb_rom, search_dir)[:-3], "type": "Application", "factoryName": "Application"})
ElementTree.SubElement(config, 'option', attrib={'name': 'MAIN_CLASS_NAME', 'value': 'gd.twohundred.jvb.Main'})
ElementTree.SubElement(config, 'option', attrib={'name': 'VM_PARAMETERS', 'value': ''})
ElementTree.SubElement(config, 'option', attrib={'name': 'PROGRAM_PARAMETERS', 'value': '"$PROJECT_DIR$/' + relpath(vb_rom, project_dir) + '"'})
ElementTree.SubElement(config, 'option', attrib={'name': 'WORKING_DIRECTORY', 'value': 'file://$PROJECT_DIR$'})
ElementTree.SubElement(config, 'option', attrib={'name': 'PASS_PARENT_ENVS', 'value': 'true'})
ElementTree.SubElement(config, 'module', attrib={'name': 'jvb'})
doc.write(workspace_file, xml_declaration=True)
| mit | -6,872,473,540,939,162,000 | 43.305882 | 169 | 0.677642 | false |
intelligent-agent/redeem | tests/gcode/test_M83.py | 1 | 1638 | from __future__ import absolute_import
from .MockPrinter import MockPrinter
from redeem.Path import Path
class M83_Tests(MockPrinter):
def test_gcodes_M83_from_absolute(self):
""" set state as it should be after a G90, all axes absolute """
self.printer.axes_absolute = ["X", "Y", "Z", "E", "H", "A", "B", "C"]
self.printer.axes_relative = []
    self.printer.movement = Path.ABSOLUTE
self.execute_gcode("M83")
self.assertEqual(self.printer.movement, Path.MIXED)
self.assertEqual(self.printer.axes_absolute, ["X", "Y", "Z"])
self.assertEqual(self.printer.axes_relative, ["E", "H", "A", "B", "C"])
def test_gcodes_M83_from_relative(self):
""" set state as it should be after a G91, all axes relative """
self.printer.axes_absolute = []
self.printer.axes_relative = ["X", "Y", "Z", "E", "H", "A", "B", "C"]
    self.printer.movement = Path.RELATIVE
self.execute_gcode("M83")
self.assertEqual(self.printer.movement, Path.RELATIVE)
self.assertEqual(self.printer.axes_relative, ["X", "Y", "Z", "E", "H", "A", "B", "C"])
self.assertEqual(self.printer.axes_absolute, [])
def test_gcodes_M83_from_mixed(self):
""" set state as it should be after a G90/M83, XYZ absolute and extruders relative """
self.printer.axes_absolute = ["X", "Y", "Z"]
self.printer.axes_relative = ["E", "H", "A", "B", "C"]
    self.printer.movement = Path.MIXED
self.execute_gcode("M83")
self.assertEqual(self.printer.movement, Path.MIXED)
self.assertEqual(self.printer.axes_relative, ["E", "H", "A", "B", "C"])
self.assertEqual(self.printer.axes_absolute, ["X", "Y", "Z"])
| gpl-3.0 | 4,424,008,043,799,721,000 | 44.5 | 90 | 0.639194 | false |
jayvdb/travis_log_fetch | tests/test_github.py | 1 | 1132 | """Test Github resolution."""
from __future__ import absolute_import, unicode_literals
from travis_log_fetch.config import (
_get_github,
get_options,
)
from travis_log_fetch.get import (
get_forks,
)
import pytest
# Note 'foo' is a real Github user, but they do not
# have repos bar or baz
class TestForks(object):
def test_invalid(self):
options = get_options()
if not options.access_token:
pytest.skip("github access token needed")
_github = _get_github()
pytest.raises(AssertionError, get_forks, _github, 'foo/bar')
def test_zero(self):
options = get_options()
if not options.access_token:
pytest.skip("github access token needed")
_github = _get_github()
forks = get_forks(_github, 'travispy/on_pypy')
assert len(forks) == 0
def test_fork(self):
options = get_options()
if not options.access_token:
pytest.skip("github access token needed")
_github = _get_github()
forks = get_forks(_github, 'menegazzo/travispy')
assert 'jayvdb/travispy' in forks
| mit | -6,854,272,443,919,678,000 | 27.3 | 68 | 0.621025 | false |
Jumpscale/jumpscale_core8 | tests/tools/cuisine/TestJSCuisine.py | 1 | 4718 | """
Test JSCuisine (core)
"""
import unittest
from unittest import mock
from JumpScale import j
from JumpScale.tools.cuisine.JSCuisine import JSCuisine
import JumpScale
from JumpScale.tools.cuisine.ProcessManagerFactory import ProcessManagerFactory
class TestJSCuisine(unittest.TestCase):
def setUp(self):
self._local_executor = j.tools.executor.getLocal()
self.JSCuisine = JSCuisine(self._local_executor)
def tearDown(self):
pass
def test_create_cuisine2(self):
"""
Test creating an instance
"""
self.assertIsNotNone(self.JSCuisine.core)
self.assertIsNotNone(self.JSCuisine.tools.sshreflector)
self.assertIsNotNone(self.JSCuisine.solutions.proxyclassic)
self.assertIsNotNone(self.JSCuisine.tools.bootmediainstaller)
self.assertIsNotNone(self.JSCuisine.solutions.vrouter)
self.assertIsNotNone(self.JSCuisine.tmux)
self.assertIsNotNone(self.JSCuisine.pnode)
self.assertIsNotNone(self.JSCuisine.tools.stor)
def test_create_cuisine2_platformtype(self):
"""
Test accessing platformtype property
"""
self.assertIsNotNone(self.JSCuisine.platformtype)
def test_create_cuisine2_id(self):
"""
Test accessing id property
"""
self.assertIsNotNone(self.JSCuisine.id)
def test_create_cuisine2_btrfs(self):
"""
Test accessing btrfs property
"""
self.assertIsNotNone(self.JSCuisine.btrfs)
def test_create_cuisine2_package(self):
"""
Test accessing package property
"""
self.assertIsNotNone(self.JSCuisine.package)
def test_create_cuisine2_process(self):
"""
Test accessing process property
"""
self.assertIsNotNone(self.JSCuisine.process)
def test_create_cuisine2_pip_is_not_None(self):
"""
Test accessing pip property
"""
self.assertIsNotNone(self.JSCuisine.development.pip)
def test_create_cuisine2_fw(self):
"""
Test accessing fw property
"""
self.assertIsNotNone(self.JSCuisine.systemservices.ufw)
def test_create_cuisine2_golang(self):
"""
Test accessing golang property
"""
self.assertIsNotNone(self.JSCuisine.development.golang)
def test_create_cuisine2_geodns(self):
"""
Test accessing geodns property
"""
self.assertIsNotNone(self.JSCuisine.apps.geodns)
def test_create_cuisine2_apps(self):
"""
Test accessing apps property
"""
self.assertIsNotNone(self.JSCuisine.apps)
@unittest.skip("Builder is removed while writing this")
def test_create_cuisine2_builder(self):
"""
Test accessing builder property
"""
self.assertIsNotNone(self.JSCuisine.builder)
def test_create_cuisine2_ns(self):
"""
Test accessing ns property
"""
self.assertIsNotNone(self.JSCuisine.ns)
def test_create_cuisine2_docker(self):
"""
Test accessing docker property
"""
self.assertIsNotNone(self.JSCuisine.systemservices.docker)
def test_create_cuisine2_ssh(self):
"""
Test accessing ssh property
"""
self.assertIsNotNone(self.JSCuisine.ssh)
@unittest.skip("couldn't find avahi")
def test_create_cuisine2_avahi(self):
"""
Test accessing avahi property
"""
self.assertIsNotNone(self.JSCuisine.avahi)
def test_create_cuisine2_bash(self):
"""
Test accessing bash property
"""
self.assertIsNotNone(self.JSCuisine.bash)
def test_create_cuisine2_net(self):
"""
Test accessing net property
"""
self.assertIsNotNone(self.JSCuisine.net)
def test_create_cuisine2_user_is_not_None(self):
"""
Test accessing user property
"""
self.assertIsNotNone(self.JSCuisine.user)
def test_create_cuisine2_group(self):
"""
Test accessing group property
"""
self.assertIsNotNone(self.JSCuisine.group)
def test_create_cuisine2_git(self):
"""
Test accessing git property
"""
self.assertIsNotNone(self.JSCuisine.development.git)
@mock.patch('JumpScale.tools.cuisine.ProcessManagerFactory.ProcessManagerFactory')
def test_create_cuisine2_processmanager(self, processmanager_mock):
"""
Test accessing processmanager property
"""
processmanager_mock.get.return_value = ProcessManagerFactory(self.JSCuisine)
self.assertIsNotNone(self.JSCuisine.processmanager)
| apache-2.0 | -7,099,521,589,335,590,000 | 27.944785 | 86 | 0.643281 | false |
jacobajit/ion | intranet/apps/events/views.py | 1 | 11092 | # -*- coding: utf-8 -*-
import datetime
import logging
import bleach
from django import http
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core import exceptions
from django.shortcuts import get_object_or_404, redirect, render
from .forms import AdminEventForm, EventForm
from .models import Event
logger = logging.getLogger(__name__)
@login_required
def events_view(request):
"""Events homepage.
Shows a list of events occurring in the next week, month, and
future.
"""
is_events_admin = request.user.has_admin_permission('events')
if request.method == "POST":
if "approve" in request.POST and is_events_admin:
event_id = request.POST.get('approve')
event = get_object_or_404(Event, id=event_id)
event.rejected = False
event.approved = True
event.approved_by = request.user
event.save()
messages.success(request, "Approved event {}".format(event))
if "reject" in request.POST and is_events_admin:
event_id = request.POST.get('reject')
event = get_object_or_404(Event, id=event_id)
event.approved = False
event.rejected = True
event.rejected_by = request.user
event.save()
messages.success(request, "Rejected event {}".format(event))
if is_events_admin and "show_all" in request.GET:
viewable_events = (Event.objects.prefetch_related("groups"))
else:
viewable_events = (Event.objects.visible_to_user(request.user).prefetch_related("groups"))
# get date objects for week and month
today = datetime.date.today()
delta = today - datetime.timedelta(days=today.weekday())
this_week = (delta, delta + datetime.timedelta(days=7))
this_month = (this_week[1], this_week[1] + datetime.timedelta(days=31))
events_categories = [
{
"title": "This week",
"events": viewable_events.filter(time__gte=this_week[0], time__lt=this_week[1])
}, {
"title": "This month",
"events": viewable_events.filter(time__gte=this_month[0], time__lt=this_month[1])
}, {
"title": "Future",
"events": viewable_events.filter(time__gte=this_month[1])
}
]
if is_events_admin:
unapproved_events = (Event.objects.filter(approved=False, rejected=False).prefetch_related("groups"))
events_categories = [{"title": "Awaiting Approval", "events": unapproved_events}] + events_categories
if is_events_admin and "show_all" in request.GET:
events_categories.append({"title": "Past", "events": viewable_events.filter(time__lt=this_week[0])})
context = {
"events": events_categories,
"num_events": viewable_events.count(),
"is_events_admin": is_events_admin,
"events_admin": is_events_admin,
"show_attend": True,
"show_icon": True
}
return render(request, "events/home.html", context)
@login_required
def join_event_view(request, id):
"""Join event page. If a POST request, actually add or remove the attendance of the current
user. Otherwise, display a page with confirmation.
id: event id
"""
event = get_object_or_404(Event, id=id)
if request.method == "POST":
if not event.show_attending:
return redirect("events")
if "attending" in request.POST:
attending = request.POST.get("attending")
attending = (attending == "true")
if attending:
event.attending.add(request.user)
else:
event.attending.remove(request.user)
return redirect("events")
context = {"event": event, "is_events_admin": request.user.has_admin_permission('events')}
return render(request, "events/join_event.html", context)
@login_required
def event_roster_view(request, id):
"""Show the event roster. Users with hidden eighth period permissions will not be displayed.
Users will be able to view all other users, along with a count of the number of hidden users.
(Same as 8th roster page.) Admins will see a full roster at the bottom.
id: event id
"""
event = get_object_or_404(Event, id=id)
full_roster = list(event.attending.all())
viewable_roster = []
num_hidden_members = 0
for p in full_roster:
if p.can_view_eighth:
viewable_roster.append(p)
else:
num_hidden_members += 1
context = {
"event": event,
"viewable_roster": viewable_roster,
"full_roster": full_roster,
"num_hidden_members": num_hidden_members,
"is_events_admin": request.user.has_admin_permission('events'),
}
return render(request, "events/roster.html", context)
@login_required
def add_event_view(request):
"""Add event page.
Currently, there is an approval process for events. If a user is an
events administrator, they can create events directly. Otherwise,
their event is added in the system but must be approved.
"""
is_events_admin = request.user.has_admin_permission('events')
if not is_events_admin:
return redirect("request_event")
if request.method == "POST":
form = EventForm(data=request.POST, all_groups=request.user.has_admin_permission('groups'))
logger.debug(form)
if form.is_valid():
obj = form.save()
obj.user = request.user
# SAFE HTML
obj.description = bleach.linkify(obj.description)
# auto-approve if admin
obj.approved = True
obj.approved_by = request.user
messages.success(request, "Because you are an administrator, this event was auto-approved.")
obj.created_hook(request)
obj.save()
return redirect("events")
else:
messages.error(request, "Error adding event")
else:
form = EventForm(all_groups=request.user.has_admin_permission('groups'))
context = {"form": form, "action": "add", "action_title": "Add" if is_events_admin else "Submit", "is_events_admin": is_events_admin}
return render(request, "events/add_modify.html", context)
@login_required
def request_event_view(request):
"""Request event page.
Currently, there is an approval process for events. If a user is an
events administrator, they can create events directly. Otherwise,
their event is added in the system but must be approved.
"""
is_events_admin = False
if request.method == "POST":
form = EventForm(data=request.POST, all_groups=request.user.has_admin_permission('groups'))
logger.debug(form)
if form.is_valid():
obj = form.save()
obj.user = request.user
# SAFE HTML
obj.description = bleach.linkify(obj.description)
messages.success(request, "Your event needs to be approved by an administrator. If approved, it should appear on Intranet within 24 hours.")
obj.created_hook(request)
obj.save()
return redirect("events")
else:
messages.error(request, "Error adding event")
else:
form = EventForm(all_groups=request.user.has_admin_permission('groups'))
context = {"form": form, "action": "add", "action_title": "Submit", "is_events_admin": is_events_admin}
return render(request, "events/add_modify.html", context)
@login_required
def modify_event_view(request, id=None):
"""Modify event page. You may only modify an event if you were the creator or you are an
administrator.
id: event id
"""
event = get_object_or_404(Event, id=id)
is_events_admin = request.user.has_admin_permission('events')
if not is_events_admin:
raise exceptions.PermissionDenied
if request.method == "POST":
if is_events_admin:
form = AdminEventForm(data=request.POST, instance=event, all_groups=request.user.has_admin_permission('groups'))
else:
form = EventForm(data=request.POST, instance=event, all_groups=request.user.has_admin_permission('groups'))
logger.debug(form)
if form.is_valid():
obj = form.save()
obj.user = request.user
# SAFE HTML
obj.description = bleach.linkify(obj.description)
obj.save()
messages.success(request, "Successfully modified event.")
# return redirect("events")
else:
messages.error(request, "Error adding event.")
else:
if is_events_admin:
form = AdminEventForm(instance=event, all_groups=request.user.has_admin_permission('groups'))
else:
form = EventForm(instance=event, all_groups=request.user.has_admin_permission('groups'))
context = {"form": form, "action": "modify", "action_title": "Modify", "id": id, "is_events_admin": is_events_admin}
return render(request, "events/add_modify.html", context)
@login_required
def delete_event_view(request, id):
"""Delete event page. You may only delete an event if you were the creator or you are an
administrator. Confirmation page if not POST.
id: event id
"""
event = get_object_or_404(Event, id=id)
if not request.user.has_admin_permission('events'):
raise exceptions.PermissionDenied
if request.method == "POST":
try:
event.delete()
messages.success(request, "Successfully deleted event.")
except Event.DoesNotExist:
pass
return redirect("events")
else:
return render(request, "events/delete.html", {"event": event})
@login_required
def show_event_view(request):
""" Unhide an event that was hidden by the logged-in user.
events_hidden in the user model is the related_name for
"users_hidden" in the EventUserMap model.
"""
if request.method == "POST":
event_id = request.POST.get("event_id")
if event_id:
event = Event.objects.get(id=event_id)
event.user_map.users_hidden.remove(request.user)
event.user_map.save()
return http.HttpResponse("Unhidden")
return http.Http404()
else:
return http.HttpResponseNotAllowed(["POST"], "HTTP 405: METHOD NOT ALLOWED")
@login_required
def hide_event_view(request):
""" Hide an event for the logged-in user.
events_hidden in the user model is the related_name for
"users_hidden" in the EventUserMap model.
"""
if request.method == "POST":
event_id = request.POST.get("event_id")
if event_id:
event = Event.objects.get(id=event_id)
event.user_map.users_hidden.add(request.user)
event.user_map.save()
return http.HttpResponse("Hidden")
return http.Http404()
else:
return http.HttpResponseNotAllowed(["POST"], "HTTP 405: METHOD NOT ALLOWED")
| gpl-2.0 | -4,660,954,090,174,681,000 | 33.554517 | 152 | 0.627209 | false |
TheVirtualLtd/bda.plone.shop | src/bda/plone/shop/vocabularies.py | 1 | 6162 | # -*- coding: utf-8 -*-
from bda.plone.checkout.vocabularies import country_vocabulary
from bda.plone.checkout.vocabularies import gender_vocabulary
from bda.plone.payment import Payments
from bda.plone.shipping import Shippings
from bda.plone.shop import message_factory as _
from bda.plone.shop.utils import get_shop_article_settings
from bda.plone.shop.utils import get_shop_tax_settings
from zope.interface import provider
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
# These are the overall available quantity units, which can then be reduced in
# the control panel. If you need to provide more quantity units, add them here
# or patch this vocab
AVAILABLE_QUANTITY_UNITS = {
'quantity': _('quantity', default='Quantity'),
'meter': _('meter', default='Meter'),
'kilo': _('kilo', default='Kilo'),
'liter': _('liter', default='Liter'),
}
@provider(IVocabularyFactory)
def AvailableQuantityUnitVocabulary(context):
# vocab is used in shop settings control panel
items = AVAILABLE_QUANTITY_UNITS.items()
return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items])
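# Usage sketch (illustrative, not part of this module): vocabulary factories like
# the ones in this module are registered as named utilities (via this package's
# ZCML) and then referenced from a zope.schema Choice field, e.g.:
#
#     quantity_unit = schema.Choice(
#         title=u'Quantity unit',
#         vocabulary='bda.plone.shop.vocabularies.QuantityUnitVocabulary')
#
# The exact utility name is an assumption here; it is defined in configure.zcml.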
@provider(IVocabularyFactory)
def QuantityUnitVocabulary(context):
# vocab is used for buyable items
try:
settings = get_shop_article_settings()
except KeyError:
# happens GS profile application if registry entries not present yet
return AvailableQuantityUnitVocabulary(context)
if not settings:
return
terms = []
for quantity_unit in settings.quantity_units:
title = AVAILABLE_QUANTITY_UNITS.get(quantity_unit, quantity_unit)
terms.append(SimpleTerm(value=quantity_unit, title=title))
return SimpleVocabulary(terms)
# These are the overall available VAT values, which can then be reduced in
# the control panel. If you need to provide more VAT values, add them here
# or patch this vocab
AVAILABLE_VAT_VALUES = {
'0': '0%',
'2.5': '2,5%',
'3.8': '3,8%',
'8': '8%',
'10': '10%',
'15': '15%',
'20': '20%',
'25': '25%',
}
@provider(IVocabularyFactory)
def AvailableVatVocabulary(context):
# vocab is used in shop settings control panel
items = AVAILABLE_VAT_VALUES.items()
items = sorted(items, key=lambda x: x[0])
return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items])
@provider(IVocabularyFactory)
def VatVocabulary(context):
# vocab is used for buyable items.
try:
settings = get_shop_tax_settings()
except KeyError:
# happens GS profile application if registry entries not present yet
return AvailableVatVocabulary(context)
settings.vat
terms = []
if settings.vat:
for vat in settings.vat:
title = AVAILABLE_VAT_VALUES.get(vat, vat)
terms.append(SimpleTerm(value=vat, title=title))
return SimpleVocabulary(terms)
# These are the overall currency values available in the control panel.
# If you need to provide more currencies, add them here or patch this vocab
AVAILABLE_CURRENCIES = {
'EUR': _('EUR', default='Euro'),
'USD': _('USD', default='US Dollar'),
'INR': _('INR', default='Indian Rupee'),
'CAD': _('CAD', default='Canadian Dollar'),
'CHF': _('CHF', default='Swiss Franc'),
'GBP': _('GBP', default='British Pound Sterling'),
'AUD': _('AUD', default='Australian Dollar'),
'NOK': _('NOK', default='Norwegian Krone'),
'SEK': _('SEK', default='Swedish Krona'),
'DKK': _('DKK', default='Danish Krone'),
'YEN': _('YEN', default='Japanese Yen'),
'NZD': _('NZD', default='New Zealand Dollar'),
}
@provider(IVocabularyFactory)
def AvailableCurrenciesVocabulary(context):
items = AVAILABLE_CURRENCIES.items()
return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items])
@provider(IVocabularyFactory)
def CurrencyDisplayOptionsVocabulary(context):
items = [
('yes', _('yes', default='Yes')),
('no', _('no', default='No')),
('symbol', _('symbol', default='Symbol')),
]
return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items])
@provider(IVocabularyFactory)
def GenderVocabulary(context):
return SimpleVocabulary([SimpleTerm(value=k, title=v)
for k, v in gender_vocabulary()])
@provider(IVocabularyFactory)
def CountryVocabulary(context):
"""VocabularyFactory for countries from ISO3166 source.
"""
return SimpleVocabulary([SimpleTerm(value=k, title=v)
for k, v in country_vocabulary()])
@provider(IVocabularyFactory)
def AvailableShippingMethodsVocabulary(context):
shippings = Shippings(context).shippings
items = [(shipping.sid, shipping.label) for shipping in shippings]
return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items])
@provider(IVocabularyFactory)
def ShippingMethodsVocabulary(context):
try:
items = Shippings(context).vocab
except (KeyError, TypeError):
# happens GS profile application if registry entries not present yet
return AvailableShippingMethodsVocabulary(context)
return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items])
@provider(IVocabularyFactory)
def AvailablePaymentMethodsVocabulary(context):
payments = Payments(context).payments
items = [(payment.pid, payment.label) for payment in payments]
return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items])
@provider(IVocabularyFactory)
def PaymentMethodsVocabulary(context):
try:
items = Payments(context).vocab
except KeyError:
# happens GS profile application if registry entries not present yet
return AvailablePaymentMethodsVocabulary(context)
return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items])
@provider(IVocabularyFactory)
def SurchargeablePaymentMethodsVocabulary(context):
payments = Payments(context).payments
items = [(payment.pid, payment.label) for payment in payments]
return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items])
| bsd-3-clause | -2,564,873,223,783,599,600 | 34.011364 | 77 | 0.697339 | false |
elaske/mufund | tests.py | 1 | 3839 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Evan Laske
# @Date: 2014-03-01 21:45:31
# @Last Modified by: Evan Laske
# @Last Modified time: 2015-09-15 23:51:12
import urllib
import urllib2
from bs4 import BeautifulSoup
import html5lib
import re
from StockQuote import StockQuote
from MutualFundData import MutualFundData
import logging
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('tickers', metavar='ticker', nargs='+', help='The ticker(s) of the funds to predict.')
parser.add_argument('--logfile', dest='logfile', default='', help='Specify a log file to log info to.')
parser.add_argument('--loglevel', dest='loglevel', default='', help='Specify a logging level to output.')
args = parser.parse_args()
# Logging configuration args
logConfigArgs = dict()
# If the log level was specified
if args.loglevel:
# Convert it to something usable
numeric_level = getattr(logging, args.loglevel.upper(), None)
# Double-check it's a valid logging level
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.loglevel)
logConfigArgs['level'] = numeric_level
# If there was any of the logging files specified...
if args.logfile:
logConfigArgs['filename'] = args.logfile
# This will make the log file be overwritten each time.
logConfigArgs['filemode'] = 'w'
# If any of the logging arguments are specified, configure logging
if args.logfile or args.loglevel:
logging.basicConfig(**logConfigArgs)
# Gather the data from the given stocks
testStockQuote(args.tickers)
# Test the mutual fund data gathering
testMutualFund(args.tickers)
def testStockQuote(tickers):
"""
"""
for ticker in tickers:
sq = StockQuote(ticker)
print sq.ticker, sq.price, sq.change, sq.percent
def testMutualFund(tickers):
"""
"""
for ticker in tickers:
mfd = MutualFundData(ticker)
print mfd.price, mfd.change, mfd.percent
holdings = mfd.holdings()
print holdings
for h in holdings:
print 'Retrieving {0} data...'.format(h)
sq = StockQuote(h)
delta = float(holdings[h])*float(sq.percent)/100
holdings[h] = [holdings[h], sq.price, sq.change, sq.percent, delta]
print delta, holdings[h], 'Complete.'
print sq
#print holdings
print '\nESTIMATED CHANGE: {0}\nTOTAL COMPOSITION: {1}'.format(
sum([v[4] for (k,v) in holdings.items()]),
sum([float(v[0]) for (k,v) in holdings.items()]))
def randomTest():
ticker = "FBIOX"
quoteURL = 'http://quotes.morningstar.com/fund/f?t='
portfolioURL = 'http://portfolios.morningstar.com/fund/summary?t='
holdingsURL = 'http://portfolios.morningstar.com/fund/holdings?t='
googleFinanceURL = 'http://www.google.com/finance?q='
# Test with a stock
#sq = StockQuote("goog")
#print sq.price, sq.change, sq.percent
#print sq
# Test with a mutual fund
sq = StockQuote("fiuix")
print sq.price, sq.change, sq.percent
mfd = MutualFundData("FBIOX")
print mfd.price, mfd.change, mfd.percent
holdings = mfd.holdings()
#print holdings
for h in holdings:
print 'Retrieving {0} data...'.format(h)
sq = StockQuote(h)
delta = float(holdings[h])*float(sq.percent)/100
holdings[h] = [holdings[h], sq.price, sq.change, sq.percent, delta]
print 'Complete.'
#print holdings
print '\nESTIMATED CHANGE: {0}\nTOTAL COMPOSITION: {1}'.format(
sum([v[4] for (k,v) in holdings.items()]),
sum([float(v[0]) for (k,v) in holdings.items()]))
# Standard main call
if __name__ == "__main__":
main() | gpl-3.0 | 6,559,670,225,888,140,000 | 32.684211 | 110 | 0.636624 | false |
gooddata/openstack-nova | nova/tests/functional/regressions/test_bug_1780373.py | 1 | 5391 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import policy_fixture
class TestMultiCreateServerGroupMemberOverQuota(
test.TestCase, integrated_helpers.InstanceHelperMixin):
"""This tests a regression introduced in the Pike release.
Starting in the Pike release, quotas are no longer tracked using usages
and reservations tables but instead perform a resource counting operation
at the point of resource creation.
When creating multiple servers in the same request that belong in the same
server group, the [quota]/server_group_members config option is checked
to determine if those servers can belong in the same group based on quota.
However, the quota check for server_group_members only counts existing
group members based on live instances in the cell database(s). But the
actual instance record isn't created in the cell database until *after* the
server_group_members quota check happens. Because of this, it is possible
to bypass the server_group_members quota check when creating multiple
servers in the same request.
"""
def setUp(self):
super(TestMultiCreateServerGroupMemberOverQuota, self).setUp()
self.flags(server_group_members=2, group='quota')
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
self.api.microversion = '2.37' # so we can specify networks='none'
fake_image.stub_out_image_service(self)
self.addCleanup(fake_image.FakeImageService_reset)
group = {'name': 'test group', 'policies': ['soft-anti-affinity']}
self.created_group = self.api.post_server_groups(group)
def test_multi_create_server_group_members_over_quota(self):
"""Recreate scenario for the bug where we create an anti-affinity
server group and then create 3 servers in the group using a
multi-create POST /servers request.
"""
server_req = self._build_minimal_create_server_request(
self.api, 'test_multi_create_server_group_members_over_quota',
image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
networks='none')
server_req['min_count'] = 3
server_req['return_reservation_id'] = True
hints = {'group': self.created_group['id']}
# We should get a 403 response due to going over quota on server
# group members in a single request.
self.api.api_post(
'/servers', {'server': server_req, 'os:scheduler_hints': hints},
check_response_status=[403])
group = self.api.api_get(
'/os-server-groups/%s' %
self.created_group['id']).body['server_group']
self.assertEqual(0, len(group['members']))
def test_concurrent_request_server_group_members_over_quota(self):
"""Recreate scenario for the bug where we create 3 servers in the
same group but in separate requests. The NoopConductorFixture is used
to ensure the instances are not created in the nova cell database which
means the quota check will have to rely on counting group members using
build requests from the API DB.
"""
# These aren't really concurrent requests, but we can simulate that
# by using NoopConductorFixture.
self.useFixture(nova_fixtures.NoopConductorFixture())
for x in range(3):
server_req = self._build_minimal_create_server_request(
self.api, 'test_concurrent_request_%s' % x,
image_uuid=fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID,
networks='none')
hints = {'group': self.created_group['id']}
# This should result in a 403 response on the 3rd server.
if x == 2:
self.api.api_post(
'/servers',
{'server': server_req, 'os:scheduler_hints': hints},
check_response_status=[403])
else:
self.api.post_server(
{'server': server_req, 'os:scheduler_hints': hints})
# There should only be two servers created which are both members of
# the same group.
servers = self.api.get_servers(detail=False)
self.assertEqual(2, len(servers))
group = self.api.api_get(
'/os-server-groups/%s' %
self.created_group['id']).body['server_group']
self.assertEqual(2, len(group['members']))
| apache-2.0 | 3,554,794,791,927,263,000 | 48.009091 | 79 | 0.667038 | false |
Foldblade/EORS | Mypackage/back_to_yesterday.py | 1 | 3091 | # encoding:utf-8
'''
————————————————————————————————
back_to_yesterday.py
Rolls files back to the backed-up versions (the 'back to yesterday' feature).
How it works: delete the current files, then unzip the backup archives so they overwrite everything.
————————————————————————————————
'''
import os
import zipfile
import shutil
import time
def back_to_yesterday():
where_script = os.path.split(os.path.realpath(__file__))[0]
# print(where_script)
where_rootmenu = where_script[:where_script.rfind('\\')]
# print(where_rootmenu)
def unzip(zipfilepath, unzippath):
        # zipfilepath is the archive to extract; unzippath is the target directory
# e.g. unzip(where_rootmenu + '/cache/cache.zip', where_rootmenu + '/cache')
f = zipfile.ZipFile(zipfilepath, 'r')
for file in f.infolist():
d = file.date_time
            gettime = "%s/%s/%s %s:%s" % (d[0], d[1], d[2], d[3], d[4])  # original modification time of the file
f.extract(file, unzippath)
filep = os.path.join(unzippath, file.filename)
timearry = time.mktime(time.strptime(gettime, '%Y/%m/%d %H:%M'))
            os.utime(filep, (timearry, timearry))  # restore the file's original modification time
return
def clear_unexist(dirname, zipfilename):
zipfilepath = (where_rootmenu + '/backup/' + zipfilename)
fileinzip = []
f = zipfile.ZipFile(zipfilepath, 'r')
for filename in f.namelist():
# print(filename)
fileinzip.append(filename)
for parent, dirnames, filenames in os.walk(dirname):
for filename in filenames:
# print ("parent is:" + parent)
# print("filename is:" + filename)
# print ("the full name of the file is:" + os.path.join(parent,filename))
if filename not in fileinzip:
                    os.remove(os.path.join(parent, filename))  # delete files that are not in the archive
return
clear_unexist(where_rootmenu + '/cache', 'cache.zip')
clear_unexist(where_rootmenu + '/data', 'data.zip')
clear_unexist(where_rootmenu + '/output', 'output.zip')
    # Remove files that are not present in the backup archives
shutil.copyfile(where_rootmenu + '/backup/cache.zip', where_rootmenu + '/cache/cache.zip')
shutil.copyfile(where_rootmenu + '/backup/output.zip', where_rootmenu + '/output/output.zip')
shutil.copyfile(where_rootmenu + '/backup/data.zip', where_rootmenu + '/data/data.zip')
    # Copy the backup zips into their respective directories
unzip(where_rootmenu + '/cache/cache.zip', where_rootmenu + '/cache')
unzip(where_rootmenu + '/output/output.zip', where_rootmenu + '/output')
unzip(where_rootmenu + '/data/data.zip', where_rootmenu + '/data')
    # Extract the archives
os.remove(where_rootmenu + '/cache/cache.zip')
os.remove(where_rootmenu + '/output/output.zip')
os.remove(where_rootmenu + '/data/data.zip')
    # Delete the copied zip files
    print('Successfully traveled back to yesterday!!')
return
| gpl-3.0 | -882,281,870,340,508,500 | 35.5 | 97 | 0.595705 | false |
XiMuYouZi/PythonDemo | Crawler/Zhihu/zhihuuser/spiders/zhihu_user.py | 1 | 4440 | # -*- coding: utf-8 -*-
# Crawls user profiles across the whole of Zhihu
import json
from scrapy import Spider, Request
from Crawler.Zhihu.zhihuuser.items import UserItem
class ZhihuSpider(Spider):
    # Ignore 301/302 redirect responses
# handle_httpstatus_list = [301, 302]
name = "zhihu_user"
allowed_domains = ["www.zhihu.com"]
user_url = 'https://www.zhihu.com/api/v4/members/{user}?include={include}'
follows_url = 'https://www.zhihu.com/api/v4/members/{user}/followees?include={include}&offset={offset}&limit={limit}'
followers_url = 'https://www.zhihu.com/api/v4/members/{user}/followers?include={include}&offset={offset}&limit={limit}'
start_user = 'excited-vczh'
user_query = 'locations,employments,gender,educations,business,voteup_count,thanked_Count,follower_count,following_count,cover_url,following_topic_count,following_question_count,following_favlists_count,following_columns_count,answer_count,articles_count,pins_count,question_count,commercial_question_count,favorite_count,favorited_count,logs_count,marked_answers_count,marked_answers_text,message_thread_token,account_status,is_active,is_force_renamed,is_bind_sina,sina_weibo_url,sina_weibo_name,show_sina_weibo,is_blocking,is_blocked,is_following,is_followed,mutual_followees_count,vote_to_count,vote_from_count,thank_to_count,thank_from_count,thanked_count,description,hosted_live_count,participated_live_count,allow_message,industry_category,org_name,org_homepage,badge[?(type=best_answerer)].topics'
follows_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'
followers_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'
def start_requests(self):
yield Request(self.user_url.format(user=self.start_user, include=self.user_query), self.parse_user,dont_filter=True)
yield Request(self.follows_url.format(user=self.start_user, include=self.follows_query, limit=20, offset=0),
self.parse_follows,dont_filter=True)
yield Request(self.followers_url.format(user=self.start_user, include=self.followers_query, limit=20, offset=0),
self.parse_followers,dont_filter=True)
def parse(self, response):
print(response.text)
    # Parse each user's profile
def parse_user(self, response):
result = json.loads(response.text,strict=False)
        print('Parsing user profile\n: ', result)
item = UserItem()
        # Extract the user fields
for field in item.fields:
if field in result.keys():
item[field] = result.get(field)
yield item
        # Generate Requests for this user's followees and followers
yield Request(
self.follows_url.format(user=result.get('url_token'), include=self.follows_query, limit=20, offset=0),
self.parse_follows)
yield Request(
self.followers_url.format(user=result.get('url_token'), include=self.followers_query, limit=20, offset=0),
self.parse_followers)
    # Parse the user's followees list
def parse_follows(self, response):
results = json.loads(response.text,strict=False)
        print('Parsing followees list\n: ', results)
if 'data' in results.keys():
for result in results.get('data'):
yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query),
self.parse_user)
if 'paging' in results.keys() and results.get('paging').get('is_end') == False:
next_page = results.get('paging').get('next')
yield Request(next_page,
self.parse_follows)
    # Parse the user's followers list
def parse_followers(self, response):
results = json.loads(response.text,strict=False)
        print('Parsing followers list\n: ', results)
if 'data' in results.keys():
for result in results.get('data'):
yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query),
self.parse_user)
if 'paging' in results.keys() and results.get('paging').get('is_end') == False:
next_page = results.get('paging').get('next')
yield Request(next_page,
self.parse_followers)
| mit | 4,831,802,806,455,866,000 | 46.88764 | 808 | 0.666823 | false |
thruflo/dogpile.cache | tests/cache/test_memcached_backend.py | 1 | 6526 | from ._fixtures import _GenericBackendTest, _GenericMutexTest
from . import eq_, winsleep
from unittest import TestCase
from threading import Thread
import time
from nose import SkipTest
from dogpile.cache import compat
class _TestMemcachedConn(object):
@classmethod
def _check_backend_available(cls, backend):
try:
client = backend._create_client()
client.set("x", "y")
assert client.get("x") == "y"
except:
raise SkipTest(
"memcached is not running or "
"otherwise not functioning correctly")
class _NonDistributedMemcachedTest(_TestMemcachedConn, _GenericBackendTest):
region_args = {
"key_mangler": lambda x: x.replace(" ", "_")
}
config_args = {
"arguments": {
"url": "127.0.0.1:11211"
}
}
class _DistributedMemcachedTest(_TestMemcachedConn, _GenericBackendTest):
region_args = {
"key_mangler": lambda x: x.replace(" ", "_")
}
config_args = {
"arguments": {
"url": "127.0.0.1:11211",
"distributed_lock": True
}
}
class _DistributedMemcachedMutexTest(_TestMemcachedConn, _GenericMutexTest):
config_args = {
"arguments": {
"url": "127.0.0.1:11211",
"distributed_lock": True
}
}
class PylibmcTest(_NonDistributedMemcachedTest):
backend = "dogpile.cache.pylibmc"
class PylibmcDistributedTest(_DistributedMemcachedTest):
backend = "dogpile.cache.pylibmc"
class PylibmcDistributedMutexTest(_DistributedMemcachedMutexTest):
backend = "dogpile.cache.pylibmc"
class BMemcachedTest(_NonDistributedMemcachedTest):
backend = "dogpile.cache.bmemcached"
class BMemcachedDistributedTest(_DistributedMemcachedTest):
backend = "dogpile.cache.bmemcached"
class BMemcachedDistributedMutexTest(_DistributedMemcachedMutexTest):
backend = "dogpile.cache.bmemcached"
class MemcachedTest(_NonDistributedMemcachedTest):
backend = "dogpile.cache.memcached"
class MemcachedDistributedTest(_DistributedMemcachedTest):
backend = "dogpile.cache.memcached"
class MemcachedDistributedMutexTest(_DistributedMemcachedMutexTest):
backend = "dogpile.cache.memcached"
from dogpile.cache.backends.memcached import GenericMemcachedBackend
from dogpile.cache.backends.memcached import PylibmcBackend
from dogpile.cache.backends.memcached import MemcachedBackend
class MockGenericMemcachedBackend(GenericMemcachedBackend):
def _imports(self):
pass
def _create_client(self):
return MockClient(self.url)
class MockMemcacheBackend(MemcachedBackend):
def _imports(self):
pass
def _create_client(self):
return MockClient(self.url)
class MockPylibmcBackend(PylibmcBackend):
def _imports(self):
pass
def _create_client(self):
return MockClient(self.url,
binary=self.binary,
behaviors=self.behaviors
)
class MockClient(object):
number_of_clients = 0
def __init__(self, *arg, **kw):
self.arg = arg
self.kw = kw
self.canary = []
self._cache = {}
MockClient.number_of_clients += 1
def get(self, key):
return self._cache.get(key)
def set(self, key, value, **kw):
self.canary.append(kw)
self._cache[key] = value
def delete(self, key):
self._cache.pop(key, None)
def __del__(self):
MockClient.number_of_clients -= 1
class PylibmcArgsTest(TestCase):
def test_binary_flag(self):
backend = MockPylibmcBackend(arguments={'url': 'foo','binary': True})
eq_(backend._create_client().kw["binary"], True)
def test_url_list(self):
backend = MockPylibmcBackend(arguments={'url': ["a", "b", "c"]})
eq_(backend._create_client().arg[0], ["a", "b", "c"])
def test_url_scalar(self):
backend = MockPylibmcBackend(arguments={'url': "foo"})
eq_(backend._create_client().arg[0], ["foo"])
def test_behaviors(self):
backend = MockPylibmcBackend(arguments={'url': "foo",
"behaviors": {"q": "p"}})
eq_(backend._create_client().kw["behaviors"], {"q": "p"})
def test_set_time(self):
backend = MockPylibmcBackend(arguments={'url': "foo",
"memcached_expire_time": 20})
backend.set("foo", "bar")
eq_(backend._clients.memcached.canary, [{"time": 20}])
def test_set_min_compress_len(self):
backend = MockPylibmcBackend(arguments={'url': "foo",
"min_compress_len": 20})
backend.set("foo", "bar")
eq_(backend._clients.memcached.canary, [{"min_compress_len": 20}])
def test_no_set_args(self):
backend = MockPylibmcBackend(arguments={'url': "foo"})
backend.set("foo", "bar")
eq_(backend._clients.memcached.canary, [{}])
class MemcachedArgstest(TestCase):
def test_set_time(self):
backend = MockMemcacheBackend(arguments={'url': "foo",
"memcached_expire_time": 20})
backend.set("foo", "bar")
eq_(backend._clients.memcached.canary, [{"time": 20}])
def test_set_min_compress_len(self):
backend = MockMemcacheBackend(arguments={'url': "foo",
"min_compress_len": 20})
backend.set("foo", "bar")
eq_(backend._clients.memcached.canary, [{"min_compress_len": 20}])
class LocalThreadTest(TestCase):
def setUp(self):
import gc
gc.collect()
eq_(MockClient.number_of_clients, 0)
def test_client_cleanup_1(self):
self._test_client_cleanup(1)
def test_client_cleanup_3(self):
self._test_client_cleanup(3)
def test_client_cleanup_10(self):
self._test_client_cleanup(10)
def _test_client_cleanup(self, count):
backend = MockGenericMemcachedBackend(arguments={'url': 'foo'})
canary = []
def f():
backend._clients.memcached
canary.append(MockClient.number_of_clients)
time.sleep(.05)
threads = [Thread(target=f) for i in range(count)]
for t in threads:
t.start()
for t in threads:
t.join()
eq_(canary, [i + 1 for i in range(count)])
if compat.py27:
eq_(MockClient.number_of_clients, 0)
else:
eq_(MockClient.number_of_clients, 1)
| bsd-3-clause | 7,280,850,532,205,273,000 | 29.783019 | 77 | 0.610175 | false |
sgzwiz/brython | tests/console.py | 1 | 2190 | import sys
import time
import random
#this sucks.. cannot find dis since "root" path is blah/test
#we might need to create a variable we pass via the brython function
# to state what the root path is.
# For now, we'll hardcode a relative path. :(
sys.path.append("../Lib")
import dis
_rand=random.random()
editor=JSObject(ace).edit("editor")
editor.getSession().setMode("ace/mode/python")
if sys.has_local_storage:
from local_storage import storage
else:
storage = False
def reset_src():
if storage:
editor.setValue(storage["py_src"])
else:
editor.setValue('for i in range(10):\n\tprint(i)')
editor.scrollToRow(0)
editor.gotoLine(0)
def write(data):
doc["console"].value += str(data)
sys.stdout = object()
sys.stdout.write = write
sys.stderr = object()
sys.stderr.write = write
def to_str(xx):
return str(xx)
doc['version'].text = '.'.join(map(to_str,sys.version_info))
output = ''
def show_console():
doc["console"].value = output
doc["console"].cols = 60
def clear_text():
editor.setValue('')
if sys.has_local_storage:
storage["py_src"]=''
doc["console"].value=''
def run():
global output
doc["console"].value=''
src = editor.getValue()
if storage:
storage["py_src"]=src
t0 = time.time()
exec(src)
output = doc["console"].value
    print('<completed in %s ms>' % ((time.time() - t0) * 1000.0))
# load a Python script
def on_complete(req):
editor.setValue(req.text)
editor.scrollToRow(0)
editor.gotoLine(0)
def load(evt):
_name=evt.target.value
req = ajax()
req.on_complete = on_complete
req.open('GET',_name+'?foo=%s' % _rand,False)
req.send()
def show_js():
src = editor.getValue()
doc["console"].value = dis.dis(src)
def change_theme(evt):
_theme=evt.target.value
editor.setTheme(_theme)
if storage:
storage["ace_theme"]=_theme
def reset_theme():
if storage:
if storage["ace_theme"] is not None:
if storage["ace_theme"].startswith("ace/theme/"):
editor.setTheme(storage["ace_theme"])
doc["ace_theme"].value=storage["ace_theme"]
reset_src()
reset_theme()
| bsd-3-clause | -4,744,852,278,866,266,000 | 19.660377 | 68 | 0.630594 | false |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/ratelimitbackend/backends.py | 1 | 2730 | import logging
import warnings
from datetime import datetime, timedelta
from django.contrib.auth.backends import ModelBackend
from django.core.cache import cache
from .exceptions import RateLimitException
logger = logging.getLogger('ratelimitbackend')
class RateLimitMixin(object):
"""
A mixin to enable rate-limiting in an existing authentication backend.
"""
cache_prefix = 'ratelimitbackend-'
minutes = 5
requests = 30
username_key = 'username'
def authenticate(self, **kwargs):
request = kwargs.pop('request', None)
username = kwargs[self.username_key]
if request is not None:
counts = self.get_counters(request)
if sum(counts.values()) >= self.requests:
logger.warning(
u"Login rate-limit reached: username '{0}', IP {1}".format(
username, self.get_ip(request),
)
)
raise RateLimitException('Rate-limit reached', counts)
else:
warnings.warn(u"No request passed to the backend, unable to "
u"rate-limit. Username was '%s'" % username,
stacklevel=2)
user = super(RateLimitMixin, self).authenticate(**kwargs)
if user is None and request is not None:
logger.info(
u"Login failed: username '{0}', IP {1}".format(
username,
self.get_ip(request),
)
)
cache_key = self.get_cache_key(request)
self.cache_incr(cache_key)
return user
def get_counters(self, request):
return cache.get_many(self.keys_to_check(request))
def keys_to_check(self, request):
now = datetime.now()
return [
self.key(
request,
now - timedelta(minutes=minute),
) for minute in range(self.minutes + 1)
]
def get_cache_key(self, request):
return self.key(request, datetime.now())
def key(self, request, dt):
return '%s%s-%s' % (
self.cache_prefix,
self.get_ip(request),
dt.strftime('%Y%m%d%H%M'),
)
def get_ip(self, request):
return request.META['REMOTE_ADDR']
def cache_incr(self, key):
"""
Non-atomic cache increment operation. Not optimal but
consistent across different cache backends.
"""
cache.set(key, cache.get(key, 0) + 1, self.expire_after())
def expire_after(self):
"""Cache expiry delay"""
return (self.minutes + 1) * 60
class RateLimitModelBackend(RateLimitMixin, ModelBackend):
pass
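# A minimal sketch of tightening the limits: because the mixin only keeps one
# cache counter per (IP, minute) and sums the recent counters on every login
# attempt, any backend can be rate-limited by mixing RateLimitMixin in first
# and overriding the class attributes. The class below is purely illustrative
# and is not referenced elsewhere.
class StricterRateLimitModelBackend(RateLimitMixin, ModelBackend):
    """Allows at most 10 attempts per IP over the 5-minute window."""
    minutes = 5
    requests = 10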
| agpl-3.0 | 6,461,471,023,397,237,000 | 29.674157 | 79 | 0.563736 | false |
joetsoi/moonstone | python/main.py | 1 | 1239 | from collections import namedtuple
from struct import unpack, unpack_from
Segment = namedtuple('Segment', 'offset length')
ViewportDimension = namedtuple('ViewportDimension', 'right left')
class MainExe(object):
def __init__(self, file_path):
data_segment = Segment(0x138a0, 0xf460)
with open(file_path, 'rb') as f:
f.seek(data_segment.offset)
data_segment_data = f.read(data_segment.length)
self.bold_f_char_lookup = unpack(
'>96B',
data_segment_data[0x8006:0x8006 + (128 - 32)]
)
self.screen_dimensions = ViewportDimension(*unpack(
'<2H',
data_segment_data[0x8002:0x8006]
))
self.strings = {
'created by': unpack(
'<5H',
data_segment_data[0x8DCC:0x8DCC + 10] #should back 10
),
'Loading...': unpack(
'<5H',
data_segment_data[0x8de0:0x8de0 + 10]
),
'Rob Anderson': unpack(
'<5H',
data_segment_data[0x8dd6:0x8dd6 + 10]
),
}
self.palette = unpack(
'<32H', data_segment_data[0x892:0x892 + 0x40]
)
| agpl-3.0 | 326,647,625,653,657,340 | 27.813953 | 69 | 0.51816 | false |
labsquare/CuteVariant | cutevariant/gui/plugins/vql_editor/__init__.py | 1 | 1026 | from PySide2.QtWidgets import QApplication
__title__ = "VQL Editor"
__description__ = "A VQL editor"
__long_description__ = QApplication.instance().translate(
"vql_editor",
"""
<p>This plugin allows you to manage most of the work that can be done in a
Cutevariant project by writing and executing VQL queries.</p>
<p>VQL is a <i>Domain Specific Language</i>, its main purpose is to filter variants
in the same fashion as a SQL query.</p>
<p>The VQL language can be run from the user interface via this plugin or directly
via the command line of Cutevariant.</p>
<p>Although the plugin offers auto-completion (do not forget the joker character '!')
and syntax highlighting, if you want to have more details on how to write your own VQL queries,
as well as the keywords of the language, please see the project wiki page:</p>
<a href="https://github.com/labsquare/cutevariant/wiki/VQL-language">
https://github.com/labsquare/cutevariant/wiki/VQL-language</a>
""",
)
__author__ = "Sacha schutz"
__version__ = "1.0.0"
| gpl-3.0 | 5,526,662,183,943,269,000 | 40.04 | 95 | 0.734893 | false |
PabloTunnon/pykka-deb | tests/logging_test.py | 1 | 4370 | import os
import sys
import logging
import threading
import unittest
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
from tests import TestLogHandler
class LoggingNullHandlerTest(unittest.TestCase):
def test_null_handler_is_added_to_avoid_warnings(self):
logger = logging.getLogger('pykka')
handler_names = [h.__class__.__name__ for h in logger.handlers]
self.assert_('NullHandler' in handler_names)
class ActorLoggingTest(object):
def setUp(self):
self.on_stop_was_called = self.event_class()
self.on_failure_was_called = self.event_class()
self.actor_ref = self.AnActor.start(self.on_stop_was_called,
self.on_failure_was_called)
self.actor_proxy = self.actor_ref.proxy()
self.log_handler = TestLogHandler(logging.DEBUG)
self.root_logger = logging.getLogger()
self.root_logger.addHandler(self.log_handler)
def tearDown(self):
self.log_handler.close()
ActorRegistry.stop_all()
def test_unexpected_messages_are_logged(self):
self.actor_ref.ask({'unhandled': 'message'})
self.assertEqual(1, len(self.log_handler.messages['warning']))
log_record = self.log_handler.messages['warning'][0]
self.assertEqual('Unexpected message received by %s' % self.actor_ref,
log_record.getMessage().split(': ')[0])
def test_exception_is_logged_when_returned_to_caller(self):
try:
self.actor_proxy.raise_exception().get()
self.fail('Should raise exception')
except Exception:
pass
self.assertEqual(1, len(self.log_handler.messages['debug']))
log_record = self.log_handler.messages['debug'][0]
self.assertEqual('Exception returned from %s to caller:' %
self.actor_ref, log_record.getMessage())
self.assertEqual(Exception, log_record.exc_info[0])
self.assertEqual('foo', str(log_record.exc_info[1]))
def test_exception_is_logged_when_not_reply_requested(self):
self.on_failure_was_called.clear()
self.actor_ref.tell({'command': 'raise exception'})
self.on_failure_was_called.wait(5)
self.assertTrue(self.on_failure_was_called.is_set())
self.assertEqual(1, len(self.log_handler.messages['error']))
log_record = self.log_handler.messages['error'][0]
self.assertEqual('Unhandled exception in %s:' % self.actor_ref,
log_record.getMessage())
self.assertEqual(Exception, log_record.exc_info[0])
self.assertEqual('foo', str(log_record.exc_info[1]))
def test_base_exception_is_logged(self):
self.log_handler.reset()
self.on_stop_was_called.clear()
self.actor_ref.tell({'command': 'raise base exception'})
self.on_stop_was_called.wait(5)
self.assertTrue(self.on_stop_was_called.is_set())
self.assertEqual(3, len(self.log_handler.messages['debug']))
log_record = self.log_handler.messages['debug'][0]
self.assertEqual('BaseException() in %s. Stopping all actors.'
% self.actor_ref, log_record.getMessage())
class AnActor(object):
def __init__(self, on_stop_was_called, on_failure_was_called):
self.on_stop_was_called = on_stop_was_called
self.on_failure_was_called = on_failure_was_called
def on_stop(self):
self.on_stop_was_called.set()
def on_failure(self, exception_type, exception_value, traceback):
self.on_failure_was_called.set()
def on_receive(self, message):
if message.get('command') == 'raise exception':
return self.raise_exception()
elif message.get('command') == 'raise base exception':
raise BaseException()
else:
super(AnActor, self).on_receive(message)
def raise_exception(self):
raise Exception('foo')
class ThreadingActorLoggingTest(ActorLoggingTest, unittest.TestCase):
event_class = threading.Event
class AnActor(AnActor, ThreadingActor):
pass
if sys.version_info < (3,) and 'TRAVIS' not in os.environ:
import gevent.event
from pykka.gevent import GeventActor
class GeventActorLoggingTest(ActorLoggingTest, unittest.TestCase):
event_class = gevent.event.Event
class AnActor(AnActor, GeventActor):
pass
| apache-2.0 | -7,781,119,297,965,645,000 | 35.416667 | 78 | 0.65492 | false |
teoliphant/scipy | scipy/ndimage/filters.py | 2 | 40010 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import _ni_support
import _nd_image
from scipy.misc import doccer
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
_input_doc = \
"""input : array-like
input array to filter"""
_axis_doc = \
"""axis : integer, optional
axis of ``input`` along which to calculate. Default is -1"""
_output_doc = \
"""output : array, optional
The ``output`` parameter passes an array in which to store the
filter output."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either ``size`` or ``footprint`` must be defined. ``size`` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
``footprint`` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and ``size`` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The ``origin`` parameter controls the placement of the filter. Default 0"""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input':_input_doc,
'axis':_axis_doc,
'output':_output_doc,
'size_foot':_size_foot_doc,
'mode':_mode_doc,
'cval':_cval_doc,
'origin':_origin_doc,
'extra_arguments':_extra_arguments_doc,
'extra_keywords':_extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
@docfiller
def correlate1d(input, weights, axis = -1, output = None, mode = "reflect",
cval = 0.0, origin = 0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
one-dimensional sequence of numbers
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if ((len(weights) // 2 + origin < 0) or
(len(weights) // 2 + origin > len(weights))):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return return_value
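# With the default 'reflect' mode and a two-tap kernel, each output element is
# 1*x[i - 1] + 3*x[i]; worked through by hand this gives
# [8, 26, 8, 12, 7, 28, 36, 9] for the input below (illustrative helper, not
# called anywhere in this module):
def _correlate1d_two_tap_example():
    return correlate1d([2, 8, 0, 4, 1, 9, 9, 0], [1, 3])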
@docfiller
def convolve1d(input, weights, axis = -1, output = None, mode = "reflect",
cval = 0.0, origin = 0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
one-dimensional sequence of numbers
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
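# Convolution is correlation with a reversed kernel, so for any odd-length
# kernel the two routines agree once the weights are flipped; a minimal
# illustrative check (the helper is not called anywhere in this module):
def _convolve1d_matches_reversed_correlate1d():
    x = numpy.array([2., 8., 0., 4., 1., 9., 9., 0.])
    w = numpy.array([1., 2., 3.])
    return numpy.allclose(convolve1d(x, w), correlate1d(x, w[::-1]))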
@docfiller
def gaussian_filter1d(input, sigma, axis = -1, order = 0, output = None,
mode = "reflect", cval = 0.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : {0, 1, 2, 3}, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. An order of 1, 2, or 3 corresponds to convolution with
the first, second or third derivatives of a Gaussian. Higher
order derivatives are not implemented
%(output)s
%(mode)s
%(cval)s
"""
if order not in range(4):
raise ValueError('Order outside 0..3 not implemented')
sd = float(sigma)
# make the length of the filter equal to 4 times the standard
# deviations:
lw = int(4.0 * sd + 0.5)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd = sd * sd
# calculate the kernel:
for ii in range(1, lw + 1):
tmp = math.exp(-0.5 * float(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
# implement first, second and third order derivatives:
if order == 1 : # first derivative
weights[lw] = 0.0
for ii in range(1, lw + 1):
x = float(ii)
tmp = -x / sd * weights[lw + ii]
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
elif order == 2: # second derivative
weights[lw] *= -1.0 / sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd
weights[lw + ii] = tmp
weights[lw - ii] = tmp
elif order == 3: # third derivative
weights[lw] = 0.0
sd2 = sd * sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
return correlate1d(input, weights, axis, output, mode, cval, 0)
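# The kernel assembled above is normalised to sum to one, so with order=0 a
# constant signal passes through (numerically) unchanged; a minimal
# illustrative check (the helper is not called anywhere in this module):
def _gaussian_filter1d_preserves_constants():
    x = numpy.ones(9)
    return numpy.allclose(gaussian_filter1d(x, sigma=2.0), 1.0)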
@docfiller
def gaussian_filter(input, sigma, order = 0, output = None,
mode = "reflect", cval = 0.0):
"""Multi-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : {0, 1, 2, 3} or sequence from same set, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. An order of 1, 2, or 3
corresponds to convolution with the first, second or third
derivatives of a Gaussian. Higher order derivatives are not
implemented
%(output)s
%(mode)s
%(cval)s
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
if not set(orders).issubset(set(range(4))):
        raise ValueError('Order outside 0..3 not implemented')
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
axes = range(input.ndim)
axes = [(axes[ii], sigmas[ii], orders[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval)
input = output
else:
output[...] = input[...]
return return_value
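# As the Notes above say, the N-dimensional filter is just a sequence of
# one-dimensional passes; an illustrative check of that separability
# (the helper is not called anywhere in this module):
def _gaussian_filter_is_separable():
    a = numpy.arange(50, step=2).reshape((5, 5)).astype(numpy.float64)
    direct = gaussian_filter(a, sigma=1.0)
    by_axis = gaussian_filter1d(gaussian_filter1d(a, 1.0, axis=0), 1.0, axis=1)
    return numpy.allclose(direct, by_axis)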
@docfiller
def prewitt(input, axis = -1, output = None, mode = "reflect", cval = 0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, mode, cval, 0,)
return return_value
@docfiller
def sobel(input, axis = -1, output = None, mode = "reflect", cval = 0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, mode, cval, 0)
return return_value
@docfiller
def generic_laplace(input, derivative2, output = None, mode = "reflect",
cval = 0.0,
extra_arguments = (),
extra_keywords = None):
"""Calculate a multidimensional laplace filter using the provided
second derivative function.
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = range(input.ndim)
if len(axes) > 0:
derivative2(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return return_value
@docfiller
def laplace(input, output = None, mode = "reflect", cval = 0.0):
"""Calculate a multidimensional laplace filter using an estimation
for the second derivative based on differences.
Parameters
----------
%(input)s
%(output)s
%(mode)s
%(cval)s
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
@docfiller
def gaussian_laplace(input, sigma, output = None, mode = "reflect",
cval = 0.0):
"""Calculate a multidimensional laplace filter using gaussian
second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments = (sigma,))
@docfiller
def generic_gradient_magnitude(input, derivative, output = None,
mode = "reflect", cval = 0.0,
extra_arguments = (), extra_keywords = None):
"""Calculate a gradient magnitude using the provided function for
the gradient.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = range(input.ndim)
if len(axes) > 0:
derivative(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
if numpy.version.short_version > '1.6.1':
numpy.sqrt(output, output, casting='unsafe')
else:
numpy.sqrt(output, output)
else:
output[...] = input[...]
return return_value
@docfiller
def gaussian_gradient_magnitude(input, sigma, output = None,
mode = "reflect", cval = 0.0):
"""Calculate a multidimensional gradient magnitude using gaussian
derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode, cval)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments = (sigma,))
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
raise ValueError('invalid origin')
if not weights.flags.contiguous:
weights = weights.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
return return_value
@docfiller
def correlate(input, weights, output = None, mode = 'reflect', cval = 0.0,
origin = 0):
"""
Multi-dimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
input : array-like
input array to filter
weights : ndarray
array of weights, same number of dimensions as input
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
See Also
--------
convolve : Convolve an image with a kernel.
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
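# For a kernel that is symmetric under a 180 degree flip, correlation and
# convolution coincide; an illustrative check (the helper is not called
# anywhere in this module):
def _correlate_equals_convolve_for_symmetric_kernel():
    a = numpy.arange(25).reshape(5, 5).astype(numpy.float64)
    k = numpy.array([[0., 1., 0.],
                     [1., 1., 1.],
                     [0., 1., 0.]])
    return numpy.allclose(correlate(a, k), convolve(a, k))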
@docfiller
def convolve(input, weights, output = None, mode = 'reflect', cval = 0.0,
origin = 0):
"""
Multi-dimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
input : array_like
Input array to filter.
weights : array_like
Array of weights, same number of dimensions as input
output : ndarray, optional
The `output` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
the `mode` parameter determines how the array borders are
handled. For 'constant' mode, values beyond borders are set to be
`cval`. Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : array_like, optional
The `origin` parameter controls the placement of the filter.
Default is 0.
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+j-k} W_j}`, where
W is the `weights` kernel,
j is the n-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
because in this case borders (i.e. where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
>>> k = np.array([[0,1,0],[0,1,0],[0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
With ``mode='nearest'``, the single nearest value in to an edge in
`input` is repeated as many times as needed to match the overlapping
`weights`.
>>> c = np.array([[2, 0, 1],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
@docfiller
def uniform_filter1d(input, size, axis = -1, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculate a one-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : integer
length of uniform filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return return_value
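# With size=3 and the default 'reflect' mode each output element is the mean
# of a three-sample window, cast back to the input dtype; worked through by
# hand this gives [4, 3, 4, 1, 4, 6, 6, 3] for the input below (illustrative
# helper, not called anywhere in this module):
def _uniform_filter1d_example():
    return uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)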
@docfiller
def uniform_filter(input, size = 3, output = None, mode = "reflect",
cval = 0.0, origin = 0):
"""Multi-dimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
axes = range(input.ndim)
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def minimum_filter1d(input, size, axis = -1, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculate a one-dimensional minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return return_value
@docfiller
def maximum_filter1d(input, size, axis = -1, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculate a one-dimensional maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D maximum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return return_value
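# With size=3 and the default 'reflect' mode each output element is the
# largest value in a three-sample window; worked through by hand this gives
# [8, 8, 8, 4, 9, 9, 9, 9] for the input below (illustrative helper, not
# called anywhere in this module):
def _maximum_filter1d_example():
    return maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)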
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable= True
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
if numpy.alltrue(numpy.ravel(footprint),axis=0):
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
axes = range(input.ndim)
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
return return_value
@docfiller
def minimum_filter(input, size = None, footprint = None, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculates a multi-dimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@docfiller
def maximum_filter(input, size = None, footprint = None, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculates a multi-dimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@docfiller
def _rank_filter(input, rank, size = None, footprint = None, output = None,
mode = "reflect", cval = 0.0, origin = 0, operation = 'rank'):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origin)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origin)
else:
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
return return_value
@docfiller
def rank_filter(input, rank, size = None, footprint = None, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculates a multi-dimensional rank filter.
Parameters
----------
%(input)s
rank : integer
        The rank parameter may be less than zero, i.e., rank = -1
indicates the largest element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
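# A negative rank counts from the top of the sorted window, so rank -1 picks
# the largest element and reduces to a maximum filter; an illustrative check
# (the helper is not called anywhere in this module):
def _rank_minus_one_is_maximum():
    a = numpy.arange(25).reshape(5, 5)
    return numpy.array_equal(rank_filter(a, -1, size=3),
                             maximum_filter(a, size=3))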
@docfiller
def median_filter(input, size = None, footprint = None, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""
Calculates a multi-dimensional median filter.
Parameters
----------
input : array-like
input array to filter
size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either ``size`` or ``footprint`` must be defined. ``size`` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
``footprint`` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and ``size`` is 2, then the actual size used is
(2,2,2).
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@docfiller
def percentile_filter(input, percentile, size = None, footprint = None,
output = None, mode = "reflect", cval = 0.0, origin = 0):
"""Calculates a multi-dimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
percentile = -20 equals percentile = 80
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
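# With a 3x3 footprint (nine samples) the 50th percentile maps to rank
# int(9 * 50 / 100) == 4, which is the same rank the median filter uses
# (9 // 2), so the two filters agree; an illustrative check (the helper is
# not called anywhere in this module):
def _median_matches_50th_percentile():
    a = numpy.random.RandomState(0).randint(0, 100, (6, 6)).astype(numpy.float64)
    return numpy.array_equal(median_filter(a, size=3),
                             percentile_filter(a, 50, size=3))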
@docfiller
def generic_filter1d(input, function, filter_size, axis = -1,
output = None, mode = "reflect", cval = 0.0, origin = 0,
extra_arguments = (), extra_keywords = None):
"""Calculate a one-dimensional filter along the given axis.
generic_filter1d iterates over the lines of the array, calling the
given function at each line. The arguments of the line are the
input line, and the output line. The input and output lines are 1D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : callable
function to apply along given axis
filter_size : scalar
length of the filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = _ni_support._check_axis(axis, input.ndim)
if ((filter_size // 2 + origin < 0) or
(filter_size // 2 + origin >= filter_size)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments, extra_keywords)
return return_value
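# The callable receives the boundary-extended input line and the output line
# as 1-D double arrays and must fill the output line in place; for
# filter_size=3 the window for output element i starts at input element i.
# A moving three-point sum could therefore look like this (illustrative
# helper, not called anywhere in this module):
def _three_point_sum(input_line, output_line):
    for i in range(output_line.size):
        output_line[i] = input_line[i] + input_line[i + 1] + input_line[i + 2]
# e.g. generic_filter1d(data, _three_point_sum, filter_size=3)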
@docfiller
def generic_filter(input, function, size = None, footprint = None,
output = None, mode = "reflect", cval = 0.0, origin = 0,
extra_arguments = (), extra_keywords = None):
"""Calculates a multi-dimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1D array of double values.
Parameters
----------
%(input)s
function : callable
function to apply at each element
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return return_value
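# The callable receives the values under the footprint as a flat 1-D double
# array and returns a single number; passing numpy's peak-to-peak function
# gives a simple local-range filter (illustrative helper, not called anywhere
# in this module):
def _local_range_filter(a):
    return generic_filter(a, numpy.ptp, size=3)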
| bsd-3-clause | -4,108,254,230,189,439,500 | 34.004374 | 79 | 0.597751 | false |
vaniakosmos/memes-reposter | apps/imgur/migrations/0001_initial.py | 1 | 1605 | # Generated by Django 2.0.3 on 2018-06-30 17:27
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ImgurConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('score_limit', models.IntegerField(default=1000, validators=[django.core.validators.MinValueValidator(0)])),
('good_tags', models.TextField(blank=True)),
('bad_tags', models.TextField(blank=True)),
('exclude_mode', models.BooleanField(default=True, help_text='If true posts with bad tags will be filtered out. Otherwise only posts from with good tags will pass the filter.')),
('channel_username', models.CharField(max_length=200, null=True)),
('chat_id', models.BigIntegerField(null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imgur_id', models.CharField(max_length=200)),
('title', models.TextField()),
('is_album', models.BooleanField()),
('tags', models.TextField()),
('images_links', models.TextField()),
],
),
]
| mit | -4,660,612,646,727,245,000 | 38.146341 | 194 | 0.560748 | false |
dsweet04/rekall | rekall-core/rekall/plugins/windows/heap_analysis.py | 1 | 16866 | # Rekall Memory Forensics
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""The module implements user mode heap analysis.
Recent versions of windows use the Low Fragmentation Heap (LFH).
http://illmatics.com/Windows%208%20Heap%20Internals.pdf
http://illmatics.com/Understanding_the_LFH.pdf
http://www.leviathansecurity.com/blog/understanding-the-windows-allocator-a-redux/
"""
from rekall import scan
from rekall.plugins import core
from rekall.plugins.windows import common
from rekall_lib import utils
class InspectHeap(common.WinProcessFilter):
"""Inspect the process heap.
This prints a lot of interesting facts about the process heap. It is also
the foundation to many other plugins which find things in the process heaps.
NOTE: Currently we only support Windows 7 64 bit.
"""
name = "inspect_heap"
__args = [
dict(name="free", type="Boolean",
help="Also show freed chunks."),
dict(name="heaps", type="ArrayIntParser",
help="Only show these heaps (default show all)")
]
mode = "mode_amd64"
def __init__(self, *args, **kwargs):
super(InspectHeap, self).__init__(*args, **kwargs)
self.segments = utils.SortedCollection()
def enumerate_lfh_heap_allocations(self, heap, skip_freed=False):
"""Dump the low fragmentation heap."""
seen_blocks = set()
for lfh_block in heap.FrontEndHeap.SubSegmentZones.list_of_type(
"_LFH_BLOCK_ZONE", "ListEntry"):
block_length = lfh_block.FreePointer.v() - lfh_block.obj_end
segments = heap.obj_profile.Array(
target="_HEAP_SUBSEGMENT",
offset=lfh_block.obj_end,
size=block_length)
for segment in segments:
allocation_length = segment.BlockSize * 16
if segment.UserBlocks.v() in seen_blocks:
break
seen_blocks.add(segment.UserBlocks.v())
for entry in segment.UserBlocks.Entries:
# http://www.leviathansecurity.com/blog/understanding-the-windows-allocator-a-redux/
# Skip freed blocks if requested.
if skip_freed and entry.UnusedBytes & 0x38:
continue
UnusedBytes = entry.UnusedBytes & 0x3f - 0x8
# The actual length of user allocation is the difference
# between the HEAP allocation bin size and the unused bytes
# at the end of the allocation.
data_len = allocation_length - UnusedBytes
# The data length can not be larger than the allocation
# minus the critical parts of _HEAP_ENTRY. Sometimes,
# allocations overrun into the next element's _HEAP_ENTRY so
# they can store data in the next entry's
# entry.PreviousBlockPrivateData. In this case the
# allocation length seems to be larger by 8 bytes.
if data_len > allocation_length - 0x8:
data_len -= 0x8
yield (heap.obj_profile.String(entry.obj_end, term=None,
length=data_len),
allocation_length)
def enumerate_backend_heap_allocations(self, heap):
"""Enumerate all allocations for _EPROCESS instance."""
for seg in heap.Segments:
seg_end = seg.LastValidEntry.v()
# Ensure sanity.
if seg.Heap.deref() != heap:
continue
# The segment is empty - often seg_end is zero here.
if seg_end < seg.FirstEntry.v():
break
for entry in seg.FirstEntry.walk_list("NextEntry", True):
# If this is the last entry it goes until the end of the
# segment.
start = entry.obj_offset + 0x10
if start > seg_end:
break
allocation = entry.Allocation
yield allocation
def GenerateHeaps(self):
task = self.session.GetParameter("process_context")
resolver = self.session.address_resolver
# Try to load the ntdll profile.
ntdll_mod = resolver.GetModuleByName("ntdll")
if not ntdll_mod:
return
ntdll_prof = ntdll_mod.profile
# Set the ntdll profile on the _PEB member.
peb = task.m("Peb").cast(
"Pointer", target="_PEB", profile=ntdll_prof,
vm=task.get_process_address_space())
for heap in peb.ProcessHeaps:
yield heap
def render(self, renderer):
cc = self.session.plugins.cc()
with cc:
for task in self.filter_processes():
cc.SwitchProcessContext(task)
renderer.section()
renderer.format("{0:r}\n", task)
for heap in self.GenerateHeaps():
self.render_process_heap_info(heap, renderer)
def render_low_frag_info(self, heap, renderer):
"""Displays information about the low fragmentation front end."""
renderer.format("Low Fragmentation Front End Information:\n")
renderer.table_header([
dict(name="Entry", style="address"),
("Alloc", "allocation_length", "4"),
("Length", "length", ">4"),
dict(name="Data"),
])
# Render the LFH allocations in increasing allocation sizes. Collect
# them first, then display by sorted allocation size, and offset.
entries_by_size = {}
for entry, allocation_length in self.enumerate_lfh_heap_allocations(
heap):
entries_by_size.setdefault(allocation_length, []).append(entry)
for allocation_length, entries in sorted(entries_by_size.iteritems()):
for entry in sorted(entries, key=lambda x: x.obj_offset):
data = entry.v()[:64]
renderer.table_row(
entry,
allocation_length,
entry.length,
utils.HexDumpedString(data),
)
def render_process_heap_info(self, heap, renderer):
if (self.plugin_args.heaps and
heap.ProcessHeapsListIndex not in self.plugin_args.heaps):
return
if 1 <= heap.ProcessHeapsListIndex <= 64:
renderer.format("Heap {0}: {1:#x} ({2})\nBackend Info:\n\n",
heap.ProcessHeapsListIndex,
heap.BaseAddress,
heap.FrontEndHeapType)
renderer.table_header([
dict(name="Segment", type="TreeNode", width=18,
child=dict(style="address")),
("End", "segment_end", "[addr]"),
("Length", "length", "8"),
dict(name="Data"),
])
for seg in heap.Segments:
seg_start = seg.FirstEntry.obj_offset
seg_end = seg.LastValidEntry.v()
renderer.table_row(
seg_start, seg_end, seg_end - seg_start, depth=1)
for entry in seg.FirstEntry.walk_list("NextEntry", True):
# If this is the last entry it goes until the end of the
# segment.
start = entry.obj_offset + 0x10
if start > seg_end:
break
if entry.Flags.LAST_ENTRY:
end = seg.LastValidEntry.v()
else:
end = entry.obj_offset + entry.Size * 16
data = heap.obj_vm.read(start, min(16, end-start))
renderer.table_row(
entry,
end, end - start,
utils.HexDumpedString(data),
depth=2)
if heap.FrontEndHeapType.LOW_FRAG:
self.render_low_frag_info(heap, renderer)
class ShowAllocation(common.WindowsCommandPlugin):
"""Show the allocation containing the address."""
name = "show_allocation"
__args = [
dict(name="address", type="ArrayIntParser", positional=True,
help="The address to display"),
dict(name="preamble", type="IntParser", default=32,
help="How many bytes prior to the address to display."),
dict(name="length", type="IntParser", default=50 * 16,
help="How many bytes after the address to display.")
]
def BuildAllocationMap(self):
"""Build a map of all allocations for fast looksup."""
allocations = utils.RangedCollection()
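        # Each inserted range maps to a (start, length, kind) tuple: "B" marks
        # backend allocations (the range is widened by 16 bytes on either side
        # to cover the _HEAP_ENTRY header), "F" marks LFH front-end
        # allocations, which carry no header of their own.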
inspect_heap = self.session.plugins.inspect_heap()
for heap in inspect_heap.GenerateHeaps():
# First do the backend allocations.
for allocation in inspect_heap.enumerate_backend_heap_allocations(
heap):
# Include the header in the allocation.
allocations.insert(
allocation.obj_offset - 16,
allocation.obj_offset + allocation.length + 16,
(allocation.obj_offset, allocation.length, "B"))
self.session.report_progress(
"Enumerating backend allocation: %#x",
lambda allocation=allocation: allocation.obj_offset)
# Now do the LFH allocations (These will mask the subsegments in the
# RangedCollection).
for _ in inspect_heap.enumerate_lfh_heap_allocations(
heap, skip_freed=False):
allocation, allocation_length = _
self.session.report_progress(
"Enumerating frontend allocation: %#x",
lambda: allocation.obj_offset)
# Front end allocations do not have their own headers.
allocations.insert(
allocation.obj_offset,
allocation.obj_offset + allocation_length,
(allocation.obj_offset, allocation_length, "F"))
return allocations
def __init__(self, *args, **kwargs):
super(ShowAllocation, self).__init__(*args, **kwargs)
self.offset = None
# Get cached allocations for current process context.
task = self.session.GetParameter("process_context")
cache_key = "heap_allocations_%x" % task.obj_offset
self.allocations = self.session.GetParameter(cache_key)
if self.allocations == None:
self.allocations = self.BuildAllocationMap()
# Cache the allocations for next time.
self.session.SetCache(cache_key, self.allocations)
def GetAllocationForAddress(self, address):
return self.allocations.get_containing_range(address)
def CreateAllocationMap(self, start, length, alloc_start, alloc_type):
address_map = core.AddressMap()
# For backend allocs we highlight the heap entry before them.
if alloc_type == "B":
address_map.AddRange(alloc_start-16, alloc_start, "_HEAP_ENTRY")
# Try to interpret pointers to other allocations and highlight them.
count = length / 8
for pointer in self.profile.Array(
offset=start, count=count, target="Pointer"):
name = None
alloc_start, alloc_length, alloc_type = (
self.allocations.get_containing_range(pointer.v()))
if alloc_type is not None:
# First check if the pointer points inside this allocation.
if alloc_start == start + 16:
name = "+%#x(%#x)" % (pointer.v() - start, pointer.v())
else:
name = "%#x(%s@%#x)" % (
pointer.v(), alloc_length, alloc_start)
else:
# Maybe it is a resolvable address.
name = ",".join(self.session.address_resolver.format_address(
pointer.v(), max_distance=1024*1024))
if name:
address_map.AddRange(
pointer.obj_offset, pointer.obj_offset + 8,
# Color it using a unique color related to the address. This
# helps to visually relate the same address across different
# dumps.
"%s" % name, color_index=pointer.obj_offset)
return address_map
def render(self, renderer):
for address in self.plugin_args.address:
# If the user requested to view more than one address we do not
# support plugin continuation (with v() plugin).
if len(self.plugin_args.address) > 1:
self.offset = None
alloc_start, alloc_length, alloc_type = (
self.allocations.get_containing_range(address))
if not alloc_type:
renderer.format("Allocation not found for address "
"{0:style=address} in any heap.\n", address)
alloc_start = address
alloc_length = 50 * 16
alloc_type = None
else:
renderer.format(
"Address {0:style=address} is {1} bytes into "
"{2} allocation of size {3} "
"({4:style=address} - {5:style=address})\n",
address, address - alloc_start, alloc_type,
alloc_length, alloc_start, alloc_start + alloc_length)
# Start dumping preamble before the address if self.offset is not
# specified. It will be specified when we run the plugin again using
# v().
if self.offset is None:
# Start dumping a little before the requested address, but do
# not go before the start of the allocation.
start = max(alloc_start, address - self.plugin_args.preamble)
else:
# Continue dumping from the last run.
start = self.offset
# Also show the _HEAP_ENTRY before backend allocations (Front end
# allocations do not have a _HEAP_ENTRY).
if alloc_type == "B":
start -= 16
length = min(alloc_start + alloc_length - start,
self.plugin_args.length)
dump = self.session.plugins.dump(
offset=start, length=length,
address_map=self.CreateAllocationMap(
start, length, alloc_start, alloc_type))
dump.render(renderer)
self.offset = dump.offset
class FindReferenceAlloc(common.WindowsCommandPlugin):
"""Show allocations that refer to an address."""
name = "show_referrer_alloc"
__args = [
dict(name="address", type="IntParser", positional=True, required=True,
help="The address to display")
]
def get_referrers(self, address, maxlen=None):
addr = self.profile.address()
addr.write(address)
pointer_scanner = scan.BaseScanner(
address_space=self.session.GetParameter("default_address_space"),
session=self.session,
checks=[
('StringCheck', dict(needle=addr.obj_vm.getvalue()))
])
# Just scan the entire userspace address space. This means we might find
# hits outside the heap but this is usually useful as it would locate
# static pointers in dlls.
if maxlen is None:
maxlen = self.session.GetParameter("highest_usermode_address")
for hit in pointer_scanner.scan(maxlen=maxlen):
yield hit
def render(self, renderer):
show_allocation = None
        for hit in self.get_referrers(self.plugin_args.address):
show_allocation = self.session.plugins.show_allocation(hit)
show_allocation.render(renderer)
return show_allocation
| gpl-2.0 | 5,344,717,608,450,970,000 | 37.594966 | 104 | 0.563441 | false |
google/makani | gs/monitor2/apps/plugins/indicators/servo.py | 1 | 17733 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Monitor indicators from the ground station."""
import collections
import operator
from makani.analysis.checks import avionics_util
from makani.analysis.checks import check_range
from makani.analysis.control import flap_limits
from makani.avionics.common import pack_avionics_messages
from makani.avionics.common import servo_types as servo_common
from makani.avionics.firmware.monitors import servo_types
from makani.avionics.network import aio_labels
from makani.control import control_types
from makani.gs.monitor2.apps.layout import indicator
from makani.gs.monitor2.apps.layout import stoplights
from makani.gs.monitor2.apps.plugins import common
from makani.gs.monitor2.apps.plugins.indicators import avionics
from makani.lib.python import c_helpers
from makani.lib.python import struct_tree
import numpy
_SERVO_WARNING_HELPER = c_helpers.EnumHelper('ServoWarning', servo_common)
_SERVO_ERROR_HELPER = c_helpers.EnumHelper('ServoError', servo_common)
_SERVO_STATUS_HELPER = c_helpers.EnumHelper('ServoStatus', servo_common)
_SERVO_LABELS_HELPER = c_helpers.EnumHelper('ServoLabel', aio_labels,
prefix='kServo')
_SERVO_ANALOG_VOLTAGE_HELPER = c_helpers.EnumHelper('ServoAnalogVoltage',
servo_types)
_SERVO_MON_WARNING_HELPER = c_helpers.EnumHelper('ServoMonitorWarning',
servo_types)
_SERVO_MON_ERROR_HELPER = c_helpers.EnumHelper('ServoMonitorError',
servo_types)
_ACTUATOR_STATE_HELPER = c_helpers.EnumHelper('ActuatorState',
pack_avionics_messages,
exclude='ActuatorStateCommand')
class BaseServoIndicator(avionics.BaseActuatorIndicator):
"""Base class with utilities shared by servo indicators."""
def __init__(self, mode, label, precision,
servo_labels=_SERVO_LABELS_HELPER.ShortNames(),
show_label=True):
super(BaseServoIndicator, self).__init__(
mode, label, precision, servo_labels, 'Servo',
_SERVO_LABELS_HELPER, common.MAX_NO_UPDATE_COUNT_SERVO_STATUS,
full_comms_message_type='ServoStatus',
tether_attribute='servo_statuses', show_label=show_label)
class BaseArmedIndicator(BaseServoIndicator):
"""Base indicator for servos' armed status."""
def _GetSingleValue(self, arg_idx, *args):
"""Obtain a single value for one servo, invoked within _GetAvailableValues.
Args:
arg_idx: The index referring to the n-th servo.
*args: The list of attributes to the indicator. The attributes vary
in different modes. For FULL_COMMS_MODE, it is the list
of ServoStatus messages for each servo, so args[arg_idx] refers to
the servo's message struct. For SPARSE_COMMS_MODE, it is
[TetherDown.servo_statuses, valid, timestamp_sec], so
args[0][`EnumValue(A2)`] refers to the state of servo A2.
Returns:
The servo status of the n-th servo.
"""
if self._mode == common.FULL_COMMS_MODE:
if struct_tree.IsValidElement(args[arg_idx]):
return args[arg_idx].flags.status
else:
return None
elif self._mode == common.SPARSE_COMMS_MODE:
return self._GetTetherValue(args[0], self._node_labels[arg_idx], 'state')
else:
assert False
@indicator.ReturnIfInputInvalid('--', stoplights.STOPLIGHT_UNAVAILABLE)
def _Filter(self, *args):
"""Get the armed information of all servos.
Args:
*args: The list of attributes to the indicator. The attributes vary
in different modes. For FULL_COMMS_MODE, it is the list
of ServoStatus messages for each servo, so args[arg_idx] refers to
the servo's message struct. For SPARSE_COMMS_MODE, it is
[TetherDown.servo_statuses, valid, timestamp_sec], so
args[0][`EnumValue(A2)`] refers to the state of servo A2.
Returns:
The text and stoplight to show.
"""
servo_status = self._GetAvailableValues(*args)
if self._mode == common.FULL_COMMS_MODE:
status_helper = _SERVO_STATUS_HELPER
expecting = ['Armed']
elif self._mode == common.SPARSE_COMMS_MODE:
status_helper = _ACTUATOR_STATE_HELPER
expecting = ['Armed', 'Running']
else:
assert False
return self._CheckStatusFlags(servo_status, status_helper, expecting,
stoplights.STOPLIGHT_ERROR)
class BaseR22TemperatureIndicator(BaseServoIndicator):
"""Base indicator for servos' R22 temperatures."""
def __init__(self, *args, **kwargs):
super(BaseR22TemperatureIndicator, self).__init__(*args, show_label=False,
**kwargs)
self._normal_ranges = check_range.BuildRanges([[None, 65]])
self._warning_ranges = check_range.BuildRanges([[None, 75]])
def _GetSingleValue(self, arg_idx, *args):
if self._mode == common.FULL_COMMS_MODE:
if struct_tree.IsValidElement(args[arg_idx]):
return args[arg_idx].r22.temperature
else:
return None
elif self._mode == common.SPARSE_COMMS_MODE:
return self._GetTetherValue(
args[0], self._node_labels[arg_idx], 'r22_temp')
else:
assert False
@indicator.ReturnIfInputInvalid('', stoplights.STOPLIGHT_UNAVAILABLE)
def _Filter(self, *args):
temperatures, stoplight = self._GetFieldInfo(
self._normal_ranges, self._warning_ranges, None, *args)
return self._DictToString(temperatures), stoplight
class BaseLvBusIndicator(indicator.BaseIndicator):
"""The base class for low voltage bus indicators."""
_voltage_names = ['LvA', 'LvB']
def __init__(self, servos, name):
self._short_names = servos
super(BaseLvBusIndicator, self).__init__(name)
def _GatherVoltageData(self, messages):
"""Gather voltage data from the messages."""
voltages = collections.defaultdict(dict)
any_value = False
warning = False
errors = []
for servo in self._short_names:
if 'ServoStatus.Servo' + servo not in messages:
continue
any_value = True
populated = messages[
'ServoStatus.Servo%s.servo_mon.analog_populated' % servo]
for voltage_name in self._voltage_names:
# Guard against bad voltage names.
if voltage_name not in _SERVO_ANALOG_VOLTAGE_HELPER:
errors.append('Servo %s: Invalid voltage (%s)' %
(servo, voltage_name))
continue
index = _SERVO_ANALOG_VOLTAGE_HELPER.Value(voltage_name)
if not avionics_util.TestMask(populated, index):
continue
voltages[voltage_name][servo] = messages[
'ServoStatus.Servo%s.servo_mon.analog_data[%d]' % (servo, index)]
warning |= avionics_util.CheckWarning(
messages['ServoStatus.Servo%s.servo_mon.flags' % servo],
_SERVO_MON_WARNING_HELPER.Value(voltage_name))
if errors:
stoplight = stoplights.STOPLIGHT_ERROR
elif not any_value:
stoplight = stoplights.STOPLIGHT_UNAVAILABLE
elif warning:
stoplight = stoplights.STOPLIGHT_WARNING
else:
stoplight = stoplights.STOPLIGHT_NORMAL
return voltages, stoplight, errors
def Filter(self, messages):
if not messages:
return '--', stoplights.STOPLIGHT_UNAVAILABLE
voltages, stoplight, errors = self._GatherVoltageData(messages)
results = [' ' + ' '.join(v.rjust(4) for v in self._voltage_names)]
for servo in self._short_names:
servo_text = '%s:' % servo
for voltage_name in self._voltage_names:
if voltage_name in voltages and servo in voltages[voltage_name]:
servo_text += ' %5.1f' % voltages[voltage_name][servo]
else:
servo_text += ' --'.rjust(6)
results.append(servo_text)
return '\n'.join(errors + results), stoplight
class ArmedTailIndicator(BaseArmedIndicator):
def __init__(self, mode):
super(ArmedTailIndicator, self).__init__(
mode, 'Tail Armed', 0, ['E1', 'E2', 'R1', 'R2'])
class ArmedPortIndicator(BaseArmedIndicator):
def __init__(self, mode):
super(ArmedPortIndicator, self).__init__(
mode, 'Port Armed', 0, ['A1', 'A2', 'A4'])
class ArmedStarboardIndicator(BaseArmedIndicator):
def __init__(self, mode):
super(ArmedStarboardIndicator, self).__init__(
mode, 'Starboard Armed', 0, ['A5', 'A7', 'A8'])
class R22TemperatureTailIndicator(BaseR22TemperatureIndicator):
def __init__(self, mode):
super(R22TemperatureTailIndicator, self).__init__(
mode, 'Tail R22 Temp', 0, ['E1', 'E2', 'R1', 'R2'])
class R22TemperaturePortIndicator(BaseR22TemperatureIndicator):
def __init__(self, mode):
super(R22TemperaturePortIndicator, self).__init__(
mode, 'Port R22 Temp', 0, ['A1', 'A2', 'A4'])
class R22TemperatureStarboardIndicator(BaseR22TemperatureIndicator):
def __init__(self, mode):
super(R22TemperatureStarboardIndicator, self).__init__(
mode, 'Star R22 Temp', 0, ['A5', 'A7', 'A8'])
class LvBusTailIndicator(BaseLvBusIndicator):
def __init__(self):
super(LvBusTailIndicator, self).__init__(
['E1', 'E2', 'R1', 'R2'], 'Tail Bus [V]')
class LvBusPortIndicator(BaseLvBusIndicator):
def __init__(self):
super(LvBusPortIndicator, self).__init__(
['A1', 'A2', 'A4'], 'Port Bus [V]')
class LvBusStarboardIndicator(BaseLvBusIndicator):
def __init__(self):
super(LvBusStarboardIndicator, self).__init__(
['A5', 'A7', 'A8'], 'Starboard Bus [V]')
class BasePosChart(avionics.ActuatorCmdDictChart):
"""The indicator to show servo position angles."""
def __init__(self, mode, name, servo_labels, show_cmd=True, **base_kwargs):
super(BasePosChart, self).__init__(
mode, name, servo_labels, 'Servo',
_SERVO_LABELS_HELPER, common.MAX_NO_UPDATE_COUNT_SERVO_STATUS,
show_cmd=show_cmd, full_comms_message_type='ServoStatus',
tether_attribute='servo_statuses', precision=0, **base_kwargs)
def _GetValuePerNode(self, arg_idx, *args):
if self._mode == common.FULL_COMMS_MODE:
return (numpy.rad2deg(args[arg_idx].angle_estimate)
if struct_tree.IsValidElement(args[arg_idx]) else None)
elif self._mode == common.SPARSE_COMMS_MODE:
rad = self._GetTetherValue(args[0], self._node_labels[arg_idx], 'angle')
return numpy.rad2deg(rad) if rad is not None else None
else:
assert False
def _GetCmdValue(self, servo, controller_command):
servo_idx = _SERVO_LABELS_HELPER.Value(servo)
return numpy.rad2deg(controller_command.servo_angle[servo_idx])
class RudPosChart(BasePosChart):
def __init__(self, mode, **widget_kwargs):
nodes = ['R1', 'R2']
super(RudPosChart, self).__init__(
mode, 'Rud Pos [°]', nodes, show_cmd=True, **widget_kwargs)
limits = flap_limits.FlapsToServos(
flap_limits.GetControlCrosswindLimits())['R1']
limits = numpy.rad2deg(limits).tolist()
self._SetLimits({
self._ObservationLabel(n): (
check_range.Interval(limits, inclusiveness=(False, False)),
check_range.AllInclusiveRange())
for n in nodes
}, [control_types.kFlightModeCrosswindNormal,
control_types.kFlightModeCrosswindPrepTransOut])
class ElePosChart(BasePosChart):
def __init__(self, mode, **widget_kwargs):
nodes = ['E1', 'E2']
super(ElePosChart, self).__init__(
mode, 'Ele Pos [°]', nodes, show_cmd=True, **widget_kwargs)
limits = flap_limits.FlapsToServos(
flap_limits.GetControlCrosswindLimits())['E1']
limits = numpy.rad2deg(limits).tolist()
self._SetLimits({
self._ObservationLabel(n): (
check_range.Interval(limits, inclusiveness=(False, False)),
check_range.AllInclusiveRange())
for n in nodes
}, [control_types.kFlightModeCrosswindNormal,
control_types.kFlightModeCrosswindPrepTransOut])
class PortPosChart(BasePosChart):
def __init__(self, mode, **widget_kwargs):
super(PortPosChart, self).__init__(
mode, 'Port Ail Pos [°]', ['A1', 'A2', 'A4'], show_cmd=True,
**widget_kwargs)
self._SetLimits({
self._ObservationLabel(n): (
check_range.Interval(
numpy.rad2deg(flap_limits.FlapsToServos(
flap_limits.GetControlCrosswindLimits())[n]).tolist(),
inclusiveness=(False, False)),
check_range.AllInclusiveRange())
for n in ['A1', 'A2']
}, [control_types.kFlightModeCrosswindNormal,
control_types.kFlightModeCrosswindPrepTransOut])
class StarboardPosChart(BasePosChart):
def __init__(self, mode, **widget_kwargs):
super(StarboardPosChart, self).__init__(
mode, 'Star Ail Pos [°]', ['A5', 'A7', 'A8'], show_cmd=True,
**widget_kwargs)
self._SetLimits({
self._ObservationLabel(n): (
check_range.Interval(
numpy.rad2deg(flap_limits.FlapsToServos(
flap_limits.GetControlCrosswindLimits())[n]).tolist(),
inclusiveness=(False, False)),
check_range.AllInclusiveRange())
for n in ['A7', 'A8']
}, [control_types.kFlightModeCrosswindNormal,
control_types.kFlightModeCrosswindPrepTransOut])
class LvBusSummaryIndicator(BaseLvBusIndicator):
"""The summary class for low voltage bus indicators."""
_voltage_names = ['LvA', 'LvB']
def __init__(self):
super(LvBusSummaryIndicator, self).__init__(
_SERVO_LABELS_HELPER.ShortNames(), 'Servo LV Bus [V]')
def Filter(self, messages):
if not messages:
return '--', stoplights.STOPLIGHT_UNAVAILABLE
all_voltages, stoplight, errors = self._GatherVoltageData(messages)
all_stats = {}
for voltage_name in self._voltage_names:
voltages = all_voltages[voltage_name]
sorted_pairs = sorted(voltages.items(), key=operator.itemgetter(1))
num_units = len(voltages)
all_stats[voltage_name] = {
'min': sorted_pairs[0] if voltages else None,
'max': sorted_pairs[-1] if voltages else None,
'median': sorted_pairs[num_units / 2] if voltages else None,
}
delimiter = ' '
results = [' '.rjust(7) + delimiter +
delimiter.join(v.rjust(8) for v in self._voltage_names)]
for metric in ['min', 'max', 'median']:
text = metric.rjust(7)
for voltage_name in self._voltage_names:
stats = all_stats[voltage_name]
text += delimiter
if stats[metric] is not None:
if isinstance(stats[metric], tuple):
text += '{: 2.1f}({:2})'.format(
stats[metric][1], stats[metric][0])
else:
text += '{: 7.1f}'.format(stats[metric])
else:
text += '--'.rjust(8)
results.append(text)
return '\n'.join(errors + results), stoplight
class StatusIndicator(BaseServoIndicator):
"""Summary servo status."""
@indicator.RegisterModes(common.FULL_COMMS_MODE, common.SPARSE_COMMS_MODE)
def __init__(self, mode, **format_kwargs):
super(StatusIndicator, self).__init__(mode, 'Servo Status', 0)
self._format_kwargs = format_kwargs
def _GetSingleValue(self, arg_idx, *args):
if self._mode == common.FULL_COMMS_MODE:
if struct_tree.IsValidElement(args[arg_idx]):
return [args[arg_idx].flags, args[arg_idx].servo_mon.flags]
else:
return None
elif self._mode == common.SPARSE_COMMS_MODE:
return self._GetTetherValue(
args[0], self._node_labels[arg_idx], 'state')
else:
assert False
@indicator.ReturnIfInputInvalid('', stoplights.STOPLIGHT_UNAVAILABLE)
def _Filter(self, *attributes):
any_warning_or_error = False
warnings = collections.defaultdict(list)
errors = collections.defaultdict(list)
report_by_servo = collections.defaultdict(list)
any_servo = False
reports = self._GetAvailableValues(*attributes)
for servo in _SERVO_LABELS_HELPER.ShortNames():
if servo not in reports or reports[servo] is None:
continue
if self._mode == common.FULL_COMMS_MODE:
flags, mon_flags = reports[servo]
any_servo = True
if common.CheckFlags(servo, report_by_servo, warnings, errors, flags,
_SERVO_WARNING_HELPER, _SERVO_ERROR_HELPER):
any_warning_or_error = True
if common.CheckFlags(
servo, report_by_servo, warnings, errors, mon_flags,
_SERVO_MON_WARNING_HELPER, _SERVO_MON_ERROR_HELPER):
any_warning_or_error = True
elif self._mode == common.SPARSE_COMMS_MODE:
any_servo = True
if reports[servo] & _ACTUATOR_STATE_HELPER.Value('Error'):
any_warning_or_error = True
report_by_servo[servo].append(('ERROR', 'status'))
errors['status'].append(servo)
return common.SummarizeWarningsAndErrors(
any_servo, report_by_servo, warnings, errors, any_warning_or_error,
**self._format_kwargs)
| apache-2.0 | 5,210,630,127,275,359,000 | 35.189796 | 79 | 0.646366 | false |
barak/autograd | examples/fluidsim/wing.py | 1 | 6136 | from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import os
from builtins import range
rows, cols = 40, 60
# Fluid simulation code based on
# "Real-Time Fluid Dynamics for Games" by Jos Stam
# http://www.intpowertechcorp.com/GDC03.pdf
def occlude(f, occlusion):
return f * (1 - occlusion)
def project(vx, vy, occlusion):
"""Project the velocity field to be approximately mass-conserving,
using a few iterations of Gauss-Seidel."""
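    # Matching the code below: estimate the velocity divergence with central
    # differences, relax the pressure field p with 50 vectorized (Jacobi-style)
    # sweeps, then subtract the pressure gradient from vx/vy so the field is
    # approximately divergence-free. Occluded cells are masked out after each
    # step.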
p = np.zeros(vx.shape)
div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1)
+ np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))
div = make_continuous(div, occlusion)
for k in range(50):
p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1)
+ np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0
p = make_continuous(p, occlusion)
vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))
vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))
vx = occlude(vx, occlusion)
vy = occlude(vy, occlusion)
return vx, vy
def advect(f, vx, vy):
"""Move field f according to x and y velocities (u and v)
using an implicit Euler integrator."""
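    # Semi-Lagrangian step: trace each cell centre backwards along (vx, vy) to
    # its source point, then bilinearly interpolate f from the four cells
    # surrounding that point. np.mod wraps the indices, giving periodic
    # boundaries.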
rows, cols = f.shape
cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
center_xs = (cell_xs - vx).ravel()
center_ys = (cell_ys - vy).ravel()
# Compute indices of source cells.
left_ix = np.floor(center_ys).astype(np.int)
top_ix = np.floor(center_xs).astype(np.int)
rw = center_ys - left_ix # Relative weight of right-hand cells.
bw = center_xs - top_ix # Relative weight of bottom cells.
left_ix = np.mod(left_ix, rows) # Wrap around edges of simulation.
right_ix = np.mod(left_ix + 1, rows)
top_ix = np.mod(top_ix, cols)
bot_ix = np.mod(top_ix + 1, cols)
# A linearly-weighted sum of the 4 surrounding cells.
flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \
+ rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
return np.reshape(flat_f, (rows, cols))
def make_continuous(f, occlusion):
non_occluded = 1 - occlusion
num = np.roll(f, 1, axis=0) * np.roll(non_occluded, 1, axis=0)\
+ np.roll(f, -1, axis=0) * np.roll(non_occluded, -1, axis=0)\
+ np.roll(f, 1, axis=1) * np.roll(non_occluded, 1, axis=1)\
+ np.roll(f, -1, axis=1) * np.roll(non_occluded, -1, axis=1)
den = np.roll(non_occluded, 1, axis=0)\
+ np.roll(non_occluded, -1, axis=0)\
+ np.roll(non_occluded, 1, axis=1)\
+ np.roll(non_occluded, -1, axis=1)
return f * non_occluded + (1 - non_occluded) * num / ( den + 0.001)
def sigmoid(x):
return 0.5*(np.tanh(x) + 1.0) # Output ranges from 0 to 1.
def simulate(vx, vy, num_time_steps, occlusion, ax=None, render=False):
occlusion = sigmoid(occlusion)
# Disallow occlusion outside a certain area.
mask = np.zeros((rows, cols))
mask[10:30, 10:30] = 1.0
occlusion = occlusion * mask
# Initialize smoke bands.
red_smoke = np.zeros((rows, cols))
red_smoke[rows/4:rows/2] = 1
blue_smoke = np.zeros((rows, cols))
blue_smoke[rows/2:3*rows/4] = 1
print("Running simulation...")
vx, vy = project(vx, vy, occlusion)
for t in range(num_time_steps):
plot_matrix(ax, red_smoke, occlusion, blue_smoke, t, render)
vx_updated = advect(vx, vx, vy)
vy_updated = advect(vy, vx, vy)
vx, vy = project(vx_updated, vy_updated, occlusion)
red_smoke = advect(red_smoke, vx, vy)
red_smoke = occlude(red_smoke, occlusion)
blue_smoke = advect(blue_smoke, vx, vy)
blue_smoke = occlude(blue_smoke, occlusion)
plot_matrix(ax, red_smoke, occlusion, blue_smoke, num_time_steps, render)
return vx, vy
def plot_matrix(ax, r, g, b, t, render=False):
if ax:
plt.cla()
ax.imshow(np.concatenate((r[...,np.newaxis], g[...,np.newaxis], b[...,np.newaxis]), axis=2))
ax.set_xticks([])
ax.set_yticks([])
plt.draw()
if render:
plt.savefig('step{0:03d}.png'.format(t), bbox_inches='tight')
plt.pause(0.001)
if __name__ == '__main__':
simulation_timesteps = 20
print("Loading initial and target states...")
init_vx = np.ones((rows, cols))
init_vy = np.zeros((rows, cols))
# Initialize the occlusion to be a block.
init_occlusion = -np.ones((rows, cols))
init_occlusion[15:25, 15:25] = 0.0
init_occlusion = init_occlusion.ravel()
def drag(vx): return np.mean(init_vx - vx)
def lift(vy): return np.mean(vy - init_vy)
def objective(params):
cur_occlusion = np.reshape(params, (rows, cols))
final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion)
return -lift(final_vy) / drag(final_vx)
# Specify gradient of objective function using autograd.
objective_with_grad = value_and_grad(objective)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, frameon=False)
def callback(weights):
cur_occlusion = np.reshape(weights, (rows, cols))
simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion, ax)
print("Rendering initial flow...")
callback(init_occlusion)
print("Optimizing initial conditions...")
result = minimize(objective_with_grad, init_occlusion, jac=True, method='CG',
options={'maxiter':50, 'disp':True}, callback=callback)
print("Rendering optimized flow...")
final_occlusion = np.reshape(result.x, (rows, cols))
simulate(init_vx, init_vy, simulation_timesteps, final_occlusion, ax, render=True)
print("Converting frames to an animated GIF...") # Using imagemagick.
os.system("convert -delay 5 -loop 0 step*.png "
"-delay 250 step{0:03d}.png wing.gif".format(simulation_timesteps))
os.system("rm step*.png")
| mit | 7,440,294,014,902,484,000 | 36.414634 | 100 | 0.608703 | false |
gkc1000/pyscf | pyscf/nao/test/test_0021_nao_after_gpaw.py | 1 | 2374 | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os,unittest,numpy as np
try:
from ase import Atoms
from gpaw import GPAW
skip_test = False
fname = os.path.dirname(os.path.abspath(__file__))+'/h2o.gpw'
if os.path.isfile(fname):
calc = GPAW(fname, txt=None) # read previous calculation if the file exists
else:
from gpaw import PoissonSolver
atoms = Atoms('H2O', positions=[[0.0,-0.757,0.587], [0.0,+0.757,0.587], [0.0,0.0,0.0]])
atoms.center(vacuum=3.5)
convergence = {'density': 1e-7} # Increase accuracy of density for ground state
poissonsolver = PoissonSolver(eps=1e-14, remove_moment=1 + 3) # Increase accuracy of Poisson Solver and apply multipole corrections up to l=1
calc = GPAW(basis='dzp', xc='LDA', h=0.3, nbands=23, convergence=convergence, poissonsolver=poissonsolver, mode='lcao', txt=None) # nbands must be equal to norbs (in this case 23)
atoms.set_calculator(calc)
atoms.get_potential_energy() # Do SCF the ground state
calc.write(fname, mode='all') # write DFT output
except:
skip_test = True
class KnowValues(unittest.TestCase):
def test_nao_after_gpaw(self):
""" Do GPAW LCAO calculation, then init system_vars_c with it """
if skip_test: return
#print(dir(calc.atoms))
#print(dir(calc))
#print(dir(calc.hamiltonian))
# for aname in dir(calc.hamiltonian):
# print(aname, getattr(calc.hamiltonian, aname))
#print(calc.setups.id_a) # this is atom->specie !
#print(dir(calc.setups))
#print(calc.setups.nao)
#print(dir(calc.setups.setups[(1, 'paw', u'dzp')]))
# O = calc.setups.setups[(8, 'paw', u'dzp')]
# for aname in dir(O):
# print(aname, getattr(O, aname))
if __name__ == "__main__": unittest.main()
| apache-2.0 | 8,433,557,450,837,063,000 | 39.237288 | 187 | 0.689553 | false |
MarvinTeichmann/KittiBox | tests/test_anno_load.py | 1 | 14271 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import json
import logging
import os
import sys
import random
from random import shuffle
import numpy as np
import scipy as scp
import scipy.misc
sys.path.insert(1, '../incl')
from scipy.misc import imread, imresize
from utils.data_utils import (annotation_jitter, annotation_to_h5)
from utils.annolist import AnnotationLib as AnnoLib
import threading
from collections import namedtuple
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
tf.app.flags.DEFINE_boolean(
'save', False, ('Whether to save the run. In case --nosave (default) '
'output will be saved to the folder TV_DIR_RUNS/debug, '
'hence it will get overwritten by further runs.'))
flags.DEFINE_string('name', None,
'Append a name Tag to run.')
fake_anno = namedtuple('fake_anno_object', ['rects'])
from PIL import Image, ImageDraw
rect = namedtuple('Rectangel', ['left', 'top', 'right', 'bottom'])
def _get_ignore_rect(x, y, cell_size):
left = x*cell_size
right = (x+1)*cell_size
top = y*cell_size
bottom = (y+1)*cell_size
return rect(left, top, right, bottom)
def _rescale_boxes(current_shape, anno, target_height, target_width):
x_scale = target_width / float(current_shape[1])
y_scale = target_height / float(current_shape[0])
for r in anno.rects:
assert r.x1 < r.x2
r.x1 *= x_scale
r.x2 *= x_scale
assert r.y1 < r.y2
r.y1 *= y_scale
r.y2 *= y_scale
return anno
def read_kitti_anno(label_file):
""" Reads a kitti annotation file.
Args:
label_file: Path to file
Returns:
      Lists of rectangles: Cars and don't care areas.
"""
labels = [line.rstrip().split(' ') for line in open(label_file)]
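    # Each label line follows the standard KITTI devkit layout, roughly:
    #   type truncated occluded alpha x1 y1 x2 y2 h w l tx ty tz ry
    # Only the object type (field 0) and the 2D box corners (fields 4-7) are
    # used here.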
rect_list = []
for label in labels:
if not (label[0] == 'Car' or label[0] == 'Van' or
label[0] == 'DontCare'):
continue
if label[0] == 'DontCare':
class_id = -1
else:
class_id = 1
object_rect = AnnoLib.AnnoRect(
x1=float(label[4]), y1=float(label[5]),
x2=float(label[6]), y2=float(label[7]))
assert object_rect.x1 < object_rect.x2
assert object_rect.y1 < object_rect.y2
object_rect.classID = class_id
rect_list.append(object_rect)
return rect_list
def _load_idl_tf(idlfile, hypes, jitter=False, random_shuffel=True):
"""Take the idlfile and net configuration and create a generator
that outputs a jittered version of a random image from the annolist
that is mean corrected."""
annolist = AnnoLib.parse(idlfile)
annos = []
for anno in annolist:
anno.imageName = os.path.join(
os.path.dirname(os.path.realpath(idlfile)), anno.imageName)
annos.append(anno)
random.seed(0)
if hypes['data']['truncate_data']:
annos = annos[:10]
for epoch in itertools.count():
if random_shuffel:
random.shuffle(annos)
for anno in annos:
im = imread(anno.imageName)
if im.shape[2] == 4:
im = im[:, :, :3]
if im.shape[0] != hypes["image_height"] or \
im.shape[1] != hypes["image_width"]:
if epoch == 0:
anno = _rescale_boxes(im.shape, anno,
hypes["image_height"],
hypes["image_width"])
im = imresize(
im, (hypes["image_height"], hypes["image_width"]),
interp='cubic')
if jitter:
jitter_scale_min = 0.9
jitter_scale_max = 1.1
jitter_offset = 16
im, anno = annotation_jitter(
im, anno, target_width=hypes["image_width"],
target_height=hypes["image_height"],
jitter_scale_min=jitter_scale_min,
jitter_scale_max=jitter_scale_max,
jitter_offset=jitter_offset)
boxes, flags = annotation_to_h5(hypes,
anno,
hypes["grid_width"],
hypes["grid_height"],
hypes["rnn_len"])
boxes = boxes.reshape([hypes["grid_height"],
hypes["grid_width"], 4])
flags = flags.reshape(hypes["grid_height"], hypes["grid_width"])
yield {"image": im, "boxes": boxes, "flags": flags,
"rects": anno.rects, "anno": anno}
def _generate_mask(hypes, ignore_rects):
width = hypes["image_width"]
height = hypes["image_height"]
grid_width = hypes["grid_width"]
grid_height = hypes["grid_height"]
mask = np.ones([grid_height, grid_width])
for rect in ignore_rects:
left = int(rect.x1/width*grid_width)
right = int(rect.x2/width*grid_width)
top = int(rect.y1/height*grid_height)
bottom = int(rect.y2/height*grid_height)
for x in range(left, right+1):
for y in range(top, bottom+1):
mask[y, x] = 0
return mask
def _load_kitti_txt(kitti_txt, hypes, jitter=False, random_shuffel=True):
"""Take the txt file and net configuration and create a generator
that outputs a jittered version of a random image from the annolist
that is mean corrected."""
base_path = os.path.realpath(os.path.dirname(kitti_txt))
files = [line.rstrip() for line in open(kitti_txt)]
if hypes['data']['truncate_data']:
files = files[:10]
random.seed(0)
for epoch in itertools.count():
if random_shuffel:
random.shuffle(files)
for file in files:
image_file, gt_image_file = file.split(" ")
image_file = os.path.join(base_path, image_file)
assert os.path.exists(image_file), \
"File does not exist: %s" % image_file
gt_image_file = os.path.join(base_path, gt_image_file)
assert os.path.exists(gt_image_file), \
"File does not exist: %s" % gt_image_file
rect_list = read_kitti_anno(gt_image_file)
anno = fake_anno(rect_list)
im = scp.misc.imread(image_file)
if im.shape[2] == 4:
im = im[:, :, :3]
if im.shape[0] != hypes["image_height"] or \
im.shape[1] != hypes["image_width"]:
if epoch == 0:
anno = _rescale_boxes(im.shape, anno,
hypes["image_height"],
hypes["image_width"])
im = imresize(
im, (hypes["image_height"], hypes["image_width"]),
interp='cubic')
if jitter:
jitter_scale_min = 0.9
jitter_scale_max = 1.1
jitter_offset = 16
im, anno = annotation_jitter(
im, anno, target_width=hypes["image_width"],
target_height=hypes["image_height"],
jitter_scale_min=jitter_scale_min,
jitter_scale_max=jitter_scale_max,
jitter_offset=jitter_offset)
pos_list = [rect for rect in anno.rects if rect.classID == 1]
pos_anno = fake_anno(pos_list)
boxes, confs = annotation_to_h5(hypes,
pos_anno,
hypes["grid_width"],
hypes["grid_height"],
hypes["rnn_len"])
mask_list = [rect for rect in anno.rects if rect.classID == -1]
mask = _generate_mask(hypes, mask_list)
boxes = boxes.reshape([hypes["grid_height"],
hypes["grid_width"], 4])
confs = confs.reshape(hypes["grid_height"], hypes["grid_width"])
yield {"image": im, "boxes": boxes, "confs": confs,
"rects": pos_list, "mask": mask}
def _make_sparse(n, d):
v = np.zeros((d,), dtype=np.float32)
v[n] = 1.
return v
def _load_data_gen(hypes, phase, jitter):
grid_size = hypes['grid_width'] * hypes['grid_height']
data_file = hypes["data"]['%s_idl' % phase]
data_dir = hypes['dirs']['data_dir']
data_file = os.path.join(data_dir, data_file)
data = _load_idl_tf(data_file, hypes,
jitter={'train': jitter, 'val': False}[phase])
for d in data:
output = {}
rnn_len = hypes["rnn_len"]
flags = d['flags'][0, :, 0, 0:rnn_len, 0]
boxes = np.transpose(d['boxes'][0, :, :, 0:rnn_len, 0], (0, 2, 1))
assert(flags.shape == (grid_size, rnn_len))
assert(boxes.shape == (grid_size, rnn_len, 4))
output['image'] = d['image']
confs = [[_make_sparse(int(detection), d=hypes['num_classes'])
for detection in cell] for cell in flags]
output['confs'] = np.array(confs)
output['boxes'] = boxes
output['flags'] = flags
yield output
def test_new_kitti():
idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl"
kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt"
with open('../hypes/kittiBox.json', 'r') as f:
logging.info("f: %s", f)
hypes = json.load(f)
hypes["rnn_len"] = 1
hypes["image_height"] = 200
hypes["image_width"] = 800
gen1 = _load_kitti_txt(kitti_txt, hypes, random_shuffel=False)
gen2 = _load_idl_tf(idlfile, hypes, random_shuffel=False)
print('testing generators')
for i in range(20):
data1 = gen1.next()
data2 = gen2.next()
rects1 = data1['rects']
rects2 = data2['rects']
assert len(rects1) <= len(rects2)
if not len(rects1) == len(rects2):
print('ignoring flags')
continue
else:
print('comparing flags')
assert(np.all(data1['image'] == data2['image']))
# assert(np.all(data1['boxes'] == data2['boxes']))
if np.all(data1['flags'] == data2['flags']):
print('same')
else:
print('diff')
def draw_rect(draw, rect, color):
rect_cords = ((rect.left, rect.top), (rect.left, rect.bottom),
(rect.right, rect.bottom), (rect.right, rect.top),
(rect.left, rect.top))
draw.line(rect_cords, fill=color, width=2)
def draw_encoded(image, confs, mask=None, rects=None, cell_size=32):
image = image.astype('uint8')
im = Image.fromarray(image)
shape = confs.shape
if mask is None:
mask = np.ones(shape)
# overimage = mycm(confs_pred, bytes=True)
poly = Image.new('RGBA', im.size)
pdraw = ImageDraw.Draw(poly)
for y in range(shape[0]):
for x in range(shape[1]):
outline = (0, 0, 0, 255)
if confs[y, x]:
fill = (0, 255, 0, 100)
else:
fill = (0, 0, 0, 0)
rect = _get_ignore_rect(x, y, cell_size)
pdraw.rectangle(rect, fill=fill,
outline=fill)
if not mask[y, x]:
pdraw.line(((rect.left, rect.bottom), (rect.right, rect.top)),
fill=(0, 0, 0, 255), width=2)
pdraw.line(((rect.left, rect.top), (rect.right, rect.bottom)),
fill=(0, 0, 0, 255), width=2)
color = (0, 0, 255)
for rect in rects:
rect_cords = ((rect.x1, rect.y1), (rect.x1, rect.y2),
(rect.x2, rect.y2), (rect.x2, rect.y1),
(rect.x1, rect.y1))
pdraw.line(rect_cords, fill=color, width=2)
im.paste(poly, mask=poly)
return np.array(im)
def draw_kitti_jitter():
idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl"
kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt"
with open('../hypes/kittiBox.json', 'r') as f:
logging.info("f: %s", f)
hypes = json.load(f)
hypes["rnn_len"] = 1
gen = _load_kitti_txt(kitti_txt, hypes, random_shuffel=False)
data = gen.next()
for i in range(20):
data = gen.next()
image = draw_encoded(image=data['image'], confs=data['confs'],
rects=data['rects'], mask=data['mask'])
scp.misc.imshow(image)
scp.misc.imshow(data['mask'])
def draw_idl():
idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl"
kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt"
with open('../hypes/kittiBox.json', 'r') as f:
logging.info("f: %s", f)
hypes = json.load(f)
hypes["rnn_len"] = 1
gen = _load_idl_tf(idlfile, hypes, random_shuffel=False)
data = gen.next()
for i in range(20):
data = gen.next()
image = draw_encoded(image=data['image'], confs=data['flags'],
rects=data['rects'])
scp.misc.imshow(image)
def draw_both():
idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl"
kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt"
with open('../hypes/kittiBox.json', 'r') as f:
logging.info("f: %s", f)
hypes = json.load(f)
hypes["rnn_len"] = 1
gen1 = _load_idl_tf(idlfile, hypes, random_shuffel=False)
gen2 = _load_kitti_txt(kitti_txt, hypes, random_shuffel=False)
data1 = gen1.next()
data2 = gen2.next()
for i in range(20):
data1 = gen1.next()
data2 = gen2.next()
image1 = draw_encoded(image=data1['image'], confs=data1['flags'],
rects=data1['rects'])
image2 = draw_encoded(image=data2['image'], confs=data2['confs'],
rects=data2['rects'], mask=data2['mask'])
scp.misc.imshow(image1)
scp.misc.imshow(image2)
if __name__ == '__main__':
draw_both()
| mit | -198,509,635,553,043,260 | 31.434091 | 78 | 0.529676 | false |
endlessm/chromium-browser | third_party/llvm/llvm/utils/lit/tests/unit/TestRunner.py | 1 | 11413 | # RUN: %{python} %s
#
# END.
import os.path
import platform
import unittest
import lit.discovery
import lit.LitConfig
import lit.Test as Test
from lit.TestRunner import ParserKind, IntegratedTestKeywordParser, \
parseIntegratedTestScript
class TestIntegratedTestKeywordParser(unittest.TestCase):
inputTestCase = None
@staticmethod
def load_keyword_parser_lit_tests():
"""
Create and load the LIT test suite and test objects used by
TestIntegratedTestKeywordParser
"""
# Create the global config object.
lit_config = lit.LitConfig.LitConfig(progname='lit',
path=[],
quiet=False,
useValgrind=False,
valgrindLeakCheck=False,
valgrindArgs=[],
noExecute=False,
debug=False,
isWindows=(
platform.system() == 'Windows'),
params={})
TestIntegratedTestKeywordParser.litConfig = lit_config
# Perform test discovery.
test_path = os.path.dirname(os.path.dirname(__file__))
inputs = [os.path.join(test_path, 'Inputs/testrunner-custom-parsers/')]
assert os.path.isdir(inputs[0])
tests = lit.discovery.find_tests_for_inputs(lit_config, inputs)
assert len(tests) == 1 and "there should only be one test"
TestIntegratedTestKeywordParser.inputTestCase = tests[0]
@staticmethod
def make_parsers():
def custom_parse(line_number, line, output):
if output is None:
output = []
output += [part for part in line.split(' ') if part.strip()]
return output
return [
IntegratedTestKeywordParser("MY_TAG.", ParserKind.TAG),
IntegratedTestKeywordParser("MY_DNE_TAG.", ParserKind.TAG),
IntegratedTestKeywordParser("MY_LIST:", ParserKind.LIST),
IntegratedTestKeywordParser("MY_BOOL:", ParserKind.BOOLEAN_EXPR),
IntegratedTestKeywordParser("MY_INT:", ParserKind.INTEGER),
IntegratedTestKeywordParser("MY_RUN:", ParserKind.COMMAND),
IntegratedTestKeywordParser("MY_CUSTOM:", ParserKind.CUSTOM,
custom_parse),
]
@staticmethod
def get_parser(parser_list, keyword):
for p in parser_list:
if p.keyword == keyword:
return p
assert False and "parser not found"
@staticmethod
def parse_test(parser_list):
script = parseIntegratedTestScript(
TestIntegratedTestKeywordParser.inputTestCase,
additional_parsers=parser_list, require_script=False)
assert not isinstance(script, lit.Test.Result)
assert isinstance(script, list)
assert len(script) == 0
def test_tags(self):
parsers = self.make_parsers()
self.parse_test(parsers)
tag_parser = self.get_parser(parsers, 'MY_TAG.')
dne_tag_parser = self.get_parser(parsers, 'MY_DNE_TAG.')
self.assertTrue(tag_parser.getValue())
self.assertFalse(dne_tag_parser.getValue())
def test_lists(self):
parsers = self.make_parsers()
self.parse_test(parsers)
list_parser = self.get_parser(parsers, 'MY_LIST:')
self.assertEqual(list_parser.getValue(),
['one', 'two', 'three', 'four'])
def test_commands(self):
parsers = self.make_parsers()
self.parse_test(parsers)
cmd_parser = self.get_parser(parsers, 'MY_RUN:')
value = cmd_parser.getValue()
self.assertEqual(len(value), 2) # there are only two run lines
self.assertEqual(value[0].strip(), "%dbg(MY_RUN: at line 4) baz")
self.assertEqual(value[1].strip(), "%dbg(MY_RUN: at line 7) foo bar")
def test_boolean(self):
parsers = self.make_parsers()
self.parse_test(parsers)
bool_parser = self.get_parser(parsers, 'MY_BOOL:')
value = bool_parser.getValue()
self.assertEqual(len(value), 2) # there are only two run lines
self.assertEqual(value[0].strip(), "a && (b)")
self.assertEqual(value[1].strip(), "d")
def test_integer(self):
parsers = self.make_parsers()
self.parse_test(parsers)
int_parser = self.get_parser(parsers, 'MY_INT:')
value = int_parser.getValue()
self.assertEqual(len(value), 2) # there are only two MY_INT: lines
self.assertEqual(type(value[0]), int)
self.assertEqual(value[0], 4)
self.assertEqual(type(value[1]), int)
self.assertEqual(value[1], 6)
def test_boolean_unterminated(self):
parsers = self.make_parsers() + \
[IntegratedTestKeywordParser("MY_BOOL_UNTERMINATED:", ParserKind.BOOLEAN_EXPR)]
try:
self.parse_test(parsers)
self.fail('expected exception')
except ValueError as e:
self.assertIn("Test has unterminated MY_BOOL_UNTERMINATED: lines", str(e))
def test_custom(self):
parsers = self.make_parsers()
self.parse_test(parsers)
custom_parser = self.get_parser(parsers, 'MY_CUSTOM:')
value = custom_parser.getValue()
self.assertEqual(value, ['a', 'b', 'c'])
def test_bad_keywords(self):
def custom_parse(line_number, line, output):
return output
try:
IntegratedTestKeywordParser("TAG_NO_SUFFIX", ParserKind.TAG),
self.fail("TAG_NO_SUFFIX failed to raise an exception")
except ValueError as e:
pass
except BaseException as e:
self.fail("TAG_NO_SUFFIX raised the wrong exception: %r" % e)
try:
IntegratedTestKeywordParser("TAG_WITH_COLON:", ParserKind.TAG),
self.fail("TAG_WITH_COLON: failed to raise an exception")
except ValueError as e:
pass
except BaseException as e:
self.fail("TAG_WITH_COLON: raised the wrong exception: %r" % e)
try:
IntegratedTestKeywordParser("LIST_WITH_DOT.", ParserKind.LIST),
self.fail("LIST_WITH_DOT. failed to raise an exception")
except ValueError as e:
pass
except BaseException as e:
self.fail("LIST_WITH_DOT. raised the wrong exception: %r" % e)
try:
IntegratedTestKeywordParser("CUSTOM_NO_SUFFIX",
ParserKind.CUSTOM, custom_parse),
self.fail("CUSTOM_NO_SUFFIX failed to raise an exception")
except ValueError as e:
pass
except BaseException as e:
self.fail("CUSTOM_NO_SUFFIX raised the wrong exception: %r" % e)
# Both '.' and ':' are allowed for CUSTOM keywords.
try:
IntegratedTestKeywordParser("CUSTOM_WITH_DOT.",
ParserKind.CUSTOM, custom_parse),
except BaseException as e:
self.fail("CUSTOM_WITH_DOT. raised an exception: %r" % e)
try:
IntegratedTestKeywordParser("CUSTOM_WITH_COLON:",
ParserKind.CUSTOM, custom_parse),
except BaseException as e:
self.fail("CUSTOM_WITH_COLON: raised an exception: %r" % e)
try:
IntegratedTestKeywordParser("CUSTOM_NO_PARSER:",
ParserKind.CUSTOM),
self.fail("CUSTOM_NO_PARSER: failed to raise an exception")
except ValueError as e:
pass
except BaseException as e:
self.fail("CUSTOM_NO_PARSER: raised the wrong exception: %r" % e)
class TestApplySubstitutions(unittest.TestCase):
def test_simple(self):
script = ["echo %bar"]
substitutions = [("%bar", "hello")]
result = lit.TestRunner.applySubstitutions(script, substitutions)
self.assertEqual(result, ["echo hello"])
def test_multiple_substitutions(self):
script = ["echo %bar %baz"]
substitutions = [("%bar", "hello"),
("%baz", "world"),
("%useless", "shouldnt expand")]
result = lit.TestRunner.applySubstitutions(script, substitutions)
self.assertEqual(result, ["echo hello world"])
def test_multiple_script_lines(self):
script = ["%cxx %compile_flags -c -o %t.o",
"%cxx %link_flags %t.o -o %t.exe"]
substitutions = [("%cxx", "clang++"),
("%compile_flags", "-std=c++11 -O3"),
("%link_flags", "-lc++")]
result = lit.TestRunner.applySubstitutions(script, substitutions)
self.assertEqual(result, ["clang++ -std=c++11 -O3 -c -o %t.o",
"clang++ -lc++ %t.o -o %t.exe"])
def test_recursive_substitution_real(self):
script = ["%build %s"]
substitutions = [("%cxx", "clang++"),
("%compile_flags", "-std=c++11 -O3"),
("%link_flags", "-lc++"),
("%build", "%cxx %compile_flags %link_flags %s -o %t.exe")]
result = lit.TestRunner.applySubstitutions(script, substitutions, recursion_limit=3)
self.assertEqual(result, ["clang++ -std=c++11 -O3 -lc++ %s -o %t.exe %s"])
def test_recursive_substitution_limit(self):
script = ["%rec5"]
# Make sure the substitutions are not in an order where the global
# substitution would appear to be recursive just because they are
# processed in the right order.
substitutions = [("%rec1", "STOP"), ("%rec2", "%rec1"),
("%rec3", "%rec2"), ("%rec4", "%rec3"), ("%rec5", "%rec4")]
for limit in [5, 6, 7]:
result = lit.TestRunner.applySubstitutions(script, substitutions, recursion_limit=limit)
self.assertEqual(result, ["STOP"])
def test_recursive_substitution_limit_exceeded(self):
script = ["%rec5"]
substitutions = [("%rec1", "STOP"), ("%rec2", "%rec1"),
("%rec3", "%rec2"), ("%rec4", "%rec3"), ("%rec5", "%rec4")]
for limit in [0, 1, 2, 3, 4]:
try:
lit.TestRunner.applySubstitutions(script, substitutions, recursion_limit=limit)
self.fail("applySubstitutions should have raised an exception")
except ValueError:
pass
def test_recursive_substitution_invalid_value(self):
script = ["%rec5"]
substitutions = [("%rec1", "STOP"), ("%rec2", "%rec1"),
("%rec3", "%rec2"), ("%rec4", "%rec3"), ("%rec5", "%rec4")]
for limit in [-1, -2, -3, "foo"]:
try:
lit.TestRunner.applySubstitutions(script, substitutions, recursion_limit=limit)
self.fail("applySubstitutions should have raised an exception")
except AssertionError:
pass
if __name__ == '__main__':
TestIntegratedTestKeywordParser.load_keyword_parser_lit_tests()
unittest.main(verbosity=2)
| bsd-3-clause | 876,312,506,175,296,800 | 40.959559 | 100 | 0.557697 | false |
MrBloodyshadow/ShadowHunter | ini_file_validator.py | 1 | 1651 | # -*- coding: utf-8 -*-
import configparser
def config_file_exists(filename):
    config = configparser.ConfigParser()
    try:
        with open(filename) as f:
            # read_file() is the configparser API for file objects; the
            # 'with' block closes the file automatically.
            config.read_file(f)
    except IOError:
        raise
def validate_ini_file(filename, sections):
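    # 'sections' is either a single (section_name, [option, ...]) pair or a
    # list of such pairs; every listed option must exist and be non-blank.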
try:
config_file_exists(filename)
valid = True
except IOError as e:
print(e)
valid = False
if valid:
config = configparser.ConfigParser()
config.read(filename)
if type(sections) is list:
for section in sections:
valid = check_section(filename, config, section) and valid
else:
valid = check_section(filename, config, sections)
return valid
def check_section(filename, config, section):
valid = True
section_name = section[0]
if not config.has_section(section_name):
print('Section "{}" is missing in the "{}" file.'.format(section_name, filename))
valid = False
else:
section_values = config[section_name]
section_options = section[1]
for option in section_options:
if not config.has_option(section_name, option):
print('Option "{}" in section "{}" is missing in the "{}" file.'.
format(option, section_name, filename))
valid = False
else:
if len(section_values[option]) == 0:
print('Option "{}" in section "{}" is blank in the "{}" file.'.
format(option, section_name, filename))
valid = False
return valid
| mit | -8,916,940,849,141,490,000 | 27.964912 | 89 | 0.553604 | false |
baayso/learn-python3 | basic/the_list.py | 1 | 1216 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# A list is an ordered collection; elements can be added and removed at any time
classmates = ['Michael', 'Bob', 'Tracy']
print(classmates)
print(len(classmates))
print(classmates[0])
print(classmates[1])
print(classmates[2])
print(classmates[-1])
print(classmates[-2])
print(classmates[-3])
print()
# Append an element to the end
classmates.append('Adam')
print(classmates)
print()
# Insert an element at a specified position
classmates.insert(1, 'Jack')
print(classmates)
print()
# Remove the last element of the list
classmates.pop()
print(classmates)
print()
# Remove the element at a specified position
classmates.pop(1)
print(classmates)
print()
# Replace an element with another one by assigning directly to the corresponding index
classmates[1] = 'Sarah'
print(classmates)
print()
# Elements in a list can have different data types
L = ['Apple', 123, True]
print(L)
print()
# A list element can itself be another list
s = ['python', 'java', ['asp', 'php'], 'scheme']
print(len(s))
print(s)
print(len(s[2]))
print(s[2][1])
print()
# Note that s has only 4 elements, and s[2] is itself a list
p = ['asp', 'php']
s = ['python', 'java', p, 'scheme']
print(len(s))
print(s)
print()
# An empty list
L = []
print(len(L))
| apache-2.0 | -2,483,030,746,383,875,600 | 12.71831 | 48 | 0.676591 | false |
studenteternal/SoftLayer | get_list.py | 1 | 1502 | #!/usr/bin/python
import yaml
import SoftLayer
from pprint import pprint
credsFile = open("softcreds.yaml",'r')
creds = yaml.load(credsFile)
#print creds['username']
#print creds['api_key']
client = SoftLayer.Client(username=(creds['username']), api_key=(creds['api_key']))
n = 1
count = 1
kill_file = open("kill-file",'a')
while n < 2:
server_name = 'jbsampsobuntutemp' + str(count)
n = n + 1
server_return = client['Virtual_Guest'].createObject({
'datacenter': {'name': 'mex01'},
'hostname': server_name,
'domain': 'test.com',
'startCpus': 1,
'maxMemory': 4096,
'hourlyBillingFlag': 'true',
'localDiskFlag': 'false',
'networkComponents': [{'maxSpeed': 1000}],
'privateNetworkOnlyFlag': 'false',
'blockDevices': [{'device': '0', 'diskImage': {'capacity': 100}}],
'operatingSystemReferenceCode': 'UBUNTU_latest',
'primaryBackendNetworkComponent': {'networkVlan': {'id': 773482}},
# 'tags': 'jbsampso,temp',
# 'postInstallScriptUri': 'https://mex01.objectstorage.softlayer.net/v1/AUTH_3d7f3c03-9b34-418d-96f1-09a45712c21c/Jbsampso_startup_scripts/post_test.sh',
})
count = count + 1
kill_file.write(str(server_return['id']) + '\n')
# print server_return
# server_return = server_return.split(',')
# print server_return[15]
# client['Virtual_Guest'].setUserMetadata(id=server_return['id']{
# 'metadata': {'jbsampso, temp'}}
kill_file.close()
credsFile.close()
#pprint( server_return )
#print server_return['id']
| mit | -2,538,125,823,673,107,500 | 28.45098 | 154 | 0.661784 | false |
beia/beialand | projects/CitisimWebApp/app/src/users.py | 1 | 2358 | #!flask/bin/python
from flask_bcrypt import Bcrypt
from flask_login import UserMixin
from main import login_manager
from main import mydb
import MySQLdb
@login_manager.user_loader
def load_user(user_id):
user = User()
return user.getUserByID(int(user_id))
class User(UserMixin):
id = None
username = None
email = None
password = None
def __init__(self):
print "Empty constructor"
def getUserByID(self, id):
mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor)
mycursor.execute("select * from Users u where u.userID = " + str(id))
row = mycursor.fetchone()
if(row is None):
return None
self.id = row['userID']
self.username = row['userName']
self.email = row['userEmail']
self.password = row['userPass']
return self
def getUserByEmail(self, email):
mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor)
mycursor.execute("select * from Users u where u.userEmail = '" + str(email) + "'")
row = mycursor.fetchone()
if(row is None):
return None
self.id = row['userID']
self.username = row['userName']
self.email = row['userEmail']
self.password = row['userPass']
return self
def userAuthentication(self, email, password):
mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor)
mycursor.execute("select * from Users u where u.userEmail = '" + str(email) + "'")
row = mycursor.fetchone()
if(row is None):
return False
bcrypt = Bcrypt()
return bcrypt.check_password_hash(row['userPass'], password)
def checkIfEmailExists(self, email):
mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor)
mycursor.execute("select * from Users u where u.userEmail = '" + str(email) + "'")
row = mycursor.fetchone()
if(row is None):
return False
return True
def addUser(self, name, email, password):
bcrypt = Bcrypt()
mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor)
mycursor.execute("insert into Users (userName, userEmail, userPass) values ('"+str(name)+"','"+str(email)+"','"+bcrypt.generate_password_hash(password)+"')")
mydb.connection.commit()
| gpl-3.0 | 5,566,151,524,797,660,000 | 29.623377 | 165 | 0.621713 | false |
joerocklin/gem5 | src/arch/x86/isa/insts/x87/arithmetic/partial_remainder.py | 1 | 2252 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop FPREM {
premfp st(0), st(1), st(0)
};
def macroop FPREM1 {
premfp st(0), st(1), st(0)
};
'''
| bsd-3-clause | -8,913,529,199,190,918,000 | 47.956522 | 72 | 0.78286 | false |
openstack/manila | manila/tests/share/drivers/dummy.py | 1 | 36007 | # Copyright 2016 Mirantis inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Dummy share driver for testing Manila APIs and other interfaces.
This driver simulates support of:
- Both available driver modes: DHSS=True/False
- NFS and CIFS protocols
- IP access for NFS shares and USER access for CIFS shares
- CIFS shares in DHSS=True driver mode
- Creation and deletion of share snapshots
- Share replication (readable)
- Share migration
- Consistency groups
- Resize of a share (extend/shrink)
"""
import functools
import time
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from manila.common import constants
from manila import exception
from manila.i18n import _
from manila.share import configuration
from manila.share import driver
from manila.share.manager import share_manager_opts # noqa
from manila.share import utils as share_utils
LOG = log.getLogger(__name__)
dummy_opts = [
cfg.FloatOpt(
"dummy_driver_default_driver_method_delay",
help="Defines default time delay in seconds for each dummy driver "
"method. To redefine some specific method delay use other "
"'dummy_driver_driver_methods_delays' config opt. Optional.",
default=2.0,
min=0,
),
cfg.DictOpt(
"dummy_driver_driver_methods_delays",
help="It is dictionary-like config option, that consists of "
"driver method names as keys and integer/float values that are "
"time delay in seconds. Optional.",
default={
"ensure_share": "1.05",
"create_share": "3.98",
"get_pool": "0.5",
"do_setup": "0.05",
"_get_pools_info": "0.1",
"_update_share_stats": "0.3",
"create_replica": "3.99",
"delete_replica": "2.98",
"promote_replica": "0.75",
"update_replica_state": "0.85",
"create_replicated_snapshot": "4.15",
"delete_replicated_snapshot": "3.16",
"update_replicated_snapshot": "1.17",
"migration_start": 1.01,
"migration_continue": 1.02, # it will be called 2 times
"migration_complete": 1.03,
"migration_cancel": 1.04,
"migration_get_progress": 1.05,
"migration_check_compatibility": 0.05,
},
),
]
CONF = cfg.CONF
def slow_me_down(f):
@functools.wraps(f)
def wrapped_func(self, *args, **kwargs):
sleep_time = self.configuration.safe_get(
"dummy_driver_driver_methods_delays").get(
f.__name__,
self.configuration.safe_get(
"dummy_driver_default_driver_method_delay")
)
time.sleep(float(sleep_time))
return f(self, *args, **kwargs)
return wrapped_func
def get_backend_configuration(backend_name):
config_stanzas = CONF.list_all_sections()
if backend_name not in config_stanzas:
msg = _("Could not find backend stanza %(backend_name)s in "
"configuration which is required for share replication and "
"migration. Available stanzas are %(stanzas)s")
params = {
"stanzas": config_stanzas,
"backend_name": backend_name,
}
raise exception.BadConfigurationException(reason=msg % params)
config = configuration.Configuration(
driver.share_opts, config_group=backend_name)
config.append_config_values(dummy_opts)
config.append_config_values(share_manager_opts)
config.append_config_values(driver.ssh_opts)
return config
class DummyDriver(driver.ShareDriver):
"""Dummy share driver that implements all share driver interfaces."""
def __init__(self, *args, **kwargs):
"""Do initialization."""
super(DummyDriver, self).__init__(
[False, True], *args, config_opts=[dummy_opts], **kwargs)
self._verify_configuration()
self.private_storage = kwargs.get('private_storage')
self.backend_name = self.configuration.safe_get(
"share_backend_name") or "DummyDriver"
self.migration_progress = {}
self.security_service_update_support = True
def _verify_configuration(self):
allowed_driver_methods = [m for m in dir(self) if m[0] != '_']
allowed_driver_methods.extend([
"_setup_server",
"_teardown_server",
"_get_pools_info",
"_update_share_stats",
])
disallowed_driver_methods = (
"get_admin_network_allocations_number",
"get_network_allocations_number",
"get_share_server_pools",
)
for k, v in self.configuration.safe_get(
"dummy_driver_driver_methods_delays").items():
if k not in allowed_driver_methods:
raise exception.BadConfigurationException(reason=(
"Dummy driver does not have '%s' method." % k
))
elif k in disallowed_driver_methods:
raise exception.BadConfigurationException(reason=(
"Method '%s' does not support delaying." % k
))
try:
float(v)
except (TypeError, ValueError):
raise exception.BadConfigurationException(reason=(
"Wrong value (%(v)s) for '%(k)s' dummy driver method time "
"delay is set in 'dummy_driver_driver_methods_delays' "
"config option." % {"k": k, "v": v}
))
def _get_share_name(self, share):
return "share_%(s_id)s_%(si_id)s" % {
"s_id": share["share_id"].replace("-", "_"),
"si_id": share["id"].replace("-", "_")}
def _get_snapshot_name(self, snapshot):
return "snapshot_%(s_id)s_%(si_id)s" % {
"s_id": snapshot["snapshot_id"].replace("-", "_"),
"si_id": snapshot["id"].replace("-", "_")}
def _generate_export_locations(self, mountpoint, share_server=None):
details = share_server["backend_details"] if share_server else {
"primary_public_ip": "10.0.0.10",
"secondary_public_ip": "10.0.0.20",
"service_ip": "11.0.0.11",
}
return [
{
"path": "%(ip)s:%(mp)s" % {"ip": ip, "mp": mountpoint},
"metadata": {
"preferred": preferred,
},
"is_admin_only": is_admin_only,
} for ip, is_admin_only, preferred in (
(details["primary_public_ip"], False, True),
(details["secondary_public_ip"], False, False),
(details["service_ip"], True, False))
]
def _create_share(self, share, share_server=None):
share_proto = share["share_proto"]
if share_proto not in ("NFS", "CIFS"):
msg = _("Unsupported share protocol provided - %s.") % share_proto
raise exception.InvalidShareAccess(reason=msg)
share_name = self._get_share_name(share)
mountpoint = "/path/to/fake/share/%s" % share_name
self.private_storage.update(
share["id"], {
"fake_provider_share_name": share_name,
"fake_provider_location": mountpoint,
}
)
return self._generate_export_locations(
mountpoint, share_server=share_server)
@slow_me_down
def create_share(self, context, share, share_server=None):
"""Is called to create share."""
return self._create_share(share, share_server=share_server)
@slow_me_down
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None, parent_share=None):
"""Is called to create share from snapshot."""
export_locations = self._create_share(share, share_server=share_server)
return {
'export_locations': export_locations,
'status': constants.STATUS_AVAILABLE
}
def _create_snapshot(self, snapshot, share_server=None):
snapshot_name = self._get_snapshot_name(snapshot)
mountpoint = "/path/to/fake/snapshot/%s" % snapshot_name
self.private_storage.update(
snapshot["id"], {
"fake_provider_snapshot_name": snapshot_name,
"fake_provider_location": mountpoint,
}
)
return {
'fake_key1': 'fake_value1',
'fake_key2': 'fake_value2',
'fake_key3': 'fake_value3',
"provider_location": mountpoint,
"export_locations": self._generate_export_locations(
mountpoint, share_server=share_server)
}
@slow_me_down
def create_snapshot(self, context, snapshot, share_server=None):
"""Is called to create snapshot."""
return self._create_snapshot(snapshot, share_server)
@slow_me_down
def delete_share(self, context, share, share_server=None):
"""Is called to remove share."""
self.private_storage.delete(share["id"])
@slow_me_down
def delete_snapshot(self, context, snapshot, share_server=None):
"""Is called to remove snapshot."""
LOG.debug('Deleting snapshot with following data: %s', snapshot)
self.private_storage.delete(snapshot["id"])
@slow_me_down
def get_pool(self, share):
"""Return pool name where the share resides on."""
pool_name = share_utils.extract_host(share["host"], level="pool")
return pool_name
@slow_me_down
def ensure_share(self, context, share, share_server=None):
"""Invoked to ensure that share is exported."""
@slow_me_down
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""Update access rules for given share."""
for rule in add_rules + access_rules:
share_proto = share["share_proto"].lower()
access_type = rule["access_type"].lower()
if not (
(share_proto == "nfs" and access_type == "ip") or
(share_proto == "cifs" and access_type == "user")):
msg = _("Unsupported '%(access_type)s' access type provided "
"for '%(share_proto)s' share protocol.") % {
"access_type": access_type, "share_proto": share_proto}
raise exception.InvalidShareAccess(reason=msg)
@slow_me_down
def snapshot_update_access(self, context, snapshot, access_rules,
add_rules, delete_rules, share_server=None):
"""Update access rules for given snapshot."""
self.update_access(context, snapshot['share'], access_rules,
add_rules, delete_rules, share_server)
@slow_me_down
def do_setup(self, context):
"""Any initialization the share driver does while starting."""
@slow_me_down
def manage_existing(self, share, driver_options):
"""Brings an existing share under Manila management."""
new_export = share['export_location']
old_share_id = self._get_share_id_from_export(new_export)
old_export = self.private_storage.get(
old_share_id, key='export_location')
if old_export.split(":/")[-1] == new_export.split(":/")[-1]:
result = {"size": 1, "export_locations": self._create_share(share)}
self.private_storage.delete(old_share_id)
return result
else:
msg = ("Invalid export specified, existing share %s"
" could not be found" % old_share_id)
raise exception.ShareBackendException(msg=msg)
@slow_me_down
def manage_existing_with_server(
self, share, driver_options, share_server=None):
return self.manage_existing(share, driver_options)
def _get_share_id_from_export(self, export_location):
values = export_location.split('share_')
if len(values) > 1:
return values[1][37:].replace("_", "-")
else:
return export_location
@slow_me_down
def unmanage(self, share):
"""Removes the specified share from Manila management."""
self.private_storage.update(
share['id'], {'export_location': share['export_location']})
@slow_me_down
def unmanage_with_server(self, share, share_server=None):
self.unmanage(share)
@slow_me_down
def manage_existing_snapshot_with_server(self, snapshot, driver_options,
share_server=None):
return self.manage_existing_snapshot(snapshot, driver_options)
@slow_me_down
def manage_existing_snapshot(self, snapshot, driver_options):
"""Brings an existing snapshot under Manila management."""
old_snap_id = self._get_snap_id_from_provider_location(
snapshot['provider_location'])
old_provider_location = self.private_storage.get(
old_snap_id, key='provider_location')
if old_provider_location == snapshot['provider_location']:
self._create_snapshot(snapshot)
self.private_storage.delete(old_snap_id)
return {"size": 1,
"provider_location": snapshot["provider_location"]}
else:
msg = ("Invalid provider location specified, existing snapshot %s"
" could not be found" % old_snap_id)
raise exception.ShareBackendException(msg=msg)
def _get_snap_id_from_provider_location(self, provider_location):
values = provider_location.split('snapshot_')
if len(values) > 1:
return values[1][37:].replace("_", "-")
else:
return provider_location
@slow_me_down
def unmanage_snapshot(self, snapshot):
"""Removes the specified snapshot from Manila management."""
self.private_storage.update(
snapshot['id'],
{'provider_location': snapshot['provider_location']})
@slow_me_down
def unmanage_snapshot_with_server(self, snapshot, share_server=None):
self.unmanage_snapshot(snapshot)
@slow_me_down
def revert_to_snapshot(self, context, snapshot, share_access_rules,
snapshot_access_rules, share_server=None):
"""Reverts a share (in place) to the specified snapshot."""
@slow_me_down
def extend_share(self, share, new_size, share_server=None):
"""Extends size of existing share."""
@slow_me_down
def shrink_share(self, share, new_size, share_server=None):
"""Shrinks size of existing share."""
def get_network_allocations_number(self):
"""Returns number of network allocations for creating VIFs."""
return 2
def get_admin_network_allocations_number(self):
return 1
@slow_me_down
def _setup_server(self, network_info, metadata=None):
"""Sets up and configures share server with given network parameters.
Redefine it within share driver when it is going to handle share
servers.
"""
server_details = {
"primary_public_ip": network_info[
"network_allocations"][0]["ip_address"],
"secondary_public_ip": network_info[
"network_allocations"][1]["ip_address"],
"service_ip": network_info[
"admin_network_allocations"][0]["ip_address"],
"username": "fake_username",
"server_id": network_info['server_id']
}
return server_details
@slow_me_down
def _teardown_server(self, server_details, security_services=None):
"""Tears down share server."""
@slow_me_down
def _get_pools_info(self):
pools = [{
"pool_name": "fake_pool_for_%s" % self.backend_name,
"total_capacity_gb": 1230.0,
"free_capacity_gb": 1210.0,
"reserved_percentage": self.configuration.reserved_share_percentage
}]
if self.configuration.replication_domain:
pools[0]["replication_type"] = "readable"
return pools
@slow_me_down
def _update_share_stats(self, data=None):
"""Retrieve stats info from share group."""
data = {
"share_backend_name": self.backend_name,
"storage_protocol": "NFS_CIFS",
"reserved_percentage":
self.configuration.reserved_share_percentage,
"snapshot_support": True,
"create_share_from_snapshot_support": True,
"revert_to_snapshot_support": True,
"mount_snapshot_support": True,
"driver_name": "Dummy",
"pools": self._get_pools_info(),
"share_group_stats": {
"consistent_snapshot_support": "pool",
},
}
if self.configuration.replication_domain:
data["replication_type"] = "readable"
super(DummyDriver, self)._update_share_stats(data)
def get_share_server_pools(self, share_server):
"""Return list of pools related to a particular share server."""
return []
@slow_me_down
def create_consistency_group(self, context, cg_dict, share_server=None):
"""Create a consistency group."""
LOG.debug(
"Successfully created dummy Consistency Group with ID: %s.",
cg_dict["id"])
@slow_me_down
def delete_consistency_group(self, context, cg_dict, share_server=None):
"""Delete a consistency group."""
LOG.debug(
"Successfully deleted dummy consistency group with ID %s.",
cg_dict["id"])
@slow_me_down
def create_cgsnapshot(self, context, snap_dict, share_server=None):
"""Create a consistency group snapshot."""
LOG.debug("Successfully created CG snapshot %s.", snap_dict["id"])
return None, None
@slow_me_down
def delete_cgsnapshot(self, context, snap_dict, share_server=None):
"""Delete a consistency group snapshot."""
LOG.debug("Successfully deleted CG snapshot %s.", snap_dict["id"])
return None, None
@slow_me_down
def create_consistency_group_from_cgsnapshot(
self, context, cg_dict, cgsnapshot_dict, share_server=None):
"""Create a consistency group from a cgsnapshot."""
LOG.debug(
("Successfully created dummy Consistency Group (%(cg_id)s) "
"from CG snapshot (%(cg_snap_id)s)."),
{"cg_id": cg_dict["id"], "cg_snap_id": cgsnapshot_dict["id"]})
return None, []
@slow_me_down
def create_replica(self, context, replica_list, new_replica,
access_rules, replica_snapshots, share_server=None):
"""Replicate the active replica to a new replica on this backend."""
replica_name = self._get_share_name(new_replica)
mountpoint = "/path/to/fake/share/%s" % replica_name
self.private_storage.update(
new_replica["id"], {
"fake_provider_replica_name": replica_name,
"fake_provider_location": mountpoint,
}
)
return {
"export_locations": self._generate_export_locations(
mountpoint, share_server=share_server),
"replica_state": constants.REPLICA_STATE_IN_SYNC,
"access_rules_status": constants.STATUS_ACTIVE,
}
@slow_me_down
def delete_replica(self, context, replica_list, replica_snapshots,
replica, share_server=None):
"""Delete a replica."""
self.private_storage.delete(replica["id"])
@slow_me_down
def promote_replica(self, context, replica_list, replica, access_rules,
share_server=None):
"""Promote a replica to 'active' replica state."""
return_replica_list = []
for r in replica_list:
if r["id"] == replica["id"]:
replica_state = constants.REPLICA_STATE_ACTIVE
else:
replica_state = constants.REPLICA_STATE_IN_SYNC
return_replica_list.append(
{"id": r["id"], "replica_state": replica_state})
return return_replica_list
@slow_me_down
def update_replica_state(self, context, replica_list, replica,
access_rules, replica_snapshots,
share_server=None):
"""Update the replica_state of a replica."""
return constants.REPLICA_STATE_IN_SYNC
@slow_me_down
def create_replicated_snapshot(self, context, replica_list,
replica_snapshots, share_server=None):
"""Create a snapshot on active instance and update across the replicas.
"""
return_replica_snapshots = []
for r in replica_snapshots:
return_replica_snapshots.append(
{"id": r["id"], "status": constants.STATUS_AVAILABLE})
return return_replica_snapshots
@slow_me_down
def revert_to_replicated_snapshot(self, context, active_replica,
replica_list, active_replica_snapshot,
replica_snapshots, share_access_rules,
snapshot_access_rules,
share_server=None):
"""Reverts a replicated share (in place) to the specified snapshot."""
@slow_me_down
def delete_replicated_snapshot(self, context, replica_list,
replica_snapshots, share_server=None):
"""Delete a snapshot by deleting its instances across the replicas."""
return_replica_snapshots = []
for r in replica_snapshots:
return_replica_snapshots.append(
{"id": r["id"], "status": constants.STATUS_DELETED})
return return_replica_snapshots
@slow_me_down
def update_replicated_snapshot(self, context, replica_list,
share_replica, replica_snapshots,
replica_snapshot, share_server=None):
"""Update the status of a snapshot instance that lives on a replica."""
return {
"id": replica_snapshot["id"], "status": constants.STATUS_AVAILABLE}
@slow_me_down
def migration_check_compatibility(
self, context, source_share, destination_share,
share_server=None, destination_share_server=None):
"""Is called to test compatibility with destination backend."""
backend_name = share_utils.extract_host(
destination_share['host'], level='backend_name')
config = get_backend_configuration(backend_name)
compatible = 'Dummy' in config.share_driver
return {
'compatible': compatible,
'writable': compatible,
'preserve_metadata': compatible,
'nondisruptive': False,
'preserve_snapshots': compatible,
}
@slow_me_down
def migration_start(
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called to perform 1st phase of driver migration of a given share.
"""
LOG.debug(
"Migration of dummy share with ID '%s' has been started.",
source_share["id"])
self.migration_progress[source_share['share_id']] = 0
@slow_me_down
def migration_continue(
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
if source_share["id"] not in self.migration_progress:
self.migration_progress[source_share["id"]] = 0
self.migration_progress[source_share["id"]] += 50
LOG.debug(
"Migration of dummy share with ID '%s' is continuing, %s.",
source_share["id"],
self.migration_progress[source_share["id"]])
return self.migration_progress[source_share["id"]] == 100
@slow_me_down
def migration_complete(
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called to perform 2nd phase of driver migration of a given share.
"""
snapshot_updates = {}
for src_snap_ins, dest_snap_ins in snapshot_mappings.items():
snapshot_updates[dest_snap_ins['id']] = self._create_snapshot(
dest_snap_ins)
return {
'snapshot_updates': snapshot_updates,
'export_locations': self._do_migration(
source_share, destination_share, share_server)
}
def _do_migration(self, source_share_ref, dest_share_ref, share_server):
share_name = self._get_share_name(dest_share_ref)
mountpoint = "/path/to/fake/share/%s" % share_name
self.private_storage.delete(source_share_ref["id"])
self.private_storage.update(
dest_share_ref["id"], {
"fake_provider_share_name": share_name,
"fake_provider_location": mountpoint,
}
)
LOG.debug(
"Migration of dummy share with ID '%s' has been completed.",
source_share_ref["id"])
self.migration_progress.pop(source_share_ref["id"], None)
return self._generate_export_locations(
mountpoint, share_server=share_server)
@slow_me_down
def migration_cancel(
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called to cancel driver migration."""
LOG.debug(
"Migration of dummy share with ID '%s' has been canceled.",
source_share["id"])
self.migration_progress.pop(source_share["id"], None)
@slow_me_down
def migration_get_progress(
self, context, source_share, destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Is called to get migration progress."""
# Simulate migration progress.
if source_share["id"] not in self.migration_progress:
self.migration_progress[source_share["id"]] = 0
total_progress = self.migration_progress[source_share["id"]]
LOG.debug("Progress of current dummy share migration "
"with ID '%(id)s' is %(progress)s.", {
"id": source_share["id"],
"progress": total_progress
})
return {"total_progress": total_progress}
def share_server_migration_check_compatibility(
self, context, share_server, dest_host, old_share_network,
new_share_network, shares_request_spec):
"""Is called to check migration compatibility for a share server."""
backend_name = share_utils.extract_host(
dest_host, level='backend_name')
config = get_backend_configuration(backend_name)
compatible = 'Dummy' in config.share_driver
return {
'compatible': compatible,
'writable': compatible,
'preserve_snapshots': compatible,
'nondisruptive': False,
'share_network_id': new_share_network['id'],
'migration_cancel': compatible,
'migration_get_progress': compatible
}
@slow_me_down
def share_server_migration_start(self, context, src_share_server,
dest_share_server, shares, snapshots):
"""Is called to perform 1st phase of migration of a share server."""
LOG.debug(
"Migration of dummy share server with ID '%s' has been started.",
src_share_server["id"])
self.migration_progress[src_share_server['id']] = 0
@slow_me_down
def share_server_migration_continue(self, context, src_share_server,
dest_share_server, shares, snapshots):
"""Is called to continue the migration of a share server."""
if src_share_server["id"] not in self.migration_progress:
self.migration_progress[src_share_server["id"]] = 0
self.migration_progress[src_share_server["id"]] += 50
LOG.debug(
"Migration of dummy share server with ID '%s' is continuing, %s.",
src_share_server["id"],
self.migration_progress[src_share_server["id"]])
return self.migration_progress[src_share_server["id"]] >= 100
@slow_me_down
def share_server_migration_complete(self, context, source_share_server,
dest_share_server, shares, snapshots,
new_network_allocations):
"""Is called to complete the migration of a share server."""
shares_updates = {}
pools = self._get_pools_info()
for instance in shares:
share_name = self._get_share_name(instance)
mountpoint = "/path/to/fake/share/%s" % share_name
export_locations = self._generate_export_locations(
mountpoint, share_server=dest_share_server)
dest_pool = pools[0]['pool_name']
shares_updates.update(
{instance['id']: {'export_locations': export_locations,
'pool_name': dest_pool}}
)
snapshot_updates = {}
for instance in snapshots:
snapshot_name = self._get_snapshot_name(instance)
mountpoint = "/path/to/fake/snapshot/%s" % snapshot_name
snap_export_locations = self._generate_export_locations(
mountpoint, share_server=dest_share_server)
snapshot_updates.update(
{instance['id']: {
'provider_location': mountpoint,
'export_locations': snap_export_locations}}
)
LOG.debug(
"Migration of dummy share server with ID '%s' has been completed.",
source_share_server["id"])
self.migration_progress.pop(source_share_server["id"], None)
return {
'share_updates': shares_updates,
'snapshot_updates': snapshot_updates,
}
@slow_me_down
def share_server_migration_cancel(self, context, src_share_server,
dest_share_server, shares, snapshots):
"""Is called to cancel a share server migration."""
LOG.debug(
"Migration of dummy share server with ID '%s' has been canceled.",
src_share_server["id"])
self.migration_progress.pop(src_share_server["id"], None)
@slow_me_down
def share_server_migration_get_progress(self, context, src_share_server,
dest_share_server, shares,
snapshots):
"""Is called to get share server migration progress."""
if src_share_server["id"] not in self.migration_progress:
self.migration_progress[src_share_server["id"]] = 0
total_progress = self.migration_progress[src_share_server["id"]]
LOG.debug("Progress of current dummy share server migration "
"with ID '%(id)s' is %(progress)s.", {
"id": src_share_server["id"],
"progress": total_progress
})
return {"total_progress": total_progress}
def update_share_usage_size(self, context, shares):
share_updates = []
gathered_at = timeutils.utcnow()
for s in shares:
share_updates.append({'id': s['id'],
'used_size': 1,
'gathered_at': gathered_at})
return share_updates
@slow_me_down
def get_share_server_network_info(
self, context, share_server, identifier, driver_options):
try:
server_details = self.private_storage.get(identifier)
except Exception:
msg = ("Unable to find share server %s in "
"private storage." % identifier)
raise exception.ShareBackendException(msg=msg)
return [server_details['primary_public_ip'],
server_details['secondary_public_ip'],
server_details['service_ip']]
@slow_me_down
def manage_server(self, context, share_server, identifier, driver_options):
server_details = self.private_storage.get(identifier)
self.private_storage.delete(identifier)
return identifier, server_details
def unmanage_server(self, server_details, security_services=None):
server_details = server_details or {}
if not server_details or 'server_id' not in server_details:
# This share server doesn't have any network details. Since it's
# just being cleaned up, we'll log a warning and return without
# errors.
LOG.warning("Share server does not have network information. "
"It is being unmanaged, but cannot be re-managed "
"without first creating network allocations in this "
"driver's private storage.")
return
self.private_storage.update(server_details['server_id'],
server_details)
def get_share_status(self, share, share_server=None):
return {
'status': constants.STATUS_AVAILABLE,
'export_locations': self.private_storage.get(share['id'],
key='export_location')
}
@slow_me_down
def update_share_server_security_service(self, context, share_server,
network_info, share_instances,
share_instance_rules,
new_security_service,
current_security_service=None):
if current_security_service:
msg = _("Replacing security service %(cur_sec_serv_id)s by "
"security service %(new_sec_serv_id)s on share server "
"%(server_id)s."
) % {
'cur_sec_serv_id': current_security_service['id'],
'new_sec_serv_id': new_security_service['id'],
'server_id': share_server['id']
}
else:
msg = _("Adding security service %(sec_serv_id)s on share server "
"%(server_id)s."
) % {
'sec_serv_id': new_security_service['id'],
'server_id': share_server['id']
}
LOG.debug(msg)
def check_update_share_server_security_service(
self, context, share_server, network_info, share_instances,
share_instance_rules, new_security_service,
current_security_service=None):
return True
| apache-2.0 | -266,839,410,979,436,220 | 39.639955 | 79 | 0.576443 | false |
porridge/apt-forktracer | lib/apt_forktracer/tests/test_config.py | 1 | 1979 | #!/usr/bin/python3
# apt-forktracer - a utility for managing package versions
# Copyright (C) 2008,2010,2019 Marcin Owsiany <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import unittest
from apt_forktracer.testlib import test_helper
from apt_forktracer.config import Config
from apt_forktracer.config_stanza import ConfigStanza
class Test_Config(test_helper.MoxTestCase):
def setUp(self):
super(Test_Config, self).setUp()
self.c = Config()
def test_addition_and_retrieval(self):
foo_stanza1 = self.mox.CreateMock(ConfigStanza)
foo_stanza1.get('package').AndReturn('foo')
foo_stanza2 = self.mox.CreateMock(ConfigStanza)
foo_stanza2.get('package').AndReturn('foo')
bar_stanza = self.mox.CreateMock(ConfigStanza)
bar_stanza.get('package').AndReturn('bar')
self.mox.ReplayAll()
self.c.add(foo_stanza1)
self.c.add(foo_stanza2)
self.c.add(bar_stanza)
foo_stanzas = self.c.package('foo')
bar_stanzas = self.c.package('bar')
baz_stanzas = self.c.package('baz')
self.assertEqual(len(foo_stanzas), 2)
self.assertEqual(foo_stanzas[0], foo_stanza1)
self.assertEqual(foo_stanzas[1], foo_stanza2)
self.assertEqual(len(bar_stanzas), 1)
self.assertEqual(bar_stanzas[0], bar_stanza)
self.assertEqual(len(baz_stanzas), 0)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -973,769,816,005,193,500 | 37.057692 | 73 | 0.745831 | false |
gitterHQ/ansible | v2/ansible/playbook/attribute.py | 1 | 1053 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#from ansible.common.errors import AnsibleError
class Attribute(object):
def __init__(self, isa=None, private=False, default=None):
self.isa = isa
self.private = private
self.value = None
self.default = default
def __call__(self):
return self.value
class FieldAttribute(Attribute):
pass
| gpl-3.0 | 8,832,590,789,999,572,000 | 30.909091 | 70 | 0.720798 | false |
abusesa/idiokit | idiokit/xmpp/jid.py | 1 | 6351 | # Module for XMPP JID processing as defined in RFC 3920
# (http://www.ietf.org/rfc/rfc3920.txt) and RFC 3454
# (http://www.ietf.org/rfc/rfc3454.txt).
#
# This module was originally written using both the above RFCs and the
# xmppstringprep module of the pyxmpp package
# (http://pyxmpp.jajcus.net/) as well as the
# twisted.words.protocols.jabber.xmpp_stringprep module of Twisted
# (http://twistedmatrix.com/) as a reference.
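#
# Illustrative usage (not part of the original module); the address below is
# made up:
#
#   jid = JID(u"alice@example.com/work")
#   jid.node, jid.domain, jid.resource  # u'alice', u'example.com', u'work'
#   jid.bare()                          # JID(u'alice@example.com')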
import re
import threading
import stringprep
from encodings import idna
from unicodedata import ucd_3_2_0 as unicodedata
class JIDError(Exception):
pass
def check_prohibited_and_unassigned(chars, prohibited_tables):
in_table_a1 = stringprep.in_table_a1
for pos, ch in enumerate(chars):
if any(table(ch) for table in prohibited_tables):
raise JIDError("prohibited character {0!r} at index {1}".format(ch, pos))
if in_table_a1(ch):
raise JIDError("unassigned characted {0!r} at index {1}".format(ch, pos))
def check_bidirectional(chars):
in_table_d1 = stringprep.in_table_d1
in_table_d2 = stringprep.in_table_d2
# RFC 3454: If a string contains any RandALCat character, the
# string MUST NOT contain any LCat character.
if not any(in_table_d1(ch) for ch in chars):
return
if any(in_table_d2(ch) for ch in chars):
raise JIDError("string contains RandALCat and LCat characters")
# RFC 3454: If a string contains any RandALCat character, a
# RandALCat character MUST be the first character of the string,
# and a RandALCat character MUST be the last character of the
# string.
if not (in_table_d1(chars[0]) and in_table_d1(chars[-1])):
raise JIDError("string must start and end with RandALCat characters")
NODEPREP_PROHIBITED = (
stringprep.in_table_c11,
stringprep.in_table_c12,
stringprep.in_table_c21,
stringprep.in_table_c22,
stringprep.in_table_c3,
stringprep.in_table_c4,
stringprep.in_table_c5,
stringprep.in_table_c6,
stringprep.in_table_c7,
stringprep.in_table_c8,
stringprep.in_table_c9,
frozenset(u"\"&'/:<>@").__contains__
)
def nodeprep(string):
in_table_b1 = stringprep.in_table_b1
map_table_b2 = stringprep.map_table_b2
string = u"".join(map_table_b2(ch) for ch in string if not in_table_b1(ch))
string = unicodedata.normalize("NFKC", string)
check_prohibited_and_unassigned(string, NODEPREP_PROHIBITED)
check_bidirectional(string)
return string
RESOURCEPREP_PROHIBITED = (
stringprep.in_table_c12,
stringprep.in_table_c21,
stringprep.in_table_c22,
stringprep.in_table_c3,
stringprep.in_table_c4,
stringprep.in_table_c5,
stringprep.in_table_c6,
stringprep.in_table_c7,
stringprep.in_table_c8,
stringprep.in_table_c9
)
def resourceprep(string):
in_table_b1 = stringprep.in_table_b1
string = u"".join(ch for ch in string if not in_table_b1(ch))
string = unicodedata.normalize("NFKC", string)
check_prohibited_and_unassigned(string, RESOURCEPREP_PROHIBITED)
check_bidirectional(string)
return string
JID_REX = re.compile(r"^(?:(.*?)@)?([^\.\/]+(?:\.[^\.\/]+)*)(?:/(.*))?$", re.U)
def split_jid(jid):
match = JID_REX.match(jid)
if not match:
raise JIDError("not a valid JID")
return match.groups()
def check_length(identifier, value):
if len(value) > 1023:
raise JIDError("{0} identifier too long".format(identifier))
return value
def prep_node(node):
if not node:
return None
node = nodeprep(node)
return check_length("node", node)
def prep_resource(resource):
if not resource:
return None
resource = resourceprep(resource)
return check_length("resource", resource)
def prep_domain(domain):
labels = domain.split(".")
try:
labels = map(idna.nameprep, labels)
labels = map(idna.ToASCII, labels)
except UnicodeError as ue:
raise JIDError("not an internationalized label: {0}".format(ue))
labels = map(idna.ToUnicode, labels)
domain = ".".join(labels)
return check_length("domain", domain)
def unicodify(item):
if item is None:
return None
return unicode(item)
class JID(object):
cache = dict()
cache_size = 2 ** 14
cache_lock = threading.Lock()
__slots__ = "_node", "_domain", "_resource"
node = property(lambda x: x._node)
domain = property(lambda x: x._domain)
resource = property(lambda x: x._resource)
def __new__(cls, node=None, domain=None, resource=None):
node = unicodify(node)
domain = unicodify(domain)
resource = unicodify(resource)
with cls.cache_lock:
cache_key = node, domain, resource
if cache_key in cls.cache:
return cls.cache[cache_key]
if node is None and domain is None:
raise JIDError("either a full JID or at least a domain expected")
elif domain is None:
if resource is not None:
raise JIDError("resource not expected with a full JID")
node, domain, resource = split_jid(node)
obj = super(JID, cls).__new__(cls)
obj._node = prep_node(node)
obj._domain = prep_domain(domain)
obj._resource = prep_resource(resource)
with cls.cache_lock:
if len(cls.cache) >= cls.cache_size:
cls.cache.clear()
cls.cache[cache_key] = obj
return obj
def bare(self):
return JID(self.node, self.domain)
def __reduce__(self):
return JID, (self.node, self.domain, self.resource)
def __eq__(self, other):
if not isinstance(other, JID):
return NotImplemented
return self is other or unicode(self) == unicode(other)
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __hash__(self):
return hash(unicode(self))
def __repr__(self):
return "{0}({1!r})".format(type(self).__name__, unicode(self))
def __unicode__(self):
jid = self.domain
if self.node is not None:
jid = self.node + "@" + jid
if self.resource is not None:
jid = jid + "/" + self.resource
return jid
| mit | -8,818,032,369,330,515,000 | 27.608108 | 85 | 0.634861 | false |
moustakas/impy | teaching/siena_class_roster.py | 1 | 3362 | import requests
#import BeautifulSoup
from bs4 import BeautifulSoup
import HTMLParser
from HTMLParser import HTMLParser
import sys
import os
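# This script reads a saved "Detail Class List" HTML page (path given as the
# first command line argument), pulls each student's photo and e-mail display
# name out of the roster tables, and prints a LaTeX document of captioned
# photos (5 per row, 25 per page) to standard output.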
# Open the file.
r = open(sys.argv[1])
if not os.path.isdir('Detail_Class_List_files'):
os.rename('Detail Class List_files', 'Detail_Class_List_files')
# Try to parse the webpage by looking for the tables.
soup = BeautifulSoup(r)
print "\documentclass{article}"
print "\usepackage{graphicx}"
print "\usepackage{subfig}"
print "\hoffset=-1.50in"
print "\setlength{\\textwidth}{7.5in}"
print "\setlength{\\textheight}{9in}"
print "\setlength{\\voffset}{0pt}"
print "\setlength{\\topmargin}{0pt}"
print "\setlength{\headheight}{0pt}"
print "\setlength{\headsep}{0pt}"
h2s = soup.find_all('h2')
caption = 'Default'
for h in h2s:
if h.string.find('Class Roster For')>=0:
caption = h.string
tables = soup.find_all('table')
icount = 0
closed_figure = False
for table in tables:
if table['class'][0]=='datadisplaytable':
rows = table.findAll('tr')
image = None
name = None
for row in rows:
cols = row.findAll('td')
for col in cols:
img = col.findAll('img')
a = col.findAll('a')
#if len(a)>0:
# import pdb ; pdb.set_trace()
if len(img)>0 and img[0]['src'].find('jpg')>=0:
image = img[0]['src']
image = image.replace(' ','_').replace('%20', '_')
if not os.path.isfile(image):
import pdb ; pdb.set_trace()
#if os.path.isfile(image):
# import pdb ; pdb.set_trace()
if len(a)>0 and 'mailto' in a[0]['href']:
name = a[0]['target']
#if len(a)>0 and a[0]['class']==['leftaligntext']:
# name = a[0].string
# print(image, a[0].string)
#import pdb ; pdb.set_trace()
if name is not None and image is not None:
if icount%25==0:
if icount > 0:
print "\\clearpage"
else:
print "\\begin{document}"
print "\\begin{figure}"
print "\centering"
closed_figure = False
if os.stat(image).st_size < 250:
#image = './file_not_found.jpg'
image = './smiley.png'
if icount%5==4:
print "\subfloat[%s]{\includegraphics[height=0.19\\textwidth]{%s}}\\\\" % (name,image)
else:
print "\subfloat[%s]{\includegraphics[height=0.19\\textwidth]{%s}}\\hfill" % (name,image)
image = None
name = None
if icount%25==24:
print "\caption{%s}" % (caption)
print "\end{figure}"
closed_figure = True
icount += 1
if not closed_figure:
print "\caption{%s}" % (caption)
print "\end{figure}"
print "\end{document}"
| gpl-2.0 | -457,119,040,262,636,200 | 29.844037 | 113 | 0.467281 | false |
vitan/hue | desktop/libs/hadoop/src/hadoop/cluster.py | 1 | 7247 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from hadoop import conf
from hadoop.fs import webhdfs, LocalSubFileSystem
from hadoop.job_tracker import LiveJobTracker
from desktop.lib.paths import get_build_dir
LOG = logging.getLogger(__name__)
FS_CACHE = None
MR_CACHE = None
MR_NAME_CACHE = 'default'
def _make_filesystem(identifier):
choice = os.getenv("FB_FS")
if choice == "testing":
path = os.path.join(get_build_dir(), "fs")
if not os.path.isdir(path):
LOG.warning(("Could not find fs directory: %s. Perhaps you need to run manage.py filebrowser_test_setup?") % path)
return LocalSubFileSystem(path)
else:
cluster_conf = conf.HDFS_CLUSTERS[identifier]
return webhdfs.WebHdfs.from_config(cluster_conf)
def _make_mrcluster(identifier):
cluster_conf = conf.MR_CLUSTERS[identifier]
return LiveJobTracker.from_conf(cluster_conf)
def get_hdfs(identifier="default"):
global FS_CACHE
get_all_hdfs()
return FS_CACHE[identifier]
def get_defaultfs():
fs = get_hdfs()
if fs.logical_name:
return fs.logical_name
else:
return fs.fs_defaultfs
def get_all_hdfs():
global FS_CACHE
if FS_CACHE is not None:
return FS_CACHE
FS_CACHE = {}
for identifier in conf.HDFS_CLUSTERS.keys():
FS_CACHE[identifier] = _make_filesystem(identifier)
return FS_CACHE
def get_default_mrcluster():
"""
Get the default JT (not necessarily HA).
"""
global MR_CACHE
global MR_NAME_CACHE
try:
all_mrclusters()
return MR_CACHE.get(MR_NAME_CACHE)
except KeyError:
# Return an arbitrary cluster
candidates = all_mrclusters()
if candidates:
return candidates.values()[0]
return None
def get_default_yarncluster():
"""
Get the default RM (not necessarily HA).
"""
global MR_NAME_CACHE
try:
return conf.YARN_CLUSTERS[MR_NAME_CACHE]
except KeyError:
return get_yarn()
def get_next_ha_mrcluster():
"""
Return the next available JT instance and cache its name.
  This method currently works for distinguishing between active/standby JT as a standby JT does not respond.
A cleaner but more complicated way would be to do something like the MRHAAdmin tool and
org.apache.hadoop.ha.HAServiceStatus#getServiceStatus().
"""
global MR_NAME_CACHE
candidates = all_mrclusters()
has_ha = sum([conf.MR_CLUSTERS[name].SUBMIT_TO.get() for name in conf.MR_CLUSTERS.keys()]) >= 2
mrcluster = get_default_mrcluster()
if mrcluster is None:
return None
current_user = mrcluster.user
for name in conf.MR_CLUSTERS.keys():
config = conf.MR_CLUSTERS[name]
if config.SUBMIT_TO.get():
jt = candidates[name]
if has_ha:
try:
jt.setuser(current_user)
status = jt.cluster_status()
if status.stateAsString == 'RUNNING':
MR_NAME_CACHE = name
LOG.warn('Picking HA JobTracker: %s' % name)
return (config, jt)
else:
LOG.info('JobTracker %s is not RUNNING, skipping it: %s' % (name, status))
except Exception, ex:
LOG.info('JobTracker %s is not available, skipping it: %s' % (name, ex))
else:
return (config, jt)
return None
def get_mrcluster(identifier="default"):
global MR_CACHE
all_mrclusters()
return MR_CACHE[identifier]
def all_mrclusters():
global MR_CACHE
if MR_CACHE is not None:
return MR_CACHE
MR_CACHE = {}
for identifier in conf.MR_CLUSTERS.keys():
MR_CACHE[identifier] = _make_mrcluster(identifier)
return MR_CACHE
def get_yarn():
global MR_NAME_CACHE
if MR_NAME_CACHE in conf.YARN_CLUSTERS and conf.YARN_CLUSTERS[MR_NAME_CACHE].SUBMIT_TO.get():
return conf.YARN_CLUSTERS[MR_NAME_CACHE]
for name in conf.YARN_CLUSTERS.keys():
yarn = conf.YARN_CLUSTERS[name]
if yarn.SUBMIT_TO.get():
return yarn
def get_next_ha_yarncluster():
"""
Return the next available YARN RM instance and cache its name.
"""
from hadoop.yarn.resource_manager_api import ResourceManagerApi
global MR_NAME_CACHE
has_ha = sum([conf.YARN_CLUSTERS[name].SUBMIT_TO.get() for name in conf.YARN_CLUSTERS.keys()]) >= 2
for name in conf.YARN_CLUSTERS.keys():
config = conf.YARN_CLUSTERS[name]
if config.SUBMIT_TO.get():
rm = ResourceManagerApi(config.RESOURCE_MANAGER_API_URL.get(), config.SECURITY_ENABLED.get(), config.SSL_CERT_CA_VERIFY.get())
if has_ha:
try:
cluster_info = rm.cluster()
if cluster_info['clusterInfo']['haState'] == 'ACTIVE':
MR_NAME_CACHE = name
LOG.warn('Picking RM HA: %s' % name)
from hadoop.yarn import resource_manager_api
resource_manager_api._api_cache = None # Reset cache
from hadoop.yarn import mapreduce_api
mapreduce_api._api_cache = None
return (config, rm)
else:
LOG.info('RM %s is not RUNNING, skipping it: %s' % (name, cluster_info))
except Exception, ex:
LOG.info('RM %s is not available, skipping it: %s' % (name, ex))
else:
return (config, rm)
return None
def get_cluster_for_job_submission():
"""
Check the 'submit_to' for each MR/Yarn cluster, and return the
  config section of the first one that enables submission.
Support MR1/MR2 HA.
"""
yarn = get_next_ha_yarncluster()
if yarn:
return yarn
mr = get_next_ha_mrcluster()
if mr is not None:
return mr
return None
def get_cluster_conf_for_job_submission():
cluster = get_cluster_for_job_submission()
if cluster:
config, rm = cluster
return config
else:
return None
def get_cluster_addr_for_job_submission():
"""
  Check the 'submit_to' for each MR/Yarn cluster, and return the logical name or host:port of the first one that enables submission.
"""
if is_yarn():
if get_yarn().LOGICAL_NAME.get():
return get_yarn().LOGICAL_NAME.get()
conf = get_cluster_conf_for_job_submission()
if conf is None:
return None
return "%s:%s" % (conf.HOST.get(), conf.PORT.get())
def is_yarn():
return get_yarn() is not None
def clear_caches():
"""
Clears cluster's internal caches. Returns
something that can be given back to restore_caches.
"""
global FS_CACHE, MR_CACHE
old = FS_CACHE, MR_CACHE
FS_CACHE, MR_CACHE = None, None
return old
def restore_caches(old):
"""
Restores caches from the result of a previous clear_caches call.
"""
global FS_CACHE, MR_CACHE
FS_CACHE, MR_CACHE = old
| apache-2.0 | 5,338,848,230,225,687,000 | 25.840741 | 132 | 0.673796 | false |
woel0007/caravel | caravel/config.py | 1 | 5672 | """The main config file for Caravel
All configuration in this file can be overridden by providing a caravel_config
in your PYTHONPATH as there is a ``from caravel_config import *``
at the end of this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from dateutil import tz
from flask_appbuilder.security.manager import AUTH_DB
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# ---------------------------------------------------------
# Caravel specific config
# ---------------------------------------------------------
ROW_LIMIT = 50000
CARAVEL_WORKERS = 16
CARAVEL_WEBSERVER_PORT = 8088
CARAVEL_WEBSERVER_TIMEOUT = 60
CUSTOM_SECURITY_MANAGER = None
# ---------------------------------------------------------
# Your App secret key
SECRET_KEY = '\2\1thisismyscretkey\1\2\e\y\y\h' # noqa
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/caravel.db'
# SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
# SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'
# Flask-WTF flag for CSRF
CSRF_ENABLED = True
# Whether to run the web server in debug mode or not
DEBUG = False
# Whether to show the stacktrace on 500 error
SHOW_STACKTRACE = True
# ------------------------------
# GLOBALS FOR APP Builder
# ------------------------------
# Uncomment to setup Your App name
APP_NAME = "Caravel"
# Uncomment to setup an App icon
APP_ICON = "/static/assets/images/caravel_logo.png"
# Druid query timezone
# tz.tzutc() : Using utc timezone
# tz.tzlocal() : Using local timezone
# other tz can be overridden by providing a local_config
DRUID_IS_ACTIVE = True
DRUID_TZ = tz.tzutc()
# ----------------------------------------------------
# AUTHENTICATION CONFIG
# ----------------------------------------------------
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database (username/password)
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
# AUTH_ROLE_ADMIN = 'Admin'
# Uncomment to setup Public role name, no authentication needed
# AUTH_ROLE_PUBLIC = 'Public'
# Will allow user self registration
# AUTH_USER_REGISTRATION = True
# The default user self registration role
# AUTH_USER_REGISTRATION_ROLE = "Public"
# When using LDAP Auth, setup the ldap server
# AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# Uncomment to setup OpenID providers example for OpenID authentication
# OPENID_PROVIDERS = [
# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
# { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]
# ---------------------------------------------------
# Roles config
# ---------------------------------------------------
# Grant public role the same set of permissions as for the GAMMA role.
# This is useful if one wants to enable anonymous users to view
# dashboards. Explicit grant on specific datasets is still required.
PUBLIC_ROLE_LIKE_GAMMA = False
# ---------------------------------------------------
# Babel config for translations
# ---------------------------------------------------
# Setup default language
BABEL_DEFAULT_LOCALE = 'en'
# Your application default translation path
BABEL_DEFAULT_FOLDER = 'babel/translations'
# The allowed translation for you app
LANGUAGES = {
'en': {'flag': 'us', 'name': 'English'},
# 'fr': {'flag': 'fr', 'name': 'French'},
# 'zh': {'flag': 'cn', 'name': 'Chinese'},
}
# ---------------------------------------------------
# Image and file configuration
# ---------------------------------------------------
# The file upload folder, when using models with files
UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload folder, when using models with images
IMG_UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload url, when using models with images
IMG_UPLOAD_URL = '/static/uploads/'
# Setup image size default is (300, 200, True)
# IMG_SIZE = (300, 200, True)
CACHE_DEFAULT_TIMEOUT = None
CACHE_CONFIG = {'CACHE_TYPE': 'null'}
# CORS Options
ENABLE_CORS = False
CORS_OPTIONS = {}
# ---------------------------------------------------
# List of viz_types not allowed in your environment
# For example: Blacklist pivot table and treemap:
# VIZ_TYPE_BLACKLIST = ['pivot_table', 'treemap']
# ---------------------------------------------------
VIZ_TYPE_BLACKLIST = []
# ---------------------------------------------------
# List of data sources not to be refreshed in druid cluster
# ---------------------------------------------------
DRUID_DATA_SOURCE_BLACKLIST = []
"""
1) http://docs.python-guide.org/en/latest/writing/logging/
2) https://docs.python.org/2/library/logging.config.html
"""
# Console Log Settings
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
LOG_LEVEL = 'DEBUG'
# ---------------------------------------------------
# Enable Time Rotate Log Handler
# ---------------------------------------------------
# LOG_LEVEL = DEBUG, INFO, WARNING, ERROR, CRITICAL
ENABLE_TIME_ROTATE = False
TIME_ROTATE_LOG_LEVEL = 'DEBUG'
FILENAME = '/tmp/caravel.log'
ROLLOVER = 'midnight'
INTERVAL = 1
BACKUP_COUNT = 30
# Set this API key to enable Mapbox visualizations
MAPBOX_API_KEY = ""
try:
from caravel_config import * # noqa
except ImportError:
pass
if not CACHE_DEFAULT_TIMEOUT:
CACHE_DEFAULT_TIMEOUT = CACHE_CONFIG.get('CACHE_DEFAULT_TIMEOUT')
| apache-2.0 | 6,318,949,344,365,207,000 | 29.494624 | 78 | 0.590973 | false |
facebookexperimental/eden | eden/integration/snapshot/verify.py | 1 | 9902 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# pyre-strict
import abc
import os
import stat as stat_mod
import typing
from pathlib import Path
from typing import Dict, Iterator, List, Mapping, Optional, TypeVar, Union
from eden.integration.lib import hgrepo
_AnyPath = Union[Path, str]
class _DefaultObject:
pass
_DEFAULT_OBJECT: _DefaultObject = _DefaultObject()
class ExpectedFileBase(metaclass=abc.ABCMeta):
def __init__(
self, path: _AnyPath, contents: bytes, perms: int, file_type: int
) -> None:
self.path: Path = Path(path)
self.contents: bytes = contents
self.permissions: int = perms
self.file_type: int = file_type
def verify(
self, verifier: "SnapshotVerifier", path: Path, stat_info: os.stat_result
) -> None:
found_perms = stat_mod.S_IMODE(stat_info.st_mode)
if found_perms != self.permissions:
verifier.error(
f"{self.path}: expected permissions to be {self.permissions:#o}, "
f"found {found_perms:#o}"
)
found_file_type = stat_mod.S_IFMT(stat_info.st_mode)
if found_file_type != self.file_type:
verifier.error(
f"{self.path}: expected file type to be {self.file_type:#o}, "
f"found {found_file_type:#o}"
)
else:
self.verify_contents(verifier, path)
@abc.abstractmethod
def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
pass
def _error(self, msg: str) -> None:
raise ValueError(msg)
class ExpectedFile(ExpectedFileBase):
def __init__(self, path: _AnyPath, contents: bytes, perms: int = 0o644) -> None:
super().__init__(path, contents, perms, stat_mod.S_IFREG)
def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
with path.open("rb") as f:
actual_contents = f.read()
if actual_contents != self.contents:
verifier.error(
f"file contents mismatch for {self.path}:\n"
f"expected: {self.contents!r}\n"
f"actual: {actual_contents!r}"
)
class ExpectedSymlink(ExpectedFileBase):
def __init__(self, path: _AnyPath, contents: bytes, perms: int = 0o777) -> None:
super().__init__(path, contents, perms, stat_mod.S_IFLNK)
def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
actual_contents = os.readlink(bytes(path))
if actual_contents != self.contents:
verifier.error(
f"symlink contents mismatch for {self.path}:\n"
f"expected: {self.contents!r}\n"
f"actual: {actual_contents!r}"
)
class ExpectedSocket(ExpectedFileBase):
def __init__(self, path: _AnyPath, perms: int = 0o755) -> None:
super().__init__(path, b"", perms, stat_mod.S_IFSOCK)
def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
pass
_ExpectedFile = TypeVar("_ExpectedFile", bound=ExpectedFileBase)
class ExpectedFileSet(Mapping[Path, ExpectedFileBase]):
"""
ExpectedFileSet is basically a container of ExpectedFileBase objects,
but also provides some helper methods for accessing and updating entries by path.
"""
def __init__(self) -> None:
self._entries: Dict[Path, ExpectedFileBase] = {}
def __len__(self) -> int:
return len(self._entries)
def __iter__(self) -> Iterator[Path]:
return iter(self._entries.keys())
def __getitem__(self, path: _AnyPath) -> ExpectedFileBase:
key = Path(path)
return self._entries[key]
def __delitem__(self, path: _AnyPath) -> None:
key = Path(path)
del self._entries[key]
# pyre-fixme[14]: `__contains__` overrides method defined in `Mapping`
# inconsistently.
def __contains__(self, path: object) -> bool:
if isinstance(path, str):
key = Path(path)
elif isinstance(path, Path):
key = path
else:
return False
return key in self._entries
@typing.overload
def pop(self, path: _AnyPath) -> ExpectedFileBase:
...
@typing.overload # noqa: F811
def pop(self, path: _AnyPath, default: ExpectedFileBase) -> ExpectedFileBase:
...
@typing.overload # noqa: F811
def pop(self, path: _AnyPath, default: None) -> Optional[ExpectedFileBase]:
...
def pop( # noqa: F811
self,
path: _AnyPath,
default: Union[ExpectedFileBase, None, _DefaultObject] = _DEFAULT_OBJECT,
) -> Optional[ExpectedFileBase]:
key = Path(path)
if default is _DEFAULT_OBJECT:
return self._entries.pop(key)
else:
tmp = typing.cast(Optional[ExpectedFileBase], default)
return self._entries.pop(key, tmp)
def add_file(
self, path: _AnyPath, contents: bytes, perms: int = 0o644
) -> ExpectedFile:
return self.add(ExpectedFile(path=path, contents=contents, perms=perms))
def add_symlink(
self, path: _AnyPath, contents: bytes, perms: int = 0o777
) -> ExpectedSymlink:
return self.add(ExpectedSymlink(path=path, contents=contents, perms=perms))
def add_socket(self, path: _AnyPath, perms: int = 0o755) -> ExpectedSocket:
return self.add(ExpectedSocket(path=path, perms=perms))
def add(self, entry: _ExpectedFile) -> _ExpectedFile:
assert entry.path not in self
self._entries[entry.path] = entry
return entry
def set_file(
self, path: _AnyPath, contents: bytes, perms: int = 0o644
) -> ExpectedFile:
return self.set(ExpectedFile(path=path, contents=contents, perms=perms))
def set_symlink(
self, path: _AnyPath, contents: bytes, perms: int = 0o777
) -> ExpectedSymlink:
return self.set(ExpectedSymlink(path=path, contents=contents, perms=perms))
def set_socket(self, path: _AnyPath, perms: int = 0o755) -> ExpectedSocket:
return self.set(ExpectedSocket(path=path, perms=perms))
def set(self, entry: _ExpectedFile) -> _ExpectedFile:
self._entries[entry.path] = entry
return entry
class SnapshotVerifier:
def __init__(self) -> None:
self.errors: List[str] = []
self.quiet: bool = False
def error(self, message: str) -> None:
self.errors.append(message)
if not self.quiet:
print(f"==ERROR== {message}")
def verify_directory(self, path: Path, expected: ExpectedFileSet) -> None:
"""Confirm that the contents of a directory match the expected file state."""
found_files = enumerate_directory(path)
for expected_entry in expected.values():
file_stat = found_files.pop(expected_entry.path, None)
if file_stat is None:
self.error(f"{expected_entry.path}: file not present in snapshot")
continue
full_path = path / expected_entry.path
try:
expected_entry.verify(self, full_path, file_stat)
except AssertionError as ex:
self.error(f"{expected_entry.path}: {ex}")
continue
for path, stat_info in found_files.items():
if stat_mod.S_ISDIR(stat_info.st_mode):
# Don't require directories to be listed explicitly in the input files
continue
if str(path.parents[0]) == ".hg":
# Don't complain about files inside the .hg directory that the caller
# did not explicitly specify. Mercurial can create a variety of files
# here, and we don't care about checking the exact list of files it
# happened to create when the snapshot was generated.
continue
self.error(f"{path}: unexpected file present in snapshot")
def verify_hg_status(
self,
repo: hgrepo.HgRepository,
expected: Dict[str, str],
check_ignored: bool = True,
) -> None:
actual_status = repo.status(include_ignored=check_ignored)
for path, expected_char in expected.items():
actual_char = actual_status.pop(path, None)
if expected_char != actual_char:
self.error(
f"{path}: unexpected hg status difference: "
f"reported as {actual_char}, expected {expected_char}"
)
for path, actual_char in actual_status.items():
self.error(
f"{path}: unexpected hg status difference: "
f"reported as {actual_char}, expected None"
)
def enumerate_directory(path: Path) -> Dict[Path, os.stat_result]:
"""
Recursively walk a directory and return a dictionary of all of the files and
directories it contains.
Returns a dictionary of [path -> os.stat_result]
The returned paths are relative to the input directory.
"""
entries: Dict[Path, os.stat_result] = {}
_enumerate_directory_helper(path, Path(), entries)
return entries
def _enumerate_directory_helper(
root_path: Path, rel_path: Path, results: Dict[Path, os.stat_result]
) -> None:
for entry in os.scandir(root_path / rel_path):
# Current versions of typeshed don't know about the follow_symlinks argument,
# so ignore type errors on the next line.
stat_info: os.stat_result = entry.stat(follow_symlinks=False)
entry_path: Path = rel_path / entry.name
results[entry_path] = stat_info
if stat_mod.S_ISDIR(stat_info.st_mode):
_enumerate_directory_helper(root_path, entry_path, results)
| gpl-2.0 | -8,863,614,257,235,671,000 | 34.113475 | 86 | 0.608261 | false |
codelv/enaml-native | src/enamlnative/widgets/popup_window.py | 1 | 4574 | """
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on Mar 17, 2018
@author: jrm
"""
from atom.api import (
Typed, ForwardTyped, Str, Float, Coerced, Bool, Enum, observe,
)
from enaml.core.declarative import d_
from enaml.widgets.toolkit_object import ToolkitObject, ProxyToolkitObject
from .view import coerce_size, coerce_gravity
class ProxyPopupWindow(ProxyToolkitObject):
""" The abstract definition of a proxy dialgo object.
"""
#: A reference to the Label declaration.
declaration = ForwardTyped(lambda: PopupWindow)
def set_height(self, height):
raise NotImplementedError
def set_width(self, width):
raise NotImplementedError
def set_x(self, x):
raise NotImplementedError
def set_y(self, y):
raise NotImplementedError
def set_position(self, position):
raise NotImplementedError
def set_focusable(self, enabled):
raise NotImplementedError
def set_touchable(self, enabled):
raise NotImplementedError
def set_outside_touchable(self, enabled):
raise NotImplementedError
def set_background_color(self, color):
raise NotImplementedError
def set_show(self, show):
raise NotImplementedError
def set_style(self, style):
raise NotImplementedError
def set_animation(self, style):
raise NotImplementedError
class PopupWindow(ToolkitObject):
""" A popup window that may contain a view.
"""
#: Width and height or a string "match_parent" or "fill_parent"
width = d_(Coerced(int, coercer=coerce_size))
height = d_(Coerced(int, coercer=coerce_size))
#: Layout gravity
gravity = d_(Coerced(int, coercer=coerce_gravity))
#: Position
x = d_(Float(strict=False))
y = d_(Float(strict=False))
#: Set whether the popup window can be focused
focusable = d_(Bool())
#: Set whether the popup is touchable
touchable = d_(Bool(True))
#: Controls whether the pop-up will be informed of touch events outside
#: of its window.
outside_touchable = d_(Bool(True))
#: Start the popup and display it on screen (or hide if False)
show = d_(Bool())
#: Background color of the window (white by default)
background_color = d_(Str())
#: If relative, show as a dropdown on the parent view, otherwise
#: show at the position given by `x` and `y`.
position = d_(Enum('relative', 'absolute'))
#: Animation style for the PopupWindow using the @style format
#: (ex. @style/MyAnimation
animation = d_(Str())
#: PopupWindow style using the @style format
#: (ex. @style/Theme_Light_NoTitleBar_Fullscreen
style = d_(Str())
#: A reference to the proxy object.
proxy = Typed(ProxyPopupWindow)
# -------------------------------------------------------------------------
# Observers
# -------------------------------------------------------------------------
@observe('width', 'height', 'x', 'y', 'position', 'focusable', 'touchable',
'outside_touchable', 'show', 'animation', 'style',
'background_color')
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
# The superclass implementation is sufficient.
super(PopupWindow, self)._update_proxy(change)
def popup(self):
""" Show the window from code. This will initialize and activate
if needed.
Examples
--------
>>> enamldef ContextMenu(PopupWindow): popup:
attr result: lambda text: None
Button:
text = "One"
clicked ::
dialog.show = False
dialog.result(self.text)
Button:
text = "Two"
clicked ::
dialog.show = False
dialog.result(self.text)
def on_result(value):
print("User clicked: {}".format(value))
ContextMenu(result=on_result).popup()
Notes
------
This does NOT block. Callbacks should be used to handle click events
or the `show` state should be observed to know when it is closed.
"""
if not self.is_initialized:
self.initialize()
if not self.proxy_is_active:
self.activate_proxy()
self.show = True
| mit | -8,627,291,073,902,098,000 | 27.234568 | 79 | 0.593791 | false |
gst/alignak | alignak/macroresolver.py | 2 | 23875 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, [email protected]
# Nicolas Dupeux, [email protected]
# Gerhard Lausser, [email protected]
# Grégory Starck, [email protected]
# Frédéric Pégé, [email protected]
# Sebastien Coavoux, [email protected]
# Olivier Hanesse, [email protected]
# Jean Gabes, [email protected]
# Zoran Zaric, [email protected]
# David Gil, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This class resolve Macro in commands by looking at the macros list
in Class of elements. It give a property that call be callable or not.
It not callable, it's a simple property and replace the macro with the value
If callable, it's a method that is called to get the value. for example, to
get the number of service in a host, you call a method to get the
len(host.services)
"""
import re
import time
from alignak.borg import Borg
class MacroResolver(Borg):
"""MacroResolver class is used to resolve macros (in command call). See above for details"""
my_type = 'macroresolver'
# Global macros
macros = {
'TOTALHOSTSUP': '_get_total_hosts_up',
'TOTALHOSTSDOWN': '_get_total_hosts_down',
'TOTALHOSTSUNREACHABLE': '_get_total_hosts_unreachable',
'TOTALHOSTSDOWNUNHANDLED': '_get_total_hosts_unhandled',
'TOTALHOSTSUNREACHABLEUNHANDLED': '_get_total_hosts_unreachable_unhandled',
'TOTALHOSTPROBLEMS': '_get_total_host_problems',
'TOTALHOSTPROBLEMSUNHANDLED': '_get_total_host_problems_unhandled',
'TOTALSERVICESOK': '_get_total_service_ok',
'TOTALSERVICESWARNING': '_get_total_services_warning',
'TOTALSERVICESCRITICAL': '_get_total_services_critical',
'TOTALSERVICESUNKNOWN': '_get_total_services_unknown',
'TOTALSERVICESWARNINGUNHANDLED': '_get_total_services_warning_unhandled',
'TOTALSERVICESCRITICALUNHANDLED': '_get_total_services_critical_unhandled',
'TOTALSERVICESUNKNOWNUNHANDLED': '_get_total_services_unknown_unhandled',
'TOTALSERVICEPROBLEMS': '_get_total_service_problems',
'TOTALSERVICEPROBLEMSUNHANDLED': '_get_total_service_problems_unhandled',
'LONGDATETIME': '_get_long_date_time',
'SHORTDATETIME': '_get_short_date_time',
'DATE': '_get_date',
'TIME': '_get_time',
'TIMET': '_get_timet',
'PROCESSSTARTTIME': '_get_process_start_time',
'EVENTSTARTTIME': '_get_events_start_time',
}
output_macros = [
'HOSTOUTPUT',
'HOSTPERFDATA',
'HOSTACKAUTHOR',
'HOSTACKCOMMENT',
'SERVICEOUTPUT',
'SERVICEPERFDATA',
'SERVICEACKAUTHOR',
'SERVICEACKCOMMENT'
]
def init(self, conf):
"""Init macroresolver instance with conf.
Must be called once.
:param conf: conf to load
:type conf:
:return: None
"""
# For searching class and elements for ondemand
# we need link to types
self.conf = conf
self.lists_on_demand = []
self.hosts = conf.hosts
# For special void host_name handling...
self.host_class = self.hosts.inner_class
self.lists_on_demand.append(self.hosts)
self.services = conf.services
self.contacts = conf.contacts
self.lists_on_demand.append(self.contacts)
self.hostgroups = conf.hostgroups
self.lists_on_demand.append(self.hostgroups)
self.commands = conf.commands
self.servicegroups = conf.servicegroups
self.lists_on_demand.append(self.servicegroups)
self.contactgroups = conf.contactgroups
self.lists_on_demand.append(self.contactgroups)
self.illegal_macro_output_chars = conf.illegal_macro_output_chars
# Try cache :)
# self.cache = {}
def _get_macros(self, chain):
"""Get all macros of a chain
Cut '$' char and create a dict with the following structure::
{ 'MacroSTR1' : {'val': '', 'type': 'unknown'}
'MacroSTR2' : {'val': '', 'type': 'unknown'}
}
:param chain: chain to parse
:type chain: str
:return: dict with macro parsed as key
:rtype: dict
"""
# if chain in self.cache:
# return self.cache[chain]
regex = re.compile(r'(\$)')
elts = regex.split(chain)
macros = {}
in_macro = False
for elt in elts:
if elt == '$':
in_macro = not in_macro
elif in_macro:
macros[elt] = {'val': '', 'type': 'unknown'}
# self.cache[chain] = macros
if '' in macros:
del macros['']
return macros
def _get_value_from_element(self, elt, prop):
"""Get value from a element's property
the property may be a function to call.
:param elt: element
:type elt: object
:param prop: element property
:type prop: str
:return: getattr(elt, prop) or getattr(elt, prop)() (call)
:rtype: str
"""
try:
value = getattr(elt, prop)
if callable(value):
return unicode(value())
else:
return unicode(value)
except AttributeError, exp:
# Return no value
return ''
except UnicodeError, exp:
if isinstance(value, str):
return unicode(value, 'utf8', errors='ignore')
else:
return ''
def _delete_unwanted_caracters(self, chain):
"""Remove not wanted char from chain
unwanted char are illegal_macro_output_chars attribute
:param chain: chain to remove char from
:type chain: str
:return: chain cleaned
:rtype: str
"""
for char in self.illegal_macro_output_chars:
chain = chain.replace(char, '')
return chain
def get_env_macros(self, data):
"""Get all environment macros from data
For each object in data ::
* Fetch all macros in object.__class__.macros
* Fetch all customs macros in o.custom
:param data: data to get macro
:type data:
:return: dict with macro name as key and macro value as value
:rtype: dict
"""
env = {}
for obj in data:
cls = obj.__class__
macros = cls.macros
for macro in macros:
if macro.startswith("USER"):
break
prop = macros[macro]
value = self._get_value_from_element(obj, prop)
env['NAGIOS_%s' % macro] = value
if hasattr(obj, 'customs'):
# make NAGIOS__HOSTMACADDR from _MACADDR
for cmacro in obj.customs:
new_env_name = 'NAGIOS__' + obj.__class__.__name__.upper() + cmacro[1:].upper()
env[new_env_name] = obj.customs[cmacro]
return env
def resolve_simple_macros_in_string(self, c_line, data, args=None):
"""Replace macro in the command line with the real value
:param c_line: command line to modify
:type c_line: str
:param data: objects list, use to look for a specific macro
:type data:
:param args: args given to the command line, used to get "ARGN" macros.
:type args:
:return: command line with '$MACRO$' replaced with values
:rtype: str
"""
# Now we prepare the classes for looking at the class.macros
data.append(self) # For getting global MACROS
if hasattr(self, 'conf'):
data.append(self.conf) # For USERN macros
clss = [d.__class__ for d in data]
# we should do some loops for nested macros
# like $USER1$ hiding like a ninja in a $ARG2$ Macro. And if
# $USER1$ is pointing to $USER34$ etc etc, we should loop
# until we reach the bottom. So the last loop is when we do
# not still have macros :)
still_got_macros = True
nb_loop = 0
while still_got_macros:
nb_loop += 1
# Ok, we want the macros in the command line
macros = self._get_macros(c_line)
# We can get out if we do not have macros this loop
still_got_macros = (len(macros) != 0)
# print "Still go macros:", still_got_macros
# Put in the macros the type of macro for all macros
self._get_type_of_macro(macros, clss)
# Now we get values from elements
for macro in macros:
# If type ARGN, look at ARGN cutting
if macros[macro]['type'] == 'ARGN' and args is not None:
macros[macro]['val'] = self._resolve_argn(macro, args)
macros[macro]['type'] = 'resolved'
# If class, get value from properties
if macros[macro]['type'] == 'class':
cls = macros[macro]['class']
for elt in data:
if elt is not None and elt.__class__ == cls:
prop = cls.macros[macro]
macros[macro]['val'] = self._get_value_from_element(elt, prop)
# Now check if we do not have a 'output' macro. If so, we must
# delete all special characters that can be dangerous
if macro in self.output_macros:
macros[macro]['val'] = \
self._delete_unwanted_caracters(macros[macro]['val'])
if macros[macro]['type'] == 'CUSTOM':
cls_type = macros[macro]['class']
# Beware : only cut the first _HOST value, so the macro name can have it on it..
macro_name = re.split('_' + cls_type, macro, 1)[1].upper()
# Ok, we've got the macro like MAC_ADDRESS for _HOSTMAC_ADDRESS
# Now we get the element in data that have the type HOST
# and we check if it got the custom value
for elt in data:
if elt is not None and elt.__class__.my_type.upper() == cls_type:
if '_' + macro_name in elt.customs:
macros[macro]['val'] = elt.customs['_' + macro_name]
# Then look on the macromodulations, in reserver order, so
# the last to set, will be the firt to have. (yes, don't want to play
# with break and such things sorry...)
mms = getattr(elt, 'macromodulations', [])
for macromod in mms[::-1]:
# Look if the modulation got the value,
# but also if it's currently active
if '_' + macro_name in macromod.customs and macromod.is_active():
macros[macro]['val'] = macromod.customs['_' + macro_name]
if macros[macro]['type'] == 'ONDEMAND':
macros[macro]['val'] = self._resolve_ondemand(macro, data)
# We resolved all we can, now replace the macro in the command call
for macro in macros:
c_line = c_line.replace('$' + macro + '$', macros[macro]['val'])
# A $$ means we want a $, it's not a macro!
# We replace $$ by a big dirty thing to be sure to not misinterpret it
c_line = c_line.replace("$$", "DOUBLEDOLLAR")
if nb_loop > 32: # too much loop, we exit
still_got_macros = False
# We now replace the big dirty token we made by only a simple $
c_line = c_line.replace("DOUBLEDOLLAR", "$")
# print "Retuning c_line", c_line.strip()
return c_line.strip()
def resolve_command(self, com, data):
"""Resolve command macros with data
:param com: check / event handler or command call object
:type com: object
:param data: objects list, use to look for a specific macro
:type data:
:return: command line with '$MACRO$' replaced with values
:rtype: str
"""
c_line = com.command.command_line
return self.resolve_simple_macros_in_string(c_line, data, args=com.args)
def _get_type_of_macro(self, macros, clss):
r"""Set macros types
Example::
ARG\d -> ARGN,
HOSTBLABLA -> class one and set Host in class)
_HOSTTOTO -> HOST CUSTOM MACRO TOTO
SERVICESTATEID:srv-1:Load$ -> MACRO SERVICESTATEID of the service Load of host srv-1
:param macros: macros list
:type macros: list[str]
:param clss: classes list, used to tag class macros
:type clss:
:return: None
"""
for macro in macros:
# ARGN Macros
if re.match(r'ARG\d', macro):
macros[macro]['type'] = 'ARGN'
continue
# USERN macros
# are managed in the Config class, so no
# need to look that here
elif re.match(r'_HOST\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'HOST'
continue
elif re.match(r'_SERVICE\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'SERVICE'
# value of macro: re.split('_HOST', '_HOSTMAC_ADDRESS')[1]
continue
elif re.match(r'_CONTACT\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'CONTACT'
continue
# On demand macro
elif len(macro.split(':')) > 1:
macros[macro]['type'] = 'ONDEMAND'
continue
# OK, classical macro...
for cls in clss:
if macro in cls.macros:
macros[macro]['type'] = 'class'
macros[macro]['class'] = cls
continue
def _resolve_argn(self, macro, args):
"""Get argument from macro name
ie : $ARG3$ -> args[2]
:param macro: macro to parse
:type macro:
:param args: args given to command line
:type args:
:return: argument at position N-1 in args table (where N is the int parsed)
:rtype: None | str
"""
# first, get the number of args
_id = None
matches = re.search(r'ARG(?P<id>\d+)', macro)
if matches is not None:
_id = int(matches.group('id')) - 1
try:
return args[_id]
except IndexError:
return ''
def _resolve_ondemand(self, macro, data):
"""Get on demand macro value
:param macro: macro to parse
:type macro:
:param data: data to get value from
:type data:
:return: macro value
:rtype: str
"""
# print "\nResolving macro", macro
elts = macro.split(':')
nb_parts = len(elts)
macro_name = elts[0]
# Len 3 == service, 2 = all others types...
if nb_parts == 3:
val = ''
# print "Got a Service on demand asking...", elts
(host_name, service_description) = (elts[1], elts[2])
# host_name can be void, so it's the host in data
# that is important. We use our self.host_class to
# find the host in the data :)
if host_name == '':
for elt in data:
if elt is not None and elt.__class__ == self.host_class:
host_name = elt.host_name
# Ok now we get service
serv = self.services.find_srv_by_name_and_hostname(host_name, service_description)
if serv is not None:
cls = serv.__class__
prop = cls.macros[macro_name]
val = self._get_value_from_element(serv, prop)
# print "Got val:", val
return val
# Ok, service was easy, now hard part
else:
val = ''
elt_name = elts[1]
# Special case: elt_name can be void
# so it's the host where it apply
if elt_name == '':
for elt in data:
if elt is not None and elt.__class__ == self.host_class:
elt_name = elt.host_name
for od_list in self.lists_on_demand:
cls = od_list.inner_class
# We search our type by looking at the macro
if macro_name in cls.macros:
prop = cls.macros[macro_name]
i = od_list.find_by_name(elt_name)
if i is not None:
val = self._get_value_from_element(i, prop)
# Ok we got our value :)
break
return val
return ''
def _get_long_date_time(self):
"""Get long date time
Example : Fri 15 May 11:42:39 CEST 2009
:return: long date local time
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
"""
return time.strftime("%a %d %b %H:%M:%S %Z %Y").decode('UTF-8', 'ignore')
def _get_short_date_time(self):
"""Get short date time
Example : 10-13-2000 00:30:28
:return: short date local time
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
"""
return time.strftime("%d-%m-%Y %H:%M:%S")
def _get_date(self):
"""Get date
Example : 10-13-2000
:return: local date
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
"""
return time.strftime("%d-%m-%Y")
def _get_time(self):
"""Get date time
Example : 00:30:28
:return: date local time
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
"""
return time.strftime("%H:%M:%S")
def _get_timet(self):
"""Get epoch time
Example : 1437143291
:return: timestamp
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
"""
return str(int(time.time()))
def _tot_hosts_by_state(self, state):
"""Generic function to get the number of host in the specified state
:param state: state to filter on
:type state:
:return: number of host in state *state*
:rtype: int
TODO: Should be moved
"""
return sum(1 for h in self.hosts if h.state == state)
_get_total_hosts_up = lambda s: s._tot_hosts_by_state('UP')
_get_total_hosts_down = lambda s: s._tot_hosts_by_state('DOWN')
_get_total_hosts_unreachable = lambda s: s._tot_hosts_by_state('UNREACHABLE')
def _get_total_hosts_unreachable_unhandled(self):
"""DOES NOTHING( Should get the number of unreachable hosts not handled)
:return: 0 always
:rtype: int
TODO: Implement this
"""
return 0
def _get_total_hosts_problems(self):
"""Get the number of hosts that are a problem
:return: number of hosts with is_problem attribute True
:rtype: int
"""
return sum(1 for h in self.hosts if h.is_problem)
def _get_total_hosts_problems_unhandled(self):
"""DOES NOTHING( Should get the number of host problems not handled)
:return: 0 always
:rtype: int
TODO: Implement this
"""
return 0
def _tot_services_by_state(self, state):
"""Generic function to get the number of service in the specified state
:param state: state to filter on
:type state:
:return: number of service in state *state*
:rtype: int
TODO: Should be moved
"""
return sum(1 for s in self.services if s.state == state)
_get_total_service_ok = lambda s: s._tot_services_by_state('OK')
_get_total_service_warning = lambda s: s._tot_services_by_state('WARNING')
_get_total_service_critical = lambda s: s._tot_services_by_state('CRITICAL')
_get_total_service_unknown = lambda s: s._tot_services_by_state('UNKNOWN')
def _get_total_services_warning_unhandled(self):
"""DOES NOTHING (Should get the number of warning services not handled)
:return: 0 always
:rtype: int
TODO: Implement this
"""
return 0
def _get_total_services_critical_unhandled(self):
"""DOES NOTHING (Should get the number of critical services not handled)
:return: 0 always
:rtype: int
TODO: Implement this
"""
return 0
def _get_total_services_unknown_unhandled(self):
"""DOES NOTHING (Should get the number of unknown services not handled)
:return: 0 always
:rtype: int
TODO: Implement this
"""
return 0
def _get_total_service_problems(self):
"""Get the number of services that are a problem
:return: number of services with is_problem attribute True
:rtype: int
"""
return sum(1 for s in self.services if s.is_problem)
def _get_total_service_problems_unhandled(self):
"""DOES NOTHING (Should get the number of service problems not handled)
:return: 0 always
:rtype: int
TODO: Implement this
"""
return 0
def _get_process_start_time(self):
"""DOES NOTHING ( Should get process start time)
:return: 0 always
:rtype: int
TODO: Implement this
"""
return 0
def _get_events_start_time(self):
"""DOES NOTHING ( Should get events start time)
:return: 0 always
:rtype: int
TODO: Implement this
"""
return 0
| agpl-3.0 | -6,827,662,738,583,955,000 | 35.442748 | 100 | 0.556054 | false |
redstorm45/factory-maker | graphics/text.py | 1 | 1178 | '''
Copyright 2014 Pierre Cadart
This file is part of Factory Maker.
Factory Maker is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Factory Maker is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Factory Maker. If not, see <http://www.gnu.org/licenses/>.
Description:
This file can create textures of text, for use in buttons for example
'''
import globalVars as g
#
# first argument is the name of the font
# second argument is the text to write
def createText(self):
color = None
try:
color = self.links[2]
except:
color = g.color.textButtonMenu
self.surf = g.tManager.fonts[self.links[0]].render(self.links[1],True,color)
| gpl-3.0 | -1,593,186,073,715,944,000 | 27.731707 | 80 | 0.696944 | false |
vitormazzi/django-jython | doj/backends/zxjdbc/oracle/query.py | 1 | 6130 | """
Custom Query class for Oracle.
Derived from: django.db.models.sql.query.Query
"""
import datetime
from django.db.backends import util
# Cache. Maps default query class to new Oracle query class.
_classes = {}
def query_class(QueryClass, Database):
"""
Returns a custom django.db.models.sql.query.Query subclass that is
appropriate for Oracle.
The 'Database' module (cx_Oracle) is passed in here so that all the setup
required to import it only needs to be done by the calling module.
"""
global _classes
try:
return _classes[QueryClass]
except KeyError:
pass
class OracleQuery(QueryClass):
def resolve_columns(self, row, fields=()):
index_start = len(self.extra_select.keys())
values = [self.convert_values(v, type(v)) for v in row[:index_start]]
for value, field in map(None, row[index_start:], fields):
values.append(self.convert_values(value, field))
return values
def convert_values(self, value, field):
from django.db.models.fields import DateField, DateTimeField, \
TimeField, BooleanField, NullBooleanField, DecimalField, FloatField, Field
# Oracle stores empty strings as null. We need to undo this in
# order to adhere to the Django convention of using the empty
# string instead of null, but only if the field accepts the
# empty string.
if value is None:
pass
elif value is None and isinstance(field, Field) and field.empty_strings_allowed:
value = u''
# Convert 1 or 0 to True or False
elif isinstance(value, float):
value = float(value)
# Added 04-26-2009 to repair "Invalid literal for int() base 10" error
elif isinstance(value,int):
value = int(value)
elif field is not None and field.get_internal_type() == 'AutoField':
value = int(float(value))
elif value in (1, 0) and field is not None and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
value = bool(value)
# Force floats to the correct type
elif field is not None and field.get_internal_type() == 'FloatField':
value = float(value)
# Convert floats to decimals
elif field is not None and field.get_internal_type() == 'DecimalField':
value = util.typecast_decimal(field.format_number(value))
elif field is not None and field.get_internal_type() == 'SmallIntegerField':
value = util.typecast_decimal(field.format_number(value))
return value
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list
of parameters. This is overriden from the original Query class
to handle the additional SQL Oracle requires to emulate LIMIT
and OFFSET.
If 'with_limits' is False, any limit/offset information is not
included in the query.
"""
# The `do_offset` flag indicates whether we need to construct
# the SQL needed to use limit/offset with Oracle.
do_offset = with_limits and (self.high_mark is not None
or self.low_mark)
if not do_offset:
sql, params = super(OracleQuery, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
else:
# `get_columns` needs to be called before `get_ordering` to
# populate `_select_alias`.
self.pre_sql_setup()
self.get_columns()
#ordering = self.get_ordering()
#
# Removed Ordering on 03/27/2009 as it caused error:
# TypeError: sequence item 0: expected string, list found
#
ordering = False
# Oracle's ROW_NUMBER() function requires an ORDER BY clause.
if ordering:
rn_orderby = ', '.join(ordering)
else:
# Create a default ORDER BY since none was specified.
qn = self.quote_name_unless_alias
opts = self.model._meta
rn_orderby = '%s.%s' % (qn(opts.db_table),
qn(opts.fields[0].db_column or opts.fields[0].column))
# Ensure the base query SELECTs our special "_RN" column
self.extra_select['_RN'] = ('ROW_NUMBER() OVER (ORDER BY %s)'
% rn_orderby, '')
sql, params = super(OracleQuery, self).as_sql(with_limits=False,
with_col_aliases=True)
# Wrap the base query in an outer SELECT * with boundaries on
# the "_RN" column. This is the canonical way to emulate LIMIT
# and OFFSET on Oracle.
sql = 'SELECT * FROM (%s) WHERE "_RN" > %d' % (sql, self.low_mark)
if self.high_mark is not None:
sql = '%s AND "_RN" <= %d' % (sql, self.high_mark)
return sql, params
def set_limits(self, low=None, high=None):
super(OracleQuery, self).set_limits(low, high)
# We need to select the row number for the LIMIT/OFFSET sql.
# A placeholder is added to extra_select now, because as_sql is
# too late to be modifying extra_select. However, the actual sql
# depends on the ordering, so that is generated in as_sql.
self.extra_select['_RN'] = ('1', '')
def clear_limits(self):
super(OracleQuery, self).clear_limits()
if '_RN' in self.extra_select:
del self.extra_select['_RN']
_classes[QueryClass] = OracleQuery
return OracleQuery
| bsd-3-clause | 7,167,331,611,330,312,000 | 44.073529 | 125 | 0.558564 | false |
simplegeo/sqlalchemy | lib/sqlalchemy/dialects/mssql/base.py | 1 | 48621 | # mssql.py
"""Support for the Microsoft SQL Server database.
Connecting
----------
See the individual driver sections below for details on connecting.
Auto Increment Behavior
-----------------------
``IDENTITY`` columns are supported by using SQLAlchemy
``schema.Sequence()`` objects. In other words::
Table('test', mss_engine,
Column('id', Integer,
Sequence('blah',100,10), primary_key=True),
Column('name', String(20))
).create()
would yield::
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL,
)
Note that the ``start`` and ``increment`` values for sequences are
optional and will default to 1,1.
Implicit ``autoincrement`` behavior works the same in MSSQL as it
does in other dialects and results in an ``IDENTITY`` column.
* Support for ``SET IDENTITY_INSERT ON`` mode (automagic on / off for
``INSERT`` s)
* Support for auto-fetching of ``@@IDENTITY/@@SCOPE_IDENTITY()`` on
``INSERT``
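If a value for the ``IDENTITY`` column is passed explicitly, the dialect
wraps the statement with ``SET IDENTITY_INSERT ... ON/OFF``. As a rough
sketch (assuming the ``test`` table above has been assigned to a variable
named ``test`` and its metadata is bound to an engine)::
    test.insert().execute(id=200, name='explicit id')
emits approximately::
    SET IDENTITY_INSERT test ON
    INSERT INTO test (id, name) VALUES (200, 'explicit id')
    SET IDENTITY_INSERT test OFF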
Collation Support
-----------------
MSSQL-specific string types support a collation parameter that
creates a column-level collation for the column. The
collation parameter accepts a Windows Collation Name or a SQL
Collation Name. Supported types are MSChar, MSNChar, MSString,
MSNVarchar, MSText, and MSNText. For example::
Column('login', String(32, collation='Latin1_General_CI_AS'))
will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
LIMIT/OFFSET Support
--------------------
MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
supported directly through the ``TOP`` Transact SQL keyword::
select.limit
will yield::
SELECT TOP n
If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.
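As an illustration only (``accounts`` is a hypothetical ``Table`` object)::
    select([accounts]).order_by(accounts.c.id).limit(10).offset(20)
renders on SQL Server 2005 and above as a subquery using
``ROW_NUMBER() OVER (ORDER BY accounts.id)``, filtered on the generated
row number. An ``ORDER BY`` is required whenever an offset is used.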
Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::
name VARCHAR(20)
If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.
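For example, the following column definitions (names are illustrative
only)::
    Column('title', String(20), nullable=False)
    Column('subtitle', String(20), nullable=True)
    Column('notes', String(20), nullable=None)
would render ``NOT NULL``, ``NULL``, and no nullability specification,
respectively.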
Date / Time Handling
--------------------
DATE and TIME are supported. Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.
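A minimal sketch of a table using these types (``metadata`` is assumed
to be an existing ``MetaData`` object)::
    Table('events', metadata,
        Column('day', Date),
        Column('start_time', Time)
    )
renders ``day DATE`` and ``start_time TIME`` on SQL Server 2008 and
above; on earlier versions both columns are emitted as ``DATETIME``.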
Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, running a database that
is compatible with SQL2000 while running on a SQL2005 database
server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
statements that are unable to be parsed by the database server.
Known Issues
------------
* No support for more than one ``IDENTITY`` column per table
"""
import datetime, decimal, inspect, operator, sys, re
import itertools
from sqlalchemy import sql, schema as sa_schema, exc, util
from sqlalchemy.sql import select, compiler, expression, \
operators as sql_operators, \
functions as sql_functions, util as sql_util
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy import processors
from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
VARBINARY, BLOB
from sqlalchemy.dialects.mssql import information_schema as ischema
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
RESERVED_WORDS = set(
['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
'containstable', 'continue', 'convert', 'create', 'cross', 'current',
'current_date', 'current_time', 'current_timestamp', 'current_user',
'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
'reconfigure', 'references', 'replication', 'restore', 'restrict',
'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
'writetext',
])
class REAL(sqltypes.Float):
"""A type for ``real`` numbers."""
__visit_name__ = 'REAL'
def __init__(self):
super(REAL, self).__init__(precision=24)
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
# MSSQL DATE/TIME types have varied behavior, sometimes returning
# strings. MSDate/TIME check for everything, and always
# filter bind parameters into datetime objects (required by pyodbc,
# not sure about other dialects).
class _MSDate(sqltypes.Date):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
_reg = re.compile(r"(\d+)-(\d+)-(\d+)")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.date()
elif isinstance(value, basestring):
return datetime.date(*[
int(x or 0)
for x in self._reg.match(value).groups()
])
else:
return value
return process
class TIME(sqltypes.TIME):
def __init__(self, precision=None, **kwargs):
self.precision = precision
super(TIME, self).__init__()
__zero_date = datetime.date(1900, 1, 1)
def bind_processor(self, dialect):
def process(value):
if isinstance(value, datetime.datetime):
value = datetime.datetime.combine(
self.__zero_date, value.time())
elif isinstance(value, datetime.time):
value = datetime.datetime.combine(self.__zero_date, value)
return value
return process
_reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d+))?")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.time()
elif isinstance(value, basestring):
return datetime.time(*[
int(x or 0)
for x in self._reg.match(value).groups()])
else:
return value
return process
class _DateTimeBase(object):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
pass
class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = 'SMALLDATETIME'
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = 'DATETIME2'
def __init__(self, precision=None, **kwargs):
self.precision = precision
# TODO: is this not an Interval ?
class DATETIMEOFFSET(sqltypes.TypeEngine):
__visit_name__ = 'DATETIMEOFFSET'
def __init__(self, precision=None, **kwargs):
self.precision = precision
class _StringType(object):
"""Base for MSSQL string types."""
def __init__(self, collation=None):
self.collation = collation
class TEXT(_StringType, sqltypes.TEXT):
"""MSSQL TEXT type, for variable-length text up to 2^31 characters."""
def __init__(self, *args, **kw):
"""Construct a TEXT.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.Text.__init__(self, *args, **kw)
class NTEXT(_StringType, sqltypes.UnicodeText):
"""MSSQL NTEXT type, for variable-length unicode text up to 2^30
characters."""
__visit_name__ = 'NTEXT'
def __init__(self, *args, **kwargs):
"""Construct a NTEXT.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kwargs.pop('collation', None)
_StringType.__init__(self, collation)
length = kwargs.pop('length', None)
sqltypes.UnicodeText.__init__(self, length, **kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""MSSQL VARCHAR type, for variable-length non-Unicode data with a maximum
of 8,000 characters."""
def __init__(self, *args, **kw):
"""Construct a VARCHAR.
        :param length: Optional, maximum data length, in characters.
:param convert_unicode: defaults to False. If True, convert
``unicode`` data sent to the database to a ``str``
bytestring, and convert bytestrings coming back from the
database into ``unicode``.
Bytestrings are encoded using the dialect's
:attr:`~sqlalchemy.engine.base.Dialect.encoding`, which
defaults to `utf-8`.
If False, may be overridden by
:attr:`sqlalchemy.engine.base.Dialect.convert_unicode`.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.VARCHAR.__init__(self, *args, **kw)
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
"""MSSQL NVARCHAR type.
For variable-length unicode character data up to 4,000 characters."""
def __init__(self, *args, **kw):
"""Construct a NVARCHAR.
:param length: Optional, Maximum data length, in characters.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.NVARCHAR.__init__(self, *args, **kw)
class CHAR(_StringType, sqltypes.CHAR):
"""MSSQL CHAR type, for fixed-length non-Unicode data with a maximum
of 8,000 characters."""
def __init__(self, *args, **kw):
"""Construct a CHAR.
        :param length: Optional, maximum data length, in characters.
:param convert_unicode: defaults to False. If True, convert
``unicode`` data sent to the database to a ``str``
bytestring, and convert bytestrings coming back from the
database into ``unicode``.
Bytestrings are encoded using the dialect's
:attr:`~sqlalchemy.engine.base.Dialect.encoding`, which
defaults to `utf-8`.
If False, may be overridden by
:attr:`sqlalchemy.engine.base.Dialect.convert_unicode`.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.CHAR.__init__(self, *args, **kw)
class NCHAR(_StringType, sqltypes.NCHAR):
"""MSSQL NCHAR type.
For fixed-length unicode character data up to 4,000 characters."""
def __init__(self, *args, **kw):
"""Construct an NCHAR.
:param length: Optional, Maximum data length, in characters.
:param collation: Optional, a column-level collation for this string
value. Accepts a Windows Collation Name or a SQL Collation Name.
"""
collation = kw.pop('collation', None)
_StringType.__init__(self, collation)
sqltypes.NCHAR.__init__(self, *args, **kw)
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
__visit_name__ = 'MONEY'
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = 'SMALLMONEY'
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class SQL_VARIANT(sqltypes.TypeEngine):
__visit_name__ = 'SQL_VARIANT'
# old names.
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT
ischema_names = {
'int' : INTEGER,
'bigint': BIGINT,
'smallint' : SMALLINT,
'tinyint' : TINYINT,
'varchar' : VARCHAR,
'nvarchar' : NVARCHAR,
'char' : CHAR,
'nchar' : NCHAR,
'text' : TEXT,
'ntext' : NTEXT,
'decimal' : DECIMAL,
'numeric' : NUMERIC,
'float' : FLOAT,
'datetime' : DATETIME,
'datetime2' : DATETIME2,
'datetimeoffset' : DATETIMEOFFSET,
'date': DATE,
'time': TIME,
'smalldatetime' : SMALLDATETIME,
'binary' : BINARY,
'varbinary' : VARBINARY,
'bit': BIT,
'real' : REAL,
'image' : IMAGE,
'timestamp': TIMESTAMP,
'money': MONEY,
'smallmoney': SMALLMONEY,
'uniqueidentifier': UNIQUEIDENTIFIER,
'sql_variant': SQL_VARIANT,
}
class MSTypeCompiler(compiler.GenericTypeCompiler):
def _extend(self, spec, type_):
"""Extend a string-type declaration with standard SQL
COLLATE annotations.
"""
if getattr(type_, 'collation', None):
collation = 'COLLATE %s' % type_.collation
else:
collation = None
if type_.length:
spec = spec + "(%d)" % type_.length
return ' '.join([c for c in (spec, collation)
if c is not None])
def visit_FLOAT(self, type_):
precision = getattr(type_, 'precision', None)
if precision is None:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {'precision': precision}
def visit_REAL(self, type_):
return "REAL"
def visit_TINYINT(self, type_):
return "TINYINT"
def visit_DATETIMEOFFSET(self, type_):
if type_.precision:
return "DATETIMEOFFSET(%s)" % type_.precision
else:
return "DATETIMEOFFSET"
def visit_TIME(self, type_):
precision = getattr(type_, 'precision', None)
if precision:
return "TIME(%s)" % precision
else:
return "TIME"
def visit_DATETIME2(self, type_):
precision = getattr(type_, 'precision', None)
if precision:
return "DATETIME2(%s)" % precision
else:
return "DATETIME2"
def visit_SMALLDATETIME(self, type_):
return "SMALLDATETIME"
def visit_unicode(self, type_):
return self.visit_NVARCHAR(type_)
def visit_unicode_text(self, type_):
return self.visit_NTEXT(type_)
def visit_NTEXT(self, type_):
return self._extend("NTEXT", type_)
def visit_TEXT(self, type_):
return self._extend("TEXT", type_)
def visit_VARCHAR(self, type_):
return self._extend("VARCHAR", type_)
def visit_CHAR(self, type_):
return self._extend("CHAR", type_)
def visit_NCHAR(self, type_):
return self._extend("NCHAR", type_)
def visit_NVARCHAR(self, type_):
return self._extend("NVARCHAR", type_)
def visit_date(self, type_):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_)
else:
return self.visit_DATE(type_)
def visit_time(self, type_):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_)
else:
return self.visit_TIME(type_)
def visit_large_binary(self, type_):
return self.visit_IMAGE(type_)
def visit_IMAGE(self, type_):
return "IMAGE"
def visit_boolean(self, type_):
return self.visit_BIT(type_)
def visit_BIT(self, type_):
return "BIT"
def visit_MONEY(self, type_):
return "MONEY"
def visit_SMALLMONEY(self, type_):
return 'SMALLMONEY'
def visit_UNIQUEIDENTIFIER(self, type_):
return "UNIQUEIDENTIFIER"
def visit_SQL_VARIANT(self, type_):
return 'SQL_VARIANT'
class MSExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
_select_lastrowid = False
_result_proxy = None
_lastrowid = None
def pre_exec(self):
"""Activate IDENTITY_INSERT if needed."""
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = \
seq_column.key in self.compiled_parameters[0]
else:
self._enable_identity_insert = False
self._select_lastrowid = insert_has_sequence and \
not self.compiled.returning and \
not self._enable_identity_insert and \
not self.executemany
if self._enable_identity_insert:
self.cursor.execute("SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl))
def post_exec(self):
"""Disable IDENTITY_INSERT if enabled."""
if self._select_lastrowid:
if self.dialect.use_scope_identity:
self.cursor.execute(
"SELECT scope_identity() AS lastrowid", ())
else:
self.cursor.execute("SELECT @@identity AS lastrowid", ())
# fetchall() ensures the cursor is consumed without closing it
row = self.cursor.fetchall()[0]
self._lastrowid = int(row[0])
if (self.isinsert or self.isupdate or self.isdelete) and \
self.compiled.returning:
self._result_proxy = base.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.
format_table(self.compiled.statement.table)
)
def get_lastrowid(self):
return self._lastrowid
def handle_dbapi_exception(self, e):
if self._enable_identity_insert:
try:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.\
format_table(self.compiled.statement.table)
)
except:
pass
def get_result_proxy(self):
if self._result_proxy:
return self._result_proxy
else:
return base.ResultProxy(self)
class MSSQLCompiler(compiler.SQLCompiler):
returning_precedes_values = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond',
'microseconds': 'microsecond'
})
def __init__(self, *args, **kwargs):
super(MSSQLCompiler, self).__init__(*args, **kwargs)
self.tablealiases = {}
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_current_date_func(self, fn, **kw):
return "GETDATE()"
def visit_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_char_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_concat_op(self, binary, **kw):
return "%s + %s" % \
(self.process(binary.left, **kw),
self.process(binary.right, **kw))
def visit_match_op(self, binary, **kw):
return "CONTAINS (%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw))
def get_select_precolumns(self, select):
""" MS-SQL puts TOP, it's version of LIMIT here """
if select._distinct or select._limit:
s = select._distinct and "DISTINCT " or ""
if select._limit:
if not select._offset:
s += "TOP %s " % (select._limit,)
return s
return compiler.SQLCompiler.get_select_precolumns(self, select)
def limit_clause(self, select):
# Limit in mssql is after the select keyword
return ""
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``row_number()`` criterion.
"""
if not getattr(select, '_mssql_visit', None) and select._offset:
# to use ROW_NUMBER(), an ORDER BY is required.
orderby = self.process(select._order_by_clause)
if not orderby:
raise exc.InvalidRequestError('MSSQL requires an order_by when '
'using an offset.')
_offset = select._offset
_limit = select._limit
select._mssql_visit = True
select = select.column(
sql.literal_column("ROW_NUMBER() OVER (ORDER BY %s)" \
% orderby).label("mssql_rn")
).order_by(None).alias()
limitselect = sql.select([c for c in select.c if
c.key!='mssql_rn'])
limitselect.append_whereclause("mssql_rn>%d" % _offset)
if _limit is not None:
limitselect.append_whereclause("mssql_rn<=%d" %
(_limit + _offset))
return self.process(limitselect, iswrapper=True, **kwargs)
else:
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
def _schema_aliased_table(self, table):
if getattr(table, 'schema', None) is not None:
if table not in self.tablealiases:
self.tablealiases[table] = table.alias()
return self.tablealiases[table]
else:
return None
def visit_table(self, table, mssql_aliased=False, **kwargs):
if mssql_aliased:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
# alias schema-qualified tables
alias = self._schema_aliased_table(table)
if alias is not None:
return self.process(alias, mssql_aliased=True, **kwargs)
else:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
def visit_alias(self, alias, **kwargs):
# translate for schema-qualified table aliases
self.tablealiases[alias.original] = alias
kwargs['mssql_aliased'] = True
return super(MSSQLCompiler, self).visit_alias(alias, **kwargs)
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % \
(field, self.process(extract.expr, **kw))
def visit_rollback_to_savepoint(self, savepoint_stmt):
return ("ROLLBACK TRANSACTION %s"
% self.preparer.format_savepoint(savepoint_stmt))
def visit_column(self, column, result_map=None, **kwargs):
if column.table is not None and \
(not self.isupdate and not self.isdelete) or self.is_subquery():
# translate for schema-qualified table aliases
t = self._schema_aliased_table(column.table)
if t is not None:
converted = expression._corresponding_column_or_error(
t, column)
if result_map is not None:
result_map[column.name.lower()] = \
(column.name, (column, ),
column.type)
return super(MSSQLCompiler, self).\
visit_column(converted,
result_map=None, **kwargs)
return super(MSSQLCompiler, self).visit_column(column,
result_map=result_map,
**kwargs)
def visit_binary(self, binary, **kwargs):
"""Move bind parameters to the right-hand side of an operator, where
possible.
"""
if (
isinstance(binary.left, expression._BindParamClause)
and binary.operator == operator.eq
and not isinstance(binary.right, expression._BindParamClause)
):
return self.process(
expression._BinaryExpression(binary.right,
binary.left,
binary.operator),
**kwargs)
else:
if (
(binary.operator is operator.eq or
binary.operator is operator.ne)
and (
(isinstance(binary.left, expression._FromGrouping)
and isinstance(binary.left.element,
expression._ScalarSelect))
or (isinstance(binary.right, expression._FromGrouping)
and isinstance(binary.right.element,
expression._ScalarSelect))
or isinstance(binary.left, expression._ScalarSelect)
or isinstance(binary.right, expression._ScalarSelect)
)
):
op = binary.operator == operator.eq and "IN" or "NOT IN"
return self.process(
expression._BinaryExpression(binary.left,
binary.right, op),
**kwargs)
return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
def returning_clause(self, stmt, returning_cols):
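        # Illustrative only: an INSERT with returning(table.c.id) renders
        # roughly as "OUTPUT inserted.id"; DELETE statements reference the
        # "deleted" pseudo-table instead.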
if self.isinsert or self.isupdate:
target = stmt.table.alias("inserted")
else:
target = stmt.table.alias("deleted")
adapter = sql_util.ClauseAdapter(target)
def col_label(col):
adapted = adapter.traverse(col)
if isinstance(col, expression._Label):
return adapted.label(c.key)
else:
return self.label_select_column(None, adapted, asfrom=False)
columns = [
self.process(
col_label(c),
within_columns_clause=True,
result_map=self.result_map
)
for c in expression._select_iterables(returning_cols)
]
return 'OUTPUT ' + ', '.join(columns)
def label_select_column(self, select, column, asfrom):
if isinstance(column, expression.Function):
return column.label(None)
else:
return super(MSSQLCompiler, self).\
label_select_column(select, column, asfrom)
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
# SQLAlchemy doesn't use
return ''
def order_by_clause(self, select, **kw):
order_by = self.process(select._order_by_clause, **kw)
# MSSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
class MSSQLStrictCompiler(MSSQLCompiler):
"""A subclass of MSSQLCompiler which disables the usage of bind
parameters where not allowed natively by MS-SQL.
A dialect may use this compiler on a platform where native
binds are used.
"""
ansi_bind_rules = True
def visit_in_op(self, binary, **kw):
kw['literal_binds'] = True
return "%s IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_notin_op(self, binary, **kw):
kw['literal_binds'] = True
return "%s NOT IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_function(self, func, **kw):
kw['literal_binds'] = True
return super(MSSQLStrictCompiler, self).visit_function(func, **kw)
def render_literal_value(self, value, type_):
"""
For date and datetime values, convert to a string
format acceptable to MSSQL. That seems to be the
so-called ODBC canonical date format which looks
like this:
yyyy-mm-dd hh:mi:ss.mmm(24h)
For other data types, call the base class implementation.
"""
# datetime and date are both subclasses of datetime.date
if issubclass(type(value), datetime.date):
# SQL Server wants single quotes around the date string.
return "'" + str(value) + "'"
else:
return super(MSSQLStrictCompiler, self).\
render_literal_value(value, type_)
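# Illustrative example (added for clarity, not part of the original source):
# with ansi_bind_rules in effect, date and datetime literals are inlined using
# the ODBC canonical format described in render_literal_value(); e.g. a value
# of datetime.date(2011, 3, 15) is rendered as the string "'2011-03-15'" and
# datetime.datetime(2011, 3, 15, 12, 30) as "'2011-03-15 12:30:00'"
# (example values are hypothetical).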
class MSDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = (self.preparer.format_column(column) + " "
+ self.dialect.type_compiler.process(column.type))
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
if column.table is None:
raise exc.InvalidRequestError(
"mssql requires Table-bound columns "
"in order to generate DDL")
seq_col = column.table._autoincrement_column
        # install an IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = isinstance(column.default, sa_schema.Sequence) and \
column.default
if sequence:
start, increment = sequence.start or 1, \
sequence.increment or 1
else:
start, increment = 1, 1
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
return colspec
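    # Illustrative example (added for clarity, not part of the original source):
    # for a table whose implicit autoincrement primary key column is named
    # ``id``, this method would produce roughly
    #
    #   id INTEGER NOT NULL IDENTITY(1,1)
    #
    # while a nullable column with a default would come out as e.g.
    #
    #   name VARCHAR(50) NULL DEFAULT 'unknown'
    #
    # (column names, types and defaults above are hypothetical).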
def visit_drop_index(self, drop):
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(drop.element.table.name),
self.preparer.quote(
self._validate_identifier(drop.element.name, False),
drop.element.quote)
)
class MSIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect):
super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
final_quote=']')
def _escape_identifier(self, value):
return value
def quote_schema(self, schema, force=True):
"""Prepare a quoted table and schema name."""
result = '.'.join([self.quote(x, force) for x in schema.split('.')])
return result
class MSDialect(default.DefaultDialect):
name = 'mssql'
supports_default_values = True
supports_empty_insert = False
execution_ctx_cls = MSExecutionContext
use_scope_identity = True
max_identifier_length = 128
schema_name = "dbo"
colspecs = {
sqltypes.DateTime : _MSDateTime,
sqltypes.Date : _MSDate,
sqltypes.Time : TIME,
}
ischema_names = ischema_names
supports_native_boolean = False
supports_unicode_binds = True
postfetch_lastrowid = True
server_version_info = ()
statement_compiler = MSSQLCompiler
ddl_compiler = MSDDLCompiler
type_compiler = MSTypeCompiler
preparer = MSIdentifierPreparer
def __init__(self,
query_timeout=None,
use_scope_identity=True,
max_identifier_length=None,
schema_name=u"dbo", **opts):
self.query_timeout = int(query_timeout or 0)
self.schema_name = schema_name
self.use_scope_identity = use_scope_identity
self.max_identifier_length = int(max_identifier_length or 0) or \
self.max_identifier_length
super(MSDialect, self).__init__(**opts)
def do_savepoint(self, connection, name):
util.warn("Savepoint support in mssql is experimental and "
"may lead to data loss.")
connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
connection.execute("SAVE TRANSACTION %s" % name)
def do_release_savepoint(self, connection, name):
pass
def initialize(self, connection):
super(MSDialect, self).initialize(connection)
if self.server_version_info[0] not in range(8, 17):
# FreeTDS with version 4.2 seems to report here
# a number like "95.10.255". Don't know what
# that is. So emit warning.
util.warn(
"Unrecognized server version info '%s'. Version specific "
"behaviors may not function properly. If using ODBC "
"with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, "
"is configured in the FreeTDS configuration." %
".".join(str(x) for x in self.server_version_info) )
if self.server_version_info >= MS_2005_VERSION and \
'implicit_returning' not in self.__dict__:
self.implicit_returning = True
def _get_default_schema_name(self, connection):
user_name = connection.scalar("SELECT user_name() as user_name;")
if user_name is not None:
# now, get the default schema
query = """
SELECT default_schema_name FROM
sys.database_principals
WHERE name = ?
AND type = 'S'
"""
try:
default_schema_name = connection.scalar(query, [user_name])
if default_schema_name is not None:
return unicode(default_schema_name)
except:
pass
return self.schema_name
def has_table(self, connection, tablename, schema=None):
current_schema = schema or self.default_schema_name
columns = ischema.columns
if current_schema:
whereclause = sql.and_(columns.c.table_name==tablename,
columns.c.table_schema==current_schema)
else:
whereclause = columns.c.table_name==tablename
s = sql.select([columns], whereclause)
c = connection.execute(s)
return c.first() is not None
@reflection.cache
def get_schema_names(self, connection, **kw):
s = sql.select([ischema.schemata.c.schema_name],
order_by=[ischema.schemata.c.schema_name]
)
schema_names = [r[0] for r in connection.execute(s)]
return schema_names
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
current_schema = schema or self.default_schema_name
tables = ischema.tables
s = sql.select([tables.c.table_name],
sql.and_(
tables.c.table_schema == current_schema,
tables.c.table_type == u'BASE TABLE'
),
order_by=[tables.c.table_name]
)
table_names = [r[0] for r in connection.execute(s)]
return table_names
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
current_schema = schema or self.default_schema_name
tables = ischema.tables
s = sql.select([tables.c.table_name],
sql.and_(
tables.c.table_schema == current_schema,
tables.c.table_type == u'VIEW'
),
order_by=[tables.c.table_name]
)
view_names = [r[0] for r in connection.execute(s)]
return view_names
# The cursor reports it is closed after executing the sp.
@reflection.cache
def get_indexes(self, connection, tablename, schema=None, **kw):
current_schema = schema or self.default_schema_name
col_finder = re.compile("(\w+)")
full_tname = "%s.%s" % (current_schema, tablename)
indexes = []
s = sql.text("exec sp_helpindex '%s'" % full_tname)
rp = connection.execute(s)
if rp.closed:
# did not work for this setup.
return []
for row in rp:
if 'primary key' not in row['index_description']:
indexes.append({
'name' : row['index_name'],
'column_names' : col_finder.findall(row['index_keys']),
'unique': 'unique' in row['index_description']
})
return indexes
@reflection.cache
def get_view_definition(self, connection, viewname, schema=None, **kw):
current_schema = schema or self.default_schema_name
views = ischema.views
s = sql.select([views.c.view_definition],
sql.and_(
views.c.table_schema == current_schema,
views.c.table_name == viewname
),
)
rp = connection.execute(s)
if rp:
view_def = rp.scalar()
return view_def
@reflection.cache
def get_columns(self, connection, tablename, schema=None, **kw):
# Get base columns
current_schema = schema or self.default_schema_name
columns = ischema.columns
if current_schema:
whereclause = sql.and_(columns.c.table_name==tablename,
columns.c.table_schema==current_schema)
else:
whereclause = columns.c.table_name==tablename
s = sql.select([columns], whereclause,
order_by=[columns.c.ordinal_position])
c = connection.execute(s)
cols = []
while True:
row = c.fetchone()
if row is None:
break
(name, type, nullable, charlen,
numericprec, numericscale, default, collation) = (
row[columns.c.column_name],
row[columns.c.data_type],
row[columns.c.is_nullable] == 'YES',
row[columns.c.character_maximum_length],
row[columns.c.numeric_precision],
row[columns.c.numeric_scale],
row[columns.c.column_default],
row[columns.c.collation_name]
)
coltype = self.ischema_names.get(type, None)
kwargs = {}
if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
MSNText, MSBinary, MSVarBinary,
sqltypes.LargeBinary):
kwargs['length'] = charlen
if collation:
kwargs['collation'] = collation
if coltype == MSText or \
(coltype in (MSString, MSNVarchar) and charlen == -1):
kwargs.pop('length')
if coltype is None:
util.warn(
"Did not recognize type '%s' of column '%s'" %
(type, name))
coltype = sqltypes.NULLTYPE
if issubclass(coltype, sqltypes.Numeric) and \
coltype is not MSReal:
kwargs['scale'] = numericscale
kwargs['precision'] = numericprec
coltype = coltype(**kwargs)
cdict = {
'name' : name,
'type' : coltype,
'nullable' : nullable,
'default' : default,
'autoincrement':False,
}
cols.append(cdict)
# autoincrement and identity
colmap = {}
for col in cols:
colmap[col['name']] = col
# We also run an sp_columns to check for identity columns:
cursor = connection.execute("sp_columns @table_name = '%s', "
"@table_owner = '%s'"
% (tablename, current_schema))
ic = None
while True:
row = cursor.fetchone()
if row is None:
break
(col_name, type_name) = row[3], row[5]
if type_name.endswith("identity") and col_name in colmap:
ic = col_name
colmap[col_name]['autoincrement'] = True
colmap[col_name]['sequence'] = dict(
name='%s_identity' % col_name)
break
cursor.close()
if ic is not None and self.server_version_info >= MS_2005_VERSION:
table_fullname = "%s.%s" % (current_schema, tablename)
cursor = connection.execute(
"select ident_seed('%s'), ident_incr('%s')"
% (table_fullname, table_fullname)
)
row = cursor.first()
if row is not None and row[0] is not None:
colmap[ic]['sequence'].update({
'start' : int(row[0]),
'increment' : int(row[1])
})
return cols
@reflection.cache
def get_primary_keys(self, connection, tablename, schema=None, **kw):
current_schema = schema or self.default_schema_name
pkeys = []
# information_schema.referential_constraints
RR = ischema.ref_constraints
# information_schema.table_constraints
TC = ischema.constraints
# information_schema.constraint_column_usage:
# the constrained column
C = ischema.key_constraints.alias('C')
# information_schema.constraint_column_usage:
# the referenced column
R = ischema.key_constraints.alias('R')
# Primary key constraints
s = sql.select([C.c.column_name, TC.c.constraint_type],
sql.and_(TC.c.constraint_name == C.c.constraint_name,
C.c.table_name == tablename,
C.c.table_schema == current_schema)
)
c = connection.execute(s)
for row in c:
if 'PRIMARY' in row[TC.c.constraint_type.name]:
pkeys.append(row[0])
return pkeys
@reflection.cache
def get_foreign_keys(self, connection, tablename, schema=None, **kw):
current_schema = schema or self.default_schema_name
# Add constraints
#information_schema.referential_constraints
RR = ischema.ref_constraints
# information_schema.table_constraints
TC = ischema.constraints
# information_schema.constraint_column_usage:
# the constrained column
C = ischema.key_constraints.alias('C')
# information_schema.constraint_column_usage:
# the referenced column
R = ischema.key_constraints.alias('R')
# Foreign key constraints
s = sql.select([C.c.column_name,
R.c.table_schema, R.c.table_name, R.c.column_name,
RR.c.constraint_name, RR.c.match_option,
RR.c.update_rule,
RR.c.delete_rule],
sql.and_(C.c.table_name == tablename,
C.c.table_schema == current_schema,
C.c.constraint_name == RR.c.constraint_name,
R.c.constraint_name ==
RR.c.unique_constraint_name,
C.c.ordinal_position == R.c.ordinal_position
),
order_by = [
RR.c.constraint_name,
R.c.ordinal_position])
# group rows by constraint ID, to handle multi-column FKs
fkeys = []
fknm, scols, rcols = (None, [], [])
def fkey_rec():
return {
'name' : None,
'constrained_columns' : [],
'referred_schema' : None,
'referred_table' : None,
'referred_columns' : []
}
fkeys = util.defaultdict(fkey_rec)
for r in connection.execute(s).fetchall():
scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
rec = fkeys[rfknm]
rec['name'] = rfknm
if not rec['referred_table']:
rec['referred_table'] = rtbl
if schema is not None or current_schema != rschema:
rec['referred_schema'] = rschema
local_cols, remote_cols = \
rec['constrained_columns'],\
rec['referred_columns']
local_cols.append(scol)
remote_cols.append(rcol)
return fkeys.values()
| mit | -1,975,082,025,182,092,500 | 34.962278 | 80 | 0.55947 | false |
pmuller/ipkg | ipkg/versions.py | 1 | 1177 | import __builtin__ # because we override sorted in this module
import pkg_resources
def compare(a, b):
if a < b:
return -1
elif a == b:
return 0
else: # a > b
return 1
def extract(item):
if isinstance(item, dict):
version = item['version']
revision = item['revision']
else:
version = item.version
revision = item.revision
return parse(version), parse(str(revision))
def parse(version):
"""Parses a ``version`` string.
Currently a simple wrapper around ``pkg_resources.parse_version()``,
for API purpose. Parsing could change later.
"""
return pkg_resources.parse_version(version)
def sorted(versions, parser=parse, reverse=False):
"""Returned sorted ``versions``.
"""
return __builtin__.sorted(versions, key=parser, cmp=compare,
reverse=reverse)
def most_recent(versions, parser=parse):
"""Returns the most recent version among ``versions``.
* ``versions`` must be an iterable of versions.
* ``parser`` defaults to ``parse`` which parses version strings.
"""
return sorted(versions, reverse=True)[0]
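# Illustrative usage (added for clarity, not part of the original module):
#
#   >>> sorted(['1.10', '1.2', '1.9.1'])
#   ['1.2', '1.9.1', '1.10']
#   >>> most_recent(['1.10', '1.2', '1.9.1'])
#   '1.10'
#
# pkg_resources.parse_version() orders release numbers numerically, so '1.10'
# correctly sorts after '1.9.1'.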
| mit | -3,061,816,019,314,012,000 | 24.042553 | 72 | 0.620221 | false |
thuo/bc-6-matlabette | matlabette/operators.py | 1 | 3521 | """
Operators
"""
from errors import InvalidArgumentsForOperator, MatlabetteRuntimeError
import numpy
class Operators(object):
@staticmethod
def add(lhs, rhs):
if isinstance(lhs, float):
if isinstance(rhs, float):
return lhs + rhs
if isinstance(rhs, list):
return (numpy.array(rhs) + lhs).tolist()
elif isinstance(lhs, list):
if isinstance(rhs, float):
return (numpy.array(lhs) + rhs).tolist()
if isinstance(rhs, list):
return (numpy.array(lhs) + numpy.array(rhs)).tolist()
raise InvalidArgumentsForOperator
@staticmethod
def subtract(lhs, rhs):
if isinstance(lhs, float):
if isinstance(rhs, float):
return lhs - rhs
if isinstance(rhs, list):
                return (lhs - numpy.array(rhs)).tolist()
elif isinstance(lhs, list):
if isinstance(rhs, float):
return (numpy.array(lhs) - rhs).tolist()
if isinstance(rhs, list):
return (numpy.array(lhs) - numpy.array(rhs)).tolist()
raise InvalidArgumentsForOperator
@staticmethod
def multiply(lhs, rhs):
if isinstance(lhs, float):
if isinstance(rhs, float):
return lhs * rhs
if isinstance(rhs, list):
return (numpy.array(rhs) * lhs).tolist()
elif isinstance(lhs, list):
if isinstance(rhs, float):
return (numpy.array(lhs) * rhs).tolist()
if isinstance(rhs, list):
return (numpy.array(lhs).dot(numpy.array(rhs))).tolist()
raise InvalidArgumentsForOperator
@staticmethod
def divide(lhs, rhs):
if isinstance(lhs, float):
if isinstance(rhs, float):
return lhs / rhs
if isinstance(rhs, list):
                return (lhs / numpy.array(rhs)).tolist()
elif isinstance(lhs, list):
if isinstance(rhs, float):
return (numpy.array(lhs) / rhs).tolist()
if isinstance(rhs, list):
return (numpy.array(lhs) / numpy.array(rhs)).tolist()
raise InvalidArgumentsForOperator
@staticmethod
def elem_add(lhs, rhs):
return Operators.add(lhs, rhs)
@staticmethod
def elem_subtract(lhs, rhs):
return Operators.subtract(lhs, rhs)
@staticmethod
def elem_multiply(lhs, rhs):
if isinstance(lhs, list) and isinstance(rhs, list):
return (numpy.array(lhs) * numpy.array(rhs)).tolist()
return Operators.multiply(lhs, rhs)
@staticmethod
def elem_divide(lhs, rhs):
        if isinstance(lhs, list) and isinstance(rhs, list):
            return (numpy.array(lhs) / numpy.array(rhs)).tolist()
        return Operators.divide(lhs, rhs)
@staticmethod
def transpose(array):
return numpy.array(array).T.tolist()
@staticmethod
def invert(matrix_array):
if len(matrix_array) != 1:
raise MatlabetteRuntimeError('inv takes only one argument')
if not isinstance(matrix_array[0], list):
raise MatlabetteRuntimeError('Invalid argument for inv')
return numpy.linalg.inv(matrix_array[0]).tolist()
@staticmethod
def transpose_function(matrix_array):
if len(matrix_array) != 1:
raise MatlabetteRuntimeError('transpose takes only one argument')
if not isinstance(matrix_array[0], list):
raise MatlabetteRuntimeError('Invalid argument for transpose')
return Operators.transpose(matrix_array[0])
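# Illustrative usage (added for clarity, not part of the original module):
#
#   >>> Operators.multiply([[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]])
#   [[19.0, 22.0], [43.0, 50.0]]          # matrix (dot) product
#   >>> Operators.elem_multiply([[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]])
#   [[5.0, 12.0], [21.0, 32.0]]           # element-wise product
#
# The example matrices are hypothetical; they only illustrate the difference
# between the matrix operators and their element-wise variants.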
| mit | 374,409,545,185,218,900 | 33.519608 | 77 | 0.587333 | false |
jacquev6/LowVoltage | LowVoltage/actions/tests/integ/test_create_table.py | 1 | 7338 | # coding: utf8
# Copyright 2014-2015 Vincent Jacques <[email protected]>
import datetime
import LowVoltage as _lv
import LowVoltage.testing as _tst
class CreateTableLocalIntegTests(_tst.LocalIntegTests):
def tearDown(self):
self.connection(_lv.DeleteTable("Aaa"))
super(CreateTableLocalIntegTests, self).tearDown()
def test_simplest_table(self):
r = self.connection(
_lv.CreateTable("Aaa").hash_key("h", _lv.STRING).provisioned_throughput(1, 2)
)
self.assertDateTimeIsReasonable(r.table_description.creation_date_time)
self.assertEqual(r.table_description.attribute_definitions[0].attribute_name, "h")
self.assertEqual(r.table_description.attribute_definitions[0].attribute_type, "S")
self.assertEqual(r.table_description.global_secondary_indexes, None)
self.assertEqual(r.table_description.item_count, 0)
self.assertEqual(r.table_description.key_schema[0].attribute_name, "h")
self.assertEqual(r.table_description.key_schema[0].key_type, "HASH")
self.assertEqual(r.table_description.local_secondary_indexes, None)
self.assertEqual(r.table_description.provisioned_throughput.last_decrease_date_time, datetime.datetime(1970, 1, 1))
self.assertEqual(r.table_description.provisioned_throughput.last_increase_date_time, datetime.datetime(1970, 1, 1))
self.assertEqual(r.table_description.provisioned_throughput.number_of_decreases_today, 0)
self.assertEqual(r.table_description.provisioned_throughput.read_capacity_units, 1)
self.assertEqual(r.table_description.provisioned_throughput.write_capacity_units, 2)
self.assertEqual(r.table_description.table_name, "Aaa")
self.assertEqual(r.table_description.table_size_bytes, 0)
self.assertEqual(r.table_description.table_status, "ACTIVE")
def test_simple_global_secondary_index(self):
r = self.connection(
_lv.CreateTable("Aaa").hash_key("h", _lv.STRING).provisioned_throughput(1, 2)
.global_secondary_index("the_gsi")
.hash_key("hh", _lv.STRING)
.project_all()
.provisioned_throughput(3, 4)
)
self.assertEqual(r.table_description.global_secondary_indexes[0].index_name, "the_gsi")
self.assertEqual(r.table_description.global_secondary_indexes[0].index_size_bytes, 0)
self.assertEqual(r.table_description.global_secondary_indexes[0].index_status, "ACTIVE")
self.assertEqual(r.table_description.global_secondary_indexes[0].item_count, 0)
self.assertEqual(r.table_description.global_secondary_indexes[0].key_schema[0].attribute_name, "hh")
self.assertEqual(r.table_description.global_secondary_indexes[0].key_schema[0].key_type, "HASH")
self.assertEqual(r.table_description.global_secondary_indexes[0].projection.non_key_attributes, None)
self.assertEqual(r.table_description.global_secondary_indexes[0].projection.projection_type, "ALL")
self.assertEqual(r.table_description.global_secondary_indexes[0].provisioned_throughput.last_decrease_date_time, None)
self.assertEqual(r.table_description.global_secondary_indexes[0].provisioned_throughput.last_increase_date_time, None)
self.assertEqual(r.table_description.global_secondary_indexes[0].provisioned_throughput.number_of_decreases_today, None)
self.assertEqual(r.table_description.global_secondary_indexes[0].provisioned_throughput.read_capacity_units, 3)
self.assertEqual(r.table_description.global_secondary_indexes[0].provisioned_throughput.write_capacity_units, 4)
def test_simple_local_secondary_index(self):
r = self.connection(
_lv.CreateTable("Aaa").hash_key("h", _lv.STRING).range_key("r", _lv.STRING).provisioned_throughput(1, 2)
.local_secondary_index("the_lsi").hash_key("h").range_key("rr", _lv.STRING).project_all()
)
self.assertEqual(r.table_description.local_secondary_indexes[0].index_name, "the_lsi")
self.assertEqual(r.table_description.local_secondary_indexes[0].index_size_bytes, 0)
self.assertEqual(r.table_description.local_secondary_indexes[0].item_count, 0)
self.assertEqual(r.table_description.local_secondary_indexes[0].key_schema[0].attribute_name, "h")
self.assertEqual(r.table_description.local_secondary_indexes[0].key_schema[0].key_type, "HASH")
self.assertEqual(r.table_description.local_secondary_indexes[0].key_schema[1].attribute_name, "rr")
self.assertEqual(r.table_description.local_secondary_indexes[0].key_schema[1].key_type, "RANGE")
self.assertEqual(r.table_description.local_secondary_indexes[0].projection.non_key_attributes, None)
self.assertEqual(r.table_description.local_secondary_indexes[0].projection.projection_type, "ALL")
def test_global_secondary_index_with_projection(self):
r = self.connection(
_lv.CreateTable("Aaa").hash_key("h", _lv.STRING).provisioned_throughput(1, 2)
.global_secondary_index("the_gsi")
.hash_key("hh", _lv.STRING)
.project("toto", "titi")
.provisioned_throughput(3, 4)
)
self.assertEqual(r.table_description.global_secondary_indexes[0].projection.non_key_attributes[0], "toto")
self.assertEqual(r.table_description.global_secondary_indexes[0].projection.non_key_attributes[1], "titi")
self.assertEqual(r.table_description.global_secondary_indexes[0].projection.projection_type, "INCLUDE")
class CreateTableErrorLocalIntegTests(_tst.LocalIntegTests):
def test_define_unused_attribute(self):
with self.assertRaises(_lv.ValidationException) as catcher:
self.connection(
_lv.CreateTable("Aaa").hash_key("h", _lv.STRING).provisioned_throughput(1, 2)
.attribute_definition("x", _lv.STRING)
)
self.assertEqual(
catcher.exception.args,
({
"__type": "com.amazon.coral.validate#ValidationException",
"message": "The number of attributes in key schema must match the number of attributesdefined in attribute definitions.",
},)
)
def test_dont_define_key_attribute(self):
with self.assertRaises(_lv.ValidationException) as catcher:
self.connection(
_lv.CreateTable("Aaa").hash_key("h").provisioned_throughput(1, 2)
.attribute_definition("x", _lv.STRING)
)
self.assertEqual(
catcher.exception.args,
({
"__type": "com.amazon.coral.validate#ValidationException",
"message": "Hash Key not specified in Attribute Definitions. Type unknown.",
},)
)
def test_dont_define_any_attribute(self):
with self.assertRaises(_lv.ValidationException) as catcher:
self.connection(
_lv.CreateTable("Aaa").hash_key("h").provisioned_throughput(1, 2)
)
self.assertEqual(
catcher.exception.args,
({
"__type": "com.amazon.coral.validate#ValidationException",
"message": "No Attribute Schema Defined",
},)
)
| mit | 2,983,920,477,913,561,600 | 55.015267 | 137 | 0.67239 | false |
pmdp/GIW | mongodb-1/consultas.py | 1 | 10427 | # -*- coding: utf-8 -*-
from bottle import run, get, request, template
from pymongo import MongoClient
from os import linesep
mongoclient = MongoClient()
db = mongoclient.giw
# Columns for the tables used in exercises 2, 3, 4, 5 and 7
all_table_data = ['Nombre de usuario', 'e-mail', 'Página web', 'Tarjeta de crédito', 'Hash de contraseña', 'Nombre', 'Apellido', 'Dirección', 'Aficiones', 'Fecha de nacimiento']
# Columns for exercise 6
mid_table_data = ['id', 'e-mail', 'Fecha de nacimiento']
# Function that receives a Mongo cursor and builds a list to later render as HTML
def get_results_data(c):
data = []
    # For each element in the cursor returned by the query
for r in c:
userData = []
userData.append(r['_id'])
userData.append(r['email'])
userData.append(r['webpage'])
creditCardData = u"Número: " + r['credit_card']['number'] + linesep
creditCardData += u"Fecha de expiración: " + r['credit_card']['expire']['month'] + '/' + r['credit_card']['expire']['year']
userData.append(creditCardData)
userData.append(r['password'])
userData.append(r['name'])
userData.append(r['surname'])
addressData = "Pais: " + r['address']['country'] + linesep
addressData += "Zip: " + r['address']['zip'] + linesep
addressData += "Calle: " + r['address']['street'] + linesep
addressData += "Num: " + r['address']['num']
userData.append(addressData)
likesData = ''
for like in r['likes']:
likesData += str(like) + linesep
userData.append(likesData)
userData.append(r['birthdate'])
data.append(userData)
return data
# Function that receives a list with the arguments that should have reached the server
# it also receives a flag that says whether all the arguments are mandatory or not
def validate_arguments(args_list, all_needed=False):
args = request.query
invalid_args = []
valid_args = []
    # Check that all the arguments passed are valid
for a in args:
        # If it is not valid, add it to the list of invalid arguments
if a not in args_list:
invalid_args.append(a)
        # Otherwise, add it to the list of valid arguments
else:
valid_args.append(a)
if len(invalid_args) != 0:
return False, show_args_error(invalid_args)
elif not all_needed and len(valid_args) > 0:
return True, ''
elif all_needed and len(valid_args) == len(args) and len(args) > 0:
return True, ''
else:
return False, "<p style='color:red'>No se han recibido los argumentos necesarios</p>"
# Function that shows an error message with the invalid arguments
def show_args_error(invalid_args):
out = "<p style='color:red'>Argumentos inválidos:</p>\n"
out += "<ul>"
for i in invalid_args:
out += "<li>" + i + "</li>"
out += "</ul>"
return out
@get('/find_user')
def find_user():
# http://localhost:8080/find_user?username=burgoscarla
valid, msg = validate_arguments(['username'], all_needed=True)
if valid:
        # Get the username from the GET request
username = request.query.username
c = db.usuarios
        # Look up the single user with that id
res = c.find_one({"_id":username})
        # If that user exists, fill the lists with the data from the DB
if res:
            # List for simple fields
simple_data = list()
            # List for all the address data
address = list()
            # List for all the credit card data
credit_card = list()
            # List of everything the user likes
likes = list()
for key, value in res.items():
if key == 'credit_card':
credit_card.append('month : ' + value['expire']['month'])
credit_card.append('year : ' + value['expire']['year'])
credit_card.append('number : ' + value['number'])
elif key == 'address':
for k, v in value.items():
address.append(k + ' : ' + v)
elif key == 'likes':
for l in value:
likes.append(l)
else:
simple_data.append(key + ' : ' + value)
return template('datos', title=username, simple_data=simple_data,
address=address, credit_card=credit_card, likes=likes)
        # If it does not exist, return an error
else:
return "<p style='color:red'>El usuario <strong>" + username + " </strong> no existe en la BD.</p>"
else:
return msg
@get('/find_users')
def find_users():
# http://localhost:8080/find_users?name=Luz
# http://localhost:8080/find_users?name=Luz&surname=Romero
# http://localhost:8080/find_users?name=Luz&food=hotdog
valid, msg = validate_arguments(['name', 'surname', 'birthday'])
if valid:
        # If there are no invalid elements, proceed with the query
name = request.query.name
surname = request.query.surname
birth = request.query.birthday
        # Dictionary that will hold the data to search for
data = dict()
if name:
data['name'] = name
if surname:
data['surname'] = surname
if birth:
data['birthdate'] = birth
c = db.usuarios
res = c.find(data)
data = get_results_data(res)
return template('table', num_results=str(res.count()), table_titles=all_table_data, rows=data)
else:
return msg
@get('/find_users_or')
def find_users_or():
# http://localhost:8080/find_users_or?name=Luz&surname=Corral
valid, msg = validate_arguments(['name', 'surname', 'birthday'])
    # If there are no invalid elements, proceed with the query
if valid:
name = request.query.name
surname = request.query.surname
birth = request.query.birthday
        # Dictionary that will hold the data to search for
data = []
if name:
data.append({'name': name})
if surname:
data.append({'surname': surname})
if birth:
data.append({'birthdate': birth})
c = db.usuarios
res = c.find({'$or': data})
data = get_results_data(res)
return template('table', num_results=str(res.count()), table_titles=all_table_data, rows=data)
else:
return msg
@get('/find_like')
def find_like():
# http://localhost:8080/find_like?like=football
valid, msg = validate_arguments(['like'], all_needed=True)
    # If there are no invalid elements, proceed with the query
if valid:
like = request.query.like
c = db.usuarios
res = c.find({'likes': like})
data = get_results_data(res)
return template('table', num_results=str(res.count()), table_titles=all_table_data, rows=data)
else:
return msg
@get('/find_country')
def find_country():
# http://localhost:8080/find_country?country=Irlanda
valid, msg = validate_arguments(['country'], all_needed=True)
    # If there are no invalid elements, proceed with the query
if valid:
country = request.query.country
c = db.usuarios
res = c.find({'address.country': country})
data = get_results_data(res)
return template('table', num_results=str(res.count()), table_titles=all_table_data, rows=data)
else:
return msg
@get('/find_email_birthdate')
def email_birthdate():
# http://localhost:8080/find_email_birthdate?from=1973-01-01&to=1990-12-31
valid, msg = validate_arguments(['from', 'to'], all_needed=True)
    # If there are no invalid elements, proceed with the query
if valid:
from_date = request.query['from']
to_date = request.query.to
c = db.usuarios
        # Birth date greater than from_date and less than to_date
query = {'birthdate': {'$gt': from_date, '$lt': to_date}}
        # query that finds the birth dates, sorted by birth date and by _id,
        # and only projects the required fields
res = c.find(query, {'_id': 1, 'email': 1, 'birthdate': 1 }).sort([('birthdate', 1), ('_id', 1)])
data = []
for r in res:
user_data = []
user_data.append(r['_id'])
user_data.append(r['email'])
user_data.append(r['birthdate'])
data.append(user_data)
return template('table', num_results=str(res.count()), table_titles=mid_table_data, rows=data)
else:
return msg
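# Illustrative example (added for clarity, not part of the original source):
# the query built in find_email_birthdate is equivalent to the following
# standalone pymongo call (the dates come from the example URL above):
#
#   db.usuarios.find(
#       {"birthdate": {"$gt": "1973-01-01", "$lt": "1990-12-31"}},
#       {"_id": 1, "email": 1, "birthdate": 1}
#   ).sort([("birthdate", 1), ("_id", 1)])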
@get('/find_country_likes_limit_sorted')
def find_country_likes_limit_sorted():
# http://localhost:8080/find_country_likes_limit_sorted?country=Irlanda&likes=movies,animals&limit=4&ord=asc
valid, msg = validate_arguments(['country', 'likes', 'limit', 'ord'], all_needed=True)
    # If there are no invalid elements, proceed with the query
if valid:
country = request.query.country
likes = request.query.likes
limit = request.query.limit
order = request.query.ord
        # Store all the likes passed as a parameter in a list. We build a list so that $all can read it properly.
gustos = []
cadena = ""
for i in likes:
if i != ',':
cadena += i
else:
gustos.append(cadena)
cadena = ""
gustos.append(cadena)
        # depending on the sort type, the variable order is assigned an integer value
if order == 'asc':
order = 1
elif order == 'desc':
order = -1
c = db.usuarios
query = {'$and': [{'address.country': country}, {'likes': {'$all': gustos}}]}
        # query that searches by country and likes, sorted by birth date and limited to limit results
res = c.find(query).sort('birthdate', int(order)).limit(int(limit))
data = get_results_data(res)
return template('table', num_results=str(res.count()), table_titles=all_table_data, rows=data)
else:
return msg
if __name__ == "__main__":
    # Do not change host, port or debug
run(host='localhost',port=8080,debug=True)
| gpl-3.0 | 4,137,989,952,177,386,000 | 37.779851 | 177 | 0.592322 | false |
kklmn/xrt | xrt/backends/raycing/physconsts.py | 1 | 1057 | # -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev", "Roman Chernikov"
__date__ = "07 Jan 2016"
PI = 3.1415926535897932384626433832795
PI2 = 6.283185307179586476925286766559
SQRT2PI = PI2**0.5 # =2.5066282746310002
SQ3 = 1.7320508075688772935274463415059
SQ2 = 2**0.5 # =1.4142135623730951
SQPI = PI**0.5 # =1.7724538509055159
E0 = 4.803e-10 # [esu]
C = 2.99792458e10 # [cm/sec]
M0 = 9.10938291e-28 # [g]
M0C2 = 0.510998928 # MeV
HPLANCK = 6.626069573e-27 # [erg*sec]
EV2ERG = 1.602176565e-12 # Energy conversion from [eV] to [erg]
K2B = 2 * PI * M0 * C**2 * 0.001 / E0 # =10.710201593926415
SIE0 = 1.602176565e-19
SIHPLANCK = 6.626069573e-34
SIM0 = M0 * 1e-3
SIC = C * 1e-2
FINE_STR = 1 / 137.03599976
#E2W = PI2 * SIE0 / SIH # w = E2W * E[eV]
E2W = 1519267514747457.9195337718065469
R0 = 2.817940285e-5 # A
AVOGADRO = 6.02214199e23 # atoms/mol
CHeVcm = HPLANCK * C / EV2ERG # {c*h[eV*cm]} = 0.00012398419297617678
CH = CHeVcm * 1e8 # {c*h[eV*A]} = 12398.419297617678
CHBAR = CH / PI2 # {c*h/(2pi)[eV*A]} = 1973.2697177417986
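# Quick sanity checks (added for clarity, not part of the original module):
#
#   >>> CH          # photon energy * wavelength, eV * Angstrom
#   12398.419297617678
#   >>> E2W * 1.0   # angular frequency [rad/s] of a 1 eV photon
#   1519267514747457.9
#
# i.e. a 1 eV photon has a wavelength of ~12398.4 A and an angular frequency
# of ~1.52e15 rad/s, consistent with E2W = 2*pi*e/h noted above.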
| mit | -5,220,107,738,553,659,000 | 34.233333 | 71 | 0.659413 | false |
tuxfux-hlp-notes/python-batches | archieves/Batch-63/12-Logging/seventh.py | 1 | 2105 | #!/usr/bin/python
# logging.basicConfig?
# logging.Formatter?
# man date or time.strftime().
# https://docs.python.org/2/library/subprocess.html
# cronjob or scheduler
# import logging.handlers for rest all handlers.
from subprocess import Popen,PIPE
from logging.handlers import SysLogHandler
import logging
#logging.basicConfig(filename='my_logs.txt',filemode='a',level=logging.DEBUG,
# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',datefmt='%c')
# Loggers expose the interface that application code directly uses.
# ex:logger - root
# Handlers send the log records (created by loggers) to the appropriate destination.
# https://docs.python.org/2/howto/logging.html#useful-handlers
# ex: filename='my_logs.txt',filemode='a'
# Filters provide a finer grained facility for determining which log records to output.
# ex: level=logging.DEBUG
# Formatters specify the layout of log records in the final output.
# ex: format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',datefmt='%c'
# create logger
logger = logging.getLogger('disk_monitor') # logger name
logger.setLevel(logging.DEBUG) # Filter for logger
# create console handler and set level to debug
ch = SysLogHandler(address="/dev/log") # handler
ch.setLevel(logging.DEBUG) # filter for handler
# create formatter
formatter = logging.Formatter('- %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter) # handler and formatter
# add ch to logger
logger.addHandler(ch) # logger and handler
# Main
p1 = Popen(['df','-h','/'],stdout=PIPE)
p2 = Popen(['tail','-n','1'],stdin=p1.stdout,stdout=PIPE)
disk_size = int(p2.communicate()[0].split()[4].split('%')[0])
if disk_size < 50:
logger.info("The disk looks health at {}".format(disk_size))
elif disk_size < 70:
logger.warning("The disk is getting filled up {}".format(disk_size))
elif disk_size < 80:
logger.error("your application is sleeping now {}".format(disk_size))
elif disk_size < 100:
logger.critical("your application is not working {}".format(disk_size))
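# Illustrative output (added for clarity, not part of the original script):
# with the formatter defined above, a run on a host whose root partition is
# 42% full (hypothetical value) would send a message formatted roughly as
#
#   - disk_monitor - INFO - The disk looks healthy at 42
#
# to the local syslog via /dev/log.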
| gpl-3.0 | 4,377,696,708,954,028,500 | 32.951613 | 87 | 0.705463 | false |
KristianJensen/cameo | cameo/network_analysis/networkx_based.py | 1 | 3295 | # Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
__all__ = ['model_to_network', 'reactions_to_network', 'remove_highly_connected_nodes']
import networkx as nx
from cameo.network_analysis.util import distance_based_on_molecular_formula
def model_to_network(model, *args, **kwargs):
"""Convert a model into a networkx graph.
Calls reactions_to_network with model.reactions.
Parameters
----------
model : SolverBasedModel
The model.
Returns
-------
networkx.MultiDiGraph
See Also
--------
reactions_to_network
"""
return reactions_to_network(model.reactions, *args, **kwargs)
def reactions_to_network(reactions, max_distance=0.3):
"""Convert a list of reactions into a networkx graph.
Parameters
----------
reactions : list
The list of reactions.
max_distance : float, optional
A threshold on the normalized distance between two compounds. If distance is above this threshold, no edge between
those compounds will be created.
Returns
-------
networkx.MultiDiGraph
See Also
--------
distance_based_on_molecular_formula
"""
edges = list()
for reaction in reactions:
substrates = list(reaction.reactants)
for substrate in substrates:
products = list(reaction.products)
for product in products:
try:
distance = distance_based_on_molecular_formula(substrate, product, normalize=True)
except ValueError:
distance = 0.
if distance <= max_distance:
if reaction.reversibility:
edges.append((product, substrate, dict(reaction=reaction)))
edges.append((substrate, product, dict(reaction=reaction)))
elif reaction.lower_bound > 0:
edges.append((substrate, product, dict(reaction=reaction)))
else:
edges.append((product, substrate, dict(reaction=reaction)))
multi_graph = nx.MultiDiGraph(edges)
return multi_graph
def remove_highly_connected_nodes(network, max_degree=10, ignore=[]):
"""Remove highly connected nodes (changes network in place).
Parameters
----------
network : networkx graph
max_degree : int (default 10)
Remove nodes with degree > max_degree
ignore : list
List of nodes to ignore.
Returns
-------
None
"""
to_remove = [node for node, degree in network.degree_iter() if degree > max_degree and node not in ignore]
network.remove_nodes_from(to_remove)
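# Illustrative usage (added for clarity, not part of the original module):
# a minimal sketch, assuming ``model`` is an already-loaded SolverBasedModel;
# the variable names below are hypothetical.
#
#   from cameo.network_analysis.networkx_based import (
#       model_to_network, remove_highly_connected_nodes)
#
#   network = model_to_network(model, max_distance=0.3)
#   # drop promiscuous metabolites, which tend to be hubs in the graph
#   remove_highly_connected_nodes(network, max_degree=10)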
| apache-2.0 | -9,001,907,852,848,431,000 | 31.303922 | 122 | 0.640061 | false |
chand3040/cloud_that | lms/djangoapps/shoppingcart/processors/PayPal.py | 1 | 21754 | """
Implementation of the PayPal processor.
To enable this implementation, add the following to lms.auth.json:
CC_PROCESSOR_NAME = "PayPal"
CC_PROCESSOR = {
"PayPal": {
"PURCHASE_ENDPOINT": "sandbox or live url of paypal",
"CLIENT_ID": "<paypal client_id>",
"CLIENT_SECRET": "<paypal client secret>",
"MODE": "sandbox | live",
"RETURN_URL": 'host/dashboard',
"NOTIFY_URL": 'host/paypal',
"CANCEL_URL": 'where to redirect if user cancels order'
}
}
"""
import time
import hmac
import binascii
import re
import json
import urlparse
import logging
from collections import OrderedDict, defaultdict
from decimal import Decimal, InvalidOperation
from hashlib import sha1
from textwrap import dedent
from django.conf import settings
from django.utils.translation import ugettext as _
from edxmako.shortcuts import render_to_string
from shoppingcart.models import Order
from shoppingcart.processors.exceptions import *
from shoppingcart.processors.helpers import get_processor_config
from microsite_configuration import microsite
from django.core.urlresolvers import reverse
from paypal.standard.models import ST_PP_COMPLETED, ST_PP_CANCELLED, ST_PP_DENIED
from paypal.standard.ipn.signals import valid_ipn_received
from paypal.standard.pdt.views import process_pdt
logger = logging.getLogger(__name__)
def process_postpay_callback(request):
"""
    The top-level call to this module.
This function is handed the callback request after the customer has entered the CC info and clicked "buy"
on the external Hosted Order Page.
It is expected to verify the callback and determine if the payment was successful.
It returns {'success':bool, 'order':Order, 'error_html':str}
If successful this function must have the side effect of marking the order purchased and calling the
purchased_callbacks of the cart items.
If unsuccessful this function should not have those side effects but should try to figure out why and
return a helpful-enough error message in error_html.
Author: Naresh Makwana
created on: 07-Apr-2015
"""
logger.info('Handling GET request %s', request.GET)
logger.info('Handling POST request %s', request.POST)
pdt_obj, failed = process_pdt(request)
logger.info('invoice %s', getattr(pdt_obj, 'invoice'))
logger.info('mc_currency %s', getattr(pdt_obj, 'mc_currency'))
logger.info('payment_status %s', getattr(pdt_obj, 'payment_status'))
try:
result = payment_accepted(pdt_obj)
if result['accepted']:
# SUCCESS CASE first, rest are some sort of oddity
record_purchase(pdt_obj, result['order'])
return {'success': True,
'order': result['order'],
'error_html': ''}
else:
return {'success': False,
'order': result['order'],
'error_html': get_processor_decline_html(pdt_obj)}
except CCProcessorException as error:
return {'success': False,
'order': None, # due to exception we may not have the order
'error_html': get_processor_exception_html(error)}
def processor_hash(value):
"""
Performs the base64(HMAC_SHA1(key, value)) used by CyberSource Hosted Order Page
"""
shared_secret = get_processor_config().get('SHARED_SECRET', '')
hash_obj = hmac.new(shared_secret.encode('utf-8'), value.encode('utf-8'), sha1)
return binascii.b2a_base64(hash_obj.digest())[:-1] # last character is a '\n', which we don't want
def sign(params, signed_fields_key='orderPage_signedFields', full_sig_key='orderPage_signaturePublic'):
"""
params needs to be an ordered dict, b/c cybersource documentation states that order is important.
Reverse engineered from PHP version provided by cybersource
"""
merchant_id = get_processor_config().get('MERCHANT_ID', '')
order_page_version = get_processor_config().get('ORDERPAGE_VERSION', '7')
serial_number = get_processor_config().get('SERIAL_NUMBER', '')
params['merchantID'] = merchant_id
params['orderPage_timestamp'] = int(time.time() * 1000)
params['orderPage_version'] = order_page_version
params['orderPage_serialNumber'] = serial_number
fields = u",".join(params.keys())
values = u",".join([u"{0}={1}".format(i, params[i]) for i in params.keys()])
fields_sig = processor_hash(fields)
values += u",signedFieldsPublicSignature=" + fields_sig
params[full_sig_key] = processor_hash(values)
params[signed_fields_key] = fields
return params
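# Illustrative example (added for clarity, not part of the original source):
# sign() mutates and returns the ordered params dict, adding the merchant
# fields and the two signature fields, so a minimal call looks roughly like
# this (values are hypothetical):
#
#   params = OrderedDict([('amount', '40.00'), ('currency', 'usd')])
#   signed = sign(params)
#   # signed now also contains: merchantID, orderPage_timestamp,
#   # orderPage_version, orderPage_serialNumber, orderPage_signaturePublic
#   # and orderPage_signedFields (the comma-separated list of signed keys)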
def verify_signatures(ipn_obj):
"""
Use the signature we receive in the POST back from PayPal to verify
the identity of the sender (PayPal) and that the contents of the message
have not been tampered with.
Args:
params (dictionary): The POST parameters we received from PayPal.
Returns:
dict: Contains the parameters we will use elsewhere, converted to the
appropriate types
Raises:
CCProcessorSignatureException: The calculated signature does not match
the signature we received.
        CCProcessorDataException: The parameters we received from PayPal were not valid
(missing keys, wrong types)
"""
# First see if the user cancelled the transaction
# if so, then not all parameters will be passed back so we can't yet verify signatures
if getattr(ipn_obj, 'payment_status') == ST_PP_CANCELLED:
raise CCProcessorUserCancelled()
    # if the user declined the transaction
# if so, then amount will not be passed back so we can't yet verify signatures
if getattr(ipn_obj, 'payment_status') == ST_PP_DENIED:
raise CCProcessorUserDeclined()
return ipn_obj
def render_purchase_form_html(cart, **kwargs):
"""
    Renders the HTML of the hidden POST form that must be used to initiate a purchase with PayPal
"""
return render_to_string('shoppingcart/paypal_form.html', {
'action': get_purchase_endpoint(),
'params': get_signed_purchase_params(cart),
})
def get_signed_purchase_params(cart, **kwargs):
return sign(get_purchase_params(cart))
def get_purchase_params(cart):
cart_items = cart.orderitem_set.all()
total_cost = cart.total_cost
amount = "{0:0.2f}".format(total_cost)
cart_items = cart.orderitem_set.all()
params = OrderedDict()
params['business'] = settings.PAYPAL_RECEIVER_EMAIL
params['invoice'] = "{0:d}".format(cart.id)
params['item_number'] = "{0:d}".format(cart.id)
params['notify_url'] = get_processor_config().get('NOTIFY_URL', '')
params['return'] = get_processor_config().get('RETURN_URL', '')
params['cancel_return'] = get_processor_config().get('CANCEL_URL', '')
params['currency_code'] = cart.currency.upper()
params['orderPage_transactionType'] = 'sale'
params['orderNumber'] = "{0:d}".format(cart.id)
params['no_shipping'] = 1
params['charset'] = 'utf-8'
params['upload'] = 1
for counter, cart_item in enumerate(cart_items):
params['item_name_'+str(counter+1)] = cart_item.line_desc
params['amount_'+str(counter+1)] = cart_item.list_price
params['quantity_'+str(counter+1)] = cart_item.qty
params['cmd'] = '_cart'
return params
def get_purchase_endpoint():
return get_processor_config().get('PURCHASE_ENDPOINT', '')
def payment_accepted(ipn_obj):
"""
Check that paypal has accepted the payment
    ipn_obj: the PDT/IPN object returned by PayPal in the post-payment callback
returns: true if the payment was correctly accepted, for the right amount
false if the payment was not accepted
raises: CCProcessorDataException if the returned message did not provide required parameters
CCProcessorWrongAmountException if the amount charged is different than the order amount
"""
#make sure required keys are present and convert their values to the right type
valid_params = {}
for key, key_type in [('invoice', int),
('mc_currency', str),
('payment_status', str)]:
if not hasattr(ipn_obj, key):
raise CCProcessorDataException(
_("The payment processor did not return a required parameter: {0}").format(key)
)
try:
valid_params[key] = key_type(getattr(ipn_obj, key))
except ValueError:
raise CCProcessorDataException(
_("The payment processor returned a badly-typed value {0} for param {1}.").format(getattr(ipn_obj, key), key)
)
try:
order = Order.objects.get(id=valid_params['invoice'])
except Order.DoesNotExist:
raise CCProcessorDataException(_("The payment processor accepted an order whose number is not in our system."))
if valid_params['payment_status'] == ST_PP_COMPLETED:
try:
# Moved reading of charged_amount here from the valid_params loop above because
# only 'ACCEPT' messages have a 'mc_gross' parameter
charged_amt = Decimal(getattr(ipn_obj, 'mc_gross'))
except InvalidOperation:
raise CCProcessorDataException(
_("The payment processor returned a badly-typed value {0} for param {1}.").format(
getattr(ipn_obj, 'mc_gross'), 'mc_gross'
)
)
if charged_amt == order.total_cost and valid_params['mc_currency'] == order.currency.upper():
return {'accepted': True,
'amt_charged': charged_amt,
'currency': valid_params['mc_currency'].lower(),
'order': order}
else:
raise CCProcessorWrongAmountException(
_("The amount charged by the processor {0} {1} is different than the total cost of the order {2} {3}.")
.format(
charged_amt,
valid_params['mc_currency'],
order.total_cost,
order.currency
)
)
else:
return {'accepted': False,
'amt_charged': 0,
'currency': 'usd',
'order': order}
def record_purchase(ipn_obj, order):
"""
Record the purchase and run purchased_callbacks
"""
ccnum_str = getattr(ipn_obj, 'card_accountNumber', '')
m = re.search("\d", ccnum_str)
if m:
ccnum = ccnum_str[m.start():]
else:
ccnum = "####"
order.purchase(
first=getattr(ipn_obj, 'first_name', ''),
last=getattr(ipn_obj, 'last_name', ''),
street1=getattr(ipn_obj, 'billTo_street1', ''),
street2=getattr(ipn_obj, 'address_street', ''),
city=getattr(ipn_obj, 'address_city', ''),
state=getattr(ipn_obj, 'address_state', ''),
country=getattr(ipn_obj, 'address_country', ''),
postalcode=getattr(ipn_obj, 'billTo_postalCode', ''),
ccnum=ccnum,
cardtype=CARDTYPE_MAP[getattr(ipn_obj, 'card_cardType', 'UNKNOWN')],
processor_reply_dump=dict(urlparse.parse_qsl(str(getattr(ipn_obj, 'query', 'UNKNOWN=UNKNOWN'))))
)
def get_processor_decline_html(ipn_obj):
"""Have to parse through the error codes to return a helpful message"""
# see if we have an override in the microsites
payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
msg = _(
"Sorry! Our payment processor did not accept your payment. "
"The decision they returned was {decision_text}, "
"and the reason was {reason_text}. "
"You were not charged. "
"Please try a different form of payment. "
"Contact us with payment-related questions at {email}."
)
formatted = msg.format(
decision_text='<span class="decision">{}</span>'.format(getattr(ipn_obj, 'payment_status')),
reason_text='<span class="reason">{code}:{msg}</span>'.format(
            code=getattr(ipn_obj, 'reason_code'), msg=REASONCODE_MAP[getattr(ipn_obj, 'reason_code')],
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
def get_processor_exception_html(exception):
"""Return error HTML associated with exception"""
# see if we have an override in the microsites
payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
if isinstance(exception, CCProcessorDataException):
msg = _(
"Sorry! Our payment processor sent us back a payment confirmation "
"that had inconsistent data!"
"We apologize that we cannot verify whether the charge went through "
"and take further action on your order."
"The specific error message is: {error_message}. "
"Your credit card may possibly have been charged. "
"Contact us with payment-specific questions at {email}."
)
formatted = msg.format(
error_message='<span class="exception_msg">{msg}</span>'.format(
msg=exception.message,
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
elif isinstance(exception, CCProcessorWrongAmountException):
msg = _(
"Sorry! Due to an error your purchase was charged for "
"a different amount than the order total! "
"The specific error message is: {error_message}. "
"Your credit card has probably been charged. "
"Contact us with payment-specific questions at {email}."
)
formatted = msg.format(
error_message='<span class="exception_msg">{msg}</span>'.format(
msg=exception.message,
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
elif isinstance(exception, CCProcessorSignatureException):
msg = _(
"Sorry! Our payment processor sent us back a corrupted message "
"regarding your charge, so we are unable to validate that "
"the message actually came from the payment processor. "
"The specific error message is: {error_message}. "
"We apologize that we cannot verify whether the charge went through "
"and take further action on your order. "
"Your credit card may possibly have been charged. "
"Contact us with payment-specific questions at {email}."
)
formatted = msg.format(
error_message='<span class="exception_msg">{msg}</span>'.format(
msg=exception.message,
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
# fallthrough case, which basically never happens
return '<p class="error_msg">EXCEPTION!</p>'
CARDTYPE_MAP = defaultdict(lambda: "UNKNOWN")
CARDTYPE_MAP.update(
{
'001': 'Visa',
'002': 'MasterCard',
'003': 'American Express',
'004': 'Discover',
'005': 'Diners Club',
'006': 'Carte Blanche',
'007': 'JCB',
'014': 'EnRoute',
'021': 'JAL',
'024': 'Maestro',
'031': 'Delta',
'033': 'Visa Electron',
'034': 'Dankort',
'035': 'Laser',
'036': 'Carte Bleue',
'037': 'Carta Si',
'042': 'Maestro',
'043': 'GE Money UK card'
}
)
REASONCODE_MAP = defaultdict(lambda: "UNKNOWN REASON")
REASONCODE_MAP.update(
{
'100': _('Successful transaction.'),
'101': _('The request is missing one or more required fields.'),
'102': _('One or more fields in the request contains invalid data.'),
'104': dedent(_(
"""
The merchantReferenceCode sent with this authorization request matches the
merchantReferenceCode of another authorization request that you sent in the last 15 minutes.
Possible fix: retry the payment after 15 minutes.
""")),
'150': _('Error: General system failure. Possible fix: retry the payment after a few minutes.'),
'151': dedent(_(
"""
Error: The request was received but there was a server timeout.
This error does not include timeouts between the client and the server.
Possible fix: retry the payment after some time.
""")),
'152': dedent(_(
"""
Error: The request was received, but a service did not finish running in time
Possible fix: retry the payment after some time.
""")),
'201': _('The issuing bank has questions about the request. Possible fix: retry with another form of payment'),
'202': dedent(_(
"""
Expired card. You might also receive this if the expiration date you
provided does not match the date the issuing bank has on file.
Possible fix: retry with another form of payment
""")),
'203': dedent(_(
"""
General decline of the card. No other information provided by the issuing bank.
Possible fix: retry with another form of payment
""")),
'204': _('Insufficient funds in the account. Possible fix: retry with another form of payment'),
# 205 was Stolen or lost card. Might as well not show this message to the person using such a card.
'205': _('Unknown reason'),
'207': _('Issuing bank unavailable. Possible fix: retry again after a few minutes'),
'208': dedent(_(
"""
Inactive card or card not authorized for card-not-present transactions.
Possible fix: retry with another form of payment
""")),
'210': _('The card has reached the credit limit. Possible fix: retry with another form of payment'),
'211': _('Invalid card verification number. Possible fix: retry with another form of payment'),
# 221 was The customer matched an entry on the processor's negative file.
# Might as well not show this message to the person using such a card.
'221': _('Unknown reason'),
'231': _('Invalid account number. Possible fix: retry with another form of payment'),
'232': dedent(_(
"""
The card type is not accepted by the payment processor.
Possible fix: retry with another form of payment
""")),
'233': _('General decline by the processor. Possible fix: retry with another form of payment'),
'234': _(
"There is a problem with our CyberSource merchant configuration. Please let us know at {0}"
).format(settings.PAYMENT_SUPPORT_EMAIL),
# reason code 235 only applies if we are processing a capture through the API. so we should never see it
'235': _('The requested amount exceeds the originally authorized amount.'),
'236': _('Processor Failure. Possible fix: retry the payment'),
# reason code 238 only applies if we are processing a capture through the API. so we should never see it
'238': _('The authorization has already been captured'),
# reason code 239 only applies if we are processing a capture or credit through the API,
# so we should never see it
'239': _('The requested transaction amount must match the previous transaction amount.'),
'240': dedent(_(
"""
The card type sent is invalid or does not correlate with the credit card number.
Possible fix: retry with the same card or another form of payment
""")),
# reason code 241 only applies when we are processing a capture or credit through the API,
# so we should never see it
'241': _('The request ID is invalid.'),
# reason code 242 occurs if there was not a previously successful authorization request or
# if the previously successful authorization has already been used by another capture request.
# This reason code only applies when we are processing a capture through the API
# so we should never see it
'242': dedent(_(
"""
You requested a capture through the API, but there is no corresponding, unused authorization record.
""")),
# we should never see 243
'243': _('The transaction has already been settled or reversed.'),
# reason code 246 applies only if we are processing a void through the API. so we should never see it
'246': dedent(_(
"""
The capture or credit is not voidable because the capture or credit information has already been
submitted to your processor. Or, you requested a void for a type of transaction that cannot be voided.
""")),
# reason code 247 applies only if we are processing a void through the API. so we should never see it
'247': _('You requested a credit for a capture that was previously voided'),
'250': dedent(_(
"""
Error: The request was received, but there was a timeout at the payment processor.
Possible fix: retry the payment.
""")),
'520': dedent(_(
"""
The authorization request was approved by the issuing bank but declined by CyberSource.'
Possible fix: retry with a different form of payment.
""")),
}
)
| agpl-3.0 | 322,017,873,674,394,940 | 42.077228 | 125 | 0.623472 | false |
u7702045/sanji-cellular | tests/test_e2e/view_cellular.py | 1 | 2045 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
from sanji.core import Sanji
from sanji.connection.mqtt import Mqtt
REQ_RESOURCE = "/network/cellulars"
class View(Sanji):
# This function will be executed after registered.
def run(self):
print "Go Test 1"
res = self.publish.get(REQ_RESOURCE)
if res.code != 200:
print "GET should reply code 200"
self.stop()
else:
print res.to_json()
print "Pass 1 Test"
print "Go Test 2"
res = self.publish.get(REQ_RESOURCE+'/0')
if res.code != 200:
print "GET should reply code 200"
self.stop()
else:
print res.to_json()
print "Pass 2 Test"
print "Go Test 3"
res = self.publish.get(REQ_RESOURCE+'/5')
if res.code == 200:
print "GET should reply code 400"
self.stop()
else:
print res.to_json()
print "Pass 3 Test"
print "Go Test 4"
res = self.publish.put(REQ_RESOURCE+'/0',
data={"enable": 1,
"apn": "internet"})
if res.code != 200:
print "GET should reply code 200"
print res.to_json()
self.stop()
else:
print res.to_json()
print "Pass 4 Test"
# print "Go Test 5"
res = self.publish.put(REQ_RESOURCE+'/0',
data={"enable": 0,
"apn": "internet"})
if res.code != 200:
print "GET should reply code 200"
self.stop()
else:
print res.to_json()
print "Pass 5 Test"
# stop the test view
self.stop()
if __name__ == "__main__":
FORMAT = "%(asctime)s - %(levelname)s - %(lineno)s - %(message)s"
logging.basicConfig(level=0, format=FORMAT)
logger = logging.getLogger("Cellular")
view = View(connection=Mqtt())
view.start()
| gpl-2.0 | 3,970,784,969,812,089,300 | 25.558442 | 69 | 0.49291 | false |
gimler/techism2 | techism2/ical/views.py | 1 | 3450 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from techism2 import service
from datetime import datetime, timedelta
import icalendar
import time
def ical(request):
ninety_days = datetime.utcnow() + timedelta(days=90)
event_list = service.get_event_query_set().filter(date_time_begin__lte=ninety_days).order_by('date_time_begin')
cal = icalendar.Calendar()
cal['prodid'] = icalendar.vText(u'-//Techism//Techism//DE')
cal['version'] = icalendar.vText(u'2.0')
cal['x-wr-calname'] = icalendar.vText(u'Techism')
cal['x-wr-caldesc'] = icalendar.vText(u'Techism - IT-Events in München')
for e in event_list:
event = icalendar.Event()
# TODO should we generate an UUID when creating the event?
uid = u'%[email protected]' % (str(e.id))
event['uid'] = icalendar.vText(uid)
event['dtstamp'] = icalendar.vDatetime(datetime.utcnow())
# The sequence field must be incremented each time the event is modifed.
# The trick here is to subtract the create TS from the modify TS and
# use the difference as sequence.
sequence = 0
if e.date_time_created and e.date_time_modified:
createTimestamp = time.mktime(e.get_date_time_created_utc().timetuple())
modifyTimestamp = time.mktime(e.get_date_time_modified_utc().timetuple())
sequence = modifyTimestamp - createTimestamp
event['sequence'] = icalendar.vInt(sequence)
# created and last-modified
if e.date_time_created:
event['created'] = icalendar.vDatetime(e.get_date_time_created_utc())
if e.date_time_modified:
event['last-modified'] = icalendar.vDatetime(e.get_date_time_modified_utc())
# TENTATIVE, CONFIRMED, CANCELLED
event['status'] = icalendar.vText(u'CONFIRMED')
if e.title:
event['summary'] = icalendar.vText(e.title)
if e.description:
event['description'] = icalendar.vText(e.description)
if e.date_time_begin:
event['dtstart'] = icalendar.vDatetime(e.get_date_time_begin_utc())
if e.date_time_end:
event['dtend'] = icalendar.vDatetime(e.get_date_time_end_utc())
if e.url:
relative_url = reverse('event-show', args=[e.id])
absolute_url = request.build_absolute_uri(relative_url)
event['url'] = icalendar.vUri(absolute_url)
# geo value isn't used by iCal readers :-(
# maybe a trick is to add the geo coordinates to the location field using the following format:
# $latitude, $longitude ($name, $street, $city)
if e.location:
location = u'%s, %s, %s' % (e.location.name, e.location.street, e.location.city)
event['location'] = icalendar.vText(location)
if e.location and e.location.latitude and e.location.longitude:
event['geo'] = icalendar.vGeo((e.location.latitude, e.location.longitude))
cal.add_component(event)
response = HttpResponse(cal.as_string())
response['Content-Type'] = 'text/calendar; charset=UTF-8'
response['Cache-Control'] = 'no-cache, no-store, max-age=0, must-revalidate'
response['Pragma'] = 'no-cache'
response['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
return response
| apache-2.0 | 7,792,382,559,504,474,000 | 43.217949 | 115 | 0.627428 | false |
infobip/infobip-api-python-client | infobip/api/model/nc/notify/NumberContextResponse.py | 1 | 1285 | # -*- coding: utf-8 -*-
"""This is a generated class and is not intended for modification!
"""
from datetime import datetime
from infobip.util.models import DefaultObject, serializable
from infobip.api.model.nc.notify.NumberContextResponseDetails import NumberContextResponseDetails
class NumberContextResponse(DefaultObject):
@property
@serializable(name="results", type=NumberContextResponseDetails)
def results(self):
"""
Property is a list of: NumberContextResponseDetails
"""
return self.get_field_value("results")
@results.setter
def results(self, results):
"""
Property is a list of: NumberContextResponseDetails
"""
self.set_field_value("results", results)
def set_results(self, results):
self.results = results
return self
@property
@serializable(name="bulkId", type=unicode)
def bulk_id(self):
"""
Property is of type: unicode
"""
return self.get_field_value("bulk_id")
@bulk_id.setter
def bulk_id(self, bulk_id):
"""
Property is of type: unicode
"""
self.set_field_value("bulk_id", bulk_id)
def set_bulk_id(self, bulk_id):
self.bulk_id = bulk_id
return self | apache-2.0 | 2,975,082,414,965,617,700 | 25.791667 | 97 | 0.637354 | false |
glenc/sp.py | src/sp/utils.py | 1 | 3239 | # Set up References
import clr
clr.AddReference("System")
clr.AddReference("Microsoft.SharePoint")
from System import Uri
from Microsoft.SharePoint import *
from Microsoft.SharePoint.Administration import SPWebApplication
# Enumeration
# These are simple enumeration methods for walking over various SharePoint
# objects and collections.
def enum(col, fn):
"""Enumerate a collection and call function fn for each item."""
for x in col:
fn(x)
def enum_sites(webapp, fn):
"""
Enumerate all site collections in the specified web application
and call the specified function with each site collection.
"""
# just in case we were passed a URL, get the web app
webapp = get_webapplication(webapp)
enum(webapp.Sites, fn)
def enum_webs(site, fn):
"""
Enumerate all webs beneath the site or web specified
and call te specified function with each web.
"""
# do different things based on the type of object provided
if type(site) is SPWeb:
enum(site.Webs, fn)
else:
site = get_site(site)
enum(site.RootWeb.Webs, fn)
def enum_all_webs(site, fn):
"""Enumerate all webs in a site collection"""
site = get_site(site)
enum(site.AllWebs, fn)
def enum_lists(web, fn):
"""Enumerate all lists in the web specified"""
web = get_web(web)
enum(web.Lists, fn)
# Get Object Helper Methods
# These methods take in some sort of object identifier (usually a URL)
# and return the appropriate object instance
def get_webapplication(url):
"""Gets a web application by its URL"""
if type(url) is SPWebApplication:
return url
return SPWebApplication.Lookup(Uri(url))
def get_site(url):
"""Gets a site collection by its URL"""
if type(url) is SPSite:
return url
return SPSite(url)
def get_web(url):
"""Gets a web by its URL"""
if type(url) is SPWeb:
return url
if type(url) is SPSite:
return url.RootWeb
site = get_site(url)
relative_url = url.replace(site.Url, "")
return site.OpenWeb(relative_url)
def get_list(web, list_name):
"""Gets a list within a web"""
web = get_web(web)
return first(web.Lists, lambda l: l.Title == list_name)
def try_get_site(url):
"""Tries to get a site collection but returns false if no site was found"""
try:
site = get_site(url)
return True, site
except:
return False, None
def try_get_web(url):
"""Tries to get a web but returns false if no web was found"""
web = get_web(url)
if web.Exists:
return True, web
else:
return False, None
def try_get_list(web, list_name):
"""Tries to get a list but returns false if no list was found"""
l = get_list(web, list_name)
return l != None, l
# Find Object Helper Methods
# These methods are used to find objects in collections
def list_exists(web, list_name):
"""Checks if a list exists"""
web = get_web(web)
match = first(web.Lists, lambda l: l.Title == list_name)
return match != None
# List/Collection helper methods
def collect(collection, fn):
"""Collects items where the function evalueates as true"""
results = []
for item in collection:
if fn(item):
            results.append(item)
return results
def first(collection, fn):
"""Finds the first item in the collection where the function evaluates as true"""
for item in collection:
if fn(item):
return item
return None
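

# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Shows how the get_* and enum_* helpers above are typically combined.
# The web URL passed in is a hypothetical example supplied by the caller.
def print_list_titles(web_url):
    """Print the title of every list in the web at web_url (usage sketch)."""
    def show_title(l):
        print l.Title
    web = get_web(web_url)
    enum_lists(web, show_title)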
| bsd-3-clause | 7,495,485,383,914,679,000 | 20.885135 | 82 | 0.710096 | false |
dukhlov/oslo.messaging | oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_router_consumer.py | 1 | 3834 | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_messaging._drivers import base
from oslo_messaging._drivers.zmq_driver.server.consumers\
import zmq_consumer_base
from oslo_messaging._drivers.zmq_driver.server import zmq_incoming_message
from oslo_messaging._drivers.zmq_driver import zmq_address
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
from oslo_messaging._i18n import _LE, _LI
LOG = logging.getLogger(__name__)
zmq = zmq_async.import_zmq()
class RouterIncomingMessage(base.RpcIncomingMessage):
def __init__(self, context, message, socket, reply_id, msg_id,
poller):
super(RouterIncomingMessage, self).__init__(context, message)
self.socket = socket
self.reply_id = reply_id
self.msg_id = msg_id
self.message = message
poller.resume_polling(socket)
def reply(self, reply=None, failure=None, log_failure=True):
"""Reply is not needed for non-call messages"""
def acknowledge(self):
LOG.debug("Not sending acknowledge for %s", self.msg_id)
def requeue(self):
"""Requeue is not supported"""
class RouterConsumer(zmq_consumer_base.SingleSocketConsumer):
def __init__(self, conf, poller, server):
super(RouterConsumer, self).__init__(conf, poller, server, zmq.ROUTER)
self.matchmaker = server.matchmaker
self.host = zmq_address.combine_address(self.conf.rpc_zmq_host,
self.port)
self.targets = zmq_consumer_base.TargetsManager(
conf, self.matchmaker, self.host, zmq.ROUTER)
LOG.info(_LI("[%s] Run ROUTER consumer"), self.host)
def listen(self, target):
LOG.info(_LI("[%(host)s] Listen to target %(target)s"),
{'host': self.host, 'target': target})
self.targets.listen(target)
def cleanup(self):
super(RouterConsumer, self).cleanup()
self.targets.cleanup()
def _receive_request(self, socket):
reply_id = socket.recv()
empty = socket.recv()
assert empty == b'', 'Bad format: empty delimiter expected'
request = socket.recv_pyobj()
return request, reply_id
def receive_message(self, socket):
try:
request, reply_id = self._receive_request(socket)
LOG.debug("[%(host)s] Received %(type)s, %(id)s, %(target)s",
{"host": self.host,
"type": request.msg_type,
"id": request.message_id,
"target": request.target})
if request.msg_type == zmq_names.CALL_TYPE:
return zmq_incoming_message.ZmqIncomingRequest(
socket, reply_id, request, self.poller)
elif request.msg_type in zmq_names.NON_BLOCKING_TYPES:
return RouterIncomingMessage(
request.context, request.message, socket, reply_id,
request.message_id, self.poller)
else:
LOG.error(_LE("Unknown message type: %s"), request.msg_type)
except zmq.ZMQError as e:
LOG.error(_LE("Receiving message failed: %s"), str(e))
| apache-2.0 | 3,290,392,908,728,405,500 | 37.727273 | 78 | 0.6265 | false |
stormi/tsunami | src/primaires/objet/commandes/remplir/__init__.py | 1 | 4652 | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'remplir'."""
from primaires.interpreteur.commande.commande import Commande
class CmdRemplir(Commande):
"""Commande 'remplir'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "remplir", "fill")
self.nom_categorie = "objets"
self.schema = "<plat:nom_objet> avec/with (<nombre>) <nom_objet>"
self.aide_courte = "remplit un plat de nourriture"
self.aide_longue = \
"Cette commande permet de manipuler des plats (assiette, " \
"bol voire poêlon, marmite) en y mettant des objets de type " \
"nourriture. Un repas pris de cette manière sera meilleur " \
"et plus nourrissant."
def ajouter(self):
"""Méthode appelée lors de l'ajout de la commande à l'interpréteur"""
nom_objet = self.noeud.get_masque("nom_objet")
nom_objet.proprietes["conteneurs"] = \
"(personnage.equipement.inventaire_simple.iter_objets_qtt(" \
"True), )"
nom_objet.proprietes["quantite"] = "True"
nom_objet.proprietes["conteneur"] = "True"
plat = self.noeud.get_masque("plat")
plat.prioritaire = True
plat.proprietes["conteneurs"] = \
"(personnage.equipement.inventaire, " \
"personnage.salle.objets_sol)"
plat.proprietes["types"] = "('conteneur de nourriture', )"
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
personnage.agir("poser")
nombre = 1
if dic_masques["nombre"]:
nombre = dic_masques["nombre"].nombre
objets = list(dic_masques["nom_objet"].objets_qtt_conteneurs)[:nombre]
dans = dic_masques["plat"].objet
pose = 0
poids_total = dans.poids
for objet, qtt, conteneur in objets:
if not objet.peut_prendre:
personnage << "Vous ne pouvez pas prendre {} avec vos " \
"mains...".format(objet.get_nom())
return
if not objet.est_de_type("nourriture"):
personnage << "|err|Ceci n'est pas de la nourriture.|ff|"
return
poids_total += objet.poids
if poids_total > dans.poids_max:
if pose == 0:
personnage << "Vous ne pouvez rien y poser de plus."
return
else:
break
pose += 1
if qtt > nombre:
qtt = nombre
conteneur.retirer(objet, qtt)
dans.nourriture.append(objet)
if pose < qtt:
pose = qtt
personnage << "Vous déposez {} dans {}.".format(
objet.get_nom(pose), dans.nom_singulier)
personnage.salle.envoyer("{{}} dépose {} dans {}.".format(
objet.get_nom(pose), dans.nom_singulier), personnage)
| bsd-3-clause | -5,609,864,221,507,276,000 | 41.981481 | 79 | 0.615252 | false |
DTOcean/dtocean-core | tests/test_data_definitions_timetable.py | 1 | 7507 | import pytest
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from aneris.control.factory import InterfaceFactory
from dtocean_core.core import (AutoFileInput,
AutoFileOutput,
AutoPlot,
AutoQuery,
Core)
from dtocean_core.data import CoreMetaData
from dtocean_core.data.definitions import TimeTable, TimeTableColumn
def test_TimeTable_available():
new_core = Core()
all_objs = new_core.control._store._structures
assert "TimeTable" in all_objs.keys()
def test_TimeTable():
dates = []
dt = datetime(2010, 12, 01)
end = datetime(2010, 12, 02, 23, 59, 59)
step = timedelta(seconds=3600)
while dt < end:
dates.append(dt)
dt += step
values = np.random.rand(len(dates))
raw = {"DateTime": dates,
"a": values,
"b": values}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None]})
test = TimeTable()
a = test.get_data(raw, meta)
b = test.get_value(a)
assert "a" in b
assert len(b) == len(dates)
assert len(b.resample('D').mean()) == 2
def test_get_None():
test = TimeTable()
result = test.get_value(None)
assert result is None
@pytest.mark.parametrize("fext", [".csv", ".xls", ".xlsx"])
def test_TimeTable_auto_file(tmpdir, fext):
test_path = tmpdir.mkdir("sub").join("test{}".format(fext))
test_path_str = str(test_path)
dates = []
dt = datetime(2010, 12, 01)
end = datetime(2010, 12, 02, 23, 59, 59)
step = timedelta(seconds=3600)
while dt < end:
dates.append(dt)
dt += step
values = np.random.rand(len(dates))
raw = {"DateTime": dates,
"a": values,
"b": values}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None]})
test = TimeTable()
fout_factory = InterfaceFactory(AutoFileOutput)
FOutCls = fout_factory(meta, test)
fout = FOutCls()
fout._path = test_path_str
fout.data.result = test.get_data(raw, meta)
fout.connect()
assert len(tmpdir.listdir()) == 1
fin_factory = InterfaceFactory(AutoFileInput)
FInCls = fin_factory(meta, test)
fin = FInCls()
fin._path = test_path_str
fin.connect()
result = test.get_data(fin.data.result, meta)
assert "a" in result
assert len(result) == len(dates)
assert len(result.resample('D').mean()) == 2
def test_TimeTable_auto_plot(tmpdir):
dates = []
dt = datetime(2010, 12, 01)
end = datetime(2010, 12, 02, 23, 59, 59)
step = timedelta(seconds=3600)
while dt < end:
dates.append(dt)
dt += step
values = np.random.rand(len(dates))
raw = {"DateTime": dates,
"a": values,
"b": values}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None]})
test = TimeTable()
fout_factory = InterfaceFactory(AutoPlot)
PlotCls = fout_factory(meta, test)
plot = PlotCls()
plot.data.result = test.get_data(raw, meta)
plot.meta.result = meta
plot.connect()
assert len(plt.get_fignums()) == 1
plt.close("all")
def test_TimeTableColumn_available():
new_core = Core()
all_objs = new_core.control._store._structures
assert "TimeTableColumn" in all_objs.keys()
def test_TimeTableColumn_auto_db(mocker):
dates = []
dt = datetime(2010, 12, 01)
end = datetime(2010, 12, 02, 23, 59, 59)
step = timedelta(seconds=3600)
while dt < end:
dates.append(dt)
dt += step
values = np.random.rand(len(dates))
mock_dict = {"date": [x.date() for x in dates],
"time": [x.time() for x in dates],
"a": values,
"b": values}
mock_df = pd.DataFrame(mock_dict)
mocker.patch('dtocean_core.data.definitions.get_table_df',
return_value=mock_df,
autospec=True)
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None],
"tables": ["mock.mock", "date", "time", "a", "b"]})
test = TimeTableColumn()
query_factory = InterfaceFactory(AutoQuery)
QueryCls = query_factory(meta, test)
query = QueryCls()
query.meta.result = meta
query.connect()
result = test.get_data(query.data.result, meta)
assert "a" in result
assert len(result) == len(dates)
assert len(result.resample('D').mean()) == 2
def test_TimeSeriesColumn_auto_db_empty(mocker):
mock_dict = {"date": [],
"time": [],
"a": [],
"b": []}
mock_df = pd.DataFrame(mock_dict)
mocker.patch('dtocean_core.data.definitions.get_table_df',
return_value=mock_df,
autospec=True)
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None],
"tables": ["mock.mock", "date", "time", "a", "b"]})
test = TimeTableColumn()
query_factory = InterfaceFactory(AutoQuery)
QueryCls = query_factory(meta, test)
query = QueryCls()
query.meta.result = meta
query.connect()
assert query.data.result is None
def test_TimeSeriesColumn_auto_db_none(mocker):
dates = []
dt = datetime(2010, 12, 01)
end = datetime(2010, 12, 02, 23, 59, 59)
step = timedelta(seconds=3600)
while dt < end:
dates.append(dt)
dt += step
values = np.random.rand(len(dates))
mock_dict = {"date": [None] * len(dates),
"time": [x.time() for x in dates],
"a": values,
"b": values}
mock_df = pd.DataFrame(mock_dict)
mocker.patch('dtocean_core.data.definitions.get_table_df',
return_value=mock_df,
autospec=True)
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None],
"tables": ["mock.mock", "date", "time", "a", "b"]})
test = TimeTableColumn()
query_factory = InterfaceFactory(AutoQuery)
QueryCls = query_factory(meta, test)
query = QueryCls()
query.meta.result = meta
query.connect()
assert query.data.result is None
| gpl-3.0 | -5,739,731,147,225,360,000 | 25.620567 | 76 | 0.499534 | false |
SVilgelm/CloudFerry | cloudferry/lib/base/action/is_end_iter.py | 1 | 1174 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
from cloudferry.lib.base.action import action
from cloudferry.lib.utils import utils as utl
class IsEndIter(action.Action):
def __init__(self, init, iter_info_name='info_iter',
resource_name=utl.INSTANCES_TYPE):
self.iter_info_name = iter_info_name
self.resource_name = resource_name
super(IsEndIter, self).__init__(init)
def run(self, **kwargs):
info = kwargs[self.iter_info_name]
objs = info[self.resource_name]
if objs:
self.num_element = 1
else:
self.num_element = 0
return {}
| apache-2.0 | 8,738,482,862,329,376,000 | 32.542857 | 70 | 0.682283 | false |
punchagan/zulip | zerver/webhooks/pagerduty/view.py | 1 | 7008 | # Webhooks for external integrations.
from typing import Any, Dict, Sequence
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
PAGER_DUTY_EVENT_NAMES = {
"incident.trigger": "triggered",
"incident.acknowledge": "acknowledged",
"incident.unacknowledge": "unacknowledged",
"incident.resolve": "resolved",
"incident.assign": "assigned",
"incident.escalate": "escalated",
"incident.delegate": "delineated",
}
PAGER_DUTY_EVENT_NAMES_V2 = {
"incident.trigger": "triggered",
"incident.acknowledge": "acknowledged",
"incident.resolve": "resolved",
"incident.assign": "assigned",
}
ASSIGNEE_TEMPLATE = "[{username}]({url})"
INCIDENT_WITH_SERVICE_AND_ASSIGNEE = (
"Incident [{incident_num}]({incident_url}) {action} by [{service_name}]"
"({service_url}) (assigned to {assignee_info}):\n\n``` quote\n{trigger_message}\n```"
)
INCIDENT_WITH_ASSIGNEE = """
Incident [{incident_num}]({incident_url}) {action} by {assignee_info}:
``` quote
{trigger_message}
```
""".strip()
INCIDENT_ASSIGNED = """
Incident [{incident_num}]({incident_url}) {action} to {assignee_info}:
``` quote
{trigger_message}
```
""".strip()
INCIDENT_RESOLVED_WITH_AGENT = """
Incident [{incident_num}]({incident_url}) resolved by {resolving_agent_info}:
``` quote
{trigger_message}
```
""".strip()
INCIDENT_RESOLVED = """
Incident [{incident_num}]({incident_url}) resolved:
``` quote
{trigger_message}
```
""".strip()
def build_pagerduty_formatdict(message: Dict[str, Any]) -> Dict[str, Any]:
format_dict: Dict[str, Any] = {}
format_dict["action"] = PAGER_DUTY_EVENT_NAMES[message["type"]]
format_dict["incident_id"] = message["data"]["incident"]["id"]
format_dict["incident_num"] = message["data"]["incident"]["incident_number"]
format_dict["incident_url"] = message["data"]["incident"]["html_url"]
format_dict["service_name"] = message["data"]["incident"]["service"]["name"]
format_dict["service_url"] = message["data"]["incident"]["service"]["html_url"]
if message["data"]["incident"].get("assigned_to_user", None):
assigned_to_user = message["data"]["incident"]["assigned_to_user"]
format_dict["assignee_info"] = ASSIGNEE_TEMPLATE.format(
username=assigned_to_user["email"].split("@")[0],
url=assigned_to_user["html_url"],
)
else:
format_dict["assignee_info"] = "nobody"
if message["data"]["incident"].get("resolved_by_user", None):
resolved_by_user = message["data"]["incident"]["resolved_by_user"]
format_dict["resolving_agent_info"] = ASSIGNEE_TEMPLATE.format(
username=resolved_by_user["email"].split("@")[0],
url=resolved_by_user["html_url"],
)
trigger_message = []
trigger_summary_data = message["data"]["incident"]["trigger_summary_data"]
if trigger_summary_data is not None:
trigger_subject = trigger_summary_data.get("subject", "")
if trigger_subject:
trigger_message.append(trigger_subject)
trigger_description = trigger_summary_data.get("description", "")
if trigger_description:
trigger_message.append(trigger_description)
format_dict["trigger_message"] = "\n".join(trigger_message)
return format_dict
def build_pagerduty_formatdict_v2(message: Dict[str, Any]) -> Dict[str, Any]:
format_dict = {}
format_dict["action"] = PAGER_DUTY_EVENT_NAMES_V2[message["event"]]
format_dict["incident_id"] = message["incident"]["id"]
format_dict["incident_num"] = message["incident"]["incident_number"]
format_dict["incident_url"] = message["incident"]["html_url"]
format_dict["service_name"] = message["incident"]["service"]["name"]
format_dict["service_url"] = message["incident"]["service"]["html_url"]
assignments = message["incident"]["assignments"]
if assignments:
assignee = assignments[0]["assignee"]
format_dict["assignee_info"] = ASSIGNEE_TEMPLATE.format(
username=assignee["summary"], url=assignee["html_url"]
)
else:
format_dict["assignee_info"] = "nobody"
last_status_change_by = message["incident"].get("last_status_change_by")
if last_status_change_by is not None:
format_dict["resolving_agent_info"] = ASSIGNEE_TEMPLATE.format(
username=last_status_change_by["summary"],
url=last_status_change_by["html_url"],
)
trigger_description = message["incident"].get("description")
if trigger_description is not None:
format_dict["trigger_message"] = trigger_description
return format_dict
def send_formated_pagerduty(
request: HttpRequest, user_profile: UserProfile, message_type: str, format_dict: Dict[str, Any]
) -> None:
if message_type in ("incident.trigger", "incident.unacknowledge"):
template = INCIDENT_WITH_SERVICE_AND_ASSIGNEE
elif message_type == "incident.resolve" and format_dict.get("resolving_agent_info") is not None:
template = INCIDENT_RESOLVED_WITH_AGENT
elif message_type == "incident.resolve" and format_dict.get("resolving_agent_info") is None:
template = INCIDENT_RESOLVED
elif message_type == "incident.assign":
template = INCIDENT_ASSIGNED
else:
template = INCIDENT_WITH_ASSIGNEE
subject = "Incident {incident_num}".format(**format_dict)
body = template.format(**format_dict)
check_send_webhook_message(request, user_profile, subject, body)
@webhook_view("PagerDuty")
@has_request_variables
def api_pagerduty_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Sequence[Dict[str, Any]]] = REQ(argument_type="body"),
) -> HttpResponse:
for message in payload["messages"]:
message_type = message.get("type")
# If the message has no "type" key, then this payload came from a
# Pagerduty Webhook V2.
if message_type is None:
break
if message_type not in PAGER_DUTY_EVENT_NAMES:
raise UnsupportedWebhookEventType(message_type)
format_dict = build_pagerduty_formatdict(message)
send_formated_pagerduty(request, user_profile, message_type, format_dict)
for message in payload["messages"]:
event = message.get("event")
# If the message has no "event" key, then this payload came from a
# Pagerduty Webhook V1.
if event is None:
break
if event not in PAGER_DUTY_EVENT_NAMES_V2:
raise UnsupportedWebhookEventType(event)
format_dict = build_pagerduty_formatdict_v2(message)
send_formated_pagerduty(request, user_profile, event, format_dict)
return json_success()
| apache-2.0 | 2,138,208,620,822,185,200 | 34.21608 | 100 | 0.661244 | false |
peterkuma/tjrapid | ob/views.py | 1 | 1560 | # -*- coding: utf-8 -*-
import os

from django.shortcuts import render
from django.template import RequestContext
from django.utils import translation
from django.shortcuts import get_object_or_404
from django.http import Http404, HttpResponseRedirect
from main.models import *
from ob.models import *
def events(request, category_name):
category = get_object_or_404(Category, name=category_name)
events = Event.objects.filter(category=category)
return render(request,
'ob/events.html', {
'events': events,
'category': category,
},
RequestContext(request)
)
def event(request, name, category_name):
category = get_object_or_404(Category, name=category_name)
event = get_object_or_404(Event, category=category, name=name)
return render(request,
'ob/event.html', {
'event': event,
'category': category,
},
RequestContext(request)
)
def attachment(request, category_name, event_name, name):
category = get_object_or_404(Category, name=category_name)
event = get_object_or_404(Event, category=category, name=event_name)
for a in event.attachments.all():
if os.path.basename(a.file.name) == name:
return HttpResponseRedirect(a.file.url)
raise Http404
def members(request, category_name):
members_m = Member.objects.filter(category__startswith='M')
members_w = Member.objects.filter(category__startswith='W')
category = Category.objects.get(name=category_name)
return render(request,
'ob/members.html', {
'members_m': members_m,
'members_w': members_w,
'category': category,
},
RequestContext(request)
)
| mit | -8,542,457,093,815,724,000 | 25.896552 | 69 | 0.730128 | false |
sorenh/python-django-repomgmt | repomgmt/admin.py | 1 | 1108 | #
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.contrib import admin
from repomgmt.models import Architecture, Repository, BuildNode
from repomgmt.models import Cloud, KeyPair, Series, ChrootTarball
from repomgmt.models import UploaderKey
admin.site.register(Architecture)
admin.site.register(Repository)
admin.site.register(BuildNode)
admin.site.register(Cloud)
admin.site.register(KeyPair)
admin.site.register(Series)
admin.site.register(ChrootTarball)
admin.site.register(UploaderKey)
| apache-2.0 | 1,229,453,740,046,272,500 | 35.933333 | 76 | 0.775271 | false |
rlbabyuk/integration_tests | cfme/scripting/ipyshell.py | 1 | 1483 | # -*- coding: utf-8 -*-
import sys
from . import quickstart
from IPython.terminal.interactiveshell import TerminalInteractiveShell
IMPORTS = [
'from utils import conf',
'from fixtures.pytest_store import store',
'from utils.appliance.implementations.ui import navigate_to',
'from utils import providers'
]
def main():
"""Use quickstart to ensure we have correct env, then execute imports in ipython and done."""
quickstart.main(quickstart.parser.parse_args(['--mk-virtualenv', sys.prefix]))
print('Welcome to IPython designed for running CFME QE code.')
ipython = TerminalInteractiveShell.instance()
for code_import in IMPORTS:
print('> {}'.format(code_import))
ipython.run_cell(code_import)
from utils.path import conf_path
custom_import_path = conf_path.join('miq_python_startup.py')
if custom_import_path.exists():
with open(custom_import_path.strpath, 'r') as custom_import_file:
custom_import_code = custom_import_file.read()
print('Importing custom code:\n{}'.format(custom_import_code.strip()))
ipython.run_cell(custom_import_code)
else:
print(
'You can create your own python file with imports you use frequently. '
'Just create a conf/miq_python_startup.py file in your repo. '
'This file can contain arbitrary python code that is executed in this context.')
ipython.interact()
if __name__ == '__main__':
main()
| gpl-2.0 | -2,106,811,254,778,584,000 | 37.025641 | 97 | 0.675657 | false |
cedias/NNPy | NNPy.py | 1 | 14499 | #-*- coding: utf-8 -*-
import numpy as np
# ############ #
# Abstractions #
# ############ #
# Losses
class Loss:
    # Computes the loss value given the predicted and desired outputs
def getLossValue(self,predicted_output,desired_output):
pass
    # Computes the gradient of the cost (for each input cell)
def backward(self, predicted_output,desired_output):
pass
# Module
class Module:
    # Computes the module output
def forward(self,input):
pass
    # Computes the gradient of the input cells
def backward_delta(self,input,delta_module_suivant):
pass
    # Initializes the module gradient
def init_gradient(self):
pass
    # Updates the module parameters with the current value of the gradient
def update_parameters(self,gradient_step):
pass
    # Updates the current value of the gradient by addition
def backward_update_gradient(self,input,delta_module_suivant):
pass
    # Performs both backward steps at once
def backward(self,input,delta_module_suivant):
self.backward_update_gradient(input,delta_module_suivant)
        return self.backward_delta(input,delta_module_suivant)
    # Returns the module parameters
def get_parameters(self):
pass
    # Randomly initializes the module parameters
def randomize_parameters(self, variance):
pass
# ############## #
# Implementation #
# ############## #
#########LOSSES
#Square Loss
class SquareLoss(Loss):
def getLossValue(self,predicted_output,desired_output):
return np.power(desired_output-predicted_output,2)
def backward(self, predicted_output,desired_output):
return 2*(predicted_output-desired_output)
#HingeLoss
class HingeLoss(Loss):
def getLossValue(self,predicted_output,desired_output):
        return np.maximum(np.zeros(predicted_output.size), -desired_output*predicted_output)
def backward(self, predicted_output,desired_output):
res = np.zeros(desired_output.size)
prod = -desired_output*predicted_output
index = np.where(prod >=0 )
res[index] = -desired_output[index]
return res
#########Modules
#Module linéaire
#In => [Vector] Out <V.Parameters>
class LinearModule(Module):
#Permet le calcul de la sortie du module
def __init__(self,entry_size,layer_size):
self.entry_size = entry_size
self.layer_size = layer_size
self.init_gradient()
self.randomize_parameters()
def forward(self,input):
return np.dot(self.parameters,input)
#Permet le calcul du gradient des cellules d'entrée
def backward_delta(self,input,delta_module_suivant):
return np.sum((delta_module_suivant*self.parameters.T).T,axis=0)
#Permet d'initialiser le gradient du module
def init_gradient(self):
self.gradient = np.zeros((self.layer_size,self.entry_size))
return
#Permet la mise à jour des parmaètres du module avcec la valeur courante di gradient
def update_parameters(self,gradient_step):
self.parameters -= self.gradient*gradient_step
self.gradient = np.zeros((self.layer_size,self.entry_size))
return
#Permet de mettre à jour la valeur courante du gradient par addition
# def backward_update_gradient(self,input,delta_module_suivant):
# newGrad = np.zeros((self.layer_size,self.entry_size))
# for i in xrange(0,self.layer_size):
# di = delta_module_suivant[i]
# newGrad[i,:] = di*input
# self.gradient += newGrad
# return
def backward_update_gradient(self,input,delta_module_suivant):
self.gradient += np.tile(input,(self.layer_size,1)) * np.reshape(delta_module_suivant,(self.layer_size,1))
#Permet de faire les deux backwar simultanément
def backward(self,input,delta_module_suivant):
self.backward_update_gradient(input,delta_module_suivant)
return self.backward_delta(input,delta_module_suivant)
#Retourne les paramètres du module
def get_parameters(self):
return self.parameters
#Initialize aléatoirement les paramètres du module
def randomize_parameters(self):
self.parameters = np.random.randn(self.layer_size,self.entry_size)
return
# Tanh Activation Function
class TanhModule(Module):
#Permet le calcul de la sortie du module
def __init__(self,entry_size,layer_size):
pass
def forward(self,input):
return np.tanh(input)
#Permet le calcul du gradient des cellules d'entrée
def backward_delta(self,input,delta_module_suivant):
return (1-np.power(np.tanh(input),2))*delta_module_suivant
#Permet d'initialiser le gradient du module
def init_gradient(self):
pass
#Permet la mise à jour des parmaètres du module avcec la valeur courante di gradient
def update_parameters(self,gradient_step):
pass
#Permet de mettre à jour la valeur courante du gradient par addition
def backward_update_gradient(self,input,delta_module_suivant):
pass
#Permet de faire les deux backwar simultanément
def backward(self,input,delta_module_suivant):
return self.backward_delta(input,delta_module_suivant)
#Retourne les paramètres du module
def get_parameters(self):
pass
#Initialize aléatoirement les paramètres du module
def randomize_parameters(self):
pass
# Logistic Activation Function
class LogisticModule(Module):
#Permet le calcul de la sortie du module
def __init__(self,entry_size,layer_size):
pass
def forward(self,input):
        return np.power((1+np.exp(-1*input)),-1)
#Permet le calcul du gradient des cellules d'entrée
def backward_delta(self,input,delta_module_suivant):
return self.forward(input)*(1-self.forward(input))*delta_module_suivant
#Permet d'initialiser le gradient du module
def init_gradient(self):
pass
#Permet la mise à jour des parmaètres du module avcec la valeur courante di gradient
def update_parameters(self,gradient_step):
pass
#Permet de mettre à jour la valeur courante du gradient par addition
def backward_update_gradient(self,input,delta_module_suivant):
pass
#Permet de faire les deux backwar simultanément
def backward(self,input,delta_module_suivant):
return self.backward_delta(input,delta_module_suivant)
#Retourne les paramètres du module
def get_parameters(self):
pass
#Initialize aléatoirement les paramètres du module
def randomize_parameters(self):
pass
class DropoutModule(Module):
#Permet le calcul de la sortie du module
def __init__(self,entry_size,layer_size):
pass
def forward(self,input):
self.randomActivation = np.random.random_integers(0,1,len(input))
return input*self.randomActivation
#Permet le calcul du gradient des cellules d'entrée
def backward_delta(self,input,delta_module_suivant):
        # Only units kept in forward should pass gradient back
        return delta_module_suivant*self.randomActivation
#Permet d'initialiser le gradient du module
def init_gradient(self):
pass
#Permet la mise à jour des parmaètres du module avcec la valeur courante di gradient
def update_parameters(self,gradient_step):
pass
#Permet de mettre à jour la valeur courante du gradient par addition
def backward_update_gradient(self,input,delta_module_suivant):
pass
#Permet de faire les deux backwar simultanément
def backward(self,input,delta_module_suivant):
return self.backward_delta(input,delta_module_suivant)
#Retourne les paramètres du module
def get_parameters(self):
pass
#Initialize aléatoirement les paramètres du module
def randomize_parameters(self):
pass
#########################
# AGGREGATION FUNCTION #
#########################
class sumAggregModule(Module):
#Permet le calcul de la sortie du module
def __init__(self,entry_size,layer_size):
pass
def forward(self,input):
return np.sum(input,axis=0)
#Permet le calcul du gradient des cellules d'entrée
def backward_delta(self,input,delta_module_suivant):
return delta_module_suivant
#Permet d'initialiser le gradient du module
def init_gradient(self):
pass
#Permet la mise à jour des parmaètres du module avcec la valeur courante di gradient
def update_parameters(self,gradient_step):
pass
#Permet de mettre à jour la valeur courante du gradient par addition
def backward_update_gradient(self,input,delta_module_suivant):
pass
#Permet de faire les deux backwar simultanément
def backward(self,input,delta_module_suivant):
return self.backward_delta(input,delta_module_suivant)
#Retourne les paramètres du module
def get_parameters(self):
pass
#Initialize aléatoirement les paramètres du module
def randomize_parameters(self):
pass
#########################
# MULTI MODULES CLASSES #
#########################
#Network Module Class
class NetworkModule():
def __init__(self,modules,loss):
self.modules = modules
self.loss = loss
def forwardIteration(self,input):
for module in self.modules:
input = module.forward_all(input)
return input
def forwardAll(self,examples):
return [self.forwardIteration(example) for example in examples]
#Permet le calcul du gradient des cellules d'entrée
def backwardIteration(self,predicted,wanted,batch=False,gradient_step=0.001):
loss_delta = self.loss.backward(predicted,wanted)
for mod in reversed(self.modules):
loss_delta = mod.backward_all(loss_delta)
if not batch:
self.update_parameters(gradient_step)
return loss_delta
def update_parameters(self,gradient_step):
for module in self.modules:
module.update_all_parameters(gradient_step)
return
def stochasticIter(self,examples,labels,gradient_step=0.001, verbose=False):
for example, label in zip(examples,labels):
pred = self.forwardIteration(example)
loss = self.backwardIteration(pred,label,gradient_step=gradient_step)
if verbose:
print loss
return loss
def batchIter(self,examples,labels,gradient_step=0.001, verbose=False):
for example, label in zip(examples,labels):
pred = self.forwardIteration(example)
loss = self.backwardIteration(pred,label,batch=True,gradient_step=gradient_step)
if verbose:
print loss
self.update_parameters(gradient_step)
return
def miniBatchIter(self,examples,labels,batch_size=10, gradient_step=0.001, verbose=False):
for i, (example, label) in enumerate(zip(examples,labels)):
pred = self.forwardIteration(example)
loss = self.backwardIteration(pred,label,batch=True,gradient_step=gradient_step)
if verbose:
print loss
if i%batch_size == 0:
self.update_parameters(gradient_step)
self.update_parameters(gradient_step)
return
#Quick utility function works for 1/-1 labels only
def trainTest(self,trainV,trainL,testV,testL,nbIter,gradient_step):
print("=======TRAIN ERROR=======")
for i in xrange(0,nbIter):
self.stochasticIter(trainV, trainL, gradient_step, verbose=False)
predicted = self.forwardAll(trainV)
ok=0
ko=0
for pred,exp in zip(predicted,trainL):
if pred[0]*exp[0] > 0:
ok+=1
else:
ko+=1
print ("%d correct (%f%%), %d incorrect (%f%%) " % (ok,ok/(ok+ko+0.0)*100,ko,ko/(ok+ko+0.0)*100))
print ("Learning done")
print ("=======TEST ERROR=======")
predicted = self.forwardAll(testV)
ok=0
ko=0
for pred,exp in zip(predicted,testL):
if pred[0]*exp[0] > 0:
ok+=1
else:
ko+=1
print ("%d correct (%f%%), %d incorrect (%f%%) " % (ok,ok/(ok+ko+0.0)*100,ko,ko/(ok+ko+0.0)*100))
#Horizontal Module Class
class HorizontalModule():
def __init__(self,modules):
self.modules = modules
def forward_all(self,input):
self.inputs = []
for module in self.modules:
self.inputs.append(input)
input = module.forward(input)
return input
#Permet le calcul du gradient des cellules d'entrée
def backward_all(self,loss_delta):
for module,input in zip(reversed(self.modules),reversed(self.inputs)):
loss_delta = module.backward(input,loss_delta)
return loss_delta
def update_all_parameters(self,gradient_step):
for module in self.modules:
module.update_parameters(gradient_step)
return
#Vertical Module Class
class VerticalModule():
#Permet le calcul de la sortie du module
def __init__(self,HModules,aggreg):
self.modules = HModules
self.aggreg = aggreg
    def forward_all(self,input):
        # Reconstructed: the original body referenced undefined names
        # (self.HModules, bare aggreg); the intent below is kept unchanged.
        if len(input) < len(self.modules):
            raise ValueError("Not enough inputs in vertical module")
        self.inputs = input
        self.outputs = [module.forward(input[index]) for index, module in enumerate(self.modules)]
        return self.aggreg.forward(self.outputs)
    #Permet le calcul du gradient des cellules d'entrée
    def backward_all(self,delta):
        # Reconstructed sketch: back-propagate the aggregated delta through each
        # sub-module with the input it saw in forward_all; parameter updates are
        # deferred to update_all_parameters, as in HorizontalModule.
        for module, inp, output in zip(self.modules, self.inputs, self.outputs):
            module_delta = self.aggreg.backward_delta(output, delta)
            module.backward(inp, module_delta)
        return delta
def update_all_parameters(self,gradient_step):
for module in self.modules:
module.update_parameters(gradient_step)
return
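

# --- Illustrative usage (editor's sketch, not part of the original file) ---
# Wires the classes above into a one-hidden-layer network trained with
# SquareLoss on random data; layer sizes and the toy labels are arbitrary.
if __name__ == "__main__":
    layers = HorizontalModule([LinearModule(4, 8),
                               TanhModule(8, 8),
                               LinearModule(8, 1)])
    net = NetworkModule([layers], SquareLoss())
    examples = [np.random.randn(4) for _ in xrange(20)]
    labels = [np.array([1.0]) if x.sum() > 0 else np.array([-1.0]) for x in examples]
    net.stochasticIter(examples, labels, gradient_step=0.01)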
| mit | 1,651,238,196,075,126,300 | 30.526201 | 114 | 0.636817 | false |
google-research/google-research | enas_lm/src/tpu/data_utils.py | 1 | 3091 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data input pipeline for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
import numpy as np
import tensorflow.compat.v1 as tf
gfile = tf.gfile
def _pad_to_batch(batch_size, data):
"""Pad `Tensor`s in data so that `N == batch_size` and return `mask`."""
x = data['x']
curr_batch_size = tf.shape(x)[0]
if curr_batch_size == batch_size:
masks = tf.ones([batch_size], dtype=tf.float32)
return data, masks
batch_diff = batch_size - curr_batch_size
padded_data = {}
for key, val in data.items():
val = tf.pad(val, [[0, batch_diff]] + [[0, 0]] * (val.shape.ndims - 1))
val.set_shape([batch_size] + val.shape.as_list()[1:])
padded_data[key] = val
masks = tf.pad(tf.ones([curr_batch_size], dtype=tf.float32),
[[0, batch_diff]])
masks.set_shape([batch_size])
return padded_data, masks
def input_fn(params):
"""For `TPUEstimator`."""
with gfile.GFile(params.data_path, 'rb') as finp:
x_train, x_valid, x_test, _, _ = pickle.load(finp)
tf.logging.info('-' * 80)
tf.logging.info('train_size: {0}'.format(np.size(x_train)))
tf.logging.info('valid_size: {0}'.format(np.size(x_valid)))
tf.logging.info(' test_size: {0}'.format(np.size(x_test)))
def _build_dataset(data, batch_size, bptt_steps):
"""Create LM dataset from a `data` tensor."""
num_batches = np.size(data) // batch_size
data = np.reshape(data[:batch_size*num_batches], [batch_size, num_batches])
data = np.transpose(data)
dataset = tf.data.Dataset.from_tensor_slices({'x': data[:-1],
'y': data[1:]})
dataset = dataset.repeat()
dataset = dataset.batch(batch_size=bptt_steps, drop_remainder=True)
def pad_to_batch(data):
padded_data, masks = _pad_to_batch(bptt_steps, data)
return padded_data, masks
dataset = dataset.map(map_func=pad_to_batch)
dataset = dataset.prefetch(2) # Prefetch overlaps in-feed with training
return dataset
if params.task_mode == 'train':
return _build_dataset(x_train, params.train_batch_size, params.bptt_steps)
elif params.task_mode == 'valid':
return _build_dataset(x_valid, params.eval_batch_size, params.bptt_steps)
elif params.task_mode == 'test':
return _build_dataset(x_test, params.eval_batch_size, params.bptt_steps)
else:
raise ValueError('Unknown task_mode {0}'.format(params.task_mode))
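

# --- Illustrative usage (editor's sketch, not part of the original file) ---
# input_fn expects a params object carrying at least the attributes used above;
# the values below are arbitrary examples.
# import collections
# Params = collections.namedtuple(
#     'Params', ['data_path', 'task_mode', 'train_batch_size',
#                'eval_batch_size', 'bptt_steps'])
# dataset = input_fn(Params('ptb.pkl', 'train', 64, 64, 35))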
| apache-2.0 | 5,911,740,309,309,060,000 | 36.240964 | 79 | 0.664833 | false |
PVirie/aknowthai | src/test.py | 1 | 1728 | import gen
import network as ann
import numpy as np
import util
def eval(neural_net, data, labels):
classes, alphas = neural_net.scan(data, gen.get_default_total_code())
data3ch = util.cvtColorGrey2RGB(data)
red = np.array([1.0, 0.0, 0.0], dtype=np.float32)
for b in xrange(alphas.shape[0]):
for c in xrange(alphas.shape[1]):
data3ch[b, c, int(np.floor((1.0 - alphas[b, c]) * (data3ch.shape[2] - 1))), :] = red
tile = util.make_tile(data3ch, rows=600, cols=800, flip=True)
util.numpy_to_image(tile).show()
# now get only classess corresponding to high alphas
index_output = np.argmax(classes, axis=2)
util.save_txt(index_output, "../artifacts/" + "data.out")
count = 0
correct = 0
for b in xrange(labels.shape[0]):
for c in xrange(labels.shape[1]):
if labels[b, c] > 0:
correct += 1 if labels[b, c] == index_output[b, c] else 0
count += 1
print "Percent correct = ", correct * 100.0 / count
collector = []
for b in xrange(alphas.shape[0]):
read_index = 0
converted = gen.indices_to_unicode(index_output[b])
read_word = u""
for c in xrange(alphas.shape[1]):
if alphas[b, c] > 0.5:
read_word = read_word + converted[read_index]
read_index = read_index + 1
print read_word
collector.append(read_word)
return collector
words, imgs = gen.get_tuples(range(100))
word_mat, img_mat = gen.prepare_input_tensors(words, imgs)
nn = ann.Network(img_mat.shape, word_mat.shape, gen.get_default_total_code(), 100)
nn.load_session("../artifacts/" + "test_weight")
eval(nn, img_mat, word_mat)
# raw_input()
| mit | -4,338,133,510,740,657,000 | 31.603774 | 96 | 0.601852 | false |
xingnix/learning | machinelearning/python/computationallearningtheory/weightedmajority.py | 1 | 2790 | import re
import numpy as np
input_string = """
Example Sky AirTemp Humidity Wind Water Forecast EnjoySport
1 Sunny Warm Normal Strong Warm Same Yes
2 Sunny Warm High Strong Warm Same Yes
3 Rainy Cold High Strong Warm Change No
4 Sunny Warm High Strong Cool Change Yes"""
lines = map(lambda x: filter(lambda y: y != '',
re.split(' +', x))[1:], # drop first item "Example"
filter(lambda x: x != '', re.split('\n', input_string)))
names, data = lines[0], lines[1:]
data_lines = map(lambda x: dict(zip(names, x)), data)
values = dict(zip(names,
reduce(lambda x, y:
map(lambda z:
z[0] | set([z[1]]) if type(z[0]) is set else set(z),
zip(x, y)),
lines[1:])))
names, target = names[:-1], names[-1] # the last item is target
p = 'Yes'
n = 'No'
def listconcept(names, values):
# rules=dict.fromkeys(names,'?')
rules = [{}]
newrules = []
for name in names:
for rule in rules:
for value in values[name]:
r = rule.copy()
r[name] = value
newrules.append(r)
rule[name] = '?'
newrules.append(rule)
rules = newrules
newrules = []
return rules
def listconcept(names, values):
# rules=dict.fromkeys(names,'?')
rules = [{}]
newrules = []
for name in names:
for rule in rules:
for value in values[name] | set(['?']):
r = rule.copy()
r[name] = value
newrules.append(r)
rules = newrules
newrules = []
return rules
def predict(h,sample):
names=h.keys()
p = 1 if (reduce(lambda x, y: x and y,
map(lambda name:
sample[name] == h[name] or h[name] == '?',
names))) else 0
return {"Yes":p,"No": 1-p}
def weight(samples,names,values):
beta=0.1
w=np.ones(len(H))
for sample in samples:
p={"Yes":0,"No":0}
for i in range(len(H)):
a=predict(H[i],sample)
if a["Yes"]>a["No"]:
c="Yes"
else:
c="No"
if sample[target]!=c:
w[i]*=beta
return w
def weightedmajorityclassifier(H,w,sample):
p={"Yes":0,"No":0}
for i in range(len(H)):
prediction= predict(H[i],sample)
for t in ["Yes","No"]:
p[t]+=prediction[t]*w[i]
return p
H=listconcept(names, values)
w=weight(data_lines,names,values)
weightedmajorityclass=weightedmajorityclassifier(H,w,data_lines[0])
print "Weighted Majority Classification : \n data: " ,data_lines[0], "\n class ",weightedmajorityclass
| gpl-3.0 | 3,463,591,842,420,936,000 | 29.659341 | 104 | 0.513262 | false |
cpbl/cpblUtilities | matplotlib_utils.py | 1 | 7242 | #!/usr/bin/python
import matplotlib.pyplot as plt
def prepare_figure_for_publication(ax=None,
width_cm=None,
width_inches=None,
height_cm=None,
height_inches=None,
fontsize=None,
fontsize_labels=None,
fontsize_ticklabels=None,
fontsize_legend=None,
fontsize_annotations =None,
TeX = True, # Used for ax=None case (setup)
):
"""
Two ways to use this:
(1) Before creating a figure, with ax=None
(2) To fine-tune a figure, using ax
One reasonable option for making compact figures like for Science/Nature is to create everything at double scale.
This works a little more naturally with Matplotlib's default line/axis/etc sizes.
Also, if you change sizes of, e.g. xticklabels and x-axis labels after they've been created, they will not necessarily be relocated appropriately.
So you can call prepare_figure_for_publication with no ax/fig argument to set up figure defaults
prior to creating the figure in the first place.
Some wisdom on graphics:
- 2015: How to produce PDFs of a given width, with chosen font size, etc:
(1) Fix width to journal specifications from the beginning / early. Adjust height as you go, according to preferences for aspect ratio:
figure(figsize=(11.4/2.54, chosen height))
(2) Do not use 'bbox_inches="tight"' in savefig('fn.pdf'). Instead, use the subplot_adjust options to manually adjust edges to get the figure content to fit in the PDF output
    (3) Be satisfied with that. If you must get something exactly tight and exactly the right size, you do this in Inkscape. But you cannot scale the content and bbox in the same step. Load PDF, select all, choose the units in the box at the top of the main menu bar, click on the lock there, set the width. Then, in File Properties dialog, resize file to content. Save.
"""
if ax is None: # Set up plot settings, prior to creation fo a figure
params = { 'axes.labelsize': fontsize_labels if fontsize_labels is not None else fontsize,
'font.size': fontsize,
'legend.fontsize': fontsize_legend if fontsize_legend is not None else fontsize,
'xtick.labelsize': fontsize_ticklabels if fontsize_ticklabels is not None else fontsize_labels if fontsize_labels is not None else fontsize,
'ytick.labelsize': fontsize_ticklabels if fontsize_ticklabels is not None else fontsize_labels if fontsize_labels is not None else fontsize,
'figure.figsize': (width_inches, height_inches),
}
if TeX:
params.update({
'text.usetex': TeX,
'text.latex.preamble': r'\usepackage{amsmath} \usepackage{amssymb}',
'text.latex.unicode': True,
})
if not TeX:
params.update({'text.latex.preamble':''})
plt.rcParams.update(params)
return
fig = ax.get_figure()
if width_inches:
fig.set_figwidth(width_inches)
assert width_cm is None
if height_inches:
fig.set_figheight(height_inches)
assert height_cm is None
if width_cm:
fig.set_figwidth(width_cm/2.54)
assert width_inches is None
if height_cm:
fig.set_figheight(height_cm/2.54)
assert height_inches is None
#ax = plt.subplot(111, xlabel='x', ylabel='y', title='title')
for item in fig.findobj(plt.Text) + [ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels():
if fontsize:
item.set_fontsize(fontsize)
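

# Illustrative usage of prepare_figure_for_publication (editor's sketch):
# the two modes described in its docstring. All sizes/fonts are arbitrary examples.
#
#   # (1) set rcParams before the figure exists:
#   prepare_figure_for_publication(width_inches=4.5, height_inches=3.0, fontsize=8)
#   fig, ax = plt.subplots()
#   # (2) or fine-tune an existing axes afterwards:
#   prepare_figure_for_publication(ax=ax, width_cm=11.4, fontsize=9)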
def plot_diagonal(xdata=None, ydata=None, ax=None, **args):
""" Plot a 45-degree line
"""
import pandas as pd
if ax is None: ax = plt.gca()
#LL = min(min(df[xv]), min(df[yv])), max(max(df[xv]), max(df[yv]))
if xdata is None and ydata is None:
xl, yl = ax.get_xlim(), ax.get_ylim()
LL = max(min(xl), min(yl)), min(max(xl), max(yl)),
elif xdata is not None and ydata is None:
assert isinstance(xdata, pd.DataFrame)
dd = xdata.dropna()
LL = dd.min().max(), dd.max().min()
else:
assert xdata is not None
assert ydata is not None
#if isinstance(xdata, pd.Series): xdata = xdata.vlu
xl, yl = xdata, ydata
LL = max(min(xl), min(yl)), min(max(xl), max(yl)),
ax.plot(LL, LL, **args)
def figureFontSetup(uniform=12,figsize='paper', amsmath=True):
"""
This is deprecated. Use prepare_figure_for_publication
Set font size settings for matplotlib figures so that they are reasonable for exporting to PDF to use in publications / presentations..... [different!]
If not for paper, this is not yet useful.
Here are some good sizes for paper:
figure(468,figsize=(4.6,2)) # in inches
figureFontSetup(uniform=12) # 12 pt font
for a subplot(211)
or for a single plot (?)
figure(127,figsize=(4.6,4)) # in inches. Only works if figure is not open from last run!
why does the following not work to deal with the bad bounding-box size problem?!
inkscape -f GSSseries-happyLife-QC-bw.pdf --verb=FitCanvasToDrawing -A tmp.pdf .: Due to inkscape cli sucks! bug.
--> See savefigall for an inkscape implementation.
    2012 May: new matplotlib has tight_layout(). But it rejigs all subplots etc. My inkscape solution is much better, since it doesn't change the layout. However, it does mean that the original size is not respected! ... Still, my favourite way from now on to make figures is to append the font size setting to the name, ie to make one for a given intended final size, and to do no rescaling in LaTeX. Use tight_layout() if it looks okay, but the inkscape solution in general.
n.b. a clf() erases size settings on a figure!
"""
figsizelookup={'paper':(4.6,4),'quarter':(1.25,1) ,None:None}
try:
figsize=figsizelookup[figsize]
    except (KeyError, TypeError):
pass
params = {#'backend': 'ps',
'axes.labelsize': 16,
#'text.fontsize': 14,
'font.size': 14,
'legend.fontsize': 10,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'text.usetex': True,
'figure.figsize': figsize
}
#'figure.figsize': fig_size}
if uniform is not None:
assert isinstance(uniform,int)
params = {#'backend': 'ps',
'axes.labelsize': uniform,
#'text.fontsize': uniform,
'font.size': uniform,
'legend.fontsize': uniform,
'xtick.labelsize': uniform,
'ytick.labelsize': uniform,
'text.usetex': True,
'text.latex.unicode': True,
              'text.latex.preamble': r'\usepackage{amsmath} \usepackage{amssymb}',
'figure.figsize': figsize
}
if not amsmath:
params.update({'text.latex.preamble':''})
plt.rcParams.update(params)
plt.rcParams['text.latex.unicode']=True
#if figsize:
# plt.rcParams[figure.figsize]={'paper':(4.6,4)}[figsize]
return(params)
| gpl-3.0 | -5,737,185,367,860,241,000 | 43.158537 | 474 | 0.627175 | false |
sarrionandia/taber | data/views.py | 1 | 4616 | from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.utils.decorators import method_decorator
from django.views.generic import View
from django.template import RequestContext, loader
from models import Institution, Judge, Team, Venue
from django.views.decorators.csrf import csrf_exempt
import json
def index(request):
template = loader.get_template('data/index.html')
context = RequestContext(request, {
'institutions' : Institution.objects.all().order_by('name'),
'judges' : Judge.objects.all(),
'teams' : Team.objects.all(),
'venues' : Venue.objects.all(),
})
return HttpResponse(template.render(context))
class DeleteInstitutionView(View):
def post(self, request, institutionid):
try:
institution = Institution.objects.get(id=institutionid)
institution.delete()
except ObjectDoesNotExist:
raise Http404("Institution does not exist")
return HttpResponse(institutionid)
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(DeleteInstitutionView, self).dispatch(*args, **kwargs)
class CreateInstitutionView(View):
def post(self, request):
name = request.POST.get('name')
institution = Institution(name=name)
institution.save()
response = {"id" : institution.id,
"name" : institution.name}
return HttpResponse(json.dumps(response))
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(CreateInstitutionView, self).dispatch(*args, **kwargs)
class UpdateInstitutionView(View):
def post(self, request, institutionid):
try:
institution = Institution.objects.get(id=institutionid)
institution.name = request.POST.get('name')
institution.save()
response = {
'name' : institution.name,
'id' : institution.id
}
return HttpResponse(json.dumps(response));
except ObjectDoesNotExist:
raise Http404("Institution does not exist")
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(UpdateInstitutionView, self).dispatch(*args, **kwargs)
class DeleteTeamView(View):
def post(self, request, teamid):
try:
team = Team.objects.get(id=teamid)
team.delete()
except ObjectDoesNotExist:
raise Http404("Team does not exist")
return HttpResponse("OK")
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(DeleteTeamView, self).dispatch(*args, **kwargs)
class CreateTeamView(View):
def post(self, request):
institution = Institution.objects.get(id=int(request.POST.get('institution')))
team = Team(name=request.POST.get('name'), institution=institution)
team.speaker1 = request.POST.get('speaker1')
team.speaker2 = request.POST.get('speaker2')
team.save()
response = {
'id' : team.id,
'name' : team.name,
'speaker1' : team.speaker1,
'speaker2' : team.speaker2
}
return HttpResponse(json.dumps(response))
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(CreateTeamView, self).dispatch(*args, **kwargs)
class UpdateTeamView(View):
def post(self, request, teamid):
try:
team = Team.objects.get(id=teamid)
team.name = request.POST.get('name')
team.speaker1 = request.POST.get('speaker1')
team.speaker2 = request.POST.get('speaker2')
team.save()
except ObjectDoesNotExist:
raise Http404("Team does not exist")
return HttpResponse("OK")
class DeleteJudgeView(View):
def post(self, request, judgeid):
try:
judge = Judge.objects.get(id=judgeid)
judge.delete()
except ObjectDoesNotExist:
raise Http404("Judge does not exist")
return HttpResponse("OK")
class CreateJudgeView(View):
def post(self, request):
name = request.POST.get('name')
try:
institution = Institution.objects.get(id=int(request.POST.get('institution')))
judge = Judge(name=name, institution=institution)
judge.save()
except ObjectDoesNotExist:
raise Http404("Institution does not exist")
return HttpResponse(judge.id)
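# Hypothetical usage sketch (not part of this app): exercising CreateTeamView
# through Django's test client. The URL below is an assumption about how these
# views might be wired into a urlconf.
def _demo_create_team(client, institution_id):
    return client.post('/data/team/create/', {
        'name': 'Team A',
        'institution': institution_id,
        'speaker1': 'Alice',
        'speaker2': 'Bob',
    })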
| gpl-2.0 | 3,279,643,001,639,974,000 | 29.569536 | 90 | 0.630416 | false |
efforia/django-socialize | socialize/urls.py | 1 | 1668 | #!/usr/bin/python
#
# This file is part of the django-socialize project.
#
# Copyright (C) 2011-2020 William Oliveira de Lagos <[email protected]>
#
# Socialize is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Socialize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Socialize. If not, see <http://www.gnu.org/licenses/>.
#
from django.conf.urls import url,include
from django.urls import path
from .views import *
urlpatterns = [
path('', AccountsView.as_view()),
# url(r'^profile', profile),
# url(r'^enter', authenticate),
# url(r'^leave', leave),
# url(r'^delete', delete),
# url(r'^userid', ids),
# url(r'^search', search),
# url(r'^explore', search),
# url(r'^known', explore),
# url(r'^following', following),
# url(r'^follow', follow),
# url(r'^unfollow', unfollow),
# url(r'^twitter/post', twitter_post),
# url(r'^facebook/post', facebook_post),
# url(r'^facebook/eventcover', facebook_eventcover),
# url(r'^facebook/event', facebook_event),
# url(r'^participate', participate),
# url(r'^tutorial', tutorial),
# url(r'^discharge', discharge),
# url(r'^recharge', recharge),
# url(r'^balance', balance),
]
| lgpl-3.0 | -5,867,830,235,791,402,000 | 33.75 | 78 | 0.676259 | false |
googleapis/googleapis-gen | google/cloud/essentialcontacts/v1/essentialcontacts-v1-py/google/cloud/essential_contacts_v1/services/essential_contacts_service/transports/grpc_asyncio.py | 1 | 18910 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.essential_contacts_v1.types import service
from google.protobuf import empty_pb2 # type: ignore
from .base import EssentialContactsServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import EssentialContactsServiceGrpcTransport
class EssentialContactsServiceGrpcAsyncIOTransport(EssentialContactsServiceTransport):
"""gRPC AsyncIO backend transport for EssentialContactsService.
Manages contacts for important Google Cloud notifications.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'essentialcontacts.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'essentialcontacts.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def create_contact(self) -> Callable[
[service.CreateContactRequest],
Awaitable[service.Contact]]:
r"""Return a callable for the create contact method over gRPC.
Adds a new contact for a resource.
Returns:
Callable[[~.CreateContactRequest],
Awaitable[~.Contact]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_contact' not in self._stubs:
self._stubs['create_contact'] = self.grpc_channel.unary_unary(
'/google.cloud.essentialcontacts.v1.EssentialContactsService/CreateContact',
request_serializer=service.CreateContactRequest.serialize,
response_deserializer=service.Contact.deserialize,
)
return self._stubs['create_contact']
@property
def update_contact(self) -> Callable[
[service.UpdateContactRequest],
Awaitable[service.Contact]]:
r"""Return a callable for the update contact method over gRPC.
Updates a contact.
Note: A contact's email address cannot be changed.
Returns:
Callable[[~.UpdateContactRequest],
Awaitable[~.Contact]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_contact' not in self._stubs:
self._stubs['update_contact'] = self.grpc_channel.unary_unary(
'/google.cloud.essentialcontacts.v1.EssentialContactsService/UpdateContact',
request_serializer=service.UpdateContactRequest.serialize,
response_deserializer=service.Contact.deserialize,
)
return self._stubs['update_contact']
@property
def list_contacts(self) -> Callable[
[service.ListContactsRequest],
Awaitable[service.ListContactsResponse]]:
r"""Return a callable for the list contacts method over gRPC.
Lists the contacts that have been set on a resource.
Returns:
Callable[[~.ListContactsRequest],
Awaitable[~.ListContactsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_contacts' not in self._stubs:
self._stubs['list_contacts'] = self.grpc_channel.unary_unary(
'/google.cloud.essentialcontacts.v1.EssentialContactsService/ListContacts',
request_serializer=service.ListContactsRequest.serialize,
response_deserializer=service.ListContactsResponse.deserialize,
)
return self._stubs['list_contacts']
@property
def get_contact(self) -> Callable[
[service.GetContactRequest],
Awaitable[service.Contact]]:
r"""Return a callable for the get contact method over gRPC.
Gets a single contact.
Returns:
Callable[[~.GetContactRequest],
Awaitable[~.Contact]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_contact' not in self._stubs:
self._stubs['get_contact'] = self.grpc_channel.unary_unary(
'/google.cloud.essentialcontacts.v1.EssentialContactsService/GetContact',
request_serializer=service.GetContactRequest.serialize,
response_deserializer=service.Contact.deserialize,
)
return self._stubs['get_contact']
@property
def delete_contact(self) -> Callable[
[service.DeleteContactRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete contact method over gRPC.
Deletes a contact.
Returns:
Callable[[~.DeleteContactRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_contact' not in self._stubs:
self._stubs['delete_contact'] = self.grpc_channel.unary_unary(
'/google.cloud.essentialcontacts.v1.EssentialContactsService/DeleteContact',
request_serializer=service.DeleteContactRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_contact']
@property
def compute_contacts(self) -> Callable[
[service.ComputeContactsRequest],
Awaitable[service.ComputeContactsResponse]]:
r"""Return a callable for the compute contacts method over gRPC.
Lists all contacts for the resource that are
subscribed to the specified notification categories,
including contacts inherited from any parent resources.
Returns:
Callable[[~.ComputeContactsRequest],
Awaitable[~.ComputeContactsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'compute_contacts' not in self._stubs:
self._stubs['compute_contacts'] = self.grpc_channel.unary_unary(
'/google.cloud.essentialcontacts.v1.EssentialContactsService/ComputeContacts',
request_serializer=service.ComputeContactsRequest.serialize,
response_deserializer=service.ComputeContactsResponse.deserialize,
)
return self._stubs['compute_contacts']
@property
def send_test_message(self) -> Callable[
[service.SendTestMessageRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the send test message method over gRPC.
        Allows a contact admin to send a test message to a
        contact to verify that it has been configured correctly.
Returns:
Callable[[~.SendTestMessageRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'send_test_message' not in self._stubs:
self._stubs['send_test_message'] = self.grpc_channel.unary_unary(
'/google.cloud.essentialcontacts.v1.EssentialContactsService/SendTestMessage',
request_serializer=service.SendTestMessageRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['send_test_message']
__all__ = (
'EssentialContactsServiceGrpcAsyncIOTransport',
)
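# Minimal usage sketch (not part of the generated sources): constructing the
# asyncio transport directly and invoking one RPC stub. Normally the async
# client class builds the transport for you; the project id below and the
# reliance on application default credentials are assumptions.
async def _demo_list_contacts():
    transport = EssentialContactsServiceGrpcAsyncIOTransport()
    request = service.ListContactsRequest(parent="projects/my-project")
    response = await transport.list_contacts(request)
    for contact in response.contacts:
        print(contact.name, contact.email)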
| apache-2.0 | -8,979,955,842,416,071,000 | 44.347722 | 94 | 0.615812 | false |
xzhang2016/tfagent | tfta/test/test2_json.py | 1 | 40826 | from kqml import KQMLList, KQMLString
from tfta.tfta import TFTA
from tfta.tfta_module import TFTA_Module
from bioagents.tests.util import ekb_from_text, ekb_kstring_from_text, \
get_request, agent_clj_from_text
from bioagents.tests.integration import _IntegrationTest, _FailureTest
from indra.sources.trips.processor import TripsProcessor
from indra.statements import Agent
from bioagents import Bioagent
from indra.sources import trips
#####################################
# Testing the following TFTA capabilities
# IS-GENE-ONTO
# FIND-GENE-ONTO
# FIND-KINASE-REGULATION
# IS-GENE-TISSUE
# FIND-GENE-TISSUE
# FIND-TISSUE
# FIND-COMMON-TF-GENES
# FIND-EVIDENCE
######################################
def _get_targets(target_arg):
proteins = None
family = None
agents = Bioagent.get_agent(target_arg)
if isinstance(agents, list):
proteins = [a.name for a in agents if a is not None and ('UP' in a.db_refs or 'HGNC' in a.db_refs)]
family = [a.name for a in agents if a is not None and 'FPLX' in a.db_refs and a.name not in proteins]
elif isinstance(agents, Agent):
if 'UP' in agents.db_refs or 'HGNC' in agents.db_refs:
proteins = [agents.name]
if not proteins and 'FPLX' in agents.db_refs:
family = [agents.name]
if proteins:
print('genes=', ','.join(proteins))
else:
print('Genes = None\n')
if family:
print('family=', ','.join(family))
else:
print('family = None\n')
return proteins,family
def agents_clj_from_text(text):
ekb_xml = ekb_from_text(text)
tp = trips.process_xml(ekb_xml)
agents = tp.get_agents()
clj = Bioagent.make_cljson(agents)
return clj
#############################################################################
#IS-GENE-ONTO
#Is stat3 a kinase?
class TestIsGeneOnto1(_IntegrationTest):
def __init__(self, *args):
super(TestIsGeneOnto1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('STAT3')
_get_targets(gene)
print('target=', str(gene))
keyword = 'kinase'
content = KQMLList('is-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'FALSE', output
#Is stat3 a transcription factor?
class TestIsGeneOnto2(_IntegrationTest):
def __init__(self, *args):
super(TestIsGeneOnto2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('STAT3')
_get_targets(gene)
print('target=', str(gene))
keyword = 'transcription factor'
content = KQMLList('is-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'TRUE', output
#Is stat3 a protein kinase?
class TestIsGeneOnto3(_IntegrationTest):
def __init__(self, *args):
super(TestIsGeneOnto3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('STAT3')
_get_targets(gene)
print('target=', str(gene))
keyword = 'protein kinase'
content = KQMLList('is-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'FALSE', output
#Is jak1 a protein kinase?
class TestIsGeneOnto4(_IntegrationTest):
def __init__(self, *args):
super(TestIsGeneOnto4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('JAK1')
_get_targets(gene)
print('target=', str(gene))
keyword = 'protein kinase'
content = KQMLList('is-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'TRUE', output
#Is PBRM1 a transcription factor?
class TestIsGeneOnto5(_IntegrationTest):
def __init__(self, *args):
super(TestIsGeneOnto5, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('PBRM1')
_get_targets(gene)
print('target=', str(gene))
keyword = 'transcription factor'
content = KQMLList('is-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'TRUE', output
#TEST FAMILY NAME
#Is SMURF a transcription factor?
class TestIsGeneOnto6(_IntegrationTest):
def __init__(self, *args):
super(TestIsGeneOnto6, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('SMURF')
_get_targets(gene)
print('target=', str(gene))
keyword = 'transcription factor'
content = KQMLList('is-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'FAMILY_NAME', output.get('reason')
print("len(output.get('clarification'))=", len(output.get('clarification')))
assert len(output.get('clarification')) == 5, output
#is stat a kinase? (STAT is grounded as a gene, not a family)
class TestIsGeneOnto7(_IntegrationTest):
def __init__(self, *args):
super(TestIsGeneOnto7, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('STAT')
_get_targets(gene)
print('target=', str(gene))
keyword = 'KINASE'
content = KQMLList('is-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'FAMILY_NAME', output
print("len(output.get('clarification'))=", len(output.get('clarification')))
assert len(output.get('clarification')) == 5, output
#test protein and gene category
#is map3k7 a protein?
class TestIsGeneOnto8(_IntegrationTest):
def __init__(self, *args):
super(TestIsGeneOnto8, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('map3k7')
_get_targets(gene)
print('target=', str(gene))
keyword = 'protein'
content = KQMLList('is-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'TRUE', output
#is stat3 a gene?
class TestIsGeneOnto9(_IntegrationTest):
def __init__(self, *args):
super(TestIsGeneOnto9, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('stat3')
_get_targets(gene)
print('target=', str(gene))
keyword = 'gene'
content = KQMLList('is-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'TRUE', output
##################################################################################
##TEST FIND-GENE-ONTO
#Among STAT3, JAK1, JAK2, ELK1, ELK2, HRAS, and FOS, which are protein kinases?
class TestFindGeneOnto1(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneOnto1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agents_clj_from_text("STAT3, JAK1, JAK2, ELK1, FOS, HRAS, ELK2")
_get_targets(gene)
print('target=', str(gene))
keyword = 'protein kinase'
content = KQMLList('find-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
print("len(output.get('genes'))=", str(len(output.get('genes'))))
assert output.head() == 'SUCCESS', output
assert len(output.get('genes')) == 2, output
#Among STAT3, JAK1, JAK2, ELK1, and FOS, which are histone demethylase?
class TestFindGeneOnto2(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneOnto2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agents_clj_from_text("STAT3, JAK1, JAK2, ELK1, FOS, SMAD2, KDM4B")
_get_targets(gene)
print('target=', str(gene))
keyword = 'histone demethylase'
content = KQMLList('find-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
print('len(output)=' + str(len(output.get('genes'))))
assert output.head() == 'SUCCESS', output
assert len(output.get('genes')) == 1, output
#Among PBRM1, SMAD2, TBL1XR1, AKT1, CDK19, CDK8, CDK9, DDR1, GSK3A, GSK3B, MET,TRIM28,COL2A1,
# JAK1, PRMT1, RB1, SMURF2, TRAF4, and USP15, which are transcription factors?
class TestFindGeneOnto3(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneOnto3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agents_clj_from_text("PBRM1, SMAD2, TBL1XR1, AKT1, CDK19, CDK8, CDK9, DDR1, \
GSK3A, GSK3B, MET,TRIM28,COL2A1,JAK1, PRMT1, RB1, SMURF2, TRAF4, USP15")
_get_targets(gene)
print('target=', str(gene))
keyword = 'transcription factor'
content = KQMLList('find-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
print("len(output.get('genes'))=" + str(len(output.get('genes'))))
assert output.head() == 'SUCCESS', output
assert len(output.get('genes')) == 3, output
#Among STAT3, JAK1, JAK2, ELK1, and FOS, which are demethylase?
class TestFindGeneOnto4(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneOnto4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agents_clj_from_text("STAT3, JAK1, JAK2, ELK1, FOS, SMAD2, KDM4B")
_get_targets(gene)
print('target=', str(gene))
keyword = 'demethylase'
content = KQMLList('find-gene-onto')
content.set('keyword', keyword)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
print('len(output)=' + str(len(output.get('genes'))))
assert output.head() == 'SUCCESS', output
assert len(output.get('genes')) == 1, output
#complex query: find-target and find-gene-onto
#What genes regulated by FOS are kinases?
class TestFindGeneOnto5(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneOnto5, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
regulator = agent_clj_from_text('cfos')
_get_targets(regulator)
print('target=', str(regulator))
keyword = 'kinase'
content = KQMLList('find-gene-onto')
content.set('keyword', keyword)
content.set('regulator', regulator)
return get_request(content), content
def check_response_to_message(self, output):
print('len(output)=' + str(len(output.get('genes'))))
assert output.head() == 'SUCCESS', output
assert len(output.get('genes')) == 5, output
###############################################################################
# FIND-KINASE-REGULATION
#Which kinases regulate the cfos gene?
class TestFindKinaseReg1(_IntegrationTest):
def __init__(self, *args):
super(TestFindKinaseReg1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('cfos')
_get_targets(target)
print('target=', str(target))
content = KQMLList('FIND-KINASE-REGULATION')
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert len(output.get('kinase')) == 5, output
#test gene family
#Which kinases regulate the MEK gene?
class TestFindKinaseReg2(_IntegrationTest):
def __init__(self, *args):
super(TestFindKinaseReg2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('MEK')
_get_targets(target)
print('target=', str(target))
content = KQMLList('FIND-KINASE-REGULATION')
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'FAMILY_NAME', output
print("len(output.get('clarification'))=", len(output.get('clarification')))
assert len(output.get('clarification')) == 5, output
#Which kinases negatively regulate the cfos gene?
class TestFindKinaseReg3(_IntegrationTest):
def __init__(self, *args):
super(TestFindKinaseReg3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('cfos')
_get_targets(target)
print('target=', str(target))
keyword = 'decrease'
content = KQMLList('FIND-KINASE-REGULATION')
content.set('target', target)
content.set('keyword', keyword)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert len(output.get('kinase')) == 3, output
#Which kinases positively regulate the cfos gene?
class TestFindKinaseReg4(_IntegrationTest):
def __init__(self, *args):
super(TestFindKinaseReg4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('cfos')
_get_targets(target)
print('target=', str(target))
keyword = 'increase'
content = KQMLList('FIND-KINASE-REGULATION')
content.set('target', target)
content.set('keyword', keyword)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert len(output.get('kinase')) == 2, output
#Which kinases positively regulate the AKT gene?
class TestFindKinaseReg5(_IntegrationTest):
def __init__(self, *args):
super(TestFindKinaseReg5, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('AKT')
_get_targets(target)
print('target=', str(target))
keyword = 'increase'
content = KQMLList('FIND-KINASE-REGULATION')
content.set('target', target)
content.set('keyword', keyword)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'FAMILY_NAME', output
print("len(output.get('clarification'))=", len(output.get('clarification')))
assert len(output.get('clarification')) == 5, output
#######################################################################################
#IS-GENE-TISSUE
###Is stat3 expressed in liver?
class TestIsTissueGene1(_IntegrationTest):
def __init__(self, *args):
super(TestIsTissueGene1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('stat3')
_get_targets(target)
print('target=', str(target))
content = KQMLList('IS-GENE-TISSUE')
content.set('gene', target)
content.set('tissue', 'liver')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'TRUE', output
###Is kras expressed in brain?
class TestIsTissueGene2(_IntegrationTest):
def __init__(self, *args):
super(TestIsTissueGene2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('kras')
_get_targets(target)
print('target=', str(target))
content = KQMLList('IS-GENE-TISSUE')
content.set('gene', target)
content.set('tissue', 'brain')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'FALSE', output
###Is stat3 exclusively expressed in liver?
class TestIsTissueGene3(_IntegrationTest):
def __init__(self, *args):
super(TestIsTissueGene3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('stat3')
_get_targets(target)
print('target=', str(target))
content = KQMLList('IS-GENE-TISSUE')
content.set('gene', target)
content.set('tissue', 'liver')
content.set('keyword', 'exclusive')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'FALSE', output
###Is GYS2 exclusively expressed in liver?
class TestIsTissueGene4(_IntegrationTest):
def __init__(self, *args):
super(TestIsTissueGene4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('GYS2')
_get_targets(target)
print('target=', str(target))
content = KQMLList('IS-GENE-TISSUE')
content.set('gene', target)
content.set('tissue', 'liver')
content.set('keyword', 'exclusive')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'TRUE', output
###Is NEUROD2 exclusively expressed in brain?
class TestIsTissueGene5(_IntegrationTest):
def __init__(self, *args):
super(TestIsTissueGene5, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('NEUROD2')
_get_targets(target)
print('target=', str(target))
content = KQMLList('IS-GENE-TISSUE')
content.set('gene', target)
content.set('tissue', 'brain')
content.set('keyword', 'exclusive')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'TRUE', output
###Is GAST expressed in stomach?
class TestIsTissueGene6(_IntegrationTest):
def __init__(self, *args):
super(TestIsTissueGene6, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agent_clj_from_text('GAST')
_get_targets(target)
print('target=', str(target))
content = KQMLList('IS-GENE-TISSUE')
content.set('gene', target)
content.set('tissue', 'stomach')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert output.get('result') == 'TRUE', output
######################################################################################
#FIND-GENE-TISSUE
#what genes are expressed in liver?
class TestFindGeneTissue1(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneTissue1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
#gene = ekb_kstring_from_text('AKT')
tissue = 'liver'
content = KQMLList('FIND-GENE-TISSUE')
content.set('tissue', tissue)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('genes'))=", len(output.get('genes')))
assert len(output.get('genes')) == 1929, output
#among stat3,srf, kras, and hras, what genes are expressed in liver?
class TestFindGeneTissue2(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneTissue2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agents_clj_from_text('stat3, srf, kras, hras')
tissue = 'liver'
content = KQMLList('FIND-GENE-TISSUE')
content.set('tissue', tissue)
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('genes'))=", len(output.get('genes')))
assert len(output.get('genes')) == 1, output
#what genes are exclusively expressed in liver?
class TestFindGeneTissue3(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneTissue3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
#gene = 'stat3, srf, kras, hras'
tissue = 'liver'
content = KQMLList('FIND-GENE-TISSUE')
content.set('tissue', tissue)
content.set('keyword', 'exclusive')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('genes'))=", len(output.get('genes')))
assert len(output.get('genes')) == 31, output
#what genes are exclusively expressed in brain?
class TestFindGeneTissue4(_IntegrationTest):
def __init__(self, *args):
super(TestFindGeneTissue4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
tissue = 'brain'
content = KQMLList('FIND-GENE-TISSUE')
content.set('tissue', tissue)
content.set('keyword', 'exclusive')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('genes'))=", len(output.get('genes')))
assert len(output.get('genes')) == 44, output
###############################################################################
# FIND-TISSUE
#What tissues is STAT3 expressed in?
class TestFindTissue1(_IntegrationTest):
def __init__(self, *args):
super(TestFindTissue1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('STAT3')
_get_targets(gene)
print('target=', str(gene))
content = KQMLList('FIND-TISSUE')
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('tissue'))=", str(len(output.get('tissue'))))
assert len(output.get('tissue')) == 8, output
#What tissues is MEK expressed in?
class TestFindTissue2(_IntegrationTest):
def __init__(self, *args):
super(TestFindTissue2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('MEK')
_get_targets(gene)
print('target=', str(gene))
content = KQMLList('FIND-TISSUE')
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'FAMILY_NAME', output
print("len(output.get('clarification'))=", len(output.get('clarification')))
assert len(output.get('clarification')) == 5, output
#what tissues can I ask
class TestFindTissue3(_IntegrationTest):
def __init__(self, *args):
super(TestFindTissue3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
content = KQMLList('FIND-TISSUE')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('tissue'))=", len(output.get('tissue')))
assert len(output.get('tissue')) == 30, output
#What tissues is frizzled8 expressed in?
class TestFindTissue4(_IntegrationTest):
def __init__(self, *args):
super(TestFindTissue4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
gene = agent_clj_from_text('frizzled8')
_get_targets(gene)
print('target=', str(gene))
content = KQMLList('FIND-TISSUE')
content.set('gene', gene)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('tissue'))=", str(len(output.get('tissue'))))
assert len(output.get('tissue')) == 7, output
####################################################################################
#FIND-COMMON-TF-GENES
#What transcription factors are shared by the SRF, HRAS, and elk1 genes? (subtask: find-common-tf-genes)
class TestFindCommonTfGenes1(_IntegrationTest):
def __init__(self, *args):
super(TestFindCommonTfGenes1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agents_clj_from_text('SRF, HRAS, elk1')
_get_targets(target)
print('target=', str(target))
content = KQMLList('FIND-COMMON-TF-GENES')
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert len(output.get('tfs')) == 3, output
#What transcription factors are in common to the STAT3, SOCS3, IFNG, FOXO3, and CREB5 genes?
class TestFindCommonTfGenes2(_IntegrationTest):
def __init__(self, *args):
super(TestFindCommonTfGenes2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agents_clj_from_text('STAT3, IFNG, FOXO3, SOCS3, CREB5')
_get_targets(target)
print('target=', str(target))
content = KQMLList('FIND-COMMON-TF-GENES')
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
print('len(output)=' + str(len(output.get('tfs'))))
assert output.head() == 'SUCCESS', output
assert len(output.get('tfs')) == 8, output
#test gene family
#What transcription factors are in common to the STAT3, SOCS3, and MEK genes?
#MEK will be ignored in this case
class TestFindCommonTfGenes3(_IntegrationTest):
def __init__(self, *args):
super(TestFindCommonTfGenes3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agents_clj_from_text('STAT3, SOCS3, MEK')
_get_targets(target)
print('target=', str(target))
content = KQMLList('FIND-COMMON-TF-GENES')
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
assert len(output.get('tfs')) == 1, output
#What transcription factors are in common to the STAT3, SOCS3, and AKT genes?
class TestFindCommonTfGenes4(_IntegrationTest):
def __init__(self, *args):
super(TestFindCommonTfGenes4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agents_clj_from_text('STAT3, AKT, MEK')
_get_targets(target)
print('target=', str(target))
content = KQMLList('FIND-COMMON-TF-GENES')
content.set('target', target)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'FAMILY_NAME', output
assert len(output.get('clarification').get('as')) == 2, output
#Which of these transcription factors are shared by the SRF, HRAS, FOS, and elk1 genes? (subtask: find-common-tf-genes)
class TestFindCommonTfGenes5(_IntegrationTest):
def __init__(self, *args):
super(TestFindCommonTfGenes5, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
target = agents_clj_from_text('SRF, HRAS, cfos, elk1')
_get_targets(target)
print('target=', str(target))
of_those = agents_clj_from_text('stat3,ELK1,TFAP2A,CREB1,TP53')
_get_targets(of_those)
print('target=', str(of_those))
content = KQMLList('FIND-COMMON-TF-GENES')
content.set('target', target)
content.set('of-those', of_those)
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('tfs'))=", len(output.get('tfs')))
assert len(output.get('tfs')) == 3, output
######################################################################################
# FIND-EVIDENCE
##Show me evidence that kras regulate frizzled8?
class TestFindEvidence1(_IntegrationTest):
def __init__(self, *args):
super(TestFindEvidence1, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
regulator = agent_clj_from_text('kras')
target = agent_clj_from_text('fzd8')
_get_targets(target)
print('target=', str(target))
content = KQMLList('FIND-EVIDENCE')
content.set('regulator', regulator)
content.set('target', target)
content.set('keyword', 'regulate')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
print("len(output.get('evidence').get('literature'))=", str(len(output.get('evidence').get('literature'))))
assert len(output.get('evidence')) == 2, output
assert len(output.get('evidence').get('literature')) == 1, output
##show me evidence that kras increase frizzled8?
class TestFindEvidence2(_IntegrationTest):
def __init__(self, *args):
super(TestFindEvidence2, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
regulator = agent_clj_from_text('kras')
target = agent_clj_from_text('fzd8')
content = KQMLList('FIND-EVIDENCE')
content.set('regulator', regulator)
content.set('target', target)
content.set('keyword', 'increase')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
print("len(output.get('evidence').get('literature'))=", str(len(output.get('evidence').get('literature'))))
assert len(output.get('evidence')) == 2, output
assert len(output.get('evidence').get('literature')) == 1, output
##show me evidence that kras decrease frizzled8?
class TestFindEvidence3(_IntegrationTest):
def __init__(self, *args):
super(TestFindEvidence3, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
regulator = agent_clj_from_text('kras')
target = agent_clj_from_text('fzd8')
content = KQMLList('FIND-EVIDENCE')
content.set('regulator', regulator)
content.set('target', target)
content.set('keyword', 'decrease')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
print("type(output.get('evidence'))=",type(output.get('evidence')))
print("len(output.get('evidence').get('literature'))=", str(len(output.get('evidence').get('literature'))))
assert len(output.get('evidence')) == 2, output
assert len(output.get('evidence').get('literature')) == 1, output
##Show me the evidence that IL6 increase the amount of SOCS1.
class TestFindEvidence4(_IntegrationTest):
def __init__(self, *args):
super(TestFindEvidence4, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
regulator = agent_clj_from_text('il6')
target = agent_clj_from_text('socs1')
content = KQMLList('FIND-EVIDENCE')
content.set('regulator',regulator)
content.set('target',target)
content.set('keyword', 'increase')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
print("len(output.get('evidence').get('literature'))=", str(len(output.get('evidence').get('literature'))))
assert len(output.get('evidence')) == 2, output
assert len(output.get('evidence').get('literature')) == 9, output
##Show me the evidence that SRF binds to the FOS gene.
class TestFindEvidence5(_IntegrationTest):
def __init__(self, *args):
super(TestFindEvidence5, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
regulator = agent_clj_from_text('SRF')
target = agent_clj_from_text('cfos')
content = KQMLList('FIND-EVIDENCE')
content.set('regulator', regulator)
content.set('target', target)
content.set('keyword', 'bind')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
print("len(output.get('evidence').get('tf-db'))=", str(len(output.get('evidence').get('tf-db'))))
assert len(output.get('evidence')) == 2, output
assert len(output.get('evidence').get('tf-db')) == 2, output
##Show me the evidence that SRF regulate FOS gene.
class TestFindEvidence6(_IntegrationTest):
def __init__(self, *args):
super(TestFindEvidence6, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
regulator = agent_clj_from_text('SRF')
target = agent_clj_from_text('cfos')
content = KQMLList('FIND-EVIDENCE')
content.set('regulator', regulator)
content.set('target', target)
content.set('keyword', 'regulate')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'SUCCESS', output
print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
print("len(output.get('evidence').get('literature'))=", str(len(output.get('evidence').get('literature'))))
assert len(output.get('evidence')) == 4, output
assert len(output.get('evidence').get('literature')) == 2, output
#IncreaseAmount(miR_491(), GFAP())
class TestFindEvidence7(_IntegrationTest):
def __init__(self, *args):
super(TestFindEvidence7, self).__init__(TFTA_Module)
def create_message(self):
# Here we create a KQML request that the TFTA needs to respond to
agents = Bioagent.get_agent(agent_clj_from_text('miR-491'))
print(agents)
print('name=', agents.name)
print('db_refs=', agents.db_refs)
regulator = agent_clj_from_text('miR-491')
target = agent_clj_from_text('GFAP')
content = KQMLList('FIND-EVIDENCE')
content.set('regulator', regulator)
content.set('target', target)
content.set('keyword', 'increase')
print(content, '\n')
return get_request(content), content
def check_response_to_message(self, output):
assert output.head() == 'FAILURE', output
assert output.get('reason') == 'NO_REGULATOR_NAME', output
| bsd-2-clause | -776,647,102,837,806,700 | 38.445411 | 119 | 0.606452 | false |
cajone/pychess | lib/pychess/widgets/ChatWindow.py | 1 | 3808 | import re
from gi.repository import Gtk
from pychess.widgets.ChatView import ChatView
from pychess.widgets.ViewsPanel import ViewsPanel
from pychess.widgets.InfoPanel import InfoPanel
from pychess.widgets.ChannelsPanel import ChannelsPanel
TYPE_PERSONAL, TYPE_CHANNEL, TYPE_GUEST, \
TYPE_ADMIN, TYPE_COMP, TYPE_BLINDFOLD = range(6)
def get_playername(playername):
    re_m = re.match(r"(\w+)\W*", playername)
return re_m.groups()[0]
class ChatWindow(object):
def __init__(self, widgets, connection):
self.connection = connection
self.viewspanel = ViewsPanel(self.connection)
self.channelspanel = ChannelsPanel(self.connection)
self.adj = self.channelspanel.get_vadjustment()
self.infopanel = InfoPanel(self.connection)
self.chatbox = Gtk.Box()
self.chatbox.pack_start(self.channelspanel, True, True, 0)
notebook = Gtk.Notebook()
notebook.append_page(self.viewspanel, Gtk.Label(_("Chat")))
notebook.append_page(self.infopanel, Gtk.Label(_("Info")))
self.chatbox.pack_start(notebook, False, False, 0)
self.panels = [self.viewspanel, self.channelspanel, self.infopanel]
self.viewspanel.connect('channel_content_Changed',
self.channelspanel.channel_Highlight, id)
self.channelspanel.connect('conversationAdded',
self.onConversationAdded)
self.channelspanel.connect('conversationRemoved',
self.onConversationRemoved)
self.channelspanel.connect('conversationSelected',
self.onConversationSelected)
self.channelspanel.connect('focus_in_event', self.focus_in, self.adj)
for panel in self.panels:
panel.show_all()
panel.start()
def onConversationAdded(self, panel, grp_id, text, grp_type):
chatView = ChatView()
plus_channel = '+channel ' + str(grp_id)
self.connection.cm.connection.client.run_command(plus_channel)
for panel in self.panels:
panel.addItem(grp_id, text, grp_type, chatView)
def onConversationRemoved(self, panel, grp_id):
minus_channel = '-channel ' + str(grp_id)
self.connection.cm.connection.client.run_command(minus_channel)
for panel in self.panels:
panel.removeItem(grp_id)
def onConversationSelected(self, panel, grp_id):
for panel in self.panels:
panel.selectItem(grp_id)
def openChatWithPlayer(self, name):
cm = self.connection.cm
self.channelspanel.onPersonMessage(cm, name, "", False, "")
    def focus_in(self, widget, event, adj):
alloc = widget.get_allocation()
if alloc.y < adj.value or alloc.y > adj.value + adj.page_size:
adj.set_value(min(alloc.y, adj.upper - adj.page_size))
if __name__ == "__main__":
import random
class LM:
def getPlayerlist(self):
for i in range(10):
chrs = map(chr, range(ord("a"), ord("z") + 1))
yield "".join(random.sample(chrs, random.randrange(20)))
def getChannels(self):
return [(str(i), n) for i, n in enumerate(self.getPlayerlist())]
def joinChannel(self, channel):
pass
def connect(self, *args):
pass
def getPeopleInChannel(self, name):
pass
def finger(self, name):
pass
def getJoinedChannels(self):
return []
class Con:
def __init__(self):
self.glm = LM()
self.cm = LM()
self.fm = LM()
    globals()["_"] = lambda x: x
    chatwin = ChatWindow({}, Con())
    window = Gtk.Window()
    window.add(chatwin.chatbox)
    window.connect("delete-event", Gtk.main_quit)
    window.show_all()
Gtk.main()
| gpl-3.0 | -2,369,495,915,848,076,000 | 33 | 77 | 0.608193 | false |
dowjones/respawn | respawn/autoscaling.py | 1 | 15011 | from cfn_pyplates import core, functions
from ec2 import BlockDevice, BlockDeviceMapping
from errors import RespawnResourceError
class MetricsCollection(core.JSONableDict):
"""
Creates a Metrics Collection
:param granularity: String
:param kwargs: metrics - [ String, ... ]
"""
# ----------------------------------------------------------------------------------------------------------
# Metrics Collection
# ----------------------------------------------------------------------------------------------------------
def __init__(self,
granularity,
**kwargs
):
super(MetricsCollection, self).__init__()
self['Granularity'] = granularity
if 'metrics' in kwargs:
self['Metrics'] = kwargs.get('metrics')
class NotificationConfigurations(core.JSONableDict):
"""
Creates a Notification Configuration
:param notification_type: [ String, ... ]
:param topic_arn: String
"""
# ----------------------------------------------------------------------------------------------------------
# NotificationConfiguration
# ----------------------------------------------------------------------------------------------------------
def __init__(self,
notification_type,
topic_arn
):
super(NotificationConfigurations, self).__init__()
self['NotificationTypes'] = notification_type
self['TopicARN'] = topic_arn
class Tag(core.JSONableDict):
"""
Create ASG Tag
:param key: String
:param value: String
:param propagate_at_launch: Boolean
"""
# ----------------------------------------------------------------------------------------------------------
# Tag
# ----------------------------------------------------------------------------------------------------------
def __init__(self,
key,
value,
propagate_at_launch
):
super(Tag, self).__init__()
self['Key'] = key
self['Value'] = value
self['PropagateAtLaunch'] = propagate_at_launch
class LaunchConfiguration(core.Resource):
"""
Creates a Launch Configuration
:param name: String
:param ami_id: String
:param instance_type: String
kwargs
- public_ip: Boolean
- block_devices: [ BlockDeviceMapping, ... ]
- classic_link_vpc_id: String
- classic_link_vpc_security_groups: [ String, ... ],
- ebs_optimized: Boolean
- iam_role: String
- instance_id: String
- monitoring: Boolean
- kernel_id: String
- key_pair: String
- placement_tenancy: String
- ramdisk_id: String
- security_groups: [ SecurityGroup, ... ]
- spot_price: String
- user_data_script: String
- attributes: { key: value, ... }
"""
# ----------------------------------------------------------------------------------------------------------
# Launch Configuration
# ----------------------------------------------------------------------------------------------------------
def __init__(
self,
name,
ami_id,
instance_type,
**kwargs
):
if "classic_link_vpc_id" in kwargs and "classic_link_vpc_security_groups" not in kwargs:
raise RespawnResourceError("Classic Link VPC Sercurity Groups (classic_link_vpc_security_groups) "
"required with Class Link VPC ID (classic_link_vpc_id).",
"classic Link VPC Id/Classic Link Vpc Security Groups")
attributes = kwargs.get("attributes")
properties = {
'ImageId': ami_id,
'InstanceType': instance_type
}
if 'block_devices' in kwargs:
devices = kwargs.get('block_devices')
block_devices = []
for device, args in devices.items():
if 'ebs' in args:
args['ebs'] = BlockDevice(**args['ebs'])
block_devices.append(BlockDeviceMapping(device, **args))
properties['BlockDeviceMappings'] = block_devices
if "public_ip" in kwargs:
properties['AssociatePublicIpAddress'] = kwargs.get("public_ip") # default=False
if "classic_link_vpc_id" in kwargs:
properties['ClassicLinkVPCId'] = kwargs.get("classic_link_vpc_id")
if "classic_link_vpc_security_groups" in kwargs:
properties['ClassicLinkVPCSecurityGroups'] = kwargs.get("classic_link_vpc_security_groups")
if "ebs_optimized" in kwargs:
properties['EbsOptimized'] = kwargs.get("ebs_optimized") # default=False
if "iam_role" in kwargs:
properties['IamInstanceProfile'] = kwargs.get("iam_role")
if "instance_id" in kwargs:
properties['InstanceId'] = kwargs.get("instance_id")
if "monitoring" in kwargs:
properties['InstanceMonitoring'] = kwargs.get("monitoring") # default=True
if "kernel_id" in kwargs:
properties['KernelId'] = kwargs.get("kernel_id")
if "key_pair" in kwargs:
properties['KeyName'] = kwargs.get("key_pair")
if "placement_tenancy" in kwargs:
properties['PlacementTenancy'] = kwargs.get("placement_tenancy")
if "private_ip" in kwargs:
properties['PlacementGroupName'] = kwargs.get("private_ip")
if "ramdisk_id" in kwargs:
properties['RamdiskId'] = kwargs.get("ramdisk_id")
if "security_groups" in kwargs:
properties['SecurityGroups'] = kwargs.get("security_groups")
if "spot_price" in kwargs:
properties['SpotPrice'] = kwargs.get("spot_price")
if "user_data_script" in kwargs:
properties['UserData'] = functions.base64(kwargs.get("user_data_script"))
super(LaunchConfiguration, self).__init__(name, 'AWS::AutoScaling::LaunchConfiguration', properties, attributes)
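# Usage sketch (the resource name, AMI id, instance type, key pair and security
# group below are assumed placeholder values, not taken from the API docs):
#
#   web_lc = LaunchConfiguration(
#       "WebLaunchConfig",
#       ami_id="ami-12345678",
#       instance_type="t2.micro",
#       key_pair="my-keypair",
#       security_groups=["web-sg"],
#       user_data_script="#!/bin/bash\nyum update -y",
#   )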
class AutoScalingGroup(core.Resource):
"""
Creates an AutoScaling Group
:param name: String
:param max_size: String
:param min_size: String
kwargs
- availability_zones: [ String, ... ]
- cooldown: String
- desired_capacity: String
- health_check_grace_period: Integer
- health_check_type: String
- instance_id: String
- launch_configuration: String
- load_balancer_names: [ String, ... ]
- metrics_collection: [ MetricsCollection, ... ]
- notification_configs: [ NotificationConfigurations, ... ]
- placement_group: String
- tags: [ Tag, ...]
- termination_policies: [ String, ..., ]
- vpc_zone_identifier: [ String, ... ]
- attributes: { key: value, ... }
"""
# ----------------------------------------------------------------------------------------------------------
# Auto Scaling Group
# ----------------------------------------------------------------------------------------------------------
def __init__(
self,
name,
max_size,
min_size,
**kwargs
):
if "instance_id" not in kwargs and "launch_configuration" not in kwargs:
raise RespawnResourceError(
"Instance ID (instance_id) or Launch Configuration Name (launch_configuration) required.",
"Instance Id/ Launch Configuration")
if "availability_zones" not in kwargs and "vpc_zone_identifier" not in kwargs:
raise RespawnResourceError(
"Availability Zones (availability_zones) or VPC Zone Identifier (vpc_zone_identifier) "
"required.", "AvailabilityZones/VPCZoneIdentifier")
attributes = kwargs.get("attributes", dict())
properties = {
'MaxSize': max_size,
'MinSize': min_size
}
if "metrics_collection" in kwargs:
metrics_collection = kwargs.get('metrics_collection')
metrics_collections = []
for collection in metrics_collection:
metrics_collections.append(MetricsCollection(**collection))
properties['MetricsCollection'] = metrics_collections
if "notification_configs" in kwargs:
notification_configs = kwargs.get("notification_configs")
configs = []
for config in notification_configs:
configs.append(NotificationConfigurations(**config))
properties['NotificationConfigurations'] = configs
if 'tags' in kwargs:
t = kwargs.get('tags')
tags = []
for tag in t:
tags.append(Tag(**tag))
properties['Tags'] = tags
if "launch_configuration" in kwargs:
properties['LaunchConfigurationName'] = kwargs.get("launch_configuration")
if "load_balancer_names" in kwargs:
properties['LoadBalancerNames'] = kwargs.get("load_balancer_names")
if "availability_zones" in kwargs:
properties['AvailabilityZones'] = kwargs.get("availability_zones")
if "cooldown" in kwargs:
properties['Cooldown'] = kwargs.get("cooldown")
if "desired_capacity" in kwargs:
properties['DesiredCapacity'] = kwargs.get("desired_capacity")
if "health_check_grace_period" in kwargs:
properties['HealthCheckGracePeriod'] = kwargs.get("health_check_grace_period")
if "health_check_type" in kwargs:
properties['HealthCheckType'] = kwargs.get("health_check_type")
if "instance_id" in kwargs:
properties['InstanceId'] = kwargs.get("instance_id")
if "placement_group" in kwargs:
properties['PlacementGroup'] = kwargs.get("placement_group")
if "termination_policies" in kwargs:
properties['TerminationPolicies'] = kwargs.get("termination_policies")
if "vpc_zone_identifier" in kwargs:
properties['VPCZoneIdentifier'] = kwargs.get("vpc_zone_identifier")
super(AutoScalingGroup, self).__init__(name, 'AWS::AutoScaling::AutoScalingGroup', properties, attributes)
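# Usage sketch (assumes the "WebLaunchConfig" launch configuration sketched
# above; zone names and tag values are placeholders):
#
#   web_asg = AutoScalingGroup(
#       "WebAutoScalingGroup",
#       max_size="4",
#       min_size="1",
#       launch_configuration="WebLaunchConfig",
#       availability_zones=["us-east-1a", "us-east-1b"],
#       tags=[{"key": "Name", "value": "web", "propagate_at_launch": True}],
#   )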
class ScalingPolicy(core.Resource):
"""
Creates a Scaling Policy
:param adjustment_type: String
:param asg_name: String
:param scaling_adjustment: String
kwargs
- cooldown: String
        - min_adjustment_step: String
"""
# ----------------------------------------------------------------------------------------------------------
# Scaling Policy
# ----------------------------------------------------------------------------------------------------------
def __init__(
self,
name,
adjustment_type,
asg_name,
scaling_adjustment,
**kwargs
):
attributes = kwargs.get("attributes", dict())
properties = {
'AdjustmentType': adjustment_type,
'AutoScalingGroupName': asg_name,
'ScalingAdjustment': scaling_adjustment
}
if "cooldown" in kwargs:
properties['Cooldown'] = kwargs.get("cooldown")
if "min_adjustment_step" in kwargs:
properties['MinAdjustmentStep'] = kwargs.get("min_adjustment_step")
super(ScalingPolicy, self).__init__(name, 'AWS::AutoScaling::ScalingPolicy', properties, attributes)
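# Usage sketch (assumes the auto scaling group sketched above; all values are
# placeholders):
#
#   scale_out = ScalingPolicy(
#       "ScaleOutPolicy",
#       adjustment_type="ChangeInCapacity",
#       asg_name="WebAutoScalingGroup",
#       scaling_adjustment="1",
#       cooldown="300",
#   )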
class ScheduledAction(core.Resource):
"""
Creates a Scheduled Action
:param asg_name: String
kwargs
- desired_capacity: Integer
- end_time: Time stamp (e.g. 2010-06-01T00:00:00Z)
- max_size: Integer
- min_size: Integer
- recurrence: String (e.g. cron)
- start_time: Time stamp (e.g. 2010-06-01T00:00:00Z)
"""
# ----------------------------------------------------------------------------------------------------------
# Scheduled Action
# ----------------------------------------------------------------------------------------------------------
def __init__(
self,
name,
asg_name,
**kwargs
):
attributes = kwargs.get("attributes", dict())
properties = {
'AutoScalingGroupName': asg_name
}
if "desired_capacity" in kwargs:
properties['DesiredCapacity'] = kwargs.get("desired_capacity")
if "end_time" in kwargs:
properties['EndTime'] = kwargs.get("end_time")
if "max_size" in kwargs:
properties['MaxSize'] = kwargs.get("max_size")
if "min_size" in kwargs:
properties['MinSize'] = kwargs.get("min_size")
if "recurrence" in kwargs:
properties['Recurrence'] = kwargs.get("recurrence")
if "start_time" in kwargs:
properties['StartTime'] = kwargs.get("start_time")
super(ScheduledAction, self).__init__(name, 'AWS::AutoScaling::ScheduledAction', properties, attributes)
class LifecycleHook(core.Resource):
"""
Creates a Lifecycle Hook
:param asg_name: String
:param lifecycle_transition: String
:param notification_target_arn: String
:param role_arn: String
kwargs
- default_result: String
- heartbeat_timeout: Integer
- notification_metadata: String
"""
# ----------------------------------------------------------------------------------------------------------
# LifeCycle Hook
# ----------------------------------------------------------------------------------------------------------
def __init__(
self,
name,
asg_name,
lifecycle_transition,
notification_target_arn,
role_arn,
**kwargs
):
attributes = kwargs.get("attributes", dict())
properties = {
'AutoScalingGroupName': asg_name,
'LifecycleTransition': lifecycle_transition,
'NotificationTargetARN': notification_target_arn,
'RoleARN': role_arn
}
if "default_result" in kwargs:
properties['DefaultResult'] = kwargs.get("default_result")
if "heartbeat_timeout" in kwargs:
properties['HeartbeatTimeout'] = kwargs.get("heartbeat_timeout")
if "notification_metadata" in kwargs:
properties['NotificationMetadata'] = kwargs.get("notification_metadata")
super(LifecycleHook, self).__init__(name, 'AWS::AutoScaling::LifecycleHook', properties, attributes)
| isc | 4,011,306,310,980,120,600 | 38.193211 | 120 | 0.500433 | false |