repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
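Each row below pairs one source file's full text (`content`) with its repository metadata. As a minimal, illustrative sketch (assuming the rows are published as a Hugging Face-style dataset with exactly these column names; the dataset identifier below is a made-up placeholder, not the real one), such a corpus could be loaded and filtered like this:

```python
from datasets import load_dataset

# "example-org/python-code-corpus" is a hypothetical placeholder name,
# not the actual identifier of this dataset.
ds = load_dataset("example-org/python-code-corpus", split="train")

# Keep human-written, permissively licensed files with reasonable line lengths.
subset = ds.filter(
    lambda row: not row["autogenerated"]
    and row["license"] in {"mit", "bsd-3-clause", "apache-2.0"}
    and row["line_max"] < 500
)

print(subset[0]["repo_name"], subset[0]["path"])
print(subset[0]["content"][:200])
```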
Russell-Jones/django-wiki | wiki/migrations/0001_initial.py | 1 | 18236 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
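# South needs the (possibly custom) user model spelled two ways: `user_orm_label`
# ("app_label.ObjectName") is used as the ForeignKey target in orm[...], while
# `user_model_label` ("app_label.modelname") is the key for the frozen `models`
# dict at the bottom of this migration.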
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Article'
db.create_table('wiki_article', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('current_revision', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='current_set', unique=True, null=True, to=orm['wiki.ArticleRevision'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.Group'], null=True, blank=True)),
('group_read', self.gf('django.db.models.fields.BooleanField')(default=True)),
('group_write', self.gf('django.db.models.fields.BooleanField')(default=True)),
('other_read', self.gf('django.db.models.fields.BooleanField')(default=True)),
('other_write', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('wiki', ['Article'])
# Adding model 'ArticleForObject'
db.create_table('wiki_articleforobject', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_type_set_for_articleforobject', to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('is_mptt', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('wiki', ['ArticleForObject'])
# Adding unique constraint on 'ArticleForObject', fields ['content_type', 'object_id']
db.create_unique('wiki_articleforobject', ['content_type_id', 'object_id'])
# Adding model 'ArticleRevision'
db.create_table('wiki_articlerevision', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('revision_number', self.gf('django.db.models.fields.IntegerField')()),
('user_message', self.gf('django.db.models.fields.TextField')(blank=True)),
('automatic_log', self.gf('django.db.models.fields.TextField')(blank=True)),
('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('previous_revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.ArticleRevision'], null=True, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
('content', self.gf('django.db.models.fields.TextField')(blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=512)),
('redirect', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='redirect_set', null=True, to=orm['wiki.Article'])),
))
db.send_create_signal('wiki', ['ArticleRevision'])
# Adding unique constraint on 'ArticleRevision', fields ['article', 'revision_number']
db.create_unique('wiki_articlerevision', ['article_id', 'revision_number'])
# Adding model 'URLPath'
db.create_table('wiki_urlpath', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, blank=True)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('parent', self.gf('mptt.fields.TreeForeignKey')(blank=True, related_name='children', null=True, to=orm['wiki.URLPath'])),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('wiki', ['URLPath'])
# Adding unique constraint on 'URLPath', fields ['site', 'parent', 'slug']
db.create_unique('wiki_urlpath', ['site_id', 'parent_id', 'slug'])
# Adding model 'ArticlePlugin'
db.create_table('wiki_articleplugin', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('wiki', ['ArticlePlugin'])
# Adding model 'ReusablePlugin'
db.create_table('wiki_reusableplugin', (
('articleplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ArticlePlugin'], unique=True, primary_key=True)),
))
db.send_create_signal('wiki', ['ReusablePlugin'])
# Adding M2M table for field articles on 'ReusablePlugin'
db.create_table('wiki_reusableplugin_articles', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('reusableplugin', models.ForeignKey(orm['wiki.reusableplugin'], null=False)),
('article', models.ForeignKey(orm['wiki.article'], null=False))
))
db.create_unique('wiki_reusableplugin_articles', ['reusableplugin_id', 'article_id'])
# Adding model 'RevisionPlugin'
db.create_table('wiki_revisionplugin', (
('articleplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ArticlePlugin'], unique=True, primary_key=True)),
('revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.ArticleRevision'])),
))
db.send_create_signal('wiki', ['RevisionPlugin'])
def backwards(self, orm):
# Removing unique constraint on 'URLPath', fields ['site', 'parent', 'slug']
db.delete_unique('wiki_urlpath', ['site_id', 'parent_id', 'slug'])
# Removing unique constraint on 'ArticleRevision', fields ['article', 'revision_number']
db.delete_unique('wiki_articlerevision', ['article_id', 'revision_number'])
# Removing unique constraint on 'ArticleForObject', fields ['content_type', 'object_id']
db.delete_unique('wiki_articleforobject', ['content_type_id', 'object_id'])
# Deleting model 'Article'
db.delete_table('wiki_article')
# Deleting model 'ArticleForObject'
db.delete_table('wiki_articleforobject')
# Deleting model 'ArticleRevision'
db.delete_table('wiki_articlerevision')
# Deleting model 'URLPath'
db.delete_table('wiki_urlpath')
# Deleting model 'ArticlePlugin'
db.delete_table('wiki_articleplugin')
# Deleting model 'ReusablePlugin'
db.delete_table('wiki_reusableplugin')
# Removing M2M table for field articles on 'ReusablePlugin'
db.delete_table('wiki_reusableplugin_articles')
# Deleting model 'RevisionPlugin'
db.delete_table('wiki_revisionplugin')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'wiki.article': {
'Meta': {'object_name': 'Article'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.ArticleRevision']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'other_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'other_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'wiki.articleforobject': {
'Meta': {'unique_together': "(('content_type', 'object_id'),)", 'object_name': 'ArticleForObject'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_articleforobject'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_mptt': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'wiki.articleplugin': {
'Meta': {'object_name': 'ArticlePlugin'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'wiki.articlerevision': {
'Meta': {'ordering': "('created',)", 'unique_together': "(('article', 'revision_number'),)", 'object_name': 'ArticleRevision'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']", 'null': 'True', 'blank': 'True'}),
'redirect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'redirect_set'", 'null': 'True', 'to': "orm['wiki.Article']"}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'wiki.reusableplugin': {
'Meta': {'object_name': 'ReusablePlugin', '_ormbases': ['wiki.ArticlePlugin']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shared_plugins_set'", 'symmetrical': 'False', 'to': "orm['wiki.Article']"})
},
'wiki.revisionplugin': {
'Meta': {'object_name': 'RevisionPlugin', '_ormbases': ['wiki.ArticlePlugin']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']"})
},
'wiki.urlpath': {
'Meta': {'unique_together': "(('site', 'parent', 'slug'),)", 'object_name': 'URLPath'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['wiki.URLPath']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['wiki']
| gpl-3.0 | -3,276,954,538,703,390,000 | 67.556391 | 209 | 0.592948 | false |
MrYsLab/razmq | razmq/motors/motors.py | 1 | 5067 | """
Copyright (c) 2016 Alan Yorinks All right reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import time
import signal
import sys
import argparse
from razmq.razbase.razbase import Razbase
class Motors(Razbase):
"""
This is the user side interface for motor control
Move left motor forward with a speed of 100:
{'command': 'left_motor_forward', 'speed': 100 }
Move left motor reverse with a speed of 100:
{'command': 'left_motor_reverse', 'speed': 100 }
Move left motor forward with a speed of 100:
{'command': 'right_motor_forward', 'speed': 100 }
Move left motor reverse with a speed of 100:
{'command': 'right_motor_reverse', 'speed': 100 }
Brake left motor
{'command': 'left_motor_brake' }}
Brake both motors
{'command": 'brake both;}
coast both motors
{'command": 'coast both;}
Coast left motor
{'command': 'left_motor_coast' }}
Brake right motor
{'command': 'right_motor_brake' }}
Coast right motor
{'command': 'right_motor_coast' }}
"""
def __init__(self, back_plane_ip_address=None, subscriber_port='43125', publisher_port='43124', process_name=None):
"""
:param back_plane_ip_address:
:param subscriber_port:
:param publisher_port:
"""
# initialize the base class
super().__init__(back_plane_ip_address, subscriber_port, publisher_port, process_name=process_name)
# allow time for connection
time.sleep(.03)
self.set_subscriber_topic('user_motor_command')
self.publisher_topic = 'system_motor_command'
# receive loop is defined in the base class
self.receive_loop()
# noinspection PyMethodMayBeStatic
def incoming_message_processing(self, topic, payload):
"""
Override this method with a message processor for the application
:param topic: Message Topic string
:param payload: Message Data
:return:
"""
try:
command = payload['command']
if command == 'left_motor_forward':
speed = payload['speed']
payload = {'command': 'left_motor_forward', 'speed': speed}
elif command == 'left_motor_reverse':
speed = payload['speed']
payload = {'command': 'left_motor_reverse', 'speed': speed}
elif command == 'left_motor_brake':
payload = {'command': 'left_motor_brake'}
elif command == 'left_motor_coast':
payload = {'command': 'left_motor_coast'}
elif command == 'right_motor_forward':
speed = payload['speed']
payload = {'command': 'right_motor_forward', 'speed': speed}
elif command == 'right_motor_reverse':
speed = payload['speed']
payload = {'command': 'right_motor_reverse', 'speed': speed}
elif command == 'right_motor_brake':
payload = {'command': 'right_motor_brake'}
elif command == 'right_motor_coast':
payload = {'command': 'right_motor_coast'}
else:
raise ValueError
self.publish_payload(payload, self.publisher_topic)
except ValueError:
print('led topic: ' + topic + ' payload: ' + payload)
raise
def motors():
# noinspection PyShadowingNames
parser = argparse.ArgumentParser()
parser.add_argument("-b", dest="back_plane_ip_address", default="None",
help="None or IP address used by Back Plane")
parser.add_argument("-n", dest="process_name", default="Motors Front End", help="Set process name in banner")
args = parser.parse_args()
kw_options = {}
if args.back_plane_ip_address != 'None':
kw_options['back_plane_ip_address'] = args.back_plane_ip_address
kw_options['process_name'] = args.process_name
my_motors = Motors(**kw_options)
# signal handler function called when Control-C occurs
# noinspection PyShadowingNames,PyUnusedLocal,PyUnusedLocal
def signal_handler(signal, frame):
print('Control-C detected. See you soon.')
my_motors.clean_up()
sys.exit(0)
# listen for SIGINT
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if __name__ == '__main__':
motors()
| gpl-3.0 | 8,621,895,000,938,363,000 | 31.273885 | 119 | 0.623051 | false |
kyhau/reko | reko/speechreko.py | 1 | 3825 | """
NOTE: this example requires PyAudio because it uses the Microphone class
"""
import os
import speech_recognition as sr
import time
from playsound import playsound
from reko.polly import Polly
from reko.reko import Reko
class SpeechReko(Reko):
def __init__(self, profile, collection_id, audio_on=False):
Reko.__init__(self, profile, collection_id)
self._audio_on = audio_on
self._polly = Polly(profile)
def signin(self, id=None):
"""
:param id: (optional) external_image_id
:return: external_image_id or None if not found
"""
ret_id = super(SpeechReko, self).signin(id)
if self._audio_on is True:
self.speak("Hello {}!".format(ret_id) \
if ret_id is not None else "Sorry! I do not recognise you.")
return ret_id
def signup(self, id):
"""
:param id: external_image_id
:return:
"""
succeeded = super(SpeechReko, self).signup(id)
if self._audio_on is True:
self.speak("Hello {}!".format(id) if succeeded is True else "Sorry {}! I have problem remembering you!".format(id))
return succeeded
def take_picture(self):
"""Connect to the webcam and capture an image and save to the give file.
"""
succeeded = super(SpeechReko, self).take_picture()
if succeeded is False and self._audio_on:
self.speak("Sorry! I'm unable to connect to the camera.")
return succeeded
def speak(self, msg):
"""Create an audio file for the given msg and play it.
"""
if self._audio_on is False:
print(msg)
return True
filename = self._cache.get_filename(msg, "mp3")
filepath = self._cache.get_filepath(filename)
if os.path.exists(filepath):
SpeechReko.play_audio(filepath)
return True
if self._polly.synthesize_speech(text_message=msg, output_file=filepath) is True:
SpeechReko.play_audio(filepath)
return True
return False
@staticmethod
def play_audio(audio_file):
"""
Play sound
"""
playsound(audio_file)
def watching(self, interval_sec=30):
"""
"""
while True:
print("Watching ...")
try:
ret_id = super(SpeechReko, self).signin()
if ret_id and self._audio_on is True:
self.speak("Hello {}!".format(ret_id))
except Exception as e:
print("Error: {0}".format(e))
time.sleep(interval_sec)
def listening(self):
"""Obtain audio from the microphone
"""
while True:
recognizer = sr.Recognizer()
with sr.Microphone() as source:
print("Listening ...")
audio = recognizer.listen(source)
try:
input_msg = recognizer.recognize_google(audio)
if self.process_message(input_msg) is False:
break
except sr.UnknownValueError:
self.speak("Please say it again")
except sr.RequestError as e:
self.speak("I have problem listening to you")
print("Error: {0}".format(e))
def process_message(self, input_msg):
"""Process message and return False if stop listening
"""
print("You said " + input_msg)
# TODO still in progress, this part is tmp code
if 'bye' in input_msg or 'goodbye' in input_msg or 'good bye' in input_msg:
self.speak("Goodbye")
return False
if 'sign in' in input_msg or 'sign-in' in input_msg:
self.signin()
return True
| mit | -4,202,663,694,091,020,300 | 31.692308 | 127 | 0.554248 | false |
travistang/late_fyt | ntm.py | 1 | 1191 | import tensorflow as tf
import numpy as np
class NTM(object):
def __init__(self,session, mem_size, mem_dim,controller):
self.sess = session
self.memory_dim = mem_dim
self.memory_length = mem_size
# construct memory variables
self.memory = [tf.Variable(np.zeros(self.memory_dim).astype(np.float32)) for _ in range(mem_size)]
self.controller = controller
self.write_vector = [tf.Variable(np.random.rand()) for _ in range(mem_size)]
# operations
self.read_op = tf.reduce_sum([a * b for (a,b) in zip(self.write_vector,self.memory)],0)
# finally initialize all variables
self.sess.run(tf.global_variables_initializer())
def read_vector(self):
self._normalize(self.write_vector)
return self.sess.run(self.read_op)
    def write(self, erase_v, add_v):
        # erase/add memory update is not implemented here
        raise NotImplementedError
# normalize a list of tf.Variable and return the new values
def _normalize(self,vec):
total = tf.reduce_sum(map(lambda v: tf.abs(v),vec))
# check if everything is 0
if total == 0.:
            return self.sess.run(map(lambda v: v.assign(0.), vec))
        else:
            return self.sess.run(map(lambda v: v.assign(v / total), vec))
if __name__ == '__main__':
with tf.Session() as sess:
ntm = NTM(sess,10,6,None)
print ntm.read_vector() | mit | -5,308,125,148,231,301,000 | 28.073171 | 100 | 0.690176 | false |
kaushik94/sympy | sympy/interactive/session.py | 1 | 15136 | """Tools for setting up interactive sessions. """
from __future__ import print_function, division
from distutils.version import LooseVersion as V
from sympy.external import import_module
from sympy.interactive.printing import init_printing
preexec_source = """\
from __future__ import division
from sympy import *
x, y, z, t = symbols('x y z t')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
init_printing()
"""
verbose_message = """\
These commands were executed:
%(source)s
Documentation can be found at https://docs.sympy.org/%(version)s
"""
no_ipython = """\
Couldn't locate IPython. Having IPython installed is greatly recommended.
See http://ipython.scipy.org for more details. If you use Debian/Ubuntu,
just install the 'ipython' package and start isympy again.
"""
def _make_message(ipython=True, quiet=False, source=None):
"""Create a banner for an interactive session. """
from sympy import __version__ as sympy_version
from sympy.polys.domains import GROUND_TYPES
from sympy.utilities.misc import ARCH
from sympy import SYMPY_DEBUG
import sys
import os
if quiet:
return ""
python_version = "%d.%d.%d" % sys.version_info[:3]
if ipython:
shell_name = "IPython"
else:
shell_name = "Python"
info = ['ground types: %s' % GROUND_TYPES]
cache = os.getenv('SYMPY_USE_CACHE')
if cache is not None and cache.lower() == 'no':
info.append('cache: off')
if SYMPY_DEBUG:
info.append('debugging: on')
args = shell_name, sympy_version, python_version, ARCH, ', '.join(info)
message = "%s console for SymPy %s (Python %s-%s) (%s)\n" % args
if source is None:
source = preexec_source
_source = ""
for line in source.split('\n')[:-1]:
if not line:
_source += '\n'
else:
_source += '>>> ' + line + '\n'
doc_version = sympy_version
if 'dev' in doc_version:
doc_version = "dev"
else:
doc_version = "%s/" % doc_version
message += '\n' + verbose_message % {'source': _source,
'version': doc_version}
return message
def int_to_Integer(s):
"""
Wrap integer literals with Integer.
This is based on the decistmt example from
http://docs.python.org/library/tokenize.html.
Only integer literals are converted. Float literals are left alone.
Examples
========
>>> from __future__ import division
>>> from sympy.interactive.session import int_to_Integer
>>> from sympy import Integer
>>> s = '1.2 + 1/2 - 0x12 + a1'
>>> int_to_Integer(s)
'1.2 +Integer (1 )/Integer (2 )-Integer (0x12 )+a1 '
>>> s = 'print (1/2)'
>>> int_to_Integer(s)
'print (Integer (1 )/Integer (2 ))'
>>> exec(s)
0.5
>>> exec(int_to_Integer(s))
1/2
"""
from tokenize import generate_tokens, untokenize, NUMBER, NAME, OP
from sympy.core.compatibility import StringIO
def _is_int(num):
"""
Returns true if string value num (with token NUMBER) represents an integer.
"""
# XXX: Is there something in the standard library that will do this?
if '.' in num or 'j' in num.lower() or 'e' in num.lower():
return False
return True
result = []
g = generate_tokens(StringIO(s).readline) # tokenize the string
for toknum, tokval, _, _, _ in g:
if toknum == NUMBER and _is_int(tokval): # replace NUMBER tokens
result.extend([
(NAME, 'Integer'),
(OP, '('),
(NUMBER, tokval),
(OP, ')')
])
else:
result.append((toknum, tokval))
return untokenize(result)
def enable_automatic_int_sympification(shell):
"""
Allow IPython to automatically convert integer literals to Integer.
"""
import ast
old_run_cell = shell.run_cell
def my_run_cell(cell, *args, **kwargs):
try:
# Check the cell for syntax errors. This way, the syntax error
# will show the original input, not the transformed input. The
# downside here is that IPython magic like %timeit will not work
# with transformed input (but on the other hand, IPython magic
# that doesn't expect transformed input will continue to work).
ast.parse(cell)
except SyntaxError:
pass
else:
cell = int_to_Integer(cell)
old_run_cell(cell, *args, **kwargs)
shell.run_cell = my_run_cell
def enable_automatic_symbols(shell):
"""Allow IPython to automatically create symbols (``isympy -a``). """
# XXX: This should perhaps use tokenize, like int_to_Integer() above.
# This would avoid re-executing the code, which can lead to subtle
# issues. For example:
#
# In [1]: a = 1
#
# In [2]: for i in range(10):
# ...: a += 1
# ...:
#
# In [3]: a
# Out[3]: 11
#
# In [4]: a = 1
#
# In [5]: for i in range(10):
# ...: a += 1
# ...: print b
# ...:
# b
# b
# b
# b
# b
# b
# b
# b
# b
# b
#
# In [6]: a
# Out[6]: 12
#
# Note how the for loop is executed again because `b` was not defined, but `a`
# was already incremented once, so the result is that it is incremented
# multiple times.
import re
re_nameerror = re.compile(
"name '(?P<symbol>[A-Za-z_][A-Za-z0-9_]*)' is not defined")
def _handler(self, etype, value, tb, tb_offset=None):
"""Handle :exc:`NameError` exception and allow injection of missing symbols. """
if etype is NameError and tb.tb_next and not tb.tb_next.tb_next:
match = re_nameerror.match(str(value))
if match is not None:
# XXX: Make sure Symbol is in scope. Otherwise you'll get infinite recursion.
self.run_cell("%(symbol)s = Symbol('%(symbol)s')" %
{'symbol': match.group("symbol")}, store_history=False)
try:
code = self.user_ns['In'][-1]
except (KeyError, IndexError):
pass
else:
self.run_cell(code, store_history=False)
return None
finally:
self.run_cell("del %s" % match.group("symbol"),
store_history=False)
stb = self.InteractiveTB.structured_traceback(
etype, value, tb, tb_offset=tb_offset)
self._showtraceback(etype, value, stb)
shell.set_custom_exc((NameError,), _handler)
def init_ipython_session(shell=None, argv=[], auto_symbols=False, auto_int_to_Integer=False):
"""Construct new IPython session. """
import IPython
if V(IPython.__version__) >= '0.11':
if not shell:
# use an app to parse the command line, and init config
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if V(IPython.__version__) >= '1.0':
from IPython.terminal import ipapp
else:
from IPython.frontend.terminal import ipapp
app = ipapp.TerminalIPythonApp()
# don't draw IPython banner during initialization:
app.display_banner = False
app.initialize(argv)
shell = app.shell
if auto_symbols:
enable_automatic_symbols(shell)
if auto_int_to_Integer:
enable_automatic_int_sympification(shell)
return shell
else:
from IPython.Shell import make_IPython
return make_IPython(argv)
def init_python_session():
"""Construct new Python session. """
from code import InteractiveConsole
class SymPyConsole(InteractiveConsole):
"""An interactive console with readline support. """
def __init__(self):
InteractiveConsole.__init__(self)
try:
import readline
except ImportError:
pass
else:
import os
import atexit
readline.parse_and_bind('tab: complete')
if hasattr(readline, 'read_history_file'):
history = os.path.expanduser('~/.sympy-history')
try:
readline.read_history_file(history)
except IOError:
pass
atexit.register(readline.write_history_file, history)
return SymPyConsole()
def init_session(ipython=None, pretty_print=True, order=None,
use_unicode=None, use_latex=None, quiet=False, auto_symbols=False,
auto_int_to_Integer=False, str_printer=None, pretty_printer=None,
latex_printer=None, argv=[]):
"""
Initialize an embedded IPython or Python session. The IPython session is
initiated with the --pylab option, without the numpy imports, so that
matplotlib plotting can be interactive.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify;
if False, use sstrrepr to stringify.
order: string or None
There are a few different settings for this parameter:
lex (default), which is lexographic order;
grlex, which is graded lexographic order;
grevlex, which is reversed graded lexographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: boolean or None
If True, use latex rendering if IPython GUI's;
if False, do not use latex rendering.
quiet: boolean
If True, init_session will not print messages regarding its status;
if False, init_session will print messages regarding its status.
auto_symbols: boolean
If True, IPython will automatically create symbols for you.
If False, it will not.
The default is False.
auto_int_to_Integer: boolean
If True, IPython will automatically wrap int literals with Integer, so
that things like 1/2 give Rational(1, 2).
If False, it will not.
The default is False.
ipython: boolean or None
If True, printing will initialize for an IPython console;
if False, printing will initialize for a normal console;
The default is None, which automatically determines whether we are in
an ipython instance or not.
str_printer: function, optional, default=None
A custom string printer function. This should mimic
sympy.printing.sstrrepr().
pretty_printer: function, optional, default=None
A custom pretty printer. This should mimic sympy.printing.pretty().
latex_printer: function, optional, default=None
A custom LaTeX printer. This should mimic sympy.printing.latex()
This should mimic sympy.printing.latex().
argv: list of arguments for IPython
See sympy.bin.isympy for options that can be used to initialize IPython.
See Also
========
sympy.interactive.printing.init_printing: for examples and the rest of the parameters.
Examples
========
>>> from sympy import init_session, Symbol, sin, sqrt
>>> sin(x) #doctest: +SKIP
NameError: name 'x' is not defined
>>> init_session() #doctest: +SKIP
>>> sin(x) #doctest: +SKIP
sin(x)
>>> sqrt(5) #doctest: +SKIP
___
\\/ 5
>>> init_session(pretty_print=False) #doctest: +SKIP
>>> sqrt(5) #doctest: +SKIP
sqrt(5)
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + x + y**2 + y
>>> init_session(order='grlex') #doctest: +SKIP
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + y**2 + x + y
>>> init_session(order='grevlex') #doctest: +SKIP
>>> y * x**2 + x * y**2 #doctest: +SKIP
x**2*y + x*y**2
>>> init_session(order='old') #doctest: +SKIP
>>> x**2 + y**2 + x + y #doctest: +SKIP
x + y + x**2 + y**2
>>> theta = Symbol('theta') #doctest: +SKIP
>>> theta #doctest: +SKIP
theta
>>> init_session(use_unicode=True) #doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
"""
import sys
in_ipython = False
if ipython is not False:
try:
import IPython
except ImportError:
if ipython is True:
raise RuntimeError("IPython is not available on this system")
ip = None
else:
try:
from IPython import get_ipython
ip = get_ipython()
except ImportError:
ip = None
in_ipython = bool(ip)
if ipython is None:
ipython = in_ipython
if ipython is False:
ip = init_python_session()
mainloop = ip.interact
else:
ip = init_ipython_session(ip, argv=argv, auto_symbols=auto_symbols,
auto_int_to_Integer=auto_int_to_Integer)
if V(IPython.__version__) >= '0.11':
# runsource is gone, use run_cell instead, which doesn't
# take a symbol arg. The second arg is `store_history`,
# and False means don't add the line to IPython's history.
ip.runsource = lambda src, symbol='exec': ip.run_cell(src, False)
#Enable interactive plotting using pylab.
try:
ip.enable_pylab(import_all=False)
except Exception:
# Causes an import error if matplotlib is not installed.
# Causes other errors (depending on the backend) if there
# is no display, or if there is some problem in the
# backend, so we have a bare "except Exception" here
pass
if not in_ipython:
mainloop = ip.mainloop
if auto_symbols and (not ipython or V(IPython.__version__) < '0.11'):
raise RuntimeError("automatic construction of symbols is possible only in IPython 0.11 or above")
if auto_int_to_Integer and (not ipython or V(IPython.__version__) < '0.11'):
raise RuntimeError("automatic int to Integer transformation is possible only in IPython 0.11 or above")
_preexec_source = preexec_source
ip.runsource(_preexec_source, symbol='exec')
init_printing(pretty_print=pretty_print, order=order,
use_unicode=use_unicode, use_latex=use_latex, ip=ip,
str_printer=str_printer, pretty_printer=pretty_printer,
latex_printer=latex_printer)
message = _make_message(ipython, quiet, _preexec_source)
if not in_ipython:
print(message)
mainloop()
sys.exit('Exiting ...')
else:
print(message)
import atexit
atexit.register(lambda: print("Exiting ...\n"))
| bsd-3-clause | -7,974,791,464,022,189,000 | 31.550538 | 111 | 0.583047 | false |
hp-storage/python-lefthandclient | hpelefthandclient/__init__.py | 2 | 1185 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013-2016 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
HPE LeftHand REST Client
:Author: Kurt Martin
:Author: Walter A. Boring IV
:Copyright: Copyright 2013-2015 Hewlett Packard Enterprise Development LP
:License: Apache v2.0
"""
version_tuple = (2, 1, 0)
def get_version_string():
if isinstance(version_tuple[-1], str):
return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1]
return '.'.join(map(str, version_tuple))
version = get_version_string()
"""Current version of HPELeftHandClient."""
| apache-2.0 | 7,481,040,198,847,530,000 | 31.916667 | 78 | 0.714768 | false |
migimigi/bme280-1 | docs/conf.py | 2 | 8446 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# bme280 documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import bme280
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BME280 Python Driver'
copyright = u'2015, Kieran Brownlees'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = bme280.__version__
# The full version, including alpha/beta/rc tags.
release = bme280.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bme280doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'bme280.tex',
u'BME280 Python Driver Documentation',
u'Kieran Brownlees', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bme280',
u'BME280 Python Driver Documentation',
[u'Kieran Brownlees'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bme280',
u'BME280 Python Driver Documentation',
u'Kieran Brownlees',
'bme280',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause | 7,603,664,539,946,308,000 | 29.712727 | 76 | 0.705186 | false |
jeremiedecock/snippets | python/tkinter/python3/geometry_manager_pack_test_with_1_widget.py | 1 | 6263 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See: http://effbot.org/tkinterbook/frame.htm
import tkinter as tk
# WINDOW 1 (there should be only one "Tk" object) #########################
window1 = tk.Tk()
window1.title("Result Window")
widget1 = tk.Canvas(window1, bg="red", width=200, height=200)
widget1.create_text((100, 100), text="Widget 1", font="sans 16 bold", fill="white", anchor="c")
widget1.pack()
frame1_pack_info = widget1.pack_info()
# WINDOW 2 (Toplevel object) ##############################################
window2 = tk.Toplevel()
window2.title("Control Window")
window2.geometry("+200+200")
# Widget 1 frame ##################
frame_widget1 = tk.LabelFrame(window2, text="Widget 1", padx=5, pady=5)
frame_widget1.pack(fill=tk.X, padx=10, pady=5)
# Fill ########
# Must be none, x, y, or both
var_fill = tk.StringVar()
var_fill.set(frame1_pack_info['fill'])
def fill_callback():
widget1.pack_configure(fill=var_fill.get())
print("Widget 1:", widget1.pack_info())
rb_fill_none = tk.Radiobutton(frame_widget1, text="fill = none", variable=var_fill, value="none", command=fill_callback)
rb_fill_x = tk.Radiobutton(frame_widget1, text="fill = x", variable=var_fill, value="x", command=fill_callback)
rb_fill_y = tk.Radiobutton(frame_widget1, text="fill = y", variable=var_fill, value="y", command=fill_callback)
rb_fill_both = tk.Radiobutton(frame_widget1, text="fill = both", variable=var_fill, value="both", command=fill_callback)
rb_fill_none.pack(anchor=tk.W)
rb_fill_x.pack(anchor=tk.W)
rb_fill_y.pack(anchor=tk.W)
rb_fill_both.pack(anchor=tk.W)
# Separator
tk.Frame(frame_widget1, height=1, bd=1, relief=tk.SUNKEN).pack(fill=tk.X, padx=5, pady=5)
# Expand ######
var_expand = tk.IntVar()
var_expand.set(frame1_pack_info['expand'])
def expand_callback():
print(var_expand.get())
widget1.pack_configure(expand=var_expand.get())
print("Widget 1:", widget1.pack_info())
cb_expand = tk.Checkbutton(frame_widget1, text="expand", variable=var_expand, command=expand_callback)
cb_expand.pack(anchor=tk.W)
# Separator
tk.Frame(frame_widget1, height=1, bd=1, relief=tk.SUNKEN).pack(fill=tk.X, padx=5, pady=5)
# Side ########
# Must be top, bottom, left, or right
var_side = tk.StringVar()
var_side.set(frame1_pack_info['side'])
def side_callback():
widget1.pack_configure(side=var_side.get())
print("Widget 1:", widget1.pack_info())
rb_side_top = tk.Radiobutton(frame_widget1, text="side = top", variable=var_side, value="top", command=side_callback)
rb_side_bottom = tk.Radiobutton(frame_widget1, text="side = bottom", variable=var_side, value="bottom", command=side_callback)
rb_side_left = tk.Radiobutton(frame_widget1, text="side = left", variable=var_side, value="left", command=side_callback)
rb_side_right = tk.Radiobutton(frame_widget1, text="side = right", variable=var_side, value="right", command=side_callback)
rb_side_top.pack(anchor=tk.W)
rb_side_bottom.pack(anchor=tk.W)
rb_side_left.pack(anchor=tk.W)
rb_side_right.pack(anchor=tk.W)
# Separator
tk.Frame(frame_widget1, height=1, bd=1, relief=tk.SUNKEN).pack(fill=tk.X, padx=5, pady=5)
# Anchor ######
# Must be n, ne, e, se, s, sw, w, nw, or center
var_anchor = tk.StringVar()
var_anchor.set(frame1_pack_info['anchor'])
def anchor_callback():
widget1.pack_configure(anchor=var_anchor.get())
print("Widget 1:", widget1.pack_info())
rb_anchor_n = tk.Radiobutton(frame_widget1, text="anchor = n", variable=var_anchor, value="n", command=anchor_callback)
rb_anchor_s = tk.Radiobutton(frame_widget1, text="anchor = s", variable=var_anchor, value="s", command=anchor_callback)
rb_anchor_e = tk.Radiobutton(frame_widget1, text="anchor = e", variable=var_anchor, value="e", command=anchor_callback)
rb_anchor_w = tk.Radiobutton(frame_widget1, text="anchor = w", variable=var_anchor, value="w", command=anchor_callback)
rb_anchor_ne = tk.Radiobutton(frame_widget1, text="anchor = ne", variable=var_anchor, value="ne", command=anchor_callback)
rb_anchor_nw = tk.Radiobutton(frame_widget1, text="anchor = nw", variable=var_anchor, value="nw", command=anchor_callback)
rb_anchor_se = tk.Radiobutton(frame_widget1, text="anchor = se", variable=var_anchor, value="se", command=anchor_callback)
rb_anchor_sw = tk.Radiobutton(frame_widget1, text="anchor = sw", variable=var_anchor, value="sw", command=anchor_callback)
rb_anchor_center = tk.Radiobutton(frame_widget1, text="anchor = center", variable=var_anchor, value="center", command=anchor_callback)
rb_anchor_n.pack(anchor=tk.W)
rb_anchor_s.pack(anchor=tk.W)
rb_anchor_e.pack(anchor=tk.W)
rb_anchor_w.pack(anchor=tk.W)
rb_anchor_ne.pack(anchor=tk.W)
rb_anchor_nw.pack(anchor=tk.W)
rb_anchor_se.pack(anchor=tk.W)
rb_anchor_sw.pack(anchor=tk.W)
rb_anchor_center.pack(anchor=tk.W)
# Setup close button ##############
# Let window2's close button quit the application
window2.protocol("WM_DELETE_WINDOW", window1.quit)
# MAIN LOOP ("Tk" object) #################################################
window1.mainloop()
| mit | 4,548,765,682,759,385,600 | 40.190789 | 134 | 0.691902 | false |
fernandog/Medusa | medusa/providers/torrent/html/zooqle.py | 1 | 5673 | # coding=utf-8
"""Provider code for Zooqle."""
from __future__ import unicode_literals
import logging
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import (
convert_size,
try_int,
)
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class ZooqleProvider(TorrentProvider):
"""Zooqle Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(ZooqleProvider, self).__init__('Zooqle')
# Credentials
self.public = True
# URLs
self.url = 'https://zooqle.com'
self.urls = {
'search': urljoin(self.url, '/search'),
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK', 'REAL']
# Miscellaneous Options
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tv.Cache(self, min_time=15)
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
"""
Search a provider and parse the results.
:param search_strings: A dict with mode (key) and the search value (value)
:param age: Not used
:param ep_obj: Not used
:returns: A list of search results (structure)
"""
results = []
# Search Params
search_params = {
'q': '* category:TV',
's': 'dt',
'v': 't',
'sd': 'd',
}
for mode in search_strings:
log.debug('Search mode: {0}', mode)
for search_string in search_strings[mode]:
if mode != 'RSS':
log.debug('Search string: {search}',
{'search': search_string})
search_params = {'q': '{0} category:TV'.format(search_string)}
response = self.session.get(self.urls['search'], params=search_params)
if not response or not response.text:
log.debug('No data returned from provider')
continue
results += self.parse(response.text, mode)
return results
def parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
items = []
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('div', class_='panel-body')
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
log.debug('Data returned from provider does not contain any torrents')
return items
# Skip column headers
for row in torrent_rows[1:]:
cells = row('td')
try:
title = cells[1].find('a').get_text()
magnet = cells[2].find('a', title='Magnet link')['href']
download_url = '{magnet}{trackers}'.format(magnet=magnet,
trackers=self._custom_trackers)
if not all([title, download_url]):
continue
seeders = 1
leechers = 0
if len(cells) > 5:
peers = cells[5].find('div')
if peers and peers.get('title'):
peers = peers['title'].replace(',', '').split(' | ', 1)
# Removes 'Seeders: '
seeders = try_int(peers[0][9:])
# Removes 'Leechers: '
leechers = try_int(peers[1][10:])
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
if mode != 'RSS':
log.debug("Discarding torrent because it doesn't meet the"
" minimum seeders: {0}. Seeders: {1}",
title, seeders)
continue
torrent_size = cells[3].get_text().replace(',', '')
size = convert_size(torrent_size) or -1
pubdate_raw = cells[4].get_text().replace('yesterday', '24 hours')
# "long ago" can't be translated to a date
if pubdate_raw == 'long ago':
pubdate_raw = None
pubdate = self.parse_pubdate(pubdate_raw, human_time=True)
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': pubdate,
}
if mode != 'RSS':
log.debug('Found result: {0} with {1} seeders and {2} leechers',
title, seeders, leechers)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
log.exception('Failed parsing provider.')
return items
provider = ZooqleProvider()
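# Usage sketch (assumption; the mode key and search string below are illustrative,
# the provider is normally driven by Medusa's search machinery):
#   results = provider.search({'Episode': ['Some Show S01E01']})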
| gpl-3.0 | 7,063,182,022,490,597,000 | 32.568047 | 94 | 0.482285 | false |
ncareol/qmicromap | tool_qmicromap.py | 1 | 1595 | import os
import sys
tools = ['qt5','spatialdb','doxygen','prefixoptions']
env = Environment(tools = ['default'] + tools)
# qt modules
qtModules = Split('QtCore QtGui QtSvg')
env.EnableQtModules(qtModules)
def win_qt_setup(env):
# Windows needs an extra include path for Qt modules.
qt5include = env['QT5DIR'] + '/include'
env.AppendUnique(CPPPATH=[qt5include,])
env.EnableQtModules(qtModules)
def mac_qt_setup(env):
# Mac OS setup
# Qt configuration:
# the following uses the frameworks scheme available for gcc on Mac OS
# to provide libraries and library paths
#frameworkpath='/usr/local/lib'
#env.AppendUnique(FRAMEWORKPATH=[frameworkpath,])
#env.AppendUnique(FRAMEWORKS=qt4Modules)
pass
libsources = Split("""
QMicroMap.cpp
QStationModelGraphicsItem.cpp
""")
headers = Split("""
QMicroMap.h
QStationModelGraphicsItem.h
MicroMapOverview.h
""")
if env['PLATFORM'] == 'darwin':
mac_qt_setup(env)
if env['PLATFORM'] == 'win32':
win_qt_setup(env)
libqmicromap = env.Library('qmicromap', libsources)
env.Default(libqmicromap)
html = env.Apidocs(libsources + headers, DOXYFILE_DICT={'PROJECT_NAME':'QMicroMap', 'PROJECT_NUMBER':'1.0'})
thisdir = env.Dir('.').srcnode().abspath
def qmicromap(env):
env.AppendLibrary('qmicromap')
env.Require(tools)
env.EnableQtModules(qtModules)
env.AppendUnique(CPPPATH =[thisdir,])
env.AppendDoxref('QMicroMap')
if env['PLATFORM'] == 'darwin':
mac_qt_setup(env)
if env['PLATFORM'] == 'win32':
win_qt_setup(env)
Export('qmicromap')
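# Usage sketch (assumption): a consuming SConscript can pick up the exported tool
# function and apply it to its own construction environment:
#   Import('qmicromap')
#   qmicromap(env)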
| bsd-3-clause | -6,514,323,080,600,594,000 | 24.725806 | 109 | 0.685266 | false |
ermongroup/a-nice-mc | a_nice_mc/objectives/expression/mog2.py | 1 | 1170 | import numpy as np
import tensorflow as tf
from a_nice_mc.objectives.expression import Expression
from a_nice_mc.utils.logger import create_logger
logger = create_logger(__name__)
class MixtureOfGaussians(Expression):
def __init__(self, name='mog2', display=True):
super(MixtureOfGaussians, self).__init__(name=name, display=display)
self.z = tf.placeholder(tf.float32, [None, 2], name='z')
def __call__(self, z):
z1 = tf.reshape(tf.slice(z, [0, 0], [-1, 1]), [-1])
z2 = tf.reshape(tf.slice(z, [0, 1], [-1, 1]), [-1])
v1 = tf.sqrt((z1 - 5) * (z1 - 5) + z2 * z2) * 2
v2 = tf.sqrt((z1 + 5) * (z1 + 5) + z2 * z2) * 2
pdf1 = tf.exp(-0.5 * v1 * v1) / tf.sqrt(2 * np.pi * 0.25)
pdf2 = tf.exp(-0.5 * v2 * v2) / tf.sqrt(2 * np.pi * 0.25)
return -tf.log(0.5 * pdf1 + 0.5 * pdf2)
@staticmethod
def mean():
return np.array([0.0, 0.0])
@staticmethod
def std():
return np.array([5.0, 0.5])
@staticmethod
def statistics(z):
return z
@staticmethod
def xlim():
return [-8, 8]
@staticmethod
def ylim():
return [-8, 8]
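# Usage sketch (assumption): __call__ returns the energy (the negative log of the
# unnormalised density) of an equal-weight mixture of two Gaussians centred at
# (-5, 0) and (5, 0), e.g.
#   energy = MixtureOfGaussians()(z)  # z is a [batch, 2] float32 tensor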
| mit | 8,517,003,602,130,317,000 | 27.536585 | 76 | 0.54188 | false |
LynnCo/toolkit | graphVis/interface.py | 1 | 3989 | #Basic
'''
import tkinter as tk
class GUI(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.grid()
self.create()
def create(self):
w = tk.Canvas(self,width=600,height=400)
# w.create_image = (image=vp)
w.pack()
run = GUI()
run.mainloop()
'''
#Buttons
'''
import tkinter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
class GUI ():
def __init__(self, master):
# Create a container
frame = tkinter.Frame(master)
# Create 2 buttons
self.button_left = tkinter.Button(frame,text="< Decrease Slope",command=self.decrease)
self.button_left.pack(side="left")
self.button_right = tkinter.Button(frame,text="Increase Slope >",command=self.increase)
self.button_right.pack(side="left")
fig = Figure()
ax = fig.add_subplot(111)
self.line, = ax.plot(range(10))
self.canvas = FigureCanvasTkAgg(fig,master=master)
self.canvas.show()
self.canvas.get_tk_widget().pack(side='top', fill='both', expand=1)
frame.pack()
def decrease(self):
x, y = self.line.get_data()
self.line.set_ydata(y - 0.2 * x)
self.canvas.draw()
def increase(self):
x, y = self.line.get_data()
self.line.set_ydata(y + 0.2 * x)
self.canvas.draw()
root = tkinter.Tk()
run = GUI(root)
root.mainloop()
'''
#Click event handler and annotation
'''
import math
import pylab
import matplotlib
class AnnoteFinder:
"""
callback for matplotlib to display an annotation when points are clicked on. The
point which is closest to the click and within xtol and ytol is identified.
Register this function like this:
scatter(xdata, ydata)
af = AnnoteFinder(xdata, ydata, annotes)
connect('button_press_event', af)
"""
def __init__(self, xdata, ydata, annotes, axis=None, xtol=None, ytol=None):
self.data = zip(xdata, ydata, annotes)
if xtol is None:
xtol = ((max(xdata) - min(xdata))/float(len(xdata)))/2
if ytol is None:
ytol = ((max(ydata) - min(ydata))/float(len(ydata)))/2
self.xtol = xtol
self.ytol = ytol
if axis is None:
self.axis = pylab.gca()
else:
self.axis= axis
self.drawnAnnotations = dict()
self.links = list()
def distance(self, x1, x2, y1, y2):
"""
return the distance between two points
"""
return math.hypot(x1 - x2, y1 - y2)
def __call__(self, event):
if event.inaxes:
clickX = event.xdata
clickY = event.ydata
if self.axis is None or self.axis==event.inaxes:
annotes = list()
for x,y,a in self.data:
if clickX-self.xtol < x < clickX+self.xtol and clickY-self.ytol < y < clickY+self.ytol :
annotes.append((self.distance(x,clickX,y,clickY),x,y, a) )
if annotes:
annotes.sort()
distance, x, y, annote = annotes[0]
self.drawAnnote(event.inaxes, x, y, annote)
for l in self.links:
l.drawSpecificAnnote(annote)
def drawAnnote(self, axis, x, y, annote):
"""
Draw the annotation on the plot
"""
if (x,y) in self.drawnAnnotations:
markers = self.drawnAnnotations[(x,y)]
for m in markers:
m.set_visible(not m.get_visible())
self.axis.figure.canvas.draw()
else:
t = axis.text(x,y, "(%3.2f, %3.2f) - %s"%(x,y,annote), )
m = axis.scatter([x],[y], marker='d', c='r', zorder=100)
self.drawnAnnotations[(x,y)] =(t,m)
self.axis.figure.canvas.draw()
def drawSpecificAnnote(self, annote):
annotesToDraw = [(x,y,a) for x,y,a in self.data if a==annote]
for x,y,a in annotesToDraw:
self.drawAnnote(self.axis, x, y, a)
x = range(2)
y = range(2)
annotes = ["point 1","point 2"]
pylab.scatter(x,y)
af = AnnoteFinder(x, y, annotes)
pylab.connect('button_press_event', af)
pylab.show()
''' | mit | -7,537,670,444,090,344,000 | 26.142857 | 100 | 0.604161 | false |
rsms/smisk | admin/old_examples/testbed/process.py | 1 | 3051 | #!/usr/bin/env python
# encoding: utf-8
import sys, os, platform
from smisk import Application, Request, Response, request
class MyRequest(Request):
def accepts_charsets(self):
    '''Return a list of charsets which the client can handle, ordered by priority and order of appearance.'''
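    # Example (illustrative values, not from the original source): an
    # Accept-Charset header of 'iso-8859-1,utf-8;q=0.7' yields
    # ['iso-8859-1', 'utf-8'], since an unqualified charset defaults to q=1.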
vv = []
if not 'HTTP_ACCEPT_CHARSET' in self.env:
return vv
for cs in self.env['HTTP_ACCEPT_CHARSET'].split(','):
p = cs.find(';')
if p != -1:
pp = cs.find('q=', p)
if pp != -1:
vv.append([cs[:p], int(float(cs[pp+2:])*100)])
continue
vv.append([cs, 100])
vv.sort(lambda a,b: b[1] - a[1])
return [v[0] for v in vv]
class MyResponse(Response):
def redirect_to_path(self, path):
url = request.url
include_port = True
if url.port == 80:
include_port = False
url = url.to_s(port=include_port, path=False, query=False, fragment=False)
self.headers += ['Status: 302 Found', 'Location: %s%s' % (url, path)]
class MyApp(Application):
chunk = '.'*8000
def __init__(self):
self.request_class = MyRequest
self.response_class = MyResponse
Application.__init__(self)
def service(self):
# Test sending alot of data with content length
#self.response.out.write("Content-Length: 8000\r\n\r\n")
#self.response.out.write(self.chunk)
# Test sending alot of data with chunked content
#self.response.write(self.chunk)
if self.request.url.path == "/go-away":
self.response.redirect_to_path("/redirected/away")
return
if 'CONTENT_LENGTH' in self.request.env:
# Test smisk_Request___iter__
for line in self.request:
self.response.write(line)
self.response.headers = ["Content-Type: text/plain"]
self.response.write("self.request.url = %s\n" % self.request.url)
self.response.write("self.request.env.get('HTTP_ACCEPT_CHARSET') => %s\n" % self.request.env.get('HTTP_ACCEPT_CHARSET'))
self.response.write("self.request.acceptsCharsets() = %s\n" % self.request.accepts_charsets())
# Test smisk_Response___call__
self.response(
"__call__ Line1\n",
"__call__ Line2\n",
"__call__ Line3\n",
"__call__ Line4\n",
)
# Test smisk_Response_writelines and at the same time test smisk_Stream_perform_writelines
self.response.writelines((
"writelines Line1\n",
"writelines Line2\n",
"writelines Line3\n",
"writelines Line4\n",
))
#self.response.write(self.chunk)
#self.response.write("<h1>Hello World!</h1>"
# "request.env = <tt>%s</tt>\n" % self.request.env)
#self.response.headers = ["Content-Type: text/html"]
#err1()
# test exception response
def err1(): err2()
def err2(): err3()
def err3(): err4()
def err4(): err5()
def err5(): raise IOError("Kabooom!")
try:
MyApp().run()
except KeyboardInterrupt:
pass
except:
import traceback
traceback.print_exc(1000, open(os.path.abspath(os.path.dirname(__file__)) + "/process-error.log", "a"))
| mit | -5,327,384,261,450,466,000 | 28.621359 | 124 | 0.620125 | false |
eamontoyaa/pyCSS | validations/validation03-comparisonZhao.etal.,2014.py | 1 | 5013 | '''
# Description.
This is a minimal module to perform a circular arc slope stability
analysis with the limit equilibrium model, using the Fellenius and simplified
Bishop methods.
'''
#------------------------------------------------------------------------------
## Add functions directory
import sys
sys.path += ['../functions']
#------------------------------------------------------------------------------
## Modules/Functions import
import numpy as np
import time
from automaticslipcircles import automaticslipcircles
from onlyonecircle import onlyonecircle
#------------------------------------------------------------------------------
## Poject data
projectName = 'Validation-03'
projectAuthor = 'Exneyder A. Montoya Araque'
projectDate = time.strftime("%d/%m/%y")
#------------------------------------------------------------------------------
## Define inputs
# The slope geometry
slopeHeight = [10, 'm']
slopeDip = np.array([2, 1])
crownDist = [5, 'm']
toeDist = [5, 'm']
wantAutomaticToeDepth = False
if wantAutomaticToeDepth == True:
toeDepth = ['automatic toe Depth']
else:
toeDepth = [3, 'm']
# The slip arc-circle
wantEvaluateOnlyOneSurface = True
if wantEvaluateOnlyOneSurface == True:
hztDistPointAtCrownFromCrown = [-2, 'm']
hztDistPointAtToeFromCrown = [20, 'm']
slipRadius = [34.95, 'm']
else:
numCircles = 2000
radiusIncrement = [2, 'm']
numberIncrements = 40
maxFsValueCont = 2
# Watertable
wantWatertable = False
if wantWatertable == True:
wtDepthAtCrown = [0, 'm']
else:
wtDepthAtCrown = ['No watertable']
toeUnderWatertable = False
# Materials properties.
waterUnitWeight = [0, 'kN/m3']
materialUnitWeight = [20, 'kN/m3']
frictionAngleGrad = [19.6, 'degrees']
cohesion = [3, 'kPa']
## Advanced inputs
# Do you want to divide the slip surface into constant-width slices?
wantConstSliceWidthTrue = False
# Number of discretizations of slip surface.
numSlices = 15
# Number of discretizations of circular arcs.
nDivs = numSlices
# Select the method to calculate the safety factor ['Flns', 'Bshp' or 'Allm'].
methodString = 'Allm'
# Select the output format image #['.eps', '.jpeg', '.jpg', '.pdf', '.pgf', \
# '.png', '.ps', '.raw', '.rgba', '.svg', '.svgz', '.tif', '.tiff'].
outputFormatImg = '.svg'
#------------------------------------------------------------------------------
# Operations for only one slip surface
if wantEvaluateOnlyOneSurface == True:
msg = onlyonecircle(projectName, projectAuthor, projectDate, slopeHeight, \
slopeDip, crownDist, toeDist, wantAutomaticToeDepth, toeDepth, \
hztDistPointAtCrownFromCrown, hztDistPointAtToeFromCrown, \
slipRadius, wantWatertable, wtDepthAtCrown, toeUnderWatertable, \
waterUnitWeight, materialUnitWeight, frictionAngleGrad, cohesion, \
wantConstSliceWidthTrue, numSlices, nDivs, methodString, \
outputFormatImg)
#------------------------------------------------------------------------------
# Operations for multiple slip surface
else:
automaticslipcircles(projectName, projectAuthor, projectDate, slopeHeight,\
slopeDip, crownDist, toeDist, wantAutomaticToeDepth, toeDepth, \
numCircles, radiusIncrement, numberIncrements, maxFsValueCont, \
wantWatertable, wtDepthAtCrown, toeUnderWatertable, waterUnitWeight, \
materialUnitWeight, frictionAngleGrad, cohesion, \
wantConstSliceWidthTrue, numSlices, nDivs, methodString, \
outputFormatImg)
'''
BSD 2 license.
Copyright (c) 2016, Universidad Nacional de Colombia, Ludger O.
Suarez-Burgoa and Exneyder Andrés Montoya Araque.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
| bsd-2-clause | 6,933,199,822,277,709,000 | 37.777778 | 79 | 0.656425 | false |
viewportvr/daysinvr | backend/remixvr/database.py | 1 | 1343 | # -*- coding: utf-8 -*-
"""Database module, including the SQLAlchemy database object and DB-related utilities."""
from sqlalchemy.orm import relationship
from .compat import basestring
from .extensions import db
# Alias common SQLAlchemy names
Column = db.Column
relationship = relationship
Model = db.Model
# From Mike Bayer's "Building the app" talk
# https://speakerdeck.com/zzzeek/building-the-app
class SurrogatePK(object):
"""A mixin that adds a surrogate integer 'primary key' column named ``id`` \
to any declarative-mapped class.
"""
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
@classmethod
def get_by_id(cls, record_id):
"""Get record by ID."""
if any(
(isinstance(record_id, basestring) and record_id.isdigit(),
isinstance(record_id, (int, float))),
):
return cls.query.get(int(record_id))
def reference_col(tablename, nullable=False, pk_name='id', **kwargs):
"""Column that adds primary key foreign key reference.
Usage: ::
category_id = reference_col('category')
category = relationship('Category', backref='categories')
"""
return db.Column(
db.ForeignKey('{0}.{1}'.format(tablename, pk_name)),
nullable=nullable, **kwargs)
| mit | 9,041,744,562,209,402,000 | 28.195652 | 89 | 0.647059 | false |
BrownGLAMOR/JACK | web/ffad/urls.py | 1 | 1176 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url
from ffad import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^(?P<draft_id>\d+)/$', views.draft),
url(r'^(?P<draft_id>\d+)/register$', views.register),
url(r'^(?P<draft_id>\d+)/get_manager_updates$', views.get_manager_updates),
url(r'^(?P<draft_id>\d+)/get_player_updates$', views.get_player_updates),
url(r'^(?P<draft_id>\d+)/get_team$', views.get_team),
url(r'^(?P<draft_id>\d+)/place_bid$', views.place_bid)
)
| lgpl-3.0 | -9,095,699,174,355,440,000 | 44.230769 | 79 | 0.701531 | false |
super-goose/orbit | space/planet.py | 1 | 3201 | import pygame
import math
class Planet:
def __init__(self, surface, color, position, radius, center):
self.radius = radius
self.surface = surface
self.color = color
self.setPosition(position)
self.center = center
self.setOrbitOffset(0)
self.setOrbitPeriod(1)
self.setOrbitRadius(0)
self.year = 0
self.mass = 0
self.velocity = 0
self.angle = 0
self.name = ''
def drawPlanet(self):
x = int(self.position[0])
y = int(self.position[1])
pygame.draw.circle(self.surface, self.color, (x, y), self.radius)
def getRadius(self): return self.radius
def setPosition(self, newPos):
self.position = newPos
return self
def getPosition(self): return self.position
def setVelocity(self, vel):
self.velocity = vel
return self
def getVelocity(self): return self.velocity
def setAngle(self, angle):
self.angle = angle
return self
def getAngle(self): return self.angle
def setName(self, name):
self.name = name
return self
def getName(self): return self.name
def setGravity(self, gravity):
self.gravity = gravity
return self
def getGravity(self): return self.gravity
def setOrbitRadius(self, radius):
self.orbitRadius = radius
return self
def getOrbitRadius(self): return self.orbitRadius
def setOrbitOffset(self, offset):
self.orbitOffset = offset
return self
def getOrbitOffset(self): return self.orbitOffset
def setOrbitPeriod(self, period):
self.orbitPeriod = period
return self
def getOrbitPeriod(self): return self.orbitPeriod
def advancePosition(self, sun):
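        # Per-step integration scheme (descriptive summary of the code below):
        #   1. split the current speed into x/y components along the stored heading
        #   2. add the gravitational pull of the sun (proportional to 1/d**2)
        #   3. recover the new speed, and the change of heading via the law of cosines
        #   4. advance the position along the updated heading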
x, y = self.position
# get new point with no gravity
v = self.velocity
angle = self.angle
vx = v * math.sin(angle)
vy = v * math.cos(angle)
        # get the pull from the sun
        gravitationalConstant = 14 # this is the number that made it work well
                                   # I don't know why this number and not another
sunX, sunY = sun.getPosition()
sunX -= x
sunY -= y
d = math.sqrt(sunX**2 + sunY**2)
        g = sun.getGravity() * gravitationalConstant / (d ** 2)
ax = (g * sunX) / d
ay = (g * sunY) / d
# add these vectors together
dx = vx + ax
dy = vy + ay
newV = math.sqrt(dx**2 + dy**2)
# using law of cosines to get the angle
# by getting the cosine first, then using arccos to find the angle
ac = (g**2 - v**2 - newV**2)/(-2 * v * newV)
A = math.acos(ac)
        # update attributes
self.angle += A
self.velocity = newV
x += newV * math.sin(self.angle)
y += newV * math.cos(self.angle)
self.setPosition((x, y))
return self
def distanceFrom(self, pos):
x1 = self.position[0]
y1 = self.position[1]
x2 = pos[0]
y2 = pos[1]
return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)
# EOF for planets | mit | -2,533,718,706,393,433,600 | 24.616 | 80 | 0.564199 | false |
krasnoperov/django-formalizr | formalizr/views.py | 1 | 3769 | import json
from django.contrib import messages
from django.http import HttpResponse
from django.views.generic.edit import CreateView, UpdateView, FormView
from django.core.serializers.json import DjangoJSONEncoder
class AjaxFormMixin(object):
"""
    Mixin that adds AJAX request support to a form view.
    Can be used with any view that has FormMixin.
"""
json_dumps_kwargs = None
success_message = ''
def get_success_message(self, cleaned_data):
return self.success_message % cleaned_data
def get_json_dumps_kwargs(self):
if self.json_dumps_kwargs is None:
self.json_dumps_kwargs = {}
self.json_dumps_kwargs.setdefault('ensure_ascii', False)
return self.json_dumps_kwargs
def render_to_json_response(self, context, **response_kwargs):
data = json.dumps(context, cls=DjangoJSONEncoder, **self.get_json_dumps_kwargs())
response_kwargs['content_type'] = 'application/json'
return HttpResponse(data, **response_kwargs)
def form_valid(self, form):
success_message = self.get_success_message(form.cleaned_data)
if success_message:
messages.info(self.request, success_message)
if self.request.is_ajax():
context = self.get_json_context(form)
return self.render_to_json_response(context)
else:
return super(AjaxFormMixin, self).form_valid(form)
def form_invalid(self, form):
if self.request.is_ajax():
context = {
'status': 'error',
'error': 'Bad Request'
}
errors = self.get_json_errors(form)
if errors:
context["errors"] = errors
return self.render_to_json_response(context, status=400)
else:
return super(AjaxFormMixin, self).form_invalid(form)
def is_result_requested(self):
return self.request.POST.get("_return", "redirect") == "result"
def get_json_context(self, form):
if self.request.POST.get("_return", "redirect") == "result":
context = {
"status": "success"
}
msgs = self.get_json_messages()
if msgs:
context["messages"] = msgs
obj = self.get_json_object(form)
if obj:
context["object"] = obj
else:
context = {
"status": "redirect",
"location": self.get_success_url()
}
return context
def get_json_messages(self):
msgs = []
for message in messages.get_messages(self.request):
msgs.append({
"level": message.tags,
"message": message.message,
})
return msgs
def get_json_errors(self, form):
errors = {}
for error in form.errors.iteritems():
errors.update({
form.prefix + "-" + error[0] if form.prefix else error[0]: [unicode(msg) for msg in error[1]]
})
return errors
def get_json_object(self, form):
"""
        Return a dict representation of the result (self.object or form.instance, etc.).
"""
return None
class AjaxModelFormMixin(AjaxFormMixin):
"""
    This mixin adds AJAX handling for model forms.
    Can be used with any view that has ModelFormMixin.
"""
def form_valid(self, form):
if self.request.is_ajax():
self.object = form.save()
return super(AjaxModelFormMixin, self).form_valid(form)
class AjaxFormView(AjaxFormMixin, FormView):
pass
class AjaxUpdateView(AjaxModelFormMixin, UpdateView):
pass
class AjaxCreateView(AjaxModelFormMixin, CreateView):
pass
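# Usage sketch (assumption; the model and form names below are illustrative only):
#
#   class ArticleCreateView(AjaxCreateView):
#       model = Article
#       form_class = ArticleForm
#       success_message = 'Article "%(title)s" created.'
#
# AJAX requests receive a JSON payload (redirect/result status, messages, errors),
# while non-AJAX requests keep the normal redirect behaviour of the wrapped view.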
| bsd-3-clause | 5,784,175,414,557,377,000 | 30.14876 | 109 | 0.59273 | false |
moelius/async-task-processor | async_task_processor/primitives/tarantool_task.py | 1 | 1986 | import asyncio
from async_task_processor.primitives.base_task import BaseTask
class TarantoolTask(BaseTask):
conn_max_retries = None
conn_retries = None
conn_retry_countdown = None
    ack = True  # Set to False to prevent the task from being acknowledged (acked) in Tarantool
connection = None
data = None
queue_name = None
_task = None
_tube = None
def __init__(self, loop, connection, tube, foo, args, bind, timeout, max_retries, retry_countdown,
conn_max_retries, conn_retry_countdown, name):
"""
:type connection: asynctnt.Connection
:type tube: asynctnt_queue.tube.Tube
"""
self.conn_max_retries = conn_max_retries or 0
self.conn_retry_countdown = conn_retry_countdown or 1
self.conn_retries = 0
self.queue_name = tube.name
self.connection = connection
self._tube = tube
super().__init__(loop, type(self).__name__, foo, args, bind, timeout, max_retries, retry_countdown, name)
def set_tube(self, task):
"""
:type task: asynctnt_queue.Task
:return:
"""
self._task = task
self.data = task.data
def reset(self):
self._task, self.data = None, None
async def _tnt_call(self, func_name, args, timeout):
return await self.connection.call(func_name, args, timeout=timeout)
def __call__(self, func_name, args, timeout=-1):
"""Tarantool command execute. You may use self(<tarantool_command>) in task function.
Use this method if you want to redeclare ack method for example or bury, release task manually.
:type func_name: str
:type args: list
:type timeout: int
:return:
"""
future = asyncio.run_coroutine_threadsafe(self._tnt_call(func_name, args, timeout=timeout), self.app.loop)
return future.result()
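    # Example (assumption, not from the original source): inside a task function,
    # self('box.info', []) would synchronously call the server-side box.info
    # routine through the application's event loop.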
@property
def tube(self):
return self._tube
@property
def task(self):
return self._task
| mit | -3,077,405,517,272,907,000 | 29.553846 | 114 | 0.6143 | false |
uncled1023/pygments | Pygments/pygments-lib/pygments/lexers/php.py | 1 | 10730 | # -*- coding: utf-8 -*-
"""
pygments.lexers.php
~~~~~~~~~~~~~~~~~~~
Lexers for PHP and related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, using, \
this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
from pygments.util import get_bool_opt, get_list_opt, iteritems
__all__ = ['ZephirLexer', 'PhpLexer']
class ZephirLexer(RegexLexer):
"""
For `Zephir language <http://zephir-lang.com/>`_ source code.
Zephir is a compiled high level language aimed
to the creation of C-extensions for PHP.
.. versionadded:: 2.0
"""
name = 'Zephir'
aliases = ['zephir']
filenames = ['*.zep']
zephir_keywords = ['fetch', 'echo', 'isset', 'empty']
zephir_type = ['bit', 'bits', 'string']
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|'
r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|'
r'empty)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|'
r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|'
r'float|unsigned|private|protected|public|short|static|self|throws|reverse|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|'
r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|'
r'window)\b', Name.Builtin),
(r'[$a-zA-Z_][\w\\]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class PhpLexer(RegexLexer):
"""
For `PHP <http://www.php.net/>`_ source code.
For PHP embedded in HTML, use the `HtmlPhpLexer`.
Additional options accepted:
`startinline`
If given and ``True`` the lexer starts highlighting with
php code (i.e.: no starting ``<?php`` required). The default
is ``False``.
`funcnamehighlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabledmodules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted
except the special ``'unknown'`` module that includes functions
that are known to php but are undocumented.
To get a list of allowed modules have a look into the
`_php_builtins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._php_builtins import MODULES
>>> MODULES.keys()
['PHP Options/Info', 'Zip', 'dba', ...]
In fact the names of those modules match the module names from
the php documentation.
"""
name = 'PHP'
aliases = ['php', 'php3', 'php4', 'php5']
filenames = ['*.php', '*.php[345]', '*.inc']
mimetypes = ['text/x-php']
# Note that a backslash is included in the following two patterns
# PHP uses a backslash as a namespace separator
_ident_char = r'[\\\w]|[^\x00-\x7f]'
_ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
_ident_end = r'(?:' + _ident_char + ')*'
_ident_inner = _ident_begin + _ident_end
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'<\?(php)?', Comment.Preproc, 'php'),
(r'[^<]+', Other),
(r'<', Other)
],
'php': [
(r'\?>', Comment.Preproc, '#pop'),
(r'(<<<)([\'"]?)(' + _ident_inner + r')(\2\n.*?\n\s*)(\3)(;?)(\n)',
bygroups(String, String, String.Delimiter, String, String.Delimiter,
Punctuation, Text)),
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(r'//.*?\n', Comment.Single),
# put the empty comment here, it is otherwise seen as
# the start of a docstring
(r'/\*\*/', Comment.Multiline),
(r'/\*\*.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
(r'(->|::)(\s*)(' + _ident_inner + ')',
bygroups(Operator, Text, Name.Attribute)),
(r'[~!%^&*+=|:.<>/@-]+', Operator),
(r'\?', Operator), # don't add to the charclass above!
(r'[\[\]{}();,]+', Punctuation),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
(r'(function)(\s+)(&?)(\s*)',
bygroups(Keyword, Text, Operator, Text), 'functionname'),
(r'(const)(\s+)(' + _ident_inner + ')',
bygroups(Keyword, Text, Name.Constant)),
(r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
r'FALSE|print|for|require|continue|foreach|require_once|'
r'declare|return|default|static|do|switch|die|stdClass|'
r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
r'virtual|endfor|include_once|while|endforeach|global|'
r'endif|list|endswitch|new|endwhile|not|'
r'array|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
r'catch|throw|this|use|namespace|trait|yield|'
r'finally)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
include('magicconstants'),
(r'\$\{\$+' + _ident_inner + '\}', Name.Variable),
(r'\$+' + _ident_inner, Name.Variable),
(_ident_inner, Name.Other),
(r'(\d+\.\d*|\d*\.\d+)(e[+-]?[0-9]+)?', Number.Float),
(r'\d+e[+-]?[0-9]+', Number.Float),
(r'0[0-7]+', Number.Oct),
(r'0x[a-f0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r'0b[01]+', Number.Bin),
(r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
(r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
(r'"', String.Double, 'string'),
],
'magicfuncs': [
# source: http://php.net/manual/en/language.oop5.magic.php
(words((
'__construct', '__destruct', '__call', '__callStatic', '__get', '__set',
'__isset', '__unset', '__sleep', '__wakeup', '__toString', '__invoke',
'__set_state', '__clone', '__debugInfo',), suffix=r'\b'),
Name.Function.Magic),
],
'magicconstants': [
# source: http://php.net/manual/en/language.constants.predefined.php
(words((
'__LINE__', '__FILE__', '__DIR__', '__FUNCTION__', '__CLASS__',
'__TRAIT__', '__METHOD__', '__NAMESPACE__',),
suffix=r'\b'),
Name.Constant),
],
'classname': [
(_ident_inner, Name.Class, '#pop')
],
'functionname': [
include('magicfuncs'),
(_ident_inner, Name.Function, '#pop'),
default('#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'[^{$"\\]+', String.Double),
(r'\\([nrt"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape),
(r'\$' + _ident_inner + '(\[\S+?\]|->' + _ident_inner + ')?',
String.Interpol),
(r'(\{\$\{)(.*?)(\}\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\{)(\$.*?)(\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\$\{)(\S+)(\})',
bygroups(String.Interpol, Name.Variable, String.Interpol)),
(r'[${\\]', String.Double)
],
}
def __init__(self, **options):
self.funcnamehighlighting = get_bool_opt(
options, 'funcnamehighlighting', True)
self.disabledmodules = get_list_opt(
options, 'disabledmodules', ['unknown'])
self.startinline = get_bool_opt(options, 'startinline', False)
# private option argument for the lexer itself
if '_startinline' in options:
self.startinline = options.pop('_startinline')
# collect activated functions in a set
self._functions = set()
if self.funcnamehighlighting:
from pygments.lexers._php_builtins import MODULES
for key, value in iteritems(MODULES):
if key not in self.disabledmodules:
self._functions.update(value)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.startinline:
stack.append('php')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Other:
if value in self._functions:
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if re.search(r'<\?(?!xml)', text):
rv += 0.3
return rv
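# Usage sketch (assumption): the lexer is normally used through pygments' public API, e.g.
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   print(highlight("<?php echo 'hi'; ?>", PhpLexer(), HtmlFormatter()))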
| bsd-2-clause | -9,095,089,548,564,730,000 | 39.187266 | 89 | 0.502703 | false |
rnirmal/savanna | savanna/tests/unit/plugins/hdp/validator_test.py | 1 | 5193 | # Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from savanna.plugins.general import exceptions as ex
from savanna.plugins.hdp import validator as v
import unittest2
class ValidatorTest(unittest2.TestCase):
def test_no_namenode(self):
cluster = TestCluster()
cluster.node_groups.append(TestNodeGroup(["GANGLIA_SERVER",
"AMBARI_SERVER",
"AMBARI_AGENT"]))
cluster.node_groups.append(TestNodeGroup(["GANGLIA_MONITOR",
"AMBARI_AGENT"]))
validator = v.Validator()
with self.assertRaises(ex.NotSingleNameNodeException):
validator.validate(cluster)
def test_with_namenode(self):
cluster = TestCluster()
cluster.node_groups.append(TestNodeGroup(["GANGLIA_SERVER",
"AMBARI_SERVER",
"AMBARI_AGENT",
"NAMENODE"]))
cluster.node_groups.append(TestNodeGroup(["GANGLIA_MONITOR",
"AMBARI_AGENT"]))
validator = v.Validator()
validator.validate(cluster)
def test_with_multiple_namenodes(self):
cluster = TestCluster()
cluster.node_groups.append(TestNodeGroup(["GANGLIA_SERVER",
"AMBARI_SERVER",
"AMBARI_AGENT",
"NAMENODE"]))
cluster.node_groups.append(TestNodeGroup(["GANGLIA_MONITOR",
"AMBARI_AGENT",
"NAMENODE"]))
validator = v.Validator()
with self.assertRaises(ex.NotSingleNameNodeException):
validator.validate(cluster)
def test_no_jobtracker(self):
cluster = TestCluster()
cluster.node_groups.append(TestNodeGroup(["GANGLIA_SERVER",
"AMBARI_SERVER",
"AMBARI_AGENT",
"NAMENODE"]))
cluster.node_groups.append(TestNodeGroup(["GANGLIA_MONITOR",
"AMBARI_AGENT",
"TASKTRACKER"]))
validator = v.Validator()
with self.assertRaises(ex.TaskTrackersWithoutJobTracker):
validator.validate(cluster)
def test_with_jobtracker(self):
cluster = TestCluster()
cluster.node_groups.append(TestNodeGroup(["GANGLIA_SERVER",
"AMBARI_SERVER",
"AMBARI_AGENT",
"NAMENODE",
"JOBTRACKER"]))
cluster.node_groups.append(TestNodeGroup(["GANGLIA_MONITOR",
"AMBARI_AGENT",
"TASKTRACKER"]))
validator = v.Validator()
validator.validate(cluster)
def test_no_ambari_server(self):
cluster = TestCluster()
cluster.node_groups.append(TestNodeGroup(["GANGLIA_SERVER",
"NAMENODE",
"AMBARI_AGENT"]))
cluster.node_groups.append(TestNodeGroup(["GANGLIA_MONITOR",
"AMBARI_AGENT"]))
validator = v.Validator()
with self.assertRaises(v.NotSingleAmbariServerException):
validator.validate(cluster)
def test_missing_ambari_agent(self):
cluster = TestCluster()
cluster.node_groups.append(TestNodeGroup(["GANGLIA_SERVER",
"NAMENODE",
"AMBARI_SERVER"]))
cluster.node_groups.append(TestNodeGroup(["GANGLIA_MONITOR",
"AMBARI_AGENT"]))
validator = v.Validator()
with self.assertRaises(v.AmbariAgentNumberException):
validator.validate(cluster)
class TestCluster(object):
def __init__(self):
self.node_groups = []
class TestNodeGroup(object):
def __init__(self, processes, count=1):
self.node_processes = processes
self.count = count or 1
self.name = 'TEST'
| apache-2.0 | 3,736,133,292,998,521,300 | 42.638655 | 69 | 0.494897 | false |
mateor/pants | src/python/pants/engine/engine.py | 1 | 5985 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from abc import abstractmethod
from twitter.common.collections import maybe_list
from pants.base.exceptions import TaskError
from pants.engine.nodes import Return, State, Throw
from pants.engine.storage import Cache, Storage
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class ExecutionError(Exception):
pass
class Engine(AbstractClass):
"""An engine for running a pants command line."""
class Result(datatype('Result', ['error', 'root_products'])):
"""Represents the result of a single engine run."""
@classmethod
def finished(cls, root_products):
"""Create a success or partial success result from a finished run.
Runs can either finish with no errors, satisfying all promises, or they can partially finish
if run in fail-slow mode producing as many products as possible.
:param root_products: Mapping of root SelectNodes to their State values.
:rtype: `Engine.Result`
"""
return cls(error=None, root_products=root_products)
@classmethod
def failure(cls, error):
"""Create a failure result.
A failure result represent a run with a fatal error. It presents the error but no
products.
:param error: The execution error encountered.
:type error: :class:`pants.base.exceptions.TaskError`
:rtype: `Engine.Result`
"""
return cls(error=error, root_products=None)
def __init__(self, scheduler, storage=None, cache=None, use_cache=True):
"""
:param scheduler: The local scheduler for creating execution graphs.
:type scheduler: :class:`pants.engine.scheduler.LocalScheduler`
:param storage: The storage instance for serializables keyed by their hashes.
:type storage: :class:`pants.engine.storage.Storage`
:param cache: The cache instance for storing execution results, by default it uses the same
Storage instance if not specified.
:type cache: :class:`pants.engine.storage.Cache`
:param use_cache: True to enable usage of the cache. The cache incurs a large amount of
overhead for small tasks, and needs TODO: further improvement.
:type use_cache: bool
"""
self._scheduler = scheduler
self._storage = storage or Storage.create()
self._cache = cache or Cache.create(storage)
self._use_cache = use_cache
def execute(self, execution_request):
"""Executes the requested build.
:param execution_request: The description of the goals to achieve.
:type execution_request: :class:`ExecutionRequest`
:returns: The result of the run.
:rtype: :class:`Engine.Result`
"""
try:
self.reduce(execution_request)
return self.Result.finished(self._scheduler.root_entries(execution_request))
except TaskError as e:
return self.Result.failure(e)
def product_request(self, product, subjects):
"""Executes a request for a singular product type from the scheduler for one or more subjects
and yields the products.
:param class product: A product type for the request.
:param list subjects: A list of subjects for the request.
:yields: The requested products.
"""
request = self._scheduler.execution_request([product], subjects)
result = self.execute(request)
if result.error:
raise result.error
result_items = self._scheduler.root_entries(request).items()
# State validation.
unknown_state_types = tuple(
type(state) for _, state in result_items if type(state) not in (Throw, Return)
)
if unknown_state_types:
State.raise_unrecognized(unknown_state_types)
# Throw handling.
# TODO: See https://github.com/pantsbuild/pants/issues/3912
throw_roots = tuple(root for root, state in result_items if type(state) is Throw)
if throw_roots:
cumulative_trace = '\n'.join(self._scheduler.trace())
raise ExecutionError('Received unexpected Throw state(s):\n{}'.format(cumulative_trace))
# Return handling.
returns = tuple(state.value for _, state in result_items if type(state) is Return)
for return_value in returns:
for computed_product in maybe_list(return_value, expected_type=product):
yield computed_product
def close(self):
"""Shutdown this engine instance, releasing resources it was using."""
self._storage.close()
self._cache.close()
def cache_stats(self):
"""Returns cache stats for the engine."""
return self._cache.get_stats()
def _maybe_cache_get(self, node_entry, runnable):
"""If caching is enabled for the given Entry, create a key and perform a lookup.
The sole purpose of a keyed request is to get a stable cache key, so we can sort
keyed_request.dependencies by keys as opposed to requiring dep nodes to support compare.
:returns: A tuple of a key and result, either of which may be None.
"""
if not self._use_cache or not runnable.cacheable:
return None, None
return self._cache.get(runnable)
def _maybe_cache_put(self, key, result):
if key is not None:
self._cache.put(key, result)
@abstractmethod
def reduce(self, execution_request):
"""Reduce the given execution graph returning its root products.
:param execution_request: The description of the goals to achieve.
:type execution_request: :class:`ExecutionRequest`
:returns: The root products promised by the execution graph.
:rtype: dict of (:class:`Promise`, product)
"""
class LocalSerialEngine(Engine):
"""An engine that runs tasks locally and serially in-process."""
def reduce(self, execution_request):
self._scheduler.schedule(execution_request)
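# Usage sketch (assumption; `scheduler`, the product type and the subject are
# illustrative): with a configured LocalScheduler one might run
#   engine = LocalSerialEngine(scheduler)
#   products = list(engine.product_request(SomeProduct, [some_subject]))
#   engine.close()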
| apache-2.0 | -4,252,687,880,089,084,000 | 35.717791 | 98 | 0.705263 | false |
nttks/edx-platform | common/djangoapps/student/models.py | 1 | 77455 | """
Models for User Information (students, staff, etc)
Migration Notes
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration student --auto description_of_your_change
3. Add the migration file created in edx-platform/common/djangoapps/student/migrations/
"""
from collections import defaultdict, OrderedDict
from datetime import datetime, timedelta
from functools import total_ordering
import hashlib
from importlib import import_module
import json
import logging
from pytz import UTC
from urllib import urlencode
import uuid
import analytics
from config_models.models import ConfigurationModel
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib.auth.hashers import make_password
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.db import models, IntegrityError, transaction
from django.db.models import Count
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver, Signal
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_noop
from django.core.cache import cache
from django_countries.fields import CountryField
import dogstats_wrapper as dog_stats_api
from eventtracking import tracker
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from simple_history.models import HistoricalRecords
from track import contexts
from xmodule_django.models import CourseKeyField, NoneToEmptyManager
from certificates.models import GeneratedCertificate
from course_modes.models import CourseMode
from enrollment.api import _default_course_mode
import lms.lib.comment_client as cc
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client, ECOMMERCE_DATE_FORMAT
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from util.model_utils import emit_field_changed_events, get_changed_fields_dict
from util.query import use_read_replica_if_available
from util.milestones_helpers import is_entrance_exams_enabled
from openedx.core.djangoapps.ga_self_paced import api as self_paced_api
UNENROLL_DONE = Signal(providing_args=["course_enrollment", "skip_refund"])
log = logging.getLogger(__name__)
AUDIT_LOG = logging.getLogger("audit")
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore # pylint: disable=invalid-name
UNENROLLED_TO_ALLOWEDTOENROLL = 'from unenrolled to allowed to enroll'
ALLOWEDTOENROLL_TO_ENROLLED = 'from allowed to enroll to enrolled'
ENROLLED_TO_ENROLLED = 'from enrolled to enrolled'
ENROLLED_TO_UNENROLLED = 'from enrolled to unenrolled'
UNENROLLED_TO_ENROLLED = 'from unenrolled to enrolled'
ALLOWEDTOENROLL_TO_UNENROLLED = 'from allowed to enroll to enrolled'
UNENROLLED_TO_UNENROLLED = 'from unenrolled to unenrolled'
DEFAULT_TRANSITION_STATE = 'N/A'
TRANSITION_STATES = (
(UNENROLLED_TO_ALLOWEDTOENROLL, UNENROLLED_TO_ALLOWEDTOENROLL),
(ALLOWEDTOENROLL_TO_ENROLLED, ALLOWEDTOENROLL_TO_ENROLLED),
(ENROLLED_TO_ENROLLED, ENROLLED_TO_ENROLLED),
(ENROLLED_TO_UNENROLLED, ENROLLED_TO_UNENROLLED),
(UNENROLLED_TO_ENROLLED, UNENROLLED_TO_ENROLLED),
(ALLOWEDTOENROLL_TO_UNENROLLED, ALLOWEDTOENROLL_TO_UNENROLLED),
(UNENROLLED_TO_UNENROLLED, UNENROLLED_TO_UNENROLLED),
(DEFAULT_TRANSITION_STATE, DEFAULT_TRANSITION_STATE)
)
class AnonymousUserId(models.Model):
"""
This table contains user, course_Id and anonymous_user_id
Purpose of this table is to provide user by anonymous_user_id.
We generate anonymous_user_id using md5 algorithm,
and use result in hex form, so its length is equal to 32 bytes.
"""
objects = NoneToEmptyManager()
user = models.ForeignKey(User, db_index=True)
anonymous_user_id = models.CharField(unique=True, max_length=32)
course_id = CourseKeyField(db_index=True, max_length=255, blank=True)
unique_together = (user, course_id)
def anonymous_id_for_user(user, course_id, save=True):
"""
Return a unique id for a (user, course) pair, suitable for inserting
into e.g. personalized survey links.
If user is an `AnonymousUser`, returns `None`
Keyword arguments:
save -- Whether the id should be saved in an AnonymousUserId object.
"""
    # This makes it possible to get an xblock instance in xblock_noauth handlers, where the user is unauthenticated.
if user.is_anonymous():
return None
cached_id = getattr(user, '_anonymous_id', {}).get(course_id)
if cached_id is not None:
return cached_id
# include the secret key as a salt, and to make the ids unique across different LMS installs.
hasher = hashlib.md5()
hasher.update(settings.ANONYMOUS_ID_SECRET_KEY)
hasher.update(unicode(user.id))
if course_id:
hasher.update(course_id.to_deprecated_string().encode('utf-8'))
digest = hasher.hexdigest()
if not hasattr(user, '_anonymous_id'):
user._anonymous_id = {} # pylint: disable=protected-access
user._anonymous_id[course_id] = digest # pylint: disable=protected-access
if save is False:
return digest
try:
anonymous_user_id, __ = AnonymousUserId.objects.get_or_create(
defaults={'anonymous_user_id': digest},
user=user,
course_id=course_id
)
if anonymous_user_id.anonymous_user_id != digest:
log.error(
u"Stored anonymous user id %r for user %r "
u"in course %r doesn't match computed id %r",
user,
course_id,
anonymous_user_id.anonymous_user_id,
digest
)
except IntegrityError:
# Another thread has already created this entry, so
# continue
pass
return digest
def user_by_anonymous_id(uid):
"""
Return user by anonymous_user_id using AnonymousUserId lookup table.
Do not raise `django.ObjectDoesNotExist` exception,
if there is no user for anonymous_student_id,
because this function will be used inside xmodule w/o django access.
"""
if uid is None:
return None
try:
return User.objects.get(anonymoususerid__anonymous_user_id=uid)
except ObjectDoesNotExist:
return None
class UserStanding(models.Model):
"""
This table contains a student's account's status.
Currently, we're only disabling accounts; in the future we can imagine
taking away more specific privileges, like forums access, or adding
more specific karma levels or probationary stages.
"""
ACCOUNT_DISABLED = "disabled"
ACCOUNT_ENABLED = "enabled"
USER_STANDING_CHOICES = (
(ACCOUNT_DISABLED, u"Account Disabled"),
(ACCOUNT_ENABLED, u"Account Enabled"),
)
user = models.OneToOneField(User, db_index=True, related_name='standing')
account_status = models.CharField(
blank=True, max_length=31, choices=USER_STANDING_CHOICES
)
changed_by = models.ForeignKey(User, blank=True)
standing_last_changed_at = models.DateTimeField(auto_now=True)
resign_reason = models.TextField(null=True, max_length=1000)
class UserProfile(models.Model):
"""This is where we store all the user demographic fields. We have a
separate table for this rather than extending the built-in Django auth_user.
Notes:
* Some fields are legacy ones from the first run of 6.002, from which
we imported many users.
* Fields like name and address are intentionally open ended, to account
for international variations. An unfortunate side-effect is that we
cannot efficiently sort on last names for instance.
Replication:
* Only the Portal servers should ever modify this information.
* All fields are replicated into relevant Course databases
Some of the fields are legacy ones that were captured during the initial
MITx fall prototype.
"""
# cache key format e.g user.<user_id>.profile.country = 'SG'
PROFILE_COUNTRY_CACHE_KEY = u"user.{user_id}.profile.country"
class Meta(object):
db_table = "auth_userprofile"
# CRITICAL TODO/SECURITY
# Sanitize all fields.
# This is not visible to other users, but could introduce holes later
user = models.OneToOneField(User, unique=True, db_index=True, related_name='profile')
name = models.CharField(blank=True, max_length=255, db_index=True)
meta = models.TextField(blank=True) # JSON dictionary for future expansion
courseware = models.CharField(blank=True, max_length=255, default='course.xml')
# Location is no longer used, but is held here for backwards compatibility
# for users imported from our first class.
language = models.CharField(blank=True, max_length=255, db_index=True)
location = models.CharField(blank=True, max_length=255, db_index=True)
# Optional demographic data we started capturing from Fall 2012
this_year = datetime.now(UTC).year
VALID_YEARS = range(this_year, this_year - 120, -1)
year_of_birth = models.IntegerField(blank=True, null=True, db_index=True)
GENDER_CHOICES = (
('m', ugettext_noop('Male')),
('f', ugettext_noop('Female')),
# Translators: 'Other' refers to the student's gender
('o', ugettext_noop('Other/Prefer Not to Say'))
)
gender = models.CharField(
blank=True, null=True, max_length=6, db_index=True, choices=GENDER_CHOICES
)
# [03/21/2013] removed these, but leaving comment since there'll still be
# p_se and p_oth in the existing data in db.
# ('p_se', 'Doctorate in science or engineering'),
# ('p_oth', 'Doctorate in another field'),
LEVEL_OF_EDUCATION_CHOICES = (
('p', ugettext_noop('Doctorate')),
('m', ugettext_noop("Master's or professional degree")),
('b', ugettext_noop("Bachelor's degree")),
('a', ugettext_noop("Associate degree")),
('hs', ugettext_noop("Secondary/high school")),
('jhs', ugettext_noop("Junior secondary/junior high/middle school")),
('el', ugettext_noop("Elementary/primary school")),
# Translators: 'None' refers to the student's level of education
('none', ugettext_noop("No Formal Education")),
# Translators: 'Other' refers to the student's level of education
('other', ugettext_noop("Other Education"))
)
level_of_education = models.CharField(
blank=True, null=True, max_length=6, db_index=True,
choices=LEVEL_OF_EDUCATION_CHOICES
)
mailing_address = models.TextField(blank=True, null=True)
city = models.TextField(blank=True, null=True)
country = CountryField(blank=True, null=True)
goals = models.TextField(blank=True, null=True)
allow_certificate = models.BooleanField(default=1)
bio = models.CharField(blank=True, null=True, max_length=3000, db_index=False)
profile_image_uploaded_at = models.DateTimeField(null=True)
@property
def has_profile_image(self):
"""
Convenience method that returns a boolean indicating whether or not
this user has uploaded a profile image.
"""
return self.profile_image_uploaded_at is not None
@property
def age(self):
""" Convenience method that returns the age given a year_of_birth. """
year_of_birth = self.year_of_birth
year = datetime.now(UTC).year
if year_of_birth is not None:
return year - year_of_birth
@property
def level_of_education_display(self):
""" Convenience method that returns the human readable level of education. """
if self.level_of_education:
return self.__enumerable_to_display(self.LEVEL_OF_EDUCATION_CHOICES, self.level_of_education)
@property
def gender_display(self):
""" Convenience method that returns the human readable gender. """
if self.gender:
return self.__enumerable_to_display(self.GENDER_CHOICES, self.gender)
def get_meta(self): # pylint: disable=missing-docstring
js_str = self.meta
if not js_str:
js_str = dict()
else:
js_str = json.loads(self.meta)
return js_str
def set_meta(self, meta_json): # pylint: disable=missing-docstring
self.meta = json.dumps(meta_json)
def set_login_session(self, session_id=None):
"""
Sets the current session id for the logged-in user.
If session_id doesn't match the existing session,
deletes the old session object.
"""
meta = self.get_meta()
old_login = meta.get('session_id', None)
if old_login:
SessionStore(session_key=old_login).delete()
meta['session_id'] = session_id
self.set_meta(meta)
self.save()
def requires_parental_consent(self, date=None, age_limit=None, default_requires_consent=True):
"""Returns true if this user requires parental consent.
Args:
date (Date): The date for which consent needs to be tested (defaults to now).
age_limit (int): The age limit at which parental consent is no longer required.
This defaults to the value of the setting 'PARENTAL_CONTROL_AGE_LIMIT'.
default_requires_consent (bool): True if users require parental consent if they
have no specified year of birth (default is True).
Returns:
True if the user requires parental consent.
"""
if age_limit is None:
age_limit = getattr(settings, 'PARENTAL_CONSENT_AGE_LIMIT', None)
if age_limit is None:
return False
# Return True if either:
# a) The user has a year of birth specified and that year is fewer years in the past than the limit.
# b) The user has no year of birth specified and the default is to require consent.
#
# Note: we have to be conservative using the user's year of birth as their birth date could be
# December 31st. This means that if the number of years since their birth year is exactly equal
# to the age limit then we have to assume that they might still not be old enough.
year_of_birth = self.year_of_birth
if year_of_birth is None:
return default_requires_consent
if date is None:
age = self.age
else:
age = date.year - year_of_birth
return age <= age_limit
def __enumerable_to_display(self, enumerables, enum_value):
""" Get the human readable value from an enumerable list of key-value pairs. """
return dict(enumerables)[enum_value]
@classmethod
def country_cache_key_name(cls, user_id):
"""Return cache key name to be used to cache current country.
Args:
user_id(int): Id of user.
Returns:
Unicode cache key
"""
return cls.PROFILE_COUNTRY_CACHE_KEY.format(user_id=user_id)
@receiver(models.signals.post_save, sender=UserProfile)
def invalidate_user_profile_country_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name
"""Invalidate the cache of country in UserProfile model. """
changed_fields = getattr(instance, '_changed_fields', {})
if 'country' in changed_fields:
cache_key = UserProfile.country_cache_key_name(instance.user_id)
cache.delete(cache_key)
log.info("Country changed in UserProfile for %s, cache deleted", instance.user_id)
@receiver(pre_save, sender=UserProfile)
def user_profile_pre_save_callback(sender, **kwargs):
"""
Ensure consistency of a user profile before saving it.
"""
user_profile = kwargs['instance']
# Remove profile images for users who require parental consent
if user_profile.requires_parental_consent() and user_profile.has_profile_image:
user_profile.profile_image_uploaded_at = None
# Cache "old" field values on the model instance so that they can be
# retrieved in the post_save callback when we emit an event with new and
# old field values.
user_profile._changed_fields = get_changed_fields_dict(user_profile, sender)
@receiver(post_save, sender=UserProfile)
def user_profile_post_save_callback(sender, **kwargs):
"""
Emit analytics events after saving the UserProfile.
"""
user_profile = kwargs['instance']
# pylint: disable=protected-access
emit_field_changed_events(
user_profile,
user_profile.user,
sender._meta.db_table,
excluded_fields=['meta']
)
@receiver(pre_save, sender=User)
def user_pre_save_callback(sender, **kwargs):
"""
Capture old fields on the user instance before save and cache them as a
private field on the current model for use in the post_save callback.
"""
user = kwargs['instance']
user._changed_fields = get_changed_fields_dict(user, sender)
@receiver(post_save, sender=User)
def user_post_save_callback(sender, **kwargs):
"""
Emit analytics events after saving the User.
"""
user = kwargs['instance']
# pylint: disable=protected-access
emit_field_changed_events(
user,
user,
sender._meta.db_table,
excluded_fields=['last_login', 'first_name', 'last_name'],
hidden_fields=['password']
)
class UserSignupSource(models.Model):
"""
This table contains information about users registering
via Micro-Sites
"""
user = models.ForeignKey(User, db_index=True)
site = models.CharField(max_length=255, db_index=True)
def unique_id_for_user(user, save=True):
"""
Return a unique id for a user, suitable for inserting into
e.g. personalized survey links.
Keyword arguments:
save -- Whether the id should be saved in an AnonymousUserId object.
"""
# Setting course_id to '' makes it not affect the generated hash,
# and thus produce the old per-student anonymous id
return anonymous_id_for_user(user, None, save=save)
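# Illustrative usage sketch (not part of the original module): building a
# personalized survey link from the per-student anonymous id. The URL below is
# a made-up placeholder.
#
#     anon_id = unique_id_for_user(request.user, save=False)
#     survey_link = "https://example.com/survey?uid={}".format(anon_id)
#
# Passing save=False skips persisting an AnonymousUserId object, which is
# enough when the id only needs to be rendered into a link.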
# TODO: Should be renamed to generic UserGroup, and possibly
# Given an optional field for type of group
class UserTestGroup(models.Model):
users = models.ManyToManyField(User, db_index=True)
name = models.CharField(blank=False, max_length=32, db_index=True)
description = models.TextField(blank=True)
class Registration(models.Model):
''' Allows us to wait for e-mail before user is registered. A
registration profile is created when the user creates an
account, but that account is inactive. Once the user clicks
on the activation key, it becomes active. '''
class Meta(object):
db_table = "auth_registration"
user = models.OneToOneField(User)
activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)
modified = models.DateTimeField(null=True, auto_now=True)
masked = models.BooleanField(default=False)
def register(self, user):
# MINOR TODO: Switch to crypto-secure key
self.activation_key = uuid.uuid4().hex
self.user = user
self.save()
def activate(self):
self.user.is_active = True
self.user.save()
# update modified
self.save()
def update_masked(self):
self.masked = True
self.save()
class PendingNameChange(models.Model):
user = models.OneToOneField(User, unique=True, db_index=True)
new_name = models.CharField(blank=True, max_length=255)
rationale = models.CharField(blank=True, max_length=1024)
class PendingEmailChange(models.Model):
user = models.OneToOneField(User, unique=True, db_index=True)
new_email = models.CharField(blank=True, max_length=255, db_index=True)
activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)
def request_change(self, email):
"""Request a change to a user's email.
Implicitly saves the pending email change record.
Arguments:
email (unicode): The proposed new email for the user.
Returns:
unicode: The activation code to confirm the change.
"""
self.new_email = email
self.activation_key = uuid.uuid4().hex
self.save()
return self.activation_key
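    # Illustrative usage sketch (not part of the original module): a caller
    # records the pending change and then delivers the returned key to the new
    # address; the email value and `some_user` are placeholders.
    #
    #     pending = PendingEmailChange(user=some_user)
    #     key = pending.request_change("new.address@example.com")
    #     # send `key` to the new address so the change can be confirmed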
EVENT_NAME_ENROLLMENT_ACTIVATED = 'edx.course.enrollment.activated'
EVENT_NAME_ENROLLMENT_DEACTIVATED = 'edx.course.enrollment.deactivated'
EVENT_NAME_ENROLLMENT_MODE_CHANGED = 'edx.course.enrollment.mode_changed'
class PasswordHistory(models.Model):
"""
This model will keep track of past passwords that a user has used
    as well as providing constraints (e.g. can't reuse passwords)
"""
user = models.ForeignKey(User)
password = models.CharField(max_length=128)
time_set = models.DateTimeField(default=timezone.now)
def create(self, user):
"""
This will copy over the current password, if any of the configuration has been turned on
"""
if not (PasswordHistory.is_student_password_reuse_restricted() or
PasswordHistory.is_staff_password_reuse_restricted() or
PasswordHistory.is_password_reset_frequency_restricted() or
PasswordHistory.is_staff_forced_password_reset_enabled() or
PasswordHistory.is_student_forced_password_reset_enabled()):
return
self.user = user
self.password = user.password
self.save()
@classmethod
def is_student_password_reuse_restricted(cls):
"""
Returns whether the configuration which limits password reuse has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE', 0
)
return min_diff_pw > 0
@classmethod
def is_staff_password_reuse_restricted(cls):
"""
Returns whether the configuration which limits password reuse has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE', 0
)
return min_diff_pw > 0
@classmethod
def is_password_reset_frequency_restricted(cls):
"""
Returns whether the configuration which limits the password reset frequency has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS'
)
return min_days_between_reset
@classmethod
def is_staff_forced_password_reset_enabled(cls):
"""
Returns whether the configuration which forces password resets to occur has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS'
)
return min_days_between_reset
@classmethod
def is_student_forced_password_reset_enabled(cls):
"""
Returns whether the configuration which forces password resets to occur has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_days_pw_reset = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS'
)
return min_days_pw_reset
@classmethod
def should_user_reset_password_now(cls, user):
"""
Returns whether a password has 'expired' and should be reset. Note there are two different
expiry policies for staff and students
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
days_before_password_reset = None
if user.is_staff:
if cls.is_staff_forced_password_reset_enabled():
days_before_password_reset = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS']
elif cls.is_student_forced_password_reset_enabled():
days_before_password_reset = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS']
if days_before_password_reset:
history = PasswordHistory.objects.filter(user=user).order_by('-time_set')
time_last_reset = None
if history:
# first element should be the last time we reset password
time_last_reset = history[0].time_set
else:
# no history, then let's take the date the user joined
time_last_reset = user.date_joined
now = timezone.now()
delta = now - time_last_reset
return delta.days >= days_before_password_reset
return False
@classmethod
def is_password_reset_too_soon(cls, user):
"""
Verifies that the password is not getting reset too frequently
"""
if not cls.is_password_reset_frequency_restricted():
return False
history = PasswordHistory.objects.filter(user=user).order_by('-time_set')
if not history:
return False
now = timezone.now()
delta = now - history[0].time_set
return delta.days < settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
@classmethod
def is_allowable_password_reuse(cls, user, new_password):
"""
Verifies that the password adheres to the reuse policies
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return True
if user.is_staff and cls.is_staff_password_reuse_restricted():
min_diff_passwords_required = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
elif cls.is_student_password_reuse_restricted():
min_diff_passwords_required = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
else:
min_diff_passwords_required = 0
# just limit the result set to the number of different
        # passwords we need
history = PasswordHistory.objects.filter(user=user).order_by('-time_set')[:min_diff_passwords_required]
for entry in history:
# be sure to re-use the same salt
# NOTE, how the salt is serialized in the password field is dependent on the algorithm
# in pbkdf2_sha256 [LMS] it's the 3rd element, in sha1 [unit tests] it's the 2nd element
hash_elements = entry.password.split('$')
algorithm = hash_elements[0]
if algorithm == 'pbkdf2_sha256':
hashed_password = make_password(new_password, hash_elements[2])
elif algorithm == 'sha1':
hashed_password = make_password(new_password, hash_elements[1])
else:
# This means we got something unexpected. We don't want to throw an exception, but
# log as an error and basically allow any password reuse
AUDIT_LOG.error('''
Unknown password hashing algorithm "{0}" found in existing password
hash, password reuse policy will not be enforced!!!
'''.format(algorithm))
return True
if entry.password == hashed_password:
return False
return True
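    # Illustrative sketch of the salt handling above (not part of the original
    # module). A Django pbkdf2_sha256 entry such as
    #     'pbkdf2_sha256$20000$somesalt$b64digest...'
    # splits on '$' into [algorithm, iterations, salt, digest], so the salt is
    # element 2, while a sha1 entry 'sha1$somesalt$hexdigest...' splits into
    # [algorithm, salt, digest], so the salt is element 1. Reusing that salt in
    # make_password() makes the fresh hash directly comparable to the stored one.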
class LoginFailures(models.Model):
"""
This model will keep track of failed login attempts
"""
user = models.ForeignKey(User)
failure_count = models.IntegerField(default=0)
lockout_until = models.DateTimeField(null=True)
@classmethod
def is_feature_enabled(cls):
"""
Returns whether the feature flag around this functionality has been set
"""
return settings.FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS']
@classmethod
def is_user_locked_out(cls, user):
"""
        Static method to return whether a given user has his/her account locked out
"""
try:
record = LoginFailures.objects.get(user=user)
if not record.lockout_until:
return False
now = datetime.now(UTC)
until = record.lockout_until
is_locked_out = until and now < until
return is_locked_out
except ObjectDoesNotExist:
return False
@classmethod
def increment_lockout_counter(cls, user):
"""
Ticks the failed attempt counter
"""
record, _ = LoginFailures.objects.get_or_create(user=user)
record.failure_count = record.failure_count + 1
max_failures_allowed = settings.MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED
# did we go over the limit in attempts
if record.failure_count >= max_failures_allowed:
# yes, then store when this account is locked out until
lockout_period_secs = settings.MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS
record.lockout_until = datetime.now(UTC) + timedelta(seconds=lockout_period_secs)
record.save()
@classmethod
def clear_lockout_counter(cls, user):
"""
Removes the lockout counters (normally called after a successful login)
"""
try:
entry = LoginFailures.objects.get(user=user)
entry.delete()
except ObjectDoesNotExist:
return
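    # Illustrative login-view sketch (not part of the original module) showing
    # how the classmethods above are typically combined; `user` and `password`
    # follow the usual Django authentication conventions:
    #
    #     if LoginFailures.is_feature_enabled() and LoginFailures.is_user_locked_out(user):
    #         raise PermissionDenied()
    #     if authenticate(username=user.username, password=password) is None:
    #         LoginFailures.increment_lockout_counter(user)
    #     else:
    #         LoginFailures.clear_lockout_counter(user)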
class CourseEnrollmentException(Exception):
pass
class NonExistentCourseError(CourseEnrollmentException):
pass
class EnrollmentClosedError(CourseEnrollmentException):
pass
class CourseFullError(CourseEnrollmentException):
pass
class AlreadyEnrolledError(CourseEnrollmentException):
pass
class CourseEnrollmentManager(models.Manager):
"""
Custom manager for CourseEnrollment with Table-level filter methods.
"""
def num_enrolled_in(self, course_id):
"""
Returns the count of active enrollments in a course.
'course_id' is the course_id to return enrollments
"""
enrollment_number = super(CourseEnrollmentManager, self).get_queryset().filter(
course_id=course_id,
is_active=1
).count()
return enrollment_number
def is_course_full(self, course):
"""
        Returns a boolean value regarding whether a course has already reached its max enrollment
capacity
"""
is_course_full = False
if course.max_student_enrollments_allowed is not None:
is_course_full = self.num_enrolled_in(course.id) >= course.max_student_enrollments_allowed
return is_course_full
def users_enrolled_in(self, course_id):
"""Return a queryset of User for every user enrolled in the course."""
return User.objects.filter(
courseenrollment__course_id=course_id,
courseenrollment__is_active=True
)
def enrollment_counts(self, course_id):
"""
Returns a dictionary that stores the total enrollment count for a course, as well as the
enrollment count for each individual mode.
"""
# Unfortunately, Django's "group by"-style queries look super-awkward
query = use_read_replica_if_available(
super(CourseEnrollmentManager, self).get_queryset().filter(course_id=course_id, is_active=True).values(
'mode').order_by().annotate(Count('mode')))
total = 0
enroll_dict = defaultdict(int)
for item in query:
enroll_dict[item['mode']] = item['mode__count']
total += item['mode__count']
enroll_dict['total'] = total
return enroll_dict
def enrolled_and_dropped_out_users(self, course_id):
"""Return a queryset of Users in the course."""
return User.objects.filter(
courseenrollment__course_id=course_id
)
class CourseEnrollment(models.Model):
"""
Represents a Student's Enrollment record for a single Course. You should
generally not manipulate CourseEnrollment objects directly, but use the
classmethods provided to enroll, unenroll, or check on the enrollment status
of a given student.
We're starting to consolidate course enrollment logic in this class, but
more should be brought in (such as checking against CourseEnrollmentAllowed,
checking course dates, user permissions, etc.) This logic is currently
scattered across our views.
"""
MODEL_TAGS = ['course_id', 'is_active', 'mode']
user = models.ForeignKey(User)
course_id = CourseKeyField(max_length=255, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
# If is_active is False, then the student is not considered to be enrolled
# in the course (is_enrolled() will return False)
is_active = models.BooleanField(default=True)
# Represents the modes that are possible. We'll update this later with a
# list of possible values.
mode = models.CharField(default=CourseMode.DEFAULT_MODE_SLUG, max_length=100)
objects = CourseEnrollmentManager()
# Maintain a history of requirement status updates for auditing purposes
history = HistoricalRecords()
# cache key format e.g enrollment.<username>.<course_key>.mode = 'honor'
COURSE_ENROLLMENT_CACHE_KEY = u"enrollment.{}.{}.mode"
class Meta(object):
unique_together = (('user', 'course_id'),)
ordering = ('user', 'course_id')
def __init__(self, *args, **kwargs):
super(CourseEnrollment, self).__init__(*args, **kwargs)
# Private variable for storing course_overview to minimize calls to the database.
# When the property .course_overview is accessed for the first time, this variable will be set.
self._course_overview = None
def __unicode__(self):
return (
"[CourseEnrollment] {}: {} ({}); active: ({})"
).format(self.user, self.course_id, self.created, self.is_active)
@classmethod
@transaction.atomic
def get_or_create_enrollment(cls, user, course_key):
"""
Create an enrollment for a user in a class. By default *this enrollment
is not active*. This is useful for when an enrollment needs to go
through some sort of approval process before being activated. If you
don't need this functionality, just call `enroll()` instead.
Returns a CoursewareEnrollment object.
`user` is a Django User object. If it hasn't been saved yet (no `.id`
attribute), this method will automatically save it before
adding an enrollment for it.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
It is expected that this method is called from a method which has already
verified the user authentication and access.
"""
# If we're passing in a newly constructed (i.e. not yet persisted) User,
# save it to the database so that it can have an ID that we can throw
# into our CourseEnrollment object. Otherwise, we'll get an
# IntegrityError for having a null user_id.
assert isinstance(course_key, CourseKey)
if user.id is None:
user.save()
enrollment, created = CourseEnrollment.objects.get_or_create(
user=user,
course_id=course_key,
)
# If we *did* just create a new enrollment, set some defaults
if created:
enrollment.mode = CourseMode.DEFAULT_MODE_SLUG
enrollment.is_active = False
enrollment.save()
return enrollment
@classmethod
def get_enrollment(cls, user, course_key):
"""Returns a CoursewareEnrollment object.
Args:
user (User): The user associated with the enrollment.
course_id (CourseKey): The key of the course associated with the enrollment.
Returns:
Course enrollment object or None
"""
try:
return CourseEnrollment.objects.get(
user=user,
course_id=course_key
)
except cls.DoesNotExist:
return None
@classmethod
def is_enrollment_closed(cls, user, course):
"""
Returns a boolean value regarding whether the user has access to enroll in the course. Returns False if the
enrollment has been closed.
"""
# Disable the pylint error here, as per ormsbee. This local import was previously
# in CourseEnrollment.enroll
from courseware.access import has_access # pylint: disable=import-error
return not has_access(user, 'enroll', course)
def update_enrollment(self, mode=None, is_active=None, skip_refund=False):
"""
Updates an enrollment for a user in a class. This includes options
like changing the mode, toggling is_active True/False, etc.
Also emits relevant events for analytics purposes.
This saves immediately.
"""
activation_changed = False
# if is_active is None, then the call to update_enrollment didn't specify
# any value, so just leave is_active as it is
if self.is_active != is_active and is_active is not None:
self.is_active = is_active
activation_changed = True
mode_changed = False
# if mode is None, the call to update_enrollment didn't specify a new
# mode, so leave as-is
if self.mode != mode and mode is not None:
self.mode = mode
mode_changed = True
if activation_changed or mode_changed:
self.save()
if activation_changed:
if self.is_active:
self.emit_event(EVENT_NAME_ENROLLMENT_ACTIVATED)
dog_stats_api.increment(
"common.student.enrollment",
tags=[u"org:{}".format(self.course_id.org),
u"offering:{}".format(self.course_id.offering),
u"mode:{}".format(self.mode)]
)
else:
UNENROLL_DONE.send(sender=None, course_enrollment=self, skip_refund=skip_refund)
self.emit_event(EVENT_NAME_ENROLLMENT_DEACTIVATED)
dog_stats_api.increment(
"common.student.unenrollment",
tags=[u"org:{}".format(self.course_id.org),
u"offering:{}".format(self.course_id.offering),
u"mode:{}".format(self.mode)]
)
if mode_changed:
# Only emit mode change events when the user's enrollment
# mode has changed from its previous setting
self.emit_event(EVENT_NAME_ENROLLMENT_MODE_CHANGED)
def emit_event(self, event_name):
"""
Emits an event to explicitly track course enrollment and unenrollment.
"""
try:
context = contexts.course_context_from_course_id(self.course_id)
assert isinstance(self.course_id, CourseKey)
data = {
'user_id': self.user.id,
'course_id': self.course_id.to_deprecated_string(),
'mode': self.mode,
}
with tracker.get_tracker().context(event_name, context):
tracker.emit(event_name, data)
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(self.user_id, event_name, {
'category': 'conversion',
'label': self.course_id.to_deprecated_string(),
'org': self.course_id.org,
'course': self.course_id.course,
'run': self.course_id.run,
'mode': self.mode,
}, context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
})
except: # pylint: disable=bare-except
if event_name and self.course_id:
log.exception(
u'Unable to emit event %s for user %s and course %s',
event_name,
self.user.username,
self.course_id,
)
@classmethod
def enroll(cls, user, course_key, mode=None, check_access=False):
"""
Enroll a user in a course. This saves immediately.
Returns a CoursewareEnrollment object.
`user` is a Django User object. If it hasn't been saved yet (no `.id`
attribute), this method will automatically save it before
adding an enrollment for it.
        `course_key` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
`mode` is a string specifying what kind of enrollment this is. The
default is the default course mode, 'audit'. Other options
include 'professional', 'verified', 'honor',
'no-id-professional' and 'credit'.
See CourseMode in common/djangoapps/course_modes/models.py.
`check_access`: if True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
The default is set to False to avoid breaking legacy code or
code with non-standard flows (ex. beta tester invitations), but
for any standard enrollment flow you probably want this to be True.
Exceptions that can be raised: NonExistentCourseError,
EnrollmentClosedError, CourseFullError, AlreadyEnrolledError. All these
are subclasses of CourseEnrollmentException if you want to catch all of
them in the same way.
It is expected that this method is called from a method which has already
verified the user authentication.
Also emits relevant events for analytics purposes.
"""
if mode is None:
mode = _default_course_mode(unicode(course_key))
# All the server-side checks for whether a user is allowed to enroll.
try:
course = CourseOverview.get_from_id(course_key)
except CourseOverview.DoesNotExist:
# This is here to preserve legacy behavior which allowed enrollment in courses
# announced before the start of content creation.
if check_access:
log.warning(u"User %s failed to enroll in non-existent course %s", user.username, unicode(course_key))
raise NonExistentCourseError
if check_access:
if CourseEnrollment.is_enrollment_closed(user, course):
log.warning(
u"User %s failed to enroll in course %s because enrollment is closed",
user.username,
course_key.to_deprecated_string()
)
raise EnrollmentClosedError
if CourseEnrollment.objects.is_course_full(course):
log.warning(
u"User %s failed to enroll in full course %s",
user.username,
course_key.to_deprecated_string(),
)
raise CourseFullError
if CourseEnrollment.is_enrolled(user, course_key):
log.warning(
u"User %s attempted to enroll in %s, but they were already enrolled",
user.username,
course_key.to_deprecated_string()
)
if check_access:
raise AlreadyEnrolledError
# User is allowed to enroll if they've reached this point.
enrollment = cls.get_or_create_enrollment(user, course_key)
enrollment.update_enrollment(is_active=True, mode=mode)
return enrollment
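    # Illustrative usage sketch (not part of the original module): a standard
    # enrollment flow with access checks enabled, catching the exception
    # subclasses documented above; `course_key` is assumed to be a CourseKey.
    #
    #     try:
    #         CourseEnrollment.enroll(user, course_key, mode='audit', check_access=True)
    #     except AlreadyEnrolledError:
    #         pass  # the student already holds an active enrollment
    #     except (EnrollmentClosedError, CourseFullError, NonExistentCourseError) as exc:
    #         log.info(u"Enrollment rejected for %s: %r", user.username, exc)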
@classmethod
def enroll_by_email(cls, email, course_id, mode=None, ignore_errors=True):
"""
Enroll a user in a course given their email. This saves immediately.
Note that enrolling by email is generally done in big batches and the
        error rate is high. For that reason, we suppress User lookup errors by
default.
Returns a CoursewareEnrollment object. If the User does not exist and
`ignore_errors` is set to `True`, it will return None.
`email` Email address of the User to add to enroll in the course.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
`mode` is a string specifying what kind of enrollment this is. The
default is the default course mode, 'audit'. Other options
include 'professional', 'verified', 'honor',
'no-id-professional' and 'credit'.
See CourseMode in common/djangoapps/course_modes/models.py.
`ignore_errors` is a boolean indicating whether we should suppress
`User.DoesNotExist` errors (returning None) or let it
bubble up.
It is expected that this method is called from a method which has already
verified the user authentication and access.
"""
try:
user = User.objects.get(email=email)
return cls.enroll(user, course_id, mode)
except User.DoesNotExist:
err_msg = u"Tried to enroll email {} into course {}, but user not found"
log.error(err_msg.format(email, course_id))
if ignore_errors:
return None
raise
@classmethod
def unenroll(cls, user, course_id, skip_refund=False):
"""
Remove the user from a given course. If the relevant `CourseEnrollment`
object doesn't exist, we log an error but don't throw an exception.
`user` is a Django User object. If it hasn't been saved yet (no `.id`
attribute), this method will automatically save it before
adding an enrollment for it.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
`skip_refund` can be set to True to avoid the refund process.
"""
try:
record = CourseEnrollment.objects.get(user=user, course_id=course_id)
record.update_enrollment(is_active=False, skip_refund=skip_refund)
except cls.DoesNotExist:
log.error(
u"Tried to unenroll student %s from %s but they were not enrolled",
user,
course_id
)
@classmethod
def unenroll_by_email(cls, email, course_id):
"""
Unenroll a user from a course given their email. This saves immediately.
User lookup errors are logged but will not throw an exception.
`email` Email address of the User to unenroll from the course.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
"""
try:
user = User.objects.get(email=email)
return cls.unenroll(user, course_id)
except User.DoesNotExist:
log.error(
u"Tried to unenroll email %s from course %s, but user not found",
email,
course_id
)
@classmethod
def is_enrolled(cls, user, course_key):
"""
Returns True if the user is enrolled in the course (the entry must exist
and it must have `is_active=True`). Otherwise, returns False.
`user` is a Django User object. If it hasn't been saved yet (no `.id`
attribute), this method will automatically save it before
adding an enrollment for it.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
"""
if not user.is_authenticated():
return False
try:
record = CourseEnrollment.objects.get(user=user, course_id=course_key)
return record.is_active
except cls.DoesNotExist:
return False
@classmethod
def is_enrolled_by_partial(cls, user, course_id_partial):
"""
Returns `True` if the user is enrolled in a course that starts with
`course_id_partial`. Otherwise, returns False.
Can be used to determine whether a student is enrolled in a course
whose run name is unknown.
`user` is a Django User object. If it hasn't been saved yet (no `.id`
attribute), this method will automatically save it before
adding an enrollment for it.
`course_id_partial` (CourseKey) is missing the run component
"""
assert isinstance(course_id_partial, CourseKey)
assert not course_id_partial.run # None or empty string
course_key = SlashSeparatedCourseKey(course_id_partial.org, course_id_partial.course, '')
querystring = unicode(course_key.to_deprecated_string())
try:
return CourseEnrollment.objects.filter(
user=user,
course_id__startswith=querystring,
is_active=1
).exists()
except cls.DoesNotExist:
return False
@classmethod
def enrollment_mode_for_user(cls, user, course_id):
"""
Returns the enrollment mode for the given user for the given course
`user` is a Django User object
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
Returns (mode, is_active) where mode is the enrollment mode of the student
and is_active is whether the enrollment is active.
Returns (None, None) if the courseenrollment record does not exist.
"""
try:
record = CourseEnrollment.objects.get(user=user, course_id=course_id)
return (record.mode, record.is_active)
except cls.DoesNotExist:
return (None, None)
@classmethod
def enrollments_for_user(cls, user):
return CourseEnrollment.objects.filter(user=user, is_active=1)
def is_paid_course(self):
"""
        Returns True if the course is paid
"""
paid_course = CourseMode.is_white_label(self.course_id)
if paid_course or CourseMode.is_professional_slug(self.mode):
return True
return False
def activate(self):
"""Makes this `CourseEnrollment` record active. Saves immediately."""
self.update_enrollment(is_active=True)
def deactivate(self):
"""Makes this `CourseEnrollment` record inactive. Saves immediately. An
inactive record means that the student is not enrolled in this course.
"""
self.update_enrollment(is_active=False)
def change_mode(self, mode):
"""Changes this `CourseEnrollment` record's mode to `mode`. Saves immediately."""
self.update_enrollment(mode=mode)
def refundable(self):
"""
For paid/verified certificates, students may receive a refund if they have
a verified certificate and the deadline for refunds has not yet passed.
"""
# In order to support manual refunds past the deadline, set can_refund on this object.
# On unenrolling, the "UNENROLL_DONE" signal calls CertificateItem.refund_cert_callback(),
# which calls this method to determine whether to refund the order.
# This can't be set directly because refunds currently happen as a side-effect of unenrolling.
# (side-effects are bad)
if getattr(self, 'can_refund', None) is not None:
return True
# If the student has already been given a certificate they should not be refunded
if GeneratedCertificate.certificate_for_student(self.user, self.course_id) is not None:
return False
# If it is after the refundable cutoff date they should not be refunded.
refund_cutoff_date = self.refund_cutoff_date()
if refund_cutoff_date and datetime.now(UTC) > refund_cutoff_date:
return False
course_mode = CourseMode.mode_for_course(self.course_id, 'verified')
if course_mode is None:
return False
else:
return True
def refund_cutoff_date(self):
""" Calculate and return the refund window end date. """
try:
attribute = self.attributes.get(namespace='order', name='order_number')
except ObjectDoesNotExist:
return None
order_number = attribute.value
order = ecommerce_api_client(self.user).orders(order_number).get()
refund_window_start_date = max(
datetime.strptime(order['date_placed'], ECOMMERCE_DATE_FORMAT),
self.course_overview.start.replace(tzinfo=None)
)
return refund_window_start_date.replace(tzinfo=UTC) + EnrollmentRefundConfiguration.current().refund_window
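    # Illustrative sketch of the window calculation above (not part of the
    # original module): if the order was placed on 2015-01-01 but the course
    # started on 2015-02-01, the later of the two dates anchors the window, so
    # with the default 14-day EnrollmentRefundConfiguration the cutoff falls on
    # 2015-02-15 (UTC).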
@property
def username(self):
return self.user.username
@property
def course(self):
# Deprecated. Please use the `course_overview` property instead.
return self.course_overview
@property
def course_overview(self):
"""
Returns a CourseOverview of the course to which this enrollment refers.
Returns None if an error occurred while trying to load the course.
Note:
If the course is re-published within the lifetime of this
CourseEnrollment object, then the value of this property will
become stale.
"""
if not self._course_overview:
try:
self._course_overview = CourseOverview.get_from_id(self.course_id)
except (CourseOverview.DoesNotExist, IOError):
self._course_overview = None
return self._course_overview
def is_verified_enrollment(self):
"""
        Check whether the course enrollment mode is verified or not
"""
return CourseMode.is_verified_slug(self.mode)
def is_professional_enrollment(self):
"""
        Check whether the course enrollment mode is professional or not
"""
return CourseMode.is_professional_slug(self.mode)
@classmethod
def is_enrolled_as_verified(cls, user, course_key):
"""
Check whether the course enrollment is for a verified mode.
Arguments:
user (User): The user object.
course_key (CourseKey): The identifier for the course.
Returns: bool
"""
enrollment = cls.get_enrollment(user, course_key)
return (
enrollment is not None and
enrollment.is_active and
enrollment.is_verified_enrollment()
)
@classmethod
def cache_key_name(cls, user_id, course_key):
"""Return cache key name to be used to cache current configuration.
Args:
user_id(int): Id of user.
course_key(unicode): Unicode of course key
Returns:
Unicode cache key
"""
return cls.COURSE_ENROLLMENT_CACHE_KEY.format(user_id, unicode(course_key))
def get_individual_start_date(self):
return self_paced_api.get_base_date(self)
def get_individual_end_date(self):
return self_paced_api.get_course_end_date(self)
def is_individual_closed(self):
return self_paced_api.is_course_closed(self)
@receiver(models.signals.post_save, sender=CourseEnrollment)
@receiver(models.signals.post_delete, sender=CourseEnrollment)
def invalidate_enrollment_mode_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name
"""Invalidate the cache of CourseEnrollment model. """
cache_key = CourseEnrollment.cache_key_name(
instance.user.id,
unicode(instance.course_id)
)
cache.delete(cache_key)
class ManualEnrollmentAudit(models.Model):
"""
Table for tracking which enrollments were performed through manual enrollment.
"""
enrollment = models.ForeignKey(CourseEnrollment, null=True)
enrolled_by = models.ForeignKey(User, null=True)
enrolled_email = models.CharField(max_length=255, db_index=True)
time_stamp = models.DateTimeField(auto_now_add=True, null=True)
state_transition = models.CharField(max_length=255, choices=TRANSITION_STATES)
reason = models.TextField(null=True)
@classmethod
def create_manual_enrollment_audit(cls, user, email, state_transition, reason, enrollment=None):
"""
saves the student manual enrollment information
"""
cls.objects.create(
enrolled_by=user,
enrolled_email=email,
state_transition=state_transition,
reason=reason,
enrollment=enrollment
)
@classmethod
def get_manual_enrollment_by_email(cls, email):
"""
        If a match exists, returns the most recent entry in the table filtered by email; otherwise returns None.
"""
try:
manual_enrollment = cls.objects.filter(enrolled_email=email).latest('time_stamp')
except cls.DoesNotExist:
manual_enrollment = None
return manual_enrollment
@classmethod
def get_manual_enrollment(cls, enrollment):
"""
        If a match exists, returns the most recent entry in the table filtered by enrollment; otherwise returns None.
"""
try:
manual_enrollment = cls.objects.filter(enrollment=enrollment).latest('time_stamp')
except cls.DoesNotExist:
manual_enrollment = None
return manual_enrollment
class CourseEnrollmentAllowed(models.Model):
"""
Table of users (specified by email address strings) who are allowed to enroll in a specified course.
The user may or may not (yet) exist. Enrollment by users listed in this table is allowed
even if the enrollment time window is past.
"""
email = models.CharField(max_length=255, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
auto_enroll = models.BooleanField(default=0)
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
class Meta(object):
unique_together = (('email', 'course_id'),)
def __unicode__(self):
return "[CourseEnrollmentAllowed] %s: %s (%s)" % (self.email, self.course_id, self.created)
@classmethod
def may_enroll_and_unenrolled(cls, course_id):
"""
Return QuerySet of students who are allowed to enroll in a course.
Result excludes students who have already enrolled in the
course.
`course_id` identifies the course for which to compute the QuerySet.
"""
enrolled = CourseEnrollment.objects.users_enrolled_in(course_id=course_id).values_list('email', flat=True)
return CourseEnrollmentAllowed.objects.filter(course_id=course_id).exclude(email__in=enrolled)
@total_ordering
class CourseAccessRole(models.Model):
"""
Maps users to org, courses, and roles. Used by student.roles.CourseRole and OrgRole.
To establish a user as having a specific role over all courses in the org, create an entry
without a course_id.
"""
objects = NoneToEmptyManager()
user = models.ForeignKey(User)
# blank org is for global group based roles such as course creator (may be deprecated)
org = models.CharField(max_length=64, db_index=True, blank=True)
# blank course_id implies org wide role
course_id = CourseKeyField(max_length=255, db_index=True, blank=True)
role = models.CharField(max_length=64, db_index=True)
class Meta(object):
unique_together = ('user', 'org', 'course_id', 'role')
@property
def _key(self):
"""
convenience function to make eq overrides easier and clearer. arbitrary decision
that role is primary, followed by org, course, and then user
"""
return (self.role, self.org, self.course_id, self.user_id)
def __eq__(self, other):
"""
Overriding eq b/c the django impl relies on the primary key which requires fetch. sometimes we
just want to compare roles w/o doing another fetch.
"""
return type(self) == type(other) and self._key == other._key # pylint: disable=protected-access
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
"""
Lexigraphic sort
"""
return self._key < other._key # pylint: disable=protected-access
def __unicode__(self):
return "[CourseAccessRole] user: {} role: {} org: {} course: {}".format(self.user.username, self.role, self.org, self.course_id)
#### Helper methods for use from python manage.py shell and other classes.
def get_user_by_username_or_email(username_or_email):
"""
Return a User object, looking up by email if username_or_email contains a
'@', otherwise by username.
Raises:
        User.DoesNotExist if the lookup fails.
"""
if '@' in username_or_email:
return User.objects.get(email=username_or_email)
else:
return User.objects.get(username=username_or_email)
def get_user(email):
user = User.objects.get(email=email)
u_prof = UserProfile.objects.get(user=user)
return user, u_prof
def user_info(email):
user, u_prof = get_user(email)
print "User id", user.id
print "Username", user.username
print "E-mail", user.email
print "Name", u_prof.name
print "Location", u_prof.location
print "Language", u_prof.language
return user, u_prof
def change_email(old_email, new_email):
user = User.objects.get(email=old_email)
user.email = new_email
user.save()
def change_name(email, new_name):
_user, u_prof = get_user(email)
u_prof.name = new_name
u_prof.save()
def user_count():
print "All users", User.objects.all().count()
print "Active users", User.objects.filter(is_active=True).count()
return User.objects.all().count()
def active_user_count():
return User.objects.filter(is_active=True).count()
def create_group(name, description):
utg = UserTestGroup()
utg.name = name
utg.description = description
utg.save()
def add_user_to_group(user, group):
utg = UserTestGroup.objects.get(name=group)
utg.users.add(User.objects.get(username=user))
utg.save()
def remove_user_from_group(user, group):
utg = UserTestGroup.objects.get(name=group)
utg.users.remove(User.objects.get(username=user))
utg.save()
DEFAULT_GROUPS = {
'email_future_courses': 'Receive e-mails about future MITx courses',
'email_helpers': 'Receive e-mails about how to help with MITx',
'mitx_unenroll': 'Fully unenrolled -- no further communications',
'6002x_unenroll': 'Took and dropped 6002x'
}
def add_user_to_default_group(user, group):
try:
utg = UserTestGroup.objects.get(name=group)
except UserTestGroup.DoesNotExist:
utg = UserTestGroup()
utg.name = group
utg.description = DEFAULT_GROUPS[group]
utg.save()
utg.users.add(User.objects.get(username=user))
utg.save()
def create_comments_service_user(user):
if not settings.FEATURES['ENABLE_DISCUSSION_SERVICE']:
# Don't try--it won't work, and it will fill the logs with lots of errors
return
try:
cc_user = cc.User.from_django_user(user)
cc_user.save()
except Exception: # pylint: disable=broad-except
log = logging.getLogger("edx.discussion") # pylint: disable=redefined-outer-name
log.error(
"Could not create comments service user with id {}".format(user.id),
exc_info=True
)
# Define login and logout handlers here in the models file, instead of the views file,
# so that they are more likely to be loaded when a Studio user brings up the Studio admin
# page to login. These are currently the only signals available, so we need to continue
# identifying and logging failures separately (in views).
@receiver(user_logged_in)
def log_successful_login(sender, request, user, **kwargs): # pylint: disable=unused-argument
"""Handler to log when logins have occurred successfully."""
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.info(u"Login success - user.id: {0}".format(user.id))
else:
AUDIT_LOG.info(u"Login success - {0} ({1})".format(user.username, user.email))
@receiver(user_logged_out)
def log_successful_logout(sender, request, user, **kwargs): # pylint: disable=unused-argument
"""Handler to log when logouts have occurred successfully."""
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.info(u"Logout - user.id: {0}".format(request.user.id))
else:
AUDIT_LOG.info(u"Logout - {0}".format(request.user))
@receiver(user_logged_in)
@receiver(user_logged_out)
def enforce_single_login(sender, request, user, signal, **kwargs): # pylint: disable=unused-argument
"""
Sets the current session id in the user profile,
to prevent concurrent logins.
"""
if settings.FEATURES.get('PREVENT_CONCURRENT_LOGINS', False):
if signal == user_logged_in:
key = request.session.session_key
else:
key = None
if user:
user.profile.set_login_session(key)
class DashboardConfiguration(ConfigurationModel):
"""Dashboard Configuration settings.
Includes configuration options for the dashboard, which impact behavior and rendering for the application.
"""
recent_enrollment_time_delta = models.PositiveIntegerField(
default=0,
help_text="The number of seconds in which a new enrollment is considered 'recent'. "
"Used to display notifications."
)
@property
def recent_enrollment_seconds(self):
return self.recent_enrollment_time_delta
class LinkedInAddToProfileConfiguration(ConfigurationModel):
"""
LinkedIn Add to Profile Configuration
This configuration enables the "Add to Profile" LinkedIn
button on the student dashboard. The button appears when
users have a certificate available; when clicked,
users are sent to the LinkedIn site with a pre-filled
form allowing them to add the certificate to their
LinkedIn profile.
"""
MODE_TO_CERT_NAME = {
"honor": _(u"{platform_name} Honor Code Certificate for {course_name}"),
"verified": _(u"{platform_name} Verified Certificate for {course_name}"),
"professional": _(u"{platform_name} Professional Certificate for {course_name}"),
"no-id-professional": _(
u"{platform_name} Professional Certificate for {course_name}"
),
}
company_identifier = models.TextField(
help_text=_(
u"The company identifier for the LinkedIn Add-to-Profile button "
u"e.g 0_0dPSPyS070e0HsE9HNz_13_d11_"
)
)
# Deprecated
dashboard_tracking_code = models.TextField(default="", blank=True)
trk_partner_name = models.CharField(
max_length=10,
default="",
blank=True,
help_text=_(
u"Short identifier for the LinkedIn partner used in the tracking code. "
u"(Example: 'edx') "
u"If no value is provided, tracking codes will not be sent to LinkedIn."
)
)
def add_to_profile_url(self, course_key, course_name, cert_mode, cert_url, source="o", target="dashboard"):
"""Construct the URL for the "add to profile" button.
Arguments:
course_key (CourseKey): The identifier for the course.
course_name (unicode): The display name of the course.
cert_mode (str): The course mode of the user's certificate (e.g. "verified", "honor", "professional")
cert_url (str): The download URL for the certificate.
Keyword Arguments:
source (str): Either "o" (for onsite/UI), "e" (for emails), or "m" (for mobile)
            target (str): An identifier for the occurrence of the button.
"""
params = OrderedDict([
('_ed', self.company_identifier),
('pfCertificationName', self._cert_name(course_name, cert_mode).encode('utf-8')),
('pfCertificationUrl', cert_url),
('source', source)
])
tracking_code = self._tracking_code(course_key, cert_mode, target)
if tracking_code is not None:
params['trk'] = tracking_code
return u'http://www.linkedin.com/profile/add?{params}'.format(
params=urlencode(params)
)
def _cert_name(self, course_name, cert_mode):
"""Name of the certification, for display on LinkedIn. """
return self.MODE_TO_CERT_NAME.get(
cert_mode,
_(u"{platform_name} Certificate for {course_name}")
).format(
platform_name=settings.PLATFORM_NAME,
course_name=course_name
)
def _tracking_code(self, course_key, cert_mode, target):
"""Create a tracking code for the button.
Tracking codes are used by LinkedIn to collect
analytics about certifications users are adding
to their profiles.
The tracking code format is:
&trk=[partner name]-[certificate type]-[date]-[target field]
In our case, we're sending:
&trk=edx-{COURSE ID}_{COURSE MODE}-{TARGET}
If no partner code is configured, then this will
return None, indicating that tracking codes are disabled.
Arguments:
course_key (CourseKey): The identifier for the course.
cert_mode (str): The enrollment mode for the course.
target (str): Identifier for where the button is located.
Returns:
unicode or None
"""
return (
u"{partner}-{course_key}_{cert_mode}-{target}".format(
partner=self.trk_partner_name,
course_key=unicode(course_key),
cert_mode=cert_mode,
target=target
)
if self.trk_partner_name else None
)
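    # Illustrative sketch (not part of the original module): with
    # trk_partner_name set to "edx", a verified certificate for the course key
    # course-v1:Org+Course+Run rendered on the dashboard yields the tracking
    # code
    #     edx-course-v1:Org+Course+Run_verified-dashboard
    # which add_to_profile_url() appends as the `trk` query parameter.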
class EntranceExamConfiguration(models.Model):
"""
Represents a Student's entrance exam specific data for a single Course
"""
user = models.ForeignKey(User, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
updated = models.DateTimeField(auto_now=True, db_index=True)
# if skip_entrance_exam is True, then student can skip entrance exam
# for the course
skip_entrance_exam = models.BooleanField(default=True)
class Meta(object):
unique_together = (('user', 'course_id'), )
def __unicode__(self):
return "[EntranceExamConfiguration] %s: %s (%s) = %s" % (
self.user, self.course_id, self.created, self.skip_entrance_exam
)
@classmethod
def user_can_skip_entrance_exam(cls, user, course_key):
"""
        Return True if the given user can skip the entrance exam for the given course, otherwise False.
"""
can_skip = False
if is_entrance_exams_enabled():
try:
record = EntranceExamConfiguration.objects.get(user=user, course_id=course_key)
can_skip = record.skip_entrance_exam
except EntranceExamConfiguration.DoesNotExist:
can_skip = False
return can_skip
class LanguageField(models.CharField):
"""Represents a language from the ISO 639-1 language set."""
def __init__(self, *args, **kwargs):
"""Creates a LanguageField.
Accepts all the same kwargs as a CharField, except for max_length and
choices. help_text defaults to a description of the ISO 639-1 set.
"""
kwargs.pop('max_length', None)
kwargs.pop('choices', None)
help_text = kwargs.pop(
'help_text',
_("The ISO 639-1 language code for this language."),
)
super(LanguageField, self).__init__(
max_length=16,
choices=settings.ALL_LANGUAGES,
help_text=help_text,
*args,
**kwargs
)
class LanguageProficiency(models.Model):
"""
Represents a user's language proficiency.
Note that we have not found a way to emit analytics change events by using signals directly on this
model or on UserProfile. Therefore if you are changing LanguageProficiency values, it is important
to go through the accounts API (AccountsView) defined in
/edx-platform/openedx/core/djangoapps/user_api/accounts/views.py or its associated api method
(update_account_settings) so that the events are emitted.
"""
class Meta(object):
unique_together = (('code', 'user_profile'),)
user_profile = models.ForeignKey(UserProfile, db_index=True, related_name='language_proficiencies')
code = models.CharField(
max_length=16,
blank=False,
choices=settings.ALL_LANGUAGES,
help_text=_("The ISO 639-1 language code for this language.")
)
class CourseEnrollmentAttribute(models.Model):
"""
Provide additional information about the user's enrollment.
"""
enrollment = models.ForeignKey(CourseEnrollment, related_name="attributes")
namespace = models.CharField(
max_length=255,
help_text=_("Namespace of enrollment attribute")
)
name = models.CharField(
max_length=255,
help_text=_("Name of the enrollment attribute")
)
value = models.CharField(
max_length=255,
help_text=_("Value of the enrollment attribute")
)
def __unicode__(self):
"""Unicode representation of the attribute. """
return u"{namespace}:{name}, {value}".format(
namespace=self.namespace,
name=self.name,
value=self.value,
)
@classmethod
def add_enrollment_attr(cls, enrollment, data_list):
"""Delete all the enrollment attributes for the given enrollment and
add new attributes.
Args:
enrollment(CourseEnrollment): 'CourseEnrollment' for which attribute is to be added
data(list): list of dictionaries containing data to save
"""
cls.objects.filter(enrollment=enrollment).delete()
attributes = [
cls(enrollment=enrollment, namespace=data['namespace'], name=data['name'], value=data['value'])
for data in data_list
]
cls.objects.bulk_create(attributes)
@classmethod
def get_enrollment_attributes(cls, enrollment):
"""Retrieve list of all enrollment attributes.
Args:
enrollment(CourseEnrollment): 'CourseEnrollment' for which list is to retrieve
Returns: list
Example:
>>> CourseEnrollmentAttribute.get_enrollment_attributes(CourseEnrollment)
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
"""
return [
{
"namespace": attribute.namespace,
"name": attribute.name,
"value": attribute.value,
}
for attribute in cls.objects.filter(enrollment=enrollment)
]
class EnrollmentRefundConfiguration(ConfigurationModel):
"""
Configuration for course enrollment refunds.
"""
# TODO: Django 1.8 introduces a DurationField
# (https://docs.djangoproject.com/en/1.8/ref/models/fields/#durationfield)
# for storing timedeltas which uses MySQL's bigint for backing
# storage. After we've completed the Django upgrade we should be
# able to replace this field with a DurationField named
# `refund_window` without having to run a migration or change
# other code.
refund_window_microseconds = models.BigIntegerField(
default=1209600000000,
help_text=_(
"The window of time after enrolling during which users can be granted"
" a refund, represented in microseconds. The default is 14 days."
)
)
@property
def refund_window(self):
"""Return the configured refund window as a `datetime.timedelta`."""
return timedelta(microseconds=self.refund_window_microseconds)
@refund_window.setter
def refund_window(self, refund_window):
"""Set the current refund window to the given timedelta."""
self.refund_window_microseconds = int(refund_window.total_seconds() * 1000000)
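    # Illustrative sketch (not part of the original module): setting a 7-day
    # window through the property stores
    #     int(timedelta(days=7).total_seconds() * 1000000) == 604800000000
    # microseconds in refund_window_microseconds.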
| agpl-3.0 | -8,885,954,979,312,208,000 | 36.095307 | 142 | 0.640217 | false |
maas/maas | src/maasserver/websockets/handlers/domain.py | 1 | 8142 | # Copyright 2016-2019 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""The domain handler for the WebSocket connection."""
from django.core.exceptions import ValidationError
from maasserver.forms.dnsdata import DNSDataForm
from maasserver.forms.dnsresource import DNSResourceForm
from maasserver.forms.domain import DomainForm
from maasserver.models import DNSData, DNSResource, GlobalDefault
from maasserver.models.domain import Domain
from maasserver.permissions import NodePermission
from maasserver.websockets.base import (
AdminOnlyMixin,
HandlerPermissionError,
HandlerValidationError,
)
from maasserver.websockets.handlers.timestampedmodel import (
TimestampedModelHandler,
)
class DomainHandler(TimestampedModelHandler, AdminOnlyMixin):
class Meta:
queryset = Domain.objects.all()
pk = "id"
form = DomainForm
form_requires_request = False
allowed_methods = [
"list",
"get",
"create",
"update",
"delete",
"set_active",
"set_default",
"create_dnsresource",
"update_dnsresource",
"delete_dnsresource",
"create_address_record",
"update_address_record",
"delete_address_record",
"create_dnsdata",
"update_dnsdata",
"delete_dnsdata",
]
listen_channels = ["domain"]
def dehydrate(self, domain, data, for_list=False):
rrsets = domain.render_json_for_related_rrdata(
for_list=for_list, user=self.user
)
if not for_list:
data["rrsets"] = rrsets
data["hosts"] = len(
{rr["system_id"] for rr in rrsets if rr["system_id"] is not None}
)
data["resource_count"] = len(rrsets)
if domain.is_default():
data["displayname"] = "%s (default)" % data["name"]
data["is_default"] = True
else:
data["displayname"] = data["name"]
data["is_default"] = False
return data
def _get_domain_or_permission_error(self, params):
domain = params.get("domain")
if domain is None:
raise HandlerValidationError(
{"domain": ["This field is required"]}
)
domain = self.get_object({"id": domain})
if not self.user.has_perm(NodePermission.admin, domain):
raise HandlerPermissionError()
return domain
def create_dnsresource(self, params):
self._get_domain_or_permission_error(params)
form = DNSResourceForm(data=params, user=self.user)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
def update_dnsresource(self, params):
domain = self._get_domain_or_permission_error(params)
dnsresource = DNSResource.objects.get(
domain=domain, id=params["dnsresource_id"]
)
form = DNSResourceForm(
instance=dnsresource, data=params, user=self.user
)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
return self.full_dehydrate(domain)
def delete_dnsresource(self, params):
domain = self._get_domain_or_permission_error(params)
dnsresource = DNSResource.objects.get(
domain=domain, id=params["dnsresource_id"]
)
dnsresource.delete()
def create_address_record(self, params):
domain = self._get_domain_or_permission_error(params)
if params["ip_addresses"] == [""]:
raise ValidationError(
"Data field is required when creating an %s record."
% params["rrtype"]
)
dnsresource, created = DNSResource.objects.get_or_create(
domain=domain, name=params["name"]
)
if created:
ip_addresses = []
else:
ip_addresses = dnsresource.get_addresses()
ip_addresses.extend(params["ip_addresses"])
params["ip_addresses"] = " ".join(ip_addresses)
form = DNSResourceForm(
data=params, user=self.user, instance=dnsresource
)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
def update_address_record(self, params):
domain = self._get_domain_or_permission_error(params)
dnsresource, created = DNSResource.objects.get_or_create(
domain=domain, name=params["name"]
)
if created:
# If we ended up creating a record, that's because the name
# was changed, so we'll start with an empty list. But that also
# means we need to edit the record with the original name.
ip_addresses = []
previous_dnsresource = DNSResource.objects.get(
domain=domain, name=params["previous_name"]
)
            previous_ip_addresses = previous_dnsresource.get_addresses()
            previous_ip_addresses.remove(params["previous_rrdata"])
            modified_addresses = " ".join(previous_ip_addresses)
form = DNSResourceForm(
data=dict(ip_addresses=modified_addresses),
user=self.user,
instance=previous_dnsresource,
)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
else:
ip_addresses = dnsresource.get_addresses()
# Remove the previous address for the record being edited.
# The previous_rrdata field will contain the original value
# for the IP address in the edited row.
ip_addresses.remove(params["previous_rrdata"])
ip_addresses.extend(params["ip_addresses"])
params["ip_addresses"] = " ".join(ip_addresses)
form = DNSResourceForm(
data=params, user=self.user, instance=dnsresource
)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
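    # Illustrative params sketch (not part of the original module): renaming a
    # record while editing one of its addresses sends both the previous and the
    # new values, e.g.
    #     {"domain": 1, "name": "web", "previous_name": "www",
    #      "previous_rrdata": "10.0.0.1", "ip_addresses": ["10.0.0.2"]}
    # so the old address is detached from the record named "www" and the new
    # one is attached to (or created on) the record named "web". All values
    # above are made up for the example.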
def delete_address_record(self, params):
domain = self._get_domain_or_permission_error(params)
dnsresource = DNSResource.objects.get(
domain=domain, id=params["dnsresource_id"]
)
ip_addresses = dnsresource.get_addresses()
ip_addresses.remove(params["rrdata"])
params["ip_addresses"] = " ".join(ip_addresses)
form = DNSResourceForm(
data=params, user=self.user, instance=dnsresource
)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
def create_dnsdata(self, params):
domain = self._get_domain_or_permission_error(params)
dnsresource, _ = DNSResource.objects.get_or_create(
domain=domain, name=params["name"]
)
params["dnsresource"] = dnsresource.id
form = DNSDataForm(data=params)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
def update_dnsdata(self, params):
domain = self._get_domain_or_permission_error(params)
dnsdata = DNSData.objects.get(
id=params["dnsdata_id"], dnsresource_id=params["dnsresource_id"]
)
form = DNSDataForm(data=params, instance=dnsdata)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
return self.full_dehydrate(domain)
def delete_dnsdata(self, params):
self._get_domain_or_permission_error(params)
dnsdata = DNSData.objects.get(id=params["dnsdata_id"])
dnsdata.delete()
def set_default(self, params):
domain = self._get_domain_or_permission_error(params)
global_defaults = GlobalDefault.objects.instance()
global_defaults.domain = domain
global_defaults.save()
return self.full_dehydrate(domain)
| agpl-3.0 | -6,462,518,231,539,437,000 | 35.348214 | 77 | 0.594203 | false |
h2oloopan/easymerge | EasyMerge/tests/reddit/r2/r2/controllers/error.py | 1 | 8526 | # The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2013 reddit
# Inc. All Rights Reserved.
###############################################################################
import json
import os
import random
import pylons
from webob.exc import HTTPFound, HTTPMovedPermanently
from pylons.i18n import _
from pylons import c, g, request, response
try:
# place all r2 specific imports in here. If there is a code error, it'll
# get caught and the stack trace won't be presented to the user in
# production
from r2.config import extensions
from r2.controllers.reddit_base import RedditController, Cookies
from r2.lib.errors import ErrorSet
from r2.lib.filters import websafe_json
from r2.lib import log, pages
from r2.lib.strings import rand_strings
from r2.lib.template_helpers import static
from r2.models.link import Link
from r2.models.subreddit import DefaultSR, Subreddit
except Exception, e:
if g.debug:
# if debug mode, let the error filter up to pylons to be handled
raise e
else:
# production environment: protect the code integrity!
print "HuffmanEncodingError: make sure your python compiles before deploying, stupid!"
# kill this app
os._exit(1)
redditbroke = \
'''<html>
<head>
<title>reddit broke!</title>
</head>
<body>
<div style="margin: auto; text-align: center">
<p>
<a href="/">
<img border="0" src="%s" alt="you broke reddit" />
</a>
</p>
<p>
%s
</p>
</body>
</html>
'''
FAILIEN_COUNT = 3
def make_failien_url():
failien_number = random.randint(1, FAILIEN_COUNT)
failien_name = "youbrokeit%d.png" % failien_number
return static(failien_name)
class ErrorController(RedditController):
"""Generates error documents as and when they are required.
The ErrorDocuments middleware forwards to ErrorController when error
related status codes are returned from the application.
This behaviour can be altered by changing the parameters to the
ErrorDocuments middleware in your config/middleware.py file.
"""
def check_for_bearer_token(self):
pass
allowed_render_styles = ('html', 'xml', 'js', 'embed', '', "compact", 'api')
# List of admins to blame (skip the first admin, "reddit")
# If list is empty, just blame "an admin"
admins = g.admins[1:] or ["an admin"]
def __before__(self):
try:
c.error_page = True
RedditController.__before__(self)
except (HTTPMovedPermanently, HTTPFound):
# ignore an attempt to redirect from an error page
pass
except Exception as e:
handle_awful_failure("ErrorController.__before__: %r" % e)
def __after__(self):
try:
RedditController.__after__(self)
except Exception as e:
handle_awful_failure("ErrorController.__after__: %r" % e)
def __call__(self, environ, start_response):
try:
return RedditController.__call__(self, environ, start_response)
except Exception as e:
return handle_awful_failure("ErrorController.__call__: %r" % e)
def send403(self):
c.site = DefaultSR()
if 'usable_error_content' in request.environ:
return request.environ['usable_error_content']
else:
res = pages.RedditError(
title=_("forbidden (%(domain)s)") % dict(domain=g.domain),
message=_("you are not allowed to do that"),
explanation=request.GET.get('explanation'))
return res.render()
def send404(self):
if 'usable_error_content' in request.environ:
return request.environ['usable_error_content']
return pages.RedditError(_("page not found"),
_("the page you requested does not exist")).render()
def send429(self):
retry_after = request.environ.get("retry_after")
if retry_after:
response.headers["Retry-After"] = str(retry_after)
template_name = '/ratelimit_toofast.html'
else:
template_name = '/ratelimit_throttled.html'
template = g.mako_lookup.get_template(template_name)
return template.render(logo_url=static(g.default_header_url))
def send503(self):
retry_after = request.environ.get("retry_after")
if retry_after:
response.headers["Retry-After"] = str(retry_after)
return request.environ['usable_error_content']
def GET_document(self):
try:
c.errors = c.errors or ErrorSet()
# clear cookies the old fashioned way
c.cookies = Cookies()
code = request.GET.get('code', '')
try:
code = int(code)
except ValueError:
code = 404
srname = request.GET.get('srname', '')
takedown = request.GET.get('takedown', "")
# StatusBasedRedirect will override this anyway, but we need this
# here for pagecache to see.
response.status_int = code
if srname:
c.site = Subreddit._by_name(srname)
if code in (204, 304):
# NEVER return a content body on 204/304 or downstream
# caches may become very confused.
if request.GET.has_key('x-sup-id'):
x_sup_id = request.GET.get('x-sup-id')
if '\r\n' not in x_sup_id:
response.headers['x-sup-id'] = x_sup_id
return ""
elif c.render_style not in self.allowed_render_styles:
return str(code)
elif c.render_style in extensions.API_TYPES:
data = request.environ.get('extra_error_data', {'error': code})
return websafe_json(json.dumps(data))
elif takedown and code == 404:
link = Link._by_fullname(takedown)
return pages.TakedownPage(link).render()
elif code == 403:
return self.send403()
elif code == 429:
return self.send429()
elif code == 500:
randmin = {'admin': random.choice(self.admins)}
failien_url = make_failien_url()
return redditbroke % (failien_url, rand_strings.sadmessages % randmin)
elif code == 503:
return self.send503()
elif c.site:
return self.send404()
else:
return "page not found"
except Exception as e:
return handle_awful_failure("ErrorController.GET_document: %r" % e)
POST_document = PUT_document = DELETE_document = GET_document
def handle_awful_failure(fail_text):
"""
Makes sure that no errors generated in the error handler percolate
up to the user unless debug is enabled.
"""
if g.debug:
import sys
s = sys.exc_info()
# reraise the original error with the original stack trace
raise s[1], None, s[2]
try:
# log the traceback, and flag the "path" as the error location
import traceback
log.write_error_summary(fail_text)
for line in traceback.format_exc().splitlines():
g.log.error(line)
return redditbroke % (make_failien_url(), fail_text)
except:
# we are doomed. Admit defeat
return "This is an error that should never occur. You win."
| mit | 9,156,844,164,749,541,000 | 35.75 | 94 | 0.604621 | false |
PnEcrins/GeoNature | backend/geonature/core/gn_permissions/tools.py | 1 | 9278 | import logging, json
from flask import current_app, redirect, Response
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer,
SignatureExpired, BadSignature)
import sqlalchemy as sa
from sqlalchemy.sql.expression import func
from pypnusershub.db.tools import (
InsufficientRightsError,
AccessRightsExpiredError,
UnreadableAccessRightsError
)
from geonature.core.gn_permissions.models import VUsersPermissions, TFilters
from geonature.utils.env import DB
log = logging.getLogger(__name__)
def user_from_token(token, secret_key=None):
secret_key = secret_key or current_app.config['SECRET_KEY']
try:
        s = Serializer(secret_key)
user = s.loads(token)
return user
except SignatureExpired:
raise AccessRightsExpiredError("Token expired")
except BadSignature:
raise UnreadableAccessRightsError('Token BadSignature', 403)
def get_user_from_token_and_raise(
request,
secret_key=None,
redirect_on_expiration=None,
redirect_on_invalid_token=None
):
"""
    Deserialize the token, catch exceptions and return an appropriate
    Response (403, 302 ...)
"""
try:
token = request.cookies['token']
return user_from_token(token, secret_key)
except AccessRightsExpiredError:
if redirect_on_expiration:
res = redirect(redirect_on_expiration, code=302)
else:
res = Response('Token Expired', 403)
res.set_cookie('token', expires=0)
return res
except InsufficientRightsError as e:
log.info(e)
if redirect_on_expiration:
res = redirect(redirect_on_expiration, code=302)
else:
res = Response('Forbidden', 403)
return res
except KeyError as e:
if redirect_on_expiration:
return redirect(redirect_on_expiration, code=302)
return Response('No token', 403)
except UnreadableAccessRightsError:
log.info('Invalid Token : BadSignature')
# invalid token
if redirect_on_invalid_token:
res = redirect(redirect_on_invalid_token, code=302)
else:
res = Response('Token BadSignature', 403)
res.set_cookie('token', expires=0)
return res
except Exception as e:
trap_all_exceptions = current_app.config.get(
'TRAP_ALL_EXCEPTIONS',
True
)
if not trap_all_exceptions:
raise
log.critical(e)
msg = json.dumps({'type': 'Exception', 'msg': repr(e)})
return Response(msg, 403)
def get_user_permissions(user, code_action, code_filter_type, module_code=None, code_object=None):
"""
Get all the permissions of a user for an action, a module (or an object) and a filter_type
    A user may hold multiple permissions because of group membership. The view mapped
    by VUsersPermissions does not take the max because some filter types are not quantitative.
Parameters:
user(dict)
code_action(str): <C,R,U,V,E,D>
code_filter_type(str): <SCOPE, GEOGRAPHIC ...>
module_code(str): 'GEONATURE', 'OCCTAX'
code_object(str): 'PERMISSIONS', 'DATASET' (table gn_permissions.t_oject)
Return:
Array<VUsersPermissions>
"""
id_role = user['id_role']
ors = [VUsersPermissions.module_code.ilike('GEONATURE')]
q = (
VUsersPermissions
.query
.filter(VUsersPermissions.id_role == id_role)
.filter(VUsersPermissions.code_action == code_action)
.filter(VUsersPermissions.code_filter_type == code_filter_type)
)
    # if code_object is given, take only the authorization of this object
# no heritage from GeoNature
if code_object:
user_cruved = q.filter(VUsersPermissions.code_object == code_object).all()
object_for_error = code_object
# else: heritage cruved of the module or from GeoNature
else:
object_for_error = 'GEONATURE'
if module_code:
ors.append(VUsersPermissions.module_code.ilike(module_code))
object_for_error = module_code
user_cruved = q.filter(sa.or_(*ors)).all()
try:
assert len(user_cruved) > 0
return user_cruved
except AssertionError:
raise InsufficientRightsError(
'User "{}" cannot "{}" in module/app/object "{}"'.format(
id_role, code_action, object_for_error
)
)
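# A hedged usage sketch for get_user_permissions(); the module code and the
# way the scope is reduced to one integer are illustrative assumptions:
#
#     user = {"id_role": 42}
#     perms = get_user_permissions(user, "R", "SCOPE", module_code="OCCTAX")
#     max_scope = max(int(p.value_filter) for p in perms)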
def build_cruved_dict(cruved, get_id):
'''
    Utility function to build a dict like {'C':'3', 'R':'2'}...
from Array<VUsersPermissions>
'''
cruved_dict = {}
for action_scope in cruved:
if get_id:
cruved_dict[action_scope[0]] = action_scope[2]
else:
cruved_dict[action_scope[0]] = action_scope[1]
return cruved_dict
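# For illustration only (the concrete numbers are assumptions): the rows passed
# to build_cruved_dict look like (code_action, value_filter, id_filter), so
#
#     build_cruved_dict([('C', '3', 4), ('R', '2', 3)], get_id=False)
#     # -> {'C': '3', 'R': '2'}
#     build_cruved_dict([('C', '3', 4), ('R', '2', 3)], get_id=True)
#     # -> {'C': 4, 'R': 3}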
def beautifulize_cruved(actions, cruved):
"""
    Build a more readable cruved dict using the action labels
Params:
actions: dict action {'C': 'Action de créer'}
cruved: dict of cruved
Return:
Array<dict> [{'label': 'Action de Lire', 'value': '3'}]
"""
cruved_beautiful = []
for key, value in cruved.items():
temp = {}
temp['label'] = actions.get(key)
temp['value'] = value
cruved_beautiful.append(temp)
return cruved_beautiful
def cruved_scope_for_user_in_module(
id_role=None,
module_code=None,
object_code='ALL',
get_id=False
):
"""
    Get the user cruved for a module.
    If the module has no cruved of its own, the parent module's cruved is taken.
    A child module's cruved always overrides the parent module's cruved.
Params:
- id_role(int)
- module_code(str)
- get_id(bool): if true return the id_scope for each action
if false return the filter_value for each action
Return a tuple
- index 0: the cruved as a dict : {'C': 0, 'R': 2 ...}
    - index 1: a boolean which says whether the cruved is inherited from the parent
"""
q = DB.session.query(
VUsersPermissions.code_action,
func.max(VUsersPermissions.value_filter),
func.max(VUsersPermissions.id_filter)
).distinct(VUsersPermissions.code_action).filter(
VUsersPermissions.id_role == id_role
).filter(
VUsersPermissions.code_filter_type == 'SCOPE'
).filter(
VUsersPermissions.code_object == object_code
).group_by(VUsersPermissions.code_action)
cruved_actions = ['C', 'R', 'U', 'V', 'E', 'D']
# if object not ALL, no heritage
if object_code != 'ALL':
object_cruved = q.all()
cruved_dict = build_cruved_dict(object_cruved, get_id)
update_cruved = {}
for action in cruved_actions:
if action in cruved_dict:
update_cruved[action] = cruved_dict[action]
else:
update_cruved[action] = '0'
return update_cruved, False
# get max scope cruved for module GEONATURE
parent_cruved_data = q.filter(VUsersPermissions.module_code.ilike('GEONATURE')).all()
parent_cruved = {}
# build a dict like {'C':'0', 'R':'2' ...} if get_id = False or
# {'C': 1, 'R':3 ...} if get_id = True
parent_cruved = build_cruved_dict(parent_cruved_data, get_id)
# get max scope cruved for module passed in parameter
module_cruved = {}
if module_code:
module_cruved_data = q.filter(VUsersPermissions.module_code.ilike(module_code)).all()
module_cruved = build_cruved_dict(module_cruved_data, get_id)
# for the module
for action_scope in module_cruved_data:
if get_id:
module_cruved[action_scope[0]] = action_scope[2]
else:
module_cruved[action_scope[0]] = action_scope[1]
# get the id for code 0
if get_id:
id_scope_no_data = DB.session.query(TFilters.id_filter).filter(TFilters.value_filter == '0').one()[0]
    # update cruved with child module if action exists, otherwise take geonature cruved
update_cruved = {}
herited = False
for action in cruved_actions:
if action in module_cruved:
update_cruved[action] = module_cruved[action]
elif action in parent_cruved:
update_cruved[action] = parent_cruved[action]
herited = True
else:
if get_id:
update_cruved[action] = id_scope_no_data
else:
update_cruved[action] = '0'
return update_cruved, herited
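# A hedged example of calling the function above; the role id, module code and
# resulting scopes are illustrative assumptions:
#
#     cruved, herited = cruved_scope_for_user_in_module(id_role=42,
#                                                       module_code='OCCTAX')
#     # cruved  -> {'C': '2', 'R': '2', 'U': '1', 'V': '0', 'E': '1', 'D': '1'}
#     # herited -> True when at least one action fell back to the GEONATURE cruved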
def get_or_fetch_user_cruved(
session=None,
id_role=None,
module_code=None,
object_code= 'ALL'
):
"""
Check if the cruved is in the session
if not, get the cruved from the DB with
    cruved_scope_for_user_in_module()
"""
if module_code in session and 'user_cruved' in session[module_code]:
return session[module_code]['user_cruved']
else:
user_cruved = cruved_scope_for_user_in_module(
id_role=id_role,
module_code=module_code,
object_code=object_code
)[0]
session[module_code] = {}
session[module_code]['user_cruved'] = user_cruved
return user_cruved
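# Sketch of typical usage from a view, assuming "session" is the Flask session
# object; the module code and the scope check are illustrative assumptions:
#
#     from flask import session
#     cruved = get_or_fetch_user_cruved(session=session, id_role=42,
#                                       module_code='OCCTAX')
#     if cruved['R'] == '0':
#         raise InsufficientRightsError('cannot read in OCCTAX')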
| bsd-2-clause | 3,092,783,275,424,942,600 | 32.014235 | 124 | 0.612267 | false |
sYnfo/samba | python/samba/netcmd/ntacl.py | 1 | 10204 | # Manipulate file NT ACLs
#
# Copyright Matthieu Patou 2010 <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from samba.credentials import DONT_USE_KERBEROS
import samba.getopt as options
from samba.dcerpc import security, idmap
from samba.ntacls import setntacl, getntacl
from samba import Ldb
from samba.ndr import ndr_unpack, ndr_print
from samba.samdb import SamDB
from samba.samba3 import param as s3param, passdb, smbd
from samba import provision
from ldb import SCOPE_BASE
import os
from samba.auth import system_session
from samba.netcmd import (
Command,
CommandError,
SuperCommand,
Option,
)
class cmd_ntacl_set(Command):
"""Set ACLs on a file."""
synopsis = "%prog <acl> <file> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_options = [
Option("--quiet", help="Be quiet", action="store_true"),
Option("--xattr-backend", type="choice", help="xattr backend type (native fs or tdb)",
choices=["native","tdb"]),
Option("--eadb-file", help="Name of the tdb file where attributes are stored", type="string"),
Option("--use-ntvfs", help="Set the ACLs directly to the TDB or xattr for use with the ntvfs file server", action="store_true"),
Option("--use-s3fs", help="Set the ACLs for use with the default s3fs file server via the VFS layer", action="store_true"),
Option("--service", help="Name of the smb.conf service to use when applying the ACLs", type="string")
]
takes_args = ["acl","file"]
def run(self, acl, file, use_ntvfs=False, use_s3fs=False,
quiet=False,xattr_backend=None,eadb_file=None,
credopts=None, sambaopts=None, versionopts=None,
service=None):
logger = self.get_logger()
lp = sambaopts.get_loadparm()
try:
samdb = SamDB(session_info=system_session(),
lp=lp)
except Exception as e:
raise CommandError("Unable to open samdb:", e)
if not use_ntvfs and not use_s3fs:
use_ntvfs = "smb" in lp.get("server services")
elif use_s3fs:
use_ntvfs = False
try:
domain_sid = security.dom_sid(samdb.domain_sid)
except:
raise CommandError("Unable to read domain SID from configuration files")
s3conf = s3param.get_context()
s3conf.load(lp.configfile)
# ensure we are using the right samba_dsdb passdb backend, no matter what
s3conf.set("passdb backend", "samba_dsdb:%s" % samdb.url)
setntacl(lp, file, acl, str(domain_sid), xattr_backend, eadb_file, use_ntvfs=use_ntvfs, service=service)
if use_ntvfs:
logger.warning("Please note that POSIX permissions have NOT been changed, only the stored NT ACL")
class cmd_ntacl_get(Command):
"""Get ACLs of a file."""
synopsis = "%prog <file> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_options = [
Option("--as-sddl", help="Output ACL in the SDDL format", action="store_true"),
Option("--xattr-backend", type="choice", help="xattr backend type (native fs or tdb)",
choices=["native","tdb"]),
Option("--eadb-file", help="Name of the tdb file where attributes are stored", type="string"),
Option("--use-ntvfs", help="Get the ACLs directly from the TDB or xattr used with the ntvfs file server", action="store_true"),
Option("--use-s3fs", help="Get the ACLs for use via the VFS layer used by the default s3fs file server", action="store_true"),
Option("--service", help="Name of the smb.conf service to use when getting the ACLs", type="string")
]
takes_args = ["file"]
def run(self, file, use_ntvfs=False, use_s3fs=False,
as_sddl=False, xattr_backend=None, eadb_file=None,
credopts=None, sambaopts=None, versionopts=None,
service=None):
lp = sambaopts.get_loadparm()
try:
samdb = SamDB(session_info=system_session(),
lp=lp)
except Exception as e:
raise CommandError("Unable to open samdb:", e)
if not use_ntvfs and not use_s3fs:
use_ntvfs = "smb" in lp.get("server services")
elif use_s3fs:
use_ntvfs = False
s3conf = s3param.get_context()
s3conf.load(lp.configfile)
# ensure we are using the right samba_dsdb passdb backend, no matter what
s3conf.set("passdb backend", "samba_dsdb:%s" % samdb.url)
acl = getntacl(lp, file, xattr_backend, eadb_file, direct_db_access=use_ntvfs, service=service)
if as_sddl:
try:
domain_sid = security.dom_sid(samdb.domain_sid)
except:
raise CommandError("Unable to read domain SID from configuration files")
self.outf.write(acl.as_sddl(domain_sid)+"\n")
else:
self.outf.write(ndr_print(acl))
class cmd_ntacl_sysvolreset(Command):
"""Reset sysvol ACLs to defaults (including correct ACLs on GPOs)."""
synopsis = "%prog <file> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_options = [
Option("--use-ntvfs", help="Set the ACLs for use with the ntvfs file server", action="store_true"),
Option("--use-s3fs", help="Set the ACLs for use with the default s3fs file server", action="store_true")
]
def run(self, use_ntvfs=False, use_s3fs=False,
credopts=None, sambaopts=None, versionopts=None):
lp = sambaopts.get_loadparm()
path = lp.private_path("secrets.ldb")
creds = credopts.get_credentials(lp)
creds.set_kerberos_state(DONT_USE_KERBEROS)
logger = self.get_logger()
netlogon = lp.get("path", "netlogon")
sysvol = lp.get("path", "sysvol")
try:
samdb = SamDB(session_info=system_session(),
lp=lp)
except Exception as e:
raise CommandError("Unable to open samdb:", e)
if not use_ntvfs and not use_s3fs:
use_ntvfs = "smb" in lp.get("server services")
elif use_s3fs:
use_ntvfs = False
domain_sid = security.dom_sid(samdb.domain_sid)
s3conf = s3param.get_context()
s3conf.load(lp.configfile)
# ensure we are using the right samba_dsdb passdb backend, no matter what
s3conf.set("passdb backend", "samba_dsdb:%s" % samdb.url)
LA_sid = security.dom_sid(str(domain_sid)
+"-"+str(security.DOMAIN_RID_ADMINISTRATOR))
BA_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)
s4_passdb = passdb.PDB(s3conf.get("passdb backend"))
# These assertions correct for current ad_dc selftest
# configuration. When other environments have a broad range of
# groups mapped via passdb, we can relax some of these checks
(LA_uid,LA_type) = s4_passdb.sid_to_id(LA_sid)
if (LA_type != idmap.ID_TYPE_UID and LA_type != idmap.ID_TYPE_BOTH):
raise CommandError("SID %s is not mapped to a UID" % LA_sid)
(BA_gid,BA_type) = s4_passdb.sid_to_id(BA_sid)
if (BA_type != idmap.ID_TYPE_GID and BA_type != idmap.ID_TYPE_BOTH):
raise CommandError("SID %s is not mapped to a GID" % BA_sid)
if use_ntvfs:
logger.warning("Please note that POSIX permissions have NOT been changed, only the stored NT ACL")
provision.setsysvolacl(samdb, netlogon, sysvol,
LA_uid, BA_gid, domain_sid,
lp.get("realm").lower(), samdb.domain_dn(),
lp, use_ntvfs=use_ntvfs)
class cmd_ntacl_sysvolcheck(Command):
"""Check sysvol ACLs match defaults (including correct ACLs on GPOs)."""
synopsis = "%prog <file> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
def run(self, credopts=None, sambaopts=None, versionopts=None):
lp = sambaopts.get_loadparm()
path = lp.private_path("secrets.ldb")
creds = credopts.get_credentials(lp)
creds.set_kerberos_state(DONT_USE_KERBEROS)
logger = self.get_logger()
netlogon = lp.get("path", "netlogon")
sysvol = lp.get("path", "sysvol")
try:
samdb = SamDB(session_info=system_session(), lp=lp)
except Exception as e:
raise CommandError("Unable to open samdb:", e)
domain_sid = security.dom_sid(samdb.domain_sid)
provision.checksysvolacl(samdb, netlogon, sysvol,
domain_sid,
lp.get("realm").lower(), samdb.domain_dn(),
lp)
class cmd_ntacl(SuperCommand):
"""NT ACLs manipulation."""
subcommands = {}
subcommands["set"] = cmd_ntacl_set()
subcommands["get"] = cmd_ntacl_get()
subcommands["sysvolreset"] = cmd_ntacl_sysvolreset()
subcommands["sysvolcheck"] = cmd_ntacl_sysvolcheck()
| gpl-3.0 | -2,287,516,163,640,824,300 | 37.946565 | 136 | 0.618875 | false |
lepisma/desky | desky.py | 1 | 3731 | """
Desky
-----
Wrap your web app in desktop frame
"""
import sys, subprocess
import socket
import time
from PyQt4.Qt import *
import json
MAX_PORT_SCAN_TRIES = 10 # 20 secs
def print_help():
"""
Prints help for commands
"""
print "Usage : `python desky.py` for running app"
print "`python desky.py pack` for packing"
print "`python desky.py packupx <upx-dir-path>` for packing with upx compression"
def port_check(port, host = '127.0.0.1'):
"""
Checks whether the port is open or not
Parameters
----------
port : int
The port to check for
host : string
        The host to check against
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((host, port))
s.close()
except:
return False
return True
class Desky(QWidget):
"""
The GUI Class
Opens the url in qt webview
"""
def __init__(self, url, name, server_process):
"""
Parameters
----------
url : string
The url to load in the frame
name : string
The name of frame (to be shown as window title)
server_process : subprocess.Popen or bool
The process which is handling the webpage
"""
QWidget.__init__(self)
self.setWindowTitle(name)
self.layout = QGridLayout(self)
self.layout.setMargin(0)
self.layout.setSpacing(0)
self.view = QWebView(self)
self.view.setUrl(QUrl(url))
self.layout.addWidget(self.view, 0, 0, 1, 1)
self.server_process = server_process
def closeEvent(self, event):
"""
Kills the server process and quits
"""
if self.server_process != False:
self.server_process.kill()
event.accept()
def main():
"""
Main function
Scans directory for desky_config.json
Runs server (if needed)
Passes URL to qt webkit
"""
# Loading config
try:
config = json.load(open('desky_config.json', 'rb'))
except IOError as e:
if e.errno == 2:
print "Config file not found"
else:
print "Something wicked happened while reading config"
sys.exit()
try:
url = config['url']
except KeyError:
print "No url specified, exiting"
sys.exit()
try:
cmd = config['cmd']
server_process = subprocess.Popen(cmd)
except KeyError:
cmd = False
server_process = False
print "No command to run, opening frame now"
if cmd != False:
try:
check_port = config['check_port']
except KeyError:
print "No check port specified, exiting"
sys.exit()
else:
check_port = False
try:
name = config['name']
except KeyError:
print "No name specified, using 'Desky'"
name = "Desky"
if check_port != False:
# Checking if server is up
tries = 0
while port_check(check_port) == False:
time.sleep(2)
tries += 1
if tries > MAX_PORT_SCAN_TRIES:
break
app = QApplication(sys.argv)
frame = Desky(url, name, server_process)
frame.show()
app.exec_()
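# A minimal desky_config.json sketch matching the keys read in main(); the
# values below are assumptions for illustration only:
#
#     {
#         "name": "MyApp",
#         "url": "http://127.0.0.1:5000",
#         "cmd": ["python", "server.py"],
#         "check_port": 5000
#     }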
def pack(upx = False):
"""
Packs the app using pyinstaller
Parameters
----------
upx : string / bool
Path to upx directory for compression or False for no upx
"""
try:
config = json.load(open('desky_config.json', 'rb'))
except IOError as e:
if e.errno == 2:
print "Config file not found"
else:
print "Something wicked happened while reading config"
config = False
if config != False:
try:
name = config['name']
except KeyError:
name = 'Desky'
else:
name = 'Desky'
command = "pyinstaller desky.py --name=" + name + " --onefile --noconsole --distpath=./"
if upx != False:
command += " --upx-dir=" + upx
subprocess.call(command)
if __name__ == '__main__':
if len(sys.argv) == 1:
main()
elif len(sys.argv) == 2:
if sys.argv[1] == "pack":
pack()
else:
print_help()
elif len(sys.argv) == 3:
if sys.argv[1] == "packupx":
pack(sys.argv[2])
else:
print_help()
else:
        print_help()
| mit | 5,337,855,102,648,143,000 | 17.66 | 89 | 0.648352 | false |
allanliebold/data-structures | src/test_linked_list.py | 1 | 2515 | """Tests for singly-linked list."""
import pytest
def test_node_attributes():
"""Test that node object has expected attributes."""
from linked_list import Node
n = Node('test')
assert n.data == 'test' and n.next_node is None
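# The tests below assume roughly this interface from the linked_list module;
# the sketch is a readability aid and an assumption, not the implementation
# under test:
#
#     class Node:
#         def __init__(self, data, next_node=None):
#             self.data = data
#             self.next_node = next_node
#
#     class LinkedList:
#         def __init__(self, iterable=None): ...  # push() each item if given
#         def push(self, data): ...               # new node becomes head
#         def pop(self): ...                      # remove head, return its data or None
#         def search(self, data): ...             # return matching node or raise ValueError
#         def size(self): ...                     # number of nodes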
def test_list_push():
"""Test that linked_list has node pushed to it."""
from linked_list import LinkedList
linked_list = LinkedList()
linked_list.push(1)
assert linked_list.head.data == 1
def test_list_push_next():
"""Test push with second node.
Head should be new node, next attribute should point to previous head.
"""
from linked_list import LinkedList
linked_list = LinkedList()
linked_list.push('first')
linked_list.push('second')
assert linked_list.head.data == 'second' and linked_list.head.next_node.data == 'first'
def test_list_push_iterable():
"""."""
from linked_list import LinkedList
datas = [1, 2, 3, 4, 5]
linked_list = LinkedList(datas)
for i in datas:
assert linked_list.search(i).data == i
def test_list_pop():
"""Test that pop returns the data of the deleted node."""
from linked_list import LinkedList
linked_list = LinkedList()
linked_list.push(5)
assert linked_list.pop() == 5
def test_list_pop_empty():
"""Test pop called on an empty linked list."""
from linked_list import LinkedList
linked_list = LinkedList()
assert linked_list.pop() is None
def test_list_search():
"""Test that search method returns the node with the data passed."""
from linked_list import LinkedList
linked_list = LinkedList()
linked_list.push(1)
linked_list.push('target')
linked_list.push(3)
assert linked_list.search('target').data == 'target'
def test_list_search_invalid():
"""Test that search for node not in list raises dataError."""
from linked_list import LinkedList
linked_list = LinkedList()
linked_list.push(1)
linked_list.push(2)
linked_list.push(3)
with pytest.raises(ValueError):
linked_list.search('target')
def test_list_size():
"""Test that size method returns correct number."""
from linked_list import LinkedList
linked_list = LinkedList()
for i in range(10):
linked_list.push(i)
assert linked_list.size() == 10
def test_string_not_iterated_upon_init():
"""Test that strings passed on init are not split."""
from linked_list import LinkedList
linked_list = LinkedList('68')
assert linked_list.head.data == '68'
| mit | 2,257,836,508,669,859,300 | 26.944444 | 91 | 0.662028 | false |
fcbond/OMW | omw/__init__.py | 1 | 28562 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, sys, sqlite3, datetime, urllib, gzip, requests
from time import sleep
from flask import Flask, render_template, g, request, redirect, url_for, send_from_directory, session, flash, jsonify, make_response, Markup, Response
from flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user, wraps
from itsdangerous import URLSafeTimedSerializer # for safe session cookies
from collections import defaultdict as dd
from collections import OrderedDict as od
from hashlib import md5
from werkzeug import secure_filename
from lxml import etree
from packaging.version import Version
## profiler
#from werkzeug.contrib.profiler import ProfilerMiddleware
from common_login import *
from common_sql import *
from omw_sql import *
from wn_syntax import *
from math import log
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = "!$flhgSgngNO%$#SOET!$!"
app.config["REMEMBER_COOKIE_DURATION"] = datetime.timedelta(minutes=30)
## profiler
#app.config['PROFILE'] = True
#app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
#app.run(debug = True)
################################################################################
# LOGIN
################################################################################
login_manager.init_app(app)
@app.route("/login", methods=["GET", "POST"])
def login():
""" This login function checks if the username & password
match the admin.db; if the authentication is successful,
it passes the id of the user into login_user() """
if request.method == "POST" and \
"username" in request.form and \
"password" in request.form:
username = request.form["username"]
password = request.form["password"]
user = User.get(username)
# If we found a user based on username then compare that the submitted
# password matches the password in the database. The password is stored
# is a slated hash format, so you must hash the password before comparing it.
if user and hash_pass(password) == user.password:
login_user(user, remember=True)
# FIXME! Get this to work properly...
# return redirect(request.args.get("next") or url_for("index"))
return redirect(url_for("index"))
else:
flash(u"Invalid username, please try again.")
return render_template("login.html")
@app.route("/logout")
@login_required(role=0, group='open')
def logout():
logout_user()
return redirect(url_for("index"))
################################################################################
################################################################################
# SET UP CONNECTION WITH DATABASES
################################################################################
@app.before_request
def before_request():
g.admin = connect_admin()
g.omw = connect_omw()
@app.teardown_request
def teardown_request(exception):
if hasattr(g, 'db'):
g.admin.close()
g.omw.close()
################################################################################
################################################################################
# AJAX REQUESTS
################################################################################
@app.route('/_thumb_up_id')
def thumb_up_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
rate = 1
r = rate_ili_id(ili_id, rate, user)
counts, up_who, down_who = f_rate_summary([ili_id])
html = """ <span style="color:green" title="{}">+{}</span><br>
<span style="color:red" title="{}">-{}</span>
""".format(up_who[int(ili_id)], counts[int(ili_id)]['up'],
down_who[int(ili_id)], counts[int(ili_id)]['down'])
return jsonify(result=html)
@app.route('/_thumb_down_id')
def thumb_down_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
rate = -1
r = rate_ili_id(ili_id, rate, user)
counts, up_who, down_who = f_rate_summary([ili_id])
html = """ <span style="color:green" title="{}">+{}</span><br>
<span style="color:red" title="{}">-{}</span>
""".format(up_who[int(ili_id)], counts[int(ili_id)]['up'],
down_who[int(ili_id)], counts[int(ili_id)]['down'])
return jsonify(result=html)
@app.route('/_comment_id')
def comment_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
comment = request.args.get('comment', None)
comment = str(Markup.escape(comment))
dbinsert = comment_ili_id(ili_id, comment, user)
return jsonify(result=dbinsert)
@app.route('/_detailed_id')
def detailed_id():
ili_id = request.args.get('ili_id', None)
rate_hist = fetch_rate_id([ili_id])
comm_hist = fetch_comment_id([ili_id])
users = fetch_allusers()
r_html = ""
for r, u, t in rate_hist[int(ili_id)]:
r_html += '{} ({}): {} <br>'.format(users[u]['userID'], t, r)
c_html = ""
for c, u, t in comm_hist[int(ili_id)]:
c_html += '{} ({}): {} <br>'.format(users[u]['userID'], t, c)
html = """
<td colspan="9">
<div style="width: 49%; float:left;">
<h6>Ratings</h6>
{}</div>
<div style="width: 49%; float:right;">
<h6>Comments</h6>
{}</div>
</td>""".format(r_html, c_html)
return jsonify(result=html)
@app.route('/_confirm_wn_upload')
def confirm_wn_upload_id():
user = fetch_id_from_userid(current_user.id)
fn = request.args.get('fn', None)
upload = confirmUpload(fn, user)
labels = updateLabels()
return jsonify(result=upload)
@app.route('/_add_new_project')
def add_new_project():
user = fetch_id_from_userid(current_user.id)
proj = request.args.get('proj_code', None)
proj = str(Markup.escape(proj))
if user and proj:
dbinsert = insert_new_project(proj, user)
return jsonify(result=dbinsert)
else:
return jsonify(result=False)
@app.route("/_load_lang_selector",methods=["GET"])
def omw_lang_selector():
selected_lang = request.cookies.get('selected_lang')
selected_lang2 = request.cookies.get('selected_lang2')
lang_id, lang_code = fetch_langs()
html = '<select name="lang" style="font-size: 85%; width: 9em" required>'
for lid in lang_id.keys():
if selected_lang == str(lid):
html += """<option value="{}" selected>{}</option>
""".format(lid, lang_id[lid][1])
else:
html += """<option value="{}">{}</option>
""".format(lid, lang_id[lid][1])
html += '</select>'
html += '<select name="lang2" style="font-size: 85%; width: 9em" required>'
for lid in lang_id.keys():
if selected_lang2 == str(lid):
html += """<option value="{}" selected>{}</option>
""".format(lid, lang_id[lid][1])
else:
html += """<option value="{}">{}</option>
""".format(lid, lang_id[lid][1])
html += '</select>'
return jsonify(result=html)
@app.route('/_add_new_language')
def add_new_language():
user = fetch_id_from_userid(current_user.id)
bcp = request.args.get('bcp', None)
bcp = str(Markup.escape(bcp))
iso = request.args.get('iso', None)
iso = str(Markup.escape(iso))
name = request.args.get('name', None)
name = str(Markup.escape(name))
if bcp and name:
dbinsert = insert_new_language(bcp, iso, name, user)
return jsonify(result=dbinsert)
else:
return jsonify(result=False)
@app.route('/_load_proj_details')
def load_proj_details():
proj_id = request.args.get('proj', 0)
if proj_id:
proj_id = int(proj_id)
else:
proj_id = None
projs = fetch_proj()
srcs = fetch_src()
srcs_meta = fetch_src_meta()
html = str()
if proj_id:
i = 0
for src_id in srcs.keys():
if srcs[src_id][0] == projs[proj_id]:
i += 1
html += "<br><p><b>Source {}: {}-{}</b></p>".format(i,
projs[proj_id],srcs[src_id][1])
for attr, val in srcs_meta[src_id].items():
html += "<p style='margin-left: 40px'>"
html += attr + ": " + val
html += "</p>"
return jsonify(result=html)
@app.route('/_load_min_omw_concept/<ss>')
@app.route('/_load_min_omw_concept_ili/<ili_id>')
def min_omw_concepts(ss=None, ili_id=None):
if ili_id:
ss_ids = f_ss_id_by_ili_id(ili_id)
else:
ss_ids = [ss]
pos = fetch_pos()
langs_id, langs_code = fetch_langs()
ss, senses, defs, exes, links = fetch_ss_basic(ss_ids)
ssrels = fetch_ssrel()
return jsonify(result=render_template('min_omw_concept.html',
pos = pos,
langs = langs_id,
senses=senses,
ss=ss,
links=links,
ssrels=ssrels,
defs=defs,
exes=exes))
@app.route('/_load_min_omw_sense/<sID>')
def min_omw_sense(sID=None):
if sID:
s_id=int(sID)
langs_id, langs_code = fetch_langs()
pos = fetch_pos()
sense = fetch_sense(s_id)
forms=fetch_forms(sense[3])
selected_lang = int(request.cookies.get('selected_lang'))
labels= fetch_labels(selected_lang,[sense[4]])
src_meta= fetch_src_meta()
src_sid=fetch_src_for_s_id([s_id])
sdefs = fetch_defs_by_sense([s_id])
sdef = ''
if selected_lang in sdefs[s_id]:
sdef = sdefs[s_id][selected_lang] ## requested language
else:
            sdef = sdefs[s_id][min(sdefs[s_id].keys())] ## fall back to any language
# return jsonify(result=render_template('omw_sense.html',
return jsonify(result=render_template('min_omw_sense.html',
s_id = s_id,
sdef=sdef,
sense = sense,
forms=forms,
langs = langs_id,
pos = pos,
labels = labels,
src_sid = src_sid,
src_meta = src_meta))
# l=lambda:dd(l)
# vr = l() # wn-lmf validation report
# @app.route('/_report_val1')
# def report_val1():
# filename = request.args.get('fn', None)
# if filename:
# vr1 = val1_DTD(current_user, filename)
# vr.update(vr1)
# if vr1['dtd_val'] == True:
# html = "DTD PASSED"
# return jsonify(result=html)
# else:
# html = "DTD FAILED" + '<br>' + vr['dtd_val_errors']
# return jsonify(result=html)
# else:
# return jsonify(result="ERROR")
@app.route('/_report_val2', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def report_val2():
filename = request.args.get('fn', None)
vr, filename, wn, wn_dtls = validateFile(current_user.id, filename)
return jsonify(result=render_template('validation-report.html',
vr=vr, wn=wn, wn_dtls=wn_dtls, filename=filename))
# validateFile()
# filename = request.args.get('fn', None)
# if filename:
# vr = val1_DTD(current_user, filename)
# if vr['dtd_val'] == True:
# html = "DTD PASSED"
# return jsonify(result=html)
# else:
# html = "DTD FAILED" + '<br>' + vr['dtd_val_errors']
# return jsonify(result=html)
# else:
# return jsonify(result="ERROR")
# return jsonify(result="TEST_VAL2")
################################################################################
################################################################################
# VIEWS
################################################################################
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/ili', methods=['GET', 'POST'])
def ili_welcome(name=None):
return render_template('ili_welcome.html')
@app.route('/omw', methods=['GET', 'POST'])
def omw_welcome(name=None):
projects = request.args.get('projects','current')
#print(projects)
lang_id, lang_code = fetch_langs()
src_meta=fetch_src_meta()
### sort by language, project version (Newest first)
src_sort=od()
keys=list(src_meta.keys())
keys.sort(key=lambda x: Version(src_meta[x]['version']),reverse=True) #Version
keys.sort(key=lambda x: src_meta[x]['id']) #id
keys.sort(key=lambda x: lang_id[lang_code['code'][src_meta[x]['language']]][1]) #Language
for k in keys:
if projects=='current': # only get the latest version
if src_meta[k]['version'] != max((src_meta[i]['version'] for i in src_meta
if src_meta[i]['id'] == src_meta[k]['id']),
key=lambda x: Version(x)):
continue
src_sort[k] = src_meta[k]
return render_template('omw_welcome.html',
src_meta=src_sort,
lang_id=lang_id,
lang_code=lang_code,
licenses=licenses)
@app.route('/wordnet', methods=['GET', 'POST'])
def wordnet_license(name=None):
return render_template('wordnet_license.html')
@app.route('/omw_wns', methods=['GET', 'POST'])
def omw_wns(name=None):
projects = request.args.get('projects','current')
src_meta=fetch_src_meta()
stats = []
lang_id, lang_code = fetch_langs()
### sort by language name (1), id, version (FIXME -- reverse version)
src_sort=od()
keys=list(src_meta.keys())
keys.sort(key=lambda x: Version(src_meta[x]['version']),reverse=True) #Version
keys.sort(key=lambda x: src_meta[x]['id']) #id
keys.sort(key=lambda x: lang_id[lang_code['code'][src_meta[x]['language']]][1]) #Language
for k in keys:
if projects=='current': # only get the latest version
if src_meta[k]['version'] != max((src_meta[i]['version'] for i in src_meta
if src_meta[i]['id'] == src_meta[k]['id']),
key=lambda x: Version(x)):
continue
stats.append((src_meta[k], fetch_src_id_stats(k)))
return render_template('omw_wns.html',
stats=stats,
lang_id=lang_id,
lang_code=lang_code,
licenses=licenses)
@app.route("/useradmin",methods=["GET"])
@login_required(role=99, group='admin')
def useradmin():
users = fetch_allusers()
return render_template("useradmin.html", users=users)
@app.route("/langadmin",methods=["GET"])
@login_required(role=99, group='admin')
def langadmin():
lang_id, lang_code = fetch_langs()
return render_template("langadmin.html", langs=lang_id)
@app.route("/projectadmin",methods=["GET"])
@login_required(role=99, group='admin')
def projectadmin():
projs = fetch_proj()
return render_template("projectadmin.html", projs=projs)
@app.route('/allconcepts', methods=['GET', 'POST'])
def allconcepts():
ili, ili_defs = fetch_ili()
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/temporary', methods=['GET', 'POST'])
def temporary():
ili = fetch_ili_status(2)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/deprecated', methods=['GET', 'POST'])
def deprecated():
ili = fetch_ili_status(0)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/ili/concepts/<c>', methods=['GET', 'POST'])
def concepts_ili(c=None):
c = c.split(',')
ili, ili_defs = fetch_ili(c)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/ili/search', methods=['GET', 'POST'])
@app.route('/ili/search/<q>', methods=['GET', 'POST'])
def search_ili(q=None):
if q:
query = q
else:
query = request.form['query']
src_id = fetch_src()
kind_id = fetch_kind()
status_id = fetch_status()
ili = dict()
for c in query_omw("""SELECT * FROM ili WHERE def GLOB ?
""", [query]):
ili[c['id']] = (kind_id[c['kind_id']], c['def'],
src_id[c['origin_src_id']], c['src_key'],
status_id[c['status_id']], c['superseded_by_id'],
c['t'])
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/upload', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def upload():
return render_template('upload.html')
@app.route('/metadata', methods=['GET', 'POST'])
def metadata():
return render_template('metadata.html')
@app.route('/join', methods=['GET', 'POST'])
def join():
return render_template('join.html')
@app.route('/omw/uploads/<filename>')
def download_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename, as_attachment=True)
@app.route('/ili/validation-report', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def validationReport():
vr, filename, wn, wn_dtls = validateFile(current_user.id)
return render_template('validation-report.html',
vr=vr, wn=wn, wn_dtls=wn_dtls,
filename=filename)
@app.route('/ili/report', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def report():
passed, filename = uploadFile(current_user.id)
return render_template('report.html',
passed=passed,
filename=filename)
# return render_template('report.html')
@app.route('/omw/search', methods=['GET', 'POST'])
@app.route('/omw/search/<lang>,<lang2>/<q>', methods=['GET', 'POST'])
def search_omw(lang=None, q=None):
if lang and q:
lang_id = lang
lang_id2 = lang2
query = q
else:
lang_id = request.form['lang']
lang_id2 = request.form['lang2']
query = request.form['query']
query = query.strip()
sense = dd(list)
lang_sense = dd(lambda: dd(list))
# GO FROM FORM TO SENSE
for s in query_omw("""
SELECT s.id as s_id, ss_id, wid, fid, lang_id, pos_id, lemma
FROM (SELECT w_id as wid, form.id as fid, lang_id, pos_id, lemma
FROM (SELECT id, lang_id, pos_id, lemma
FROM f WHERE lemma GLOB ? AND lang_id in (?,?)) as form
JOIN wf_link ON form.id = wf_link.f_id) word
JOIN s ON wid=w_id
""", ['['+query[0].upper() + query[0].lower()+']'+query[1:],
lang_id,
lang_id2]):
sense[s['ss_id']] = [s['s_id'], s['wid'], s['fid'],
s['lang_id'], s['pos_id'], s['lemma']]
lang_sense[s['lang_id']][s['ss_id']] = [s['s_id'], s['wid'], s['fid'],
s['pos_id'], s['lemma']]
pos = fetch_pos()
lang_dct, lang_code = fetch_langs()
ss, senses, defs, exes, links = fetch_ss_basic(sense.keys())
labels = fetch_labels(lang_id, set(senses.keys()))
resp = make_response(render_template('omw_results.html',
langsel = int(lang_id),
langsel2 = int(lang_id2),
pos = pos,
lang_dct = lang_dct,
sense=sense,
senses=senses,
ss=ss,
links=links,
defs=defs,
exes=exes,
labels=labels))
resp.set_cookie('selected_lang', lang_id)
resp.set_cookie('selected_lang2', lang_id2)
return resp
@app.route('/omw/core', methods=['GET', 'POST'])
def omw_core(): ### FIXME add lang as a parameter?
return render_template('omw_core.html')
@app.route('/omw/concepts/<ssID>', methods=['GET', 'POST'])
@app.route('/omw/concepts/ili/<iliID>', methods=['GET', 'POST'])
def concepts_omw(ssID=None, iliID=None):
if iliID:
ss_ids = f_ss_id_by_ili_id(iliID)
ili, ilidefs = fetch_ili([iliID])
else:
ss_ids = [ssID]
ili, ili_defs = dict(), dict()
pos = fetch_pos()
langs_id, langs_code = fetch_langs()
ss, senses, defs, exes, links = fetch_ss_basic(ss_ids)
if (not iliID) and int(ssID) in ss:
iliID = ss[int(ssID)][0]
ili, ilidefs = fetch_ili([iliID])
sss = list(ss.keys())
for s in links:
for l in links[s]:
sss.extend(links[s][l])
selected_lang = request.cookies.get('selected_lang')
labels = fetch_labels(selected_lang, set(sss))
ssrels = fetch_ssrel()
ss_srcs=fetch_src_for_ss_id(ss_ids)
src_meta=fetch_src_meta()
core_ss, core_ili = fetch_core()
s_ids = []
for x in senses:
for y in senses[x]:
for (s_id, lemma, freq) in senses[x][y]:
s_ids.append(s_id)
slinks = fetch_sense_links(s_ids)
return render_template('omw_concept.html',
ssID=ssID,
iliID=iliID,
pos = pos,
langs = langs_id,
senses=senses,
slinks=slinks,
ss=ss,
links=links,
ssrels=ssrels,
defs=defs,
exes=exes,
ili=ili,
selected_lang = selected_lang,
selected_lang2 = request.cookies.get('selected_lang2'),
labels=labels,
ss_srcs=ss_srcs,
src_meta=src_meta,
core=core_ss)
@app.route('/omw/senses/<sID>', methods=['GET', 'POST'])
def omw_sense(sID=None):
"""display a single sense (and its variants)"""
if sID:
langs_id, langs_code = fetch_langs()
pos = fetch_pos()
s_id=int(sID)
sense = fetch_sense(s_id)
slinks = fetch_sense_links([s_id])
forms=fetch_forms(sense[3])
selected_lang = int(request.cookies.get('selected_lang'))
labels= fetch_labels(selected_lang,[sense[4]])
src_meta= fetch_src_meta()
src_sid=fetch_src_for_s_id([s_id])
srel = fetch_srel()
## get the canonical form for each linked sense
slabel=fetch_sense_labels([x for v in slinks[int(s_id)].values() for x in v])
sdefs = fetch_defs_by_sense([s_id])
sdef = ''
if selected_lang in sdefs[s_id]:
sdef = sdefs[s_id][selected_lang] ## requested language
else:
            sdef = sdefs[s_id][min(sdefs[s_id].keys())] ## fall back to any language
return render_template('omw_sense.html',
s_id = sID,
sdef = sdef,
sense = sense,
slinks = slinks[s_id],
srel = srel,
forms=forms,
langs = langs_id,
pos = pos,
labels = labels,
slabel = slabel,
src_sid = src_sid,
src_meta = src_meta)
# URIs FOR ORIGINAL CONCEPT KEYS, BY INDIVIDUAL SOURCES
@app.route('/omw/src/<src>/<originalkey>', methods=['GET', 'POST'])
def src_omw(src=None, originalkey=None):
try:
proj = src[:src.index('-')]
ver = src[src.index('-')+1:]
src_id = f_src_id_by_proj_ver(proj, ver)
except:
src_id = None
if src_id:
ss = fetch_ss_id_by_src_orginalkey(src_id, originalkey)
else:
ss = None
return concepts_omw(ss)
## show wn statistics
##
##
@app.route('/omw/src/<src>', methods=['GET', 'POST'])
def omw_wn(src=None):
if src:
try:
proj = src[:src.index('-')]
ver = src[src.index('-')+1:]
src_id = f_src_id_by_proj_ver(proj, ver)
except:
src_id = None
srcs_meta = fetch_src_meta()
src_info = srcs_meta[src_id]
return render_template('omw_wn.html',
wn = src,
src_id=src_id,
src_info=src_info,
ssrel_stats=fetch_ssrel_stats(src_id),
pos_stats= fetch_src_id_pos_stats(src_id),
src_stats=fetch_src_id_stats(src_id),
licenses=licenses)
@app.route('/omw/src-latex/<src>', methods=['GET', 'POST'])
def omw_wn_latex(src=None):
if src:
try:
proj = src[:src.index('-')]
ver = src[src.index('-')+1:]
src_id = f_src_id_by_proj_ver(proj, ver)
except:
src_id = None
srcs_meta = fetch_src_meta()
src_info = srcs_meta[src_id]
return render_template('omw_wn_latex.html',
wn = src,
src_id=src_id,
src_info=src_info,
ssrel_stats=fetch_ssrel_stats(src_id),
pos_stats= fetch_src_id_pos_stats(src_id),
src_stats=fetch_src_id_stats(src_id))
@app.route('/cili.tsv')
def generate_cili_tsv():
tsv="""# omw_id ili_id projects\n"""
srcs = fetch_src()
ss =dict()
r = query_omw_direct("SELECT id, ili_id from ss")
for (ss_id, ili_id) in r:
ss[ss_id] = [ili_id]
src = dd(list)
r = query_omw_direct("SELECT ss_id, src_id, src_key from ss_src")
for (ss_id, src_id, src_key) in r:
src[ss_id].append("{}-{}:{}".format(srcs[src_id][0],
srcs[src_id][1],
src_key))
for ss_id in ss:
ili = 'i' + str(ss[ss_id][0]) if ss[ss_id][0] else 'None'
tsv += "{}\t{}\t{}\n".format(ss_id, ili, ";".join(src[ss_id]))
return Response(tsv, mimetype='text/tab-separated-values')
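# For illustration, a generated row looks roughly like the line below; the ids
# and project keys are made-up assumptions (fields are tab-separated):
#
#     123\ti456\tpwn-3.0:00001740-a;wnja-1.1:00001740-a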
@app.context_processor
def utility_processor():
def scale_freq(f, maxfreq=1000):
if f > 0:
return 100 + 100 * log(f)/log(maxfreq)
else:
return 100
return dict(scale_freq=scale_freq)
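# Worked example for scale_freq above: with maxfreq=1000 a frequency of 10
# maps to 100 + 100*log(10)/log(1000) = 100 + 100/3, i.e. about 133.3 (used as
# a percent font size), while f == 0 falls back to 100.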
# def style_sense(freq, conf, lang):
# """show confidence as opacity, show freq as size
# opacity is the square of the confidence
# freq is scaled as a % of maxfreq for that language
# TODO: highlight a word if searched for?"""
# style = ''
# if conf and conf < 1.0: ## should not be more than 1.0
# style += 'opacity: {f};'.format(conf*conf) ## degrade quicker
# if freq:
# ### should I be using a log here?
# maxfreq=1000 #(should do per lang)
# style += 'font-size: {f}%;'.format(100*(1+ log(freq)/log(maxfreq)))
# if style:
# style = "style='{}'".format(style)
## show proj statistics
#for proj in fetch_proj/
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', threaded=True)
| mit | -2,032,441,808,575,662,300 | 33.165072 | 150 | 0.51418 | false |
schef/schef.github.io | source/07/mc-7-4-tp-cde-md.py | 1 | 2572 | #!/usr/bin/python
# Written by Stjepan Horvat
# ( [email protected] )
# by the exercises from David Lucal Burge - Perfect Pitch Ear Traning Supercourse
# Thanks to Wojciech M. Zabolotny ( [email protected] ) for snd-virmidi example
# ( [email protected] )
import random
import time
import sys
import re
fname="/dev/snd/midiC2D0"
#fname=sys.argv[1]
fin=open(fname,"rb")
fout=open(fname,"wb")
#keymin=int(sys.argv[2])
#keymax=int(sys.argv[3])
#keymin=int(60)
#keymax=int(72)
#c major scale
print ("Exercise 7-4:")
print ("C D and E. Harmonic and melodic pitch indentification. Melodic doubles.")
#from c to c'' white tones
#c major scale
#notes = [ 36, 38, 40, 41, 43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64, 65, 67, 69, 71, 72, 74, 76, 77, 79, 81, 83, 84, 86, 88, 89, 91, 93, 95, 96 ]
notes = [ 36, 38, 40, 48, 50, 52, 60, 62, 64, 72, 74, 76, 84, 86, 88, 96 ]
noteC = [ 36, 48, 60, 72, 84, 96 ]
def playNote(note):
fout.write((chr(0x90)+chr(note)+chr(127)).encode('utf-8'))
fout.flush()
time.sleep(0.7)
fout.write((chr(0x80)+chr(note)+chr(127)).encode('utf-8'))
fout.flush()
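# playNote writes raw MIDI bytes to the virtual MIDI port: 0x90 is a note-on
# message on channel 1 and 0x80 the matching note-off, both with velocity 127,
# so e.g. playNote(60) sounds middle C for roughly 0.7 seconds.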
def nameNote(note):
if note in noteC:
return("C")
elif note-2 in noteC:
return("D")
elif note-4 in noteC:
return("E")
elif note-5 in noteC:
return("F")
elif note-7 in noteC:
return("G")
elif note-9 in noteC:
return("A")
elif note-11 in noteC:
return("H")
def name2Note(name):
if name == "c":
return(60)
if name == "d":
return(62)
if name == "e":
return(64)
usage = "Usage: 1-repeat, <note> <note> \"c d\", ?-usage."
round = 1
a = re.compile("^[c-e] [c-e]$")
try:
print(usage)
while True:
noteOne = random.choice(notes)
while True:
noteTwo = random.choice(notes)
if nameNote(noteOne) != nameNote(noteTwo):
break
match = False
while not match:
done = False
playNote(noteOne)
playNote(noteTwo)
while not done:
n = input("? ")
if n == "1":
playNote(noteOne)
playNote(noteTwo)
if n == "?":
print(usage)
        #TODO: bug: it accepts everything instead of only note names
elif a.match(n):
splitNote = n.split()
if splitNote[0] == nameNote(noteOne).lower() and splitNote[1] == nameNote(noteTwo).lower():
round += 1
print("Correct. Next round. " + str(round) + ".:")
done = True
match = True
else:
playNote(name2Note(splitNote[0]))
playNote(name2Note(splitNote[1]))
except KeyboardInterrupt:
pass
| mit | -7,023,858,542,194,064,000 | 24.979798 | 155 | 0.588647 | false |
dsorokin/aivika-modeler | tests/submodel_test.py | 1 | 1158 | #!/usr/local/bin/python3
# NOTE: The model itself is quite meaningless. The purpose is
# to check some features. Consider it like an unit-test
from simulation.aivika.modeler import *
model = MainModel()
submodel1 = SubModel(model, name = 'submodel1')
submodel2 = SubModel(submodel1, name = 'submodel2')
data_type = TransactType(model, 'Transact')
input_stream = uniform_random_stream(data_type, 3, 7)
input_queue = create_queue(submodel1, data_type, 10, name = 'queue', descr = 'The input queue')
input_queue_source = input_queue.add_result_source()
enqueue_stream_or_remove_item(input_queue, input_stream)
server = uniform_random_server(data_type, 1, 2, name = 'server', descr = 'The server')
server_source = server.add_result_source()
arrival_timer = create_arrival_timer(submodel2, name = 'arrivalTimer', descr = 'The arrival timer')
arrival_timer_source = arrival_timer.add_result_source()
output_stream0 = dequeue_stream(input_queue)
output_stream1 = server_stream(server, output_stream0)
output_stream = arrival_timer_stream(arrival_timer, output_stream1)
terminate_stream(output_stream)
specs = Specs(0, 100, 0.1)
model.run(specs)
| bsd-3-clause | 1,714,945,578,067,700,500 | 32.085714 | 99 | 0.74266 | false |
hirofumi0810/asr_preprocessing | swbd/input_data.py | 1 | 8525 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Make input data (Switchboard corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, basename
import numpy as np
import pickle
from tqdm import tqdm
from utils.util import mkdir_join
from utils.inputs.segmentation import segment
from utils.inputs.htk import read, write
def read_audio(audio_paths, speaker_dict, tool, config, normalize, is_training,
save_path=None, save_format=None, global_mean=None, global_std=None,
dtype=np.float32):
"""Read HTK or WAV files.
Args:
audio_paths (list): paths to HTK or WAV files
speaker_dict (dict): A dictionary of speakers' gender information
key (string) => speaker
value (dict) => dictionary of utterance information of each speaker
key (string) => utterance index
value (list) => [start_frame, end_frame, transcript]
tool (string): the tool to extract features,
htk or librosa or python_speech_features
config (dict): a configuration for feature extraction
normalize (string):
            no => normalization will not be conducted
global => normalize input features by global mean & std over
the training set per gender
speaker => normalize input features by mean & std per speaker
            utterance => normalize input features by mean & std per utterance
is_training (bool): training or not
save_path (string): path to save npy files
save_format (string, optional): numpy as htk
global_mean (np.ndarray, optional): global mean over the training set
global_std (np.ndarray, optional): global standard deviation over the
training set
dtype (optional): the type of data, default is np.float32
Returns:
global_mean (np.ndarray): global mean over the training set
global_std (np.ndarray): global standard deviation over the
training set
frame_num_dict (dict):
key => utterance name
value => the number of frames
"""
if not is_training:
if global_mean is None or global_std is None:
raise ValueError('Set mean & std computed in the training set.')
if normalize not in ['global', 'speaker', 'utterance', 'no']:
raise ValueError(
'normalize must be "utterance" or "speaker" or "global" or "no".')
total_frame_num = 0
total_frame_num_dict = {}
speaker_mean_dict = {}
# Loop 1: Computing global mean and statistics
if is_training and normalize != 'no':
print('=====> Reading audio files...')
for i, audio_path in enumerate(tqdm(audio_paths)):
speaker = basename(audio_path).split('.')[0]
# Fix speaker name
speaker = speaker.replace('sw0', 'sw')
# ex.) sw04771-A => sw4771-A (LDC97S62)
speaker = speaker.replace('sw_', 'sw')
# ex.) sw_4771-A => sw4771-A (eval2000, swbd)
speaker = speaker.replace('en_', 'en')
# ex.) en_4156-A => en4156-A (eval2000, ch)
# Divide each audio file into utterances
_, input_utt_sum, speaker_mean, _, total_frame_num_speaker = segment(
audio_path,
speaker,
speaker_dict[speaker],
is_training=True,
sil_duration=0,
tool=tool,
config=config)
if i == 0:
# Initialize global statistics
feature_dim = input_utt_sum.shape[0]
global_mean = np.zeros((feature_dim,), dtype=dtype)
global_std = np.zeros((feature_dim,), dtype=dtype)
global_mean += input_utt_sum
total_frame_num += total_frame_num_speaker
# For computing speaker stddev
if normalize == 'speaker':
speaker_mean_dict[speaker] = speaker_mean
total_frame_num_dict[speaker] = total_frame_num_speaker
# NOTE: speaker mean is already computed
print('=====> Computing global mean & stddev...')
# Compute global mean
global_mean /= total_frame_num
for audio_path in tqdm(audio_paths):
speaker = basename(audio_path).split('.')[0]
# Normalize speaker name
speaker = speaker.replace('sw0', 'sw')
speaker = speaker.replace('sw_', 'sw')
speaker = speaker.replace('en_', 'en')
# Divide each audio into utterances
input_data_dict_speaker, _, _, _, _ = segment(
audio_path,
speaker,
speaker_dict[speaker],
is_training=True,
sil_duration=0,
tool=tool,
config=config)
# For computing global stddev
for input_utt in input_data_dict_speaker.values():
global_std += np.sum(
np.abs(input_utt - global_mean) ** 2, axis=0)
# Compute global stddev
global_std = np.sqrt(global_std / (total_frame_num - 1))
if save_path is not None:
# Save global mean & std per gender
np.save(join(save_path, 'global_mean.npy'), global_mean)
np.save(join(save_path, 'global_std.npy'), global_std)
# Loop 2: Normalization and Saving
print('=====> Normalization...')
frame_num_dict = {}
sampPeriod, parmKind = None, None
for audio_path in tqdm(audio_paths):
speaker = basename(audio_path).split('.')[0]
# Normalize speaker name
speaker = speaker.replace('sw0', 'sw')
speaker = speaker.replace('sw_', 'sw')
speaker = speaker.replace('en_', 'en')
if normalize == 'speaker' and is_training:
speaker_mean = speaker_mean_dict[speaker]
else:
speaker_mean = None
# Divide each audio into utterances
input_data_dict_speaker, _, speaker_mean, speaker_std, _ = segment(
audio_path,
speaker,
speaker_dict[speaker],
is_training=is_training,
sil_duration=0,
tool=tool,
config=config,
            mean=speaker_mean)  # for computing speaker stddev
        # NOTE: input_data_dict_speaker has not been normalized yet
for utt_index, input_utt in input_data_dict_speaker.items():
if normalize == 'no':
pass
elif normalize == 'global' or not is_training:
# Normalize by mean & std over the training set
input_utt -= global_mean
input_utt /= global_std
elif normalize == 'speaker':
# Normalize by mean & std per speaker
input_utt = (input_utt - speaker_mean) / speaker_std
elif normalize == 'utterance':
# Normalize by mean & std per utterance
utt_mean = np.mean(input_utt, axis=0, dtype=dtype)
utt_std = np.std(input_utt, axis=0, dtype=dtype)
input_utt = (input_utt - utt_mean) / utt_std
else:
                raise ValueError(
                    'normalize must be "utterance" or "speaker" or "global" or "no".')
frame_num_dict[speaker + '_' + utt_index] = input_utt.shape[0]
if save_path is not None:
# Save input features
if save_format == 'numpy':
input_data_save_path = mkdir_join(
save_path, speaker, speaker + '_' + utt_index + '.npy')
np.save(input_data_save_path, input_utt)
elif save_format == 'htk':
if sampPeriod is None:
_, sampPeriod, parmKind = read(audio_path)
write(input_utt,
htk_path=mkdir_join(
save_path, speaker, speaker + '_' + utt_index + '.htk'),
sampPeriod=sampPeriod,
parmKind=parmKind)
else:
                raise ValueError('save_format must be "numpy" or "htk".')
if save_path is not None:
# Save the frame number dictionary
with open(join(save_path, 'frame_num.pickle'), 'wb') as f:
pickle.dump(frame_num_dict, f)
return global_mean, global_std, frame_num_dict
| mit | 2,988,558,157,063,402,000 | 39.023474 | 86 | 0.55085 | false |
Fity/2code | flaskr/flaskr_tests.py | 1 | 1622 | # -*- coding:utf-8 -*-
import os
import flaskr
import unittest
import tempfile
class FlaskrTestCase(unittest.TestCase):
def setUp(self):
self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp()
flaskr.app.config['TESTING'] = True
self.app = flaskr.app.test_client()
flaskr.init_db()
def tearDown(self):
os.close(self.db_fd)
os.unlink(flaskr.app.config['DATABASE'])
def test_empty_db(self):
rv = self.app.get('/')
assert 'No entries here so far' in rv.data
def login(self, username, passwd):
return self.app.post('/login/', data=dict(
username=username, password=passwd),
follow_redirects=True)
def logout(self):
return self.app.get('/logout/', follow_redirects=True)
def test_loginout(self):
rv = self.login('admin', 'passwd')
assert 'You were logged in' in rv.data
rv = self.logout()
assert 'You have logged out' in rv.data
rv = self.login('adminx', 'passwd')
assert 'Invalid username' in rv.data
rv = self.login('admin', 'defaultx')
assert 'Invalid password' in rv.data
def test_message(self):
self.login('admin', 'passwd')
rv = self.app.post('/add/', data=dict(
title='<Hello>',
text='<strong>HTML</strong> allowed here'
), follow_redirects=True)
assert 'No entries here so far' not in rv.data
assert '<Hello>' in rv.data
assert '<strong>HTML</strong> allowed here' in rv.data
if __name__ == '__main__':
unittest.main()
| mit | -6,352,385,153,454,176,000 | 29.037037 | 70 | 0.590012 | false |
fortesg/fortrantestgenerator | config_fortrantestgenerator.py | 1 | 1665 | import os
ftgDir = os.path.dirname(os.path.realpath(__file__))
# Directory where FortranCallGraph is located
# REQUIRED
FCG_DIR = ftgDir + '/../fortrancallgraph'
# Configuration file to be used by FortranCallGraph
# OPTIONAL: When omitted or None, config variables required by FortranCallGraph must be assigned here
# When empty string (''), FortranCallGraph's default (config_fortrancallgraph.py) will be used.
# Variables can always be overwritten here
FCG_CONFIG_FILE = 'config_fortrancallgraph.py'
# Path of the templates to be used
# REQUIRED
TEMPLATE = ftgDir + '/templates/Standalone/Standalone.tmpl'
# Directory where the test drivers generated by -r will be placed
# REQUIRED
TEST_SOURCE_DIR = ''
# List containing locations of the source files that will be modified by -c
# OPTIONAL: When omitted or None same as SOURCE_DIRS from FortranCallGraph's configuration
# Can be used, for example, when preprocessed source files are to be analysed but the original source files modified.
# Handle with care! -c creates backups (see BACKUP_SUFFIX) of the unmodified source files for later analyses,
# but those are only used when SOURCE_DIRS and MODIFY_SOURCE_DIRS are the same.
MODIFY_SOURCE_DIRS = None
# Suffix for backed-up source files
# Will be used by later analyses, for example when you first run -c and later -r
# OPTIONAL, default: 'ftg-backup'
BACKUP_SUFFIX = 'ftg-backup'
# Prefix of subroutines generated by -c, will be excluded from later analyses
# Must fit to subroutine names defined in the template
# OPTIONAL, default: 'ftg_'
FTG_PREFIX = 'ftg_'
# Value of the template variable ${dataDir}
# OPTIONAL, default: '.'
TEST_DATA_BASE_DIR = '.'
| gpl-3.0 | -4,443,048,437,855,805,400 | 38.642857 | 111 | 0.763363 | false |
ethereum/pyethapp | pyethapp/app.py | 1 | 30125 | # -*- coding: utf8 -*-
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import next
from builtins import range
import copy
import json
import os
import signal
import sys
from logging import StreamHandler
from uuid import uuid4
import click
import ethereum.slogging as slogging
import gevent
import rlp
from click import BadParameter
from devp2p.app import BaseApp
from devp2p.discovery import NodeDiscovery
from devp2p.peermanager import PeerManager
from devp2p.service import BaseService
from ethereum import config as eth_config
from ethereum.block import Block
from ethereum.snapshot import create_snapshot, load_snapshot as _load_snapshot
from ethereum.utils import (
encode_hex,
decode_hex,
to_string,
)
from gevent.event import Event
from . import config as app_config
from . import eth_protocol
from . import utils
from .accounts import AccountsService, Account
from .console_service import Console
from .db_service import DBService
from .eth_service import ChainService
from .jsonrpc import JSONRPCServer, IPCRPCServer
from .pow_service import PoWService
from pyethapp import __version__
from pyethapp.profiles import PROFILES, DEFAULT_PROFILE
from pyethapp.utils import merge_dict, load_contrib_services, FallbackChoice, \
enable_greenlet_debugger
log = slogging.get_logger('app')
services = [DBService, AccountsService, NodeDiscovery, PeerManager, ChainService,
PoWService, JSONRPCServer, IPCRPCServer, Console]
class EthApp(BaseApp):
client_name = 'pyethapp'
client_version = '%s/%s/%s' % (__version__, sys.platform,
'py%d.%d.%d' % sys.version_info[:3])
client_version_string = '%s/v%s' % (client_name, client_version)
start_console = False
default_config = dict(BaseApp.default_config)
default_config['client_version_string'] = client_version_string
default_config['post_app_start_callback'] = None
script_globals = {}
# TODO: Remove `profile` fallbacks in 1.4 or so
# Separators should be underscore!
@click.group(help='Welcome to {} {}'.format(EthApp.client_name, EthApp.client_version))
@click.option('--profile', type=FallbackChoice(
list(PROFILES.keys()),
{'frontier': 'livenet', 'morden': 'testnet'},
"PyEthApp's configuration profiles have been renamed to "
"'livenet' and 'testnet'. The previous values 'frontier' and "
"'morden' will be removed in a future update."),
default=DEFAULT_PROFILE, help="Configuration profile.", show_default=True)
@click.option('alt_config', '--Config', '-C', type=str, callback=app_config.validate_alt_config_file,
help='Alternative config file')
@click.option('config_values', '-c', multiple=True, type=str,
help='Single configuration parameters (<param>=<value>)')
@click.option('alt_data_dir', '-d', '--data-dir', multiple=False, type=str,
help='data directory', default=app_config.default_data_dir, show_default=True)
@click.option('-l', '--log_config', multiple=False, type=str, default=":info",
help='log_config string: e.g. ":info,eth:debug', show_default=True)
@click.option('--log-json/--log-no-json', default=False,
help='log as structured json output')
@click.option('--log-file', type=click.Path(dir_okay=False, writable=True, resolve_path=True),
help="Log to file instead of stderr.")
@click.option('-b', '--bootstrap_node', multiple=False, type=str,
help='single bootstrap_node as enode://pubkey@host:port')
@click.option('-m', '--mining_pct', multiple=False, type=int, default=0,
help='pct cpu used for mining')
@click.option('--unlock', multiple=True, type=str,
help='Unlock an account (prompts for password)')
@click.option('--password', type=click.File(), help='path to a password file')
@click.pass_context
def app(ctx, profile, alt_config, config_values, alt_data_dir, log_config,
bootstrap_node, log_json, mining_pct, unlock, password, log_file):
# configure logging
slogging.configure(log_config, log_json=log_json, log_file=log_file)
# data dir default or from cli option
alt_data_dir = os.path.expanduser(alt_data_dir)
data_dir = alt_data_dir or app_config.default_data_dir
app_config.setup_data_dir(data_dir) # if not available, sets up data_dir and required config
log.info('using data in', path=data_dir)
# prepare configuration
# config files only contain required config (privkeys) and config different from the default
if alt_config: # specified config file
config = app_config.load_config(alt_config)
if not config:
log.warning('empty config given. default config values will be used')
else: # load config from default or set data_dir
config = app_config.load_config(data_dir)
config['data_dir'] = data_dir
# Store custom genesis to restore if overridden by profile value
genesis_from_config_file = config.get('eth', {}).get('genesis')
# Store custom network_id to restore if overridden by profile value
network_id_from_config_file = config.get('eth', {}).get('network_id')
    # Store custom bootstrap_nodes to restore them if overridden by profile value
bootstrap_nodes_from_config_file = config.get('discovery', {}).get('bootstrap_nodes')
# add default config
app_config.update_config_with_defaults(config, app_config.get_default_config([EthApp] + services))
app_config.update_config_with_defaults(config, {'eth': {'block': eth_config.default_config}})
# Set config values based on profile selection
merge_dict(config, PROFILES[profile])
if genesis_from_config_file:
# Fixed genesis_hash taken from profile must be deleted as custom genesis loaded
del config['eth']['genesis_hash']
config['eth']['genesis'] = genesis_from_config_file
if network_id_from_config_file:
del config['eth']['network_id']
config['eth']['network_id'] = network_id_from_config_file
if bootstrap_nodes_from_config_file:
# Fixed bootstrap_nodes taken from profile must be deleted as custom bootstrap_nodes loaded
del config['discovery']['bootstrap_nodes']
config['discovery']['bootstrap_nodes'] = bootstrap_nodes_from_config_file
pre_cmd_line_config_genesis = config.get('eth', {}).get('genesis')
# override values with values from cmd line
for config_value in config_values:
try:
app_config.set_config_param(config, config_value)
except ValueError:
raise BadParameter('Config parameter must be of the form "a.b.c=d" where "a.b.c" '
'specifies the parameter to set and d is a valid yaml value '
'(example: "-c jsonrpc.port=5000")')
if pre_cmd_line_config_genesis != config.get('eth', {}).get('genesis'):
        # Fixed genesis_hash taken from profile must be deleted as custom genesis loaded
if 'genesis_hash' in config['eth']:
del config['eth']['genesis_hash']
# Load genesis config
app_config.update_config_from_genesis_json(config,
genesis_json_filename_or_dict=config['eth']['genesis'])
if bootstrap_node:
# [NOTE]: check it
config['discovery']['bootstrap_nodes'] = [to_string(bootstrap_node)]
if mining_pct > 0:
config['pow']['activated'] = True
config['pow']['cpu_pct'] = int(min(100, mining_pct))
if not config.get('pow', {}).get('activated'):
config['deactivated_services'].append(PoWService.name)
ctx.obj = {'config': config,
'unlock': unlock,
'password': password.read().rstrip() if password else None,
'log_file': log_file}
assert (password and ctx.obj['password'] is not None and len(
ctx.obj['password'])) or not password, "empty password file"
@app.command()
@click.option('--dev/--nodev', default=False,
help='Drop into interactive debugger on unhandled exceptions.')
@click.option('--nodial/--dial', default=False, help='Do not dial nodes.')
@click.option('--fake/--nofake', default=False, help='Fake genesis difficulty.')
@click.option('--console', is_flag=True, help='Immediately drop into interactive console.')
@click.pass_context
def run(ctx, dev, nodial, fake, console):
"""Start the client ( --dev to stop on error)"""
config = ctx.obj['config']
if nodial:
# config['deactivated_services'].append(PeerManager.name)
# config['deactivated_services'].append(NodeDiscovery.name)
config['discovery']['bootstrap_nodes'] = []
config['discovery']['listen_port'] = 29873
config['p2p']['listen_port'] = 29873
config['p2p']['min_peers'] = 0
if fake:
config['eth']['block']['GENESIS_DIFFICULTY'] = 1024
config['eth']['block']['BLOCK_DIFF_FACTOR'] = 16
# create app
app = EthApp(config)
# development mode
if dev:
enable_greenlet_debugger()
try:
config['client_version'] += '/' + os.getlogin()
except:
log.warn("can't get and add login name to client_version")
pass
# dump config
if log.is_active('debug'):
dump_config(config)
# init and unlock accounts first to check coinbase
if AccountsService in services:
AccountsService.register_with_app(app)
unlock_accounts(ctx.obj['unlock'], app.services.accounts, password=ctx.obj['password'])
try:
app.services.accounts.coinbase
except ValueError as e:
log.fatal('invalid coinbase', coinbase=config.get('pow', {}).get('coinbase_hex'),
error=e.message)
sys.exit()
app.start_console = console
# register services
contrib_services = load_contrib_services(config)
for service in services + contrib_services:
assert issubclass(service, BaseService)
if service.name not in app.config['deactivated_services'] + [AccountsService.name]:
assert service.name not in app.services
service.register_with_app(app)
assert hasattr(app.services, service.name)
# start app
log.info('starting')
app.start()
if ctx.obj['log_file']:
log.info("Logging to file %s", ctx.obj['log_file'])
# User requested file logging - remove stderr handler
root_logger = slogging.getLogger()
for hndlr in root_logger.handlers:
if isinstance(hndlr, StreamHandler) and hndlr.stream == sys.stderr:
root_logger.removeHandler(hndlr)
break
if config['post_app_start_callback'] is not None:
config['post_app_start_callback'](app)
# wait for interrupt
evt = Event()
gevent.signal(signal.SIGQUIT, evt.set)
gevent.signal(signal.SIGTERM, evt.set)
evt.wait()
# finally stop
app.stop()
def dump_config(config):
cfg = copy.deepcopy(config)
alloc = cfg.get('eth', {}).get('block', {}).get('GENESIS_INITIAL_ALLOC', {})
if len(alloc) > 100:
log.debug('omitting reporting of %d accounts in genesis' % len(alloc))
del cfg['eth']['block']['GENESIS_INITIAL_ALLOC']
app_config.dump_config(cfg)
@app.command()
@click.pass_context
def config(ctx):
"""Show the config"""
dump_config(ctx.obj['config'])
@app.command()
@click.argument('file', type=click.File(), required=True)
@click.argument('name', type=str, required=True)
@click.pass_context
def blocktest(ctx, file, name):
"""Start after importing blocks from a file.
In order to prevent replacement of the local test chain by the main chain from the network, the
peermanager, if registered, is stopped before importing any blocks.
    Also, for block tests an in-memory database is used. Thus, an already persisting chain stays in
    place.
"""
app = EthApp(ctx.obj['config'])
app.config['db']['implementation'] = 'EphemDB'
# register services
for service in services:
assert issubclass(service, BaseService)
if service.name not in app.config['deactivated_services']:
assert service.name not in app.services
service.register_with_app(app)
assert hasattr(app.services, service.name)
if ChainService.name not in app.services:
log.fatal('No chainmanager registered')
ctx.abort()
if DBService.name not in app.services:
log.fatal('No db registered')
ctx.abort()
log.info('loading block file', path=file.name)
try:
data = json.load(file)
except ValueError:
        log.fatal('Invalid JSON file')
        ctx.abort()
if name not in data:
log.fatal('Name not found in file')
ctx.abort()
try:
blocks = utils.load_block_tests(list(data.values())[0], app.services.chain.chain.db)
except ValueError:
log.fatal('Invalid blocks encountered')
ctx.abort()
# start app
app.start()
if 'peermanager' in app.services:
app.services.peermanager.stop()
log.info('building blockchain')
Block.is_genesis = lambda self: self.number == 0
app.services.chain.chain._initialize_blockchain(genesis=blocks[0])
for block in blocks[1:]:
app.services.chain.chain.add_block(block)
# wait for interrupt
evt = Event()
gevent.signal(signal.SIGQUIT, evt.set)
gevent.signal(signal.SIGTERM, evt.set)
gevent.signal(signal.SIGINT, evt.set)
evt.wait()
# finally stop
app.stop()
@app.command('snapshot')
@click.option('-r', '--recent', type=int, default=1024,
help='Number of recent blocks. State before these blocks and these blocks will be dumped. On recover these blocks will be applied on restored state. (default: 1024)')
@click.option('-f', '--filename', type=str, default=None,
help='Output file name. (default: auto-gen file prefixed by snapshot-')
@click.pass_context
def snapshot(ctx, recent, filename):
"""Take a snapshot of current world state.
The snapshot will be saved in JSON format, including data like chain configurations and accounts.
    It will overwrite the existing file if it already exists.
"""
app = EthApp(ctx.obj['config'])
DBService.register_with_app(app)
AccountsService.register_with_app(app)
ChainService.register_with_app(app)
if not filename:
import time
filename = 'snapshot-%d.json' % int(time.time()*1000)
s = create_snapshot(app.services.chain.chain, recent)
with open(filename, 'w') as f:
json.dump(s, f, sort_keys=False, indent=4, separators=(',', ': '), encoding='ascii')
print('snapshot saved to %s' % filename)
@app.command('load_snapshot')
@click.argument('filename', type=str)
@click.pass_context
def load_snapshot(ctx, filename):
"""Load snapshot FILE into local node database.
This process will OVERWRITE data in current database!!!
"""
app = EthApp(ctx.obj['config'])
DBService.register_with_app(app)
AccountsService.register_with_app(app)
ChainService.register_with_app(app)
with open(filename, 'r') as f:
s = json.load(f, encoding='ascii')
_load_snapshot(app.services.chain.chain, s)
print('snapshot %s loaded.' % filename)
@app.command('export')
@click.option('--from', 'from_', type=int, help='Number of the first block (default: genesis)')
@click.option('--to', type=int, help='Number of the last block (default: latest)')
@click.argument('file', type=click.File('ab'))
@click.pass_context
def export_blocks(ctx, from_, to, file):
"""Export the blockchain to FILE.
The chain will be stored in binary format, i.e. as a concatenated list of RLP encoded blocks,
starting with the earliest block.
If the file already exists, the additional blocks are appended. Otherwise, a new file is
created.
Use - to write to stdout.
"""
app = EthApp(ctx.obj['config'])
DBService.register_with_app(app)
AccountsService.register_with_app(app)
ChainService.register_with_app(app)
if from_ is None:
from_ = 0
head_number = app.services.chain.chain.head.number
if to is None:
to = head_number
if from_ < 0:
log.fatal('block numbers must not be negative')
sys.exit(1)
if to < from_:
log.fatal('"to" block must be newer than "from" block')
sys.exit(1)
if to > head_number:
log.fatal('"to" block not known (current head: {})'.format(head_number))
sys.exit(1)
log.info('Starting export')
for n in range(from_, to + 1):
log.debug('Exporting block {}'.format(n))
if (n - from_) % 50000 == 0:
log.info('Exporting block {} to {}'.format(n, min(n + 50000, to)))
block_hash = app.services.chain.chain.get_blockhash_by_number(n)
# bypass slow block decoding by directly accessing db
block_rlp = app.services.db.get(block_hash)
file.write(block_rlp)
log.info('Export complete')
@app.command('import')
@click.argument('file', type=click.File('rb'))
@click.pass_context
def import_blocks(ctx, file):
"""Import blocks from FILE.
Blocks are expected to be in binary format, i.e. as a concatenated list of RLP encoded blocks.
Blocks are imported sequentially. If a block can not be imported (e.g. because it is badly
encoded, it is in the chain already or its parent is not in the chain) it will be ignored, but
the process will continue. Sole exception: If neither the first block nor its parent is known,
importing will end right away.
Use - to read from stdin.
"""
app = EthApp(ctx.obj['config'])
DBService.register_with_app(app)
AccountsService.register_with_app(app)
ChainService.register_with_app(app)
chain = app.services.chain
assert chain.block_queue.empty()
data = file.read()
app.start()
def blocks():
"""Generator for blocks encoded in `data`."""
i = 0
while i < len(data):
try:
block_data, next_i = rlp.codec.consume_item(data, i)
except rlp.DecodingError:
log.fatal('invalid RLP encoding', byte_index=i)
sys.exit(1) # have to abort as we don't know where to continue
try:
if not isinstance(block_data, list) or len(block_data) != 3:
raise rlp.DeserializationError('', block_data)
yield eth_protocol.TransientBlock.init_from_rlp(block_data)
except (IndexError, rlp.DeserializationError):
log.warning('not a valid block', byte_index=i) # we can still continue
yield None
i = next_i
log.info('importing blocks')
# check if it makes sense to go through all blocks
first_block = next(blocks())
if first_block is None:
log.fatal('first block invalid')
sys.exit(1)
if not (chain.knows_block(first_block.header.hash) or
chain.knows_block(first_block.header.prevhash)):
log.fatal('unlinked chains', newest_known_block=chain.chain.head.number,
first_unknown_block=first_block.header.number)
sys.exit(1)
# import all blocks
for n, block in enumerate(blocks()):
if block is None:
log.warning('skipping block', number_in_file=n)
continue
log.debug('adding block to queue', number_in_file=n, number_in_chain=block.header.number)
app.services.chain.add_block(block, None) # None for proto
# let block processing finish
while not app.services.chain.block_queue.empty():
gevent.sleep()
app.stop()
log.info('import finished', head_number=app.services.chain.chain.head.number)
@app.group()
@click.pass_context
def account(ctx):
"""Manage accounts.
For accounts to be accessible by pyethapp, their keys must be stored in the keystore directory.
Its path can be configured through "accounts.keystore_dir".
"""
app = EthApp(ctx.obj['config'])
ctx.obj['app'] = app
AccountsService.register_with_app(app)
unlock_accounts(ctx.obj['unlock'], app.services.accounts, password=ctx.obj['password'])
@account.command('new')
@click.option('--uuid', '-i', help='equip the account with a random UUID', is_flag=True)
@click.pass_context
def new_account(ctx, uuid):
"""Create a new account.
This will generate a random private key and store it in encrypted form in the keystore
directory. You are prompted for the password that is employed (if no password file is
specified). If desired the private key can be associated with a random UUID (version 4) using
the --uuid flag.
"""
app = ctx.obj['app']
if uuid:
id_ = uuid4()
else:
id_ = None
password = ctx.obj['password']
if password is None:
password = click.prompt('Password to encrypt private key', default='', hide_input=True,
confirmation_prompt=True, show_default=False)
account = Account.new(password, uuid=id_)
account.path = os.path.join(app.services.accounts.keystore_dir, encode_hex(account.address))
try:
app.services.accounts.add_account(account)
except IOError:
click.echo('Could not write keystore file. Make sure you have write permission in the '
'configured directory and check the log for further information.')
sys.exit(1)
else:
click.echo('Account creation successful')
click.echo(' Address: {}'.format(encode_hex(account.address)))
click.echo(' Id: {}'.format(account.uuid))
@account.command('list')
@click.pass_context
def list_accounts(ctx):
"""List accounts with addresses and ids.
This prints a table of all accounts, numbered consecutively, along with their addresses and
ids. Note that some accounts do not have an id, and some addresses might be hidden (i.e. are
not present in the keystore file). In the latter case, you have to unlock the accounts (e.g.
via "pyethapp --unlock <account> account list") to display the address anyway.
"""
accounts = ctx.obj['app'].services.accounts
if len(accounts) == 0:
click.echo('no accounts found')
else:
fmt = '{i:>4} {address:<40} {id:<36} {locked:<1}'
click.echo(' {address:<40} {id:<36} {locked}'.format(address='Address (if known)',
id='Id (if any)',
locked='Locked'))
for i, account in enumerate(accounts):
click.echo(fmt.format(i='#' + to_string(i + 1),
address=encode_hex(account.address or ''),
id=account.uuid or '',
locked='yes' if account.locked else 'no'))
@account.command('import')
@click.argument('f', type=click.File(), metavar='FILE')
@click.option('--uuid', '-i', help='equip the new account with a random UUID', is_flag=True)
@click.pass_context
def import_account(ctx, f, uuid):
"""Import a private key from FILE.
FILE is the path to the file in which the private key is stored. The key is assumed to be hex
encoded, surrounding whitespace is stripped. A new account is created for the private key, as
if it was created with "pyethapp account new", and stored in the keystore directory. You will
be prompted for a password to encrypt the key (if no password file is specified). If desired a
random UUID (version 4) can be generated using the --uuid flag in order to identify the new
account later.
"""
app = ctx.obj['app']
if uuid:
id_ = uuid4()
else:
id_ = None
privkey_hex = f.read()
try:
privkey = decode_hex(privkey_hex.strip())
except TypeError:
click.echo('Could not decode private key from file (should be hex encoded)')
sys.exit(1)
password = ctx.obj['password']
if password is None:
password = click.prompt('Password to encrypt private key', default='', hide_input=True,
confirmation_prompt=True, show_default=False)
account = Account.new(password, privkey, uuid=id_)
account.path = os.path.join(app.services.accounts.keystore_dir, encode_hex(account.address))
try:
app.services.accounts.add_account(account)
except IOError:
click.echo('Could not write keystore file. Make sure you have write permission in the '
'configured directory and check the log for further information.')
sys.exit(1)
else:
click.echo('Account creation successful')
click.echo(' Address: {}'.format(encode_hex(account.address)))
click.echo(' Id: {}'.format(account.uuid))
@account.command('update')
@click.argument('account', type=str)
@click.pass_context
def update_account(ctx, account):
"""
Change the password of an account.
ACCOUNT identifies the account: It can be one of the following: an address, a uuid, or a
number corresponding to an entry in "pyethapp account list" (one based).
"update" first prompts for the current password to unlock the account. Next, the new password
must be entered.
The password replacement procedure backups the original keystore file in the keystore
directory, creates the new file, and finally deletes the backup. If something goes wrong, an
attempt will be made to restore the keystore file from the backup. In the event that this does
not work, it is possible to recover from the backup manually by simply renaming it. The backup
shares the same name as the original file, but with an appended "~" plus a number if necessary
to avoid name clashes.
As this command tampers with your keystore directory, it is advisable to perform a manual
backup in advance.
If a password is provided via the "--password" option (on the "pyethapp" base command), it will
be used to unlock the account, but not as the new password (as distinguished from
"pyethapp account new").
"""
app = ctx.obj['app']
unlock_accounts([account], app.services.accounts, password=ctx.obj['password'])
old_account = app.services.accounts.find(account)
if old_account.locked:
click.echo('Account needs to be unlocked in order to update its password')
sys.exit(1)
click.echo('Updating account')
click.echo('Address: {}'.format(encode_hex(old_account.address)))
click.echo(' Id: {}'.format(old_account.uuid))
new_password = click.prompt('New password', default='', hide_input=True,
confirmation_prompt=True, show_default=False)
try:
app.services.accounts.update_account(old_account, new_password)
except:
click.echo('Account update failed. Make sure that the keystore file has been restored '
'correctly (e.g. with "pyethapp --unlock <acct> account list"). If not, look '
'for automatic backup files in the keystore directory (suffix "~" or '
'"~<number>"). Check the log for further information.')
raise
click.echo('Account update successful')
def unlock_accounts(account_ids, account_service, max_attempts=3, password=None):
"""Unlock a list of accounts, prompting for passwords one by one if not given.
If a password is specified, it will be used to unlock all accounts. If not, the user is
prompted for one password per account.
If an account can not be identified or unlocked, an error message is logged and the program
exits.
:param accounts: a list of account identifiers accepted by :meth:`AccountsService.find`
:param account_service: the account service managing the given accounts
:param max_attempts: maximum number of attempts per account before the unlocking process is
aborted (>= 1), or `None` to allow an arbitrary number of tries
:param password: optional password which will be used to unlock the accounts
"""
accounts = []
for account_id in account_ids:
try:
account = account_service.find(account_id)
except KeyError:
log.fatal('could not find account', identifier=account_id)
sys.exit(1)
accounts.append(account)
if password is not None:
for identifier, account in zip(account_ids, accounts):
try:
account.unlock(password)
except ValueError:
log.fatal('Could not unlock account with password from file',
account_id=identifier)
sys.exit(1)
return
max_attempts_str = to_string(max_attempts) if max_attempts else 'oo'
attempt_fmt = '(attempt {{attempt}}/{})'.format(max_attempts_str)
first_attempt_fmt = 'Password for account {id} ' + attempt_fmt
further_attempts_fmt = 'Wrong password. Please try again ' + attempt_fmt
for identifier, account in zip(account_ids, accounts):
attempt = 1
pw = click.prompt(first_attempt_fmt.format(id=identifier, attempt=1), hide_input=True,
default='', show_default=False)
while True:
attempt += 1
try:
account.unlock(pw)
except ValueError:
if max_attempts and attempt > max_attempts:
log.fatal('Too many unlock attempts', attempts=attempt, account_id=identifier)
sys.exit(1)
else:
pw = click.prompt(further_attempts_fmt.format(attempt=attempt),
hide_input=True, default='', show_default=False)
else:
break
assert not account.locked
if __name__ == '__main__':
# python app.py 2>&1 | less +F
app()
| mit | 1,139,392,548,384,149,500 | 39.220294 | 180 | 0.645378 | false |
xrobau/PoGoMap | runserver.py | 1 | 9755 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import logging
import time
import re
import requests
import ssl
import json
from distutils.version import StrictVersion
from threading import Thread, Event
from queue import Queue
from flask_cors import CORS
from flask_cache_bust import init_cache_busting
from pogom import config
from pogom.app import Pogom
from pogom.utils import get_args, get_encryption_lib_path
from pogom.search import search_overseer_thread
from pogom.models import init_database, create_tables, drop_tables, Pokemon, db_updater, clean_db_loop
from pogom.webhook import wh_updater
from pogom.proxy import check_proxies
# Currently supported pgoapi
pgoapi_version = "1.1.7"
# Moved here so logger is configured at load time
logging.basicConfig(format='%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] %(message)s')
log = logging.getLogger()
# Make sure pogom/pgoapi is actually removed if it is an empty directory
# This is a leftover directory from the time pgoapi was embedded in PokemonGo-Map
# The empty directory will cause problems with `import pgoapi` so it needs to go
oldpgoapiPath = os.path.join(os.path.dirname(__file__), "pogom/pgoapi")
if os.path.isdir(oldpgoapiPath):
log.info("I found %s, but its no longer used. Going to remove it...", oldpgoapiPath)
shutil.rmtree(oldpgoapiPath)
log.info("Done!")
# Assert pgoapi is installed
try:
import pgoapi
from pgoapi import utilities as util
except ImportError:
log.critical("It seems `pgoapi` is not installed. You must run pip install -r requirements.txt again")
sys.exit(1)
# Assert pgoapi >= pgoapi_version
if not hasattr(pgoapi, "__version__") or StrictVersion(pgoapi.__version__) < StrictVersion(pgoapi_version):
log.critical("It seems `pgoapi` is not up-to-date. You must run pip install -r requirements.txt again")
sys.exit(1)
def main():
args = get_args()
    # Check for deprecated arguments
if args.debug:
        log.warning('--debug is deprecated. Please use --verbose instead. Enabling --verbose')
args.verbose = 'nofile'
# Add file logging if enabled
if args.verbose and args.verbose != 'nofile':
filelog = logging.FileHandler(args.verbose)
filelog.setFormatter(logging.Formatter('%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] %(message)s'))
logging.getLogger('').addHandler(filelog)
if args.very_verbose and args.very_verbose != 'nofile':
filelog = logging.FileHandler(args.very_verbose)
filelog.setFormatter(logging.Formatter('%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] %(message)s'))
logging.getLogger('').addHandler(filelog)
# Check if we have the proper encryption library file and get its path
encryption_lib_path = get_encryption_lib_path(args)
    if encryption_lib_path == "":
sys.exit(1)
if args.verbose or args.very_verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
# Let's not forget to run Grunt / Only needed when running with webserver
if not args.no_server:
if not os.path.exists(os.path.join(os.path.dirname(__file__), 'static/dist')):
log.critical('Missing front-end assets (static/dist) -- please run "npm install && npm run build" before starting the server')
sys.exit()
    # These are very noisy, let's shush them up a bit
logging.getLogger('peewee').setLevel(logging.INFO)
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('pgoapi.pgoapi').setLevel(logging.WARNING)
logging.getLogger('pgoapi.rpc_api').setLevel(logging.INFO)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
config['parse_pokemon'] = not args.no_pokemon
config['parse_pokestops'] = not args.no_pokestops
config['parse_gyms'] = not args.no_gyms
# Turn these back up if debugging
if args.verbose or args.very_verbose:
logging.getLogger('pgoapi').setLevel(logging.DEBUG)
if args.very_verbose:
logging.getLogger('peewee').setLevel(logging.DEBUG)
logging.getLogger('requests').setLevel(logging.DEBUG)
logging.getLogger('pgoapi.pgoapi').setLevel(logging.DEBUG)
logging.getLogger('pgoapi.rpc_api').setLevel(logging.DEBUG)
logging.getLogger('rpc_api').setLevel(logging.DEBUG)
logging.getLogger('werkzeug').setLevel(logging.DEBUG)
    # use lat/lng directly if the location matches such a pattern
prog = re.compile("^(\-?\d+\.\d+),?\s?(\-?\d+\.\d+)$")
res = prog.match(args.location)
if res:
log.debug('Using coordinates from CLI directly')
position = (float(res.group(1)), float(res.group(2)), 0)
else:
log.debug('Looking up coordinates in API')
position = util.get_pos_by_name(args.location)
# Use the latitude and longitude to get the local altitude from Google
try:
url = 'https://maps.googleapis.com/maps/api/elevation/json?locations={},{}'.format(
str(position[0]), str(position[1]))
altitude = requests.get(url).json()[u'results'][0][u'elevation']
log.debug('Local altitude is: %sm', altitude)
position = (position[0], position[1], altitude)
except (requests.exceptions.RequestException, IndexError, KeyError):
log.error('Unable to retrieve altitude from Google APIs; setting to 0')
if not any(position):
log.error('Could not get a position by name, aborting')
sys.exit()
log.info('Parsed location is: %.4f/%.4f/%.4f (lat/lng/alt)',
position[0], position[1], position[2])
if args.no_pokemon:
log.info('Parsing of Pokemon disabled')
if args.no_pokestops:
log.info('Parsing of Pokestops disabled')
if args.no_gyms:
log.info('Parsing of Gyms disabled')
config['LOCALE'] = args.locale
config['CHINA'] = args.china
app = Pogom(__name__)
db = init_database(app)
if args.clear_db:
log.info('Clearing database')
if args.db_type == 'mysql':
drop_tables(db)
elif os.path.isfile(args.db):
os.remove(args.db)
create_tables(db)
app.set_current_location(position)
# Control the search status (running or not) across threads
pause_bit = Event()
pause_bit.clear()
# Setup the location tracking queue and push the first location on
new_location_queue = Queue()
new_location_queue.put(position)
# DB Updates
db_updates_queue = Queue()
# Thread(s) to process database updates
for i in range(args.db_threads):
log.debug('Starting db-updater worker thread %d', i)
t = Thread(target=db_updater, name='db-updater-{}'.format(i), args=(args, db_updates_queue))
t.daemon = True
t.start()
    # db cleaner; really only need one ever
t = Thread(target=clean_db_loop, name='db-cleaner', args=(args,))
t.daemon = True
t.start()
# WH Updates
wh_updates_queue = Queue()
# Thread to process webhook updates
for i in range(args.wh_threads):
log.debug('Starting wh-updater worker thread %d', i)
t = Thread(target=wh_updater, name='wh-updater-{}'.format(i), args=(args, wh_updates_queue))
t.daemon = True
t.start()
if not args.only_server:
# Check all proxies before continue so we know they are good
if args.proxy:
# Overwrite old args.proxy with new working list
args.proxy = check_proxies(args)
# Gather the pokemons!
# check the sort of scan
if args.spawnpoint_scanning:
mode = 'sps'
else:
mode = 'hex'
        # attempt to dump the spawn points (do this before starting threads or endure the woe)
if args.spawnpoint_scanning and args.spawnpoint_scanning != 'nofile' and args.dump_spawnpoints:
with open(args.spawnpoint_scanning, 'w+') as file:
log.info('Saving spawn points to %s', args.spawnpoint_scanning)
spawns = Pokemon.get_spawnpoints_in_hex(position, args.step_limit)
file.write(json.dumps(spawns))
log.info('Finished exporting spawn points')
argset = (args, mode, new_location_queue, pause_bit, encryption_lib_path, db_updates_queue, wh_updates_queue)
log.debug('Starting a %s search thread', mode)
search_thread = Thread(target=search_overseer_thread, name='search-overseer', args=argset)
search_thread.daemon = True
search_thread.start()
if args.cors:
CORS(app)
# No more stale JS
init_cache_busting(app)
app.set_search_control(pause_bit)
app.set_location_queue(new_location_queue)
config['ROOT_PATH'] = app.root_path
config['GMAPS_KEY'] = args.gmaps_key
if args.no_server:
        # This loop allows for ctrl-c interrupts to work since flask won't be holding the program open
while search_thread.is_alive():
time.sleep(60)
else:
ssl_context = None
if args.ssl_certificate and args.ssl_privatekey \
and os.path.exists(args.ssl_certificate) and os.path.exists(args.ssl_privatekey):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ssl_context.load_cert_chain(args.ssl_certificate, args.ssl_privatekey)
log.info('Web server in SSL mode.')
if args.verbose or args.very_verbose:
app.run(threaded=True, use_reloader=False, debug=True, host=args.host, port=args.port, ssl_context=ssl_context)
else:
app.run(threaded=True, use_reloader=False, debug=False, host=args.host, port=args.port, ssl_context=ssl_context)
if __name__ == '__main__':
main()
| agpl-3.0 | 4,981,080,807,710,686,000 | 36.664093 | 138 | 0.663557 | false |
squaresLab/Houston | test/test_state.py | 1 | 3173 | import pytest
from houston.state import State, var
def test_variable_construction():
class S(State):
foo = var(float, lambda c: 0.1)
assert set(n for n in S.variables) == {'foo'}
def test_constructor():
class S(State):
foo = var(float, lambda c: 0.1)
state = S(foo=0.1, time_offset=30.0)
assert state.foo == 0.1
assert state.time_offset == 30.0
with pytest.raises(TypeError):
assert S()
pytest.fail("expected TypeError (no arguments)")
with pytest.raises(TypeError):
assert S(foo=0.1)
pytest.fail("expected TypeError (missing time_offset)")
with pytest.raises(TypeError):
assert S(time_offset=30.0)
pytest.fail("expected TypeError (missing foo)")
with pytest.raises(TypeError):
assert S(foo=0.1, bar=1.0, time_offset=30.0)
pytest.fail("expected TypeError (erroneous property 'bar')")
class S(State):
foo = var(int, lambda c: 0)
bar = var(int, lambda c: 0)
state = S(foo=0, bar=1, time_offset=0.0)
assert state.foo == 0
assert state.bar == 1
assert state.time_offset == 0.0
def test_is_frozen():
class S(State):
foo = var(int, lambda c: 0)
bar = var(int, lambda c: 0)
state = S(foo=0, bar=0, time_offset=0.0)
with pytest.raises(AttributeError):
state.time_offset = 500.0
pytest.fail("expected AttributeError (can't set time_offset)")
with pytest.raises(AttributeError):
state.foo = 10
pytest.fail("expected AttributeError (can't set foo)")
def test_hash():
class S(State):
foo = var(int, lambda c: 0)
s1 = S(foo=0, time_offset=0.0)
s2 = S(foo=1, time_offset=0.0)
s3 = S(foo=0, time_offset=0.0)
s4 = S(foo=1, time_offset=0.0)
assert {s1, s2, s3, s4} == {S(foo=0, time_offset=0.0), S(foo=1, time_offset=0.0)}
def test_eq():
class S(State):
foo = var(int, lambda c: 0)
bar = var(int, lambda c: 0)
class Y(State):
foo = var(int, lambda c: 0)
bar = var(int, lambda c: 0)
assert S(foo=1, bar=2, time_offset=0.0) == S(foo=1, bar=2, time_offset=0.0)
assert S(foo=1, bar=2, time_offset=0.0) != S(foo=1, bar=2, time_offset=1.0)
assert S(foo=1, bar=2, time_offset=0.0) != S(foo=1, bar=3, time_offset=0.0)
with pytest.raises(Exception):
assert S(foo=1, bar=2, time_offset=0.0) == Y(foo=1, bar=2, time_offset=0.0)
pytest.fail("expected Exception (states have different parent classes)")
def test_equiv():
class S(State):
foo = var(int, lambda c: 0)
bar = var(int, lambda c: 0)
assert S(foo=1, bar=1, time_offset=0.0).equiv(S(foo=1, bar=1, time_offset=0.0))
assert S(foo=1, bar=1, time_offset=0.0).equiv(S(foo=1, bar=1, time_offset=1.0))
assert not S(foo=0, bar=1, time_offset=0.0).equiv(S(foo=1, bar=1, time_offset=0.0))
def test_to_and_from_dict():
class S(State):
foo = var(int, lambda c: 0)
bar = var(int, lambda c: 0)
state = S(foo=1, bar=2, time_offset=0.0)
d = {'foo': 1, 'bar': 2, 'time_offset': 0.0}
assert state.to_dict() == d
assert S.from_dict(d) == state
| mit | -8,171,461,671,288,105,000 | 28.933962 | 87 | 0.590923 | false |
kirbyfan64/cppexpat | doc/source/conf.py | 1 | 8300 | # -*- coding: utf-8 -*-
#
# CppExpat documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 13 19:00:49 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe']
breathe_projects_source = {'cppexpat': ('../..', ['cppexpat.hpp'])}
breathe_doxygen_config_options = {'EXCLUDE_SYMBOLS': 'CPPEXPAT_TO_PBASE'}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CppExpat'
copyright = u'2015, Ryan Gonzalez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CppExpatdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'CppExpat.tex', u'CppExpat Documentation',
u'Ryan Gonzalez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cppexpat', u'CppExpat Documentation',
[u'Ryan Gonzalez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CppExpat', u'CppExpat Documentation',
u'Ryan Gonzalez', 'CppExpat', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -6,357,192,428,893,004,000 | 30.679389 | 79 | 0.707229 | false |
ehashman/oh-mainline | vendor/packages/django-http-proxy/httpproxy/migrations/0001_initial.py | 1 | 2970 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Request',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('method', models.CharField(max_length=20, verbose_name='method')),
('domain', models.CharField(max_length=100, verbose_name='domain')),
('port', models.PositiveSmallIntegerField(default=80)),
('path', models.CharField(max_length=250, verbose_name='path')),
('date', models.DateTimeField(auto_now=True)),
('querykey', models.CharField(verbose_name='query key', max_length=255, editable=False)),
],
options={
'get_latest_by': 'date',
'verbose_name': 'request',
'verbose_name_plural': 'requests',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RequestParameter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(default=b'G', max_length=1, choices=[(b'G', b'GET'), (b'P', b'POST')])),
('order', models.PositiveSmallIntegerField(default=1)),
('name', models.CharField(max_length=100, verbose_name='naam')),
('value', models.CharField(max_length=250, null=True, verbose_name='value', blank=True)),
('request', models.ForeignKey(related_name='parameters', verbose_name='request', to='httpproxy.Request')),
],
options={
'ordering': ('order',),
'verbose_name': 'request parameter',
'verbose_name_plural': 'request parameters',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.PositiveSmallIntegerField(default=200)),
('content_type', models.CharField(max_length=200, verbose_name='inhoudstype')),
('content', models.TextField(verbose_name='inhoud')),
('request', models.OneToOneField(verbose_name='request', to='httpproxy.Request')),
],
options={
'verbose_name': 'response',
'verbose_name_plural': 'responses',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='request',
unique_together=set([('method', 'domain', 'port', 'path', 'querykey')]),
),
]
| agpl-3.0 | 5,437,310,444,514,051,000 | 43.328358 | 122 | 0.537037 | false |
Banbury/cartwheel-3d | Python/UI/MainWindow.py | 1 | 5484 | '''
Created on 2009-08-24
This module contains the main OpenGL application window that is used by all SNM applications
@author: beaudoin
'''
import wx
import UI
class MainWindow(wx.Frame):
"""The class for the main window."""
MIN_TOOLPANEL_WIDTH = 200
MIN_CONSOLE_HEIGHT = 100
def __init__(self, parent, id, title, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
name='frame', fps=30, glCanvasSize=wx.DefaultSize,
showConsole=True,
consoleEnvironment={} ):
        # Check if a fixed glWindow was requested
fixedGlWindow = glCanvasSize != wx.DefaultSize
self._glCanvasSize = glCanvasSize
#
# Forcing a specific style on the window.
# Should this include styles passed?
style |= wx.NO_FULL_REPAINT_ON_RESIZE
# Not resizable if GL canvas is fixed size
if fixedGlWindow :
style &= ~wx.RESIZE_BORDER & ~wx.MAXIMIZE_BOX
super(MainWindow, self).__init__(parent, id, title, pos, size, style, name)
#
# Create the menu
self._menuBar = wx.MenuBar()
self._fileMenu = wx.Menu()
self._fileMenu.Append( wx.ID_OPEN, "&Open" )
self._fileMenu.Append( wx.ID_SAVE, "&Save" )
self._fileMenu.AppendSeparator()
self._fileMenu.Append( wx.ID_EXIT, "&Quit" )
self._menuBar.Append(self._fileMenu, "&File" )
self._helpMenu = wx.Menu()
self._helpMenu.Append( wx.ID_ABOUT, "&About" )
self._menuBar.Append(self._helpMenu, "&Help" )
self.SetMenuBar( self._menuBar )
#
# Create the GL canvas
attribList = (wx.glcanvas.WX_GL_RGBA, # RGBA
wx.glcanvas.WX_GL_DOUBLEBUFFER, # Double Buffered
wx.glcanvas.WX_GL_DEPTH_SIZE, 24, # 24 bit depth
wx.glcanvas.WX_GL_STENCIL_SIZE, 8 ) # 8 bit stencil
self._glCanvas = UI.GLPanel(self, fps = fps, size = glCanvasSize, attribList = attribList)
# Create the right window (sashed) where the tool panel will be
self._rightWindow = wx.SashLayoutWindow(self)
self._rightWindow.SetDefaultSize((MainWindow.MIN_TOOLPANEL_WIDTH * 1.3,-1))
self._rightWindow.SetMinimumSizeX(MainWindow.MIN_TOOLPANEL_WIDTH)
self._rightWindow.SetOrientation( wx.LAYOUT_VERTICAL )
self._rightWindow.SetAlignment( wx.LAYOUT_RIGHT )
if not fixedGlWindow:
self._rightWindow.SetSashVisible( wx.SASH_LEFT, True )
self._rightWindow.Bind( wx.EVT_SASH_DRAGGED, self.onSashDragRightWindow )
#
# Create the tool panel
self._toolPanel = UI.ToolPanel(self._rightWindow)
# Create the bottom window (sashed) where the console will be
self._bottomWindow = wx.SashLayoutWindow(self)
self._bottomWindow.SetDefaultSize((-1,MainWindow.MIN_CONSOLE_HEIGHT*2))
self._bottomWindow.SetMinimumSizeY(MainWindow.MIN_CONSOLE_HEIGHT)
self._bottomWindow.SetOrientation( wx.LAYOUT_HORIZONTAL )
self._bottomWindow.SetAlignment( wx.LAYOUT_BOTTOM )
if not fixedGlWindow:
self._bottomWindow.SetSashVisible( wx.SASH_TOP, True )
self._bottomWindow.Bind( wx.EVT_SASH_DRAGGED, self.onSashDragBottomWindow )
#
# Create the console window
self._console = UI.PythonConsole(self._bottomWindow, size=(-1,220), consoleEnvironment = consoleEnvironment )
if not showConsole:
self._bottomWindow.Hide()
self.Bind( wx.EVT_SIZE, self.onSize )
#
# Private methods
def _layoutFrame(self):
"""Private. Perform frame layout"""
wx.LayoutAlgorithm().LayoutFrame(self, self._glCanvas)
#
# Event handlers
def onSize(self, event):
self._layoutFrame()
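        # If a fixed GL canvas size was requested, measure how far the canvas
        # currently is from that size and resize the frame by the difference,
        # so the canvas ends up at exactly the requested dimensions.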
if self._glCanvasSize != wx.DefaultSize :
currGlCanvasSize = self._glCanvas.GetSize()
diff = ( currGlCanvasSize[0] - self._glCanvasSize[0], currGlCanvasSize[1] - self._glCanvasSize[1] )
if diff == (0,0) :
return
currentSize = event.GetSize()
newSize= ( currentSize[0] - diff[0], currentSize[1] - diff[1] )
if newSize == currentSize :
return
self.SetSize( newSize )
self.SendSizeEvent()
def onSashDragRightWindow(self, event):
if event.GetDragStatus() == wx.SASH_STATUS_OUT_OF_RANGE:
return
self._rightWindow.SetDefaultSize((event.GetDragRect().width,-1))
self._layoutFrame()
def onSashDragBottomWindow(self, event):
if event.GetDragStatus() == wx.SASH_STATUS_OUT_OF_RANGE:
return
self._bottomWindow.SetDefaultSize((-1,event.GetDragRect().height))
self._layoutFrame()
#
# Accessors
def getGLCanvas(self):
"""Return the associated GL canvas."""
return self._glCanvas
def getToolPanel(self):
"""Return the associated tool panel."""
return self._toolPanel
def getFps(self):
"""Return the desired frame per second for this window."""
return self._glCanvas.getFps()
| apache-2.0 | -7,808,601,954,536,289,000 | 34.317881 | 117 | 0.585157 | false |
serkanaltuntas/yavst | yavst/prepare_gpf4.py | 1 | 4894 | #!/usr/bin/env python
#
#
#
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/Utilities24/prepare_gpf4.py,v 1.10.4.3 2009/03/23 21:54:28 rhuey Exp $
#
import string
import os.path
import glob
from MolKit import Read
from AutoDockTools.GridParameters import GridParameters, grid_parameter_list4
from AutoDockTools.GridParameters import GridParameter4FileMaker
from AutoDockTools.atomTypeTools import AutoDock4_AtomTyper
def usage():
print "Usage: prepare_gpf4.py -l pdbqt_file -r pdbqt_file "
print " -l ligand_filename"
print " -r receptor_filename"
print
print "Optional parameters:"
print " [-i reference_gpf_filename]"
print " [-o output_gpf_filename]"
print " [-x flexres_filename]"
print " [-p parameter=newvalue. For example: -p ligand_types='HD,Br,A,C,OA' ]"
print " [-d directory of ligands to use to set types]"
print " [-y boolean to center grids on center of ligand]"
print " [-n boolean to NOT size_box_to_include_ligand]"
print " [-v]"
print
print "Prepare a grid parameter file (GPF) for AutoDock4."
print
print " The GPF will by default be <receptor>.gpf. This"
print "may be overridden using the -o flag."
if __name__ == '__main__':
import getopt
import sys
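    # Option string below: -v is a plain flag; -l, -r, -i, -x, -o, -p and -d
    # each take a value (note the trailing ':'); -y and -n are boolean flags.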
try:
opt_list, args = getopt.getopt(sys.argv[1:], 'vl:r:i:x:o:p:d:yn')
except getopt.GetoptError, msg:
print 'prepare_gpf4.py: %s' % msg
usage()
sys.exit(2)
receptor_filename = ligand_filename = None
    list_filename = gpf_filename = None
output_gpf_filename = None
flexres_filename = None
directory = None
parameters = []
verbose = None
center_on_ligand = False
size_box_to_include_ligand = True
for o, a in opt_list:
if o in ('-v', '--v'):
verbose = 1
if o in ('-l', '--l'):
ligand_filename = a
if verbose: print 'ligand_filename=', ligand_filename
if o in ('-r', '--r'):
receptor_filename = a
if verbose: print 'receptor_filename=', receptor_filename
if o in ('-i', '--i'):
gpf_filename = a
if verbose: print 'reference_gpf_filename=', gpf_filename
if o in ('-x', '--x'):
flexres_filename = a
if verbose: print 'flexres_filename=', flexres_filename
if o in ('-o', '--o'):
output_gpf_filename = a
if verbose: print 'output_gpf_filename=', output_gpf_filename
if o in ('-p', '--p'):
parameters.append(a)
if verbose: print 'parameters=', parameters
if o in ('-d', '--d'):
directory = a
if verbose: print 'directory=', directory
if o in ('-y', '--y'):
center_on_ligand = True
if verbose: print 'set center_on_ligand to ', center_on_ligand
if o in ('-n', '--n'):
size_box_to_include_ligand = False
if verbose: print 'set size_box_to_include_ligand to ', size_box_to_include_ligand
if o in ('-h', '--'):
usage()
sys.exit()
if (not receptor_filename) or (ligand_filename is None and directory is None):
print "prepare_gpf4.py: ligand and receptor filenames"
print " must be specified."
usage()
sys.exit()
gpfm = GridParameter4FileMaker(size_box_to_include_ligand=size_box_to_include_ligand,verbose=verbose)
if gpf_filename is not None:
gpfm.read_reference(gpf_filename)
if ligand_filename is not None:
gpfm.set_ligand(ligand_filename)
gpfm.set_receptor(receptor_filename)
if directory is not None:
gpfm.set_types_from_directory(directory)
if flexres_filename is not None:
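        # Merge the flexible-residue atom types into the GPF's ligand_types so
        # that affinity maps are also generated for those types.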
flexmol = Read(flexres_filename)[0]
flexres_types = flexmol.allAtoms.autodock_element
lig_types = gpfm.gpo['ligand_types']['value'].split()
all_types = lig_types
for t in flexres_types:
if t not in all_types:
all_types.append(t)
all_types_string = all_types[0]
if len(all_types)>1:
for t in all_types[1:]:
all_types_string = all_types_string + " " + t
gpfm.gpo['ligand_types']['value'] = all_types_string
for p in parameters:
key,newvalue = string.split(p, '=')
kw = {key:newvalue}
apply(gpfm.set_grid_parameters, (), kw)
#gpfm.set_grid_parameters(spacing=1.0)
if center_on_ligand is True:
gpfm.gpo['gridcenterAuto']['value'] = 0
cenx,ceny,cenz = gpfm.ligand.getCenter()
gpfm.gpo['gridcenter']['value'] = "%.3f %.3f %.3f" %(cenx,ceny,cenz)
gpfm.write_gpf(output_gpf_filename)
#prepare_gpf4.py -l 1ebg_lig.pdbqt -r 1ebg_rec.pdbqt -p spacing=0.4 -p npts=[60,60,60] -i ref.gpf -o testing.gpf
| mit | -1,222,868,990,191,267,000 | 35.796992 | 129 | 0.592154 | false |
wy182000/gyp | pylib/gyp/generator/ninja.py | 1 | 80265 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
class Target:
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here.
self.component_objs = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
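#
# As a rough illustration (exact paths depend on the build layout): with
# base_dir 'foo' and build_dir 'out/Debug', a source 'bar/baz.cc' listed in
# foo/foo.gyp becomes the ninja path '../../foo/bar/baz.cc', while the object
# file it compiles to for target 'targ' is written to 'obj/foo/bar/targ.baz.o'.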
class NinjaWriter:
def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
output_file, flavor, toplevel_dir=None):
"""
    base_dir: path from source root to directory containing this gyp file;
              by gyp semantics, all input paths are relative to this directory
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.qualified_target = qualified_target
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
return None
if len(targets) > 1:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets)
self.ninja.newline()
return targets[0]
def WriteSpec(self, spec, config_name, generator_flags,
case_sensitive_filesystem):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = spec.get('sources', []) + extra_sources
if sources:
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
config_name, config, sources, compile_depends_stamp, pch,
case_sensitive_filesystem, spec)
# Some actions/rules output 'sources' that are already object files.
link_deps += [self.GypPathToNinja(f)
for f in sources if f.endswith(self.obj_ext)]
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
# Write out a link step, if needed.
output = None
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
self.target.actions_stamp or actions_depends)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRules(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', []) + \
extra_mac_bundle_resources
self.WriteMacBundleResources(mac_bundle_resources, mac_bundle_depends)
self.WriteMacInfoPlist(mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv(
'$!PRODUCT_DIR', config=self.config_name)
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'],
hashlib.md5(self.qualified_target).hexdigest())
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env=env)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
extra_mac_bundle_resources):
env = self.GetSortedXcodeEnv()
all_outputs = []
for rule in rules:
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'],
hashlib.md5(self.qualified_target).hexdigest())
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env=env)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if ('${%s}' % var) in argument:
needed_variables.add(var)
def cygwin_munge(path):
if is_cygwin:
return path.replace('\\', '/')
return path
# For each source file, write an edge that generates all the outputs.
for source in rule.get('rule_sources', []):
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
inputs = [self.ExpandRuleVariables(i, root, dirname,
source, ext, basename)
for i in rule.get('inputs', [])]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var == None, repr(var)
inputs = [self.GypPathToNinja(i, env) for i in inputs]
outputs = [self.GypPathToNinja(o, env) for o in outputs]
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetSortedXcodeEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource')])
bundle_depends.append(output)
def WriteMacInfoPlist(self, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(intermediate_plist, 'infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
self.ninja.build(out, 'mac_tool', info_plist,
variables=[('mactool_cmd', 'copy-info-plist'),
('env', env)])
bundle_depends.append(out)
def WriteSources(self, config_name, config, sources, predepends,
precompiled_header, case_sensitive_filesystem, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
pdbpath = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir,
self.name + '.pdb'))
self.WriteVariableList('pdbname', [pdbpath])
self.WriteVariableList('pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
defines = config.get('defines', []) + extra_defines
self.WriteVariableList('defines', [Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList('rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
env = self.GetSortedXcodeEnv()
self.WriteVariableList('includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands()
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext)
if include: self.ninja.variable(var, include)
self.WriteVariableList('cflags', map(self.ExpandSpecial, cflags))
self.WriteVariableList('cflags_c', map(self.ExpandSpecial, cflags_c))
self.WriteVariableList('cflags_cc', map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList('cflags_objc', map(self.ExpandSpecial,
cflags_objc))
self.WriteVariableList('cflags_objcc', map(self.ExpandSpecial,
cflags_objcc))
self.ninja.newline()
outputs = []
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
self.msvs_settings.GetArch(config_name) == 'x86' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
# Asm files only get auto assembled for x86 (not x64).
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
# Ninja's depfile handling gets confused when the case of a filename
# changes on a case-insensitive file system. To work around that, always
# convert .o filenames to lowercase on such file systems. See
# https://github.com/martine/ninja/issues/402 for details.
if not case_sensitive_filesystem:
output = output.lower()
implicit = precompiled_header.GetObjDependencies([input], [output])
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
self.ninja.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
self.WritePchTargets(pch_commands)
self.ninja.newline()
return outputs
def WritePchTargets(self, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
self.ninja.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
extra_link_deps |= set(target.component_objs)
elif self.flavor == 'win' and target.import_lib:
extra_link_deps.add(target.import_lib)
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
extra_link_deps.add(target.binary)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
link_deps.extend(list(extra_link_deps))
extra_bindings = []
if self.is_mac_bundle:
output = self.ComputeMacBundleBinaryOutput()
else:
output = self.ComputeOutput(spec)
extra_bindings.append(('postbuilds',
self.GetPostbuildCommand(spec, output, output)))
is_executable = spec['type'] == 'executable'
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja)
elif self.flavor == 'win':
manifest_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, manifest_files = self.msvs_settings.GetLdflags(config_name,
self.GypPathToNinja, self.ExpandSpecial, manifest_name, is_executable)
self.WriteVariableList('manifests', manifest_files)
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name),
self.msvs_settings.IsLinkIncremental(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
ldflags = config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append('-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList('ldflags',
gyp.common.uniquer(map(self.ExpandSpecial,
ldflags)))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = [QuoteShellArgument('-LIBPATH:' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList('libs', library_dirs + libraries)
self.target.binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor == 'win':
extra_bindings.append(('dll', output))
if '/NOENTRY' not in ldflags:
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
output = [output, self.target.import_lib]
else:
output = [output, output + '.TOC']
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
self.ninja.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
variables=extra_bindings)
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
if spec['type'] == 'none':
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
variables = []
postbuild = self.GetPostbuildCommand(
spec, self.target.binary, self.target.binary)
if postbuild:
variables.append(('postbuilds', postbuild))
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps, variables=variables)
else:
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
postbuild = self.GetPostbuildCommand(spec, output, self.target.binary,
is_command_start=not package_framework)
variables = []
if postbuild:
variables.append(('postbuilds', postbuild))
if package_framework:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def GetPostbuildCommand(self, spec, output, output_binary,
is_command_start=False):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
target_postbuilds = []
if output_binary is not None:
target_postbuilds = self.xcode_settings.GetTargetPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
quiet=True)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
postbuilds = target_postbuilds + postbuilds
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
    # implicit postbuild that cds into that directory.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
    # G will be non-zero if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
    'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeMacBundleBinaryOutput(self):
"""Return the 'output' (full output path) to the binary in a bundle."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetExecutablePath()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
def ComputeOutput(self, spec, type=None):
"""Compute the path for the final output of the spec."""
assert not self.is_mac_bundle or type
if not type:
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, var, values):
assert not isinstance(values, str)
if values is None:
values = []
self.ninja.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
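    # For example, a stray '$out' in a hand-written message becomes '_out',
    # while the protected '${source}'-style references are left untouched.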
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, restat=True,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.exe'
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.lib'
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.dll'
generator_flags = params.get('generator_flags', {})
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
return max(1, stat.ullTotalPhys / (4 * (2 ** 30))) # total / 4GB
else:
# TODO(scottmg): Implement this for other platforms.
return 1
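# Illustrative note (added): on a Windows machine reporting 16 GiB of physical
# memory, GetDefaultConcurrentLinks() above returns max(1, 16 / 4) = 4
# concurrent link steps; all other platforms currently fall back to 1.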
def _GetWinLinkRuleNameSuffix(embed_manifest, link_incremental):
"""Returns the suffix used to select an appropriate linking rule depending on
     whether manifest embedding and/or incremental linking is enabled."""
suffix = ''
if embed_manifest:
suffix += '_embed'
if link_incremental:
suffix += '_inc'
return suffix
def _AddWinLinkRules(master_ninja, embed_manifest, link_incremental):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
cmd = ('cmd /c %(ldcmd)s'
' && %(python)s gyp-win-tool manifest-wrapper $arch'
' cmd /c if exist %(out)s.manifest del %(out)s.manifest'
' && %(python)s gyp-win-tool manifest-wrapper $arch'
' $mt -nologo -manifest $manifests')
if embed_manifest and not link_incremental:
# Embed manifest into a binary. If incremental linking is enabled,
# embedding is postponed to the re-linking stage (see below).
cmd += ' -outputresource:%(out)s;%(resname)s'
else:
# Save manifest as an external file.
cmd += ' -out:%(out)s.manifest'
if link_incremental:
      # There is no point in generating a separate rule for the case when
      # incremental linking is enabled but manifest embedding is disabled.
# In that case the basic rule should be used (e.g. 'link').
# See also implementation of _GetWinLinkRuleNameSuffix().
assert embed_manifest
# Make .rc file out of manifest, compile it to .res file and re-link.
cmd += (' && %(python)s gyp-win-tool manifest-to-rc $arch'
' %(out)s.manifest %(out)s.manifest.rc %(resname)s'
' && %(python)s gyp-win-tool rc-wrapper $arch $rc'
' %(out)s.manifest.rc'
' && %(ldcmd)s %(out)s.manifest.res')
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return cmd % {'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name}
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest, link_incremental)
dlldesc = 'LINK%s(DLL) $dll' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch '
'$ld /nologo $implibflag /DLL /OUT:$dll '
'/PDB:$dll.pdb @$dll.rsp' % sys.executable)
dllcmd = FullLinkCommand(dllcmd, '$dll', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$dll.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True)
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$dll.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True)
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch '
'$ld /nologo /OUT:$out /PDB:$out.pdb @$out.rsp' %
sys.executable)
exe_cmd = FullLinkCommand(exe_cmd, '$out', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $out' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$out.rsp',
rspfile_content='$in_newline $libs $ldflags')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja = ninja_syntax.Writer(
OpenOutput(os.path.join(toplevel_build, 'build.ninja')),
width=120)
case_sensitive_filesystem = not os.path.exists(
os.path.join(toplevel_build, 'BUILD.NINJA'))
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
  # The rules are:
  # - The priority, from lowest to highest, is: the default gcc/g++, the
  #   'make_global_settings' in gyp, and then the environment variable.
  # - If there is no 'make_global_settings' for CC.host/CXX.host nor a
  #   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be set
  #   to cc/cxx.
if flavor == 'win':
# Overridden by local arch choice in the use_deps case.
# Chromium's ffmpeg c99conv.py currently looks for a 'cc =' line in
    # build.ninja, so it needs something valid here. http://crbug.com/233985
cc = 'cl.exe'
cxx = 'cl.exe'
ld = 'link.exe'
ld_host = '$ld'
else:
cc = 'gcc'
cxx = 'g++'
ld = '$cxx'
ld_host = '$cxx_host'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'CC':
cc = os.path.join(build_to_root, value)
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, OpenOutput)
for arch, path in cl_paths.iteritems():
master_ninja.variable(
'cl_' + arch, CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, flavor)))
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
ld = GetEnvironFallback(['LD_target', 'LD'], ld)
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', 'lib.exe')
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('asm', 'ml.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], 'ar'))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], 'ar'))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
ld_host = GetEnvironFallback(['LD_host'], ld_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $in',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %{suffix} which
# is used in the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ]; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then mv ${lib}.tmp ${lib}.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ readelf -d ${lib} | grep SONAME ; '
'nm -gD -f p ${lib} | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--start-group $in $solibs -Wl,--end-group '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in $solibs -Wl,--end-group $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True, link_incremental=True)
_AddWinLinkRules(master_ninja, embed_manifest=True, link_incremental=False)
_AddWinLinkRules(master_ninja, embed_manifest=False, link_incremental=False)
# Do not generate rules for embed_manifest=False and link_incremental=True
# because in that case rules for (False, False) should be used (see
# implementation of _GetWinLinkRuleNameSuffix()).
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; '
'else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then '
'mv ${lib}.tmp ${lib}.TOC ; '
'fi; '
'fi'
% { 'solink': '$ld -shared $ldflags -o $lib %(suffix)s',
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
# TODO(thakis): The solink_module rule is likely wrong. Xcode seems to pass
# -bundle -single_module here (for osmesa.so).
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '$in $solibs $libs$postbuilds'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '$in $solibs $libs$postbuilds'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'infoplist',
description='INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='ln -f $in $out 2>/dev/null || (rm -rf $out && cp -af $in $out)')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets.")
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
writer = NinjaWriter(qualified_target, target_outputs, base_path, build_dir,
OpenOutput(os.path.join(toplevel_build, output_file)),
flavor, toplevel_dir=options.toplevel_dir)
master_ninja.subninja(output_file)
target = writer.WriteSpec(
spec, config_name, generator_flags, case_sensitive_filesystem)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| bsd-3-clause | -8,764,016,464,242,874,000 | 40.203799 | 80 | 0.626624 | false |
myd7349/DiveIntoPython3Practices | chapter_09_UnitTesting/roman2.py | 1 | 4834 | # -*- coding: utf-8 -*-
# 2014-11-18T22:48+08:00
import unittest
class OutOfRangeError(ValueError): pass
roman_numeral_map = (('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1))
def to_roman(n):
'''Convert integer to Roman numeral'''
if n > 3999:
# The unit test does not check the human-readable string that accompanies the exception
raise OutOfRangeError('number out of range (must be less than 4000)')
result = ''
for numeral, integer in roman_numeral_map:
while n >= integer:
result += numeral
n -= integer
return result
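# Illustrative trace (added, not in the original file): to_roman(1974) walks
# roman_numeral_map greedily, appending 'M', 'CM', 'L', 'X', 'X', 'IV' in turn,
# and returns 'MCMLXXIV'.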
class KnownValues(unittest.TestCase):
known_values = ((1, 'I'),
(2, 'II'),
(3, 'III'),
(4, 'IV'),
(5, 'V'),
(6, 'VI'),
(7, 'VII'),
(8, 'VIII'),
(9, 'IX'),
(10, 'X'),
(50, 'L'),
(100, 'C'),
(500, 'D'),
(1000, 'M'),
(31, 'XXXI'),
(148, 'CXLVIII'),
(294, 'CCXCIV'),
(312, 'CCCXII'),
(421, 'CDXXI'),
(528, 'DXXVIII'),
(621, 'DCXXI'),
(782, 'DCCLXXXII'),
(870, 'DCCCLXX'),
(941, 'CMXLI'),
(1043, 'MXLIII'),
(1110, 'MCX'),
(1226, 'MCCXXVI'),
(1301, 'MCCCI'),
(1485, 'MCDLXXXV'),
(1509, 'MDIX'),
(1607, 'MDCVII'),
(1754, 'MDCCLIV'),
(1832, 'MDCCCXXXII'),
(1993, 'MCMXCIII'),
(2074, 'MMLXXIV'),
(2152, 'MMCLII'),
(2212, 'MMCCXII'),
(2343, 'MMCCCXLIII'),
(2499, 'MMCDXCIX'),
(2574, 'MMDLXXIV'),
(2646, 'MMDCXLVI'),
(2723, 'MMDCCXXIII'),
(2892, 'MMDCCCXCII'),
(2975, 'MMCMLXXV'),
(3051, 'MMMLI'),
(3185, 'MMMCLXXXV'),
(3250, 'MMMCCL'),
(3313, 'MMMCCCXIII'),
(3408, 'MMMCDVIII'),
(3501, 'MMMDI'),
(3610, 'MMMDCX'),
(3743, 'MMMDCCXLIII'),
(3844, 'MMMDCCCXLIV'),
(3888, 'MMMDCCCLXXXVIII'),
(3940, 'MMMCMXL'),
(3999, 'MMMCMXCIX'))
def test_to_roman_known_values(self):
'''to_roman should give known result with known input'''
for integer, numeral in self.known_values:
self.assertEqual(numeral, to_roman(integer))
class ToRomanBadInput(unittest.TestCase):
# PASS
def test_too_large(self):
'''to_roman should fail with large input'''
# 3. The unittest.TestCase class provides the assertRaises method,
# which takes the following arguments: the exception you’re expecting,
# the function you’re testing, and the arguments you’re passing to that
# function. (If the function you’re testing takes more than one argument,
# pass them all to assertRaises, in order, and it will pass them right along
# to the function you’re testing.)
# assertRaises(exception, callable, *args, **kwds)
# assertRaises(exception, msg=None)
self.assertRaises(OutOfRangeError, to_roman, 4000)
if __name__ == '__main__':
    # By default, unittest.main calls sys.exit after running the tests, so any
    # code after the call to unittest.main would be ignored. Passing False to
    # the 'exit' keyword argument changes this behavior.
unittest.main(exit = False)
# It is not enough to test that functions succeed when given good input; you must
# also test that they fail when given bad input. And not just any sort of failure;
# they must fail in the way you expect.
try:
print(to_roman(5000))
except OutOfRangeError:
print('5000 is too large')
# Along with testing numbers that are too large,
# you need to test numbers that are too small.
print('{!r}'.format(to_roman(0)))
print('{!r}'.format(to_roman(-1)))
| lgpl-3.0 | 2,665,887,912,295,815,000 | 36.984252 | 95 | 0.439884 | false |
troup-system/troup | troup/observer.py | 1 | 1190 | # Copyright 2016 Pavle Jonoski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Observable:
def __init__(self):
self.listeners = {}
def on(self, event, handler):
listeners = self.listeners.get(event)
if not listeners:
listeners = self.listeners[event] = []
listeners.append(handler)
def trigger(self, event, *args):
listeners = self.listeners.get(event)
if listeners:
for listener in listeners:
listener(*args)
def remove_listener(self, event, listener):
listeners = self.listeners.get(event)
if listeners:
listeners.remove(listener)
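# Minimal usage sketch (added for illustration, not part of the original
# module); the event name and handler below are made-up examples.
if __name__ == '__main__':
    def on_started(node_name):
        print('started: %s' % node_name)
    observable = Observable()
    observable.on('started', on_started)
    observable.trigger('started', 'node-1')         # prints "started: node-1"
    observable.remove_listener('started', on_started)
    observable.trigger('started', 'node-1')         # no listeners left, prints nothing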
| apache-2.0 | -3,194,647,551,165,012,500 | 33 | 74 | 0.663866 | false |
MaxTyutyunnikov/lino | obsolete/tests/74.py | 1 | 1459 | ## Copyright 2003-2006 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import win32ui
import win32con
from lino.misc import tsttools
from lino import config
class Case(tsttools.TestCase):
""
def test01(self):
spoolFile = self.addTempFile("74.ps",showOutput=True)
dc = win32ui.CreateDC()
dc.CreatePrinterDC(config.win32.get('postscript_printer'))
dc.StartDoc("my print job",spoolFile)
dc.SetMapMode(win32con.MM_TWIPS)
dc.StartPage()
minx, miny = dc.GetWindowOrg()
maxx,maxy = dc.GetWindowExt()
for x in range(minx,maxx,1440):
for y in range(miny,maxy,1440):
dc.TextOut(x,-y,repr((x,y)))
dc.EndDoc()
if __name__ == '__main__':
tsttools.main()
| gpl-3.0 | -2,120,210,230,867,551,700 | 30.717391 | 70 | 0.681974 | false |
gem/oq-engine | openquake/hmtk/seismicity/declusterer/dec_gardner_knopoff.py | 1 | 6271 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (C) 2010-2021 GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM’s OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM’s OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# ([email protected]).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
"""
Module :mod:`openquake.hmtk.seismicity.declusterer.dec_gardner_knopoff`
defines the Gardner and Knopoff declustering algorithm
"""
import numpy as np
from openquake.hmtk.seismicity.declusterer.base import (
BaseCatalogueDecluster, DECLUSTERER_METHODS)
from openquake.hmtk.seismicity.utils import decimal_year, haversine
from openquake.hmtk.seismicity.declusterer.distance_time_windows import (
TIME_DISTANCE_WINDOW_FUNCTIONS)
@DECLUSTERER_METHODS.add(
"decluster",
time_distance_window=TIME_DISTANCE_WINDOW_FUNCTIONS,
fs_time_prop=np.float)
class GardnerKnopoffType1(BaseCatalogueDecluster):
"""
This class implements the Gardner Knopoff algorithm as described in
this paper:
    Gardner, J. K. and Knopoff, L. (1974). Is the sequence of earthquakes
    in Southern California, with aftershocks removed, Poissonian? Bull.
Seism. Soc. Am., 64(5): 1363-1367.
"""
def decluster(self, catalogue, config):
"""
The configuration of this declustering algorithm requires two
objects:
- A time-distance window object (key is 'time_distance_window')
- A value in the interval [0,1] expressing the fraction of the
          time window used for foreshocks (key is 'fs_time_prop')
:param catalogue:
Catalogue of earthquakes
:type catalogue: Dictionary
:param config:
Configuration parameters
:type config: Dictionary
:returns:
**vcl vector** indicating cluster number,
**flagvector** indicating which eq events belong to a cluster
:rtype: numpy.ndarray
"""
# Get relevant parameters
neq = len(catalogue.data['magnitude']) # Number of earthquakes
# Get decimal year (needed for time windows)
year_dec = decimal_year(
catalogue.data['year'], catalogue.data['month'],
catalogue.data['day'])
# Get space and time windows corresponding to each event
# Initial Position Identifier
sw_space, sw_time = (
config['time_distance_window'].calc(
catalogue.data['magnitude'], config.get('time_cutoff')))
eqid = np.arange(0, neq, 1)
# Pre-allocate cluster index vectors
vcl = np.zeros(neq, dtype=int)
# Sort magnitudes into descending order
id0 = np.flipud(np.argsort(catalogue.data['magnitude'],
kind='heapsort'))
longitude = catalogue.data['longitude'][id0]
latitude = catalogue.data['latitude'][id0]
sw_space = sw_space[id0]
sw_time = sw_time[id0]
year_dec = year_dec[id0]
eqid = eqid[id0]
flagvector = np.zeros(neq, dtype=int)
# Begin cluster identification
clust_index = 0
for i in range(0, neq - 1):
if vcl[i] == 0:
# Find Events inside both fore- and aftershock time windows
dt = year_dec - year_dec[i]
vsel = np.logical_and(
vcl == 0,
np.logical_and(
dt >= (-sw_time[i] * config['fs_time_prop']),
dt <= sw_time[i]))
# Of those events inside time window,
# find those inside distance window
vsel1 = haversine(longitude[vsel],
latitude[vsel],
longitude[i],
latitude[i]) <= sw_space[i]
vsel[vsel] = vsel1[:, 0]
temp_vsel = np.copy(vsel)
temp_vsel[i] = False
if any(temp_vsel):
# Allocate a cluster number
vcl[vsel] = clust_index + 1
flagvector[vsel] = 1
# For those events in the cluster before the main event,
# flagvector is equal to -1
temp_vsel[dt >= 0.0] = False
flagvector[temp_vsel] = -1
flagvector[i] = 0
clust_index += 1
# Re-sort the catalog_matrix into original order
id1 = np.argsort(eqid, kind='heapsort')
eqid = eqid[id1]
vcl = vcl[id1]
flagvector = flagvector[id1]
return vcl, flagvector
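# Hedged usage sketch (added for illustration, not part of the original
# module). It assumes an already-loaded openquake.hmtk catalogue object and a
# GardnerKnopoffWindow class from the distance_time_windows module imported
# above; the parameter values are illustrative only.
#
#   from openquake.hmtk.seismicity.declusterer.distance_time_windows import (
#       GardnerKnopoffWindow)
#   config = {'time_distance_window': GardnerKnopoffWindow(),
#             'fs_time_prop': 1.0}
#   vcl, flagvector = GardnerKnopoffType1().decluster(catalogue, config)
#   mainshock_mask = (flagvector == 0)   # 0 = mainshock/unclustered, 1 = aftershock, -1 = foreshock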
| agpl-3.0 | -5,080,174,637,922,129,000 | 39.173077 | 79 | 0.629807 | false |
qtproject/pyside-shiboken | tests/samplebinding/privatedtor_test.py | 1 | 3555 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
'''Test cases for a class with a private destructor.'''
import gc
import sys
import unittest
import shiboken2 as shiboken
from sample import PrivateDtor
class PrivateDtorTest(unittest.TestCase):
'''Test case for PrivateDtor class'''
def testPrivateDtorInstanciation(self):
        '''Test if instantiation of a class with a private destructor raises an exception.'''
self.assertRaises(TypeError, PrivateDtor)
def testPrivateDtorInheritance(self):
'''Test if inheriting from PrivateDtor raises an exception.'''
def inherit():
class Foo(PrivateDtor):
pass
self.assertRaises(TypeError, inherit)
def testPrivateDtorInstanceMethod(self):
        '''Test if the PrivateDtor.instance() method returns the proper singleton.'''
pd1 = PrivateDtor.instance()
calls = pd1.instanceCalls()
self.assertEqual(type(pd1), PrivateDtor)
pd2 = PrivateDtor.instance()
self.assertEqual(pd2, pd1)
self.assertEqual(pd2.instanceCalls(), calls + 1)
def testPrivateDtorRefCounting(self):
'''Test refcounting of the singleton returned by PrivateDtor.instance().'''
pd1 = PrivateDtor.instance()
calls = pd1.instanceCalls()
refcnt = sys.getrefcount(pd1)
pd2 = PrivateDtor.instance()
self.assertEqual(pd2.instanceCalls(), calls + 1)
self.assertEqual(sys.getrefcount(pd2), sys.getrefcount(pd1))
self.assertEqual(sys.getrefcount(pd2), refcnt + 1)
del pd1
self.assertEqual(sys.getrefcount(pd2), refcnt)
del pd2
gc.collect()
pd3 = PrivateDtor.instance()
self.assertEqual(type(pd3), PrivateDtor)
self.assertEqual(pd3.instanceCalls(), calls + 2)
self.assertEqual(sys.getrefcount(pd3), refcnt)
def testClassDecref(self):
# Bug was that class PyTypeObject wasn't decrefed when instance
# was invalidated
before = sys.getrefcount(PrivateDtor)
for i in range(1000):
obj = PrivateDtor.instance()
shiboken.invalidate(obj)
after = sys.getrefcount(PrivateDtor)
self.assertLess(abs(before - after), 5)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 2,029,832,499,969,279,700 | 34.909091 | 91 | 0.654571 | false |
tommyod/KDEpy | KDEpy/tests/test_estimator_vs_estimator.py | 1 | 2239 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Test the implemented estimators against each other on simple data sets.
"""
import numpy as np
from KDEpy.NaiveKDE import NaiveKDE
from KDEpy.TreeKDE import TreeKDE
from KDEpy.FFTKDE import FFTKDE
import itertools
import pytest
N = 2 ** 5
estimators = [NaiveKDE, TreeKDE, FFTKDE]
estimators_2 = list(itertools.combinations(estimators, 2))
kernels = list(NaiveKDE._available_kernels.keys())
@pytest.mark.parametrize("est1, est2", estimators_2)
def test_vs_simple(est1, est2):
"""
Test that mean error is low on default parameters.
"""
np.random.seed(12)
data = np.random.randn(N)
x1, y1 = est1().fit(data)()
x1, y2 = est2().fit(data)()
assert np.sqrt(np.mean((y1 - y2) ** 2)) < 0.0001
@pytest.mark.parametrize("est1, est2", estimators_2)
def test_vs_simple_weighted(est1, est2):
"""
Test that mean error is low on default parameters with weighted data.
"""
np.random.seed(12)
data = np.random.randn(N) * 10
weights = np.random.randn(N) ** 2 + 1
x1, y1 = est1().fit(data, weights)()
x1, y2 = est2().fit(data, weights)()
assert np.sqrt(np.mean((y1 - y2) ** 2)) < 0.0001
@pytest.mark.parametrize("estimators, kernel, bw", list(itertools.product(estimators_2, kernels, [0.1, 5])))
def test_vs_simple_weighted_kernels(estimators, kernel, bw):
"""
Test every kernel function over every implementation.
"""
est1, est2 = estimators
np.random.seed(13)
data = np.random.randn(N) * 10
weights = np.random.randn(N) ** 2 + 1
x1, y1 = est1(kernel, bw=bw).fit(data, weights)()
x1, y2 = est2(kernel, bw=bw).fit(data, weights)()
assert np.sqrt(np.mean((y1 - y2) ** 2)) < 0.01
# TODO: Examine why error increases when bw -> 0
if __name__ == "__main__":
# --durations=10 <- May be used to show potentially slow tests
# pytest.main(args=['.', '--doctest-modules', '-v'])
est1, est2 = NaiveKDE, TreeKDE
np.random.seed(13)
data = np.random.randn(2 ** 8) * 10
weights = np.random.randn(2 ** 8) ** 2 + 1
x1, y1 = est1(bw=100).fit(data, weights)()
x1, y2 = est2(bw=100).fit(data, weights)()
import matplotlib.pyplot as plt
plt.plot(x1, y1 - y2)
| gpl-3.0 | 7,116,459,819,006,918,000 | 28.460526 | 108 | 0.634658 | false |
BenDoan/unomaha_utils | course_scraper/scraper.py | 1 | 5095 | #!/usr/bin/env python2
"""
Usage:
./scraper.py [options]
Options:
-h, --help Prints this help message
-o FILE, --output FILE Specifies output file
-c COLLEGE, --college COLLEGE Specifies a specific college
-l, --last-term-only Only ouputs the last term
-u URL, --url URL Specify an alternate class-search url
-v, --verbose Turns on verbose logging
"""
import datetime
import itertools
import json
import logging
import time
from collections import OrderedDict
from multiprocessing import Pool, cpu_count
from os import path
import requests
from BeautifulSoup import BeautifulSoup
from docopt import docopt
BASE_URL = "https://www.unomaha.edu/registrar/students/before-you-enroll/class-search/"
terms = [1158]
def get_college_data((college, term)):
"""Returns a dictionary containing all classes within college and term"""
logging.info("Processing college {}".format(college))
time.sleep(1)
page = requests.get("{}?term={}&session=&subject={}&catalog_nbr=&career=&instructor=&class_start_time=&class_end_time=&location=&special=&instruction_mode=".format(BASE_URL, term, college))
soup = BeautifulSoup(page.text)
if len(soup.findAll("div", {'class': 'dotted-bottom'})) == 0:
logging.error("No classes for college {}, term {}".format(college, term))
classes = OrderedDict()
#loop through each class in the college
for dotted in soup.findAll("div", {'class': 'dotted-bottom'}):
cls = OrderedDict()
number = dotted.find("h2")
if number:
class_number = number.text.split(" ")[-1]
else:
class_number = "-"
title = dotted.find("p")
if title:
cls['title'] = title.text
else:
cls['title'] = "-"
desc = dotted.findAll("p")
if len(desc) > 1:
cls['desc'] = desc[1].text
else:
cls['desc'] = "-"
if len(desc) > 2:
cls['prereq'] = desc[2].text
else:
cls['prereq'] = "-"
sections = OrderedDict()
tables = dotted.findAll("table")
if tables:
# loop through each section in the class
for table in tables:
section = OrderedDict()
rows = table.findAll("tr")
for tr in rows:
tds = tr.findAll("td")
if tds:
if len(tds) > 1 and tds[1].text != "Date": # remove weird field
section[tds[0].text] = tds[1].text
section_name = table.find("th")
if section_name:
section_num = section_name.text.split(" ")[-1]
sections[section_num] = section
cls['sections'] = sections
if class_number != "-":
classes[class_number] = cls
return classes
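# Illustrative shape of the mapping returned above (added; the section field
# names such as 'Instructor' are hypothetical -- they are whatever the
# class-search result tables contain):
#   {'1010': {'title': '...', 'desc': '...', 'prereq': '...',
#             'sections': {'001': {'Instructor': '...', 'Meets': '...'}}}}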
def get_full_term_listing(college=None):
"""Returns a dictionary containing the uno classes
for every listed term and college"""
pool = Pool(cpu_count()*2)
term_data = OrderedDict()
for term in terms:
logging.info("Processing term {}".format(term))
if college is None:
colleges = get_colleges(term)
else:
colleges = [college]
results = pool.map(get_college_data, zip(colleges, itertools.repeat(term)))
term_data[term] = OrderedDict(zip(colleges, results))
stats = {
"num_terms": len(term_data)
}
for term, colleges in term_data.items():
stats[term] = {
"num_colleges": len(colleges)
}
out_dict = {
"meta" : {
"time": int(datetime.datetime.utcnow().strftime("%s")),
"time_str": str(datetime.datetime.utcnow()),
"url": BASE_URL,
"stats": stats,
},
"term_data": term_data
}
return out_dict
def _main():
args = docopt(__doc__, version="1")
# process arguments
if args['--college']:
college = args['--college']
else:
college = None
    if args['--url']:
        global BASE_URL
        BASE_URL = args['--url']
    if args['--verbose']:
        logging.basicConfig(level=logging.INFO)
    else:
        logging.basicConfig(level=logging.WARNING)
    # fetch the term list first (after any --url override), then optionally
    # keep only the last term so --last-term-only actually takes effect
    global terms
    terms = get_terms()
    if args['--last-term-only']:
        terms = [terms[-1]]
term_data = get_full_term_listing(college)
# output class data as json
json_data = json.dumps(term_data)
if args['--output'] is not None:
with open(path.abspath(args['--output']), 'w') as f:
f.write(json_data)
else:
print json_data
def get_colleges(term):
return [x['value'] for x in requests.get("{}subjects.load.php?term={}".format(BASE_URL, term)).json()]
def get_terms():
page = requests.get(BASE_URL)
soup = BeautifulSoup(page.text)
return [int(dict(x.attrs)['value']) for x in soup.find("select").findAll("option")]
if __name__ == "__main__":
_main()
| mit | -6,599,031,899,051,639,000 | 27.463687 | 193 | 0.559961 | false |
admk/soap | soap/semantics/schedule/ii.py | 1 | 2125 | import itertools
import math
import numpy
from soap.context import context
neg_inf = -float('inf')
def rec_init_int_check(graph, ii):
"""
Checks if the target II is valid. Runs a modified Floyd-Warshall
algorithm to test the absence of positive cycles.
    Input ii must be greater than or equal to 1.
"""
nodes = graph.nodes()
len_nodes = len(nodes)
dist_shape = [len_nodes] * 2
dist = numpy.full(dist_shape, neg_inf)
iterer = itertools.product(enumerate(nodes), repeat=2)
for (from_idx, from_node), (to_idx, to_node) in iterer:
try:
edge = graph[from_node][to_node]
except KeyError:
continue
dist[from_idx, to_idx] = edge['latency'] - ii * edge['distance']
iterer = itertools.product(range(len_nodes), repeat=3)
for mid_idx, from_idx, to_idx in iterer:
dist_val = dist[from_idx, mid_idx] + dist[mid_idx, to_idx]
if dist_val > dist[from_idx, to_idx]:
if from_idx == to_idx and dist_val > 0:
return False
dist[from_idx, to_idx] = dist_val
return True
def rec_init_int_search(graph, init_ii=1, prec=None, round_values=False):
"""
    Performs a binary search for the recurrence-based minimum initiation
interval (RecMII).
"""
prec = prec or context.ii_precision
min_ii = max_ii = init_ii
incr = prec = 2 ** -prec
# find an upper-bound on MII
while not rec_init_int_check(graph, max_ii):
max_ii += incr
incr *= 2
# binary search for the optimal MII
last_ii = max_ii
while max_ii - min_ii > prec:
mid_ii = (min_ii + max_ii) / 2
if rec_init_int_check(graph, mid_ii):
max_ii = last_ii = mid_ii
else:
min_ii = mid_ii
if round_values:
return int(math.ceil(last_ii - (max_ii - min_ii) / 2))
return last_ii
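# Hedged example (added, not in the original module): it assumes the dependence
# graph is a networkx.DiGraph whose edges carry 'latency' and 'distance'
# attributes, which matches the graph[u][v]['latency'] access pattern above.
#   import networkx
#   g = networkx.DiGraph()
#   g.add_edge('a', 'b', latency=2, distance=0)   # intra-iteration dependence
#   g.add_edge('b', 'a', latency=1, distance=1)   # loop-carried dependence
#   rec_init_int_check(g, 2.5)   # False: cycle weight 3 - 2.5 * 1 is positive
#   rec_init_int_check(g, 3.0)   # True: no positive cycle remains
#   rec_init_int_search(g)       # ~3.0, the RecMII, to within context.ii_precision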
def res_init_int(memory_access_map):
if not memory_access_map:
return 1
port_count = context.port_count
return max(1, max(
access_count / port_count
for access_count in memory_access_map.values()))
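# Worked example (added): with context.port_count == 2 and a memory access map
# such as {'ram_a': 6, 'ram_b': 3}, the expression above evaluates to
# max(1, max(6 / 2, 3 / 2)) == 3.0.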
| mit | 3,456,767,238,910,983,000 | 26.24359 | 73 | 0.596706 | false |
shivamvats/graphSearch | heuristicSearch/planners/island_astar.py | 1 | 2665 | from astar import *
class IslandAstar(Astar):
def __init__(self, env, inflation=10):
super(IslandAstar, self).__init__(env, inflation)
#@profile
def plan(self, startNode, goalNode, viz=None):
self.startNode = startNode
self.goalNode = goalNode
print(goalNode.getNodeId())
        # Ordered list of expanded states and their timestamps.
stateTimeStamps = collections.OrderedDict()
self.startNode.setG(0)
heuristicCost = self.env.heuristic(startNode, goalNode)
startNode.setH(heuristicCost)
openQ = Q.PriorityQueue()
# Using a dictionary 'cos list has slow lookup.
closed = {}
openQ.put((startNode.getH() + startNode.getG(), startNode))
currNode = startNode
startTime = time.time()
while(not openQ.empty() and currNode.getNodeId() !=
self.goalNode.getNodeId()):
priority, currNode = openQ.get()
nodeId = currNode.getNodeId()
if nodeId in closed:
continue
stateTimeStamps[nodeId] = (time.time(), currNode.getH())
closed[nodeId] = 1
if viz.incrementalDisplay:
viz.markPoint(self.env.getPointFromId(currNode.getNodeId()), 0)
viz.displayImage(1)
children, edgeCosts = \
self.env.getChildrenAndCosts(currNode)
for child, edgeCost in zip(children, edgeCosts):
if child.getNodeId() in closed:
continue
updated = self.updateG(child, currNode.getG() + edgeCost)
if updated:
child.setParent(currNode)
if currNode.getNodeId() in self.env.islandNodeIds:
child.viaIsland = True
else:
child.viaIsland = currNode.viaIsland
#XXX What if this node is already in the open list?
openQ.put((child.getG() +
self.inflation*self.env.heuristic(child, goalNode), child))
self.stateTimeStamps = stateTimeStamps
endTime = time.time()
timeTaken = endTime - startTime
print("Total time taken for planning is %f", timeTaken)
#print(self.stateTimeStamps)
print("Nodes expanded", len(closed))
closedNodeIds = list(closed.keys())
points = map(self.env.getPointFromId, closedNodeIds)
viz.markPoints(points, 90)
viz.displayImage(1)
if currNode.getNodeId() == self.goalNode.getNodeId():
return 1
else:
return 0
| mit | -3,337,239,640,545,486,300 | 31.901235 | 83 | 0.566604 | false |
sniemi/ShapeMeasurement | shapeMeasurements.py | 1 | 17431 | """
Measuring a shape of an object
==============================
Simple class to measure weighted quadrupole moments, size, and ellipticity of an object.
Can be used for example to verify weak lensing mission requirements.
:requires: NumPy (tested with 1.9.1)
:author: Sami-Matias Niemi
:contact: [email protected]
:version: 0.5
"""
import math, os, datetime, unittest
import numpy as np
import logging
import logging.handlers
try:
import pyfits as pf
PYFITS = True
except:
print 'No PyFITS available, cannot write FITS files'
PYFITS = False
class shapeMeasurement():
"""
Provides methods to measure the shape of an object.
    :param data: image data of the object to be analysed.
:type data: ndarray
:param log: logger
:type log: instance
:param kwargs: additional keyword arguments
:type kwargs: dict
Settings dictionary contains all parameter values needed.
"""
def __init__(self, data, log, **kwargs):
"""
:param data: name of the FITS file to be analysed.
:type data: ndarray
:param log: logger
:type log: instance
:param kwargs: additional keyword arguments
:type kwargs: dict
Settings dictionary contains all parameter values needed.
"""
self.data = data.copy()
self.log = log
sizeY, sizeX = self.data.shape
self.settings = dict(sizeX=sizeX,
sizeY=sizeY,
iterations=4,
sampling=1.0,
platescale=120.0,
pixelSize=12.0,
sigma=0.75,
weighted=True,
conservePeak=True,
debug=False,
fixedPosition=False,
fixedX=None,
fixedY=None)
self.settings.update(kwargs)
for key, value in self.settings.iteritems():
self.log.info('%s = %s' % (key, value))
def quadrupoles(self, img):
"""
Derive quadrupole moments and ellipticity from the input image.
:param img: input image data
:type img: ndarray
:return: quadrupoles, centroid, and ellipticity (also the projected components e1, e2)
:rtype: dict
"""
self.log.info('Deriving quadrupole moments')
image = img.copy()
#normalization factor
imsum = float(np.sum(image))
#generate a mesh coordinate grid
sizeY, sizeX = image.shape
Xvector = np.arange(0, sizeX)
Yvector = np.arange(0, sizeY)
Xmesh, Ymesh = np.meshgrid(Xvector, Yvector)
# No centroid given, take from data and weighting with input image
Xcentre = np.sum(Xmesh.copy() * image.copy()) / imsum
Ycentre = np.sum(Ymesh.copy() * image.copy()) / imsum
#coordinate array
Xarray = Xcentre * np.ones([sizeY, sizeX])
Yarray = Ycentre * np.ones([sizeY, sizeX])
#centroided positions
Xpos = Xmesh - Xarray
Ypos = Ymesh - Yarray
#squared and cross term
Xpos2 = Xpos * Xpos
Ypos2 = Ypos * Ypos
XYpos = Ypos * Xpos
#integrand
Qyyint = Ypos2 * image.copy()
Qxxint = Xpos2 * image.copy()
Qxyint = XYpos * image.copy()
#sum over and normalize to get the quadrupole moments
Qyy = np.sum(Qyyint) / imsum
Qxx = np.sum(Qxxint) / imsum
Qxy = np.sum(Qxyint) / imsum
self.log.info('(Qxx, Qyy, Qxy) = (%f, %f, %f)' % (Qxx, Qyy, Qxy))
#derive projections and ellipticity
denom = Qxx + Qyy
e1 = (Qxx - Qyy) / denom
e2 = 2. * Qxy / denom
ellipticity = math.sqrt(e1*e1 + e2*e2)
#also a and b
a = np.sqrt(.5 * (Qxx + Qyy + np.sqrt((Qxx - Qyy)**2 + 4.*Qxy*Qxy)))
b = np.sqrt(.5 * (Qxx + Qyy - np.sqrt((Qxx - Qyy)**2 + 4.*Qxy*Qxy)))
#check that ellipticity is reasonable
if ellipticity > 1.0:
self.log.error('Ellipticity greater than 1 derived, will set it to unity!')
ellipticity = 1.0
self.log.info('Centroiding (x, y) = (%f, %f) and ellipticity = %.4f (%.4f, %.4f)' %
(Ycentre+1, Xcentre+1, ellipticity, e1, e2))
out = dict(ellipticity=ellipticity, e1=e1, e2=e2, Qxx=Qxx, Qyy=Qyy, Qxy=Qxy,
centreY=Ycentre, centreX=Xcentre,
a=a, b=b)
return out
def circular2DGaussian(self, x, y, sigma):
"""
Create a circular symmetric Gaussian centered on x, y.
:param x: x coordinate of the centre
:type x: float
:param y: y coordinate of the centre
:type y: float
:param sigma: standard deviation of the Gaussian, note that sigma_x = sigma_y = sigma
:type sigma: float
:return: circular Gaussian 2D profile and x and y mesh grid
:rtype: dict
"""
self.log.info('Creating a circular symmetric 2D Gaussian with sigma=%.3f centered on (x, y) = (%f, %f)' % (sigma, x, y))
#x and y coordinate vectors
Gyvect = np.arange(1, self.settings['sizeY'] + 1)
Gxvect = np.arange(1, self.settings['sizeX'] + 1)
#meshgrid
Gxmesh, Gymesh = np.meshgrid(Gxvect, Gyvect)
#normalizers
sigmax = 1. / (2. * sigma**2)
sigmay = sigmax #same sigma in both directions, thus same normalizer
#gaussian
exponent = (sigmax * (Gxmesh - x)**2 + sigmay * (Gymesh - y)**2)
Gaussian = np.exp(-exponent) / (2. * math.pi * sigma*sigma)
if self.settings['conservePeak']:
#normalize to unity
Gaussian /= np.max(Gaussian)
output = dict(GaussianXmesh=Gxmesh, GaussianYmesh=Gymesh, Gaussian=Gaussian)
return output
def Gaussian2D(self, x, y, sigmax, sigmay):
"""
Create a two-dimensional Gaussian centered on x, y.
:param x: x coordinate of the centre
:type x: float
:param y: y coordinate of the centre
:type y: float
:param sigmax: standard deviation of the Gaussian in x-direction
:type sigmax: float
:param sigmay: standard deviation of the Gaussian in y-direction
:type sigmay: float
        :return: 2D Gaussian profile and x and y mesh grid
:rtype: dict
"""
self.log.info('Creating a 2D Gaussian with sigmax=%.3f and sigmay=%.3f centered on (x, y) = (%f, %f)' %
(sigmax, sigmay, x, y))
#x and y coordinate vectors
Gyvect = np.arange(1, self.settings['sizeY'] + 1)
Gxvect = np.arange(1, self.settings['sizeX'] + 1)
#meshgrid
Gxmesh, Gymesh = np.meshgrid(Gxvect, Gyvect)
#normalizers
sigx = 1. / (2. * sigmax**2)
sigy = 1. / (2. * sigmay**2)
#gaussian
exponent = (sigx * (Gxmesh - x)**2 + sigy * (Gymesh - y)**2)
Gaussian = np.exp(-exponent) / (2. * math.pi * sigmax*sigmay)
if self.settings['conservePeak']:
#normalize to unity
Gaussian /= np.max(Gaussian)
output = dict(GaussianXmesh=Gxmesh, GaussianYmesh=Gymesh, Gaussian=Gaussian)
return output
def measureRefinedEllipticity(self):
"""
Derive a refined iterated polarisability/ellipticity measurement for a given object.
By default polarisability/ellipticity is defined in terms of the Gaussian weighted quadrupole moments.
If self.settings['weighted'] is False then no weighting scheme is used.
The number of iterations is defined in self.settings['iterations'].
:return: centroids [indexing stars from 1], ellipticity (including projected e1 and e2), and R2
:rtype: dict
"""
self.settings['sampleSigma'] = self.settings['sigma'] / self.settings['pixelSize'] * \
self.settings['platescale'] / self.settings['sampling']
self.log.info('Sample sigma used for weighting = %f' % self.settings['sampleSigma'])
if self.settings['fixedPosition']:
            self.log.info('Using a fixed object position')
quad = dict(centreX=self.settings['fixedX'], centreY=self.settings['fixedY'])
else:
self.log.info('The initial estimate for the mean values are taken from the unweighted quadrupole moments.')
quad = self.quadrupoles(self.data.copy())
for x in range(self.settings['iterations']):
if self.settings['weighted']:
self.log.info('Iteration %i with circular symmetric Gaussian weights' % x)
if self.settings['fixedPosition']:
gaussian = self.circular2DGaussian(self.settings['fixedX'],
self.settings['fixedY'],
self.settings['sampleSigma'])
else:
gaussian = self.circular2DGaussian(quad['centreX']+1,
quad['centreY']+1,
self.settings['sampleSigma'])
GaussianWeighted = self.data.copy() * gaussian['Gaussian'].copy()
else:
self.log.info('Iteration %i with no weighting' % x)
GaussianWeighted = self.data.copy()
quad = self.quadrupoles(GaussianWeighted.copy())
# The squared radius R2 in um2
R2 = quad['Qxx'] * self.settings['sampling']**2 + quad['Qyy'] * self.settings['sampling']**2
R2arcsec = R2 * (self.settings['pixelSize'] / self.settings['platescale'])**2
if self.settings['debug']:
self.writeFITS(gaussian['Gaussian'], 'GaussianWeightingFunction.fits')
self.writeFITS(GaussianWeighted, 'GaussianWeighted.fits')
out = dict(centreX=quad['centreX']+1, centreY=quad['centreY']+1,
e1=quad['e1'], e2=quad['e2'],
ellipticity=quad['ellipticity'],
R2=R2,
R2arcsec=R2arcsec,
GaussianWeighted=GaussianWeighted,
a=quad['a'], b=quad['b'])
return out
def writeFITS(self, data, output):
"""
Write out a FITS file using PyFITS.
:param data: data to write to a FITS file
:type data: ndarray
:param output: name of the output file
:type output: string
:return: None
"""
if PYFITS:
if os.path.isfile(output):
os.remove(output)
#create a new FITS file, using HDUList instance
ofd = pf.HDUList(pf.PrimaryHDU())
#new image HDU
hdu = pf.ImageHDU(data=data)
#update and verify the header
hdu.header.add_history('If questions, please contact Sami-Matias Niemi (s.niemi at icloud.com).')
hdu.header.add_history('This file has been created at %s' \
% datetime.datetime.isoformat(datetime.datetime.now()))
hdu.verify('fix')
ofd.append(hdu)
#write the actual file
ofd.writeto(output)
self.log.info('Wrote %s' % output)
else:
print 'Cannot write a FITS file, please install PyFITS...'
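# Hedged usage sketch (added, not part of the original module); the file name,
# log name, and settings below are placeholders only:
#   log = setUpLogger('shape.log')
#   data = pf.getdata('psf.fits')    # any 2D ndarray containing a single object
#   settings = dict(sampling=1.0, iterations=4, sigma=0.75)
#   results = shapeMeasurement(data, log, **settings).measureRefinedEllipticity()
#   print results['e1'], results['e2'], results['ellipticity'], results['R2']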
class TestShape(unittest.TestCase):
"""
Unit tests for the shape class.
"""
def setUp(self):
self.log = setUpLogger('shapeTesting.log')
self.tolerance = 1.e-7
self.sigma = 40.0
self.sigmax = 67.25
self.sigmay = 24.15
self.sigmax2 = 77.12343
self.sigmay2 = 42.34543
self.xcent = 500.
self.ycent = 500.
#create 2D Gaussians that will be used for testing
self.GaussianCirc = shapeMeasurement(np.zeros((1000, 1000)), self.log).circular2DGaussian(self.xcent,
self.ycent,
self.sigma)['Gaussian']
self.Gaussian = shapeMeasurement(np.zeros((1000, 1000)), self.log).Gaussian2D(self.xcent,
self.ycent,
self.sigmax,
self.sigmay)['Gaussian']
self.Gaussian2 = shapeMeasurement(np.zeros((1000, 1000)), self.log).Gaussian2D(self.xcent,
self.ycent,
self.sigmax2,
self.sigmay2)['Gaussian']
def test_ellipticity_noweighting_circular_Gaussian(self):
expected = 0.0
settings = dict(weighted=False)
actual = shapeMeasurement(self.GaussianCirc, self.log, **settings).measureRefinedEllipticity()['ellipticity']
self.assertAlmostEqual(expected, actual, msg='exp=%f, got=%f' % (expected, actual), delta=self.tolerance)
def test_noweighting_Gaussian(self):
expected = math.fabs((self.sigmax**2 - self.sigmay**2) / (self.sigmax**2 + self.sigmay**2))
settings = dict(weighted=False)
actual = shapeMeasurement(self.Gaussian, self.log, **settings).measureRefinedEllipticity()
ae = actual['ellipticity']
ae1 = actual['e1']
ae2 = actual['e2']
R2 = actual['R2']
R2exp = self.sigmax**2 + self.sigmay**2
self.assertAlmostEqual(expected, ae, msg='exp=%f, got=%f' % (expected, ae), delta=self.tolerance)
self.assertAlmostEqual(expected, ae1, msg='exp=%f, got=%f' % (expected, ae1), delta=self.tolerance)
self.assertAlmostEqual(0.0, ae2, msg='exp=%f, got=%f' % (expected, ae2), delta=self.tolerance)
self.assertAlmostEqual(R2exp, R2, msg='exp=%f, got=%f' % (R2exp, R2), delta=self.tolerance)
def test_noweighting_Gaussian2(self):
expected = math.fabs((self.sigmax2**2 - self.sigmay2**2) / (self.sigmax2**2 + self.sigmay2**2))
settings = dict(weighted=False, iterations=40)
actual = shapeMeasurement(self.Gaussian2, self.log, **settings).measureRefinedEllipticity()
ae = actual['ellipticity']
ae1 = actual['e1']
ae2 = actual['e2']
R2 = actual['R2']
R2exp = self.sigmax2**2 + self.sigmay2**2
self.assertAlmostEqual(expected, ae, msg='exp=%f, got=%f' % (expected, ae), delta=self.tolerance)
self.assertAlmostEqual(expected, ae1, msg='exp=%f, got=%f' % (expected, ae1), delta=self.tolerance)
self.assertAlmostEqual(0.0, ae2, msg='exp=%f, got=%f' % (expected, ae2), delta=self.tolerance)
self.assertAlmostEqual(R2exp, R2, msg='exp=%f, got=%f' % (R2exp, R2), delta=1e-4)
def test_ellipticity_Gaussian(self):
expected = math.fabs((self.sigmax**2 - self.sigmay**2) / (self.sigmax**2 + self.sigmay**2))
settings = dict(sigma=3000., iterations=40)
actual = shapeMeasurement(self.Gaussian, self.log, **settings).measureRefinedEllipticity()['ellipticity']
self.assertAlmostEqual(expected, actual, msg='exp=%f, got=%f' % (expected, actual), delta=1e-5)
def test_centroiding_weighting_Gaussian(self):
expected = self.xcent, self.ycent
actual = shapeMeasurement(self.Gaussian, self.log).measureRefinedEllipticity()
self.assertAlmostEqual(expected[0], actual['centreX'],
msg='exp=%f, got=%f' % (expected[0], actual['centreX']), delta=self.tolerance)
self.assertAlmostEqual(expected[1], actual['centreY'],
msg='exp=%f, got=%f' % (expected[1], actual['centreY']), delta=self.tolerance)
def test_R2_noweighting_circular_Gaussian(self):
expected = 2 * self.sigma**2
settings = dict(weighted=False)
actual = shapeMeasurement(self.GaussianCirc, self.log, **settings).measureRefinedEllipticity()['R2']
self.assertAlmostEqual(expected, actual, msg='exp=%f, got=%f' % (expected, actual), delta=self.tolerance)
def setUpLogger(log_filename, loggername='logger'):
"""
Sets up a logger.
    :param log_filename: name of the file to save the log.
    :param loggername: name of the logger
:return: logger instance
"""
# create logger
logger = logging.getLogger(loggername)
logger.setLevel(logging.DEBUG)
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(log_filename)
#maxBytes=20, backupCount=5)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(module)s - %(funcName)s - %(levelname)s - %(message)s')
# add formatter to ch
handler.setFormatter(formatter)
# add handler to logger
logger.addHandler(handler)
return logger
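# Illustrative usage of setUpLogger (the file name below is only an example):
#   log = setUpLogger('shapeTesting.log', loggername='shapeTest')
#   log.info('running shape measurement unit tests')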
if __name__ == '__main__':
#testing section
suite = unittest.TestLoader().loadTestsFromTestCase(TestShape)
unittest.TextTestRunner(verbosity=3).run(suite)
| bsd-2-clause | -302,642,680,278,198,100 | 37.821826 | 128 | 0.560151 | false |
amadeusproject/amadeuslms | reports/forms.py | 1 | 3623 | """
Copyright 2016, 2017 UFPE - Universidade Federal de Pernambuco
This file is part of the Amadeus Learning Management System (Sistema de Gestão de Aprendizagem), or simply Amadeus LMS
Amadeus LMS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation (FSF), version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License, under the title "LICENSE", along with this program; if not, write to the Free Software Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
import datetime
from django.forms.formsets import BaseFormSet
class BaseResourceAndTagFormset(BaseFormSet):
    def clean(self):
        """
        Formset-wide validation hook. It currently only bails out early when any
        member form has errors of its own; cross-form checks (for example rejecting
        duplicate resource/tag selections) would go in the loop below.
        """
        if any(self.errors):
            # Don't validate the formset unless each form is valid on its own
            return
        for form in self.forms:
            # Placeholder for per-form / cross-form validation
            pass
class ResourceAndTagForm(forms.Form):
resource = forms.ChoiceField(label=_("Kind Of Resource"), required=True)
tag = forms.ChoiceField(label=_('Tag'))
def __init__(self, *args, **kwargs):
super(ResourceAndTagForm, self).__init__(*args, **kwargs)
if kwargs.get('initial'):
initial = kwargs['initial']
self.fields['resource'].choices = [(classes.__name__.lower(), classes.__name__.lower()) for classes in initial['class_name']]
self.fields['tag'].choices = [(tag.id, tag.name) for tag in initial['tag']]
class CreateInteractionReportForm(forms.Form):
topic = forms.ChoiceField( label= _("Topics"), required=True)
init_date = forms.DateField(required=True, label= _("Initial Date"))
end_date = forms.DateField(required=True, label= _("Final Date"))
from_mural = forms.BooleanField(required=False, label=_("From Mural"))
from_messages = forms.BooleanField(required=False, label=_("Messages"))
class Meta:
fields = ('topic', 'init_date', 'end_date', 'from_mural' , 'from_messages')
def __init__(self, *args, **kwargs):
super(CreateInteractionReportForm, self).__init__(*args, **kwargs)
initial = kwargs['initial']
topics = list(initial['topic'])
self.subject = initial['subject'] #so we can check date cleaned data
self.fields['topic'].choices = [(topic.id, topic.name) for topic in topics]
self.fields['topic'].choices.append((_("All"), _("All")))
def clean(self):
cleaned_data = super(CreateInteractionReportForm, self).clean()
init_date = cleaned_data.get("init_date")
end_date = cleaned_data.get("end_date")
if init_date and end_date:
if init_date > end_date:
raise forms.ValidationError(_("The initial date can't be after the end one."))
def clean_init_date(self):
init_date = self.cleaned_data['init_date']
if init_date < self.subject.init_date:
self._errors['init_date'] = [_('This date should be right or after %s, which is when the subject started. ') % str(self.subject.init_date)]
return init_date
def clean_end_date(self):
end_date = self.cleaned_data['end_date']
if end_date > self.subject.end_date:
self._errors['end_date'] = [_('This date should be right or before %s, which is when the subject finishes. ') % str(self.subject.end_date)]
return end_date | gpl-2.0 | -1,449,295,162,794,327,800 | 41.797619 | 231 | 0.705064 | false |
bernardhu/whlianjia | crawler.py | 1 | 59560 | # -*- coding: utf-8 -*-
import pickle
import math
import os.path
import shutil
import datetime
import time
import random
import json
import re
import chardet
import string
import base64
import requests
from bs4 import BeautifulSoup
from model import TradedHouse, DistricHouse, BidHouse, RentHouse, create_table, clear_table
grabedPool = {}
gz_district = ['jiangan', 'jianghan', 'qiaokou', 'dongxihu', 'wuchang', 'qingshan', 'hongshan', 'hanyang', 'donghugaoxin', 'jiangxia']
gz_district_name = {"jiangan":u"江岸", "jianghan":u"江汉", "qiaokou":u"硚口", "dongxihu":u"东西湖",
"wuchang":u"武昌", "qingshan":u"青山", "hongshan":u"洪山", "hanyang": u"汉阳", "donghugaoxing": u"东湖高新",
"jiangxia":u"江夏"}
global start_offset
start_offset = 1
user_agent_list = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.302.2 Safari/532.8",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.15 Safari/534.13",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.54 Safari/535.2",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Macintosh; U; Mac OS X Mach-O; en-US; rv:2.0a) Gecko/20040614 Firefox/3.0.0 ",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.14) Gecko/20110218 AlexaToolbar/alxf-2.0 Firefox/3.6.14",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.17 (KHTML, like Gecko) Version/8.0 Mobile/13A175 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.39 (KHTML, like Gecko) Version/9.0 Mobile/13A4305g Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A344 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A344 Safari/600.1.4 (000205)",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/8.0.57838 Mobile/13A344 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A404 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/631.1.17 (KHTML, like Gecko) Version/8.0 Mobile/13A171 Safari/637.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/6.0.51363 Mobile/13A404 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/8.0.57838 Mobile/13B5110e Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A404 Safari/600.1.4 (000994)",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A404 Safari/600.1.4 (000862)",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A404 Safari/600.1.4 (000065)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/5.2.43972 Mobile/13A452 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A452 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B5130b Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A404 Safari/600.1.4 (000539)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A452 Safari/600.1.4 (000549)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A452 Safari/600.1.4 (000570)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/44.0.2403.67 Mobile/13A452 Safari/600.1.4 (000693)",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/9.0.60246 Mobile/13A404 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A452 Safari/600.1.4 (000292)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/9.0.60246 Mobile/13A452 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B137 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A452 Safari/600.1.4 (000996)",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/46.0.2490.73 Mobile/13B143 Safari/600.1.4 (000648)",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/46.0.2490.73 Mobile/13B143 Safari/600.1.4 (000119)",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/9.0.60246 Mobile/13B143 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/46.0.2490.73 Mobile/13B143 Safari/600.1.4 (000923)",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/1.2 Mobile/13B143 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A340 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13B143",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/10.0.63022 Mobile/13B143 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.17 (KHTML, like Gecko) Version/8.0 Mobile/13A175 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1.56 (KHTML, like Gecko) Version/9.0 Mobile/13c75 Safari/601.1.56",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B144 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C75 Safari/601.1.46 (000144)",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C75 Safari/601.1.46 (000042)",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13C75 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 7_1_1 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) CriOS/38.0.2125.59 Mobile/11D201 Safari/9537.53",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/11.0.65374 Mobile/13B143 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C75 Safari/601.1.46 (000468)",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/11.0.65374 Mobile/13C75 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.16 (KHTML, like Gecko) Version/8.0 Mobile/13A171a Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/11.1.66360 Mobile/13C75 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.83 Mobile/13C75 Safari/601.1.46 (000468)",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.107 Mobile/13C75 Safari/601.1.46 (000702)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/10A403 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B14 3 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13D15 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.107 Mobile/13A452 Safari/601.1.46 (000412)",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.107 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/12.0.68608 Mobile/13D15 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.87 Mobile/13A452 Safari/601.1.46 (000715)",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.87 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13B143 Safari/600.1.4 (000381)",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E5200d Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E5200d Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/11.1.66360 Mobile/13D15 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.104 Mobile/13B143 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.104 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E5200d Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.104 Mobile/13E5200d Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.83 Mobile/13C75 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.83 Mobile/13C75 Safari/601.1.46 (000381)",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13A344 Shelter/1.0.0 (YmqLQeAh3Z-nBdz2i87Rf) ",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/46.0.2490.73 Mobile/13C143 Safari/600.1.4 (000718)",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A143 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/1.4 Mobile/13E5181f Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.73 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.73 Mobile/13A15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E233 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/13.1.72140 Mobile/13E233 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.73 Mobile/13E233 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E238 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.109 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/1.4 Mobile/13A452 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/44.0.2403.67 Mobile/13B143 Safari/600.1.4 (000073)",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/3.0 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/14.1.119979954 Mobile/13E238 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.95 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E234 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F69 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E237 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.95 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/15.1.122860578 Mobile/13F69 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/51.0.2704.64 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F72 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/51.0.2704.104 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.77 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/4.0 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/51.0.2704.104 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/16.0.124986583 Mobile/13F69 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/2.0 Mobile/13E5200d Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G34 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/52.0.2743.84 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E188a Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/17.0.128207670 Mobile/13G35 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_3 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.95 Mobile/13G34 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G35 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13G35",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/52.0.2743.84 Mobile/13G35 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/5.0 Mobile/13G35 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 iPadApp",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G35 Safari/601.1 MXiOS/4.9.0.60",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/18.0.130791545 Mobile/13G35 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G36 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/18.0.130791545 Mobile/13G36 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 7_1 like Mac OS X) AppleWebKit/537.51.3 (KHTML, like Gecko) Version/7.0 Mobile/11A4149 Safari/9537.72",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.17 (KHTML, like Gecko) Version/8.0 Mobile/13A175 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/18.1.132077863 Mobile/13G36 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.86 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.109 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.109 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OSX) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A452 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13D11",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13G36 Safari/601.1.46 Sleipnir/4.3.0m",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.86 Mobile/13A452 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46.140 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/54.0.2840.66 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/54.0.2840.91 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 Safari/601.1.46 Sleipnir/4.3.2m",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13G36",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/5.3.48993 Mobile/13D15 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/54.0.2840.66 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.77 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/55.0.2883.79 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/55.0.2883.79 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/5.3 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/22.0.141836113 Mobile/13G36 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/56.0.2924.79 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/56.0.2924.79 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/56.0.2924.79 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/57.0.2987.100 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/6.1 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13BC75 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_3 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/56.0.2924.79 Mobile/13G34 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/57.0.2987.137 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46(KHTML, like Gecko) FxiOS/6.1 Mobile/13G36 Safari/601.1.46",
"Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/9.0 Mobile/13A340 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) CriOS/36.0.1985.49 Mobile/13G36 Safari/9537.53",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/59.0.3071.102 Mobile/13G36 Safari/601.1.46"
]
def get_header():
i = random.randint(0,len(user_agent_list)-1)
headers = {
'User-Agent': user_agent_list[i],
        'x-forwarded-for': "1.2.3.4"
}
return headers
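# get_header() is called before every request so that each request goes out with a randomly
# chosen User-Agent (plus a dummy x-forwarded-for), which makes simple server-side
# fingerprinting and rate limiting slightly less effective.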
def get_multipart_formdata(data, bondary):
post_data = []
for key, value in data.iteritems():
if value is None:
continue
post_data.append('--' + bondary )
post_data.append('Content-Disposition: form-data; name="{0}"'.format(key))
post_data.append('')
if isinstance(value, int):
value = str(value)
post_data.append(value)
post_data.append('--' + bondary + '--')
post_data.append('')
body = '\r\n'.join(post_data)
return body.encode('utf-8')
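# Rough illustration (field name made up): get_multipart_formdata({'uuid': 'abc'}, 'XBOUND')
# produces a body of the form:
#   --XBOUND\r\nContent-Disposition: form-data; name="uuid"\r\n\r\nabc\r\n--XBOUND--\r\n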
def verify_captcha():
url = "http://captcha.lianjia.com"
r = requests.get(url, headers= get_header(), timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find("form", class_="human").find_all("input")
print pages[2]['value'], pages[2]['name']
csrf = pages[2]['value']
time.sleep(1)
url = "http://captcha.lianjia.com/human"
r = requests.get(url, headers= get_header(), timeout= 30)
cookie = r.headers['Set-Cookie']
soup = BeautifulSoup(r.content, "lxml")
images = json.loads(r.content)['images']
uuid = json.loads(r.content)['uuid']
#print images
for idx in xrange(0, len(images)):
fh = open("%d.jpg"%idx, "wb")
data = images['%d'%idx].split(',', 1)
fh.write(base64.b64decode(data[1]))
fh.close()
step = 0
mask = 0
while 1:
if step == 0:
val = raw_input("check 0.jpg reverse,(y/n):\t")
if val == 'y' or val == 'Y':
mask = mask + 1
step = 1
elif step == 1:
val = raw_input("check 1.jpg reverse,(y/n):\t")
if val == 'y' or val == 'Y':
mask = mask + 2
step = 2
elif step == 2:
val = raw_input("check 2.jpg reverse,(y/n):\t")
if val == 'y' or val == 'Y':
mask = mask + 4
step = 3
elif step == 3:
val = raw_input("check 3.jpg reverse,(y/n):\t")
if val == 'y' or val == 'Y':
mask = mask + 8
break
print mask
boundary='----WebKitFormBoundary7MA4YWxkTrZu0gW'
headers = get_header()
headers['content-type'] = "multipart/form-data; boundary={0}".format(boundary)
headers['Cookie'] = cookie
print get_multipart_formdata({'uuid':uuid, 'bitvalue': mask, '_csrf': csrf}, boundary)
print headers
r = requests.post(url, headers=headers, data=get_multipart_formdata({'uuid':uuid, 'bitvalue': mask, '_csrf': csrf}, boundary))
print r.request
print r.content
def get_distric_rent_cnt(distric):
print "try to grab %s community rent cnt "%distric
url = "http://wh.lianjia.com/zufang/%s/"%distric
r = requests.get(url, headers= get_header(), timeout= 30)
#print r.text.encode("utf-8")
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find("div", class_="page-box house-lst-page-box")
time.sleep(random.randint(5,10))
try:
pageStr = pages["page-data"]
except Exception, e:
print e,r.content
os._exit(0)
jo = json.loads(pageStr)
return jo['totalPage']
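# The "page-data" attribute scraped above holds JSON such as {"totalPage": 42, "curPage": 1}
# (observed Lianjia markup at the time of writing; the exact fields may change).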
def get_distric_community_cnt(distric):
print "try to grab %s community cnt "%distric
url = "http://wh.lianjia.com/xiaoqu/%s/"%distric
r = requests.get(url, headers= get_header(), timeout= 30)
#print r.text.encode("utf-8")
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find("div", class_="page-box house-lst-page-box")
time.sleep(random.randint(5,10))
try:
pageStr = pages["page-data"]
except Exception, e:
print e,r.content,r.text
os._exit(0)
jo = json.loads(pageStr)
return jo['totalPage']
def grab_distric(url):
print "try to grab distric page ", url
r = requests.get(url, headers= get_header(), timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
try:
districList = soup.find("ul", class_="listContent").find_all('li')
except Exception, e:
print e,r.content
os._exit(0)
if not districList:
return
for item in districList:
        # Detail-page URL, used as the unique identifier
distUrl = item.a["href"] or ''
#if distUrl in grabedPool["data"]:
# print distUrl, "already exits,skip"
# continue
print "start to crawl" , distUrl
        # Scrape the historical-deals summary
title = item.find("div", class_="title").a.string.encode("utf-8").rstrip()
historyList = item.find("div", class_="houseInfo").find_all('a')
history = historyList[0].string.encode("utf-8")
m = re.match(r"(\d+)天成交(\d+)套", history)
print m, history
historyRange = 0
historySell = 0
if m:
historyRange = m.group(1)
historySell = m.group(2)
print title, history, historyRange, historySell
        # Scrape the district & business circle
pos = item.find("div", class_="positionInfo").find_all('a')
dis = pos[0].string.encode("utf-8")
bizcircle = pos[1].string.encode("utf-8")
print dis, bizcircle
        # Scrape the average deal price
avgStr = item.find("div", class_="totalPrice").span.string.encode("utf-8")
m = re.match(r"(\d+)", avgStr)
if m:
            avg = int(m.group(1))
else:
avg = 0
print avg
        # Scrape the number of listings currently for sale
onSell = int(item.find("div", class_="xiaoquListItemSellCount").a.span.string)
print onSell
        # Persist to sqlite through the ORM
distItem = DistricHouse(
name = title,
district = dis,
bizcircle = bizcircle,
historyRange = historyRange,
historySell = historySell,
ref = distUrl,
avgpx = avg,
onsell = onSell,
)
distItem.save()
        # Add to the pool of already-crawled URLs
        #grabedPool["data"].add(distUrl)
        # Sleep a few seconds after each item to avoid putting too much load on the remote server
time.sleep(random.randint(1,3))
def get_distric_chengjiao_cnt(distric, proxy):
print "try to grab %s chengjiao cnt "%distric
url = "http://wh.lianjia.com/chengjiao/%s/"%distric
r = requests.get(url, headers= get_header(), timeout= 30)
#print r.text.encode("utf-8")
soup = BeautifulSoup(r.content, "lxml")
try:
pages = soup.find("div", class_="page-box house-lst-page-box")
time.sleep(random.randint(5,10))
pageStr = pages["page-data"]
jo = json.loads(pageStr)
return jo['totalPage']
except Exception, e:
print e,r.content
os._exit(0)
def get_distric_bid_cnt(distric, proxy):
print "try to grab %s bid cnt "%distric
url = "http://wh.lianjia.com/ershoufang/%s/"%distric
r = requests.get(url, headers= get_header(), timeout= 30)
#print r.text.encode("utf-8")
soup = BeautifulSoup(r.content, "lxml")
try:
pages = soup.find("div", class_="page-box house-lst-page-box")
time.sleep(random.randint(5,10))
pageStr = pages["page-data"]
jo = json.loads(pageStr)
return jo['totalPage']
except Exception, e:
print e,r.content
os._exit(0)
#i = random.randint(0,len(proxy)-1)
#proxies = {
# "http": proxy[i]
# }
#print "try proxy", proxy[i]
#r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 30)
#soup = BeautifulSoup(r.content, "lxml")
#pages = soup.find("div", class_="page-box house-lst-page-box")
#time.sleep(random.randint(5,10))
#pageStr = pages["page-data"]
#jo = json.loads(pageStr)
#return jo['totalPage']
def get_xici_proxy(url, proxys):
print "get proxy", url
r = requests.get(url, headers= get_header(), timeout= 10)
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find_all("tr", class_="odd")
for page in pages:
items = page.find_all("td")
proxy ="http://%s:%s"%(items[1].string, items[2].string)
url = "http://wh.lianjia.com/chengjiao/tianhe/"
proxies = {
"http": proxy
}
try:
r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 3)
soup = BeautifulSoup(r.content, "lxml")
tradedHoustList = soup.find("ul", class_="listContent")
if not tradedHoustList:
continue
proxys.append(proxy)
print proxy, proxys
except Exception, e:
#print Exception,":",e
continue
def get_kuaidaili_proxy(url, proxys):
print "get proxy", url
r = requests.get(url, headers= get_header(), timeout= 10)
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find("tbody").find_all("tr")
for page in pages:
items = page.find_all("td")
proxy ="http://%s:%s"%(items[0].string, items[1].string)
print proxy
url = "http://wh.lianjia.com/chengjiao/tianhe/"
proxies = {
"http": proxy
}
try:
r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 3)
soup = BeautifulSoup(r.content, "lxml")
tradedHoustList = soup.find("ul", class_="listContent")
if not tradedHoustList:
continue
proxys.append(proxy)
print proxy, proxys
except Exception, e:
#print Exception,":",e
continue
def get_youdaili_proxy(url, proxys):
print "get proxy", url
r = requests.get(url, headers= get_header(), timeout= 10)
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find("div", class_="chunlist").find_all("a")
page = pages[0]
u = page["href"]
html = requests.get(u, headers= get_header(), timeout= 3).content
proxy_list = re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}', html)
for proxy in proxy_list:
url = "http://wh.lianjia.com/chengjiao/tianhe/"
proxies = {
"http": proxy
}
try:
r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 3)
soup = BeautifulSoup(r.content, "lxml")
tradedHoustList = soup.find("ul", class_="listContent")
if not tradedHoustList:
continue
proxys.append(proxy)
print proxy, proxys
except Exception, e:
#print Exception,":",e
continue
def build_proxy():
proxys = []
#get_xici_proxy("http://www.xicidaili.com/nn/1", proxys)
#get_xici_proxy("http://www.xicidaili.com/nn/2", proxys)
#get_kuaidaili_proxy("http://www.kuaidaili.com/proxylist/1", proxys)
#get_kuaidaili_proxy("http://www.kuaidaili.com/proxylist/2", proxys)
#get_kuaidaili_proxy("http://www.kuaidaili.com/proxylist/3", proxys)
#get_kuaidaili_proxy("http://www.kuaidaili.com/proxylist/4", proxys)
#get_youdaili_proxy("http://www.youdaili.net/Daili/http", proxys)
r = requests.get("http://127.0.0.1:5000/get_all/", headers= get_header(), timeout= 10)
print r.content
proxys= json.loads(r.content)
print proxys
return proxys
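# Note: build_proxy() assumes a local proxy-pool service (for example the open-source
# proxy_pool project) listening on 127.0.0.1:5000 whose /get_all/ endpoint returns a JSON
# list of "host:port" strings; the commented-out helpers above scrape public proxy sites instead.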
def grabRent(url, proxy, disName, priceDic, bizDic):
print "try to grab page ", url
r = requests.get(url, headers= get_header(), timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
try:
bidHoustList = soup.find("ul", class_="house-lst").find_all('li')
except Exception, e:
print e,r.content
os._exit(0)
if not bidHoustList:
return
storge = []
for item in bidHoustList:
        # Detail-page URL, used as the unique identifier
houseUrl = item.a["href"] or ''
#if houseUrl in grabedPool["data"]:
# print houseUrl, "already exit, skip"
# continue
print 'start to crawl' , houseUrl
        # Scrape community, layout, area, orientation, decoration and elevator
xiaoqu = item.find("div", class_="where").a.string.rstrip().encode("utf-8")
houseType = item.find("span", class_="zone").span.string.rstrip().encode("utf-8")
squareStr = item.find("span", class_="meters").string.rstrip().encode("utf-8")
orientation = item.find("div", class_="where").findAll("span")[4].string.encode("utf-8").rstrip()
print xiaoqu, houseType, squareStr, orientation
m = re.match(r"\b[0-9]+(\.[0-9]+)?", squareStr)
square = 0
if m:
square = string.atof(m.group(0))
print squareStr, square
        # Floor level and building age
posInfo = item.find("div", class_="con").contents[2]
m = re.match(ur"(.*)楼层\(共(\d+)层\)", posInfo)
floorLevel = 'Nav'
floorTotal = -1
if m:
floorLevel = m.group(1)
floorTotal = m.group(2)
print m.group(1).encode("utf-8"), m.group(2)
print floorLevel.encode("utf-8"), floorTotal
        # Asking price
priceInfo = item.find("div", class_="price").span
if priceInfo:
price = string.atof(priceInfo.string)
else :
price = 0
print price
pricePre = item.find("div", class_="price-pre").string
priceUpdate, misc = ([x.strip() for x in pricePre.split(" ")])
print priceUpdate
        # Followers, viewings and days on market
seenStr = item.find("div", class_="square").find("span", class_="num").string
        seen = 0
        if seenStr:
            seen = string.atoi(seenStr)
print seen
try:
avg = priceDic[xiaoqu]
except Exception, e:
print e
avg = 0
print "avg", avg
try:
biz = bizDic[xiaoqu]
except Exception, e:
print e
biz = ""
print "biz", biz
loan = 0
loan = square*avg -1500000
loanRet = 0
yearRate = 0.049
monthRate = 0.049/12
loanYear = 30
loanMonth = loanYear*12
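        # Equal-installment (annuity) repayment: payment = P*r*(1+r)**n / ((1+r)**n - 1), with
        # principal P, monthly rate r and n monthly payments; the 1,500,000 subtracted above
        # appears to be an assumed budget/down payment.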
if loan < 0 :
loan = 0
loanRet = 0
else:
loanRet = loan*monthRate*((1+monthRate)**loanMonth)/(((1+monthRate)**loanMonth)-1)
loan = round(loan/10000)
print loan, loanRet
        # Persist to sqlite through the ORM
BidItem = RentHouse(
xiaoqu = xiaoqu,
houseType = houseType,
square = square,
houseUrl = houseUrl,
orientation = orientation,
floorLevel = floorLevel,
floorTotal = floorTotal,
price = price,
avg = avg,
loan = loan,
loanRet = loanRet,
seen = seen,
bizcircle = biz,
district = disName,
)
storge.append(BidItem)
for s in storge:
s.save()
        # Add to the pool of already-crawled URLs
        #grabedPool["data"].add(s.houseUrl)
    # Sleep a few seconds after each page to avoid putting too much load on the remote server
time.sleep(random.randint(1,3))
def grabBid(url, proxy, disName, priceDic):
print "try to grabbid page ", url
r = requests.get(url, headers= get_header(), timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
try:
bidHoustList = soup.find("ul", class_="sellListContent").find_all('li')
    except Exception, e:
        print e,r.content
        if not proxy:
            os._exit(0)
        # Fall back to a random proxy and retry once before giving up
        i = random.randint(0,len(proxy)-1)
        proxies = {
            "http": proxy[i]
            }
        print "try proxy", proxy[i]
        r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 30)
        soup = BeautifulSoup(r.content, "lxml")
        bidHoustList = soup.find("ul", class_="sellListContent").find_all('li')
if not bidHoustList:
return
storge = []
for item in bidHoustList:
        # Detail-page URL, used as the unique identifier
houseUrl = item.a["href"] or ''
#if houseUrl in grabedPool["data"]:
# print houseUrl, "already exit, skip"
# continue
print 'start to crawl' , houseUrl
        # Scrape community, layout, area, orientation, decoration and elevator
houseInfo = item.find("div", class_="houseInfo").contents[2]
xiaoqu = item.find("div", class_="houseInfo").a.string.encode("utf-8").rstrip()
if houseInfo:
if len(houseInfo.split("|")) == 5:
null, houseType, squareStr, orientation, decoration = ([x.strip() for x in houseInfo.split("|")])
elevator = 'Nav'
if len(houseInfo.split("|")) == 6:
null, houseType, squareStr, orientation, decoration, elevator = ([x.strip() for x in houseInfo.split("|")])
print xiaoqu, houseType.encode("utf-8"), orientation.encode("utf-8"), decoration.encode("utf-8"), elevator.encode("utf-8")
m = re.match(ur"\b[0-9]+(\.[0-9]+)?", squareStr)
square = 0
if m:
square = string.atof(m.group(0))
print squareStr.encode("utf-8"), square
        # Floor level and building age
posInfo = item.find("div", class_="positionInfo").contents[1]
print posInfo.encode("utf-8")
m = re.match(ur"(.*)楼层\(共(\d+)层\)(\d+)年建", posInfo)
floorLevel = 'Nav'
floorTotal = -1
build = -1
if m:
floorLevel = m.group(1)
floorTotal = m.group(2)
build = int(m.group(3))
print m.group(1).encode("utf-8"), m.group(2), m.group(3)
print floorLevel.encode("utf-8"), floorTotal, build
biz = item.find("div", class_="positionInfo").a.string
print biz
        # Asking price
priceInfo = item.find("div", class_="totalPrice").span
if priceInfo:
bid = string.atof(priceInfo.string)
else :
bid = 0
print bid
        # Average unit price
priceInfo = item.find("div", class_="unitPrice").span
priceStr = ""
if priceInfo:
priceStr = priceInfo.string
m = re.match(ur"单价(\d+)元", priceStr)
price = 0
if m:
price = m.group(1)
print price, priceStr.encode("utf-8")
        # Followers, viewings and days on market
followInfo = item.find("div", class_="followInfo").contents[1]
if followInfo:
watchStr, seenStr, releaseStr = ([x.strip() for x in followInfo.split("/")])
print watchStr.encode("utf-8"), seenStr.encode("utf-8"), releaseStr.encode("utf-8")
m = re.match(ur"(\d+)人", watchStr)
watch = 0
if m:
watch = m.group(1)
m = re.match(ur"共(\d+)次", seenStr)
seen = 0
if m:
seen = m.group(1)
m = re.match(ur"(\d+)天", releaseStr)
release = 0
if m:
release = int(m.group(1))
else:
m = re.match(ur"(\d+)个月", releaseStr)
if m:
release = int(m.group(1))*30
else:
m = re.match(ur"(.*)年", releaseStr)
if m:
release = m.group(1)
if release == u"一":
release = 365
try:
avg = priceDic[xiaoqu]
except Exception, e:
avg = 0
print watch, seen, release, avg
        # Persist to sqlite through the ORM
BidItem = BidHouse(
xiaoqu = xiaoqu,
houseType = houseType,
square = square,
houseUrl = houseUrl,
orientation = orientation,
decoration = decoration,
elevator = elevator,
floorLevel = floorLevel,
floorTotal = floorTotal,
build = build,
price = price,
avg = avg,
bid = bid,
watch = watch,
seen = seen,
release = release,
bizcircle = biz,
district = disName,
)
storge.append(BidItem)
for s in storge:
s.save()
        # Add to the pool of already-crawled URLs
        #grabedPool["data"].add(s.houseUrl)
    # Sleep a few seconds after each page to avoid putting too much load on the remote server
time.sleep(random.randint(1,3))
def grab(url, proxy, disName, bizDic, lastMarkTrade):
print "try to grab page ", url
r = requests.get(url, headers= get_header(), timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
try:
tradedHoustList = soup.find("ul", class_="listContent").find_all('li')
except Exception, e:
print e,r.content
#os._exit(0)
tradedHoustList = soup.find("li", class_="pictext")
if not tradedHoustList:
tradedHoustList = soup.find("ul", class_="listContent").find_all('li')
else:
i = random.randint(0,len(proxy)-1)
proxies = {
"http": proxy[i]
}
print "try proxy", proxy[i]
r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
tradedHoustList = soup.find("ul", class_="listContent").find_all('li')
if not tradedHoustList:
return
storge = []
stop = False
for item in tradedHoustList:
        # Detail-page URL, used as the unique identifier
houseUrl = item.a["href"] or ''
#if houseUrl in grabedPool["data"]:
# print houseUrl, "already exit, skip"
# continue
print 'start to crawl' , houseUrl
        # Scrape community, layout and area
title = item.find("div", class_="title")
if title:
print title
xiaoqu, houseType, square = (title.string.replace(" ", " ").split(" "))
m = re.match(ur"\b[0-9]+(\.[0-9]+)?", square)
if m:
square = string.atof(m.group(0))
else:
xiaoqu, houseType, square = ('Nav', 'Nav', 0)
xiaoqu = xiaoqu.encode("utf-8").rstrip()
houseType = houseType.encode("utf-8")
print xiaoqu, houseType, square
dealInfo = item.find("div", class_="totalPrice").span
try:
deal = string.atof(dealInfo.string.encode("utf-8"))
except Exception, e:
deal = -1
print deal
        # Orientation, decoration and elevator
houseInfo = item.find("div", class_="houseInfo").contents[1]
if houseInfo:
if len(houseInfo.split("|")) == 2:
orientation, decoration = ([x.strip() for x in houseInfo.split("|")])
elevator = 'Nav'
if len(houseInfo.split("|")) == 3:
orientation, decoration, elevator = ([x.strip() for x in houseInfo.split("|")])
print orientation.encode("utf-8"), decoration.encode("utf-8"), elevator.encode("utf-8")
        # Deal date
dealDate = item.find("div", class_="dealDate")
if dealDate:
tradeDate = datetime.datetime.strptime(dealDate.string, '%Y.%m.%d') or datetime.datetime(1990, 1, 1)
print tradeDate
if lastMarkTrade >= tradeDate:
print 'break for time'
stop = True
break
        # Floor level and building age
posInfo = item.find("div", class_="positionInfo").contents[1]
if posInfo:
floor, buildStr = ([x.strip() for x in posInfo.split(" ")])
print floor.encode("utf-8"), buildStr.encode("utf-8")
m = re.match(ur"(.*)楼层\(共(\d+)层\)", floor)
floorLevel = 'Nav'
floorTotal = -1
if m:
floorLevel = m.group(1)
floorTotal = m.group(2)
print m.group(1).encode("utf-8"), m.group(2)
m = re.match(ur"(\d+)年建", buildStr)
build = -1
if m:
build = m.group(1)
print floorLevel.encode("utf-8"), floorTotal, build
        # Average unit price
priceInfo = item.find("div", class_="unitPrice").span
if priceInfo:
price = int(priceInfo.string)
else :
price = 0
print price
        # Asking price and deal cycle (days on market before closing)
dealCycle = item.find("span", class_="dealCycleTxt").find_all('span')
bid = -1
cycle = -1
if dealCycle:
if len(dealCycle) == 1:
bidStr = dealCycle[0].string
cycleStr = ""
if len(dealCycle) == 2:
bidStr = dealCycle[0].string
cycleStr = dealCycle[1].string
print bidStr.encode("utf-8"), cycleStr.encode("utf-8")
m = re.match(ur"挂牌(\d+)万", bidStr)
if m:
bid = m.group(1)
m = re.match(ur"成交周期(\d+)天", cycleStr)
if m:
cycle = m.group(1)
try:
biz = bizDic[xiaoqu]
except Exception, e:
biz = "unknown"
#print bid, cycle, disName, biz
        # Persist to sqlite through the ORM
tradeItem = TradedHouse(
xiaoqu = xiaoqu,
houseType = houseType,
square = square,
houseUrl = houseUrl,
orientation = orientation,
decoration = decoration,
elevator = elevator,
floorLevel = floorLevel,
floorTotal = floorTotal,
build = build,
price = price,
tradeDate = tradeDate,
bid = bid,
deal = deal,
cycle = cycle,
district = disName,
bizcircle = biz,
)
storge.append(tradeItem)
for s in storge:
s.save()
        # Add to the pool of already-crawled URLs
        #grabedPool["data"].add(s.houseUrl)
    # Sleep a few seconds after each page to avoid putting too much load on the remote server
time.sleep(random.randint(1,3))
return stop
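# grab() returns True as soon as it reaches a deal dated on or before the newest record already
# stored for the district, which lets crawl_district_chengjiao() stop paging early on
# incremental runs.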
step_context = {"phase":0, "cnt":0, "offset":0, "pgoffset":1, "date":"20170705"}
def save_context():
global step_context
print "save", step_context, type(step_context)
json.dump(step_context, open('context','w'))
def load_context():
global step_context
step_context = json.load(open('context','r'))
print "load", step_context, type(step_context)
def crawl_district():
global step_context
for dis_offset in xrange(step_context['offset'], len(gz_district)):
dis = gz_district[dis_offset]
step_context['offset'] = dis_offset
save_context()
cnt = step_context['cnt']
if cnt == 0:
cnt = get_distric_community_cnt(dis)
print "get_distric_info", dis, cnt
step_context['cnt'] = cnt
save_context()
for i in xrange(step_context['pgoffset'], cnt+1):
step_context['pgoffset'] = i
save_context()
url = "http://wh.lianjia.com/xiaoqu/%s/pg%s/"%(dis, format(str(i)))
grab_distric(url)
step_context['pgoffset'] = 1
step_context['cnt'] = 0
save_context()
def crawl_district_chengjiao():
global step_context
for dis_offset in xrange(step_context['offset'], len(gz_district)):
dis = gz_district[dis_offset]
step_context['offset'] = dis_offset
save_context()
distric = DistricHouse.select(DistricHouse.name, DistricHouse.bizcircle, DistricHouse.avgpx).where(DistricHouse.district == gz_district_name[dis])
print distric
bizDic = {}
priceDic = {}
for item in distric:
name = item.name.rstrip().encode("utf-8")
biz = item.bizcircle.encode("utf-8")
bizDic[name] = biz
price = item.avgpx
priceDic[name] = price
#print name
cnt = step_context['cnt']
if cnt == 0:
cnt = get_distric_chengjiao_cnt(dis, [])
step_context['cnt'] = cnt
save_context()
lastMarkTrade = datetime.datetime(1990, 1, 1)
ts = TradedHouse.select(TradedHouse.tradeDate).where(TradedHouse.district == gz_district_name[dis]).order_by(TradedHouse.tradeDate.desc()).limit(1)
print ts
for item in ts:
print item.tradeDate, type(item.tradeDate)
lastMarkTrade = item.tradeDate
for i in xrange(step_context['pgoffset'], cnt+1):
step_context['pgoffset'] = i
save_context()
page = "http://wh.lianjia.com/chengjiao/%s/pg%s/"%(dis, format(str(i)))
stop = grab(page, [], gz_district_name[dis], bizDic, lastMarkTrade)
if stop == True:
break
step_context['pgoffset'] = 1
step_context['cnt'] = 0
save_context()
def crawl_district_bid():
global step_context
#proxy = build_proxy()
for dis_offset in xrange(step_context['offset'], len(gz_district)):
dis = gz_district[dis_offset]
distric = DistricHouse.select(DistricHouse.name, DistricHouse.bizcircle, DistricHouse.avgpx).where(DistricHouse.district == gz_district_name[dis])
print distric
bizDic = {}
priceDic = {}
for item in distric:
name = item.name.rstrip().encode("utf-8")
biz = item.bizcircle.encode("utf-8")
bizDic[name] = biz
price = item.avgpx
priceDic[name] = price
#print name
step_context['offset'] = dis_offset
save_context()
cnt = step_context['cnt']
if cnt == 0:
cnt = get_distric_bid_cnt(dis, [])
step_context['cnt'] = cnt
save_context()
for i in xrange(step_context['pgoffset'], cnt+1):
step_context['pgoffset'] = i
save_context()
page = "http://wh.lianjia.com/ershoufang/%s/pg%s/"%(dis, format(str(i)))
grabBid(page, [], gz_district_name[dis], priceDic)
step_context['pgoffset'] = 1
step_context['cnt'] = 0
save_context()
def crawl_district_rent():
global step_context
for dis_offset in xrange(step_context['offset'], len(gz_district)):
dis = gz_district[dis_offset]
distric = DistricHouse.select(DistricHouse.name, DistricHouse.bizcircle, DistricHouse.avgpx).where(DistricHouse.district == gz_district_name[dis])
print distric
bizDic = {}
priceDic = {}
for item in distric:
name = item.name.rstrip().encode("utf-8")
biz = item.bizcircle.encode("utf-8")
bizDic[name] = biz
price = item.avgpx
priceDic[name] = price
#print name
step_context['offset'] = dis_offset
save_context()
cnt = step_context['cnt']
if cnt == 0:
cnt = get_distric_rent_cnt(dis)
step_context['cnt'] = cnt
save_context()
for i in xrange(step_context['pgoffset'], cnt+1):
step_context['pgoffset'] = i
save_context()
page = "http://wh.lianjia.com/zufang/%s/pg%s/"%(dis, format(str(i)))
grabRent(page, [], gz_district_name[dis], priceDic, bizDic)
step_context['pgoffset'] = 1
step_context['cnt'] = 0
save_context()
def process_context():
#global step_context
print step_context['phase']
if step_context['phase'] == 0:
crawl_district()
step_context['phase'] = 1
step_context['cnt'] = 0
step_context['offset'] = 0
step_context['pgoffset'] = 1
step_context['date'] = time.strftime("%Y%m%d", time.localtime())
save_context()
elif step_context['phase'] == 1:
crawl_district_chengjiao()
step_context['phase'] = 2
step_context['cnt'] = 0
step_context['offset'] = 0
step_context['pgoffset'] = 1
save_context()
elif step_context['phase'] == 2:
crawl_district_bid()
step_context['phase'] = 3
step_context['cnt'] = 0
step_context['offset'] = 0
step_context['pgoffset'] = 1
save_context()
elif step_context['phase'] == 3:
crawl_district_rent()
step_context['phase'] = -1
step_context['cnt'] = 0
step_context['offset'] = 0
step_context['pgoffset'] = 1
save_context()
elif step_context['phase'] == -1:
#shutil.copy('houseprice.db', time.strftime("houseprice_%Y%m%d.db", time.localtime()))
clear_table()
step_context['phase'] = 0
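# The crawl is resumable: the `context` file persists the current phase (0 = districts,
# 1 = closed deals, 2 = sale listings, 3 = rentals, -1 = finished), the district offset and the
# page offset, so an interrupted run can pick up where it stopped.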
if __name__== "__main__":
#save_context()
load_context()
#verify_captcha()
if step_context['phase'] == -1:
process_context()
while step_context['phase'] != -1:
process_context()
| mit | -7,020,224,055,126,833,000 | 47.170492 | 155 | 0.585199 | false |
akx/shoop | shoop_tests/admin/test_modelform_persistence.py | 1 | 1653 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import six
from django.forms.models import ModelForm
from django.utils import translation
from shoop.core.models import Product, StockBehavior
from shoop.utils.multilanguage_model_form import MultiLanguageModelForm
class MultiProductForm(MultiLanguageModelForm):
class Meta:
model = Product
fields = (
"barcode", # Regular field
"stock_behavior", # Enum field
"name"
)
class SingleProductForm(ModelForm):
class Meta:
model = Product
fields = (
"barcode", # Regular field
"stock_behavior", # Enum field
)
@pytest.mark.django_db
def test_modelform_persistence():
with translation.override("en"):
test_product = Product(barcode="666", stock_behavior=StockBehavior.STOCKED)
test_product.set_current_language("en")
test_product.name = "foo"
frm = MultiProductForm(languages=["en"], instance=test_product, default_language="en")
assert frm["barcode"].value() == test_product.barcode
stock_behavior_field = Product._meta.get_field_by_name("stock_behavior")[0]
assert stock_behavior_field.to_python(frm["stock_behavior"].value()) is test_product.stock_behavior
assert 'value="1" selected="selected"' in six.text_type(frm["stock_behavior"].as_widget())
assert frm.initial["name"] == test_product.name
| agpl-3.0 | -856,095,380,547,393,200 | 34.170213 | 107 | 0.669691 | false |
andres53016/domotica | software/alarma.py | 1 | 1989 | import zmq,json,time
import pygame
import RPi.GPIO as GPIO
pygame.mixer.init()
GPIO.setmode(GPIO.BCM)
GPIO.setup(25,GPIO.OUT)
GPIO.setup(8,GPIO.OUT)
GPIO.output(25,0)
GPIO.output(8,0)
entradas={"puerta":17,"ventanaCocina":27,"ventanaDormitorio":22}
for entrada in entradas.values():
GPIO.setup(entrada,GPIO.IN,pull_up_down=GPIO.PUD_UP)
def reproducir2(archivo):
s = pygame.mixer.Sound(archivo)
s.play()
cont = zmq.Context()
s = cont.socket(zmq.REP)
s.bind("tcp://127.0.0.1:5001")
tags=["alarma","focoCocina","focoDormitorio"]
alarma="true"
irrumpieron=0
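# Simple JSON-over-ZeroMQ protocol handled below (inferred from the loop): a client sends
# ["w", <tag>, "true"/"false"] to switch an output or arm/disarm the alarm, and ["r"] to read
# back the state of all inputs and outputs as a JSON object.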
while True:
try:
msg = s.recv(zmq.NOBLOCK)
vector=json.loads(msg.decode())
print(vector)
if vector[0]=="w":
if vector[1]=="focoCocina":
if vector[2]=="true":
GPIO.output(25,1)
elif vector[2]=="false":
GPIO.output(25,0)
elif vector[1]=="focoDormitorio":
if vector[2]=="true":
GPIO.output(8,1)
elif vector[2]=="false":
GPIO.output(8,0)
elif vector[1]=="alarma":
alarma=vector[2]
if alarma=="true":
reproducir2("activada.wav")
elif alarma=="false":
reproducir2("desactivada.wav")
irrumpieron=0
s.send("ok".encode())
elif vector[0]=="r":
msg={}
for entrada in entradas.keys():
msg[entrada]=GPIO.input(entradas[entrada])
msg["alarma"]=alarma
msg["focoCocina"]=GPIO.input(25)
msg["focoDormitorio"]=GPIO.input(8)
s.send(json.dumps(msg).encode())
except:
pass
if alarma=="true":
for entrada in entradas.values():
irrumpieron+=GPIO.input(entrada)
if irrumpieron:
reproducir2("alarma.wav")
pass
time.sleep(0.1)
| gpl-3.0 | -7,158,794,117,130,000,000 | 29.136364 | 64 | 0.529915 | false |
csutherl/sos | sos/plugins/atomichost.py | 1 | 1611 | # Copyright (C) 2015 Red Hat, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin
import os.path
class AtomicHost(Plugin, RedHatPlugin):
""" Atomic Host """
plugin_name = "atomichost"
option_list = [("info", "gather atomic info for each image",
"fast", False)]
def check_enabled(self):
if not os.path.exists("/host/etc/system-release-cpe"):
return False
cpe = open("/host/etc/system-release-cpe", "r").readlines()
return ':atomic-host' in cpe[0]
def setup(self):
self.add_copy_spec("/etc/ostree/remotes.d")
self.add_cmd_output("atomic host status")
if self.get_option('info'):
images = self.get_command_output("docker images -q")
for image in set(
images['output'].splitlines()):
if image:
self.add_cmd_output("atomic info {0}".format(image))
| gpl-2.0 | -940,422,531,690,939,600 | 36.465116 | 72 | 0.657356 | false |
2016-Capstone/PythonController | src/Bybop_Commands.py | 1 | 8092 | import os
import sys
import struct
MY_PATH, _ = os.path.split(os.path.realpath(__file__))
ARSDK_PATH=os.path.join(MY_PATH,'..', 'arsdk-xml')
ARCOMMANDS_PATH=os.path.join(ARSDK_PATH, 'xml')
sys.path.append(ARSDK_PATH)
import arsdkparser
_ctx = arsdkparser.ArParserCtx()
arsdkparser.parse_xml(_ctx, os.path.join(ARCOMMANDS_PATH, 'generic.xml'))
for f in sorted(os.listdir(ARCOMMANDS_PATH)):
if not f.endswith('.xml') or f == 'generic.xml':
continue
arsdkparser.parse_xml(_ctx, os.path.join(ARCOMMANDS_PATH, f))
arsdkparser.finalize_ftrs(_ctx)
class CommandError(Exception):
def __init__(self, msg):
self.value = msg
def __str__(self):
return repr(self.value)
_struct_fmt_for_type = {
'u8' : 'B',
'i8' : 'b',
'u16' : 'H',
'i16' : 'h',
'u32' : 'I',
'i32' : 'i',
'u64' : 'Q',
'i64' : 'q',
'float' : 'f',
'double' : 'd',
'string' : 'z',
'enum' : 'i',
}
def _format_string_for_cmd(cmd):
ret = '<'
for arg in cmd.args:
if isinstance(arg.argType, arsdkparser.ArMultiSetting):
raise Exception('Multisettings not supported !')
elif isinstance(arg.argType, arsdkparser.ArBitfield):
arg_str_type = arsdkparser.ArArgType.TO_STRING[arg.argType.btfType]
elif isinstance(arg.argType, arsdkparser.ArEnum):
arg_str_type = 'i32'
else:
arg_str_type = arsdkparser.ArArgType.TO_STRING[arg.argType]
ret += _struct_fmt_for_type[arg_str_type]
return ret, bool(cmd.args)
def _struct_pack(fmt, *args):
"""
like struct.pack(fmt, *args)
except that a 'z' format is supported to include null terminated strings
"""
nbarg = 0
real_fmt = ''
for c in fmt:
if c == 'z':
real_fmt += '%ds' % (len(args[nbarg])+1)
nbarg += 1
else:
real_fmt += c
if c in 'cbB?hHiIlLqQfdspP':
nbarg += 1
return struct.pack(real_fmt, *args)
def _struct_unpack(fmt, string):
"""
like struct.unpack(fmt, string)
except that a 'z' format is supported to read a null terminated string
"""
real_fmt=''
null_idx=[]
nbarg = 0
for i in range(len(fmt)):
c = fmt[i]
if c == 'z':
start = struct.calcsize(real_fmt)
strlen = string[start:].find('\0')
if strlen < 0:
raise CommandError('No null char in string')
real_fmt += '%dsB' % strlen
nbarg += 1
null_idx.append(nbarg)
nbarg += 1
else:
real_fmt += c
if c in 'cbB?hHiIlLqQfdspP':
nbarg += 1
content = struct.unpack(real_fmt, string)
ret = tuple([content[i] for i in range(len(content)) if i not in null_idx])
return ret
def pack_command(s_proj, s_cls, s_cmd, *args):
"""
Pack a command into a string.
Arguments:
- s_proj : Name of the project
- s_cls : Name of the class within the project (ignored for features)
- s_cmd : Name of the command within the class
- *args : Arguments of the command.
    If the project, the class or the command can not be found in the command table,
    a CommandError will be raised.
    If the number and type of arguments in *args do not match the expected ones, a
    CommandError will be raised.
    Return the command string, the command's recommended buffer and the command's
    recommended timeout policy.
"""
proj = None
feat = None
projid = 0
cls = None
clsid = 0
cmd = None
# Let an exception be raised if we do not know the command or if the format is bad
# Find the project
if s_proj in _ctx.projectsByName:
proj = _ctx.projectsByName[s_proj]
elif s_proj in _ctx.featuresByName:
feat = _ctx.featuresByName[s_proj]
if proj is None and feat is None:
raise CommandError('Unknown project ' + s_proj)
if proj: # Project
projid = proj.projectId
# Find the class
if s_cls in proj.classesByName:
cls = proj.classesByName[s_cls]
if cls is None:
raise CommandError('Unknown class ' + s_cls + ' in project ' + s_proj)
clsid = cls.classId
# Find the command
if s_cmd in cls.cmdsByName:
cmd = cls.cmdsByName[s_cmd]
if cmd is None:
raise CommandError('Unknown command ' + s_cmd + ' in class ' + s_cls + ' of project ' + s_proj)
elif feat: # Feature
projid = feat.featureId
# Find the command
if s_cmd in feat.cmdsByName:
cmd = feat.cmdsByName[s_cmd]
if cmd is None:
raise CommandError('Unknown command ' + s_cmd + ' in feature ' + s_proj)
ret = struct.pack('<BBH', projid, clsid, cmd.cmdId)
argsfmt, needed = _format_string_for_cmd(cmd)
if needed:
try:
ret += _struct_pack(argsfmt, *args)
except IndexError:
raise CommandError('Missing arguments')
except TypeError:
raise CommandError('Bad type for arguments')
except struct.error:
raise CommandError('Bad type for arguments')
return ret, cmd.bufferType, cmd.timeoutPolicy
def unpack_command(buf):
"""
    Unpack a command string into a dictionary of arguments
    Arguments:
    - buf : The packed command
    Return a dictionary describing the command, and a boolean indicating whether the
    command is known. If the boolean is False, then the dictionary is {}
    Return dictionary format:
    {
      'name'         : full name of the command (project.class.command)
      'proj'         : project (or feature) of the command
      'class'        : class of the command ('' for features)
      'cmd'          : command name
      'listtype'     : list type (none/list/map) of the command
      'listtype_str' : string representation of the list type
      'args'         : arguments in the command, in the form { 'name':value, ... }
      'arg0'         : value of the first argument ('' if no arguments)
                       this is useful for map commands, as this will be the key.
    }
A CommandError is raised if the command is in a bad format.
"""
# Read the project/cls/cmd from the buffer
try:
(i_proj, i_cls, i_cmd) = struct.unpack('<BBH', buf[:4])
except struct.error:
raise CommandError('Bad input buffer (not an ARCommand)')
proj = None
feat = None
cls = None
cmd = None
# Let an exception be raised if we do not know the command or if the format is bad
# Find the project
if i_proj in _ctx.projectsById:
proj = _ctx.projectsById[i_proj]
# Or the feature
if i_proj in _ctx.featuresById:
feat = _ctx.featuresById[i_proj]
# If project, Find the class
if proj:
if i_cls in proj.classesById:
cls = proj.classesById[i_cls]
else:
return {}, False
if i_cmd in cls.cmdsById:
cmd = cls.cmdsById[i_cmd]
else:
return {}, False
# If feature, find directly the command
elif feat:
if i_cmd in feat.cmdsById:
cmd = feat.cmdsById[i_cmd]
elif i_cmd in feat.evtsById:
cmd = feat.evtsById[i_cmd]
else:
return {}, False
else:
return {}, False
args = ()
argsfmt, needed = _format_string_for_cmd(cmd)
if needed:
try:
args = _struct_unpack(argsfmt, buf[4:])
except struct.error:
raise CommandError('Bad input buffers (arguments do not match the command)')
ret = {
'name' : '%s.%s.%s' % (proj.name if proj else feat.name, cls.name if cls else '', cmd.name),
'proj' : proj.name if proj else feat.name,
'class' : cls.name if cls else '',
'cmd' : cmd.name,
'listtype' : cmd.listType,
'listtype_str' : arsdkparser.ArCmdListType.TO_STRING[cmd.listType],
'args' : {},
'arg0' : '',
}
for i in range(len(args)):
if i == 0:
ret['arg0'] = args[0]
ret['args'][cmd.args[i].name] = args[i]
return ret, True
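# --- Illustrative usage sketch (not part of the original module) ---
# A possible round trip through pack_command()/unpack_command(). The project,
# class and command names below ("ardrone3", "Piloting", "TakeOff") are
# assumptions: they are only valid if the ARSDK XML files parsed above define
# them, so adapt them to whatever the arsdk-xml checkout provides. Run it with
# the same Python version as the rest of the project.
if __name__ == '__main__':
    try:
        packed, buffer_type, timeout_policy = pack_command('ardrone3', 'Piloting', 'TakeOff')
        decoded, known = unpack_command(packed)
        print('packed %d bytes, decoded as %s (known=%s)' % (len(packed), decoded['name'], known))
    except CommandError as err:
        # Raised when the names above do not exist in the loaded XML tables
        print('example skipped: %s' % err)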
| bsd-3-clause | -1,871,689,588,491,917,000 | 29.885496 | 107 | 0.57699 | false |
juanchodepisa/sbtk | SBTK_League_Helper/src/security/key_handling.py | 1 | 4752 | from urllib.parse import quote
import json
import os
from src import log_entry
from .obfuscation import transform
from .exceptions import KeysDirectoryNotFound, KeysFileNotFound
user_index = os.path.join(os.path.dirname(__file__), "keys_loc.json")
default_context = "OGS"
obfuscated = "_obfuscated_"
plaintext = "_plaintext_"
no_directory_default = lambda usr: ""
def reset_index():
with open (user_index, 'w') as f:
json.dump({}, f)
log_entry (user_index, "file reset to empty value.")
def get_keys_directory(user, on_fail = no_directory_default):
with open(user_index, 'r+') as f:
index_data = json.load(f)
update = False
ref = log_entry("Searching %s's keys location from %s...." % (user, user_index))
if user in index_data:
dir = index_data[user]
else:
log_entry(ref, "Location not found.")
dir = False
if not (dir and os.path.isdir(dir)):
if dir:
log_entry (ref, "Location invalid.")
index_data.pop(user)
update = True
ref = log_entry("Getting %s's keys location from backup method...." % user)
dir = on_fail(user)
try:
if os.path.isdir(dir):
index_data[user] = dir
update = True
else:
log_entry(ref, "Location not found or invalid.")
raise KeysDirectoryNotFound(user)
finally:
if update:
ref = log_entry ("Updating %s...." % user_index)
f.seek(0)
json.dump(index_data, f, sort_keys=True, indent=4)
f.truncate()
log_entry (ref, "Updated!")
log_entry (ref, "Location found!")
return dir
def set_keys_directory(user, directory):
with open(user_index, 'r+') as f:
ref = log_entry ("Updating %s's keys location at %s...." % (user, user_index))
index_data = json.load(f)
index_data[user] = directory
f.seek(0)
json.dump(index_data, f, sort_keys=True, indent=4)
f.truncate()
log_entry (ref, "Updated!")
def remove_keys_directory(user):
with open(user_index, 'r+') as f:
ref = log_entry ("Removing %s's keys location at %s...." % (user, user_index))
index_data = json.load(f)
index_data.pop(user)
f.seek(0)
json.dump(index_data, f, sort_keys=True, indent=4)
f.truncate()
log_entry (ref, "Removed!")
def store_keys (user, keys, password="", context=default_context, if_no_directory = no_directory_default):
directory = get_keys_directory(user, if_no_directory)
if password:
ref = log_entry ("Encrypting %s's keys...." % user)
keys = transform(keys, password)
log_entry (ref, "Encrypted!")
else:
log_entry ("WARNING: No password provided to encrypt %s's keys. This is unsafe, as keys will be stored in plain text." % user)
filename = standard_filename(user, password, directory, context)
with open(filename, 'w') as f:
ref = log_entry("Storing %s's keys at %s...." % (user, filename))
json.dump(keys, f, sort_keys=True, indent=4)
log_entry(ref, "Stored!")
def retrieve_keys (user, password="", context=default_context, return_location=False):
directory = get_keys_directory(user)
filename = standard_filename(user, password, directory, context)
if os.path.isfile(filename):
with open(filename, 'r') as f:
ref = log_entry("Retrieving %s's keys from %s...." % (user, filename))
keys = json.load(f)
log_entry(ref, "Retrieved!")
else:
raise KeysFileNotFound(user, filename)
if password:
ref = log_entry ("Decrypting %s's keys...." % user)
keys = transform(keys, password)
log_entry (ref, "Decrypted!")
if return_location:
return (keys, filename)
else:
return keys
def standard_filename(user, password, directory, context):
filename = context+(obfuscated if password else plaintext)+quote(user, safe='')+".json"
return os.path.join(directory, filename)
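# --- Illustrative sketch (not part of the original module) ---
# A possible round trip with the helpers above. The username, password and
# directory are made up; it relies on the obfuscation transform() being its
# own inverse, which is how store_keys()/retrieve_keys() themselves use it.
def _example_round_trip():
    set_keys_directory("alice", "/tmp")                # hypothetical user and location
    store_keys("alice", {"api_key": "s3cr3t"}, password="hunter2")
    return retrieve_keys("alice", password="hunter2")  # -> {"api_key": "s3cr3t"}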
###########################
## ##
## INITIALIZATION CODE ##
## ##
###########################
if not os.path.isfile(user_index):
log_entry (user_index, "file does not exist.")
__ref = log_entry ("Creating file %s...." % user_index)
reset_index()
log_entry(__ref, "File created. Ready!")
del __ref
else:
log_entry (user_index, "file exists. Ready!") | mit | -288,971,447,259,933,760 | 32.471831 | 134 | 0.555766 | false |
xirdneh/oposum | oPOSum/apps/client/models.py | 1 | 3379 | from django.db import models
from oPOSum.libs import utils as pos_utils
from django.utils.translation import ugettext as _
from decimal import Decimal
from django.core.validators import RegexValidator
# Create your models here.
class Client(models.Model):
first_name = models.CharField(_("First Name"), max_length=100, blank=False)
last_name = models.CharField(_("Last Name"), max_length = 512, blank=False)
phonenumber = models.CharField(_("Phone Number"),
max_length=512, blank=True, unique=False, null=True,
validators = [
RegexValidator(r'[0-9]{3}\-?[0-9]{3}\-?[0-9]{4}',
'Format: 834-117-1086',
'phone_format_error')
]
)
address = models.TextField(_("Address"), max_length=1024, blank=True)
id_type = models.CharField(_("ID Type"), max_length=50, blank=True, default='IFE',
choices=(
('IFE', 'IFE (Credencial de Elector'),
('LICENCIA', 'Licencia de conducir'),
('PASAPORTE', 'Pasaporte'),
('OTRO', 'Otro'),
))
id_number = models.CharField(_("Identification Number"), max_length=255, blank=True)
email = models.EmailField(_("Email"), max_length = 255, blank=True, unique=False)
class Meta:
unique_together = (('first_name', 'last_name', 'phonenumber', 'email'))
verbose_name = "client"
verbose_name_plural = "clients"
def __unicode__(self):
return u"{0} {1}. {2}, {3}".format(self.first_name, self.last_name, self.phonenumber, self.email)
def as_json(self):
return dict(
id = self.id,
first_name = self.first_name.encode('latin-1'),
last_name = self.last_name.encode('latin-1'),
phonenumber = self.phonenumber,
address = self.address.encode('latin-1'),
id_type = self.id_type,
id_number = self.id_number,
email = self.email
)
def get_total_debt(self):
apps = pos_utils.get_installed_oposum_apps()
ret = Decimal(0)
if 'layaway' in apps:
from oPOSum.apps.layaway.models import Layaway
ret += Layaway.objects.get_total_debt_amount(self)
if 'repairshop' in apps:
from oPOSum.apps.repairshop.models import RepairTicket
ret += RepairTicket.objects.get_total_debt_amount(self)
return ret
def get_layaway_debt(self):
apps = pos_utils.get_installed_oposum_apps()
ret = Decimal(0)
        if 'layaway' in apps:
from oPOSum.apps.layaway.models import Layaway
ret += Layaway.objects.get_total_debt_amount(self)
return ret
def get_repairshop_debt(self):
apps = pos_utils.get_installed_oposum_apps()
ret = Decimal(0)
if 'repairshop' in apps:
from oPOSum.apps.repairshop.models import RepairTicket
ret += RepairTicket.objects.get_total_debt_amount(self)
return ret
def get_repairshop_pending_tickets(self):
#TODO get pending tickets
return 0
def save(self, *args, **kwargs):
if not self.pk:
self.phonenumber = self.phonenumber.replace("-", "")
super(Client, self).save(*args, **kwargs)
| mit | -6,659,084,490,970,760,000 | 39.22619 | 105 | 0.576798 | false |
avanzosc/avanzosc6.1 | avanzosc_crm_call_ext/crm_opportunity.py | 1 | 7613 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2012 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import time
from crm import crm
from osv import fields, osv
from tools.translate import _
class crm_opportunity(osv.osv):
_inherit = 'crm.lead'
_columns = {
'credit': fields.float('Total Receivable'),
'invoice2pay': fields.integer('Invoices to pay'),
'last_invoice': fields.date('Last Invoice'),
'last_payment': fields.date('Last Payment'),
}
def onchange_partner_id(self, cr, uid, ids, part, email=False):
invoice_obj = self.pool.get('account.invoice')
voucher_obj = self.pool.get('account.voucher')
res = super(crm_opportunity, self).onchange_partner_id(cr, uid, ids, part, email)
if part:
partner = self.pool.get('res.partner').browse(cr, uid, part)
unpaid_invoice_ids = invoice_obj.search(cr, uid, [('partner_id', '=', part), ('state', '=', 'open')])
invoice_ids = invoice_obj.search(cr, uid, [('partner_id', '=', part)])
voucher_ids = voucher_obj.search(cr, uid, [('partner_id', '=', part)])
if invoice_ids:
last_invoice = invoice_obj.browse(cr, uid, invoice_ids[0])
for invoice in invoice_obj.browse(cr, uid, invoice_ids):
if invoice.date_invoice > last_invoice.date_invoice and invoice.date_invoice != False:
last_invoice = invoice
elif last_invoice.date_invoice == False:
last_invoice = invoice
res['value'].update({
'last_invoice': last_invoice.date_invoice,
})
if voucher_ids:
last_voucher = voucher_obj.browse(cr, uid, voucher_ids[0])
for voucher in voucher_obj.browse(cr, uid, voucher_ids):
if voucher.date > last_voucher.date and voucher.date != False:
last_voucher = voucher
elif last_voucher.date == False:
last_voucher = voucher
res['value'].update({
'last_payment': last_voucher.date,
})
res['value'].update({
'credit': partner.credit,
'invoice2pay': int(len(unpaid_invoice_ids)),
})
return res
crm_opportunity()
class crm_make_sale(osv.osv_memory):
_inherit = "crm.make.sale"
def makeOrder(self, cr, uid, ids, context=None):
"""
        This function creates a Quotation for the given case.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm make sales' ids
@param context: A standard dictionary for contextual values
@return: Dictionary value of created sales order.
"""
if context is None:
context = {}
case_obj = self.pool.get('crm.lead')
sale_obj = self.pool.get('sale.order')
partner_obj = self.pool.get('res.partner')
address_obj = self.pool.get('res.partner.address')
data = context and context.get('active_ids', []) or []
for make in self.browse(cr, uid, ids, context=context):
partner = make.partner_id
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
fpos = partner.property_account_position and partner.property_account_position.id or False
new_ids = []
for case in case_obj.browse(cr, uid, data, context=context):
if not partner and case.partner_id:
partner = case.partner_id
fpos = partner.property_account_position and partner.property_account_position.id or False
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
if False in partner_addr.values():
raise osv.except_osv(_('Data Insufficient!'), _('Customer has no addresses defined!'))
def_address = address_obj.browse(cr, uid, partner_addr['default'])
vals = {
'origin': _('Opportunity: %s') % str(case.id),
'section_id': case.section_id and case.section_id.id or False,
'shop_id': make.shop_id.id,
'partner_id': partner.id,
'pricelist_id': pricelist,
'partner_invoice_id': partner_addr['invoice'],
'partner_order_id': partner_addr['contact'],
'partner_shipping_id': partner_addr['delivery'],
'date_order': time.strftime('%Y-%m-%d'),
'fiscal_position': fpos,
}
if partner.id:
vals['user_id'] = partner.user_id and partner.user_id.id or uid
new_id = sale_obj.create(cr, uid, vals)
case_obj.write(cr, uid, [case.id], {'ref': 'sale.order,%s' % new_id})
new_ids.append(new_id)
message = _('Opportunity ') + " '" + case.name + "' "+ _("is converted to Quotation.")
self.log(cr, uid, case.id, message)
case_obj.message_append(cr, uid, [case], _("Converted to Sales Quotation(id: %s).") % (new_id))
if make.close:
case_obj.case_close(cr, uid, data)
if not new_ids:
return {'type': 'ir.actions.act_window_close'}
if len(new_ids)<=1:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'res_id': new_ids and new_ids[0]
}
else:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'res_id': new_ids
}
return value
crm_make_sale()
| agpl-3.0 | -5,189,564,213,395,655,000 | 45.408537 | 113 | 0.517935 | false |
weirdgiraffe/plugin.video.giraffe.seasonvar | resources/site-packages/plugin_video/screen.py | 1 | 7083 | # coding: utf-8
#
# Copyright © 2017 weirdgiraffe <[email protected]>
#
# Distributed under terms of the MIT license.
#
from kodi import logger, Plugin
import seasonvar
from datetime import datetime, timedelta
def week(plugin):
date = datetime.today()
for date_offset in range(7):
datestr = date.strftime('%d.%m.%Y')
dayurl = plugin.make_url({
'screen': 'day',
'date': datestr,
})
plugin.add_screen_directory(datestr, dayurl)
date -= timedelta(days=1)
searchurl = plugin.make_url({'screen': 'search'})
plugin.add_screen_directory('[COLOR FFFFD700]поиск[/COLOR]', searchurl)
plugin.publish_screen(True)
def day(plugin):
date = plugin.args.get('date')
if date is None:
        logger.error('{0}: "date" arg is missing or malformed: {1}'.format(
            'screen "day"', plugin.args))
plugin.publish_screen(False)
return
for i in seasonvar.day_items(date):
url = plugin.make_url({
'screen': 'episodes',
'url': i['url'],
})
name = '{0} [COLOR FFFFD700]{1}[/COLOR]'.format(
i['name'], i['changes'])
plugin.add_screen_directory(name, url,
thumb=seasonvar.thumb_url(i['url']))
plugin.publish_screen(True)
def direct_search(plugin):
term = plugin.args.get('q')
if term is None:
        logger.error('{0}: "q" arg is missing or malformed: {1}'.format(
            'screen "direct_search"', plugin.args))
plugin.publish_screen(False)
return
for i in seasonvar.search(term):
if i['url'] is not None:
season_url = i['url'].encode('utf-8')
url = plugin.make_url({
'screen': 'episodes',
'url': season_url,
})
plugin.add_screen_directory(
i['name'],
url,
thumb=seasonvar.thumb_url(season_url)
)
plugin.publish_screen(True)
def search(plugin):
term = plugin.read_input('Что искать?')
plugin.args["q"] = term
direct_search(plugin)
def episodes(plugin):
season_url = plugin.args.get('url')
if season_url is None:
        logger.error('{0}: "url" arg is missing or malformed: {1}'.format(
            'screen "episodes"', plugin.args))
plugin.publish_screen(False)
return
tr = plugin.args.get('tr')
thumb = seasonvar.thumb_url(season_url)
season = seasonvar.season_info(season_url)
if season is None or len(season) == 0:
        logger.error('{0}: failed to get season info: {1}'.format(
            'screen "episodes"', plugin.args))
plugin.show_notification(
'Content is blocked',
'Or external player is being used')
plugin.publish_screen(False)
return
if season.get('total', 0) > 1:
url = plugin.make_url({
'screen': 'seasons',
'url': season_url,
})
name = '[COLOR FFFFD700]сезон[/COLOR]: {0} / {1}'.format(
season['number'], season['total'])
plugin.add_screen_directory(name, url)
if len(season.get('playlist', [])) > 1:
url = plugin.make_url({
'screen': 'translations',
'url': season_url,
'tr': tr,
})
name = '[COLOR FFFFD700]озвучка[/COLOR]: {0}'.format(
tr if tr is not None else 'Стандартная')
plugin.add_screen_directory(name, url)
pl_url = (x['url'] for x in season.get('playlist', []) if x['tr'] == tr)
for e in (x for url in pl_url for x in seasonvar.episodes(url)):
url = plugin.make_url({'play': e['url']})
plugin.add_screen_item(e['name'], url, thumb=thumb)
plugin.publish_screen(True)
def seasons(plugin):
season_url = plugin.args.get('url')
if season_url is None:
        logger.error('{0}: "url" arg is missing or malformed: {1}'.format(
            'screen "seasons"', plugin.args))
plugin.publish_screen(False)
return
num, seasons = seasonvar.seasons(season_url)
if seasons is None:
        logger.error('{0}: failed to get season info: {1}'.format(
            'screen "seasons"', plugin.args))
plugin.publish_screen(False)
return
for n, s in enumerate(seasons, 1):
prefix = '* ' if n == num else ''
name = '{0}сезон {1}'.format(prefix, n)
url = plugin.make_url({
'screen': 'episodes',
'url': s,
})
plugin.add_screen_directory(name, url, thumb=seasonvar.thumb_url(s))
plugin.publish_screen(True)
def translations(plugin):
season_url = plugin.args.get('url')
if season_url is None:
        logger.error('{0}: "url" arg is missing or malformed: {1}'.format(
            'screen "translations"', plugin.args))
plugin.publish_screen(False)
return
tr = plugin.args.get('tr')
thumb = seasonvar.thumb_url(season_url)
season = seasonvar.season_info(season_url)
if season is None:
        logger.error('{0}: failed to get season info: {1}'.format(
            'screen "translations"', plugin.args))
plugin.publish_screen(False)
return
for n, pl in enumerate(season['playlist']):
if tr is None and n == 0 or pl['tr'] == tr:
prefix = '* '
else:
prefix = ''
url = plugin.make_url({
'screen': 'episodes',
'url': season_url,
'tr': pl['tr'],
})
name = '{0}{1}'.format(
prefix,
pl['tr'] if pl['tr'] is not None else 'Стандартная')
plugin.add_screen_directory(name, url, thumb=thumb)
plugin.publish_screen(True)
def play(plugin):
play_url = plugin.args.get('play')
if play_url is None:
        logger.error('{0}: "url" arg is missing or malformed: {1}'.format(
            'play', plugin.args))
plugin.publish_screen(False)
return
plugin.play(play_url)
def render(plugin):
screen = plugin.args.get('screen')
if screen is None:
screen = 'week'
try:
if 'play' in plugin.args:
play(plugin)
return
if 'q' in plugin.args:
direct_search(plugin)
return
{'week': week,
'day': day,
'episodes': episodes,
'seasons': seasons,
'translations': translations,
'search': search,
}[screen](plugin)
except KeyError:
logger.error('unexpected screen "{0}"'.format(screen))
except seasonvar.NetworkError:
logger.error('NetworkError')
plugin.show_notification(
'Network error',
'Check your connection')
except seasonvar.HTTPError:
logger.error('HTTPError')
plugin.show_notification(
'HTTP error',
'Something goes wrong. Please, send your logs to addon author')
if __name__ == "__main__":
import sys
render(Plugin(*sys.argv))
| mit | 7,216,715,170,337,933,000 | 31.391705 | 76 | 0.551999 | false |
bagage/cadastre-conflation | back/batimap/bbox.py | 1 | 1181 | import re
from math import sqrt
class Bbox(object):
def __init__(self, xmin, ymin, xmax, ymax):
self.coords = [xmin, ymin, xmax, ymax]
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
def __repr__(self):
return f"{self.xmin},{self.ymin},{self.xmax},{self.ymax}"
def max_distance(self):
"""
Maximum distance from the center of the screen that this bbox may reach
"""
return sqrt((self.xmax - self.xmin) ** 2 + (self.ymax - self.ymin) ** 2) / 2
@staticmethod
def from_pg(bbox_string):
# cf https://docs.python.org/3/library/re.html#simulating-scanf
# need to handle 10e3 notation too
float_re = r"([-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?)"
box_re = re.compile(
r"BOX\("
+ float_re
+ " "
+ float_re
+ ","
+ float_re
+ " "
+ float_re
+ r"\)"
)
groups = box_re.match(bbox_string).groups()
return Bbox(
float(groups[0]), float(groups[4]), float(groups[8]), float(groups[12])
)
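# Illustrative usage (not part of the original module): parse an ST_Extent-style
# string as PostGIS returns it. The coordinates below are made up.
if __name__ == "__main__":
    bb = Bbox.from_pg("BOX(2.2945 48.8582,2.3945 48.9582)")
    print(bb)                 # -> 2.2945,48.8582,2.3945,48.9582
    print(bb.max_distance())  # half of the box diagonal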
| mit | -6,715,307,017,670,733,000 | 26.465116 | 84 | 0.485182 | false |
guiccbr/autonomous-fuzzy-quadcopter | python/py_quad_control/controller/sparc.py | 1 | 17796 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# ------------------------ Imports ----------------------------------#
from __future__ import division # Force real division
import numpy as np # Numpy Arrays
import math # Square root
# ------------------------ Classes ---------------------------------#
class SparcController:
def __init__(self, control_range, ref_range, input_size, init_input, init_ref, init_y, monotonicity=1,
dc_radius_const=0.5):
"""
Initiates a sparc controller using the first sample.
Keyword arguments:
control_range -- Tuple of two elements with the first element representing
the minimum value of the control signal, and the second
element representing its maximum. (float, float).
ref_range -- Tuple of two elements with the first element representing
the minimum value of the reference (desired plant state) the second
element representing its maximum. (float, float).
input_size -- Size of the input x (int)
init_input -- First input value (numpy array of size input_size)
        init_ref -- First reference value (float)
        init_y -- First plant output value (float)
        monotonicity -- +1 if the plant output grows with the control signal, -1 otherwise (int)
        dc_radius_const -- DataCloud radius constant (see DataCloud class for more details)
"""
# Set constant values
self.umin, self.umax = control_range
self.refmin, self.refmax = ref_range
self.xsize = input_size
self.radius_update_const = dc_radius_const
self.k = 1 # Initial step number
# Global density recursive values Initial
self.g_csi = np.array([0.0] * (self.xsize + 1))
self.g_b = 0.0
# - Consequents normalization constant (Changes according to the current reference curve)
self.c = abs(float(self.umax - self.umin) / (self.refmax - self.refmin))
        # - The sign of C matches the sign of the monotonicity
if monotonicity < 0:
self.c = -self.c
        # Initial consequent will be proportional to the error.
q_init = self.c * (init_ref - init_y)
# Initial input
curr_x = np.copy(init_input)
curr_z = np.append(curr_x, q_init)
# Instantiate a list of clouds
self.clouds = []
# Initiates SPARC with the first cloud, with an initial
# consequent given by q_init, and update the plant if first iteration.
initial_variance = np.array([0.0] * self.xsize)
self.clouds.append(DataCloud(curr_z, initial_variance, self.radius_update_const))
# Initializes array with membership degrees.
# md[i] corresponds to the degree of membership of the sample xk to the Data Cloud i
curr_md = np.array([1.0])
# Store last sample
self.prev_y = init_y
self.prev_ref = init_ref
self.prev_md = np.copy(curr_md)
self.prev_z = np.copy(curr_z)
# Update k before next iteration
self.k += 1
def update_reference_range(self, refmin, refmax):
"""
Update the Consequent normalization constant according to a new refmim, refmax.
:param refmin: Minimum value of the current reference curve.
:param refmax: Maximum value of the current reference curve.
:return: void
"""
self.refmax = refmax
self.refmin = refmin
self.c = float(self.umax - self.umin) / (self.refmax - self.refmin)
def update(self, curr_x, curr_y, curr_ref, prev_u):
"""
Calculate the output given an input and a reference.
Keyword arguments:
curr_x -- current data sample of dimension XSIZE (numpy array of size self.xsize)
curr_y -- current plant output value (float)
curr_ref -- current reference value (float)
prev_u -- value of the input finally applied to the plant (Truncated if needed)
Returns:
u -- output respective to curr_x (float)
"""
num_clouds = len(self.clouds)
#print 'num_clouds, curr_x:', num_clouds, curr_x
# (1) Updates the consequents of all clouds
for i in range(num_clouds):
self.clouds[i].update_consequent(self.prev_md[i], self.prev_ref, curr_y,
prev_u, self.c, self.umin, self.umax)
#print 'First Cloud (focal point, consequent):', self.clouds[0].zf
# (2) Find the the Data Cloud associated to the new sample
# First, calculate the relative local density relative to each cloud
relative_ld = [0.] * num_clouds
for i in range(num_clouds):
relative_ld[i] = self.clouds[i].get_local_density(curr_x)
# Second, calculate the normalized relative densities (membership degrees)
curr_md = [md / float(sum(relative_ld)) for md in relative_ld]
# Third, find the data cloud that better describes the current sample.
curr_x_associated_cloud = np.argmax(curr_md)
# (3) Generate control signal
curr_u = 0.0
for i in range(num_clouds):
curr_u += curr_md[i] * self.clouds[i].get_consequent()
# (4) Compute Global Density
# First, concatenates x and u to form z and compute global
curr_z = np.append(curr_x, curr_u)
# Second, calculate Global Density of curr_z
curr_gd = self.get_global_density(curr_z)
# (5) Perform two tests to check if a new cloud is needed or if it needs to be updated.
# First, Calculate the global density of the focal points of every existing Data Cloud.
focal_points_gd = np.array([0.] * num_clouds)
for i in range(num_clouds):
focal_points_gd[i] = self.get_global_density(self.clouds[i].zf)
# Second, Calculate the distances from the current sample to every focal point
focal_points_distances = np.array([0.] * num_clouds)
for i in range(num_clouds):
focal_points_distances[i] = np.linalg.norm(curr_x - self.clouds[i].zf[:self.xsize])
# Third, Check if the Global Density of the current point is bigger than the Global Densities of all
# the focal points.
curr_sample_global_density_is_better = False
if curr_gd > np.max(focal_points_gd):
curr_sample_global_density_is_better = True
# Fourth, Check if the point is far enough from every data cloud.
curr_sample_is_distant_enough = True
for i in range(num_clouds):
if focal_points_distances[i] <= np.max(self.clouds[i].r) / 2.:
curr_sample_is_distant_enough = False
# Inverse Alternative to FIFTH (Check if sample satisfies one sigma condition)
# If it's satisfied, a new cloud is not created.
# if np.max(relative_ld) > 1./math.e:
# curr_sample_is_distant_enough = False
# Fifth, If a new cloud is needed, creates a new cloud
# Otherwise, adds the current point to the best matching cloud and checks
# if the focal point has to be updated
new_cloud_needed = curr_sample_global_density_is_better and curr_sample_is_distant_enough
# If both conditions are satisfied (global density is better and sample is distant enough), create a new cloud
if new_cloud_needed:
# Get variances of all clouds to get the local scatter for the new cloud.
local_scatters = np.array([[0., 0.]] * num_clouds)
for i in range(num_clouds):
local_scatters[i][0] = math.sqrt(self.clouds[i].variance[0])
local_scatters[i][1] = math.sqrt(self.clouds[i].variance[1])
new_cloud_local_scatter = np.average(local_scatters, 0)
new_cloud_variance = new_cloud_local_scatter ** 2
# Creates new cloud with focal point zk and starting variance
self.clouds.append(DataCloud(curr_z, new_cloud_variance, self.radius_update_const))
# Update Membership degree to include this new cloud!
relative_ld.append(self.clouds[num_clouds].get_local_density(curr_x))
curr_md = [float(md) / sum(relative_ld) for md in relative_ld]
# If a new cloud is not needed, a focal point update might still be needed. If the local density of the current
# sample relative to the associated cloud is bigger than the local density of the focal point of the associated
# cloud relative to itself, and also if the global density of the current sample is bigger than the global
# density of the focal point of the associated cloud, than update the focal point.
# TEST: Add data sample to data cloud before updating focal point
# self.clouds[curr_x_associated_cloud].add_point(curr_z)
if not new_cloud_needed:
# Local density of the sample and the focal point relative to the associated cloud:
associated_cloud_xf = self.clouds[curr_x_associated_cloud].zf[:self.xsize]
associated_cloud_xf_ld = self.clouds[curr_x_associated_cloud].get_local_density(associated_cloud_xf)
curr_x_ld = self.clouds[curr_x_associated_cloud].get_local_density(curr_x)
# Global density of the sample and the focal point of the associated cloud:
associated_cloud_zf = self.clouds[curr_x_associated_cloud].zf
associated_cloud_zf_gd = self.get_global_density(associated_cloud_zf)
if curr_x_ld > associated_cloud_xf_ld and curr_gd > associated_cloud_zf_gd:
self.clouds[curr_x_associated_cloud].update_focal_point(curr_z)
# Add data sample to data cloud after updating focal point
self.clouds[curr_x_associated_cloud].add_point(curr_z)
# Update Global Density values g_csi and g_b
# Update global density recursive values
prev_gcsi = self.g_csi
prev_gb = self.g_b
self.g_csi = prev_gcsi + self.prev_z
self.g_b = prev_gb + np.dot(self.prev_z, self.prev_z)
# Store last sample
self.prev_md = np.copy(curr_md)
self.prev_ref = curr_ref
self.prev_y = curr_y
self.prev_z = np.copy(curr_z)
# Update k before next iteration
self.k += 1
# Return output u related to input curr_x
return curr_u
def get_global_density(self, z):
"""
Calculates recursively the Global Density of point curr_z.
Keyword arguments:
curr_z -- sample that will have its corresponding global density calculated.
"""
prev_z = self.prev_z
prev_gcsi = self.g_csi
prev_gb = self.g_b
gcsi_k = prev_gcsi + prev_z
ga_k = np.dot(z, gcsi_k)
gb_k = prev_gb + np.dot(prev_z, prev_z)
gd = float(self.k - 1) / ((self.k - 1) * (np.dot(z, z) + 1) - 2. * ga_k + gb_k)
return gd
class DataCloud:
"""
Class that represents a data cloud.
It stores the following information in the form of instance variables:
zf -- Focal point, composed by xf (data sample) and q (consequent)
csi, betha -- parameters for recursively calculation of local density
r -- array of radii, one for each dimension of X.
sigma_sq -- parameter for recursively calculation of radii. (variance)
m -- number of points added so far
z -- Last point added.
"""
def __init__(self, z, initial_variance, radius_update_const=0.5):
"""
Initializes a DataCloud with one point z.
Extracts x and u, setting u as the consequent q.
Keyword arguments:
z --
initial_variance -- array containing the variance starting value for the new DataCloud
radius_update_const -- Radius constant, usually 0.5
"""
# Set radius update constant
self.radius_update_const = radius_update_const
# Gets plant input (x) and control signal (u)
# from z where z = [x', u']', setting them
# as focal point (xf) and consequent (q) respectively.
self.zf = np.copy(z)
self.xsize = len(z) - 1
# Local density calculation values
self.csi = np.array([0.0] * self.xsize)
self.betha = 0.0
# Data Cloud Size
self.m = 1
# Data Cloud Radius
# Each data cloud has X_SIZE radiuses, one for each dimension of x.
# By definition the initial radius r1 is 1 for each dimension.
self.r = np.array([1.0] * self.xsize)
# Local Scatter square (sigma_square), has to be stored for recursive calculation of
# the radius. For each dimension of x, there's a sigma associated to it.
# By definition the initial sigma sigma1 is 1 if not provided
self.variance = np.copy(initial_variance)
# Save previous added point for next calculations
self.prev_z = np.copy(z)
def update_focal_point(self, z):
"""
Update focal point. Just updates self.zf. Does not increment the size of the data cloud, neither
updates radius or variance. Usually add_point is called right after.
Keyword arguments:
z -- datacloud point composed by x (data sample) and u (control signal)
"""
self.zf = z
def __update_radius__(self):
"""
Update radius of the Data Cloud recursively.
It needs to be called after a new point is added to the Cloud.
"""
p = self.radius_update_const
for i in range(0, len(self.r)):
self.r[i] = p * self.r[i] + (1 - p) * math.sqrt(self.variance[i])
def __update_variance_and_centroid__(self, curr_z):
"""
Update the local scatter square of the Data Cloud recursively.
The local scatter ( sigma ) is needed to update the radius.
Keyword arguments:
curr_z -- Last added sample
"""
# Extract X and centroid
x = curr_z[:self.xsize]
# Calculate New Centroid (OLD WAY)
# for i in range(0, len(self.centroid)):
# new_centroid[i] = (self.centroid[i] * (self.m - 1) + curr_z[i]) / self.m
# Calulate and Update New Variance (OLD WAY _ WITH CENTROID)
# for i in range(0, len(self.variance)):
# prev_variance = self.variance[i]
# self.variance[i] = (1.0 / self.m) * (
# (self.m - 1) * prev_variance + (x[i] - self.centroid[i]) * (x[i] - new_centroid[i]))
# Calulate and Update New Variance (NEW WAY _ WITH FOCAL POINT)
# for i in range(0, len(self.variance)):
# # dist_x_f = self.zf[:self.xsize] - x
# dist_z_f = self.zf - curr_z
# self.variance[i] = self.variance[i]*float(self.m-1)/self.m + np.dot(dist_z_f, dist_z_f)/float(self.m-1)
# Calulate and Update New Variance (NEW WAY _ WITH FOCAL POINT)
for i in range(len(self.variance)):
dist_x_f = self.zf[:self.xsize][i] - x[i]
self.variance[i] = self.variance[i] * float(self.m - 1) / self.m + (dist_x_f ** 2) / float(self.m - 1)
# Update centroid (OLD WAY)
# self.centroid = new_centroid
def add_point(self, curr_z):
"""
Associates a new point to the data cloud, updating the number of points
updating local density values, sigma and radius.
Keyword arguments:
curr_z -- datacloud point composed by x (data sample) and u (control signal)
"""
# Update number of points
self.m += 1
# Update Variance
self.__update_variance_and_centroid__(curr_z)
# Update radius
self.__update_radius__()
# Update local density values
prev_x = self.prev_z[:self.xsize]
prev_csi = self.csi
prev_b = self.betha
self.csi = prev_csi + prev_x
self.betha = prev_b + np.dot(prev_x, prev_x)
# Update Prev values (last added point):
self.prev_z = np.copy(curr_z)
def get_local_density(self, x):
"""
Recursively calculate the local density relative to the sample input x
Keyword arguments:
x -- an input of dimension XSIZE
"""
prev_x = self.prev_z[:self.xsize]
prev_csi = self.csi
prev_b = self.betha
csi_k = prev_csi + prev_x
a_k = np.dot(x, csi_k)
b_k = prev_b + np.dot(prev_x, prev_x)
ld = float(self.m) / (self.m * (np.dot(x, x) + 1) - 2. * a_k + b_k)
return ld
def update_consequent(self, prev_md, prev_ref, curr_y, prev_u, c, umin, umax):
"""
Updates consequent
Keyword arguments:
prev_md -- membership degree of the previous data sample related to this cloud.
prev_ref -- previous reference value
curr_y -- current plant output value
prev_u -- previous control signal
C -- Consequent constant calculated by: C = (UMAX - UMIN)/(REFMAX - REFMIN)
umin, umax -- Control Signal range, use determine if the consequent should be penalized.
"""
# Calculate relative error:
e = (prev_ref - curr_y)
# Calculate consequent differential
dq = c * prev_md * e
# Checks if control signal maximum or minimum has been reached
# to prevent penalization on these cases
if (prev_u <= umin) and (dq < 0):
dq = 0.0
if (prev_u >= umax) and (dq > 0):
dq = 0.0
# Get Consequent
q = self.get_consequent()
# Updates consequent
self.set_consequent(q + dq)
def set_consequent(self, new_consequent):
self.zf[-1] = new_consequent
def get_consequent(self):
"""
Extract consequent value from the focal point of the data cloud (zf).
"""
return self.zf[-1]
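# ------------------- Illustrative usage sketch ---------------------#
# Not part of the original controller: a minimal closed loop against a toy
# first-order plant, shown only to illustrate the call sequence. The plant
# model, ranges and values below are made up, and the control signal is
# clamped externally before being fed back, as the update() docstring asks
# for "the input finally applied to the plant".
if __name__ == '__main__':
    umin, umax = -10.0, 10.0
    ref, y, u = 1.0, 0.0, 0.0
    ctrl = SparcController(control_range=(umin, umax), ref_range=(0.0, 1.0),
                           input_size=2, init_input=np.array([ref - y, y]),
                           init_ref=ref, init_y=y)
    for _ in range(50):
        y = 0.9 * y + 0.1 * u              # toy plant
        x = np.array([ref - y, y])         # controller input: error and output
        u = min(umax, max(umin, ctrl.update(x, y, ref, u)))
    print('final output: %s' % y)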
| mit | 6,382,856,539,554,038,000 | 38.284768 | 119 | 0.600079 | false |
rtts/qqq | user_profile/views.py | 1 | 3338 | from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.template.loader import get_template
from django.template import Context, RequestContext
from django.contrib.auth.decorators import login_required
from user_profile.models import Profile
from user_profile.forms import *
from messages.models import Message
from qqq.models import Contribution
from messages.views import reply as dm_reply
from messages.views import delete as dm_delete
from messages.views import compose as dm_compose
from messages.utils import format_quote
from datetime import datetime
import types
def get_profile(user):
"""Returns a user's profile, and creates one if it doesn't exist
yet. (Should've been implemented in some auth module, but just
in case...)"""
try:
p = Profile.objects.get(user=user)
except Profile.DoesNotExist:
p = Profile(description='', user=user)
p.save()
return p
def view_profile(request, username):
t = get_template('profile.html')
c = RequestContext(request)
user = get_object_or_404(User, username=username)
profile = get_profile(user)
c['karma'] = profile.karma
c['description'] = profile.description
c['feed'] = user.contributions.all().select_related('user', 'question', 'revision', 'post', 'tagaction')[:25]
c['username'] = username
return HttpResponse(t.render(c))
@login_required
def view_message(request, id):
t = get_template('view_pm.html')
c = RequestContext(request)
msg = get_object_or_404(Message, id=id)
msg.read_at = datetime.now()
msg.save()
c['msg'] = msg
return HttpResponse(t.render(c))
@login_required
def edit_profile(request):
t = get_template('edit_profile.html')
c = RequestContext(request)
profile = get_profile(request.user)
if request.method == 'POST':
form = ProfileForm(request.POST)
if form.is_valid():
form.save(profile)
return HttpResponseRedirect(reverse(view_profile, args=[request.user.username]))
else:
form = ProfileForm(initial={'description': profile.description})
c['form'] = form
c['username'] = request.user.username
return HttpResponse(t.render(c))
def sent(request):
c = RequestContext(request)
t = get_template('pm_sent.html')
return HttpResponse(t.render(c))
@login_required
def compose(request, username):
t = get_template('send-pm.html')
c = RequestContext(request)
next = reverse(sent)
recipient = get_object_or_404(User, username=username)
if 'parent' in request.GET:
parent = get_object_or_404(Message, id=request.GET['parent'])
else:
parent = False
if request.method == 'POST':
form = MessageForm(request.POST)
if form.is_valid():
form.save(sender=request.user, recipient=recipient, parent=parent)
return HttpResponseRedirect(next)
else:
if parent:
body = format_quote(parent.sender, parent.body)
else:
body = ''
form = MessageForm(initial = {'body': body})
c['form'] = form
c['username'] = username
return HttpResponse(t.render(c))
@login_required
def delete(request):
if 'message' in request.GET:
return dm_delete(request, request.GET['message'], success_url="/")
else:
raise Http404
| gpl-3.0 | 1,092,736,102,146,066,000 | 29.623853 | 111 | 0.713901 | false |
shady831213/myBlog | myBlog/articles/permission.py | 1 | 1112 | from rest_framework import permissions
# Admin-or-read-only: safe (read) methods are open; modifications require staff.
class ArticlePermission(permissions.BasePermission):
def has_permission(self, request, view):
return (
request.method in permissions.SAFE_METHODS or
request.user and request.user.is_staff
)
def has_object_permission(self, request, view, obj):
return (
request.user.is_staff or obj.status == 'published' and request.method in permissions.SAFE_METHODS
)
class ArticleLonginPermission(permissions.BasePermission):
def has_permission(self, request, view):
return (
request.method in permissions.SAFE_METHODS or
request.user and request.user.is_authenticated
)
def has_object_permission(self, request, view, obj):
return (
request.user.is_staff or
request.user.is_authenticated and request.method == 'POST' or
obj.author == request.user and request.method in ('PUT', 'GET','PATCH', 'DELETE') or
obj.status == 'published' and request.method in permissions.SAFE_METHODS
) | mit | 2,163,750,632,873,102,600 | 37.37931 | 109 | 0.652878 | false |
Exesium/python_training | fixture/session.py | 1 | 1504 | # -*- coding: utf-8 -*-
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("pass").click()
wd.find_element_by_id("LoginForm").click()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("home").click()
wd.find_element_by_link_text("Logout").click()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as(self, username):
return self.logged_user() == username
def logged_user(self):
wd = self.app.wd
return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]
def ensure_logout(self):
if self.is_logged_in():
self.logout()
def ensure_login(self, username, password):
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password)
| gpl-3.0 | 6,056,223,281,163,196,000 | 30.333333 | 76 | 0.569149 | false |
padajuan/ansible-module-etcd | library/etcd.py | 1 | 9055 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, Juan Manuel Parrilla <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: etcd
short_description: Set and delete values from etcd
description:
- Sets or deletes values in etcd.
- Parent directories of the key will be created if they do not already exist.
version_added: "2.4"
author: Juan Manuel Parrilla (@padajuan)
requirements:
- python-etcd >= 0.3.2
options:
state:
description:
- This will be the state of the key in etcd
- after this module completes its operations.
required: true
choices: [present, absent]
default: null
protocol:
description:
- The scheme to connect to ETCD
required: false
default: http
choices: [http, https]
host:
description:
- The etcd host to use
required: false
default: 127.0.0.1
port:
description:
- The port to use on the above etcd host
required: false
default: 4001
api_version:
description:
- Api version of ETCD endpoint
required: false
default: '/v2'
key:
description:
- The key in etcd at which to set the value
required: true
default: null
value:
description:
- The value to be set in etcd
required: true
default: null
override:
description:
- Force the overwriting of a key-value on etcd
required: false
default: false
allow_redirect:
description:
- Etcd attempts to redirect all write requests to the etcd master
- for safety reasons. If allow_redirect is set to false, such
- redirection will not be allowed. In this case, the value for `host`
- must be the etcd leader or this module will err.
required: false
default: true
read_timeout:
description:
            - Time limit for a read request against ETCD
required: false
default: 60
cert:
description:
- Certificate to connect to an ETCD server with SSL
required: false
default: None
    ca_cert:
description:
- CA Certificate to connect to an ETCD server with SSL
required: false
default: None
username:
description:
- Username to connect to ETCD with RBAC activated
required: false
default: None (by default etcd will use guest)
password:
description:
- Password to authenticate to ETCD with RBAC activated
required: false
default: None
notes:
- Do not override the value stored on ETCD, you must specify it.
- Based on a module from Rafe Colton
- Adapted from https://github.com/modcloth-labs/ansible-module-etcd
- The python-etcd bindings are not still compatible with v1 and v3 of
ETCD api endpoint, then we will not work with it.
- I will try to contribute with python-etcd to make it compatible
with those versions.
"""
EXAMPLES = """
---
# set a value in etcd
- etcd:
state=present
host=my-etcd-host.example.com
port=4001
key=/asdf/foo/bar/baz/gorp
value=my-foo-bar-baz-gor-server.prod.example.com
# delete a value from etcd
- etcd:
state=absent
host=my-etcd-host.example.com
port=4001
key=/asdf/foo/bar/baz/gorp
# override an existant ETCD value
- etcd:
state: present
host: 127.0.0.1
port: 2379
key: "test"
value: "test_value"
override: True
# override a value through SSL connection
- etcd:
state: present
protocol: https
host: etcd.globalcorp.com
port: 2379
key: "test"
value: "test_value"
cert: /path/to/cert
ca_cert: /path/to/CA
override: True
# delete an ETCD value with a user and password
- etcd:
state: absent
host: 127.0.0.1
port: 2379
username: 'user'
password: 'P4ssW0rd'
"""
RETURN = '''
---
key:
    description: The key queried
returned: success
type: string
value:
description: The result of the write on ETCD
    returned: success
type: dictionary
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import ConnectionError
try:
import etcd
etcd_found = True
except ImportError:
etcd_found = False
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['present', 'absent']),
protocol=dict(required=False, default='http', choices=['http', 'https']),
host=dict(required=False, default='127.0.0.1'),
port=dict(required=False, default=4001, type='int'),
api_version=dict(required=False, default='/v2'),
key=dict(required=True),
value=dict(required=False, default=None),
            override=dict(required=False, default=False, type='bool'),
allow_redirect=dict(required=False, default=True),
read_timeout=dict(required=False, default=60, type='int'),
cert=dict(required=False, default=None),
ca_cert=dict(required=False, default=None),
username=dict(required=False, default=None),
password=dict(required=False, default=None, no_log=True)
),
supports_check_mode=True
)
if not etcd_found:
module.fail_json(msg="the python etcd module is required")
    # For now python-etcd is not compatible with the v1 and v3 ETCD API versions.
    # Contributions are being made to https://github.com/jplana/python-etcd to add them.
    # The entry point of this module is prepared for other versions.
if module.params['api_version'] != '/v2':
module.fail_json(msg="This module only support v2 of ETCD, for now")
# State
state = module.params['state']
# Target info
target_scheme = module.params['protocol']
target_host = module.params['host']
target_port = int(module.params['port'])
target_version = module.params['api_version']
# K-V
key = module.params['key']
value = module.params['value']
# Config
override = module.params['override']
if state == 'present' and not value:
module.fail_json(msg='Value is required with state="present".')
kwargs = {
'protocol': target_scheme,
'host': target_host,
'port': target_port,
'version_prefix': target_version,
'allow_redirect': module.params['allow_redirect'],
'read_timeout': int(module.params['read_timeout']),
'cert': module.params['cert'],
'ca_cert': module.params['ca_cert'],
'username': module.params['username'],
'password': module.params['password']
}
client = etcd.Client(**kwargs)
change = False
prev_value = None
# Attempt to get key
try:
# Getting ETCD Value
prev_value = client.get(key).value
except etcd.EtcdKeyNotFound:
# There is not value on ETCD
prev_value = None
# Handle check mode
if module.check_mode:
if ((state == 'absent' and prev_value is not None) or
(state == 'present' and prev_value != value)):
change = True
module.exit_json(changed=change)
if state == 'present' and prev_value is None:
# If 'Present' and there is not a previous value on ETCD
try:
set_res = client.write(key, value)
change = True
except ConnectionError:
module.fail_json(msg="Cannot connect to target.")
    elif state == 'present' and prev_value is not None:
        # If 'Present' and a previous value already exists on ETCD
        if prev_value == value:
            # The value to set is already present
            change = False
        elif override:
            # Override the already existing key on ETCD (override flag is set)
            set_res = client.write(key, value)
            change = True
        else:
            # Refuse to override the already existing key on ETCD without the override flag
            module.fail_json(msg="The Key '%s' is already set with '%s', exiting..." % (key, prev_value))
elif state == 'absent':
if prev_value is not None:
try:
set_res = client.delete(key)
change = True
except ConnectionError:
module.fail_json(msg="Cannot connect to target.")
results = {
'changed': change,
'value': value,
'key': key
}
module.exit_json(**results)
if __name__ == "__main__":
main()
| mit | -4,966,589,647,599,316,000 | 27.564669 | 105 | 0.631695 | false |
meeb/txcloudfiles | examples/account_set_temp_key.py | 1 | 2390 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2012 Joe Harris
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Trivial example of how to set an account temporary URL key. See:
http://docs.rackspace.com/files/api/v1/cf-devguide/content/Set_Account_Metadata-d1a4460.html
'''
import os, sys
# make sure our local copy of txcloudfiles is in sys.path
PATH_TO_TXCF = '../txcloudfiles/'
try:
import txcloudfiles
except ImportError:
txcfpath = os.path.dirname(os.path.realpath(PATH_TO_TXCF))
if txcfpath not in sys.path:
sys.path.insert(0, txcfpath)
from hashlib import sha256
from twisted.internet import reactor
from txcloudfiles import get_auth, UK_ENDPOINT, US_ENDPOINT
def _got_session(session):
print '> got session: %s' % session
random_key = sha256(os.urandom(256)).hexdigest()
def _ok((response, v)):
'''
'response' is a transport.Response() instance.
'v' is boolean True.
'''
print '> got response: %s' % response
print '> set temp url key to:'
print random_key
reactor.stop()
print '> sending request'
# 'key' here is any random string to set as the temporary URL key
session.set_temp_url_key(key=random_key).addCallback(_ok).addErrback(_error)
def _error(e):
'''
'e' here will be a twisted.python.failure.Failure() instance wrapping
a ResponseError() object. ResponseError() instances contain information
about the request to help you find out why it errored through its
ResponseError().request attribute.
'''
print 'error!'
print e.printTraceback()
reactor.stop()
auth = get_auth(UK_ENDPOINT, os.environ.get('TXCFUSR', ''), os.environ.get('TXCFAPI', ''))
auth.get_session().addCallback(_got_session).addErrback(_error)
reactor.run()
'''
EOF
'''
| apache-2.0 | 9,021,300,464,980,471,000 | 28.506173 | 96 | 0.673222 | false |
googleads/google-ads-python | google/ads/googleads/v8/errors/types/partial_failure_error.py | 1 | 1168 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.errors",
marshal="google.ads.googleads.v8",
manifest={"PartialFailureErrorEnum",},
)
class PartialFailureErrorEnum(proto.Message):
r"""Container for enum describing possible partial failure
errors.
"""
class PartialFailureError(proto.Enum):
r"""Enum describing possible partial failure errors."""
UNSPECIFIED = 0
UNKNOWN = 1
PARTIAL_FAILURE_MODE_REQUIRED = 2
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -6,045,782,478,356,995,000 | 29.736842 | 74 | 0.708048 | false |
sequana/sequana | sequana/modules_report/bwa_bam_to_fastq.py | 1 | 3230 | # coding: utf-8
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <[email protected]>
# Dimitri Desvillechabrol <[email protected]>,
# <[email protected]>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Module to write coverage report"""
import os
import glob
import io
from sequana.modules_report.base_module import SequanaBaseModule
from sequana.utils import config
from sequana.lazy import pandas as pd
from sequana.lazy import pylab
import colorlog
logger = colorlog.getLogger(__name__)
from sequana.utils.datatables_js import DataTable
class BWABAMtoFastQModule(SequanaBaseModule):
""" Write HTML report of BWA mapping (phix)"""
def __init__(self, input_directory, output_filename=None):
"""
:param input_directory: the directory of the bwa_bam_to_fastq output
:param output_filename: if not provided, the HTML is not created.
"""
super().__init__()
self.directory = input_directory + os.sep
self.create_report_content()
if output_filename:
self.create_html(output_filename)
def create_report_content(self):
""" Generate the sections list to fill the HTML report.
"""
self.sections = list()
self.add_stats()
def _get_html_stats(self):
from sequana.tools import StatsBAM2Mapped
from easydev import precision
data = StatsBAM2Mapped(self.directory + "bwa_mem_stats.json").data
html = "Reads with Phix: %s %%<br>" % precision(data['contamination'], 3)
# add HTML table
if "R2_mapped" in data.keys():
df = pd.DataFrame({
'R1': [data['R1_mapped'], data['R1_unmapped']],
'R2': [data['R2_mapped'], data['R2_unmapped']]})
else:
df = pd.DataFrame({
'R1': [data['R1_mapped'], data['R1_unmapped']]})
df.index = ['mapped', 'unmapped']
datatable = DataTable(df, "bwa_bam")
datatable.datatable.datatable_options = {
'scrollX': '300px',
'pageLength': 30,
'scrollCollapse': 'true',
'dom': 'irtpB',
"paging": "false",
'buttons': ['copy', 'csv']}
js = datatable.create_javascript_function()
html_tab = datatable.create_datatable(float_format='%.3g')
#html += "{} {}".format(html_tab, js)
html += "Unpaired: %s <br>" % data['unpaired']
html += "duplicated: %s <br>" % data['duplicated']
return html
def _get_html_mapped_stats(self):
html = ""
return html
def add_stats(self):
html1 = self._get_html_stats()
html2 = self._get_html_mapped_stats()
self.sections.append({
"name": "Stats inputs",
"anchor": "stats",
"content": html1+html2
})
| bsd-3-clause | 6,454,543,908,194,447,000 | 30.666667 | 81 | 0.586378 | false |
ramondiez/machine-learning | ex2/plotDecisionBoundary.py | 1 | 1679 | '''
Created on 20 feb. 2017
@author: fara
'''
import numpy as np
from matplotlib import pyplot as plt
from mapFeature import mapFeature
from show import show
def plotDecisionBoundary(ax,theta, X, y):
'''
%PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
%the decision boundary defined by theta
% PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
% positive examples and o for the negative examples. X is assumed to be
% a either
% 1) Mx3 matrix, where the first column is an all-ones column for the
% intercept.
% 2) MxN, N>3 matrix, where the first column is all-ones
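
    Example (illustrative; ``theta``, ``X`` and ``y`` come from the exercise data):
        fig, ax = plt.subplots()
        plotDecisionBoundary(ax, theta, X, y)
        plt.show()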
'''
if X.shape[1] <= 3:
# Only need 2 points to define a line, so choose two endpoints
plot_x = np.array([min(X[:, 2]), max(X[:, 2])])
# Calculate the decision boundary line
plot_y = (-1./theta[2])*(theta[1]*plot_x + theta[0])
# Plot, and adjust axes for better viewing
ax.plot(plot_x, plot_y)
# Legend, specific for the exercise
plt.legend(['Admitted', 'Not admitted'],loc='upper right', fontsize='x-small', numpoints=1)
plt.axis([30, 100, 30, 100])
else:
# Here is the grid range
u = np.linspace(-1, 1.5, 50)
v = np.linspace(-1, 1.5, 50)
z= np.array([mapFeature(u[i], v[j]).dot(theta) for i in range(len(u)) for j in range(len(v))])
#Reshape to get a 2D array
z=np.reshape(z, (50, 50))
#Draw the plot
plt.contour(u,v,z, levels=[0.0])
| gpl-3.0 | -6,819,960,103,957,050,000 | 29 | 102 | 0.564622 | false |
n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/NFR4XBoot/ubi_reader/ubi/block/sort.py | 5 | 2187 |
def list_by_list(blist, slist):
slist_blocks = []
for block in blist:
if block in slist:
slist_blocks.append(block)
return slist_blocks
def by_image_seq(blocks, image_seq):
seq_blocks = []
for block in blocks:
if blocks[block].ec_hdr.image_seq == image_seq:
seq_blocks.append(block)
return seq_blocks
def by_range(blocks, block_range):
peb_range = range(block_range[0], block_range[1])
return [ i for i in blocks if i in peb_range ]
def by_leb(blocks):
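    # Build a list indexed by logical erase block (LEB) number: slot i holds the
    # physical block (PEB) number mapped to LEB i, and 'x' marks LEBs with no
    # known block. The list is grown on demand because LEB numbers can exceed
    # the number of blocks. (Note: the second `return sorted(...)` below is
    # unreachable.)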
slist_len = len(blocks)
slist = ['x'] * slist_len
for block in blocks:
if blocks[block].leb_num >= slist_len:
add_elements = blocks[block].leb_num - slist_len + 1
slist += ['x'] * add_elements
slist_len = len(slist)
slist[blocks[block].leb_num] = block
return slist
return sorted(blocks.iterkeys(), key=lambda x: blocks[x].leb_num)
def by_vol_id(blocks, slist = None):
vol_blocks = {}
for i in blocks:
if slist and i not in slist:
continue
elif not blocks[i].is_valid:
continue
if blocks[i].vid_hdr.vol_id not in vol_blocks:
vol_blocks[blocks[i].vid_hdr.vol_id] = []
vol_blocks[blocks[i].vid_hdr.vol_id].append(blocks[i].peb_num)
return vol_blocks
def clean_bad(blocks, slist = None):
clean_blocks = []
for i in range(0, len(blocks)):
if slist and i not in slist:
continue
if blocks[i].is_valid:
clean_blocks.append(i)
return clean_blocks
def by_type(blocks, slist = None):
layout = []
data = []
int_vol = []
unknown = []
for i in blocks:
if slist and i not in slist:
continue
if blocks[i].is_vtbl and blocks[i].is_valid:
layout.append(i)
elif blocks[i].is_internal_vol and blocks[i].is_valid:
int_vol.append(i)
elif blocks[i].is_valid:
data.append(i)
else:
unknown.append(i)
return (layout,
data,
int_vol,
unknown) | gpl-2.0 | 8,051,677,869,966,243,000 | 23.752941 | 70 | 0.54321 | false |
skosukhin/spack | var/spack/repos/builtin/packages/gmake/package.py | 1 | 2179 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gmake(AutotoolsPackage):
"""GNU Make is a tool which controls the generation of executables and
other non-source files of a program from the program's source files."""
homepage = "https://www.gnu.org/software/make/"
url = "https://ftp.gnu.org/gnu/make/make-4.2.1.tar.gz"
version('4.2.1', '7d0dcb6c474b258aab4d54098f2cf5a7')
version('4.0', 'b5e558f981326d9ca1bfdb841640721a')
variant('guile', default=False, description='Support GNU Guile for embedded scripting')
depends_on('guile', when='+guile')
build_directory = 'spack-build'
def configure_args(self):
args = []
if '+guile' in self.spec:
args.append('--with-guile')
else:
args.append('--without-guile')
return args
@run_after('install')
def symlink_gmake(self):
with working_dir(self.prefix.bin):
symlink('make', 'gmake')
| lgpl-2.1 | 6,446,346,152,501,126,000 | 37.22807 | 91 | 0.65397 | false |
epeios-q37/epeios | other/exercises/Hangman/workshop/en/l.py | 1 | 1311 | # coding: utf-8
"""
MIT License
Copyright (c) 2019 Claude SIMON (https://q37.info/s/rmnmqd49)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import workshop._.L as workshop
from workshop.en._ import *
def go(globals):
workshop.main(lambda dom: workshop.Core(dom),globals,USER_ITEM_LABELS)
| agpl-3.0 | 4,384,650,867,266,639,000 | 38.96875 | 78 | 0.767353 | false |
carlshan/ml_workflow | datascience_tools/modeling/workflow_model_setup.py | 1 | 2346 | from workflow_diagnostics import get_diagnostics_dict
from workflow_util import upload_to_s3
from sklearn import preprocessing
import cPickle as pickle
import pandas as pd
import os
def run_model(training, testing, features, outcome, clf,
clf_name, normalize=True, verbose=True):
# NOTE: You should set the clf seed ahead of time
if verbose:
print 'Starting training of: {}'.format(clf_name)
print '----------'
print 'Num Features: {}'.format(len(features))
print 'Shape of Training: {}'.format(training.shape)
print 'Shape of Testing: {}'.format(testing.shape)
print 'Outcome: {}'.format(outcome)
X_train, y_train = training[features].values, training[outcome].values
X_test = testing[features].values
if normalize:
X_train = preprocessing.StandardScaler().fit(X_train).transform(X_train)
X_test = preprocessing.StandardScaler().fit(X_test).transform(X_test)
fitted_clf = clf.fit(X_train, y_train)
if verbose:
print 'Finished Training'
print '\n'
print 'Starting Testing:'
print '----------'
predicted_probabilities = fitted_clf.predict_proba(X_test)
if verbose:
print 'Finished Testing...\n'
return fitted_clf, predicted_probabilities
def run_and_output_model_to_s3(training, testing, features, outcome, clf, clf_name, s3_path,
verbose=True, **kwargs):
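    # Train and score the model, pickle the fitted classifier into a temporary
    # local folder, upload the pickle to S3, then append a row of diagnostics
    # to ../results/results.csv and clean up the temporary folder.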
fitted_clf, predicted_probs = run_model(training, testing, features, outcome, clf,
clf_name, verbose)
#Pickling happens here
os.mkdir('../results/temp/')
filepath = os.path.join('../results/temp', clf_name + '.pkl')
pickle.dump(fitted_clf, open(filepath, 'wb'))
print 'Uploading to S3 at {}'.format(s3_path)
upload_to_s3('../results/temp', clf_name + '.pkl', s3_path = s3_path)
print 'Done uploading {} to s3 \n'.format(filepath)
os.remove(filepath)
os.rmdir('../results/temp/')
# Putting the diagnostics dict into a dataframe and saving to results folder
diagnostics_dict = get_diagnostics_dict(fitted_clf, testing, features, outcome, clf_name, **kwargs)
results_df = pd.read_csv('../results/results.csv')
results_df = results_df.append([diagnostics_dict])
results_df.to_csv(path_or_buf='../results/results.csv', index=False)
return diagnostics_dict
| mit | -8,404,296,448,362,090,000 | 37.459016 | 103 | 0.665388 | false |
akretion/odoo | odoo/tools/image.py | 10 | 15192 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import codecs
import io
from PIL import Image
from PIL import ImageEnhance
from random import randrange
# Preload PIL with the minimal subset of image formats we need
from odoo.tools import pycompat
Image.preinit()
Image._initialized = 2
# Maps only the 6 first bits of the base64 data, accurate enough
# for our purpose and faster than decoding the full blob first
FILETYPE_BASE64_MAGICWORD = {
b'/': 'jpg',
b'R': 'gif',
b'i': 'png',
b'P': 'svg+xml',
}
# ----------------------------------------
# Image resizing
# ----------------------------------------
def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=False, upper_limit=False):
""" Function to resize an image. The image will be resized to the given
size, while keeping the aspect ratios, and holes in the image will be
filled with transparent background. The image will not be stretched if
smaller than the expected size.
Steps of the resizing:
- Compute width and height if not specified.
- if avoid_if_small: if both image sizes are smaller than the requested
sizes, the original image is returned. This is used to avoid adding
transparent content around images that we do not want to alter but
just resize if too big. This is used for example when storing images
in the 'image' field: we keep the original image, resized to a maximal
size, without adding transparent content around it if smaller.
    - create a thumbnail of the source image by using the thumbnail
function. Aspect ratios are preserved when using it. Note that if the
source image is smaller than the expected size, it will not be
extended, but filled to match the size.
- create a transparent background that will hold the final image.
- paste the thumbnail on the transparent background and center it.
:param base64_source: base64-encoded version of the source
image; if False, returns False
:param size: 2-tuple(width, height). A None value for any of width or
height mean an automatically computed value based respectively
on height or width of the source image.
:param encoding: the output encoding
:param filetype: the output filetype, by default the source image's
:type filetype: str, any PIL image format (supported for creation)
:param avoid_if_small: do not resize if image height and width
are smaller than the expected size.
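
    Example (illustrative; ``b64_image`` is assumed to hold a base64-encoded
    image)::

        resized = image_resize_image(b64_image, size=(512, None))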
"""
if not base64_source:
return False
    # Return unmodified content if no resize or if we detect the first 6 bits of '<'
# (0x3C) for SVG documents - This will bypass XML files as well, but it's
# harmless for these purposes
if size == (None, None) or base64_source[:1] == b'P':
return base64_source
image_stream = io.BytesIO(codecs.decode(base64_source, encoding))
image = Image.open(image_stream)
# store filetype here, as Image.new below will lose image.format
filetype = (filetype or image.format).upper()
filetype = {
'BMP': 'PNG',
}.get(filetype, filetype)
asked_width, asked_height = size
if upper_limit:
if asked_width:
if asked_width >= image.size[0]:
asked_width = image.size[0]
if asked_height:
if asked_height >= image.size[1]:
asked_height = image.size[1]
if image.size[0] >= image.size[1]:
asked_height = None
else:
asked_width = None
if asked_width is None and asked_height is None:
return base64_source
if asked_width is None:
asked_width = int(image.size[0] * (float(asked_height) / image.size[1]))
if asked_height is None:
asked_height = int(image.size[1] * (float(asked_width) / image.size[0]))
size = asked_width, asked_height
# check image size: do not create a thumbnail if avoiding smaller images
if avoid_if_small and image.size[0] <= size[0] and image.size[1] <= size[1]:
return base64_source
if image.size != size:
image = image_resize_and_sharpen(image, size, upper_limit=upper_limit)
if image.mode not in ["1", "L", "P", "RGB", "RGBA"] or (filetype == 'JPEG' and image.mode == 'RGBA'):
image = image.convert("RGB")
background_stream = io.BytesIO()
image.save(background_stream, filetype)
return codecs.encode(background_stream.getvalue(), encoding)
def image_resize_and_sharpen(image, size, preserve_aspect_ratio=False, factor=2.0, upper_limit=False):
"""
Create a thumbnail by resizing while keeping ratio.
A sharpen filter is applied for a better looking result.
:param image: PIL.Image.Image()
:param size: 2-tuple(width, height)
:param preserve_aspect_ratio: boolean (default: False)
:param factor: Sharpen factor (default: 2.0)
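
    Example (illustrative; ``img`` is assumed to be a PIL.Image.Image)::

        thumbnail = image_resize_and_sharpen(img, (128, 128))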
"""
origin_mode = image.mode
if image.mode != 'RGBA':
image = image.convert('RGBA')
image.thumbnail(size, Image.ANTIALIAS)
if preserve_aspect_ratio:
size = image.size
sharpener = ImageEnhance.Sharpness(image)
resized_image = sharpener.enhance(factor)
# create a transparent image for background and paste the image on it
if upper_limit:
image = Image.new('RGBA', (size[0], size[1]-3), (255, 255, 255, 0)) # FIXME temporary fix for trimming the ghost border.
else:
image = Image.new('RGBA', size, (255, 255, 255, 0))
image.paste(resized_image, ((size[0] - resized_image.size[0]) // 2, (size[1] - resized_image.size[1]) // 2))
if image.mode != origin_mode:
image = image.convert(origin_mode)
return image
def image_save_for_web(image, fp=None, format=None):
"""
Save image optimized for web usage.
:param image: PIL.Image.Image()
:param fp: File name or file object. If not specified, a bytestring is returned.
:param format: File format if could not be deduced from image.
"""
opt = dict(format=image.format or format)
if image.format == 'PNG':
opt.update(optimize=True)
if image.mode != 'P':
# Floyd Steinberg dithering by default
image = image.convert('RGBA').convert('P', palette=Image.WEB, colors=256)
elif image.format == 'JPEG':
opt.update(optimize=True, quality=80)
if fp:
image.save(fp, **opt)
else:
img = io.BytesIO()
image.save(img, **opt)
return img.getvalue()
def image_resize_image_big(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=True):
""" Wrapper on image_resize_image, to resize images larger than the standard
'big' image size: 1024x1024px.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_medium(base64_source, size=(128, 128), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'medium'
    image size: 128x128.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_small(base64_source, size=(64, 64), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'small' image
    size: 64x64.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
# ----------------------------------------
# Crop Image
# ----------------------------------------
def crop_image(data, type='top', ratio=False, size=None, image_format=None):
""" Used for cropping image and create thumbnail
:param data: base64 data of image.
        :param type: cropping position to use.
            Possible values: 'top', 'center', 'bottom'
:param ratio: Cropping ratio
e.g for (4,3), (16,9), (16,10) etc
send ratio(1,1) to generate square image
:param size: Resize image to size
e.g (200, 200)
after crop resize to 200x200 thumbnail
:param image_format: return image format PNG,JPEG etc
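
        Example (illustrative; ``b64`` is assumed to hold base64-encoded image
        data)::

            thumb = crop_image(b64, type='center', ratio=(1, 1), size=(200, 200))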
"""
if not data:
return False
image_stream = Image.open(io.BytesIO(base64.b64decode(data)))
output_stream = io.BytesIO()
w, h = image_stream.size
new_h = h
new_w = w
if ratio:
w_ratio, h_ratio = ratio
new_h = (w * h_ratio) // w_ratio
new_w = w
if new_h > h:
new_h = h
new_w = (h * w_ratio) // h_ratio
image_format = image_format or image_stream.format or 'JPEG'
if type == "top":
cropped_image = image_stream.crop((0, 0, new_w, new_h))
cropped_image.save(output_stream, format=image_format)
elif type == "center":
cropped_image = image_stream.crop(((w - new_w) // 2, (h - new_h) // 2, (w + new_w) // 2, (h + new_h) // 2))
cropped_image.save(output_stream, format=image_format)
elif type == "bottom":
cropped_image = image_stream.crop((0, h - new_h, new_w, h))
cropped_image.save(output_stream, format=image_format)
else:
raise ValueError('ERROR: invalid value for crop_type')
if size:
thumbnail = Image.open(io.BytesIO(output_stream.getvalue()))
output_stream.truncate(0)
output_stream.seek(0)
thumbnail.thumbnail(size, Image.ANTIALIAS)
thumbnail.save(output_stream, image_format)
return base64.b64encode(output_stream.getvalue())
# ----------------------------------------
# Colors
# ---------------------------------------
def image_colorize(original, randomize=True, color=(255, 255, 255)):
""" Add a color to the transparent background of an image.
    :param original: binary content of the original image file
:param randomize: randomize the background color
:param color: background-color, if not randomize
"""
# create a new image, based on the original one
original = Image.open(io.BytesIO(original))
image = Image.new('RGB', original.size)
# generate the background color, past it as background
if randomize:
color = (randrange(32, 224, 24), randrange(32, 224, 24), randrange(32, 224, 24))
image.paste(color, box=(0, 0) + original.size)
image.paste(original, mask=original)
# return the new image
buffer = io.BytesIO()
image.save(buffer, 'PNG')
return buffer.getvalue()
# ----------------------------------------
# Misc image tools
# ---------------------------------------
def image_get_resized_images(base64_source, return_big=False, return_medium=True, return_small=True,
big_name='image', medium_name='image_medium', small_name='image_small',
avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False, sizes={}):
""" Standard tool function that returns a dictionary containing the
big, medium and small versions of the source image. This function
is meant to be used for the methods of functional fields for
models using images.
Default parameters are given to be used for the getter of functional
image fields, for example with res.users or res.partner. It returns
only image_medium and image_small values, to update those fields.
:param base64_source: base64-encoded version of the source
image; if False, all returned values will be False
:param return_{..}: if set, computes and return the related resizing
of the image
:param {..}_name: key of the resized image in the return dictionary;
'image', 'image_medium' and 'image_small' by default.
:param avoid_resize_[..]: see avoid_if_small parameter
:return return_dict: dictionary with resized images, depending on
previous parameters.
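
        Example (illustrative; ``b64`` is assumed to hold a base64-encoded
        image)::

            vals = image_get_resized_images(b64)
            # vals now contains 'image_medium' and 'image_small' entries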
"""
return_dict = dict()
size_big = sizes.get(big_name, (1024, 1024))
size_medium = sizes.get(medium_name, (128, 128))
size_small = sizes.get(small_name, (64, 64))
if isinstance(base64_source, pycompat.text_type):
base64_source = base64_source.encode('ascii')
if return_big:
return_dict[big_name] = image_resize_image_big(base64_source, avoid_if_small=avoid_resize_big, size=size_big)
if return_medium:
return_dict[medium_name] = image_resize_image_medium(base64_source, avoid_if_small=avoid_resize_medium, size=size_medium)
if return_small:
return_dict[small_name] = image_resize_image_small(base64_source, avoid_if_small=avoid_resize_small, size=size_small)
return return_dict
def image_resize_images(vals, big_name='image', medium_name='image_medium', small_name='image_small', sizes={}):
""" Update ``vals`` with image fields resized as expected. """
if vals.get(big_name):
vals.update(image_get_resized_images(vals[big_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False, sizes=sizes))
elif vals.get(medium_name):
vals.update(image_get_resized_images(vals[medium_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=True, avoid_resize_small=False, sizes=sizes))
elif vals.get(small_name):
vals.update(image_get_resized_images(vals[small_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=True, avoid_resize_small=True, sizes=sizes))
elif big_name in vals or medium_name in vals or small_name in vals:
vals[big_name] = vals[medium_name] = vals[small_name] = False
def image_data_uri(base64_source):
"""This returns data URL scheme according RFC 2397
(https://tools.ietf.org/html/rfc2397) for all kind of supported images
(PNG, GIF, JPG and SVG), defaulting on PNG type if not mimetype detected.
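
    Example (illustrative; ``b64`` is assumed to hold base64-encoded PNG
    bytes)::

        uri = image_data_uri(b64)  # 'data:image/png;base64,...'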
"""
return 'data:image/%s;base64,%s' % (
FILETYPE_BASE64_MAGICWORD.get(base64_source[:1], 'png'),
base64_source.decode(),
)
if __name__=="__main__":
import sys
assert len(sys.argv)==3, 'Usage to Test: image.py SRC.png DEST.png'
img = base64.b64encode(open(sys.argv[1],'rb').read())
new = image_resize_image(img, (128,100))
open(sys.argv[2], 'wb').write(base64.b64decode(new))
| agpl-3.0 | 2,528,655,349,134,123,500 | 44.214286 | 132 | 0.634742 | false |
kit-cel/gr-dab | python/qa_mp4_encode_sb.py | 1 | 2634 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Moritz Luca Schmid, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT).
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import dab_swig as dab
from gnuradio import audio
import os
class qa_mp4_encode_sb (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
    # loopback test - manual check that encoded AAC frames are recognized and extracted properly by the decoder and that the audio is played correctly
def test_001_t(self):
if os.path.exists("debug/PCM_left.dat") and os.path.exists("debug/PCM_right.dat"):
self.src_left = blocks.file_source_make(gr.sizeof_float, "debug/PCM_left.dat")
self.src_right = blocks.file_source_make(gr.sizeof_float, "debug/PCM_right.dat")
self.f2s_1 = blocks.float_to_short_make(1, 32767)
self.f2s_2 = blocks.float_to_short_make(1, 32767)
self.mp4_encode = dab.mp4_encode_sb_make(14, 2, 32000, 1)
self.mp4_decode = dab.mp4_decode_bs_make(14)
self.s2f_1 = blocks.short_to_float_make(1, 32767)
self.s2f_2 = blocks.short_to_float_make(1, 32767)
self.audio = audio.sink_make(32000)
self.tb.connect(self.src_left, self.f2s_1, (self.mp4_encode, 0))
self.tb.connect(self.src_right, self.f2s_2, (self.mp4_encode, 1))
self.tb.connect(self.mp4_encode, self.mp4_decode, self.s2f_1, (self.audio, 0))
self.tb.connect((self.mp4_decode, 1), self.s2f_2, (self.audio, 1))
self.tb.run ()
# check data
else:
log = gr.logger("log")
log.debug("debug file not found - skipped test")
log.set_level("WARN")
pass
if __name__ == '__main__':
gr_unittest.run(qa_mp4_encode_sb, "qa_mp4_encode_sb.xml")
| gpl-3.0 | -2,318,910,913,169,316,400 | 40.15625 | 135 | 0.657175 | false |
jjdmol/LOFAR | SAS/ResourceAssignment/ResourceAssignmentEstimator/resource_estimators/base_resource_estimator.py | 1 | 3118 | # base_resource_estimator.py
#
# Copyright (C) 2016
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id: base_resource_estimator.py 33534 2016-02-08 14:28:26Z schaap $
""" Base class for Resource Estimators
"""
import logging
from datetime import datetime
from lofar.common.datetimeutils import totalSeconds
from datetime import datetime, timedelta
from lofar.parameterset import parameterset
logger = logging.getLogger(__name__)
class BaseResourceEstimator(object):
""" Base class for all other resource estmiater classes
"""
def __init__(self, name):
self.name = name
self.required_keys = ()
def _checkParsetForRequiredKeys(self, parset):
""" Check if all required keys needed are available """
logger.debug("required keys: %s" % ', '.join(self.required_keys))
logger.debug("parset keys: %s" % ', '.join(parset.keys()))
missing_keys = set(self.required_keys) - set(parset.keys())
if missing_keys:
logger.error("missing keys: %s" % ', '.join(missing_keys))
return False
return True
def _getDateTime(self, date_time):
return datetime.strptime(date_time, '%Y-%m-%d %H:%M:%S')
def _getDuration(self, start, end):
startTime = self._getDateTime(start)
endTime = self._getDateTime(end)
if startTime >= endTime:
logger.warning("startTime is after endTime")
return 1 ##TODO To prevent divide by zero later
return totalSeconds(endTime - startTime)
    #TODO check if this makes duration = int(parset.get('duration', 0)) as a key redundant?
def _calculate(self, parset, input_files={}):
raise NotImplementedError('calculate() in base class is called. Please implement calculate() in your subclass')
def verify_and_estimate(self, parset, input_files={}):
""" Create estimates for a single process based on its parset and input files"""
if self._checkParsetForRequiredKeys(parset):
estimates = self._calculate(parameterset(parset), input_files)
else:
raise ValueError('The parset is incomplete')
result = {}
result[self.name] = {}
result[self.name]['storage'] = estimates['storage']
result[self.name]['bandwidth'] = estimates['bandwidth']
return result
| gpl-3.0 | 7,776,506,931,752,656,000 | 40.026316 | 119 | 0.679602 | false |
midokura/python-midonetclient | src/midonetclient/tenant.py | 1 | 2329 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Midokura PTE LTD.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ryu Ishimoto <[email protected]>, Midokura
from midonetclient import vendor_media_type
from midonetclient.bridge import Bridge
from midonetclient.chain import Chain
from midonetclient.port_group import PortGroup
from midonetclient.resource_base import ResourceBase
from midonetclient.router import Router
class Tenant(ResourceBase):
media_type = vendor_media_type.APPLICATION_TENANT_JSON
def __init__(self, uri, dto, auth):
super(Tenant, self).__init__(uri, dto, auth)
def get_name(self):
return self.dto['name']
def get_id(self):
return self.dto['id']
def id(self, id):
self.dto['id'] = id
return self
def name(self, name):
self.dto['name'] = name
return self
def get_bridges(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_BRIDGE_COLLECTION_JSON}
return self.get_children(self.dto['bridges'], query, headers, Bridge)
def get_chains(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_CHAIN_COLLECTION_JSON}
return self.get_children(self.dto['chains'], query, headers, Chain)
def get_port_groups(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_PORTGROUP_COLLECTION_JSON}
return self.get_children(self.dto['portGroups'], query, headers,
PortGroup)
def get_routers(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_ROUTER_COLLECTION_JSON}
return self.get_children(self.dto['routers'], query, headers, Router)
| apache-2.0 | -1,329,265,309,934,497,500 | 32.753623 | 77 | 0.674109 | false |
pudo-attic/ted-xml | parse.py | 1 | 6683 | import os
from lxml import etree
from pprint import pprint
from forms.parseutil import ted_documents, Extractor
from collections import defaultdict
from common import engine, documents_table, contracts_table, cpvs_table, references_table
def select_form(form, lang):
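    # A TED notice ships one form per language: prefer the English version,
    # otherwise fall back to the notice's original language, and if there is
    # only one child form, use it directly.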
lang = lang.split()[0]
children = form.getchildren()
if len(children) == 1:
return children.pop()
orig = None
for child in children:
if child.get('LG') == 'EN':
return child
if child.get('LG') == lang:
orig = child
return orig
def parse(filename, file_content):
#fh = open(filename, 'rb')
xmldata = file_content.replace('xmlns="', 'xmlns_="')
#fh.close()
#print xmldata.decode('utf-8').encode('ascii', 'replace')
root = etree.fromstring(xmldata)
form = root.find('.//FORM_SECTION')
form.getparent().remove(form)
ext = Extractor(root)
cpvs = [{'code': e.get('CODE'), 'text': e.text} for e in root.findall('.//NOTICE_DATA/ORIGINAL_CPV')]
ext.ignore('./CODED_DATA_SECTION/NOTICE_DATA/ORIGINAL_CPV')
refs = [e.text for e in root.findall('.//NOTICE_DATA/REF_NOTICE/NO_DOC_OJS')]
ext.ignore('./CODED_DATA_SECTION/NOTICE_DATA/REF_NOTICE/NO_DOC_OJS')
data = {
'technical_reception_id': ext.text('./TECHNICAL_SECTION/RECEPTION_ID'),
'technical_comments': ext.text('./TECHNICAL_SECTION/COMMENTS'),
'technical_deletion_date': ext.text('./TECHNICAL_SECTION/DELETION_DATE'),
'technical_form_lang': ext.text('./TECHNICAL_SECTION/FORM_LG_LIST'),
'reception_id': ext.text('./TECHNICAL_SECTION/RECEPTION_ID'),
'oj_collection': ext.text('.//REF_OJS/COLL_OJ'),
'oj_number': ext.text('.//REF_OJS/NO_OJ'),
'oj_date': ext.text('.//REF_OJS/DATE_PUB'),
'doc_no': ext.text('.//NOTICE_DATA/NO_DOC_OJS'),
'doc_url': ext.text('.//NOTICE_DATA//URI_DOC[@LG="EN"]') or ext.text('.//NOTICE_DATA//URI_DOC'),
'info_url': ext.text('.//NOTICE_DATA/IA_URL_GENERAL'),
'etendering_url': ext.text('.//NOTICE_DATA/IA_URL_ETENDERING'),
'orig_language': ext.text('.//NOTICE_DATA/LG_ORIG'),
'orig_nuts': ext.text('.//NOTICE_DATA/ORIGINAL_NUTS'),
'orig_nuts_code': ext.attr('.//NOTICE_DATA/ORIGINAL_NUTS', 'CODE'),
'iso_country': ext.attr('.//NOTICE_DATA/ISO_COUNTRY', 'VALUE'),
'original_cpv': cpvs,
'references': refs,
'dispatch_date': ext.text('.//CODIF_DATA/DS_DATE_DISPATCH'),
'request_document_date': ext.text('.//CODIF_DATA/DD_DATE_REQUEST_DOCUMENT'),
'submission_date': ext.text('.//CODIF_DATA/DT_DATE_FOR_SUBMISSION'),
'heading': ext.text('.//CODIF_DATA/HEADING'),
'directive': ext.attr('.//CODIF_DATA/DIRECTIVE', 'VALUE'),
'authority_type_code': ext.attr('.//CODIF_DATA/AA_AUTHORITY_TYPE', 'CODE'),
'authority_type': ext.text('.//CODIF_DATA/AA_AUTHORITY_TYPE'),
'document_type_code': ext.attr('.//CODIF_DATA/TD_DOCUMENT_TYPE', 'CODE'),
'document_type': ext.text('.//CODIF_DATA/TD_DOCUMENT_TYPE'),
'contract_nature_code': ext.attr('.//CODIF_DATA/NC_CONTRACT_NATURE', 'CODE'),
'contract_nature': ext.text('.//CODIF_DATA/NC_CONTRACT_NATURE'),
'procedure_code': ext.attr('.//CODIF_DATA/PR_PROC', 'CODE'),
'procedure': ext.text('.//CODIF_DATA/PR_PROC'),
'regulation_code': ext.attr('.//CODIF_DATA/RP_REGULATION', 'CODE'),
'regulation': ext.text('.//CODIF_DATA/RP_REGULATION'),
'bid_type_code': ext.attr('.//CODIF_DATA/TY_TYPE_BID', 'CODE'),
'bid_type': ext.text('.//CODIF_DATA/TY_TYPE_BID'),
'award_criteria_code': ext.attr('.//CODIF_DATA/AC_AWARD_CRIT', 'CODE'),
'award_criteria': ext.text('.//CODIF_DATA/AC_AWARD_CRIT'),
'main_activities_code': ext.attr('.//CODIF_DATA/MA_MAIN_ACTIVITIES', 'CODE'),
'main_activities': ext.text('.//CODIF_DATA/MA_MAIN_ACTIVITIES'),
'title_text': ext.text('.//ML_TITLES/ML_TI_DOC[@LG="EN"]/TI_TEXT'),
'title_town': ext.text('.//ML_TITLES/ML_TI_DOC[@LG="EN"]/TI_TOWN'),
'title_country': ext.text('.//ML_TITLES/ML_TI_DOC[@LG="EN"]/TI_CY'),
'authority_name': ext.text('./TRANSLATION_SECTION/ML_AA_NAMES/AA_NAME')
}
ext.ignore('./LINKS_SECTION/FORMS_LABELS_LINK')
ext.ignore('./LINKS_SECTION/OFFICIAL_FORMS_LINK')
ext.ignore('./LINKS_SECTION/ORIGINAL_NUTS_LINK')
ext.ignore('./LINKS_SECTION/ORIGINAL_CPV_LINK')
ext.ignore('./LINKS_SECTION/XML_SCHEMA_DEFINITION_LINK')
# TODO: Figure out if we need any of this, even with the forms.
ext.ignore('./CODED_DATA_SECTION/NOTICE_DATA/VALUES_LIST/VALUES/SINGLE_VALUE/VALUE')
ext.ignore('./CODED_DATA_SECTION/NOTICE_DATA/VALUES_LIST')
ext.ignore('./CODED_DATA_SECTION/NOTICE_DATA/VALUES_LIST/VALUES/RANGE_VALUE/VALUE')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/TOWN')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/POSTAL_CODE')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/PHONE')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/ORGANISATION/OFFICIALNAME')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/FAX')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/COUNTRY')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/CONTACT_POINT')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/ATTENTION')
ext.ignore('./TRANSLATION_SECTION/TRANSLITERATIONS/TRANSLITERATED_ADDR/ADDRESS')
ext.audit()
form_ = select_form(form, data['orig_language'])
contracts = []
if form_.tag.startswith('CONTRACT_AWARD_'):
from forms.contract_award import parse_form
contracts = parse_form(form_)
# save to DB
doc_no = data['doc_no']
engine.begin()
cpvs_table.delete(doc_no=doc_no)
references_table.delete(doc_no=doc_no)
contracts_table.delete(doc_no=doc_no)
documents_table.delete(doc_no=doc_no)
for cpv in data.pop('original_cpv'):
cpv['doc_no'] = doc_no
cpvs_table.insert(cpv)
for ref in data.pop('references'):
obj = {'doc_no': doc_no, 'ref': ref}
references_table.insert(obj)
for contract in contracts:
contract['doc_no'] = doc_no
contracts_table.insert(contract)
documents_table.insert(data)
engine.commit()
#pprint(data)
if __name__ == '__main__':
import sys
for file_name, file_content in ted_documents():
parse(file_name, file_content)
#break
#parse_all(sys.argv[1])
| mit | 4,463,501,169,465,063,000 | 46.397163 | 105 | 0.638635 | false |
gregmuellegger/django-superform | django_superform/forms.py | 1 | 13772 | """
This is awesome. And needs more documentation.
To bring some light in the big number of classes in this file:
First there are:
* ``SuperForm``
* ``SuperModelForm``
They are the forms that you probably want to use in your own code. They are
direct subclasses of ``django.forms.Form`` and ``django.forms.ModelForm``
and have the formset functionality of this module baked in. They are ready
to use. Subclass them and be happy.
Then there are:
* ``SuperFormMixin``
* ``SuperModelFormMixin``
These are the mixins you can use if you don't want to subclass from
``django.forms.Form`` for whatever reason. The mixins do not have a metaclass
attached; the ``SuperForm`` and ``SuperModelForm`` classes above have the
relevant metaclass in place that handles the search for
``FormSetField``s.
Here is an example on how you can use this module::
from django import forms
from django_superform import SuperModelForm, FormSetField
from .forms import CommentFormSet
class PostForm(SuperModelForm):
title = forms.CharField()
text = forms.CharField()
comments = FormSetField(CommentFormSet)
# Now you can use the form in the view:
def post_form(request):
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
obj = form.save()
return HttpResponseRedirect(obj.get_absolute_url())
else:
form = PostForm()
return render_to_response('post_form.html', {
'form',
}, context_instance=RequestContext(request))
And yes, thanks for asking, the ``form.is_valid()`` and ``form.save()`` calls
transparently propagate to the defined comments formset and call its
``is_valid()`` and ``save()`` methods. So you don't have to do anything
special in your view!
Now to how you can access the instantiated formsets::
>>> form = PostForm()
>>> form.composite_fields['comments']
    <CommentFormSet: ...>
Or in the template::
{{ form.as_p }}
{{ form.composite_fields.comments.management_form }}
{% for fieldset_form in form.composite_fields.comments %}
{{ fieldset_form.as_p }}
{% endfor %}
You're welcome.
"""
from functools import reduce
from django import forms
from django.forms.forms import DeclarativeFieldsMetaclass, ErrorDict, ErrorList
from django.forms.models import ModelFormMetaclass
from django.utils import six
import copy
from .fields import CompositeField
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
class DeclerativeCompositeFieldsMetaclass(type):
"""
Metaclass that converts FormField and FormSetField attributes to a
dictionary called `composite_fields`. It will also include all composite
fields from parent classes.
"""
def __new__(mcs, name, bases, attrs):
# Collect composite fields from current class.
current_fields = []
for key, value in list(attrs.items()):
if isinstance(value, CompositeField):
current_fields.append((key, value))
attrs.pop(key)
current_fields.sort(key=lambda x: x[1].creation_counter)
attrs['declared_composite_fields'] = OrderedDict(current_fields)
new_class = super(DeclerativeCompositeFieldsMetaclass, mcs).__new__(
mcs, name, bases, attrs)
# Walk through the MRO.
declared_fields = OrderedDict()
for base in reversed(new_class.__mro__):
# Collect fields from base class.
if hasattr(base, 'declared_composite_fields'):
declared_fields.update(base.declared_composite_fields)
# Field shadowing.
for attr, value in base.__dict__.items():
if value is None and attr in declared_fields:
declared_fields.pop(attr)
new_class.base_composite_fields = declared_fields
new_class.declared_composite_fields = declared_fields
return new_class
class SuperFormMetaclass(
DeclerativeCompositeFieldsMetaclass,
DeclarativeFieldsMetaclass):
"""
Metaclass for :class:`~django_superform.forms.SuperForm`.
"""
class SuperModelFormMetaclass(
DeclerativeCompositeFieldsMetaclass,
ModelFormMetaclass):
"""
Metaclass for :class:`~django_superform.forms.SuperModelForm`.
"""
class SuperFormMixin(object):
"""
The base class for all super forms. It does not inherit from any other
classes, so you are free to mix it into any custom form class you have. You
need to use it together with ``SuperFormMetaclass``, like this:
.. code:: python
from django_superform import SuperFormMixin
from django_superform import SuperFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperFormMetaclass,
SuperFormMixin,
MyCustomForm)):
pass
The goal of a superform is to behave just like a normal django form but is
able to take composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
    Cleaning, validation, etc. should work totally transparently. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
def __init__(self, *args, **kwargs):
super(SuperFormMixin, self).__init__(*args, **kwargs)
self._init_composite_fields()
def __getitem__(self, name):
"""
Returns a ``django.forms.BoundField`` for the given field name. It also
returns :class:`~django_superform.boundfield.CompositeBoundField`
instances for composite fields.
"""
if name not in self.fields and name in self.composite_fields:
field = self.composite_fields[name]
return field.get_bound_field(self, name)
return super(SuperFormMixin, self).__getitem__(name)
def add_composite_field(self, name, field):
"""
Add a dynamic composite field to the already existing ones and
        initialize it appropriately.
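
        Example (illustrative; assumes ``CommentFormSet`` is an existing
        formset class)::

            form.add_composite_field('comments', FormSetField(CommentFormSet))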
"""
self.composite_fields[name] = field
self._init_composite_field(name, field)
def get_composite_field_value(self, name):
"""
Return the form/formset instance for the given field name.
"""
field = self.composite_fields[name]
if hasattr(field, 'get_form'):
return self.forms[name]
if hasattr(field, 'get_formset'):
return self.formsets[name]
def _init_composite_field(self, name, field):
if hasattr(field, 'get_form'):
form = field.get_form(self, name)
self.forms[name] = form
if hasattr(field, 'get_formset'):
formset = field.get_formset(self, name)
self.formsets[name] = formset
def _init_composite_fields(self):
"""
Setup the forms and formsets.
"""
# The base_composite_fields class attribute is the *class-wide*
# definition of fields. Because a particular *instance* of the class
# might want to alter self.composite_fields, we create
# self.composite_fields here by copying base_composite_fields.
# Instances should always modify self.composite_fields; they should not
# modify base_composite_fields.
self.composite_fields = copy.deepcopy(self.base_composite_fields)
self.forms = OrderedDict()
self.formsets = OrderedDict()
for name, field in self.composite_fields.items():
self._init_composite_field(name, field)
def full_clean(self):
"""
Clean the form, including all formsets and add formset errors to the
errors dict. Errors of nested forms and formsets are only included if
they actually contain errors.
"""
super(SuperFormMixin, self).full_clean()
for field_name, composite in self.forms.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorDict(composite._errors)
for field_name, composite in self.formsets.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorList(composite._errors)
@property
def media(self):
"""
        Incorporate composite fields' media.
"""
media_list = []
media_list.append(super(SuperFormMixin, self).media)
for composite_name in self.composite_fields.keys():
form = self.get_composite_field_value(composite_name)
media_list.append(form.media)
return reduce(lambda a, b: a + b, media_list)
class SuperModelFormMixin(SuperFormMixin):
"""
Can be used in with your custom form subclasses like this:
.. code:: python
from django_superform import SuperModelFormMixin
from django_superform import SuperModelFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperModelFormMetaclass,
SuperModelFormMixin,
MyCustomModelForm)):
pass
"""
def save(self, commit=True):
"""
When saving a super model form, the nested forms and formsets will be
saved as well.
The implementation of ``.save()`` looks like this:
.. code:: python
saved_obj = self.save_form()
self.save_forms()
self.save_formsets()
return saved_obj
That makes it easy to override it in order to change the order in which
things are saved.
The ``.save()`` method will return only a single model instance even if
        nested forms are saved as well. That keeps the API similar to what
Django's model forms are offering.
If ``commit=False`` django's modelform implementation will attach a
``save_m2m`` method to the form instance, so that you can call it
manually later. When you call ``save_m2m``, the ``save_forms`` and
``save_formsets`` methods will be executed as well so again all nested
        forms are taken care of transparently.
"""
saved_obj = self.save_form(commit=commit)
self.save_forms(commit=commit)
self.save_formsets(commit=commit)
return saved_obj
def _extend_save_m2m(self, name, composites):
additional_save_m2m = []
for composite in composites:
if hasattr(composite, 'save_m2m'):
additional_save_m2m.append(composite.save_m2m)
if not additional_save_m2m:
return
def additional_saves():
for save_m2m in additional_save_m2m:
save_m2m()
# The save() method was called before save_forms()/save_formsets(), so
# we will already have save_m2m() available.
if hasattr(self, 'save_m2m'):
_original_save_m2m = self.save_m2m
else:
def _original_save_m2m():
return None
def augmented_save_m2m():
_original_save_m2m()
additional_saves()
self.save_m2m = augmented_save_m2m
setattr(self, name, additional_saves)
def save_form(self, commit=True):
"""
This calls Django's ``ModelForm.save()``. It only takes care of
saving this actual form, and leaves the nested forms and formsets
alone.
We separate this out of the
:meth:`~django_superform.forms.SuperModelForm.save` method to make
extensibility easier.
"""
return super(SuperModelFormMixin, self).save(commit=commit)
def save_forms(self, commit=True):
saved_composites = []
for name, composite in self.forms.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_forms_m2m', saved_composites)
def save_formsets(self, commit=True):
"""
Save all formsets. If ``commit=False``, it will modify the form's
``save_m2m()`` so that it also calls the formsets' ``save_m2m()``
methods.
"""
saved_composites = []
for name, composite in self.formsets.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_formsets_m2m', saved_composites)
class SuperModelForm(six.with_metaclass(SuperModelFormMetaclass,
SuperModelFormMixin, forms.ModelForm)):
"""
The ``SuperModelForm`` works like a Django ``ModelForm`` but has the
capabilities of nesting like :class:`~django_superform.forms.SuperForm`.
Saving a ``SuperModelForm`` will also save all nested model forms as well.
"""
class SuperForm(six.with_metaclass(SuperFormMetaclass,
SuperFormMixin, forms.Form)):
"""
The base class for all super forms. The goal of a superform is to behave
just like a normal django form but is able to take composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
    Cleaning, validation, etc. should work totally transparently. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
| bsd-3-clause | 3,049,697,306,495,678,000 | 33.865823 | 79 | 0.636364 | false |
DStauffman/dstauffman2 | dstauffman2/puzzles/codefights_2017_01_17.py | 1 | 7196 | r"""Codefight challenge (stringsRearrangement), 2017-01-17, by DStauffman."""
#%% Imports
import doctest
import unittest
#%% Functions - is_str_one_off
def is_str_one_off(str1, str2):
r"""
Determines if strings are only one character different from one another.
Parameters
----------
str1 : str
First string
str2 : str
Second string
Returns
-------
unnamed : bool
Whether strings are one off from each other
Examples
--------
>>> from dstauffman2.puzzles.codefights_2017_01_17 import *
>>> print(is_str_one_off('abc', 'axc'))
True
>>> print(is_str_one_off('abc', 'cba'))
False
"""
# check that the lengths are the same
assert len(str1) == len(str2)
# do a simple test to see if everything is the same
if str1 == str2:
return False
# keep a counter of differences
diff = 0
# loop through the characters
for ix in range(len(str1)):
# if the characters are the same, then continue to the next one
if str1[ix] == str2[ix]:
continue
else:
# if not the same, increment the counter
diff += 1
# check for too many differences
if diff > 1:
return False
# if you got here, then there is exactly one difference and it's a good result
return True
#%% Functions - find_one_offs
def find_one_offs(key, input_array):
r"""
Finds the one-off paths within the list.
Parameters
----------
    key : str
Input key to use when finding elements that are one character away
input_array : list
        List of fixed-length strings to compare against the key
Returns
-------
out : list
Element numbers that are one character off from the given key
Examples
--------
>>> from dstauffman2.puzzles.codefights_2017_01_17 import *
>>> print(find_one_offs('abc', ['abc', 'aac', 'aby']))
[1, 2]
"""
# save the indices in the input where they are only one off from the key
out = [ix for (ix, this_str) in enumerate(input_array) if is_str_one_off(key, this_str)]
return out
#%% Functions - stringsRearrangement
def stringsRearrangement(input_array):
r"""
Finds out if there is a viable path from one string to the next.
Parameters
----------
input_array : list
        List of fixed-length strings to check for an ordering in which consecutive strings differ by exactly one character
Returns
-------
has_path : bool
Flag for whether the list has a one character path
Examples
--------
>>> from dstauffman2.puzzles.codefights_2017_01_17 import *
>>> print(stringsRearrangement(['ab', 'bb', 'aa']))
True
>>> print(stringsRearrangement(['aba', 'bbb', 'bab']))
False
"""
def recursive_solver(key, items, last=0):
r"""
Recursive path solver.
        Using the current key and remaining list, determine if there is a next move
If there is, take it and shorten list
If list is empty, we found a path
If list is not empty, recursively continue
If there is not a path, reverse the last step
"""
# find valid next steps
possible_steps = find_one_offs(key, items)
for this_step in possible_steps:
# if any steps, take the first one and continue recursively
new_key = items.pop(this_step)
done = recursive_solver(new_key, items, last=this_step)
if done:
# solution was found
return done
else:
# if no valid steps, determine if we are done or need to backtrack
if items:
# backtrack
items.insert(last, key)
return False
else:
# success
return True
# no steps and no backtracking
return False
# solve wrapper
for (i, this_key) in enumerate(input_array):
# build a working list of everything except the current starting point
working_list = [value for (j, value) in enumerate(input_array) if j != i]
# recursively solve puzzle
has_path = recursive_solver(this_key, working_list)
if has_path:
return True
return False
#%% Tests - is_str_one_off
class Test_is_str_one_off(unittest.TestCase):
r"""
Tests the is_str_one_off function with the following cases:
TBD
"""
def test_nominal(self):
self.assertTrue(is_str_one_off('aaa', 'aab'))
self.assertTrue(is_str_one_off('aaa', 'aba'))
self.assertTrue(is_str_one_off('aaa', 'baa'))
self.assertTrue(is_str_one_off('ccc', 'cbc'))
def test_char_jumps(self):
self.assertTrue(is_str_one_off('aaa', 'aac'))
self.assertTrue(is_str_one_off('ccc', 'cac'))
def test_not_true(self):
self.assertFalse(is_str_one_off('cc', 'bb'))
self.assertFalse(is_str_one_off('cccc', 'dddd'))
self.assertFalse(is_str_one_off('ccc', 'dce'))
def test_equal(self):
self.assertFalse(is_str_one_off('a', 'a'))
def test_wrong_len(self):
with self.assertRaises(AssertionError):
is_str_one_off('aaa', 'aa')
#%% Tests - find_one_offs
class Test_find_one_offs(unittest.TestCase):
r"""
Tests the find_one_offs function with the following cases:
        Nominal key against a mixed list of matches and non-matches
"""
def test_nominal(self):
ix = find_one_offs('ccc', ['ccd', 'cdc', 'dcc', 'ddd', 'bbb', 'bcc', 'cbc', 'ccb', 'ccc'])
self.assertListEqual(ix, [0, 1, 2, 5, 6, 7])
#%% Tests - stringsRearrangement
class Test_stringsRearrangement(unittest.TestCase):
r"""
Tests the stringsRearrangement function with the following cases:
        Simple lists with and without a valid path
        Longer strings, repeated strings and single-character strings
"""
def test_simple1(self):
self.assertTrue(stringsRearrangement(['aa', 'ab', 'bb']))
def test_simple2(self):
self.assertTrue(stringsRearrangement(['aa', 'bb', 'ba']))
def test_simple3(self):
self.assertFalse(stringsRearrangement(['aa', 'ab', 'ee']))
def test_1(self):
self.assertFalse(stringsRearrangement(['aba', 'bbb', 'bab']))
def test_2(self):
self.assertTrue(stringsRearrangement(['ab', 'bb', 'aa']))
def test_3(self):
self.assertFalse(stringsRearrangement(['q', 'q']))
def test_4(self):
self.assertTrue(stringsRearrangement(['zzzzab', 'zzzzbb', 'zzzzaa']))
def test_5(self):
self.assertFalse(stringsRearrangement(['ab', 'ad', 'ef', 'eg']))
def test_6(self):
self.assertFalse(stringsRearrangement(['abc', 'abx', 'axx', 'abc']))
def test_7(self):
self.assertTrue(stringsRearrangement(['abc', 'abx', 'axx', 'abx', 'abc']))
def test_8(self):
self.assertTrue(stringsRearrangement(['f', 'g', 'a', 'h']))
def test_repeats(self):
self.assertTrue(stringsRearrangement(['abc', 'xbc', 'xxc', 'xbc', 'aby', 'ayy', 'aby']))
#%% Script
if __name__ == '__main__':
# execute unit tests
unittest.main(module='dstauffman2.puzzles.codefights_2017_01_17', exit=False)
# execute doctests
doctest.testmod(verbose=False)
| lgpl-3.0 | -1,356,583,979,382,144,300 | 29.88412 | 98 | 0.592135 | false |
satra/NiPypeold | nipype/interfaces/afni/tests/test_preprocess.py | 1 | 23033 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import warnings
warnings.simplefilter('ignore')
from nipype.testing import *
from nipype.interfaces import afni
from nipype.interfaces.base import InterfaceResult
def afni_not_installed():
''' XXX: This test assumes that AFNI.Info.version will not crash on a system without AFNI installed'''
if afni.Info.version is None:
return True
else:
return False
def test_To3dInputSpec():
inputs_map = dict(infolder = dict(argstr= '%s/*.dcm',
position = -1,
mandatory = True),
outfile = dict(desc = 'converted image file',
argstr = '-prefix %s',
position = -2,
mandatory = True),
filetype = dict(desc = 'type of datafile being converted',
argstr = '-%s'),
skipoutliers = dict(desc = 'skip the outliers check',
argstr = '-skip_outliers'),
assumemosaic = dict(desc = 'assume that Siemens image is mosaic',
argstr = '-assume_dicom_mosaic'),
datatype = dict(desc = 'set output file datatype',
argstr = '-datum %s'),
funcparams = dict(desc = 'parameters for functional data',
argstr = '-time:zt %s alt+z2'))
instance = afni.To3d()
for key, metadata in inputs_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
'''XXX: This test is broken. output_spec does not appear to have the out_file attribute the same way that inputs does
def test_To3dOutputSpec():
outputs_map = dict(out_file = dict(desc = 'converted file'))
instance = afni.To3d()
for key, metadata in outputs_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(instance.output_spec.traits()[key], metakey), value
'''
def test_ThreedrefitInputSpec():
inputs_map = dict(infile = dict(desc = 'input file to 3drefit',
argstr = '%s',
position = -1,
mandatory = True),
deoblique = dict(desc = 'replace current transformation matrix with cardinal matrix',
argstr = '-deoblique'),
xorigin = dict(desc = 'x distance for edge voxel offset',
argstr = '-xorigin %s'),
yorigin = dict(desc = 'y distance for edge voxel offset',
argstr = '-yorigin %s'),
                      zorigin = dict(desc = 'z distance for edge voxel offset',
                                     argstr = '-zorigin %s'))
instance = afni.Threedrefit()
for key, metadata in inputs_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
def test_ThreedresampleInputSpec():
inputs_map = dict(infile = dict(desc = 'input file to 3dresample',
argstr = '-inset %s',
position = -1,
mandatory = True),
outfile = dict(desc = 'output file from 3dresample',
argstr = '-prefix %s',
position = -2,
mandatory = True),
orientation = dict(desc = 'new orientation code',
argstr = '-orient %s'))
instance = afni.Threedresample()
for key, metadata in inputs_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
def test_ThreedTstatInputSpec():
inputs_map = dict(infile = dict(desc = 'input file to 3dTstat',
argstr = '%s',
position = -1,
mandatory = True),
outfile = dict(desc = 'output file from 3dTstat',
argstr = '-prefix %s',
position = -2,
mandatory = True),
options = dict(desc = 'selected statistical output',
argstr = '%s'))
instance = afni.ThreedTstat()
for key, metadata in inputs_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
def test_ThreedAutomaskInputSpec():
inputs_map = dict(infile = dict(desc = 'input file to 3dAutomask',
argstr = '%s',
position = -1,
mandatory = True),
outfile = dict(desc = 'output file from 3dAutomask',
argstr = '-prefix %s',
position = -2,
mandatory = True),
options = dict(desc = 'automask settings',
argstr = '%s'))
instance = afni.ThreedAutomask()
for key, metadata in inputs_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
def test_ThreedvolregInputSpec():
inputs_map = dict(infile = dict(desc = 'input file to 3dvolreg',
argstr = '%s',
position = -1,
mandatory = True),
outfile = dict(desc = 'output file from 3dvolreg',
argstr = '-prefix %s',
position = -2,
mandatory = True),
basefile = dict(desc = 'base file for registration',
argstr = '-base %s',
position = -5),
md1dfile = dict(desc = 'max displacement output file',
argstr = '-maxdisp1D %s',
position = -4),
onedfile = dict(desc = '1D movement parameters output file',
argstr = '-1Dfile %s',
position = -3),
verbose = dict(desc = 'more detailed description of the process',
argstr = '-verbose'),
timeshift = dict(desc = 'time shift to mean slice time offset',
argstr = '-tshift 0'),
copyorigin = dict(desc = 'copy base file origin coords to output',
argstr = '-twodup'),
other = dict(desc = 'other options',
argstr = '%s'))
instance = afni.Threedvolreg()
for key, metadata in inputs_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
def test_ThreedmergeInputSpec():
inputs_map = dict(infile = dict(desc = 'input file to 3dvolreg',
argstr = '%s',
position = -1,
mandatory = True),
outfile = dict(desc = 'output file from 3dvolreg',
argstr = '-prefix %s',
position = -2,
mandatory = True),
doall = dict(desc = 'apply options to all sub-bricks in dataset',
argstr = '-doall'),
blurfwhm = dict(desc = 'FWHM blur value (mm)',
argstr = '-1blur_fwhm %d',
units = 'mm'),
other = dict(desc = 'other options',
argstr = '%s'))
instance = afni.Threedmerge()
for key, metadata in inputs_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
def test_ThreedZcutupInputSpec():
inputs_map = dict(infile = dict(desc = 'input file to 3dZcutup',
argstr = '%s',
position = -1,
mandatory = True),
outfile = dict(desc = 'output file from 3dZcutup',
argstr = '-prefix %s',
position = -2,
mandatory = True),
keep = dict(desc = 'slice range to keep in output',
argstr = '-keep %s'),
other = dict(desc = 'other options',
argstr = '%s'))
instance = afni.ThreedZcutup()
for key, metadata in inputs_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
def test_ThreedAllineateInputSpec():
inputs_map = dict(infile = dict(desc = 'input file to 3dAllineate',
argstr = '-source %s',
position = -1,
mandatory = True),
outfile = dict(desc = 'output file from 3dAllineate',
argstr = '-prefix %s',
position = -2,
mandatory = True),
matrix = dict(desc = 'matrix to align input file',
argstr = '-1dmatrix_apply %s',
position = -3))
instance = afni.ThreedAllineate()
for key, metadata in inputs_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
#@skipif(afni_not_installed)
@skipif(True)
def test_To3d():
cmd = afni.To3d()
yield assert_equal, cmd.cmdline, 'to3d'
# datatype
cmd = afni.To3d(datatype='anat')
yield assert_equal, cmd.cmdline, 'to3d -anat'
cmd = afni.To3d(datatype='epan')
yield assert_equal, cmd.cmdline, 'to3d -epan'
# datum
cmd = afni.To3d()
cmd.inputs.datum = 'float'
yield assert_equal, cmd.cmdline, 'to3d -datum float'
# session
cmd = afni.To3d()
cmd.inputs.session = '/home/bobama'
yield assert_equal, cmd.cmdline, 'to3d -session /home/bobama'
# prefix
cmd = afni.To3d(prefix='foo.nii.gz')
yield assert_equal, cmd.cmdline, 'to3d -prefix foo.nii.gz'
# infiles
cmd = afni.To3d(infiles='/data/*.dcm')
yield assert_equal, cmd.cmdline, 'to3d /data/*.dcm'
# infiles list
cmd = afni.To3d()
infiles = ['data/foo.dcm', 'data/bar.dcm']
cmd.inputs.infiles = infiles
yield assert_equal, cmd.cmdline, 'to3d data/foo.dcm data/bar.dcm'
cmd = afni.To3d()
res = cmd.run(infiles=infiles)
yield assert_equal, res.interface.cmdline, 'to3d data/foo.dcm data/bar.dcm'
# skip_outliers
cmd = afni.To3d(skip_outliers=True)
yield assert_equal, cmd.cmdline, 'to3d -skip_outliers'
# assume_dicom_mosaic
cmd = afni.To3d(assume_dicom_mosaic=True)
yield assert_equal, cmd.cmdline, 'to3d -assume_dicom_mosaic'
# Test slice time params
cmd = afni.To3d()
td = dict(slice_order='zt', nz=12, nt=170, TR=2000, tpattern='alt+z')
cmd.inputs.time_dependencies = td
yield assert_equal, cmd.cmdline, 'to3d -time:zt 12 170 2000 alt+z'
cmd = afni.To3d()
td = dict(slice_order='tz', nt=150, nz=12, TR=2000, tpattern='alt+z')
cmd.inputs.time_dependencies = td
yield assert_equal, cmd.cmdline, 'to3d -time:tz 150 12 2000 alt+z'
# time_dependencies provided as a tuple
# slice_order, nz, nt, TR, tpattern
td = ('zt', 12, 130, 2000, 'alt+z')
cmd = afni.To3d()
cmd.inputs.time_dependencies = td
yield assert_equal, cmd.cmdline, 'to3d -time:zt 12 130 2000.00 alt+z'
    # These tests will fail because they do not specify all required
# args for the time_dependencies
# dict(slice_order='zt', nz=12, nt=150, TR=2000, tpattern='alt+z')
# only slice_order
cmd.inputs.time_dependencies = dict(slice_order='zt')
yield assert_raises, KeyError, getattr, cmd, 'cmdline'
# only slice_order
cmd.inputs.time_dependencies = dict(slice_order='tz')
yield assert_raises, KeyError, getattr, cmd, 'cmdline'
# slice_order and nz
cmd.inputs.time_dependencies = dict(slice_order='zt', nz=12)
yield assert_raises, KeyError, getattr, cmd, 'cmdline'
# slice_order, nz, nt
cmd.inputs.time_dependencies = dict(slice_order='zt', nz=12, nt=150)
yield assert_raises, KeyError, getattr, cmd, 'cmdline'
# slice_order, nz, nt, TR
cmd.inputs.time_dependencies = dict(slice_order='zt', nz=12, nt=150,
TR=2000)
yield assert_raises, KeyError, getattr, cmd, 'cmdline'
# slice_order, nz, nt, tpattern
cmd.inputs.time_dependencies = dict(slice_order='zt', nz=12, nt=150,
tpattern='alt+z')
yield assert_raises, KeyError, getattr, cmd, 'cmdline'
# provide unknown parameters
cmd = afni.To3d(datatype='anat', foo='bar')
yield assert_raises, AttributeError, getattr, cmd, 'cmdline'
# order of params
cmd = afni.To3d(datatype='anat')
cmd.inputs.skip_outliers = True
cmd.inputs.infiles = 'foo.nii'
cmd.inputs.prefix = 'bar.nii'
cmd.inputs.datum = 'float'
realcmd = 'to3d -anat -datum float -prefix bar.nii -skip_outliers foo.nii'
yield assert_equal, cmd.cmdline, realcmd
# result should be InterfaceResult object
cmd = afni.To3d()
res = cmd.run('foo.nii')
yield assert_true, isinstance(res, InterfaceResult)
    # calling run without specifying infile should raise an error
cmd = afni.To3d()
yield assert_raises, AttributeError, cmd.run
@skipif(True)
def test_Threedrefit():
cmd = afni.Threedrefit()
yield assert_equal, cmd.cmdline, '3drefit'
# deoblique
cmd = afni.Threedrefit()
cmd.inputs.deoblique = True
yield assert_equal, cmd.cmdline, '3drefit -deoblique'
# xorigin
cmd = afni.Threedrefit()
cmd.inputs.xorigin = 12.34
yield assert_equal, cmd.cmdline, '3drefit -xorigin 12.34'
# yorigin
cmd = afni.Threedrefit(yorigin=12.34)
yield assert_equal, cmd.cmdline, '3drefit -yorigin 12.34'
# zorigin
cmd = afni.Threedrefit(zorigin=12.34)
yield assert_equal, cmd.cmdline, '3drefit -zorigin 12.34'
# infile
cmd = afni.Threedrefit(infile='foo.nii')
yield assert_equal, cmd.cmdline, '3drefit foo.nii'
# order of params
cmd = afni.Threedrefit(deoblique=True)
cmd.inputs.zorigin = 34.5
cmd.inputs.infile = 'foo.nii'
realcmd = '3drefit -deoblique -zorigin 34.5 foo.nii'
yield assert_equal, cmd.cmdline, realcmd
# provide unknown params
cmd = afni.Threedrefit(foo='bar')
yield assert_raises, AttributeError, getattr, cmd, 'cmdline'
    # calling run without specifying infile should raise an error
cmd = afni.Threedrefit()
yield assert_raises, AttributeError, cmd.run
# result should be InterfaceResult object
cmd = afni.Threedrefit()
res = cmd.run('foo.nii')
yield assert_true, isinstance(res, InterfaceResult)
@skipif(True)
def test_Threedresample():
cmd = afni.Threedresample()
yield assert_equal, cmd.cmdline, '3dresample'
# rsmode
cmd = afni.Threedresample(rsmode='Li')
yield assert_equal, cmd.cmdline, '3dresample -rmode Li'
# orient
cmd = afni.Threedresample()
cmd.inputs.orient = 'lpi'
yield assert_equal, cmd.cmdline, '3dresample -orient lpi'
# gridfile
cmd = afni.Threedresample(gridfile='dset+orig')
yield assert_equal, cmd.cmdline, '3dresample -master dset+orig'
# infile
cmd = afni.Threedresample()
cmd.inputs.infile = 'foo.nii'
yield assert_equal, cmd.cmdline, '3dresample -inset foo.nii'
# outfile
cmd = afni.Threedresample(outfile='bar.nii')
yield assert_equal, cmd.cmdline, '3dresample -prefix bar.nii'
# order of params
cmd = afni.Threedresample(rsmode='Li')
cmd.inputs.orient = 'lpi'
cmd.inputs.infile = 'foo.nii'
cmd.inputs.outfile = 'bar.nii'
realcmd = '3dresample -rmode Li -orient lpi -prefix bar.nii -inset foo.nii'
yield assert_equal, cmd.cmdline, realcmd
# unknown params
cmd = afni.Threedresample(foo='bar')
yield assert_raises, AttributeError, getattr, cmd, 'cmdline'
# infile not specified
cmd = afni.Threedresample(outfile='bar.nii')
yield assert_raises, AttributeError, cmd.run
# outfile not specified
cmd = afni.Threedresample(infile='foo.nii')
yield assert_raises, AttributeError, cmd.run
# result should be InterfaceResult object
cmd = afni.Threedresample()
res = cmd.run(infile='foo.nii', outfile='bar.nii')
yield assert_true, isinstance(res, InterfaceResult)
@skipif(True)
def test_ThreedTstat():
cmd = afni.ThreedTstat()
yield assert_equal, cmd.cmdline, '3dTstat'
# outfile
cmd = afni.ThreedTstat(outfile='bar.nii')
yield assert_equal, cmd.cmdline, '3dTstat -prefix bar.nii'
# infile
cmd = afni.ThreedTstat()
cmd.inputs.infile = 'foo.nii'
yield assert_equal, cmd.cmdline, '3dTstat foo.nii'
# order of params
cmd = afni.ThreedTstat()
cmd.inputs.infile = 'foo.nii'
cmd.inputs.outfile = 'bar.nii'
yield assert_equal, cmd.cmdline, '3dTstat -prefix bar.nii foo.nii'
# unknown params
cmd = afni.ThreedTstat(foo='bar')
yield assert_raises, AttributeError, getattr, cmd, 'cmdline'
# infile not specified
cmd = afni.ThreedTstat()
yield assert_raises, AttributeError, cmd.run
# result should be InterfaceResult object
cmd = afni.ThreedTstat()
res = cmd.run(infile='foo.nii')
yield assert_true, isinstance(res, InterfaceResult)
@skipif(True)
def test_ThreedAutomask():
cmd = afni.ThreedAutomask()
yield assert_equal, cmd.cmdline, '3dAutomask'
# outfile
cmd = afni.ThreedAutomask(outfile='bar.nii')
yield assert_equal, cmd.cmdline, '3dAutomask -prefix bar.nii'
# infile
cmd = afni.ThreedAutomask(infile='foo.nii')
yield assert_equal, cmd.cmdline, '3dAutomask foo.nii'
# order of params
cmd = afni.ThreedAutomask(infile='foo.nii')
cmd.inputs.outfile = 'bar.nii'
yield assert_equal, cmd.cmdline, '3dAutomask -prefix bar.nii foo.nii'
# unknown params
cmd = afni.ThreedAutomask(foo='bar')
yield assert_raises, AttributeError, getattr, cmd, 'cmdline'
# infile not specified
cmd = afni.ThreedAutomask()
yield assert_raises, AttributeError, cmd.run
# result should be InterfaceResult object
cmd = afni.ThreedAutomask()
res = cmd.run(infile='foo.nii')
yield assert_true, isinstance(res, InterfaceResult)
@skipif(True)
def test_Threedvolreg():
cmd = afni.Threedvolreg()
yield assert_equal, cmd.cmdline, '3dvolreg'
# verbose
cmd = afni.Threedvolreg(verbose=True)
yield assert_equal, cmd.cmdline, '3dvolreg -verbose'
# copy_origin
cmd = afni.Threedvolreg(copy_origin=True)
yield assert_equal, cmd.cmdline, '3dvolreg -twodup'
# time_shift
cmd = afni.Threedvolreg()
cmd.inputs.time_shift = 14
yield assert_equal, cmd.cmdline, '3dvolreg -tshift 14'
# basefile
cmd = afni.Threedvolreg()
cmd.inputs.basefile = 5
yield assert_equal, cmd.cmdline, '3dvolreg -base 5'
# md1dfile
cmd = afni.Threedvolreg(md1dfile='foo.nii')
yield assert_equal, cmd.cmdline, '3dvolreg -maxdisp1D foo.nii'
# onedfile
cmd = afni.Threedvolreg(onedfile='bar.nii')
yield assert_equal, cmd.cmdline, '3dvolreg -1Dfile bar.nii'
# outfile
cmd = afni.Threedvolreg(outfile='bar.nii')
yield assert_equal, cmd.cmdline, '3dvolreg -prefix bar.nii'
# infile
cmd = afni.Threedvolreg()
cmd.inputs.infile = 'foo.nii'
yield assert_equal, cmd.cmdline, '3dvolreg foo.nii'
# order of params
cmd = afni.Threedvolreg(infile='foo.nii')
cmd.inputs.time_shift = 14
cmd.inputs.copy_origin = True
cmd.inputs.outfile = 'bar.nii'
realcmd = '3dvolreg -twodup -tshift 14 -prefix bar.nii foo.nii'
yield assert_equal, cmd.cmdline, realcmd
# unknown params
cmd = afni.Threedvolreg(foo='bar')
yield assert_raises, AttributeError, getattr, cmd, 'cmdline'
# infile not specified
cmd = afni.Threedvolreg()
yield assert_raises, AttributeError, cmd.run
# result should be InterfaceResult object
cmd = afni.Threedvolreg()
res = cmd.run(infile='foo.nii')
yield assert_true, isinstance(res, InterfaceResult)
@skipif(True)
def test_Threedmerge():
cmd = afni.Threedmerge()
yield assert_equal, cmd.cmdline, '3dmerge'
# doall
cmd = afni.Threedmerge(doall=True)
yield assert_equal, cmd.cmdline, '3dmerge -doall'
# gblur_fwhm
cmd = afni.Threedmerge()
cmd.inputs.gblur_fwhm = 2.0
yield assert_equal, cmd.cmdline, '3dmerge -1blur_fwhm 2.0'
# outfile
cmd = afni.Threedmerge(outfile='bar.nii')
yield assert_equal, cmd.cmdline, '3dmerge -prefix bar.nii'
# infile
cmd = afni.Threedmerge(infiles='foo.nii')
yield assert_equal, cmd.cmdline, '3dmerge foo.nii'
# infile list
cmd = afni.Threedmerge(infiles=['data/foo.nii', 'data/bar.nii'])
yield assert_equal, cmd.cmdline, '3dmerge data/foo.nii data/bar.nii'
# order of params
cmd = afni.Threedmerge(infiles='foo.nii')
cmd.inputs.outfile = 'bar.nii'
cmd.inputs.doall = True
cmd.inputs.gblur_fwhm = 2.0
realcmd = '3dmerge -doall -1blur_fwhm 2.0 -prefix bar.nii foo.nii'
yield assert_equal, cmd.cmdline, realcmd
# unknown params
cmd = afni.Threedmerge(foo='bar')
yield assert_raises, AttributeError, getattr, cmd, 'cmdline'
# infile not specified
cmd = afni.Threedmerge()
yield assert_raises, AttributeError, cmd.run
# result should be InterfaceResult object
cmd = afni.Threedmerge()
res = cmd.run(infiles='foo.nii')
yield assert_true, isinstance(res, InterfaceResult)
| bsd-3-clause | 5,142,300,293,424,247,000 | 41.972015 | 118 | 0.564712 | false |
openjck/kuma | kuma/wiki/models.py | 1 | 71106 | import hashlib
import json
import sys
import traceback
from datetime import datetime, timedelta
from functools import wraps
import newrelic.agent
import waffle
from constance import config
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import signals
from django.utils.decorators import available_attrs
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from pyquery import PyQuery
from taggit.managers import TaggableManager
from taggit.models import ItemBase, TagBase
from taggit.utils import edit_string_for_tags, parse_tags
from tidings.models import NotificationsMixin
from kuma.attachments.models import Attachment
from kuma.core.cache import memcache
from kuma.core.exceptions import ProgrammingError
from kuma.core.i18n import get_language_mapping
from kuma.core.urlresolvers import reverse
from kuma.search.decorators import register_live_index
from kuma.spam.models import AkismetSubmission, SpamAttempt
from . import kumascript
from .constants import (DEKI_FILE_URL, DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL,
KUMA_FILE_URL, REDIRECT_CONTENT, REDIRECT_HTML,
TEMPLATE_TITLE_PREFIX)
from .content import parse as parse_content
from .content import (H2TOCFilter, H3TOCFilter, SectionTOCFilter,
extract_code_sample, extract_css_classnames,
extract_html_attributes, extract_kumascript_macro_names,
get_content_sections, get_seo_description)
from .exceptions import (DocumentRenderedContentNotAvailable,
DocumentRenderingInProgress, PageMoveError,
SlugCollision, UniqueCollision)
from .jobs import DocumentContributorsJob, DocumentZoneStackJob
from .managers import (DeletedDocumentManager, DocumentAdminManager,
DocumentManager, RevisionIPManager,
TaggedDocumentManager, TransformManager)
from .search import WikiDocumentType
from .signals import render_done
from .templatetags.jinja_helpers import absolutify
from .utils import tidy_content
def cache_with_field(field_name):
"""Decorator for generated content methods.
If the backing model field is null, or kwarg force_fresh is True, call the
decorated method to generate and return the content.
Otherwise, just return the value in the backing model field.
"""
def decorator(fn):
@wraps(fn, assigned=available_attrs(fn))
def wrapper(self, *args, **kwargs):
force_fresh = kwargs.pop('force_fresh', False)
# Try getting the value using the DB field.
field_val = getattr(self, field_name)
if field_val is not None and not force_fresh:
return field_val
# DB field is blank, or we're forced to generate it fresh.
field_val = fn(self, force_fresh=force_fresh)
setattr(self, field_name, field_val)
return field_val
return wrapper
return decorator
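# A minimal usage sketch of cache_with_field (illustrative only; the Document
# model below wires this up for real with backing fields such as body_html):
#
#     class Page(models.Model):                        # hypothetical model
#         body_html = models.TextField(null=True)      # backing cache field
#         @cache_with_field('body_html')
#         def get_body_html(self, *args, **kwargs):
#             # body runs only when the field is empty or force_fresh=True
#             return expensive_render(self.html)       # expensive_render is hypothetical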
def _inherited(parent_attr, direct_attr):
"""Return a descriptor delegating to an attr of the original document.
If `self` is a translation, the descriptor delegates to the attribute
`parent_attr` from the original document. Otherwise, it delegates to the
attribute `direct_attr` from `self`.
Use this only on a reference to another object, like a ManyToMany or a
ForeignKey. Using it on a normal field won't work well, as it'll preclude
the use of that field in QuerySet field lookups. Also, ModelForms that are
passed instance=this_obj won't see the inherited value.
"""
getter = lambda self: (getattr(self.parent, parent_attr)
if self.parent and self.parent.id != self.id
else getattr(self, direct_attr))
setter = lambda self, val: (setattr(self.parent, parent_attr, val)
if self.parent and self.parent.id != self.id
else setattr(self, direct_attr, val))
return property(getter, setter)
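# Illustrative sketch of _inherited (hypothetical attribute names, not fields of
# the models below): a declaration like
#     category = _inherited('category', 'category_field')
# makes reads and writes of translation.category transparently delegate to
# translation.parent.category when the document is a translation, and fall back
# to translation.category_field otherwise.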
def valid_slug_parent(slug, locale):
slug_bits = slug.split('/')
slug_bits.pop()
parent = None
if slug_bits:
parent_slug = '/'.join(slug_bits)
try:
parent = Document.objects.get(locale=locale, slug=parent_slug)
except Document.DoesNotExist:
raise Exception(
ugettext('Parent %s does not exist.' % (
'%s/%s' % (locale, parent_slug))))
return parent
class DocumentTag(TagBase):
"""A tag indexing a document"""
class Meta:
verbose_name = _('Document Tag')
verbose_name_plural = _('Document Tags')
def tags_for(cls, model, instance=None, **extra_filters):
"""
Sadly copied from taggit to work around the issue of not being
able to use the TaggedItemBase class that has tag field already
defined.
"""
kwargs = extra_filters or {}
if instance is not None:
kwargs.update({
'%s__content_object' % cls.tag_relname(): instance
})
return cls.tag_model().objects.filter(**kwargs)
kwargs.update({
'%s__content_object__isnull' % cls.tag_relname(): False
})
return cls.tag_model().objects.filter(**kwargs).distinct()
class TaggedDocument(ItemBase):
"""Through model, for tags on Documents"""
content_object = models.ForeignKey('Document')
tag = models.ForeignKey(DocumentTag, related_name="%(app_label)s_%(class)s_items")
objects = TaggedDocumentManager()
@classmethod
def tags_for(cls, *args, **kwargs):
return tags_for(cls, *args, **kwargs)
class DocumentAttachment(models.Model):
"""
Intermediary between Documents and Attachments. Allows storing the
user who attached a file to a document, and a (unique for that
document) name for referring to the file from the document.
"""
file = models.ForeignKey(Attachment)
# This has to be a string ref to avoid circular import.
document = models.ForeignKey('wiki.Document')
attached_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
name = models.TextField()
class Meta:
db_table = 'attachments_documentattachment'
@register_live_index
class Document(NotificationsMixin, models.Model):
"""A localized knowledgebase document, not revision-specific."""
TOC_FILTERS = {
1: SectionTOCFilter,
2: H2TOCFilter,
3: H3TOCFilter,
4: SectionTOCFilter
}
title = models.CharField(max_length=255, db_index=True)
slug = models.CharField(max_length=255, db_index=True)
# NOTE: Documents are indexed by tags, but tags are edited in Revisions.
# Also, using a custom through table to isolate Document tags from those
# used in other models and apps. (Works better than namespaces, for
# completion and such.)
tags = TaggableManager(through=TaggedDocument)
# Is this document a template or not?
is_template = models.BooleanField(default=False, editable=False,
db_index=True)
# Is this a redirect or not?
is_redirect = models.BooleanField(default=False, editable=False,
db_index=True)
# Is this document localizable or not?
is_localizable = models.BooleanField(default=True, db_index=True)
locale = models.CharField(
max_length=7,
choices=settings.LANGUAGES,
default=settings.WIKI_DEFAULT_LANGUAGE,
db_index=True,
)
# Latest approved revision. L10n dashboard depends on this being so (rather
# than being able to set it to earlier approved revisions). (Remove "+" to
# enable reverse link.)
current_revision = models.ForeignKey('Revision', null=True,
related_name='current_for+')
# The Document I was translated from. NULL if this doc is in the default
# locale or it is nonlocalizable. TODO: validate against
# settings.WIKI_DEFAULT_LANGUAGE.
parent = models.ForeignKey('self', related_name='translations',
null=True, blank=True)
parent_topic = models.ForeignKey('self', related_name='children',
null=True, blank=True)
files = models.ManyToManyField(Attachment,
through=DocumentAttachment)
# JSON representation of Document for API results, built on save
json = models.TextField(editable=False, blank=True, null=True)
# Raw HTML of approved revision's wiki markup
html = models.TextField(editable=False)
# Cached result of kumascript and other offline processors (if any)
rendered_html = models.TextField(editable=False, blank=True, null=True)
# Errors (if any) from the last rendering run
rendered_errors = models.TextField(editable=False, blank=True, null=True)
# Whether or not to automatically defer rendering of this page to a queued
# offline task. Generally used for complex pages that need time
defer_rendering = models.BooleanField(default=False, db_index=True)
# Timestamp when this document was last scheduled for a render
render_scheduled_at = models.DateTimeField(null=True, db_index=True)
# Timestamp when a render for this document was last started
render_started_at = models.DateTimeField(null=True, db_index=True)
# Timestamp when this document was last rendered
last_rendered_at = models.DateTimeField(null=True, db_index=True)
# Maximum age (in seconds) before this document needs re-rendering
render_max_age = models.IntegerField(blank=True, null=True)
# Time after which this document needs re-rendering
render_expires = models.DateTimeField(blank=True, null=True, db_index=True)
# Whether this page is deleted.
deleted = models.BooleanField(default=False, db_index=True)
# Last modified time for the document. Should be equal-to or greater than
# the current revision's created field
modified = models.DateTimeField(auto_now=True, null=True, db_index=True)
body_html = models.TextField(editable=False, blank=True, null=True)
quick_links_html = models.TextField(editable=False, blank=True, null=True)
zone_subnav_local_html = models.TextField(editable=False,
blank=True, null=True)
toc_html = models.TextField(editable=False, blank=True, null=True)
summary_html = models.TextField(editable=False, blank=True, null=True)
summary_text = models.TextField(editable=False, blank=True, null=True)
class Meta(object):
unique_together = (
('parent', 'locale'),
('slug', 'locale'),
)
permissions = (
('view_document', 'Can view document'),
('add_template_document', 'Can add Template:* document'),
('change_template_document', 'Can change Template:* document'),
('move_tree', 'Can move a tree of documents'),
('purge_document', 'Can permanently delete document'),
('restore_document', 'Can restore deleted document'),
)
objects = DocumentManager()
deleted_objects = DeletedDocumentManager()
admin_objects = DocumentAdminManager()
def __unicode__(self):
return u'%s (%s)' % (self.get_absolute_url(), self.title)
@cache_with_field('body_html')
def get_body_html(self, *args, **kwargs):
html = self.rendered_html and self.rendered_html or self.html
sections_to_hide = ('Quick_Links', 'Subnav')
doc = parse_content(html)
for sid in sections_to_hide:
doc = doc.replaceSection(sid, '<!-- -->')
doc.injectSectionIDs()
doc.annotateLinks(base_url=settings.SITE_URL)
return doc.serialize()
@cache_with_field('quick_links_html')
def get_quick_links_html(self, *args, **kwargs):
return self.get_section_content('Quick_Links')
@cache_with_field('zone_subnav_local_html')
def get_zone_subnav_local_html(self, *args, **kwargs):
return self.get_section_content('Subnav')
@cache_with_field('toc_html')
def get_toc_html(self, *args, **kwargs):
if not self.current_revision:
return ''
toc_depth = self.current_revision.toc_depth
if not toc_depth:
return ''
html = self.rendered_html and self.rendered_html or self.html
return (parse_content(html)
.injectSectionIDs()
.filter(self.TOC_FILTERS[toc_depth])
.serialize())
@cache_with_field('summary_html')
def get_summary_html(self, *args, **kwargs):
return self.get_summary(strip_markup=False)
@cache_with_field('summary_text')
def get_summary_text(self, *args, **kwargs):
return self.get_summary(strip_markup=True)
def regenerate_cache_with_fields(self):
"""Regenerate fresh content for all the cached fields"""
# TODO: Maybe @cache_with_field can build a registry over which this
# method can iterate?
self.get_body_html(force_fresh=True)
self.get_quick_links_html(force_fresh=True)
self.get_zone_subnav_local_html(force_fresh=True)
self.get_toc_html(force_fresh=True)
self.get_summary_html(force_fresh=True)
self.get_summary_text(force_fresh=True)
def get_zone_subnav_html(self):
"""
Search from self up through DocumentZone stack, returning the first
zone nav HTML found.
"""
src = self.get_zone_subnav_local_html()
if src:
return src
for zone in DocumentZoneStackJob().get(self.pk):
src = zone.document.get_zone_subnav_local_html()
if src:
return src
def extract_section(self, content, section_id, ignore_heading=False):
parsed_content = parse_content(content)
extracted = parsed_content.extractSection(section_id,
ignore_heading=ignore_heading)
return extracted.serialize()
def get_section_content(self, section_id, ignore_heading=True):
"""
Convenience method to extract the rendered content for a single section
"""
if self.rendered_html:
content = self.rendered_html
else:
content = self.html
return self.extract_section(content, section_id, ignore_heading)
def calculate_etag(self, section_id=None):
"""Calculate an etag-suitable hash for document content or a section"""
if not section_id:
content = self.html
else:
content = self.extract_section(self.html, section_id)
return '"%s"' % hashlib.sha1(content.encode('utf8')).hexdigest()
def current_or_latest_revision(self):
"""Returns current revision if there is one, else the last created
revision."""
rev = self.current_revision
if not rev:
revs = self.revisions.order_by('-created')
if revs.exists():
rev = revs[0]
return rev
@property
def is_rendering_scheduled(self):
"""Does this have a rendering scheduled?"""
if not self.render_scheduled_at:
return False
# Check whether a scheduled rendering has waited for too long. Assume
# failure, in this case, and allow another scheduling attempt.
timeout = config.KUMA_DOCUMENT_RENDER_TIMEOUT
max_duration = timedelta(seconds=timeout)
duration = datetime.now() - self.render_scheduled_at
if duration > max_duration:
return False
if not self.last_rendered_at:
return True
return self.render_scheduled_at > self.last_rendered_at
@property
def is_rendering_in_progress(self):
"""Does this have a rendering in progress?"""
if not self.render_started_at:
# No start time, so False.
return False
# Check whether an in-progress rendering has gone on for too long.
# Assume failure, in this case, and allow another rendering attempt.
timeout = config.KUMA_DOCUMENT_RENDER_TIMEOUT
max_duration = timedelta(seconds=timeout)
duration = datetime.now() - self.render_started_at
if duration > max_duration:
return False
if not self.last_rendered_at:
# No rendering ever, so in progress.
return True
# Finally, if the render start is more recent than last completed
# render, then we have one in progress.
return self.render_started_at > self.last_rendered_at
@newrelic.agent.function_trace()
def get_rendered(self, cache_control=None, base_url=None):
"""Attempt to get rendered content for this document"""
# No rendered content yet, so schedule the first render.
if not self.rendered_html:
try:
self.schedule_rendering(cache_control, base_url)
except DocumentRenderingInProgress:
# Unable to trigger a rendering right now, so we bail.
raise DocumentRenderedContentNotAvailable
# If we have a cache_control directive, try scheduling a render.
if cache_control:
try:
self.schedule_rendering(cache_control, base_url)
except DocumentRenderingInProgress:
pass
# Parse JSON errors, if available.
errors = None
try:
errors = (self.rendered_errors and
json.loads(self.rendered_errors) or None)
except ValueError:
pass
# If the above resulted in an immediate render, we might have content.
if not self.rendered_html:
if errors:
return ('', errors)
else:
# But, no such luck, so bail out.
raise DocumentRenderedContentNotAvailable
return (self.rendered_html, errors)
def schedule_rendering(self, cache_control=None, base_url=None):
"""
        Attempt to schedule rendering. Honor the defer_rendering field to
decide between an immediate or a queued render.
"""
# Avoid scheduling a rendering if already scheduled or in progress.
if self.is_rendering_scheduled or self.is_rendering_in_progress:
return False
# Note when the rendering was scheduled. Kind of a hack, doing a quick
# update and setting the local property rather than doing a save()
now = datetime.now()
Document.objects.filter(pk=self.pk).update(render_scheduled_at=now)
self.render_scheduled_at = now
if (waffle.switch_is_active('wiki_force_immediate_rendering') or
not self.defer_rendering):
# Attempt an immediate rendering.
self.render(cache_control, base_url)
else:
# Attempt to queue a rendering. If celery.conf.ALWAYS_EAGER is
# True, this is also an immediate rendering.
from . import tasks
tasks.render_document.delay(self.pk, cache_control, base_url)
def render(self, cache_control=None, base_url=None, timeout=None):
"""
Render content using kumascript and any other services necessary.
"""
if not base_url:
base_url = settings.SITE_URL
# Disallow rendering while another is in progress.
if self.is_rendering_in_progress:
raise DocumentRenderingInProgress
# Note when the rendering was started. Kind of a hack, doing a quick
# update and setting the local property rather than doing a save()
now = datetime.now()
Document.objects.filter(pk=self.pk).update(render_started_at=now)
self.render_started_at = now
# Perform rendering and update document
if not config.KUMASCRIPT_TIMEOUT:
# A timeout of 0 should shortcircuit kumascript usage.
self.rendered_html, self.rendered_errors = self.html, []
else:
self.rendered_html, errors = kumascript.get(self, cache_control,
base_url,
timeout=timeout)
self.rendered_errors = errors and json.dumps(errors) or None
# Regenerate the cached content fields
self.regenerate_cache_with_fields()
# Finally, note the end time of rendering and update the document.
self.last_rendered_at = datetime.now()
# If this rendering took longer than we'd like, mark it for deferred
# rendering in the future.
timeout = config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT
max_duration = timedelta(seconds=timeout)
duration = self.last_rendered_at - self.render_started_at
if duration >= max_duration:
self.defer_rendering = True
# TODO: Automatically clear the defer_rendering flag if the rendering
# time falls under the limit? Probably safer to require manual
# intervention to free docs from deferred jail.
if self.render_max_age:
# If there's a render_max_age, automatically update render_expires
self.render_expires = (datetime.now() +
timedelta(seconds=self.render_max_age))
else:
# Otherwise, just clear the expiration time as a one-shot
self.render_expires = None
self.save()
render_done.send(sender=self.__class__, instance=self)
def get_summary(self, strip_markup=True, use_rendered=True):
"""
Attempt to get the document summary from rendered content, with
fallback to raw HTML
"""
if use_rendered and self.rendered_html:
src = self.rendered_html
else:
src = self.html
return get_seo_description(src, self.locale, strip_markup)
def build_json_data(self):
html = self.rendered_html and self.rendered_html or self.html
content = parse_content(html).injectSectionIDs().serialize()
sections = get_content_sections(content)
translations = []
if self.pk:
for translation in self.other_translations:
revision = translation.current_revision
if revision.summary:
summary = revision.summary
else:
summary = translation.get_summary(strip_markup=False)
translations.append({
'last_edit': revision.created.isoformat(),
'locale': translation.locale,
'localization_tags': list(revision.localization_tags
.names()),
'review_tags': list(revision.review_tags.names()),
'summary': summary,
'tags': list(translation.tags.names()),
'title': translation.title,
'url': translation.get_absolute_url(),
})
if self.current_revision:
review_tags = list(self.current_revision.review_tags.names())
localization_tags = list(self.current_revision
.localization_tags
.names())
last_edit = self.current_revision.created.isoformat()
if self.current_revision.summary:
summary = self.current_revision.summary
else:
summary = self.get_summary(strip_markup=False)
else:
review_tags = []
localization_tags = []
last_edit = ''
summary = ''
if not self.pk:
tags = []
else:
tags = list(self.tags.names())
now_iso = datetime.now().isoformat()
if self.modified:
modified = self.modified.isoformat()
else:
modified = now_iso
return {
'title': self.title,
'label': self.title,
'url': self.get_absolute_url(),
'id': self.id,
'slug': self.slug,
'tags': tags,
'review_tags': review_tags,
'localization_tags': localization_tags,
'sections': sections,
'locale': self.locale,
'summary': summary,
'translations': translations,
'modified': modified,
'json_modified': now_iso,
'last_edit': last_edit
}
def get_json_data(self, stale=True):
"""Returns a document in object format for output as JSON.
The stale parameter, when True, accepts stale cached data even after
the document has been modified."""
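        # For example, doc.get_json_data() returns whatever cached JSON is
        # available, while doc.get_json_data(stale=False) rebuilds it when
        # self.modified is newer than the cached 'json_modified' timestamp.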
# Have parsed data & don't care about freshness? Here's a quick out..
curr_json_data = getattr(self, '_json_data', None)
if curr_json_data and stale:
return curr_json_data
# Attempt to parse the current contents of self.json, taking care in
# case it's empty or broken JSON.
self._json_data = {}
if self.json:
try:
self._json_data = json.loads(self.json)
except (TypeError, ValueError):
pass
# Try to get ISO 8601 datestamps for the doc and the json
json_lmod = self._json_data.get('json_modified', '')
doc_lmod = self.modified.isoformat()
# If there's no parsed data or the data is stale & we care, it's time
# to rebuild the cached JSON data.
if (not self._json_data) or (not stale and doc_lmod > json_lmod):
self._json_data = self.build_json_data()
self.json = json.dumps(self._json_data)
Document.objects.filter(pk=self.pk).update(json=self.json)
return self._json_data
def extract_code_sample(self, id):
"""Given the id of a code sample, attempt to extract it from rendered
HTML with a fallback to non-rendered in case of errors."""
try:
src, errors = self.get_rendered()
if errors:
src = self.html
except:
src = self.html
return extract_code_sample(id, src)
def extract_kumascript_macro_names(self):
return extract_kumascript_macro_names(self.html)
def extract_css_classnames(self):
return extract_css_classnames(self.rendered_html)
def extract_html_attributes(self):
return extract_html_attributes(self.rendered_html)
def natural_key(self):
return (self.locale, self.slug)
@staticmethod
def natural_key_hash(keys):
natural_key = u'/'.join(keys)
return hashlib.md5(natural_key.encode('utf8')).hexdigest()
@cached_property
def natural_cache_key(self):
return self.natural_key_hash(self.natural_key())
def _existing(self, attr, value):
"""Return an existing doc (if any) in this locale whose `attr` attr is
equal to mine."""
return Document.objects.filter(locale=self.locale, **{attr: value})
def _raise_if_collides(self, attr, exception):
"""Raise an exception if a page of this title/slug already exists."""
if self.id is None or hasattr(self, 'old_' + attr):
# If I am new or my title/slug changed...
existing = self._existing(attr, getattr(self, attr))
if existing.exists():
raise exception(existing[0])
def clean(self):
"""Translations can't be localizable."""
self._clean_is_localizable()
def _clean_is_localizable(self):
"""is_localizable == allowed to have translations. Make sure that isn't
violated.
For default language (en-US), is_localizable means it can have
translations. Enforce:
* is_localizable=True if it has translations
* if has translations, unable to make is_localizable=False
        For non-default languages, is_localizable must be False.
"""
if self.locale != settings.WIKI_DEFAULT_LANGUAGE:
self.is_localizable = False
# Can't save this translation if parent not localizable
if (self.parent and self.parent.id != self.id and
not self.parent.is_localizable):
raise ValidationError('"%s": parent "%s" is not localizable.' % (
unicode(self), unicode(self.parent)))
# Can't make not localizable if it has translations
# This only applies to documents that already exist, hence self.pk
if self.pk and not self.is_localizable and self.translations.exists():
raise ValidationError('"%s": document has %s translations but is '
'not localizable.' %
(unicode(self), self.translations.count()))
def _attr_for_redirect(self, attr, template):
"""Return the slug or title for a new redirect.
`template` is a Python string template with "old" and "number" tokens
used to create the variant.
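        For example, a template such as '%(old)s %(number)s' (the exact
        templates are supplied by the callers) applied to a slug of 'Foo'
        yields 'Foo 1', 'Foo 2', ... until an unused value is found.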
"""
def unique_attr():
"""Return a variant of getattr(self, attr) such that there is no
Document of my locale with string attribute `attr` equal to it.
Never returns the original attr value.
"""
# "My God, it's full of race conditions!"
i = 1
while True:
new_value = template % dict(old=getattr(self, attr), number=i)
if not self._existing(attr, new_value).exists():
return new_value
i += 1
old_attr = 'old_' + attr
if hasattr(self, old_attr):
# My slug (or title) is changing; we can reuse it for the redirect.
return getattr(self, old_attr)
else:
# Come up with a unique slug (or title):
return unique_attr()
def revert(self, revision, user, comment=None):
old_review_tags = list(revision.review_tags.names())
revision.id = None
revision.comment = ("Revert to revision of %s by %s" %
(revision.created, revision.creator))
if comment:
revision.comment += ': "%s"' % comment
revision.created = datetime.now()
revision.creator = user
revision.save()
# TODO: change to primary key check instead of object comparison
if revision.document.original == self:
revision.save(update_fields=['based_on'])
if old_review_tags:
revision.review_tags.set(*old_review_tags)
revision.make_current()
self.schedule_rendering('max-age=0')
return revision
def revise(self, user, data, section_id=None):
"""Given a dict of changes to make, build and save a new Revision to
revise this document"""
curr_rev = self.current_revision
new_rev = Revision(creator=user, document=self, content=self.html)
for n in ('title', 'slug', 'render_max_age'):
setattr(new_rev, n, getattr(self, n))
if curr_rev:
new_rev.toc_depth = curr_rev.toc_depth
original_doc = curr_rev.document.original
if original_doc == self:
new_rev.based_on = curr_rev
else:
new_rev.based_on = original_doc.current_revision
# Accept optional field edits...
new_title = data.get('title', False)
new_rev.title = new_title and new_title or self.title
new_tags = data.get('tags', False)
new_rev.tags = (new_tags and new_tags or
edit_string_for_tags(self.tags.all()))
new_review_tags = data.get('review_tags', False)
if new_review_tags:
review_tags = new_review_tags
elif curr_rev:
review_tags = edit_string_for_tags(curr_rev.review_tags.all())
else:
review_tags = ''
new_rev.summary = data.get('summary', '')
# To add comment, when Technical/Editorial review completed
new_rev.comment = data.get('comment', '')
# Accept HTML edits, optionally by section
new_html = data.get('content', data.get('html', False))
if new_html:
if not section_id:
new_rev.content = new_html
else:
content = parse_content(self.html)
new_rev.content = (content.replaceSection(section_id, new_html)
.serialize())
# Finally, commit the revision changes and return the new rev.
new_rev.save()
new_rev.review_tags.set(*parse_tags(review_tags))
return new_rev
@cached_property
def last_modified_cache_key(self):
return DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL % self.natural_cache_key
def fill_last_modified_cache(self):
"""
Convert python datetime to Unix epoch seconds. This is more
easily digested by the cache, and is more compatible with other
services that might spy on Kuma's cache entries (eg. KumaScript)
"""
modified_epoch = self.modified.strftime('%s')
memcache.set(self.last_modified_cache_key, modified_epoch)
return modified_epoch
def save(self, *args, **kwargs):
self.is_template = self.slug.startswith(TEMPLATE_TITLE_PREFIX)
self.is_redirect = bool(self.get_redirect_url())
try:
# Check if the slug would collide with an existing doc
self._raise_if_collides('slug', SlugCollision)
except UniqueCollision as err:
if err.existing.get_redirect_url() is not None:
# If the existing doc is a redirect, delete it and clobber it.
err.existing.delete()
else:
raise err
# These are too important to leave to a (possibly omitted) is_valid
# call:
self._clean_is_localizable()
if not self.parent_topic and self.parent:
# If this is a translation without a topic parent, try to get one.
self.acquire_translated_topic_parent()
super(Document, self).save(*args, **kwargs)
        # Refresh the cached last-modified timestamp.
self.fill_last_modified_cache()
def delete(self, *args, **kwargs):
if waffle.switch_is_active('wiki_error_on_delete'):
# bug 863692: Temporary while we investigate disappearing pages.
raise Exception("Attempt to delete document %s: %s" %
(self.id, self.title))
else:
if self.is_redirect or 'purge' in kwargs:
if 'purge' in kwargs:
kwargs.pop('purge')
return super(Document, self).delete(*args, **kwargs)
signals.pre_delete.send(sender=self.__class__,
instance=self)
if not self.deleted:
Document.objects.filter(pk=self.pk).update(deleted=True)
memcache.delete(self.last_modified_cache_key)
signals.post_delete.send(sender=self.__class__, instance=self)
def purge(self):
if waffle.switch_is_active('wiki_error_on_delete'):
# bug 863692: Temporary while we investigate disappearing pages.
raise Exception("Attempt to purge document %s: %s" %
(self.id, self.title))
else:
if not self.deleted:
                raise Exception("Attempt to purge non-deleted document %s: %s" %
(self.id, self.title))
self.delete(purge=True)
def restore(self):
"""
Restores a logically deleted document by reverting the deleted
boolean to False. Sends pre_save and post_save Django signals to
        follow duck-typing best practices.
"""
if not self.deleted:
raise Exception("Document is not deleted, cannot be restored.")
signals.pre_save.send(sender=self.__class__, instance=self)
Document.deleted_objects.filter(pk=self.pk).update(deleted=False)
signals.post_save.send(sender=self.__class__, instance=self)
def _post_move_redirects(self, new_slug, user, title):
"""
Create and return a Document and a Revision to serve as
redirects once this page has been moved.
"""
redirect_doc = Document(locale=self.locale,
title=self.title,
slug=self.slug,
is_localizable=False)
content = REDIRECT_CONTENT % {
'href': reverse('wiki.document',
args=[new_slug],
locale=self.locale),
'title': title,
}
redirect_rev = Revision(content=content,
is_approved=True,
toc_depth=self.current_revision.toc_depth,
creator=user)
return redirect_doc, redirect_rev
def _moved_revision(self, new_slug, user, title=None):
"""
Create and return a Revision which is a copy of this
Document's current Revision, as it will exist at a moved
location.
"""
moved_rev = self.current_revision
# Shortcut trick for getting an object with all the same
# values, but making Django think it's new.
moved_rev.id = None
moved_rev.creator = user
moved_rev.created = datetime.now()
moved_rev.slug = new_slug
if title:
moved_rev.title = title
return moved_rev
def _get_new_parent(self, new_slug):
"""
Get this moved Document's parent doc if a Document
exists at the appropriate slug and locale.
"""
return valid_slug_parent(new_slug, self.locale)
def _move_conflicts(self, new_slug):
"""
Given a new slug to be assigned to this document, check
whether there is an existing, non-redirect, Document at that
slug in this locale. Any redirect existing there will be
deleted.
This is necessary since page moving is a background task, and
a Document may come into existence at the target slug after
the move is requested.
"""
existing = None
try:
existing = Document.objects.get(locale=self.locale,
slug=new_slug)
except Document.DoesNotExist:
pass
if existing is not None:
if existing.is_redirect:
existing.delete()
else:
raise Exception("Requested move would overwrite a non-redirect page.")
def _tree_conflicts(self, new_slug):
"""
Given a new slug to be assigned to this document, return a
list of documents (if any) which would be overwritten by
moving this document or any of its children in that fashion.
"""
conflicts = []
try:
existing = Document.objects.get(locale=self.locale, slug=new_slug)
if not existing.is_redirect:
conflicts.append(existing)
except Document.DoesNotExist:
pass
for child in self.get_descendants():
child_title = child.slug.split('/')[-1]
try:
slug = '/'.join([new_slug, child_title])
existing = Document.objects.get(locale=self.locale, slug=slug)
if not existing.get_redirect_url():
conflicts.append(existing)
except Document.DoesNotExist:
pass
return conflicts
def _move_tree(self, new_slug, user=None, title=None):
"""
Move this page and all its children.
"""
# Page move is a 10-step process.
#
# Step 1: Sanity check. Has a page been created at this slug
# since the move was requested? If not, OK to go ahead and
# change our slug.
self._move_conflicts(new_slug)
if user is None:
user = self.current_revision.creator
if title is None:
title = self.title
# Step 2: stash our current review tags, since we want to
# preserve them.
review_tags = list(self.current_revision.review_tags.names())
# Step 3: Create (but don't yet save) a Document and Revision
# to leave behind as a redirect from old location to new.
redirect_doc, redirect_rev = self._post_move_redirects(new_slug,
user,
title)
# Step 4: Update our breadcrumbs.
new_parent = self._get_new_parent(new_slug)
# If we found a Document at what will be our parent slug, set
# it as our parent_topic. If we didn't find one, then we no
# longer have a parent_topic (since our original parent_topic
# would already have moved if it were going to).
self.parent_topic = new_parent
# Step 5: Save this Document.
self.slug = new_slug
self.save()
# Step 6: Create (but don't yet save) a copy of our current
# revision, but with the new slug and title (if title is
# changing too).
moved_rev = self._moved_revision(new_slug, user, title)
# Step 7: Save the Revision that actually moves us.
moved_rev.save(force_insert=True)
# Step 8: Save the review tags.
moved_rev.review_tags.set(*review_tags)
# Step 9: Save the redirect.
redirect_doc.save()
redirect_rev.document = redirect_doc
redirect_rev.save()
# Finally, step 10: recurse through all of our children.
for child in self.children.filter(locale=self.locale):
# Save the original slug and locale so we can use them in
# the error message if something goes wrong.
old_child_slug, old_child_locale = child.slug, child.locale
child_title = child.slug.split('/')[-1]
try:
child._move_tree('/'.join([new_slug, child_title]), user)
except PageMoveError:
# A child move already caught this and created the
# correct exception + error message, so just propagate
# it up.
raise
except Exception as e:
# One of the immediate children of this page failed to
# move.
exc_class, exc_message, exc_tb = sys.exc_info()
message = """
Failure occurred while attempting to move document
with id %(doc_id)s.
That document can be viewed at:
https://developer.mozilla.org/%(locale)s/docs/%(slug)s
The exception raised was:
Exception type: %(exc_class)s
Exception message: %(exc_message)s
Full traceback:
%(traceback)s
""" % {'doc_id': child.id,
'locale': old_child_locale,
'slug': old_child_slug,
'exc_class': exc_class,
'exc_message': exc_message,
'traceback': traceback.format_exc(e)}
raise PageMoveError(message)
def repair_breadcrumbs(self):
"""
Temporary method while we work out the real issue behind
translation/breadcrumb mismatches (bug 900961).
Basically just walks up the tree of topical parents, calling
acquire_translated_topic_parent() for as long as there's a
language mismatch.
"""
if (not self.parent_topic or
self.parent_topic.locale != self.locale):
self.acquire_translated_topic_parent()
if self.parent_topic:
self.parent_topic.repair_breadcrumbs()
def acquire_translated_topic_parent(self):
"""
This normalizes topic breadcrumb paths between locales.
Attempt to acquire a topic parent from a translation of our translation
parent's topic parent, auto-creating a stub document if necessary.
"""
if not self.parent:
# Bail, if this is not in fact a translation.
return
parent_topic = self.parent.parent_topic
if not parent_topic:
# Bail, if the translation parent has no topic parent
return
try:
# Look for an existing translation of the topic parent
new_parent = parent_topic.translations.get(locale=self.locale)
except Document.DoesNotExist:
try:
# No luck. As a longshot, let's try looking for the same slug.
new_parent = Document.objects.get(locale=self.locale,
slug=parent_topic.slug)
if not new_parent.parent:
# HACK: This same-slug/different-locale doc should probably
# be considered a translation. Let's correct that on the
# spot.
new_parent.parent = parent_topic
new_parent.save()
except Document.DoesNotExist:
# Finally, let's create a translated stub for a topic parent
new_parent = Document.objects.get(pk=parent_topic.pk)
new_parent.pk = None
new_parent.current_revision = None
new_parent.parent_topic = None
new_parent.parent = parent_topic
new_parent.locale = self.locale
new_parent.save()
if parent_topic.current_revision:
# Don't forget to clone a current revision
new_rev = Revision.objects.get(pk=parent_topic.current_revision.pk)
new_rev.pk = None
new_rev.document = new_parent
# HACK: Let's auto-add tags that flag this as a topic stub
stub_tags = '"TopicStub","NeedsTranslation"'
stub_l10n_tags = ['inprogress']
if new_rev.tags:
new_rev.tags = '%s,%s' % (new_rev.tags, stub_tags)
else:
new_rev.tags = stub_tags
new_rev.save()
new_rev.localization_tags.add(*stub_l10n_tags)
# Finally, assign the new default parent topic
self.parent_topic = new_parent
self.save()
@property
def content_parsed(self):
if not self.current_revision:
return None
return self.current_revision.content_parsed
def files_dict(self):
intermediates = DocumentAttachment.objects.filter(document__pk=self.id)
files = {}
for intermediate in intermediates:
attachment = intermediate.file
revision = attachment.current_revision
files[intermediate.name] = {
'attached_by': intermediate.attached_by.username,
'creator': revision.creator.username,
'description': revision.description,
'mime_type': revision.mime_type,
'html': attachment.get_embed_html(),
'url': attachment.get_file_url(),
}
return files
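    # Editor's note (illustrative sketch; the names and values shown are assumed):
    # the mapping returned above is keyed on each attachment's name within this
    # document, e.g.
    #   {'diagram.png': {'attached_by': 'jdoe', 'creator': 'jdoe',
    #                    'description': 'Flow diagram', 'mime_type': 'image/png',
    #                    'html': '<img ...>', 'url': '/files/1234/diagram.png'}}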
@cached_property
def attachments(self):
# Is there a more elegant way to do this?
#
# File attachments aren't really stored at the DB level;
# instead, the page just gets appropriate HTML to embed
# whatever type of file it is. So we find them by
# regex-searching over the HTML for URLs that match the
# file URL patterns.
mt_files = DEKI_FILE_URL.findall(self.html)
kuma_files = KUMA_FILE_URL.findall(self.html)
params = None
if mt_files:
# We have at least some MindTouch files.
params = models.Q(mindtouch_attachment_id__in=mt_files)
if kuma_files:
# We also have some kuma files. Use an OR query.
params = params | models.Q(id__in=kuma_files)
if kuma_files and not params:
# We have only kuma files.
params = models.Q(id__in=kuma_files)
if params:
return Attachment.objects.filter(params)
else:
# If no files found, return an empty Attachment queryset.
return Attachment.objects.none()
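    # Editor's note (illustrative sketch; the ids are assumed): if self.html embeds
    # two kuma file URLs with ids 12 and 15 plus one legacy MindTouch file with id 7,
    # the regexes above yield kuma_files == ['12', '15'] and mt_files == ['7'], so
    # params becomes Q(mindtouch_attachment_id__in=['7']) | Q(id__in=['12', '15'])
    # and this property returns Attachment.objects.filter(params).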
@property
def show_toc(self):
return self.current_revision and self.current_revision.toc_depth
@cached_property
def language(self):
return get_language_mapping()[self.locale.lower()]
def get_absolute_url(self, endpoint='wiki.document'):
"""
Build the absolute URL to this document from its full path
"""
return reverse(endpoint, locale=self.locale, args=[self.slug])
def get_edit_url(self):
return self.get_absolute_url(endpoint='wiki.edit')
def get_redirect_url(self):
"""
If I am a redirect, return the absolute URL to which I redirect.
Otherwise, return None.
"""
# If a document starts with REDIRECT_HTML and contains any <a> tags
# with hrefs, return the href of the first one. This trick saves us
# from having to parse the HTML every time.
if REDIRECT_HTML in self.html:
anchors = PyQuery(self.html)('a[href].redirect')
if anchors:
url = anchors[0].get('href')
# allow explicit domain and *not* '//'
# i.e allow "https://developer...." and "/en-US/docs/blah"
if len(url) > 1:
if url.startswith(settings.SITE_URL):
return url
elif url[0] == '/' and url[1] != '/':
return url
elif len(url) == 1 and url[0] == '/':
return url
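    # Editor's note (illustrative sketch; the markup is assumed): for a document
    # whose html reads something like
    #   REDIRECT <a class="redirect" href="/en-US/docs/Web/API">Web/API</a>
    # get_redirect_url() returns '/en-US/docs/Web/API'. An absolute URL is only
    # returned when it starts with settings.SITE_URL, and a protocol-relative
    # href such as '//evil.example.com/x' is rejected by the '//' check above.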
def filter_permissions(self, user, permissions):
"""Filter permissions with custom logic"""
# No-op, for now.
return permissions
def get_topic_parents(self):
"""Build a list of parent topics from self to root"""
curr, parents = self, []
while curr.parent_topic:
curr = curr.parent_topic
parents.append(curr)
return parents
def allows_revision_by(self, user):
"""
Return whether `user` is allowed to create new revisions of me.
The motivation behind this method is that templates and other types of
docs may have different permissions.
"""
if (self.slug.startswith(TEMPLATE_TITLE_PREFIX) and
not user.has_perm('wiki.change_template_document')):
return False
return True
def allows_editing_by(self, user):
"""
Return whether `user` is allowed to edit document-level metadata.
If the Document doesn't have a current_revision (nothing approved) then
all the Document fields are still editable. Once there is an approved
Revision, the Document fields can only be edited by privileged users.
"""
if (self.slug.startswith(TEMPLATE_TITLE_PREFIX) and
not user.has_perm('wiki.change_template_document')):
return False
return (not self.current_revision or
user.has_perm('wiki.change_document'))
def translated_to(self, locale):
"""
Return the translation of me to the given locale.
If there is no such Document, return None.
"""
if self.locale != settings.WIKI_DEFAULT_LANGUAGE:
raise NotImplementedError('translated_to() is implemented only on'
'Documents in the default language so'
'far.')
try:
return Document.objects.get(locale=locale, parent=self)
except Document.DoesNotExist:
return None
@property
def original(self):
"""Return the document I was translated from or, if none, myself."""
return self.parent or self
@cached_property
def other_translations(self):
"""Return a list of Documents - other translations of this Document"""
if self.parent is None:
return self.translations.all().order_by('locale')
else:
translations = (self.parent.translations.all()
.exclude(id=self.id)
.order_by('locale'))
pks = list(translations.values_list('pk', flat=True))
return Document.objects.filter(pk__in=[self.parent.pk] + pks)
@property
def parents(self):
"""Return the list of topical parent documents above this one,
or an empty list if none exist."""
if self.parent_topic is None:
return []
current_parent = self.parent_topic
parents = [current_parent]
while current_parent.parent_topic is not None:
parents.insert(0, current_parent.parent_topic)
current_parent = current_parent.parent_topic
return parents
def is_child_of(self, other):
"""
Circular dependency detection -- if someone tries to set
this as a parent of a document it's a child of, they're gonna
have a bad time.
"""
return other.id in (d.id for d in self.parents)
# This is a method, not a property, because it can do a lot of DB
# queries and so should look scarier. It's not just named
# 'children' because that's taken already by the reverse relation
# on parent_topic.
def get_descendants(self, limit=None, levels=0):
"""
Return a list of all documents which are children
(grandchildren, great-grandchildren, etc.) of this one.
"""
results = []
if (limit is None or levels < limit) and self.children.exists():
for child in self.children.all().filter(locale=self.locale):
results.append(child)
                results.extend(child.get_descendants(limit, levels + 1))
return results
def is_watched_by(self, user):
"""Return whether `user` is notified of edits to me."""
from .events import EditDocumentEvent
return EditDocumentEvent.is_notifying(user, self)
def tree_is_watched_by(self, user):
"""Return whether `user` is notified of edits to me AND sub-pages."""
from .events import EditDocumentInTreeEvent
return EditDocumentInTreeEvent.is_notifying(user, self)
def parent_trees_watched_by(self, user):
"""
Return any and all of this document's parents that are watched by the
given user.
"""
return [doc for doc in self.parents if doc.tree_is_watched_by(user)]
def get_document_type(self):
return WikiDocumentType
@cached_property
def contributors(self):
return DocumentContributorsJob().get(self.pk)
@cached_property
def zone_stack(self):
return DocumentZoneStackJob().get(self.pk)
def get_full_url(self):
return absolutify(self.get_absolute_url())
class DocumentDeletionLog(models.Model):
"""
Log of who deleted a Document, when, and why.
"""
# We store the locale/slug because it's unique, and also because a
# ForeignKey would delete this log when the Document gets purged.
locale = models.CharField(
max_length=7,
choices=settings.LANGUAGES,
default=settings.WIKI_DEFAULT_LANGUAGE,
db_index=True,
)
slug = models.CharField(max_length=255, db_index=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
timestamp = models.DateTimeField(auto_now=True)
reason = models.TextField()
def __unicode__(self):
return "/%(locale)s/%(slug)s deleted by %(user)s" % {
'locale': self.locale,
'slug': self.slug,
'user': self.user
}
class DocumentZone(models.Model):
"""
Model object declaring a content zone root at a given Document, provides
attributes inherited by the topic hierarchy beneath it.
"""
document = models.OneToOneField(Document, related_name='zone')
styles = models.TextField(null=True, blank=True)
url_root = models.CharField(
max_length=255, null=True, blank=True, db_index=True,
help_text="alternative URL path root for documents under this zone")
def __unicode__(self):
return u'DocumentZone %s (%s)' % (self.document.get_absolute_url(),
self.document.title)
class ReviewTag(TagBase):
"""A tag indicating review status, mainly for revisions"""
class Meta:
verbose_name = _('Review Tag')
verbose_name_plural = _('Review Tags')
class LocalizationTag(TagBase):
"""A tag indicating localization status, mainly for revisions"""
class Meta:
verbose_name = _('Localization Tag')
verbose_name_plural = _('Localization Tags')
class ReviewTaggedRevision(ItemBase):
"""Through model, just for review tags on revisions"""
content_object = models.ForeignKey('Revision')
tag = models.ForeignKey(ReviewTag, related_name="%(app_label)s_%(class)s_items")
@classmethod
def tags_for(cls, *args, **kwargs):
return tags_for(cls, *args, **kwargs)
class LocalizationTaggedRevision(ItemBase):
"""Through model, just for localization tags on revisions"""
content_object = models.ForeignKey('Revision')
tag = models.ForeignKey(LocalizationTag, related_name="%(app_label)s_%(class)s_items")
@classmethod
def tags_for(cls, *args, **kwargs):
return tags_for(cls, *args, **kwargs)
class Revision(models.Model):
"""A revision of a localized knowledgebase document"""
# Depth of table-of-contents in document display.
TOC_DEPTH_NONE = 0
TOC_DEPTH_ALL = 1
TOC_DEPTH_H2 = 2
TOC_DEPTH_H3 = 3
TOC_DEPTH_H4 = 4
TOC_DEPTH_CHOICES = (
(TOC_DEPTH_NONE, _(u'No table of contents')),
(TOC_DEPTH_ALL, _(u'All levels')),
(TOC_DEPTH_H2, _(u'H2 and higher')),
(TOC_DEPTH_H3, _(u'H3 and higher')),
(TOC_DEPTH_H4, _('H4 and higher')),
)
document = models.ForeignKey(Document, related_name='revisions')
# Title and slug in document are primary, but they're kept here for
# revision history.
title = models.CharField(max_length=255, null=True, db_index=True)
slug = models.CharField(max_length=255, null=True, db_index=True)
summary = models.TextField() # wiki markup
content = models.TextField() # wiki markup
tidied_content = models.TextField(blank=True) # wiki markup tidied up
# Keywords are used mostly to affect search rankings. Moderators may not
# have the language expertise to translate keywords, so we put them in the
# Revision so the translators can handle them:
keywords = models.CharField(max_length=255, blank=True)
# Tags are stored in a Revision as a plain CharField, because Revisions are
# not indexed by tags. This data is retained for history tracking.
tags = models.CharField(max_length=255, blank=True)
# Tags are (ab)used as status flags and for searches, but the through model
# should constrain things from getting expensive.
review_tags = TaggableManager(through=ReviewTaggedRevision)
localization_tags = TaggableManager(through=LocalizationTaggedRevision)
toc_depth = models.IntegerField(choices=TOC_DEPTH_CHOICES,
default=TOC_DEPTH_ALL)
# Maximum age (in seconds) before this document needs re-rendering
render_max_age = models.IntegerField(blank=True, null=True)
created = models.DateTimeField(default=datetime.now, db_index=True)
comment = models.CharField(max_length=255)
creator = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='created_revisions')
is_approved = models.BooleanField(default=True, db_index=True)
# The default locale's rev that was current when the Edit button was hit to
# create this revision. Used to determine whether localizations are out of
# date.
based_on = models.ForeignKey('self', null=True, blank=True)
# TODO: limit_choices_to={'document__locale':
# settings.WIKI_DEFAULT_LANGUAGE} is a start but not sufficient.
is_mindtouch_migration = models.BooleanField(default=False, db_index=True,
help_text="Did this revision come from MindTouch?")
objects = TransformManager()
def get_absolute_url(self):
"""Build the absolute URL to this revision"""
return reverse('wiki.revision',
locale=self.document.locale,
args=[self.document.slug, self.pk])
def _based_on_is_clean(self):
"""Return a tuple: (the correct value of based_on, whether the old
value was correct).
based_on must be an approved revision of the English version of the
document if there are any such revisions, any revision if no
approved revision exists, and None otherwise. If based_on is not
already set when this is called, the return value defaults to the
current_revision of the English document.
"""
# TODO(james): This could probably be simplified down to "if
# based_on is set, it must be a revision of the original document."
original = self.document.original
base = original.current_or_latest_revision()
has_approved = original.revisions.filter(is_approved=True).exists()
if (original.current_revision or not has_approved):
if (self.based_on and self.based_on.document != original):
# based_on is set and points to the wrong doc.
return base, False
# Else based_on is valid; leave it alone.
elif self.based_on:
return None, False
return self.based_on, True
def clean(self):
"""Ensure based_on is valid."""
# All of the cleaning herein should be unnecessary unless the user
# messes with hidden form data.
try:
self.document and self.document.original
except Document.DoesNotExist:
# For clean()ing forms that don't have a document instance behind
# them yet
self.based_on = None
else:
based_on, is_clean = self._based_on_is_clean()
if not is_clean:
if self.document.parent:
# Restoring translation source, so base on current_revision
self.based_on = self.document.parent.current_revision
else:
old = self.based_on
self.based_on = based_on # Guess a correct value.
locale = settings.LOCALES[settings.WIKI_DEFAULT_LANGUAGE].native
error = ugettext(
'A revision must be based on a revision of the '
'%(locale)s document. Revision ID %(id)s does '
'not fit those criteria.')
raise ValidationError(error %
{'locale': locale, 'id': old.id})
def save(self, *args, **kwargs):
_, is_clean = self._based_on_is_clean()
if not is_clean: # No more Mister Nice Guy
# TODO(erik): This error message ignores non-translations.
raise ProgrammingError('Revision.based_on must be None or refer '
'to a revision of the default-'
'language document. It was %s' %
self.based_on)
if not self.title:
self.title = self.document.title
if not self.slug:
self.slug = self.document.slug
super(Revision, self).save(*args, **kwargs)
# When a revision is approved, update document metadata and re-cache
# the document's html content
if self.is_approved:
self.make_current()
def make_current(self):
"""Make this revision the current one for the document"""
self.document.title = self.title
self.document.slug = self.slug
self.document.html = self.content_cleaned
self.document.render_max_age = self.render_max_age
self.document.current_revision = self
# Since Revision stores tags as a string, we need to parse them first
# before setting on the Document.
self.document.tags.set(*parse_tags(self.tags))
self.document.save()
def __unicode__(self):
return u'[%s] %s #%s' % (self.document.locale,
self.document.title,
self.id)
def get_section_content(self, section_id):
"""Convenience method to extract the content for a single section"""
return self.document.extract_section(self.content, section_id)
def get_tidied_content(self, allow_none=False):
"""
Return the revision content parsed and cleaned by tidy.
First, check in denormalized db field. If it's not available, schedule
an asynchronous task to store it.
allow_none -- To prevent CPU-hogging calls, return None instead of
calling tidy_content in-process.
"""
# we may be lucky and have the tidied content already denormalized
# in the database, if so return it
if self.tidied_content:
tidied_content = self.tidied_content
else:
from .tasks import tidy_revision_content
tidying_scheduled_cache_key = 'kuma:tidying_scheduled:%s' % self.pk
# if there isn't already a task scheduled for the revision
tidying_already_scheduled = memcache.get(tidying_scheduled_cache_key)
if not tidying_already_scheduled:
tidy_revision_content.delay(self.pk)
# we temporarily set a flag that we've scheduled a task
# already and don't need to schedule it the next time
# we use 3 days as a limit to try it again
memcache.set(tidying_scheduled_cache_key, 1, 60 * 60 * 24 * 3)
if allow_none:
tidied_content = None
else:
tidied_content, errors = tidy_content(self.content)
return tidied_content
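    # Editor's note (sketch of the two call patterns this method supports):
    #   rev.get_tidied_content(allow_none=True)  # cheap path: may return None while
    #                                            # the scheduled tidy task fills the field
    #   rev.get_tidied_content()                 # falls back to tidy_content() in-process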
@property
def content_cleaned(self):
if self.document.is_template:
return self.content
else:
return Document.objects.clean_content(self.content)
@cached_property
def previous(self):
"""
Returns the previous approved revision or None.
"""
try:
return self.document.revisions.filter(
is_approved=True,
created__lt=self.created,
).order_by('-created')[0]
except IndexError:
return None
@cached_property
def needs_editorial_review(self):
return self.review_tags.filter(name='editorial').exists()
@cached_property
def needs_technical_review(self):
return self.review_tags.filter(name='technical').exists()
@cached_property
def localization_in_progress(self):
return self.localization_tags.filter(name='inprogress').exists()
@property
def translation_age(self):
return abs((datetime.now() - self.created).days)
class RevisionIP(models.Model):
"""
IP Address for a Revision including User-Agent string and Referrer URL.
"""
revision = models.ForeignKey(
Revision
)
ip = models.CharField(
_('IP address'),
max_length=40,
editable=False,
db_index=True,
blank=True,
null=True,
)
user_agent = models.TextField(
_('User-Agent'),
editable=False,
blank=True,
)
referrer = models.TextField(
_('HTTP Referrer'),
editable=False,
blank=True,
)
objects = RevisionIPManager()
def __unicode__(self):
return '%s (revision %d)' % (self.ip or 'No IP', self.revision.id)
class RevisionAkismetSubmission(AkismetSubmission):
"""
The Akismet submission per wiki document revision.
Stores only a reference to the submitted revision.
"""
revision = models.ForeignKey(
Revision,
related_name='akismet_submissions',
null=True,
blank=True,
verbose_name=_('Revision'),
# don't delete the akismet submission but set the revision to null
on_delete=models.SET_NULL,
)
class Meta:
verbose_name = _('Akismet submission')
verbose_name_plural = _('Akismet submissions')
def __unicode__(self):
if self.revision:
return (
u'%(type)s submission by %(sender)s (Revision %(revision_id)d)' % {
'type': self.get_type_display(),
'sender': self.sender,
'revision_id': self.revision.id,
}
)
else:
return (
u'%(type)s submission by %(sender)s (no revision)' % {
'type': self.get_type_display(),
'sender': self.sender,
}
)
class EditorToolbar(models.Model):
creator = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='created_toolbars')
default = models.BooleanField(default=False)
name = models.CharField(max_length=100)
code = models.TextField(max_length=2000)
def __unicode__(self):
return self.name
class DocumentSpamAttempt(SpamAttempt):
"""
The wiki document specific spam attempt.
    Stores the title, slug and locale of the document revision so that the
    source of the attempt can be identified.
"""
title = models.CharField(
verbose_name=ugettext('Title'),
max_length=255,
)
slug = models.CharField(
verbose_name=ugettext('Slug'),
max_length=255,
)
document = models.ForeignKey(
Document,
related_name='spam_attempts',
null=True,
blank=True,
verbose_name=ugettext('Document (optional)'),
on_delete=models.SET_NULL,
)
def __unicode__(self):
return u'%s (%s)' % (self.slug, self.title)
| mpl-2.0 | -1,674,403,241,280,517,600 | 37.414911 | 100 | 0.59919 | false |
ahknight/fig-django | app/test_app/settings.py | 1 | 2113 | """
Django settings for test_app project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z#cqgk_fe7!3y8w2*f!@gcc_z5&ir-)p)_vxfjhf$9jwrxf)dt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_app.urls'
WSGI_APPLICATION = 'test_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/app/static/'
| bsd-3-clause | 2,028,688,857,110,303,200 | 23.569767 | 71 | 0.716517 | false |
dashmoment/moxa_ai_training | CNN_HW_solution/model_zoo.py | 1 | 4589 | import tensorflow as tf
import netfactory as nf
import numpy as np
class model_zoo:
def __init__(self, inputs, dropout, is_training, model_ticket):
self.model_ticket = model_ticket
self.inputs = inputs
self.dropout = dropout
self.is_training = is_training
def googleLeNet_v1(self):
model_params = {
"conv1": [5,5, 64],
"conv2": [3,3,128],
"inception_1":{
"1x1":64,
"3x3":{ "1x1":96,
"3x3":128
},
"5x5":{ "1x1":16,
"5x5":32
},
"s1x1":32
},
"inception_2":{
"1x1":128,
"3x3":{ "1x1":128,
"3x3":192
},
"5x5":{ "1x1":32,
"5x5":96
},
"s1x1":64
},
"fc3": 10,
}
with tf.name_scope("googleLeNet_v1"):
net = nf.convolution_layer(self.inputs, model_params["conv1"], [1,2,2,1],name="conv1")
net = tf.nn.max_pool(net, ksize=[1, 3, 3, 1],strides=[1, 2, 2, 1], padding='SAME')
net = tf.nn.local_response_normalization(net, depth_radius=5, bias=1.0, alpha=0.0001, beta=0.75, name='LocalResponseNormalization')
net = nf.convolution_layer(net, model_params["conv2"], [1,1,1,1],name="conv2", flatten=False)
net = tf.nn.local_response_normalization(net, depth_radius=5, bias=1.0, alpha=0.0001, beta=0.75, name='LocalResponseNormalization')
net = nf.inception_v1(net, model_params, name= "inception_1", flatten=False)
net = nf.inception_v1(net, model_params, name= "inception_2", flatten=False)
net = tf.nn.avg_pool (net, ksize=[1, 3, 3, 1],strides=[1, 1, 1, 1], padding='VALID')
net = tf.reshape(net, [-1, int(np.prod(net.get_shape()[1:]))])
net = tf.layers.dropout(net, rate=self.dropout, training=self.is_training, name='dropout2')
logits = nf.fc_layer(net, model_params["fc3"], name="logits", activat_fn=None)
return logits
def resNet_v1(self):
model_params = {
"conv1": [5,5, 64],
"rb1_1": [3,3,64],
"rb1_2": [3,3,64],
"rb2_1": [3,3,128],
"rb2_2": [3,3,128],
"fc3": 10,
}
with tf.name_scope("resNet_v1"):
net = nf.convolution_layer(self.inputs, model_params["conv1"], [1,2,2,1],name="conv1")
id_rb1 = tf.nn.max_pool(net, ksize=[1, 3, 3, 1],strides=[1, 2, 2, 1], padding='SAME')
net = nf.convolution_layer(id_rb1, model_params["rb1_1"], [1,1,1,1],name="rb1_1")
id_rb2 = nf.convolution_layer(net, model_params["rb1_2"], [1,1,1,1],name="rb1_2")
id_rb2 = nf.shortcut(id_rb2,id_rb1, name="rb1")
net = nf.convolution_layer(id_rb2, model_params["rb2_1"], [1,2,2,1],padding="SAME",name="rb2_1")
id_rb3 = nf.convolution_layer(net, model_params["rb2_2"], [1,1,1,1],name="rb2_2")
id_rb3 = nf.shortcut(id_rb3,id_rb2, name="rb2")
net = nf.global_avg_pooling(id_rb3, flatten=True)
net = tf.layers.dropout(net, rate=self.dropout, training=self.is_training, name='dropout2')
logits = nf.fc_layer(net, model_params["fc3"], name="logits", activat_fn=None)
return logits
def build_model(self):
model_list = ["googleLeNet_v1", "resNet_v1"]
if self.model_ticket not in model_list:
print("sorry, wrong ticket!")
return 0
else:
fn = getattr(self, self.model_ticket)
netowrk = fn()
return netowrk
def unit_test():
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
is_training = tf.placeholder(tf.bool, name='is_training')
dropout = tf.placeholder(tf.float32, name='dropout')
mz = model_zoo(x, dropout, is_training,"resNet_v1")
return mz.build_model()
#m = unit_test() | mit | 1,189,355,182,992,824,300 | 34.859375 | 143 | 0.460885 | false |
kubernetes-client/python | kubernetes/e2e_test/test_client.py | 1 | 20967 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import select
import socket
import time
import unittest
import uuid
import six
from kubernetes.client import api_client
from kubernetes.client.api import core_v1_api
from kubernetes.e2e_test import base
from kubernetes.stream import stream, portforward
from kubernetes.stream.ws_client import ERROR_CHANNEL
from kubernetes.client.rest import ApiException
import six.moves.urllib.request as urllib_request
if six.PY3:
from http import HTTPStatus
else:
import httplib
def short_uuid():
id = str(uuid.uuid4())
return id[-12:]
def manifest_with_command(name, command):
return {
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {
'name': name
},
'spec': {
'containers': [{
'image': 'busybox',
'name': 'sleep',
"args": [
"/bin/sh",
"-c",
command
]
}]
}
}
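# Editor's note (illustrative; the pod name and command are assumed):
# manifest_with_command('busybox-test-abc123', 'sleep 3600') produces a
# single-container busybox pod spec whose args are ['/bin/sh', '-c', 'sleep 3600'];
# the tests below create such pods in the 'default' namespace and then exec into,
# port-forward to, or delete them.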
class TestClient(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.config = base.get_e2e_configuration()
def test_pod_apis(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'busybox-test-' + short_uuid()
pod_manifest = manifest_with_command(name, "while true;do date;sleep 5; done")
# wait for the default service account to be created
timeout = time.time() + 30
while True:
if time.time() > timeout:
print('timeout waiting for default service account creation')
break
try:
resp = api.read_namespaced_service_account(name='default',
namespace='default')
except ApiException as e:
if (six.PY3 and e.status != HTTPStatus.NOT_FOUND) or (
six.PY3 is False and e.status != httplib.NOT_FOUND):
print('error: %s' % e)
self.fail(msg="unexpected error getting default service account")
print('default service not found yet: %s' % e)
time.sleep(1)
continue
self.assertEqual('default', resp.metadata.name)
break
resp = api.create_namespaced_pod(body=pod_manifest,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
while True:
resp = api.read_namespaced_pod(name=name,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
if resp.status.phase != 'Pending':
break
time.sleep(1)
exec_command = ['/bin/sh',
'-c',
'for i in $(seq 1 3); do date; done']
resp = stream(api.connect_get_namespaced_pod_exec, name, 'default',
command=exec_command,
stderr=False, stdin=False,
stdout=True, tty=False)
print('EXEC response : %s' % resp)
self.assertEqual(3, len(resp.splitlines()))
exec_command = 'uptime'
resp = stream(api.connect_post_namespaced_pod_exec, name, 'default',
command=exec_command,
stderr=False, stdin=False,
stdout=True, tty=False)
print('EXEC response : %s' % resp)
self.assertEqual(1, len(resp.splitlines()))
resp = stream(api.connect_post_namespaced_pod_exec, name, 'default',
command='/bin/sh',
stderr=True, stdin=True,
stdout=True, tty=False,
_preload_content=False)
resp.write_stdin("echo test string 1\n")
line = resp.readline_stdout(timeout=5)
self.assertFalse(resp.peek_stderr())
self.assertEqual("test string 1", line)
resp.write_stdin("echo test string 2 >&2\n")
line = resp.readline_stderr(timeout=5)
self.assertFalse(resp.peek_stdout())
self.assertEqual("test string 2", line)
resp.write_stdin("exit\n")
resp.update(timeout=5)
line = resp.read_channel(ERROR_CHANNEL)
status = json.loads(line)
self.assertEqual(status['status'], 'Success')
resp.update(timeout=5)
self.assertFalse(resp.is_open())
number_of_pods = len(api.list_pod_for_all_namespaces().items)
self.assertTrue(number_of_pods > 0)
resp = api.delete_namespaced_pod(name=name, body={},
namespace='default')
def test_exit_code(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'busybox-test-' + short_uuid()
pod_manifest = manifest_with_command(name, "while true;do date;sleep 5; done")
# wait for the default service account to be created
timeout = time.time() + 30
while True:
if time.time() > timeout:
print('timeout waiting for default service account creation')
break
try:
resp = api.read_namespaced_service_account(name='default',
namespace='default')
except ApiException as e:
if (six.PY3 and e.status != HTTPStatus.NOT_FOUND) or (
six.PY3 is False and e.status != httplib.NOT_FOUND):
print('error: %s' % e)
self.fail(msg="unexpected error getting default service account")
print('default service not found yet: %s' % e)
time.sleep(1)
continue
self.assertEqual('default', resp.metadata.name)
break
resp = api.create_namespaced_pod(body=pod_manifest,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
while True:
resp = api.read_namespaced_pod(name=name,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
if resp.status.phase == 'Running':
break
time.sleep(1)
commands_expected_values = (
(["false", 1]),
(["/bin/sh", "-c", "sleep 1; exit 3"], 3),
(["true", 0]),
(["/bin/sh", "-c", "ls /"], 0)
)
for command, value in commands_expected_values:
client = stream(api.connect_get_namespaced_pod_exec, name, 'default',
command=command,
stderr=True, stdin=False,
stdout=True, tty=False,
_preload_content=False)
self.assertIsNone(client.returncode)
client.run_forever(timeout=10)
self.assertEqual(client.returncode, value)
resp = api.delete_namespaced_pod(name=name, body={},
namespace='default')
# Skipping this test as this flakes a lot
# See: https://github.com/kubernetes-client/python/issues/1300
# Re-enable the test once the flakiness is investigated
@unittest.skip("skipping due to extreme flakiness")
def test_portforward_raw(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
with open(os.path.join(os.path.dirname(__file__), 'port_server.py')) as fh:
port_server_py = fh.read()
name = 'portforward-raw-' + short_uuid()
resp = api.create_namespaced_config_map(
body={
'apiVersion': 'v1',
'kind': 'ConfigMap',
'metadata': {
'name': name,
},
'data': {
'port-server.py': port_server_py,
}
},
namespace='default',
)
resp = api.create_namespaced_pod(
body={
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {
'name': name
},
'spec': {
'containers': [
{
'name': 'port-server',
'image': 'python',
'command': [
'/opt/port-server.py', '1234', '1235',
],
'volumeMounts': [
{
'name': 'port-server',
'mountPath': '/opt',
'readOnly': True,
},
],
'startupProbe': {
'tcpSocket': {
'port': 1234,
},
},
},
],
'volumes': [
{
'name': 'port-server',
'configMap': {
'name': name,
'defaultMode': 0o777,
},
},
],
},
},
namespace='default',
)
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
while True:
resp = api.read_namespaced_pod(name=name,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
if resp.status.phase != 'Pending':
break
time.sleep(1)
self.assertEqual(resp.status.phase, 'Running')
pf = portforward(api.connect_get_namespaced_pod_portforward,
name, 'default',
ports='1234,1235,1236')
self.assertTrue(pf.connected)
sock1234 = pf.socket(1234)
sock1235 = pf.socket(1235)
sock1234.setblocking(True)
sock1235.setblocking(True)
sent1234 = b'Test port 1234 forwarding...'
sent1235 = b'Test port 1235 forwarding...'
sock1234.sendall(sent1234)
sock1235.sendall(sent1235)
reply1234 = b''
reply1235 = b''
while True:
rlist = []
if sock1234.fileno() != -1:
rlist.append(sock1234)
if sock1235.fileno() != -1:
rlist.append(sock1235)
if not rlist:
break
r, _w, _x = select.select(rlist, [], [], 1)
if not r:
break
if sock1234 in r:
data = sock1234.recv(1024)
self.assertNotEqual(data, b'', "Unexpected socket close")
reply1234 += data
if sock1235 in r:
data = sock1235.recv(1024)
self.assertNotEqual(data, b'', "Unexpected socket close")
reply1235 += data
self.assertEqual(reply1234, sent1234)
self.assertEqual(reply1235, sent1235)
self.assertTrue(pf.connected)
sock = pf.socket(1236)
self.assertRaises(socket.error, sock.sendall, b'This should fail...')
self.assertIsNotNone(pf.error(1236))
sock.close()
for sock in (sock1234, sock1235):
self.assertTrue(pf.connected)
sent = b'Another test using fileno %s' % str(sock.fileno()).encode()
sock.sendall(sent)
reply = b''
while True:
r, _w, _x = select.select([sock], [], [], 1)
if not r:
break
data = sock.recv(1024)
self.assertNotEqual(data, b'', "Unexpected socket close")
reply += data
self.assertEqual(reply, sent)
sock.close()
time.sleep(1)
self.assertFalse(pf.connected)
self.assertIsNone(pf.error(1234))
self.assertIsNone(pf.error(1235))
resp = api.delete_namespaced_pod(name=name, namespace='default')
resp = api.delete_namespaced_config_map(name=name, namespace='default')
def test_portforward_http(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'portforward-http-' + short_uuid()
pod_manifest = {
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {
'name': name
},
'spec': {
'containers': [{
'name': 'nginx',
'image': 'nginx',
}]
}
}
resp = api.create_namespaced_pod(body=pod_manifest,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
while True:
resp = api.read_namespaced_pod(name=name,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
if resp.status.phase != 'Pending':
break
time.sleep(1)
def kubernetes_create_connection(address, *args, **kwargs):
dns_name = address[0]
if isinstance(dns_name, bytes):
dns_name = dns_name.decode()
dns_name = dns_name.split(".")
if len(dns_name) != 3 or dns_name[2] != "kubernetes":
return socket_create_connection(address, *args, **kwargs)
pf = portforward(api.connect_get_namespaced_pod_portforward,
dns_name[0], dns_name[1], ports=str(address[1]))
return pf.socket(address[1])
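        # Editor's note: the helper above resolves addresses of the form
        # "<pod>.<namespace>.kubernetes" to a port-forward socket, so the plain
        # urllib request below reaches http://<pod>.default.kubernetes/ through
        # the Kubernetes API server instead of real DNS.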
socket_create_connection = socket.create_connection
try:
socket.create_connection = kubernetes_create_connection
response = urllib_request.urlopen('http://%s.default.kubernetes/' % name)
html = response.read().decode('utf-8')
finally:
socket.create_connection = socket_create_connection
self.assertEqual(response.code, 200)
self.assertTrue('<h1>Welcome to nginx!</h1>' in html)
resp = api.delete_namespaced_pod(name=name, body={},
namespace='default')
def test_service_apis(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'frontend-' + short_uuid()
service_manifest = {'apiVersion': 'v1',
'kind': 'Service',
'metadata': {'labels': {'name': name},
'name': name,
'resourceversion': 'v1'},
'spec': {'ports': [{'name': 'port',
'port': 80,
'protocol': 'TCP',
'targetPort': 80}],
'selector': {'name': name}}}
resp = api.create_namespaced_service(body=service_manifest,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status)
resp = api.read_namespaced_service(name=name,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status)
service_manifest['spec']['ports'] = [{'name': 'new',
'port': 8080,
'protocol': 'TCP',
'targetPort': 8080}]
resp = api.patch_namespaced_service(body=service_manifest,
name=name,
namespace='default')
self.assertEqual(2, len(resp.spec.ports))
self.assertTrue(resp.status)
resp = api.delete_namespaced_service(name=name, body={},
namespace='default')
def test_replication_controller_apis(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'frontend-' + short_uuid()
rc_manifest = {
'apiVersion': 'v1',
'kind': 'ReplicationController',
'metadata': {'labels': {'name': name},
'name': name},
'spec': {'replicas': 2,
'selector': {'name': name},
'template': {'metadata': {
'labels': {'name': name}},
'spec': {'containers': [{
'image': 'nginx',
'name': 'nginx',
'ports': [{'containerPort': 80,
'protocol': 'TCP'}]}]}}}}
resp = api.create_namespaced_replication_controller(
body=rc_manifest, namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertEqual(2, resp.spec.replicas)
resp = api.read_namespaced_replication_controller(
name=name, namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertEqual(2, resp.spec.replicas)
resp = api.delete_namespaced_replication_controller(
name=name, body={}, namespace='default')
def test_configmap_apis(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'test-configmap-' + short_uuid()
test_configmap = {
"kind": "ConfigMap",
"apiVersion": "v1",
"metadata": {
"name": name,
"labels": {"e2e-tests": "true"},
},
"data": {
"config.json": "{\"command\":\"/usr/bin/mysqld_safe\"}",
"frontend.cnf": "[mysqld]\nbind-address = 10.0.0.3\nport = 3306\n"
}
}
resp = api.create_namespaced_config_map(
body=test_configmap, namespace='default'
)
self.assertEqual(name, resp.metadata.name)
resp = api.read_namespaced_config_map(
name=name, namespace='default')
self.assertEqual(name, resp.metadata.name)
test_configmap['data']['config.json'] = "{}"
resp = api.patch_namespaced_config_map(
name=name, namespace='default', body=test_configmap)
resp = api.delete_namespaced_config_map(
name=name, body={}, namespace='default')
resp = api.list_namespaced_config_map('default', pretty=True, label_selector="e2e-tests=true")
self.assertEqual([], resp.items)
def test_node_apis(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
for item in api.list_node().items:
node = api.read_node(name=item.metadata.name)
self.assertTrue(len(node.metadata.labels) > 0)
self.assertTrue(isinstance(node.metadata.labels, dict))
| apache-2.0 | 8,037,305,217,771,363,000 | 38.264045 | 102 | 0.484666 | false |
collectiveacuity/labPack | tests/test_events_meetup.py | 1 | 10267 | __author__ = 'rcj1492'
__created__ = '2016.12'
__license__ = 'MIT'
from labpack.events.meetup import *
if __name__ == '__main__':
# import dependencies & configs
from pprint import pprint
from time import time
from labpack.records.settings import load_settings
from labpack.handlers.requests import handle_requests
meetup_config = load_settings('../../cred/meetup.yaml')
# test oauth construction
from labpack.authentication.oauth2 import oauth2Client
oauth_kwargs = {
'client_id': meetup_config['oauth_client_id'],
'client_secret': meetup_config['oauth_client_secret'],
'redirect_uri': meetup_config['oauth_redirect_uri'],
'auth_endpoint': meetup_config['oauth_auth_endpoint'],
'token_endpoint': meetup_config['oauth_token_endpoint'],
'request_mimetype': meetup_config['oauth_request_mimetype'],
'requests_handler': handle_requests
}
meetup_oauth = oauth2Client(**oauth_kwargs)
# test generate url
url_kwargs = {
'service_scope': meetup_config['oauth_service_scope'].split(),
'state_value': 'unittest_%s' % str(time())
}
auth_url = meetup_oauth.generate_url(**url_kwargs)
assert auth_url.find('oauth2') > 0
# retrieve access token
from labpack.storage.appdata import appdataClient
log_client = appdataClient(collection_name='Logs', prod_name='Fitzroy')
path_filters = [{
0: {'discrete_values': ['knowledge']},
1: {'discrete_values': ['tokens']},
2: {'discrete_values':['meetup']}
}]
import yaml
token_list = log_client.list(log_client.conditional_filter(path_filters), reverse_search=True)
token_data = log_client.load(token_list[0])
token_details = yaml.load(token_data.decode())
# test access token renewal
# new_details = meetup_oauth.renew_token(token_details['refresh_token'])
# token_details.update(**new_details['json'])
# new_key = 'knowledge/tokens/meetup/%s/%s.yaml' % (token_details['user_id'], token_details['expires_at'])
# log_client.create(new_key, token_details)
# test client construction
meetup_client = meetupClient(token_details['access_token'], token_details['service_scope'])
# test member profile, settings, topics, groups and events
profile_details = meetup_client.get_member_brief()
member_id = int(profile_details['json']['id'])
assert isinstance(profile_details['json']['id'], str)
profile_details = meetup_client.get_member_profile(member_id)
assert isinstance(profile_details['json']['id'], int)
member_topics = meetup_client.list_member_topics(member_id)
assert isinstance(member_topics['json'][0]['id'], int)
member_groups = meetup_client.list_member_groups(member_id)
assert member_groups['json'][5]['group']['name']
if len(member_groups['json']) <= 200:
assert len(member_groups['json']) == profile_details['json']['stats']['groups']
member_events = meetup_client.list_member_events()
assert isinstance(member_events['json'], list)
# test member calendar, event attendees & other member profile, settings, topics & groups
# event_details = meetup_client.get_member_calendar(max_results=10)
# group_url = event_details['json'][0]['group']['urlname']
# event_id = int(event_details['json'][0]['id'])
# event_attendees = meetup_client.list_event_attendees(group_url, event_id)
# member_id = event_attendees['json'][0]['member']['id']
# profile_details = meetup_client.get_member_brief(member_id)
# assert profile_details['json']['joined']
# profile_details = meetup_client.get_member_profile(member_id)
# assert 'bio' in profile_details['json']['privacy'].keys()
# member_topics = meetup_client.list_member_topics(member_id)
# assert isinstance(member_topics['json'], list)
# member_groups = meetup_client.list_member_groups(member_id)
# assert isinstance(member_groups['json'], list)
# test event, venue and group details and list group events
# event_details = meetup_client.get_member_calendar(max_results=10)
# group_url = event_details['json'][0]['group']['urlname']
# event_id = int(event_details['json'][0]['id'])
# venue_id = event_details['json'][0]['venue']['id']
# group_id = int(event_details['json'][0]['group']['id'])
# group_details = meetup_client.get_group_details(group_id=group_id)
# assert group_details['json']['next_event']['id']
# print(group_details['json']['join_info'])
# group_events = meetup_client.list_group_events(group_url)
# assert group_events['json'][0]['created']
# event_details = meetup_client.get_event_details(group_url, event_id)
# assert event_details['json']['event_hosts'][0]['id']
# venue_details = meetup_client.get_venue_details(venue_id)
# assert venue_details['json']['name']
# test list groups, group members and locations
# list_kwargs = {
# 'categories': [34],
# 'latitude': 40.75,
# 'longitude': -73.98,
# 'radius': 1.0,
# 'max_results': 5
# }
# group_list = meetup_client.list_groups(**list_kwargs)
# assert group_list['json'][0]['organizer']['id']
# group_url = group_list['json'][0]['urlname']
# group_members = meetup_client.list_group_members(group_url, max_results=5)
# assert group_members['json'][0]['group_profile']['created']
# list_kwargs = {
# 'zip_code': '94203',
# 'max_results': 1
# }
# meetup_locations = meetup_client.list_locations(**list_kwargs)
# assert meetup_locations['json'][0]['city'] == 'Sacramento'
# test join and leave group
# member_profile = meetup_client.get_member_brief()
# member_id = int(member_profile['json']['id'])
# list_kwargs = {
# 'categories': [34],
# 'latitude': 40.75,
# 'longitude': -73.98,
# 'radius': 2.0,
# 'max_results': 10,
# 'member_groups': False
# }
# group_list = meetup_client.list_groups(**list_kwargs)
# group_url = ''
# question_id = 0
# for group in group_list['json']:
# if not group['join_info']['questions_req'] and group['join_info']['questions']:
# for question in group['join_info']['questions']:
# question_tokens = question['question'].split()
# for token in question_tokens:
# if token.lower() == 'name':
# group_url = group['urlname']
# question_id = question['id']
# break
# if group_url:
# break
# if group_url:
# break
# if group_url and question_id:
# membership_answers = [ { 'question_id': question_id, 'answer_text': 'First Last'}]
# response = meetup_client.join_group(group_url, membership_answers)
# print(response['json'])
# from time import sleep
# sleep(2)
# group_url = 'gdgnyc'
# response = meetup_client.leave_group(group_url, member_id)
# assert response['code'] == 204
# test join and leave topics
# member_profile = meetup_client.get_member_brief()
# member_id = int(member_profile['json']['id'])
# topic_list = [ 511, 611, 766 ]
# member_topics = meetup_client.list_member_topics(member_id)
# topic_set = [x['id'] for x in member_topics['json']]
# assert len(set(topic_list) - set(topic_set)) == len(topic_list)
# updated_profile = meetup_client.join_topics(member_id, topic_list)
# added_topics = []
# for topic in updated_profile['json']:
# if topic['id'] in topic_list:
# added_topics.append(topic['name'])
# assert len(added_topics) == len(topic_list)
# from time import sleep
# sleep(1)
# updated_profile = meetup_client.leave_topics(member_id, topic_list)
# assert len(updated_profile['json']) == len(member_topics['json'])
# test update profile
# member_brief = meetup_client.get_member_brief()
# member_id = int(member_brief['json']['id'])
# member_profile = meetup_client.get_member_profile(member_id)
# member_profile['json']['privacy']['groups'] = 'visible'
# member_profile['json']['birthday']['year'] = 1991
# updated_profile = meetup_client.update_member_profile(member_brief['json'], member_profile['json'])
# assert updated_profile['json']['privacy']['groups'] == 'visible'
# member_profile['json']['privacy']['groups'] = 'hidden'
# member_profile['json']['birthday']['year'] = 0
# updated_profile = meetup_client.update_member_profile(member_brief['json'], member_profile['json'])
# assert updated_profile['json']['privacy']['groups'] == 'hidden'
# test join and leave event
# event_details = meetup_client.get_member_calendar(max_results=100)
# event_id = 0
# group_url = ''
# survey_questions = []
# for event in event_details['json']:
# if event['fee']['required']:
# pass
# elif event['rsvp_limit'] >= event['yes_rsvp_count'] + 1:
# pass
# elif not event['rsvp_rules']['guest_limit']:
# pass
# elif not event['rsvpable']:
# pass
# elif not event['survey_questions']:
# pass
# elif event['self']['rsvp']['response'] == 'yes':
# pass
# else:
# group_url = event['group']['urlname']
# event_id = int(event['id'])
# survey_questions = event['survey_questions']
# break
# if event_id:
# join_kwargs = {
# 'attendance_answers': [{'question_id': survey_questions[0]['id'], 'answer_text': 'maybe'}],
# 'group_url': group_url,
# 'event_id': event_id,
# 'additional_guests': 1
# }
# attendee_details = meetup_client.join_event(**join_kwargs)
# assert attendee_details['json']['guests'] == 1
# attendee_details = meetup_client.leave_event(group_url, event_id)
# assert attendee_details['json']['response'] == 'no' | mit | 1,395,610,115,595,362,000 | 43.438053 | 110 | 0.598812 | false |
katakumpo/nicedjango | tests/test_utils.py | 1 | 1809 | import pytest
from nicedjango.utils import divide_model_def, model_label, queryset_from_def
from tests.a1.models import A
from tests.a2 import models as a2
from tests.a3.models import Book
DIVIDE_RESULTS = (
(A, A, ''),
('a1-a', A, ''),
('a', A, ''),
('a1-a.foo.bar', A, 'foo.bar'),
('a.foo.bar', A, 'foo.bar'),
(a2.Book, a2.Book, ''),
('a2-book', a2.Book, ''),
('a2-book.foo.bar', a2.Book, 'foo.bar'),
(Book, Book, ''),
('a3-book', Book, ''),
('a3-book.foo.bar', Book, 'foo.bar'),
)
DIVIDE_RESULTS_IDS = list(map(lambda d: model_label(d[0]), DIVIDE_RESULTS))
@pytest.mark.parametrize(('model_def', 'model', 'rest'), DIVIDE_RESULTS, ids=DIVIDE_RESULTS_IDS)
def test_divide_model_def(model_def, model, rest):
actual = divide_model_def(model_def)
assert actual == (model, rest)
DIVIDE_FAILS = (
'article',
'article.foo',
'a1-foo.bar',
'a1',
'a1-foo',
'a1-foo.bar',
object()
)
DIVIDE_FAILS_IDS = list(map(model_label, DIVIDE_FAILS))
@pytest.mark.parametrize('model_def', DIVIDE_FAILS, ids=DIVIDE_FAILS_IDS)
def test_divide_model_def_fails(model_def):
with pytest.raises(ValueError):
divide_model_def(model_def)
ALL_QS = A.objects.all()
ALL_SQL = str(ALL_QS.query)
TEST_QS = A.objects.filter(id=1)
TEST_SQL = str(TEST_QS.query)
QS_DEF_RESULTS = (
(ALL_QS, ALL_SQL),
(A, ALL_SQL),
('a1-a', ALL_SQL),
('a', ALL_SQL),
('a1-a.all()', ALL_SQL),
('a.all()', ALL_SQL),
(TEST_QS, TEST_SQL),
('a1-a.filter(id=1)', TEST_SQL),
('a.filter(id=1)', TEST_SQL)
)
@pytest.mark.parametrize(('queryset_def', 'expected_sql'), QS_DEF_RESULTS)
def test_queryset_from_def(queryset_def, expected_sql):
queryset = queryset_from_def(queryset_def)
assert str(queryset.query) == expected_sql
| mit | 2,524,520,707,256,657,000 | 25.217391 | 96 | 0.610835 | false |
MazamaScience/ispaq | ispaq/pressureCorrelation_metrics.py | 1 | 6526 | """
ISPAQ Business Logic for pressureCorrelation Metrics.
:copyright:
Mazama Science
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function)
import math
import numpy as np
import pandas as pd
from obspy import UTCDateTime
from . import utils
from . import irisseismic
from . import irismustangmetrics
def pressureCorrelation_metrics(concierge):
"""
Generate *pressureCorrelation* metrics.
:type concierge: :class:`~ispaq.concierge.Concierge`
    :param concierge: Data access expediter.
:rtype: pandas dataframe (TODO: change this)
:return: Dataframe of pressureCorrelation metrics. (TODO: change this)
.. rubric:: Example
TODO: doctest examples
"""
# Get the logger from the concierge
logger = concierge.logger
# Container for all of the metrics dataframes generated
dataframes = []
# Default parameters from IRISMustangUtils::generateMetrics_crossTalk
    includeRestricted = False
channelFilter = "LH."
pressureLocation = "*"
pressureChannel = "LDO"
# ----- All available SNCLs -------------------------------------------------
try:
pressureAvailability = concierge.get_availability(location=pressureLocation, channel=pressureChannel)
except Exception as e:
logger.error('Metric calculation failed because concierge.get_availability failed: %s' % (e))
return None
if pressureAvailability is None or pressureAvailability.shape[0] == 0:
logger.info('No pressure channels available')
return None
else:
logger.info('%d pressure channels available' % (pressureAvailability.shape[0]))
# Loop over rows of the availability dataframe
for (pIndex, pAv) in pressureAvailability.iterrows():
logger.info(' %03d Pressure channel %s' % (pIndex, pAv.snclId))
# Get the data ----------------------------------------------
try:
r_pStream = concierge.get_dataselect(pAv.network, pAv.station, pAv.location, pAv.channel, inclusiveEnd=False)
except Exception as e:
if str(e).lower().find('no data') > -1:
logger.debug('No data for %s' % (pAv.snclId))
else:
logger.debug('No data for %s from %s: %s' % (pAv.snclId, concierge.dataselect_url, e))
continue
# Merge traces -- gracefully go to next in loop if an error reported
try:
r_pStream = irisseismic.mergeTraces(r_pStream)
except Exception as e:
logger.debug("%s" % (e))
continue
# Get all desired seismic channels for this network-station
seismicAvailability = concierge.get_availability(pAv.network, pAv.station)
# Apply the channelFilter
seismicAvailability = seismicAvailability[seismicAvailability.channel.str.contains(channelFilter)]
if seismicAvailability is None or seismicAvailability.shape[0] == 0:
            logger.debug('No seismic %s channels available' % (channelFilter))
continue
# Find the locations associated with seismic channels
locations = list(seismicAvailability.location.unique())
# NOTE: At each unique location we should have a triplet of seismic channels that can
# NOTE: be correlated with the pressure channel
############################################################
# Loop through all locations with seismic data that can be
# correlated to this pressure channel.
############################################################
for loc in locations:
logger.debug('Working on location %s' % (loc))
locationAvailability = seismicAvailability[seismicAvailability.location == loc]
if locationAvailability is None or locationAvailability.shape[0] == 0:
                logger.debug('No location %s channels available' % (loc))
continue
############################################################
# Loop through all seismic channels at this SN.L
############################################################
# Loop over rows of the availability dataframe
for (index, lAv) in locationAvailability.iterrows():
try:
r_stream = concierge.get_dataselect(lAv.network, lAv.station, lAv.location, lAv.channel, inclusiveEnd=False)
except Exception as e:
if str(e).lower().find('no data') > -1:
logger.debug('No data for %s' % (lAv.snclId))
else:
logger.debug('No data for %s from %s: %s' % (lAv.snclId, concierge.dataselect_url, e))
continue
# Merge traces -- gracefully go to next in loop if an error reported
try:
r_stream = irisseismic.mergeTraces(r_stream)
except Exception as e:
logger.debug("%s" % (e))
continue
logger.debug('Calculating pressureCorrelation metrics for %s:%s' % (pAv.snclId, lAv.snclId))
try:
df = irismustangmetrics.apply_correlation_metric(r_pStream, r_stream, 'correlation')
dataframes.append(df)
except Exception as e:
logger.debug('"pressure_effects" metric calculation failed for %s:%s: %s' % (pAv.snclId, lAv.snclId, e))
# End of locationAvailability loop
# End of locations loop
# End of pressureAvailability loop
# Concatenate and filter dataframes before returning -----------------------
if len(dataframes) == 0:
logger.warn('"pressure_correlation" metric calculation generated zero metrics')
return None
else:
result = pd.concat(dataframes, ignore_index=True)
# Change metricName to "pressure_effects"
result['metricName'] = 'pressure_effects'
result.reset_index(drop=True, inplace=True)
return(result)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| gpl-3.0 | 7,286,951,401,950,999,000 | 37.163743 | 138 | 0.564818 | false |
dodger487/MIST | data/magnetak_ml.py | 1 | 23641 | """
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Chris Riederer
# Google, Inc
# 2014-08-26
"""Contains everything related to machine learning in magnetak"""
import magnetak_detectors
import magnetak_util
import numpy as np
import scipy
import scipy.spatial
import scipy.spatial.distance
import sklearn
import sklearn.cross_validation
import sklearn.svm
import sklearn.linear_model
class MLDetector(magnetak_detectors.Detector):
"""A simple detector which detects a button press if magnet vector magnitude is
above a certain threshold"""
def __init__(self):
self.clf = None
    # fcn that takes magnetometer data and converts it to a feature vector
self.MagnetToVectorObj = None
self.lookBehindTime = 400 #ms
self.waitTime = 350 # ms
def detect(self, runData):
lookBehindTime = self.lookBehindTime * 1e6 # convert to nanoseconds
waitTime = self.waitTime *1e6
detections = []
data = np.array(runData["magnetometer"])
data = data[data[:, 2:].any(1)]
domain = data[:,0] # times
lastFiring = 0 # keep track of last time button was pulled
for sensorTime in domain[domain > domain[0]+lookBehindTime]:
# wait for a full window before looking again
if sensorTime - lastFiring < waitTime:
continue
window = data[(domain > sensorTime - lookBehindTime) & (domain <= sensorTime)]
# wait to fire if we don't have any sensor events
if len(window) == 0:
continue
X = window[:,2]
Y = window[:,3]
Z = window[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
# some basic thresholds, put in sequence for easy commenting-out!
if abs(magnitudes[0] - magnitudes[-1]) > 500:
continue
# if min(magnitudes) > 1400:
# continue
if max(magnitudes) - min(magnitudes) < 30:
continue
featureVector = self.MagnetToVectorObj.featurize(window)
if self.clf.predict(featureVector)[0]:
detections.append(sensorTime)
lastFiring = sensorTime
return detections
class MagnetometerToFeatureVector(object):
def featurize(self, magnetometer):
"""This method should take in magnetometer data and output a feature vector"""
raise NotImplementedError("Please implement this method")
class MagnitudeTemplateSumOfDifferencesMagToVec(MagnetometerToFeatureVector):
def __init__(self, templates):
self.templates = templates
    self.window_size = 400000000  # 400 ms, in nanoseconds
def SumOfDifferences(self, domain, axis, template):
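    """Mean absolute difference between the scaled axis samples and the template over the window."""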
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t)) for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.SumOfDifferences(domain, magnitudes, self.templates[3])]
class AllAxesTemplateSumOfDifferencesMagToVec(MagnetometerToFeatureVector):
def __init__(self, templates):
self.templates = templates
self.window_size = 400 * 1e6
def SumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t)) for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.SumOfDifferences(domain, X, self.templates[0]),
self.SumOfDifferences(domain, Y, self.templates[1]),
self.SumOfDifferences(domain, Z, self.templates[2]),
self.SumOfDifferences(domain, magnitudes, self.templates[3]),
]
class ManyFeaturesSumOfDifferencesMagToVec(MagnetometerToFeatureVector):
def __init__(self, templates):
self.templates = templates
# self.window_size = 500 * 1e6
self.window_size = 450 * 1e6
def SumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
# axis = axis - axis[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t))**2 for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.SumOfDifferences(domain, X, self.templates[0]),
self.SumOfDifferences(domain, Y, self.templates[1]),
self.SumOfDifferences(domain, Z, self.templates[2]),
self.SumOfDifferences(domain, magnitudes, self.templates[3]),
magnitudes[0] - magnitudes[-1],
max(magnitudes),
min(magnitudes),
max(magnitudes) - min(magnitudes),
]
class RawMagnitudeManyFeaturesMagToVec(MagnetometerToFeatureVector):
def __init__(self, templates):
self.templates = templates
# self.window_size = 500 * 1e6
self.window_size = 450 * 1e6
def SumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
# axis = axis - axis[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t))**2 for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
raw_magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
gradM = np.gradient(raw_magnitudes)
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.SumOfDifferences(domain, X, self.templates[0]),
self.SumOfDifferences(domain, Y, self.templates[1]),
self.SumOfDifferences(domain, Z, self.templates[2]),
self.SumOfDifferences(domain, magnitudes, self.templates[3]),
raw_magnitudes[0] - raw_magnitudes[-1],
raw_magnitudes[-1] - raw_magnitudes[0],
abs(raw_magnitudes[0] - raw_magnitudes[-1]),
max(raw_magnitudes),
min(raw_magnitudes),
max(raw_magnitudes) - min(raw_magnitudes),
max(gradM)
]
class NegAndPosTemplatesMagToVec(MagnetometerToFeatureVector):
def __init__(self, posTemplates, negTemplates):
self.posTemplates = posTemplates
self.negTemplates = negTemplates
self.window_size = 450 * 1e6
myFunc = lambda x : float(x) / self.window_size
self.negTemplates = [myFunc] * 4
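    # NOTE: this overrides the negTemplates argument with four identical linear-ramp templates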
def SumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
# axis = axis - axis[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t))**2 for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def CosineSimilarity(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
otherVect = [template(t) for t in domain if t < self.window_size]
distance = scipy.spatial.distance.cosine(axis, otherVect)
# features = [f if not np.isnan(f) else 0 for f in features]
# return features
return distance if not np.isnan(distance) else 0
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
raw_magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
gradM = np.gradient(raw_magnitudes)
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.CosineSimilarity(domain, X, self.posTemplates[0]),
self.CosineSimilarity(domain, Y, self.posTemplates[1]),
self.CosineSimilarity(domain, Z, self.posTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.posTemplates[3]),
self.CosineSimilarity(domain, X, self.negTemplates[0]),
self.CosineSimilarity(domain, Y, self.negTemplates[1]),
self.CosineSimilarity(domain, Z, self.negTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.negTemplates[3]),
raw_magnitudes[0] - raw_magnitudes[-1],
raw_magnitudes[-1] - raw_magnitudes[0],
abs(raw_magnitudes[0] - raw_magnitudes[-1]),
max(raw_magnitudes),
min(raw_magnitudes),
max(raw_magnitudes) - min(raw_magnitudes),
max(gradM)
]
class KitchenSync(MagnetometerToFeatureVector):
def __init__(self, posTemplates):
self.posTemplates = posTemplates
self.window_size = 400 * 1e6
myFunc = lambda x : float(x) / self.window_size
self.negTemplates = [myFunc] * 4
def CosineSimilarity(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
otherVect = [template(t) for t in domain if t < self.window_size]
distance = scipy.spatial.distance.cosine(axis, otherVect)
return distance if not np.isnan(distance) else 0
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
raw_magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
gradM = np.gradient(raw_magnitudes)
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [self.CosineSimilarity(domain, X, self.posTemplates[0]),
self.CosineSimilarity(domain, Y, self.posTemplates[1]),
self.CosineSimilarity(domain, Z, self.posTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.posTemplates[3]),
self.CosineSimilarity(domain, X, self.negTemplates[0]),
self.CosineSimilarity(domain, Y, self.negTemplates[1]),
self.CosineSimilarity(domain, Z, self.negTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.negTemplates[3]),
raw_magnitudes[0] - raw_magnitudes[-1],
raw_magnitudes[-1] - raw_magnitudes[0],
abs(raw_magnitudes[0] - raw_magnitudes[-1]),
max(raw_magnitudes),
min(raw_magnitudes),
max(raw_magnitudes) - min(raw_magnitudes),
max(gradM)
]
class MagnitudeFeaturesDataToVec(MagnetometerToFeatureVector):
def __init__(self):
self.window_size = 450 * 1e6
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
raw_magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
gradM = np.gradient(raw_magnitudes)
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
return [
raw_magnitudes[0] - raw_magnitudes[-1],
raw_magnitudes[-1] - raw_magnitudes[0],
abs(raw_magnitudes[0] - raw_magnitudes[-1]),
max(raw_magnitudes),
min(raw_magnitudes),
max(raw_magnitudes) - min(raw_magnitudes),
max(gradM)
]
class TestTemplateDifferencesMagToVec(MagnetometerToFeatureVector):
def __init__(self, posTemplates):
self.posTemplates = posTemplates
self.window_size = 450 * 1e6
myFunc = lambda x : float(x) / self.window_size
self.negTemplates = [myFunc] * 4
def SumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t)) for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def SquareSumOfDifferences(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
distances = [abs(data - template(t))**2 for data, t in zip(axis, domain) if t < self.window_size]
return sum(distances) / len(distances)
def CosineSimilarity(self, domain, axis, template):
domain = np.array(domain)
domain = domain - domain[0]
axis = magnetak_util.scale(axis)
otherVect = [template(t) for t in domain if t < self.window_size]
distance = scipy.spatial.distance.cosine(axis, otherVect)
return distance
def featurize(self, magData):
"""This method should take in magnetometer data and output a feature vector"""
magData = np.array(magData)
domain = magData[:,0] # first index is time, second is accuracy
magData = magData[ domain < domain[0] + self.window_size ]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
raw_magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
gradM = np.gradient(raw_magnitudes)
magData = magData - magData[0,:]
domain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
features = [
self.SumOfDifferences(domain, X, self.posTemplates[0]),
self.SumOfDifferences(domain, Y, self.posTemplates[1]),
self.SumOfDifferences(domain, Z, self.posTemplates[2]),
self.SumOfDifferences(domain, magnitudes, self.posTemplates[3]),
self.SumOfDifferences(domain, X, self.negTemplates[0]),
self.SumOfDifferences(domain, Y, self.negTemplates[1]),
self.SumOfDifferences(domain, Z, self.negTemplates[2]),
self.SumOfDifferences(domain, magnitudes, self.negTemplates[3]),
self.SquareSumOfDifferences(domain, X, self.posTemplates[0]),
self.SquareSumOfDifferences(domain, Y, self.posTemplates[1]),
self.SquareSumOfDifferences(domain, Z, self.posTemplates[2]),
self.SquareSumOfDifferences(domain, magnitudes, self.posTemplates[3]),
self.SquareSumOfDifferences(domain, X, self.negTemplates[0]),
self.SquareSumOfDifferences(domain, Y, self.negTemplates[1]),
self.SquareSumOfDifferences(domain, Z, self.negTemplates[2]),
self.SquareSumOfDifferences(domain, magnitudes, self.negTemplates[3]),
self.CosineSimilarity(domain, X, self.posTemplates[0]),
self.CosineSimilarity(domain, Y, self.posTemplates[1]),
self.CosineSimilarity(domain, Z, self.posTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.posTemplates[3]),
self.CosineSimilarity(domain, X, self.negTemplates[0]),
self.CosineSimilarity(domain, Y, self.negTemplates[1]),
self.CosineSimilarity(domain, Z, self.negTemplates[2]),
self.CosineSimilarity(domain, magnitudes, self.negTemplates[3]),
]
features = [f if not np.isnan(f) else 0 for f in features]
return features
class CloseToOriginal(MagnetometerToFeatureVector):
def __init__(self, T1=30, T2=130):
    self.T1 = T1
    self.T2 = T2
self.segment_time = 200 # ms
def featurize(self, data):
"""This method should take in magnetometer data and output a feature vector"""
segment_time_ns = self.segment_time * 1e6 # convert to nanoseconds
window_size = segment_time_ns * 2
data = np.array(data)
domain = data[:,0] # first index is time, second is accuracy
# magData = magData[ domain < domain[0] + self.window_size ]
segment1 = data[(domain <= domain[0] + segment_time_ns)]
segment2 = data[(domain > domain[0] + segment_time_ns) & (domain <= domain[0] + window_size)]
# window = data[(domain > sensorTime - window_size) & (domain <= sensorTime)]
if len(segment1) == 0 or len(segment2) == 0:
return [0,0]
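    # use the last sample of the window as the baseline field vector; the features are distances from it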
S0 = segment2[-1, 2:5]
offsets1 = segment1[:, 2:5] - S0
offsets2 = segment2[:, 2:5] - S0
norms1 = [np.linalg.norm(row) for row in offsets1]
norms2 = [np.linalg.norm(row) for row in offsets2]
return [min(norms1), max(norms2)]
class ThreePartFeaturizer(MagnetometerToFeatureVector):
def __init__(self, T1=30, T2=130):
self.segment1_time = 100
self.segment2_time = 200 # ms
self.segment3_time = 100 # ms
def featurize(self, data):
"""This method should take in magnetometer data and output a feature vector"""
segment_time1_ns = self.segment1_time * 1e6 # convert to nanoseconds
segment_time2_ns = self.segment2_time * 1e6 # convert to nanoseconds
segment_time3_ns = self.segment3_time * 1e6 # convert to nanoseconds
data = np.array(data)
domain = data[:,0] # first index is time, second is accuracy
segment1 = data[(domain <= domain[0] + segment_time1_ns)]
segment2 = data[(domain > domain[0] + segment_time1_ns) &
(domain <= domain[0] + segment_time1_ns + segment_time2_ns)]
segment3 = data[(domain > domain[0] + segment_time1_ns + segment_time2_ns) &
(domain <= domain[0] + segment_time1_ns + segment_time2_ns + segment_time3_ns)]
if len(segment1) == 0 or len(segment2) == 0 or len(segment3) == 0:
return [0,0,0]
S0 = segment2[-1, 2:5]
offsets1 = segment1[:, 2:5] - S0
offsets2 = segment2[:, 2:5] - S0
offsets3 = segment3[:, 2:5] - S0
norms1 = [np.linalg.norm(row) for row in offsets1]
norms2 = [np.linalg.norm(row) for row in offsets2]
norms3 = [np.linalg.norm(row) for row in offsets3]
return [max(norms1), max(norms2), max(norms3)]
class WindowFeaturizer(MagnetometerToFeatureVector):
def __init__(self, T1=30, T2=130):
self.segment_time = 200 # ms
def featurize(self, data):
"""This method should take in magnetometer data and output a feature vector"""
segment_time_ns = self.segment_time * 1e6 # convert to nanoseconds
window_size = segment_time_ns * 2
data = np.array(data)
domain = data[:,0] # first index is time, second is accuracy
# magData = magData[ domain < domain[0] + self.window_size ]
segment1 = data[(domain <= domain[0] + segment_time_ns)]
segment2 = data[(domain > domain[0] + segment_time_ns) & (domain <= domain[0] + window_size)]
if len(segment1) == 0 or len(segment2) == 0:
return np.array([0,0,0,0,0,0,0,0,0,0,0,0])
S0 = segment2[-1, 2:5]
offsets1 = segment1[:, 2:5] - S0
offsets2 = segment2[:, 2:5] - S0
norms1 = [np.linalg.norm(row) for row in offsets1]
norms2 = [np.linalg.norm(row) for row in offsets2]
window = data[(domain <= domain[0] + window_size)]
window = window - window[0,:]
norms_scaled = [np.linalg.norm(row[2:5]) for row in window]
# X = window[:,2]
# Y = window[:,3]
# Z = window[:,4]
# magnitudes = np.sqrt(X**2 + Y**2 + Z**2)
scaled_magnitudes = np.array(magnetak_util.scale(norms_scaled))
scaled_segment1 = np.array(scaled_magnitudes[(window[:,0] < segment_time_ns)])
scaled_segment2 = np.array(scaled_magnitudes[(window[:,0] > segment_time_ns) & (window[:,0] <= window_size)])
# print len(norms1), len(norms2)
# print len(scaled_segment1), len(scaled_segment2)
return np.array([
min(norms1),
max(norms1),
np.mean(norms1),
min(norms2),
max(norms2),
np.mean(norms2),
min(scaled_segment1),
max(scaled_segment1),
np.mean(scaled_segment1),
min(scaled_segment2),
max(scaled_segment2),
np.mean(scaled_segment2),
])
def GenerateData(runDataList, DataToVectorObj):
"""Given a list of runData objects, and a Featurizer object, returns a list
of feature vectors and labels"""
X, Y = [], []
for runData in runDataList:
# print runData['filename']
features = DataToVectorObj.featurize(runData['magnetometer'])
# if float('NaN') in features or float('inf') in features or float('-inf') in features:
# print runData['filename']
# if len(filter(np.isnan, features)) > 1:
# print runData['filename']
X.append(features)
if len(runData['labels']) > 0:
label = runData['labels'][0][1] # label of first labeled item
Y.append(label)
else:
Y.append(0)
return np.array(X), np.array(Y)
def TrainDetectorOnData(runDataList, featurizer):
"""Given a list of runData objects and a Featurizer, creates training data
and trains an algorithm. Returns a trained MLDetector object.
"""
# TODO(cjr): make options for using other algorithms
# train, test = sklearn.cross_validation.train_test_split(runDataList)
positives = [rd for rd in runDataList if len(rd['labels']) > 0]
posTemplates = magnetak_util.CreateTemplates(positives)
negatives = [rd for rd in runDataList if len(rd['labels']) == 0]
negTemplates = magnetak_util.CreateTemplates(negatives)
trainX, trainY = GenerateData(runDataList, featurizer)
# clf = sklearn.svm.LinearSVC()
# clf = sklearn.svm.SVC(kernel='linear')
clf = sklearn.linear_model.LogisticRegression()
clf.fit(trainX, trainY)
print clf.coef_
detector = MLDetector()
detector.clf = clf
detector.MagnetToVectorObj = featurizer
return detector
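# Minimal usage sketch (an assumption of how the pieces fit together; expects
# runData dicts with "magnetometer" and "labels" entries, as used above):
#   featurizer = MagnitudeFeaturesDataToVec()
#   detector = TrainDetectorOnData(labeledRunDataList, featurizer)
#   detections = detector.detect(someRunData)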
| apache-2.0 | 1,774,346,763,933,319,000 | 37.378247 | 113 | 0.646419 | false |
alkaitz/general-programming | water_level/water_level.py | 1 | 1035 | '''
Created on Aug 1, 2017
@author: alkaitz
'''
'''
    An integer array defines the heights of a 2D set of columns. After enough
    rain has fallen, how much water will be contained in the valleys formed by
    these mountains?
    Ex: [3 2 3]
        X   X        X W X
        X X X   ->   X X X   -> 1
        X X X        X X X
'''
def water_level(a):
if not a:
raise "Array cannot be empty"
water = 0
leftIndex, rightIndex = 0, len(a) - 1
left, right = a[0], a[-1]
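    # left/right track the tallest wall seen so far from each end; the lower of
    # the two bounds the water level at the pointer being advanced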
while leftIndex <= rightIndex:
if left <= right:
water += max(left - a[leftIndex], 0)
left = max(left, a[leftIndex])
leftIndex += 1
else:
water += max(right - a[rightIndex], 0)
right = max(right, a[rightIndex])
rightIndex -= 1
return water
if __name__ == '__main__':
assert(water_level([3, 2, 3]) == 1)
assert(water_level([1, 2, 3, 4]) == 0)
assert(water_level([5, 1, 3, 4]) == 4)
assert(water_level([2, 1, 4, 3, 6]) == 2)
print "Successful" | mit | 4,423,365,319,701,444,600 | 26.263158 | 102 | 0.533333 | false |
jonfoster/pyxb1 | examples/ndfd/showreq.py | 1 | 1914 | import pyxb.utils.domutils
import xml.dom
import xml.dom.minidom
import pyxb.namespace
# Structure
#import DWML
#print 'Validating DWML'
#DWML.Namespace.validateSchema()
#print 'Validated DWML: types %s' % ("\n".join(DWML.Namespace.typeDefinitions().keys()),)
xmls = open('NDFDgen.xml').read()
dom = xml.dom.minidom.parseString(xmls)
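# walk to the operation element inside the SOAP Body (this path assumes the
# whitespace text nodes present in NDFDgen.xml between elements)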
body_dom = dom.documentElement.firstChild.nextSibling.firstChild.nextSibling
print body_dom
# Service interface types
import ndfd
# WSDL
import pyxb.bundles.wssplat.wsdl11 as wsdl
uri_src = open('ndfdXML.wsdl')
doc = xml.dom.minidom.parseString(uri_src.read())
spec = wsdl.definitions.createFromDOM(doc.documentElement, process_schema=True)
binding = spec.binding[0]
print binding.name
port_type = spec.portType[0]
print port_type.name
bop = binding.operationMap()[body_dom.localName]
print bop.toxml("utf-8")
pop = port_type.operationMap()[body_dom.localName]
print pop.toxml("utf-8")
input = pop.input
print input.toxml("utf-8")
print type(input)
print input.message
im_en = input._namespaceContext().interpretQName(input.message)
print im_en
msg = im_en.message()
print msg
for p in msg.part:
print p.toxml("utf-8")
msg_ns = pyxb.namespace.NamespaceForURI(body_dom.namespaceURI)
print '%s %s' % (body_dom.namespaceURI, msg_ns)
parts = msg.part
nodes = body_dom.childNodes
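# pair each WSDL message part with the corresponding element child of the request body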
while parts and nodes:
p = parts.pop(0)
while nodes and (not (xml.dom.Node.ELEMENT_NODE == nodes[0].nodeType)):
nodes.pop(0)
assert nodes
n = nodes.pop(0)
if p.name != n.localName:
print 'Desynchronized: part %s expected node %s' % (p.name, n.localName)
nodes.insert(0, n)
continue
print '%s %s' % (p.name, n.localName)
#print '%s yielded %s' msg_ns
#msg = spec.messageMap()
#print msg
#print req
#dom_support = req.toDOM(pyxb.utils.domutils.BindingDOMSupport())
#dom_support.finalize()
#print dom_support.document().toxml("utf-8")
| apache-2.0 | -6,638,820,040,696,046,000 | 25.583333 | 89 | 0.718913 | false |
huazhisong/graduate_text | src/rnn/utils.py | 1 | 4176 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import time
import csv
import collections
import cPickle as pickle
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.contrib import learn
class TextLoader(object):
def __init__(self, utils_dir, data_path, batch_size, seq_length, vocab, labels, encoding='utf8'):
self.data_path = data_path
self.batch_size = batch_size
self.seq_length = seq_length
self.encoding = encoding
if utils_dir is not None:
self.utils_dir = utils_dir
label_file = os.path.join(utils_dir, 'labels.pkl')
vocab_file = os.path.join(utils_dir, 'vocab.pkl')
corpus_file = os.path.join(utils_dir, 'corpus.txt')
with open(label_file, 'r') as f:
self.labels = pickle.load(f)
self.label_size = len(self.labels)
if not os.path.exists(vocab_file):
print 'reading corpus and processing data'
self.preprocess(vocab_file, corpus_file, data_path)
else:
print 'loading vocab and processing data'
self.load_preprocessed(vocab_file, data_path)
elif vocab is not None and labels is not None:
self.vocab = vocab
self.vocab_size = len(vocab) + 1
self.labels = labels
self.label_size = len(self.labels)
self.load_preprocessed(None, data_path)
self.reset_batch_pointer()
def transform(self, d):
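        # map characters to vocab ids (unknown -> 0) and pad/truncate to seq_length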
new_d = map(self.vocab.get, d[:self.seq_length])
new_d = map(lambda i: i if i else 0, new_d)
if len(new_d) >= self.seq_length:
new_d = new_d[:self.seq_length]
else:
new_d = new_d + [0] * (self.seq_length - len(new_d))
return new_d
def preprocess(self, vocab_file, corpus_file, data_path):
with open(corpus_file, 'r') as f:
corpus = f.readlines()
corpus = ''.join(map(lambda i: i.strip(), corpus))
try:
corpus = corpus.decode('utf8')
except Exception as e:
# print e
pass
counter = collections.Counter(corpus)
count_pairs = sorted(counter.items(), key=lambda i: -i[1])
self.chars, _ = zip(*count_pairs)
with open(vocab_file, 'wb') as f:
pickle.dump(self.chars, f)
self.vocab_size = len(self.chars) + 1
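        # character ids start at 1; id 0 is reserved for padding / unknown characters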
self.vocab = dict(zip(self.chars, range(1, len(self.chars)+1)))
data = pd.read_csv(data_path, encoding='utf8')
tensor_x = np.array(list(map(self.transform, data['text'])))
tensor_y = np.array(list(map(self.labels.get, data['label'])))
self.tensor = np.c_[tensor_x, tensor_y].astype(int)
def load_preprocessed(self, vocab_file, data_path):
if vocab_file is not None:
with open(vocab_file, 'rb') as f:
self.chars = pickle.load(f)
self.vocab_size = len(self.chars) + 1
self.vocab = dict(zip(self.chars, range(1, len(self.chars)+1)))
data = pd.read_csv(data_path, encoding='utf8')
tensor_x = np.array(list(map(self.transform, data['text'])))
tensor_y = np.array(list(map(self.labels.get, data['label'])))
self.tensor = np.c_[tensor_x, tensor_y].astype(int)
def create_batches(self):
self.num_batches = int(self.tensor.shape[0] / self.batch_size)
if self.num_batches == 0:
assert False, 'Not enough data, make batch_size small.'
np.random.shuffle(self.tensor)
tensor = self.tensor[:self.num_batches * self.batch_size]
self.x_batches = np.split(tensor[:, :-1], self.num_batches, 0)
self.y_batches = np.split(tensor[:, -1], self.num_batches, 0)
def next_batch(self):
x = self.x_batches[self.pointer]
y = self.y_batches[self.pointer]
self.pointer += 1
return x, y
def reset_batch_pointer(self):
self.create_batches()
self.pointer = 0
| agpl-3.0 | 323,030,258,419,681,400 | 31.95122 | 101 | 0.559148 | false |
USGSDenverPychron/pychron | launchers/pydiode.py | 1 | 1078 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from helpers import entry_point
entry_point('pydiode', 'PyDiode', '_diode')
# ============= EOF =============================================
| apache-2.0 | -2,949,165,895,291,454,000 | 43.916667 | 81 | 0.519481 | false |
pymedusa/SickRage | medusa/show/coming_episodes.py | 1 | 7304 | # coding=utf-8
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from builtins import object
from builtins import str
from datetime import date, timedelta
from operator import itemgetter
from medusa import app
from medusa.common import (
ARCHIVED,
DOWNLOADED,
IGNORED,
SNATCHED,
SNATCHED_BEST,
SNATCHED_PROPER,
UNAIRED,
WANTED
)
from medusa.db import DBConnection
from medusa.helper.common import dateFormat, timeFormat
from medusa.helpers.quality import get_quality_string
from medusa.network_timezones import parse_date_time
from medusa.sbdatetime import sbdatetime
from medusa.tv.series import SeriesIdentifier
class ComingEpisodes(object):
"""
Missed: yesterday...(less than 1 week)
Today: today
Soon: tomorrow till next week
Later: later than next week
"""
categories = ['later', 'missed', 'soon', 'today']
sorts = {
'date': itemgetter('localtime'),
'network': itemgetter('network', 'localtime'),
'show': itemgetter('show_name', 'localtime'),
}
def __init__(self):
pass
@staticmethod
def get_coming_episodes(categories, sort, group, paused=app.COMING_EPS_DISPLAY_PAUSED):
"""
:param categories: The categories of coming episodes. See ``ComingEpisodes.categories``
:param sort: The sort to apply to the coming episodes. See ``ComingEpisodes.sorts``
:param group: ``True`` to group the coming episodes by category, ``False`` otherwise
:param paused: ``True`` to include paused shows, ``False`` otherwise
:return: The list of coming episodes
"""
categories = ComingEpisodes._get_categories(categories)
sort = ComingEpisodes._get_sort(sort)
today = date.today().toordinal()
next_week = (date.today() + timedelta(days=7)).toordinal()
recently = (date.today() - timedelta(days=app.COMING_EPS_MISSED_RANGE)).toordinal()
status_list = [DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER,
ARCHIVED, IGNORED]
db = DBConnection()
fields_to_select = ', '.join(
['airdate', 'airs', 'e.description as description', 'episode', 'imdb_id', 'e.indexer',
'indexer_id', 'name', 'network', 'paused', 's.quality', 'runtime', 'season', 'show_name',
'showid', 's.status']
)
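        # 1) episodes airing from today through next week that are not already
        #    snatched/downloaded/archived/ignored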
results = db.select(
'SELECT %s ' % fields_to_select +
'FROM tv_episodes e, tv_shows s '
'WHERE season != 0 '
'AND airdate >= ? '
'AND airdate < ? '
'AND s.indexer = e.indexer '
'AND s.indexer_id = e.showid '
'AND e.status NOT IN (' + ','.join(['?'] * len(status_list)) + ')',
[today, next_week] + status_list
)
done_shows_list = [int(result['showid']) for result in results]
placeholder = ','.join(['?'] * len(done_shows_list))
placeholder2 = ','.join(['?'] * len([DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER]))
# FIXME: This inner join is not multi indexer friendly.
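        # 2) for shows with nothing airing in the coming week, pick their next
        #    episode airing on or after next week (the "later" category)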
results += db.select(
'SELECT %s ' % fields_to_select +
'FROM tv_episodes e, tv_shows s '
'WHERE season != 0 '
'AND showid NOT IN (' + placeholder + ') '
'AND s.indexer_id = e.showid '
'AND airdate = (SELECT airdate '
'FROM tv_episodes inner_e '
'WHERE inner_e.season != 0 '
'AND inner_e.showid = e.showid '
'AND inner_e.indexer = e.indexer '
'AND inner_e.airdate >= ? '
'ORDER BY inner_e.airdate ASC LIMIT 1) '
'AND e.status NOT IN (' + placeholder2 + ')',
done_shows_list + [next_week] + [DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER]
)
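        # 3) episodes that aired within the missed range but are still wanted
        #    or unaired (the "missed" category)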
results += db.select(
'SELECT %s ' % fields_to_select +
'FROM tv_episodes e, tv_shows s '
'WHERE season != 0 '
'AND s.indexer_id = e.showid '
'AND airdate < ? '
'AND airdate >= ? '
'AND e.status IN (?,?) '
'AND e.status NOT IN (' + ','.join(['?'] * len(status_list)) + ')',
[today, recently, WANTED, UNAIRED] + status_list
)
for index, item in enumerate(results):
item['series_slug'] = str(SeriesIdentifier.from_id(int(item['indexer']), item['indexer_id']))
results[index]['localtime'] = sbdatetime.convert_to_setting(
parse_date_time(item['airdate'], item['airs'], item['network']))
results.sort(key=ComingEpisodes.sorts[sort])
if not group:
return results
grouped_results = ComingEpisodes._get_categories_map(categories)
for result in results:
if result['paused'] and not paused:
continue
            result['airs'] = str(result['airs']).replace('am', ' AM').replace('pm', ' PM').replace('  ', ' ')
result['airdate'] = result['localtime'].toordinal()
if result['airdate'] < today:
category = 'missed'
elif result['airdate'] >= next_week:
category = 'later'
elif result['airdate'] == today:
category = 'today'
else:
category = 'soon'
if len(categories) > 0 and category not in categories:
continue
if not result['network']:
result['network'] = ''
result['quality'] = get_quality_string(result['quality'])
result['airs'] = sbdatetime.sbftime(result['localtime'], t_preset=timeFormat).lstrip('0').replace(' 0', ' ')
result['weekday'] = 1 + date.fromordinal(result['airdate']).weekday()
result['tvdbid'] = result['indexer_id']
result['airdate'] = sbdatetime.sbfdate(result['localtime'], d_preset=dateFormat)
result['localtime'] = result['localtime'].toordinal()
grouped_results[category].append(result)
return grouped_results
@staticmethod
def _get_categories(categories):
if not categories:
return []
if not isinstance(categories, list):
return categories.split('|')
return categories
@staticmethod
def _get_categories_map(categories):
if not categories:
return {}
return {category: [] for category in categories}
@staticmethod
def _get_sort(sort):
sort = sort.lower() if sort else ''
if sort not in ComingEpisodes.sorts:
return 'date'
return sort
| gpl-3.0 | -8,968,734,022,603,750,000 | 35.52 | 120 | 0.583242 | false |
okffi/decisions | web/decisions/subscriptions/models.py | 1 | 4698 | from __future__ import unicode_literals
import os
import base64
from datetime import timedelta
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.postgres import fields as pgfields
def make_confirm_code():
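    # 15 random bytes -> a 20-character base64 string (fits the max_length=20 field below)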
return base64.b64encode(os.urandom(15))
class UserProfile(models.Model):
user = models.OneToOneField('auth.User', related_name="profile")
email_confirmed = models.DateTimeField(null=True, blank=True)
email_confirm_code = models.CharField(
max_length=20,
default=make_confirm_code
)
email_confirm_sent_on = models.DateTimeField(null=True, blank=True)
extra = pgfields.JSONField(default=dict)
def __unicode__(self):
return self.user.username
def confirmed_email(self):
if self.email_confirmed:
return self.user.email
class SubscriptionUser(models.Model):
user = models.ForeignKey('auth.User')
subscription = models.ForeignKey('Subscription')
active = models.BooleanField(default=True, verbose_name=_('Active'))
send_mail = models.BooleanField(
default=False,
verbose_name=_('Sends email')
)
subscribed_at = models.DateTimeField(default=now)
def __unicode__(self):
return u"%s: %s" % (self.user, self.subscription)
def is_fresh(self, for_user):
return self.subscription.subscriptionhit_set.filter(
created__gt=now()-timedelta(days=3),
notified_users=for_user
).count()
class Meta:
verbose_name = _("subscribed user")
verbose_name_plural = _("subscribed users")
class SubscriptionQuerySet(models.QuerySet):
def get_fresh(self):
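        # subscriptions with at least one hit created in the last three days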
return (
self
.filter(
subscriptionhit__created__gt=now()-timedelta(days=3)
)
.annotate(hit_count=models.Count('subscriptionhit'))
.filter(hit_count__gt=0)
)
class Subscription(models.Model):
subscribed_users = models.ManyToManyField(
'auth.User',
through=SubscriptionUser
)
previous_version = models.ForeignKey(
'self',
null=True,
blank=True,
related_name="next_versions"
)
HAYSTACK, GEO = range(2)
BACKEND_CHOICES = (
(HAYSTACK, _("Text Search")),
(GEO, _("Map Search")),
)
search_backend = models.IntegerField(
default=HAYSTACK,
choices=BACKEND_CHOICES,
verbose_name=_("Search type")
)
search_term = models.CharField(
max_length=300,
verbose_name=_('Search term')
)
created = models.DateTimeField(default=now)
extra = pgfields.JSONField(default=dict)
objects = SubscriptionQuerySet.as_manager()
def is_geo_search(self):
return self.search_backend == self.GEO
def is_text_search(self):
return self.search_backend == self.HAYSTACK
def __unicode__(self):
return self.search_term
class Meta:
verbose_name = _("subscription")
verbose_name_plural = _("subscriptions")
get_latest_by = 'created'
class SubscriptionHit(models.Model):
subscriptions = models.ManyToManyField(Subscription)
notified_users = models.ManyToManyField('auth.User')
created = models.DateTimeField(default=now)
subject = models.CharField(max_length=300)
link = models.CharField(max_length=300)
SEARCH_RESULT, COMMENT_REPLY = range(2)
HIT_TYPES = (
(SEARCH_RESULT, _("Search result")),
(COMMENT_REPLY, _("Comment reply")),
)
hit_type = models.IntegerField(default=SEARCH_RESULT, choices=HIT_TYPES)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
hit = GenericForeignKey('content_type', 'object_id')
extra = pgfields.JSONField(default=dict)
# utility functions to allow template checks
def is_comment_reply(self):
return self.hit_type == self.COMMENT_REPLY
def is_search_result(self):
return self.hit_type == self.SEARCH_RESULT
def format_subject(self):
"translated, formatted subject line"
if "subject_mapping" in self.extra:
return _(self.subject) % self.extra["subject_mapping"]
return self.subject
def __unicode__(self):
return self.subject
class Meta:
verbose_name = _("subscription hit")
verbose_name_plural = _("subscription hits")
get_latest_by = "created"
ordering = ('-created',)
| bsd-3-clause | -2,500,066,219,397,966,300 | 28.923567 | 76 | 0.649638 | false |
rowinggolfer/openmolar2 | src/lib_openmolar/client/qt4/dialogs/save_discard_cancel_dialog.py | 1 | 3474 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2010-2012, Neil Wallace <[email protected]> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
from PyQt4 import QtCore, QtGui
from lib_openmolar.common.qt4.dialogs import ExtendableDialog
class SaveDiscardCancelDialog(ExtendableDialog):
def __init__(self, message, changes, parent=None):
'''
offers a choiced of save discard cancel, but allows for examination
of what has changed.
changes should be a function, which returns a string list
'''
ExtendableDialog.__init__(self, parent)
self.set_advanced_but_text(_("What's changed?"))
self.apply_but.setText("&Save")
self.enableApply()
self.save_on_exit = True
label = QtGui.QLabel(message)
label.setAlignment(QtCore.Qt.AlignCenter)
self.insertWidget(label)
self.discard_but = self.button_box.addButton(
QtGui.QDialogButtonBox.Discard)
self.changes = changes
self.changes_list_widget = QtGui.QListWidget()
self.add_advanced_widget(self.changes_list_widget)
def sizeHint(self):
return QtCore.QSize(400,100)
def _clicked(self, but):
if but == self.discard_but:
self.discard()
return
ExtendableDialog._clicked(self, but)
def discard(self):
if QtGui.QMessageBox.question(self,_("Confirm"),
_("Are you sure you want to discard these changes?"),
QtGui.QMessageBox.No | QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No )==QtGui.QMessageBox.Yes:
self.save_on_exit = False
self.accept()
def showExtension(self, extend):
if extend:
self.changes_list_widget.clear()
self.changes_list_widget.addItems(self.changes())
ExtendableDialog.showExtension(self, extend)
if __name__ == "__main__":
from gettext import gettext as _
def changes():
return ["Sname","Fname"]
app = QtGui.QApplication([])
message = "You have unsaved changes"
dl = SaveDiscardCancelDialog(message, changes)
    print dl.exec_()
| gpl-3.0 | -4,159,314,006,897,088,500 | 39.882353 | 79 | 0.519862 | false |