repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
apyrgio/synnefo | snf-astakos-app/astakos/api/util.py | 6 | 6738 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from functools import wraps
from time import time, mktime
import datetime
from django.http import HttpResponse
from django.utils import simplejson as json
from django.template.loader import render_to_string
from astakos.im.models import AstakosUser, Component
from snf_django.lib.api import faults
from snf_django.lib.api.utils import isoformat
from astakos.im.forms import FeedbackForm
from astakos.im.user_utils import send_feedback as send_feedback_func
import logging
logger = logging.getLogger(__name__)
absolute = lambda request, url: request.build_absolute_uri(url)
def _dthandler(obj):
if isinstance(obj, datetime.datetime):
return isoformat(obj)
else:
raise TypeError
def json_response(content, status_code=None):
response = HttpResponse()
if status_code is not None:
response.status_code = status_code
response.content = json.dumps(content, default=_dthandler)
response['Content-Type'] = 'application/json; charset=UTF-8'
response['Content-Length'] = len(response.content)
return response
def xml_response(content, template, status_code=None):
response = HttpResponse()
if status_code is not None:
response.status_code = status_code
response.content = render_to_string(template, content)
response['Content-Type'] = 'application/xml; charset=UTF-8'
response['Content-Length'] = len(response.content)
return response
def check_is_dict(obj):
if not isinstance(obj, dict):
raise faults.BadRequest("Request should be a JSON dict")
def is_integer(x):
return isinstance(x, (int, long))
def are_integer(lst):
return all(map(is_integer, lst))
def validate_user(user):
# Check if the user is active.
if not user.is_active:
raise faults.Unauthorized('User inactive')
# Check if the token has expired.
if user.token_expired():
raise faults.Unauthorized('Authentication expired')
# Check if the user has accepted the terms.
if not user.signed_terms:
raise faults.Unauthorized('Pending approval terms')
def user_from_token(func):
@wraps(func)
def wrapper(request, *args, **kwargs):
try:
token = request.x_auth_token
except AttributeError:
raise faults.Unauthorized("No authentication token")
if not token:
raise faults.Unauthorized("Invalid X-Auth-Token")
try:
user = AstakosUser.objects.get(auth_token=token)
except AstakosUser.DoesNotExist:
raise faults.Unauthorized('Invalid X-Auth-Token')
validate_user(user)
request.user = user
return func(request, *args, **kwargs)
return wrapper
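# Illustrative usage sketch (not part of the original module): the decorator is
# meant to wrap API views so that request.user is resolved and validated from the
# X-Auth-Token header before the view body runs. The view name and response
# payload below are hypothetical.
#
#   @user_from_token
#   def my_view(request):
#       return json_response({'id': request.user.id})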
def component_from_token(func):
"""Decorator for authenticating component by its token.
Check that a component with the corresponding token exists. Also,
if the component's token has an expiration date, check that it has not
expired.
"""
@wraps(func)
def wrapper(request, *args, **kwargs):
try:
token = request.x_auth_token
except AttributeError:
raise faults.Unauthorized("No authentication token")
if not token:
raise faults.Unauthorized("Invalid X-Auth-Token")
try:
component = Component.objects.get(auth_token=token)
except Component.DoesNotExist:
raise faults.Unauthorized("Invalid X-Auth-Token")
# Check if the token has expired
expiration_date = component.auth_token_expires
if expiration_date:
expires_at = mktime(expiration_date.timetuple())
if time() > expires_at:
raise faults.Unauthorized("Authentication expired")
request.component_instance = component
return func(request, *args, **kwargs)
return wrapper
def get_uuid_displayname_catalogs(request, user_call=True):
# Normal Response Codes: 200
# Error Response Codes: BadRequest (400)
try:
input_data = json.loads(request.body)
except:
raise faults.BadRequest('Request body should be json formatted.')
else:
if not isinstance(input_data, dict):
raise faults.BadRequest(
'Request body should be a json formatted dictionary')
uuids = input_data.get('uuids', [])
if uuids is None and user_call:
uuids = []
displaynames = input_data.get('displaynames', [])
if displaynames is None and user_call:
displaynames = []
user_obj = AstakosUser.objects
d = {'uuid_catalog': user_obj.uuid_catalog(uuids),
'displayname_catalog': user_obj.displayname_catalog(displaynames)}
response = HttpResponse()
response.content = json.dumps(d)
response['Content-Type'] = 'application/json; charset=UTF-8'
response['Content-Length'] = len(response.content)
return response
def send_feedback(request, email_template_name='im/feedback_mail.txt'):
form = FeedbackForm(request.POST)
if not form.is_valid():
logger.error("Invalid feedback request: %r", form.errors)
raise faults.BadRequest('Invalid data')
msg = form.cleaned_data['feedback_msg']
data = form.cleaned_data['feedback_data']
try:
send_feedback_func(msg, data, request.user, email_template_name)
except:
return HttpResponse(status=502)
return HttpResponse(status=200)
def rename_meta_key(d, old, new):
if old not in d:
return
d[new] = d[old]
del(d[old])
def get_int_parameter(p):
if p is not None:
try:
p = int(p)
except ValueError:
return None
if p < 0:
return None
return p
def get_content_length(request):
content_length = get_int_parameter(request.META.get('CONTENT_LENGTH'))
if content_length is None:
raise faults.LengthRequired('Missing or invalid Content-Length header')
return content_length
def invert_dict(d):
return dict((v, k) for k, v in d.iteritems())
| gpl-3.0 | 8,798,754,084,800,237,000 | 29.627273 | 79 | 0.667112 | false | 4.020286 | false | false | false |
nieklinnenbank/bouwer | source/bouwer/plugins/ProgressBar.py | 1 | 2841 | #
# Copyright (C) 2012 Niek Linnenbank
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import argparse
import fcntl, termios, struct
from bouwer.plugin import Plugin
from bouwer.action import ActionEvent
class ProgressBar(Plugin):
"""
Output a textual progress bar on the terminal
"""
def initialize(self):
"""
Initialize plugin
"""
self.conf.cli.parser.add_argument('-p', '--progress',
dest = 'output_plugin',
action = 'store_const',
const = self,
default = argparse.SUPPRESS,
help = 'Output a progress bar to indicate action status')
def action_event(self, action, event):
"""
Called when an :class:`.ActionEvent` is triggered
"""
if event.type == ActionEvent.FINISH:
todo = len(self.build.actions.workers.pending) + len(self.build.actions.workers.running)
total = len(self.build.actions.actions)
perc = float(total - todo) / float(total)
self.update_progress(perc, action.target)
def get_console_width(self):
"""
Return the width of the console in characters
"""
# TODO: not portable to windows
try:
term = os.get_terminal_size()
return term.columns
except:
try:
return os.environ['COLUMNS']
except:
hw = struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234'))
return hw[1]
def update_progress(self, progress, label = ""):
"""
Displays or updates a console progress bar
"""
labelLength = len(label) + 16
barLength = self.get_console_width() - labelLength
block = int(round(barLength*progress))
#text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, label)
text = "\r[{0}] {1:.2%} {2}".format("#" * block + "-" * (barLength - block),
progress,
label)
sys.stdout.write(text)
sys.stdout.flush()
if progress == 1.0:
print()
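# Worked example (illustrative, not part of the original plugin): with a console
# width of 80 and the 6-character label "main.c", labelLength = 22 and
# barLength = 58; at progress = 0.5, block = 29, so the line printed is roughly
# "[#############################-----------------------------] 50.00% main.c".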
| gpl-3.0 | 2,782,745,559,287,731,700 | 33.646341 | 107 | 0.583597 | false | 4.184094 | false | false | false |
sandersnewmedia/django-fogbugz-gadget | django_fogbugz_gadget/utils.py | 1 | 3182 | from django.conf import settings
from django.core import exceptions
from django.core.cache import cache
from pyquery import PyQuery as pq
from urllib2 import HTTPError, URLError, Request, urlopen, quote
from urllib import urlencode
conf = {}
class GadgetError(Exception):
def __init__(self, msg):
self.msg = 'FogBugz Gadget says... %s' % msg
def __str__(self):
return repr(self.msg)
def _configure():
"""
Checks Django settings for necessary configuration variables.
"""
try:
conf['api_root'] = settings.FOG_API_ROOT
conf['email'] = settings.FOG_EMAIL
conf['password'] = settings.FOG_PASSWORD
conf['project'] = settings.FOG_PROJECT
conf['primary_contact'] = settings.FOG_PRIMARY_CONTACT
except AttributeError:
raise exceptions.ImproperlyConfigured
def _send(query):
# for some reason we have to grab the XML doc manually before passing to pyquery;
# the token isn't grabbed otherwise
try:
request = Request(conf['api_root'], urlencode(query.items()))
xml = pq(urlopen(request).read())
return xml
except HTTPError, e:
raise GadgetError('Error code: %s (check app settings)' % e.code)
except URLError, e:
raise GadgetError('Failed to reach server: %s (check app settings)' % e.reason)
def _logon():
reply = _send({
'cmd': 'logon',
'email': conf['email'],
'password': conf['password'] })
if reply('error'):
raise GadgetError(reply)
token = reply('token').html()
if token is None:
raise GadgetError('No token provided, login unsuccessful')
return token
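# Illustrative exchange (not from the original module), assuming the standard
# FogBugz XML API: _logon() POSTs cmd=logon with the configured email/password,
# the server replies with roughly <response><token>abc123</token></response>,
# and that token string is then sent as the 'token' parameter of later commands
# until cmd=logoff invalidates it.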
def _logoff(token):
_send({
'token': token,
'cmd': 'logoff' })
def get_priorities():
"""
Returns priority values for use in a choice field.
Values are pulled from FogBugz if not found in cache.
"""
if cache.get('priorities') is not None:
return cache.get('priorities')
if not conf:
_configure()
token = _logon()
reply = _send({
'token': token,
'cmd': 'listPriorities' })
if reply('error'):
raise GadgetError(reply)
choices, initial = [], None
for elem in reply('priority'):
val = pq(elem).find('ixPriority').html()
name = val + ' - ' + pq(elem).find('sPriority').html()
choices.append((val, name))
if pq(elem).find('fDefault').html() == 'true':
initial = val
_logoff(token)
cache.set('priorities', (choices, initial))
return choices, initial
def submit_ticket(data):
"""
Returns a case number upon successful submission of a ticket.
Cleaned form data is expected.
"""
if not conf:
_configure()
token = _logon()
reply = _send({
'cmd': 'new',
'token': token,
'sProject': conf['project'],
'sPrimary': conf['primary_contact'],
'sTitle': data['title'],
'ixPriority': data['priority'],
'sEvent': data['message'] })
case = reply('case').attr('ixBug')
if reply('error'):
raise GadgetError(reply)
_logoff(token)
return case
| mit | -7,714,947,979,779,037,000 | 24.66129 | 87 | 0.59868 | false | 3.885226 | false | false | false |
CBien/django-alert | alert/migrations/0001_initial.py | 1 | 7531 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Alert'
db.create_table('alert_alert', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('backend', self.gf('django.db.models.fields.CharField')(default='EmailBackend', max_length=20)),
('alert_type', self.gf('django.db.models.fields.CharField')(max_length=25)),
('title', self.gf('django.db.models.fields.CharField')(default=u'Premium Domain Finder alert', max_length=250)),
('body', self.gf('django.db.models.fields.TextField')()),
('when', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('last_attempt', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('is_sent', self.gf('django.db.models.fields.BooleanField')(default=False)),
('failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('alert', ['Alert'])
# Adding model 'AlertPreference'
db.create_table('alert_alertpreference', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('alert_type', self.gf('django.db.models.fields.CharField')(max_length=25)),
('backend', self.gf('django.db.models.fields.CharField')(max_length=25)),
('preference', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('alert', ['AlertPreference'])
# Adding unique constraint on 'AlertPreference', fields ['user', 'alert_type', 'backend']
db.create_unique('alert_alertpreference', ['user_id', 'alert_type', 'backend'])
def backwards(self, orm):
# Removing unique constraint on 'AlertPreference', fields ['user', 'alert_type', 'backend']
db.delete_unique('alert_alertpreference', ['user_id', 'alert_type', 'backend'])
# Deleting model 'Alert'
db.delete_table('alert_alert')
# Deleting model 'AlertPreference'
db.delete_table('alert_alertpreference')
models = {
'alert.alert': {
'Meta': {'object_name': 'Alert'},
'alert_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'backend': ('django.db.models.fields.CharField', [], {'default': "'EmailBackend'", 'max_length': '20'}),
'body': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_attempt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "u'Premium Domain Finder alert'", 'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'when': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'alert.alertpreference': {
'Meta': {'unique_together': "(('user', 'alert_type', 'backend'),)", 'object_name': 'AlertPreference'},
'alert_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'backend': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'preference': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['alert']
| mit | -2,943,198,966,531,534,000 | 65.061404 | 182 | 0.573895 | false | 3.792044 | false | false | false |
BirkbeckCTP/janeway | src/journal/migrations/0001_initial.py | 1 | 7158 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-07-11 12:03
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import journal.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ArticleOrdering',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(default=1)),
],
),
migrations.CreateModel(
name='BannedIPs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.GenericIPAddressField()),
('date_banned', models.DateField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='FixedPubCheckItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('metadata', models.BooleanField(default=False)),
('verify_doi', models.BooleanField(default=False)),
('select_issue', models.BooleanField(default=False)),
('set_pub_date', models.BooleanField(default=False)),
('notify_the_author', models.BooleanField(default=False)),
('select_render_galley', models.BooleanField(default=False)),
('select_article_image', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Issue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('volume', models.IntegerField(default=1)),
('issue', models.IntegerField(default=1)),
('issue_title', models.CharField(blank=True, max_length=300)),
('date', models.DateTimeField(default=django.utils.timezone.now)),
('order', models.IntegerField(default=1)),
('issue_type', models.CharField(choices=[('Issue', 'Issue'), ('Collection', 'Collection')], default='Issue', max_length=200)),
('issue_description', models.TextField()),
('cover_image', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.cover_images_upload_path)),
('large_image', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.issue_large_image_path)),
],
options={
'ordering': ('order', '-date'),
},
),
migrations.CreateModel(
name='Journal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=4)),
('domain', models.CharField(default='localhost', max_length=255, unique=True)),
('default_cover_image', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.cover_images_upload_path)),
('default_large_image', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.cover_images_upload_path)),
('header_image', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.cover_images_upload_path)),
('favicon', models.ImageField(blank=True, null=True, storage=django.core.files.storage.FileSystemStorage(location='/home/ajrbyers/code/janeway/src/media'), upload_to=journal.models.cover_images_upload_path)),
('description', models.TextField(blank=True, null=True, verbose_name='Journal Description')),
('is_remote', models.BooleanField(default=False)),
('remote_submit_url', models.URLField(blank=True, null=True)),
('remote_view_url', models.URLField(blank=True, null=True)),
('nav_home', models.BooleanField(default=True)),
('nav_articles', models.BooleanField(default=True)),
('nav_issues', models.BooleanField(default=True)),
('nav_contact', models.BooleanField(default=True)),
('nav_start', models.BooleanField(default=True)),
('nav_review', models.BooleanField(default=True)),
('nav_sub', models.BooleanField(default=True)),
('has_xslt', models.BooleanField(default=False)),
('hide_from_press', models.BooleanField(default=False)),
('sequence', models.PositiveIntegerField(default=0)),
],
),
migrations.CreateModel(
name='Notifications',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(max_length=100)),
('type', models.CharField(choices=[('submission', 'Submission'), ('acceptance', 'Acceptance')], max_length=10)),
('active', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='PrePublicationChecklistItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('completed', models.BooleanField(default=False)),
('completed_on', models.DateTimeField(blank=True, null=True)),
('title', models.TextField()),
('text', models.TextField()),
],
),
migrations.CreateModel(
name='PresetPublicationCheckItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField()),
('text', models.TextField()),
('enabled', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='SectionOrdering',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(default=1)),
('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='journal.Issue')),
],
),
]
| agpl-3.0 | -4,119,311,126,828,057,000 | 54.921875 | 236 | 0.593043 | false | 4.359318 | false | false | false |
zillolo/vsut-python | vsut/unit.py | 1 | 2431 | from collections import namedtuple
from enum import Enum
from math import floor, log10
from sys import stdout
from time import clock
from vsut.assertion import AssertResult
class Unit():
"""A unit is a group of tests, that are run at once.
Every method of this class that starts with 'test' will be run automatically
when the run() method is called.
Before and after every test the setup and teardown methods will be called respectively.
For every test its execution time, status, and if necessary an error message are recorded.
Attributes:
tests ({int: str}): A map that maps function names to a unique id.
times ({int: str}): A map that maps a function's execution time as a string to its id.
results ({int: AssertResult}): A map that maps a test's result to its id. If a test is successful its entry is None.
"""
def __init__(self):
self.tests = {
id: funcName
for id, funcName in enumerate([method for method in dir(self)
if callable(getattr(self, method))
and method.startswith("test")])
}
self.times = {}
self.results = {}
self.failed = False
self.ignoreUnit = False
def run(self):
"""Runs all tests in this unit.
Times the execution of all tests and records them.
"""
for id, name in self.tests.items():
# Start timing the tests.
start = clock()
try:
# Get the method that needs to be executed.
func = getattr(self, name, None)
# Run the setup method.
self.setup()
# Run the test method.
func()
# Run the teardown method.
self.teardown()
except AssertResult as e:
result = e
self.failed = True
else:
result = None
self.results[id] = result
# Add the execution time of the test to the times map.
elapsed = clock() - start
self.times[id] = "{0:.6f}".format(elapsed)
def setup(self):
"""Setup is executed before every test.
"""
pass
def teardown(self):
"""Teardown is executed after every test.
"""
pass
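# Illustrative usage sketch (not part of the original module), following the
# documented convention that every method whose name starts with 'test' is run:
#
#   class MathTests(Unit):
#       def test_addition(self):
#           pass  # a real test would raise AssertResult on failure
#
#   unit = MathTests()
#   unit.run()        # fills unit.times and unit.results, sets unit.failed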
| mit | -6,015,725,794,670,264,000 | 32.763889 | 127 | 0.547923 | false | 4.881526 | true | false | false |
bavardage/statsmodels | statsmodels/genmod/families/family.py | 4 | 36622 | '''
The one parameter exponential family distributions used by GLM.
'''
#TODO: quasi, quasibinomial, quasipoisson
#see http://www.biostat.jhsph.edu/~qli/biostatistics_r_doc/library/stats/html/family.html
# for comparison to R, and McCullagh and Nelder
import numpy as np
from scipy import special
from scipy.stats import ss
import links as L
import varfuncs as V
class Family(object):
"""
The parent class for one-parameter exponential families.
Parameters
----------
link : a link function instance
Link is the linear transformation function.
See the individual families for available links.
variance : a variance function
Measures the variance as a function of the mean probabilities.
See the individual families for the default variance function.
"""
#TODO: change these class attributes, use valid somewhere...
valid = [-np.inf, np.inf]
tol = 1.0e-05
links = []
def _setlink(self, link):
"""
Helper method to set the link for a family.
Raises a ValueError exception if the link is not available. Note that
the error message might not be that informative because it tells you
that the link should be in the base class for the link function.
See glm.GLM for a list of appropriate links for each family but note
that not all of these are currently available.
"""
#TODO: change the links class attribute in the families to hold meaningful
# information instead of a list of links instances such as
#[<statsmodels.family.links.Log object at 0x9a4240c>,
# <statsmodels.family.links.Power object at 0x9a423ec>,
# <statsmodels.family.links.Power object at 0x9a4236c>]
# for Poisson...
self._link = link
if not isinstance(link, L.Link):
raise TypeError("The input should be a valid Link object.")
if hasattr(self, "links"):
validlink = link in self.links
# validlink = max([isinstance(link, _.__class__) for _ in self.links])
validlink = max([isinstance(link, _) for _ in self.links])
if not validlink:
errmsg = "Invalid link for family, should be in %s. (got %s)"
raise ValueError(errmsg % (`self.links`, link))
def _getlink(self):
"""
Helper method to get the link for a family.
"""
return self._link
#link property for each family
#pointer to link instance
link = property(_getlink, _setlink, doc="Link function for family")
def __init__(self, link, variance):
self.link = link()
self.variance = variance
def starting_mu(self, y):
"""
Starting value for mu in the IRLS algorithm.
Parameters
----------
y : array
The untransformed response variable.
Returns
-------
mu_0 : array
The first guess on the transformed response variable.
Notes
-----
mu_0 = (endog + mean(endog))/2.
Only the Binomial family takes a different initial value.
"""
return (y + y.mean())/2.
def weights(self, mu):
"""
Weights for IRLS steps
Parameters
----------
mu : array-like
The transformed mean response variable in the exponential family
Returns
-------
w : array
The weights for the IRLS steps
Notes
-----
`w` = 1 / (link'(`mu`)**2 * variance(`mu`))
"""
return 1. / (self.link.deriv(mu)**2 * self.variance(mu))
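# Worked example (illustrative, not in the original source): for the Poisson
# family with its canonical log link, link.deriv(mu) = 1/mu and variance(mu) = mu,
# so the IRLS weights reduce to 1 / ((1/mu)**2 * mu) = mu.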
def deviance(self, Y, mu, scale=1.):
"""
Deviance of (Y,mu) pair.
Deviance is usually defined as twice the loglikelihood ratio.
Parameters
----------
Y : array-like
The endogenous response variable
mu : array-like
The inverse of the link function at the linear predicted values.
scale : float, optional
An optional scale argument
Returns
-------
DEV : array
The value of deviance function defined below.
Notes
-----
DEV = 2 * sum_i(loglike(Y_i,Y_i) - loglike(Y_i,mu_i)) / scale
The deviance functions are analytically defined for each family.
"""
raise NotImplementedError
def resid_dev(self, Y, mu, scale=1.):
"""
The deviance residuals
Parameters
----------
Y : array
The endogenous response variable
mu : array
The inverse of the link function at the linear predicted values.
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
Deviance residuals.
Notes
-----
The deviance residuals are defined for each family.
"""
raise NotImplementedError
def fitted(self, eta):
"""
Fitted values based on linear predictors eta.
Parameters
-----------
eta : array
Values of the linear predictor of the model.
dot(X,beta) in a classical linear model.
Returns
--------
mu : array
The mean response variables given by the inverse of the link
function.
"""
return self.link.inverse(eta)
def predict(self, mu):
"""
Linear predictors based on given mu values.
Parameters
----------
mu : array
The mean response variables
Returns
-------
eta : array
Linear predictors based on the mean response variables. The value
of the link function at the given mu.
"""
return self.link(mu)
def loglike(self, Y, mu, scale=1.):
"""
The loglikelihood function.
Parameters
----------
`Y` : array
Usually the endogenous response variable.
`mu` : array
Usually but not always the fitted mean response variable.
Returns
-------
llf : float
The value of the loglikelihood evaluated at (Y,mu).
Notes
-----
This is defined for each family. Y and mu are not restricted to
`Y` and `mu` respectively. For instance, the deviance function calls
both loglike(Y,Y) and loglike(Y,mu) to get the likelihood ratio.
"""
raise NotImplementedError
def resid_anscombe(self, Y, mu):
"""
The Anscome residuals.
See also
--------
statsmodels.families.family.Family docstring and the `resid_anscombe` for
the individual families for more information.
"""
raise NotImplementedError
class Poisson(Family):
"""
Poisson exponential family.
Parameters
----------
link : a link instance, optional
The default link for the Poisson family is the log link. Available
links are log, identity, and sqrt. See statsmodels.family.links for
more information.
Attributes
----------
Poisson.link : a link instance
The link function of the Poisson instance.
Poisson.variance : varfuncs instance
`variance` is an instance of
statsmodels.genmod.families.family.varfuncs.mu
See also
--------
statsmodels.genmod.families.family.Family
"""
links = [L.log, L.identity, L.sqrt]
variance = V.mu
valid = [0, np.inf]
def __init__(self, link=L.log):
self.variance = Poisson.variance
self.link = link()
def resid_dev(self, Y, mu, scale=1.):
"""Poisson deviance residual
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
resid_dev = sign(Y-mu)*sqrt(2*Y*log(Y/mu)-2*(Y-mu))
"""
return np.sign(Y-mu) * np.sqrt(2*Y*np.log(Y/mu)-2*(Y-mu))/scale
def deviance(self, Y, mu, scale=1.):
'''
Poisson deviance function
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
The deviance function at (Y,mu) as defined below.
Notes
-----
If a constant term is included it is defined as
:math:`deviance = 2*\\sum_{i}(Y*\\log(Y/\\mu))`
'''
if np.any(Y==0):
retarr = np.zeros(Y.shape)
Ymu = Y/mu
mask = Ymu != 0
YmuMasked = Ymu[mask]
Ymasked = Y[mask]
np.putmask(retarr, mask, Ymasked*np.log(YmuMasked)/scale)
return 2*np.sum(retarr)
else:
return 2*np.sum(Y*np.log(Y/mu))/scale
def loglike(self, Y, mu, scale=1.):
"""
Loglikelihood function for Poisson exponential family distribution.
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
-----
llf = scale * sum(-mu + Y*log(mu) - gammaln(Y+1))
where gammaln is the log gamma function
"""
return scale * np.sum(-mu + Y*np.log(mu)-special.gammaln(Y+1))
def resid_anscombe(self, Y, mu):
"""
Anscombe residuals for the Poisson exponential family distribution
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscome residuals for the Poisson family defined below
Notes
-----
resid_anscombe = :math:`(3/2.)*(Y^{2/3.} - \\mu**(2/3.))/\\mu^{1/6.}`
"""
return (3/2.)*(Y**(2/3.)-mu**(2/3.))/mu**(1/6.)
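# Illustrative usage sketch (not part of the original module): these family
# objects are normally passed to the GLM estimator, e.g.
#
#   import statsmodels.api as sm
#   model = sm.GLM(endog, exog, family=sm.families.Poisson())
#   result = model.fit()
#
# where the family supplies the link, variance, deviance and loglike used by IRLS.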
class Gaussian(Family):
"""
Gaussian exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Gaussian family is the identity link.
Available links are log, identity, and inverse.
See statsmodels.family.links for more information.
Attributes
----------
Gaussian.link : a link instance
The link function of the Gaussian instance
Gaussian.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.constant
See also
--------
statsmodels.genmod.families.family.Family
"""
links = [L.log, L.identity, L.inverse_power]
variance = V.constant
def __init__(self, link=L.identity):
self.variance = Gaussian.variance
self.link = link()
def resid_dev(self, Y, mu, scale=1.):
"""
Gaussian deviance residuals
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
--------
`resid_dev` = (`Y` - `mu`)/sqrt(variance(`mu`))
"""
return (Y - mu) / np.sqrt(self.variance(mu))/scale
def deviance(self, Y, mu, scale=1.):
"""
Gaussian deviance function
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
The deviance function at (Y,mu) as defined below.
Notes
--------
`deviance` = sum((Y-mu)**2)
"""
return np.sum((Y-mu)**2)/scale
def loglike(self, Y, mu, scale=1.):
"""
Loglikelihood function for Gaussian exponential family distribution.
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
Scales the loglikelihood function. The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
-----
If the link is the identity link function then the
loglikelihood function is the same as the classical OLS model.
llf = -(nobs/2)*(log(SSR) + (1 + log(2*pi/nobs)))
where SSR = sum((Y-link^(-1)(mu))**2)
If the links is not the identity link then the loglikelihood
function is defined as
llf = sum((`Y`*`mu`-`mu`**2/2)/`scale` - `Y`**2/(2*`scale`) - \
(1/2.)*log(2*pi*`scale`))
"""
if isinstance(self.link, L.Power) and self.link.power == 1:
# This is just the loglikelihood for classical OLS
nobs2 = Y.shape[0]/2.
SSR = ss(Y-self.fitted(mu))
llf = -np.log(SSR) * nobs2
llf -= (1+np.log(np.pi/nobs2))*nobs2
return llf
else:
# Return the loglikelihood for Gaussian GLM
return np.sum((Y*mu-mu**2/2)/scale-Y**2/(2*scale)-\
.5*np.log(2*np.pi*scale))
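# Sanity check (illustrative, not in the original source): with the default
# identity link this branch is the ordinary least squares loglikelihood, so
# Gaussian().loglike(Y, mu) should agree with sm.OLS(Y, X).fit().llf when mu is
# the vector of OLS fitted values.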
def resid_anscombe(self, Y, mu):
"""
The Anscombe residuals for the Gaussian exponential family distribution
Parameters
----------
Y : array
Endogenous response variable
mu : array
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals for the Gaussian family defined below
Notes
--------
`resid_anscombe` = `Y` - `mu`
"""
return Y-mu
class Gamma(Family):
"""
Gamma exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Gamma family is the inverse link.
Available links are log, identity, and inverse.
See statsmodels.family.links for more information.
Attributes
----------
Gamma.link : a link instance
The link function of the Gamma instance
Gamma.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.mu_squared
See also
--------
statsmodels.genmod.families.family.Family
"""
links = [L.log, L.identity, L.inverse_power]
variance = V.mu_squared
def __init__(self, link=L.inverse_power):
self.variance = Gamma.variance
self.link = link()
#TODO: note the note
def _clean(self, x):
"""
Helper function to trim the data so that is in (0,inf)
Notes
-----
The need for this function was discovered through usage and its
possible that other families might need a check for validity of the
domain.
"""
return np.clip(x, 1.0e-10, np.inf)
def deviance(self, Y, mu, scale=1.):
"""
Gamma deviance function
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
Deviance function as defined below
Notes
-----
`deviance` = 2*sum((Y - mu)/mu - log(Y/mu))
"""
Y_mu = self._clean(Y/mu)
return 2 * np.sum((Y - mu)/mu - np.log(Y_mu))
def resid_dev(self, Y, mu, scale=1.):
"""
Gamma deviance residuals
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
`resid_dev` = sign(Y - mu) * sqrt(-2*(-(Y-mu)/mu + log(Y/mu)))
"""
Y_mu = self._clean(Y/mu)
return np.sign(Y-mu) * np.sqrt(-2*(-(Y-mu)/mu + np.log(Y_mu)))
def loglike(self, Y, mu, scale=1.):
"""
Loglikelihood function for Gamma exponential family distribution.
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
--------
llf = -1/scale * sum(Y/mu + log(mu) + (scale-1)*log(Y) + log(scale) +\
scale*gammaln(1/scale))
where gammaln is the log gamma function.
"""
return - 1./scale * np.sum(Y/mu+np.log(mu)+(scale-1)*np.log(Y)\
+np.log(scale)+scale*special.gammaln(1./scale))
# in Stata scale is set to equal 1 for reporting llf
# in R it's the dispersion, though there is a loss of precision vs. our
# results due to an assumed difference in implementation
def resid_anscombe(self, Y, mu):
"""
The Anscombe residuals for Gamma exponential family distribution
Parameters
----------
Y : array
Endogenous response variable
mu : array
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals for the Gamma family defined below
Notes
-----
resid_anscombe = 3*(Y**(1/3.)-mu**(1/3.))/mu**(1/3.)
"""
return 3*(Y**(1/3.)-mu**(1/3.))/mu**(1/3.)
class Binomial(Family):
"""
Binomial exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Binomial family is the logit link.
Available links are logit, probit, cauchy, log, and cloglog.
See statsmodels.family.links for more information.
Attributes
----------
Binomial.link : a link instance
The link function of the Binomial instance
Binomial.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.binary
See also
--------
statsmodels.genmod.families.family.Family
Notes
-----
endog for Binomial can be specified in one of three ways.
"""
links = [L.logit, L.probit, L.cauchy, L.log, L.cloglog]
variance = V.binary # this is not used below in an effort to include n
def __init__(self, link=L.logit): #, n=1.):
#TODO: it *should* work for a constant n>1 actually, if data_weights is
# equal to n
self.n = 1 # overwritten by initialize if needed but
# always used to initialize variance
# since Y is assumed/forced to be (0,1)
self.variance = V.Binomial(n=self.n)
self.link = link()
def starting_mu(self, y):
"""
The starting values for the IRLS algorithm for the Binomial family.
A good choice for the binomial family is
starting_mu = (y + .5)/2
"""
return (y + .5)/2
def initialize(self, Y):
'''
Initialize the response variable.
Parameters
----------
Y : array
Endogenous response variable
Returns
--------
If `Y` is binary, returns `Y`
If `Y` is a 2d array, then the input is assumed to be in the format
(successes, failures) and
successes/(success + failures) is returned. And n is set to
successes + failures.
'''
if (Y.ndim > 1 and Y.shape[1] > 1):
y = Y[:,0]
self.n = Y.sum(1) # overwrite self.n for deviance below
return y*1./self.n
else:
return Y
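# Worked example (illustrative, not in the original source): for a 2d endog of
# (successes, failures) pairs such as Y = [[3, 7], [5, 5]], initialize sets
# self.n = [10, 10] and returns the success proportions [0.3, 0.5].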
def deviance(self, Y, mu, scale=1.):
'''
Deviance function for either Bernoulli or Binomial data.
Parameters
----------
Y : array-like
Endogenous response variable (already transformed to a probability
if appropriate).
mu : array
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
--------
deviance : float
The deviance function as defined below
Notes
-----
If the endogenous variable is binary:
`deviance` = -2*sum(I_one * log(mu) + (I_zero)*log(1-mu))
where I_one is an indicator function that evaluates to 1 if Y_i == 1.
and I_zero is an indicator function that evaluates to 1 if Y_i == 0.
If the model is binomial:
`deviance` = 2*sum(log(Y/mu) + (n-Y)*log((n-Y)/(n-mu)))
where Y and n are as defined in Binomial.initialize.
'''
if np.shape(self.n) == () and self.n == 1:
one = np.equal(Y,1)
return -2 * np.sum(one * np.log(mu+1e-200) + (1-one) * np.log(1-mu+1e-200))
else:
return 2*np.sum(self.n*(Y*np.log(Y/mu+1e-200)+(1-Y)*np.log((1-Y)/(1-mu)+1e-200)))
def resid_dev(self, Y, mu, scale=1.):
"""
Binomial deviance residuals
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
If `Y` is binary:
resid_dev = sign(Y-mu)*sqrt(-2*log(I_one*mu + I_zero*(1-mu)))
where I_one is an indicator function that evaluates as 1 if Y == 1
and I_zero is an indicator function that evaluates as 1 if Y == 0.
If `Y` is binomial:
resid_dev = sign(Y-mu)*sqrt(2*n*(Y*log(Y/mu)+(1-Y)*log((1-Y)/(1-mu))))
where Y and n are as defined in Binomial.initialize.
"""
mu = self.link._clean(mu)
if np.shape(self.n) == () and self.n == 1:
one = np.equal(Y,1)
return np.sign(Y-mu)*np.sqrt(-2*np.log(one*mu+(1-one)*(1-mu)))\
/scale
else:
return np.sign(Y-mu) * np.sqrt(2*self.n*(Y*np.log(Y/mu+1e-200)+(1-Y)*\
np.log((1-Y)/(1-mu)+1e-200)))/scale
def loglike(self, Y, mu, scale=1.):
"""
Loglikelihood function for Binomial exponential family distribution.
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
--------
If `Y` is binary:
`llf` = scale*sum(Y*log(mu/(1-mu))+log(1-mu))
If `Y` is binomial:
`llf` = scale*sum(gammaln(n+1) - gammaln(y+1) - gammaln(n-y+1) +\
y*log(mu/(1-mu)) + n*log(1-mu)
where gammaln is the log gamma function and y = Y*n with Y and n
as defined in Binomial initialize. This simply makes y the original
number of successes.
"""
if np.shape(self.n) == () and self.n == 1:
return scale*np.sum(Y*np.log(mu/(1-mu)+1e-200)+np.log(1-mu))
else:
y=Y*self.n #convert back to successes
return scale * np.sum(special.gammaln(self.n+1)-\
special.gammaln(y+1)-special.gammaln(self.n-y+1)\
+y*np.log(mu/(1-mu))+self.n*np.log(1-mu))
def resid_anscombe(self, Y, mu):
'''
The Anscombe residuals
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals as defined below.
Notes
-----
sqrt(n)*(cox_snell(Y)-cox_snell(mu))/(mu**(1/6.)*(1-mu)**(1/6.))
where cox_snell is defined as
cox_snell(x) = betainc(2/3., 2/3., x)*betainc(2/3.,2/3.)
where betainc is the incomplete beta function
The name 'cox_snell' is idiosyncratic and is simply used for
convenience following the approach suggested in Cox and Snell (1968).
Further note that
cox_snell(x) = x**(2/3.)/(2/3.)*hyp2f1(2/3.,1/3.,5/3.,x)
where hyp2f1 is the hypergeometric 2f1 function. The Anscombe
residuals are sometimes defined in the literature using the
hyp2f1 formulation. Both betainc and hyp2f1 can be found in scipy.
References
----------
Anscombe, FJ. (1953) "Contribution to the discussion of H. Hotelling's
paper." Journal of the Royal Statistical Society B. 15, 229-30.
Cox, DR and Snell, EJ. (1968) "A General Definition of Residuals."
Journal of the Royal Statistical Society B. 30, 248-75.
'''
cox_snell = lambda x: special.betainc(2/3., 2/3., x)\
*special.beta(2/3.,2/3.)
return np.sqrt(self.n)*(cox_snell(Y)-cox_snell(mu))/\
(mu**(1/6.)*(1-mu)**(1/6.))
class InverseGaussian(Family):
"""
InverseGaussian exponential family.
Parameters
----------
link : a link instance, optional
The default link for the inverse Gaussian family is the
inverse squared link.
Available links are inverse_squared, inverse, log, and identity.
See statsmodels.family.links for more information.
Attributes
----------
InverseGaussian.link : a link instance
The link function of the inverse Gaussian instance
InverseGaussian.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.mu_cubed
See also
--------
statsmodels.genmod.families.family.Family
Notes
-----
The inverse Gaussian distribution is sometimes referred to in the
literature as the Wald distribution.
"""
links = [L.inverse_squared, L.inverse_power, L.identity, L.log]
variance = V.mu_cubed
def __init__(self, link=L.inverse_squared):
self.variance = InverseGaussian.variance
self.link = link()
def resid_dev(self, Y, mu, scale=1.):
"""
Returns the deviance residuals for the inverse Gaussian family.
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
`dev_resid` = sign(Y-mu)*sqrt((Y-mu)**2/(Y*mu**2))
"""
return np.sign(Y-mu) * np.sqrt((Y-mu)**2/(Y*mu**2))/scale
def deviance(self, Y, mu, scale=1.):
"""
Inverse Gaussian deviance function
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
Deviance function as defined below
Notes
-----
`deviance` = sum((Y-mu)**2/(Y*mu**2))
"""
return np.sum((Y-mu)**2/(Y*mu**2))/scale
def loglike(self, Y, mu, scale=1.):
"""
Loglikelihood function for inverse Gaussian distribution.
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
-----
`llf` = -(1/2.)*sum((Y-mu)**2/(Y*mu**2*scale) + log(scale*Y**3)\
+ log(2*pi))
"""
return -.5 * np.sum((Y-mu)**2/(Y*mu**2*scale)\
+ np.log(scale*Y**3) + np.log(2*np.pi))
def resid_anscombe(self, Y, mu):
"""
The Anscombe residuals for the inverse Gaussian distribution
Parameters
----------
Y : array
Endogenous response variable
mu : array
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals for the inverse Gaussian distribution as
defined below
Notes
-----
`resid_anscombe` = log(Y/mu)/sqrt(mu)
"""
return np.log(Y/mu)/np.sqrt(mu)
class NegativeBinomial(Family):
"""
Negative Binomial exponential family.
Parameters
----------
link : a link instance, optional
The default link for the negative binomial family is the log link.
Available links are log, cloglog, identity, nbinom and power.
See statsmodels.family.links for more information.
alpha : float, optional
The ancillary parameter for the negative binomial distribution.
For now `alpha` is assumed to be nonstochastic. The default value
is 1. Permissible values are usually assumed to be between .01 and 2.
Attributes
----------
NegativeBinomial.link : a link instance
The link function of the negative binomial instance
NegativeBinomial.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.nbinom
See also
--------
statsmodels.genmod.families.family.Family
Notes
-----
Power link functions are not yet supported.
"""
links = [L.log, L.cloglog, L.identity, L.nbinom, L.Power]
#TODO: add the ability to use the power links with an if test
# similar to below
variance = V.nbinom
def __init__(self, link=L.log, alpha=1.):
self.alpha = alpha
self.variance = V.NegativeBinomial(alpha=self.alpha)
if isinstance(link, L.NegativeBinomial):
self.link = link(alpha=self.alpha)
else:
self.link = link()
def _clean(self, x):
"""
Helper function to trim the data so that is in (0,inf)
Notes
-----
The need for this function was discovered through usage and its
possible that other families might need a check for validity of the
domain.
"""
return np.clip(x, 1.0e-10, np.inf)
def deviance(self, Y, mu, scale=1.):
"""
Returns the value of the deviance function.
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
Deviance function as defined below
Notes
-----
`deviance` = sum(piecewise)
where piecewise is defined as
if :math:`Y_{i} == 0:`
piecewise_i = :math:`2\\log\\left(1+\\alpha*\\mu\\right)/\\alpha`
if :math:`Y_{i} > 0`:
piecewise_i = :math:`2 Y \\log(Y/\\mu)-2/\\alpha(1+\\alpha Y)*\\log((1+\\alpha Y)/(1+\\alpha\\mu))`
"""
iszero = np.equal(Y,0)
notzero = 1 - iszero
tmp = np.zeros(len(Y))
Y_mu = self._clean(Y/mu)
tmp = iszero*2*np.log(1+self.alpha*mu)/self.alpha
tmp += notzero*(2*Y*np.log(Y_mu)-2/self.alpha*(1+self.alpha*Y)*\
np.log((1+self.alpha*Y)/(1+self.alpha*mu)))
return np.sum(tmp)/scale
def resid_dev(self, Y, mu, scale=1.):
'''
Negative Binomial Deviance Residual
Parameters
----------
Y : array-like
`Y` is the response variable
mu : array-like
`mu` is the fitted value of the model
scale : float, optional
An optional argument to divide the residuals by scale
Returns
--------
resid_dev : array
The array of deviance residuals
Notes
-----
`resid_dev` = sign(Y-mu) * sqrt(piecewise)
where piecewise is defined as
if :math:`Y_i = 0`:
:math:`piecewise_i = 2*log(1+alpha*mu)/alpha`
if :math:`Y_i > 0`:
:math:`piecewise_i = 2*Y*log(Y/\\mu)-2/\\alpha*(1+\\alpha*Y)*log((1+\\alpha*Y)/(1+\\alpha*\\mu))`
'''
iszero = np.equal(Y,0)
notzero = 1 - iszero
tmp=np.zeros(len(Y))
tmp = iszero*2*np.log(1+self.alpha*mu)/self.alpha
tmp += notzero*(2*Y*np.log(Y/mu)-2/self.alpha*(1+self.alpha*Y)*\
np.log((1+self.alpha*Y)/(1+self.alpha*mu)))
return np.sign(Y-mu)*np.sqrt(tmp)/scale
def loglike(self, Y, fittedvalues=None):
"""
The loglikelihood function for the negative binomial family.
Parameters
----------
Y : array-like
Endogenous response variable
fittedvalues : array-like
The linear fitted values of the model. This is dot(exog,params).
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
-----
sum(Y*log(alpha*exp(fittedvalues)/(1+alpha*exp(fittedvalues))) -\
log(1+alpha*exp(fittedvalues))/alpha + constant)
where constant is defined as
constant = gammaln(Y + 1/alpha) - gammaln(Y + 1) - gammaln(1/alpha)
"""
# don't need to specify mu
if fittedvalues is None:
raise AttributeError('The loglikelihood for the negative binomial \
requires that the fitted values be provided via the `fittedvalues` keyword \
argument.')
constant = special.gammaln(Y + 1/self.alpha) - special.gammaln(Y+1)\
-special.gammaln(1/self.alpha)
return np.sum(Y*np.log(self.alpha*np.exp(fittedvalues)/\
(1 + self.alpha*np.exp(fittedvalues))) - \
np.log(1+self.alpha*np.exp(fittedvalues))/self.alpha\
+ constant)
def resid_anscombe(self, Y, mu):
"""
The Anscombe residuals for the negative binomial family
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals as defined below.
Notes
-----
`resid_anscombe` = (hyp2f1(-alpha*Y)-hyp2f1(-alpha*mu)+\
1.5*(Y**(2/3.)-mu**(2/3.)))/(mu+alpha*mu**2)**(1/6.)
where hyp2f1 is the hypergeometric 2f1 function parameterized as
hyp2f1(x) = hyp2f1(2/3.,1/3.,5/3.,x)
"""
hyp2f1 = lambda x : special.hyp2f1(2/3.,1/3.,5/3.,x)
return (hyp2f1(-self.alpha*Y)-hyp2f1(-self.alpha*mu)+1.5*(Y**(2/3.)-\
mu**(2/3.)))/(mu+self.alpha*mu**2)**(1/6.)
| bsd-3-clause | -8,825,799,312,798,299,000 | 28.321057 | 107 | 0.547922 | false | 4.00153 | false | false | false |
udoyen/pythonlearning | 1-35/ex26.py | 1 | 2284 | def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
five = 10 - 2 + 3 - 5
print "This should be five: %s" % five
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
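# Worked example: with start_point = 10000 the formula yields
# jelly_beans = 5,000,000, jars = 5,000 and crates = 50, which is what the
# print statements below report.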
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print "With a starting point of: %d" % start_point
print "We'd have %d jeans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
sentence = "All good\tthings come to those who wait."
words = break_words(sentence)
sorted_words = sort_words(words)
print_first_word(words)
print_last_word(words)
print_first_word(sorted_words)
print_last_word(sorted_words)
sorted_words = sort_sentence(sentence)
# print_sorted_words
print_first_and_last(sentence)
print_first_and_last_sorted(sentence)
| mit | -921,647,685,885,816,600 | 23.826087 | 83 | 0.681699 | false | 3.212377 | false | false | false |
FrederichRiver/neutrino | applications/saturn/saturn/dev.py | 1 | 12577 | #!/usr/bin/python3
from abc import ABCMeta, abstractmethod
from venus.stock_base import StockEventBase, StockBase
from venus import stock_base
import pandas
import datetime
import numpy as np
from jupiter.utils import TIME_FMT
from polaris.mysql8 import mysqlBase  # needed by ClosePrice below
"""
Trend-following approach.
Every market-timing method ultimately outputs a stock list.
Every risk-control component emits its signal in real time.
Every stock-selection method also ultimately outputs a stock list.
1. When the 90-day MA is below the 250-day MA and the 30-day MA crosses above the 60-day MA, emit a buy signal.
2. Check whether the n-day MA is below the m-day MA.
3. Check whether the n-day MA crosses above or below the m-day MA.
4. Fetch the data series for a given time period.
"""
class StockDataSet(object):
"""
    Hold stock data loaded from an exterior source such as a pandas.DataFrame.
    Usage: StockDataSet.data = pandas.DataFrame
"""
def __init__(self):
self.data = pandas.DataFrame()
def set_stock_data(self, df:pandas.DataFrame):
"""
:param df columns [trade_date, open_price, close_price, high_price, low_price]
"""
if df.shape[1] != 5:
print("data shape error, input date should has 5 columns, date type first, and others float.")
df.columns = ['trade_date', 'open', 'close', 'high', 'low']
df['trade_date'] = pandas.to_datetime(df['trade_date'],format=TIME_FMT)
df.set_index('trade_date', inplace=True)
mean = [5, 10,]
for i in mean:
df[f"MA{i}"] = df['close'].rolling(i).mean()
return df
def set_time_period(self, start_date:datetime.date, end_date:datetime.date):
self.data = self.data.loc[start_date:end_date]
return self.data
def get_data(self):
return self.data
def init_data(self, stock_code, start_date):
pass
def detect_cross(self):
import numpy as np
self.data['DIFF'] = self.data['MA5'] - self.data['MA10']
self.data['DIFF2'] = self.data['DIFF'].shift(1)
self.data.dropna(inplace=True)
self.data['flag'] = self.data['DIFF'] * self.data['DIFF2']
self.data['flag'] = self.data['flag'].apply(lambda x: 1 if x<=0 else 0 )
self.data['flag'] *= np.sign(self.data['DIFF'])
self.data['signal'] = self.data['flag'].apply(bs_signal )
self.data['amp'] = self.data['close'] / self.data['close'].shift(1)
# print(self.data)
def profit(self):
self.data['value'] = 1.0
p = 0
v = 1.0
for index,row in self.data.iterrows():
if p:
v *= row['amp']
self.data.loc[index,'value'] = v
if row['signal'] == 'B':
p = 1.0
elif row['signal'] == 'S':
p = 0.0
print(self.data)
import matplotlib.pyplot as plt
result = pandas.DataFrame()
#result['close'] = self.data['close']
result['value'] = self.data['value']
result.index = self.data.index
result.plot()
plt.show()
def bs_signal(x):
if x>0:
return 'B'
elif x<0:
return 'S'
else:
return np.nan
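# Minimal usage sketch for StockDataSet (added for illustration; the OHLC frame is
# synthetic and the date strings must match TIME_FMT from jupiter.utils):
#   ds = StockDataSet()
#   ds.data = ds.set_stock_data(df)   # df columns: trade_date, open, close, high, low
#   ds.detect_cross()                 # adds MA5/MA10 cross 'signal' column ('B'/'S')
#   ds.profit()                       # toy backtest that plots the cumulative value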
class StratagyBase(StockDataSet):
def __init__(self, header):
        super(StratagyBase, self).__init__()
self.header = header
self.price_data = ClosePrice(header)
self.data = StockDataSet()
def set_benchmark(self, stock_code)->bool:
return self.price_data.get_benchmark(stock_code)
def get_stock_data(self, stock_code:str):
return self.price_data.get_benchmark(stock_code)
def detect_cross(self):
self.data['DIFF'] = self.data['MA5'] - self.data['MA10']
self.data['DIFF2'] = self.data['DIFF'].shift(-1)
self.data['flag'] = 0
print(self.data)
def conv(x:list, y:list)-> float:
result = 0
for i in range(len(x)):
result += x[i]*y[i]
return result
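def _conv_example():
    # Added for illustration: conv() is a plain dot product of two equal-length lists.
    assert conv([1, 2, 3], [4, 5, 6]) == 32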
class ClosePrice(object):
"""
    A helper for fetching close prices.
    : stock_code : benchmark code
    : header : database connection header
    : return : a pandas.DataFrame result
"""
def __init__(self, header):
self.mysql = mysqlBase(header)
def get_data(self, stock_code:str, query_type='close'):
if query_type=='close':
query_column = 'trade_date,close_price'
def_column = ['trade_date', f"{stock_code}"]
elif query_type == 'full':
query_column = 'trade_date,open_price,close_price,highest_price,lowest_price'
def_column = ['trade_date','open','close','high','low']
result = self.mysql.select_values(stock_code, query_column)
result.columns = def_column
result['trade_date'] = pandas.to_datetime(result['trade_date'])
result.set_index('trade_date', inplace=True)
return result
def get_benchmark(self, stock_code:str):
return self.get_data(stock_code, query_type='close')
class RiskBase(object):
def __init__(self):
pass
def set_threshold(self, threshold):
raise NotImplementedError
# Event (market, signal, order, fill)
# Event Queue
# portfolio
# DataHandler (abstract base class) produces market events
# Strategy
class Strategy(object):
__metaclass__ = ABCMeta
@abstractmethod
def interface(self):
raise NotImplementedError
# ExecutionHandler
# Back test
class MarketEventBase(object):
pass
class SingalBase(object):
def __init__(self):
pass
class CAMP(StockEventBase):
def __init__(self, header):
super(CAMP, self).__init__(header)
self._rate = 0.0
self._market_asset = 'SH000300'
@property
def risk_free_rate(self):
return self._rate
@risk_free_rate.setter
def risk_free_rate(self, rate):
self._rate = rate
@property
def market_asset(self):
return self.get_stock_var(self._market_asset)
@market_asset.setter
def market_asset(self, stock_code):
self._market_asset = stock_code
def get_stock_var(self, stock_code:str):
import pandas
from dev_global.env import TIME_FMT
df = self.mysql.select_values(stock_code, 'trade_date,close_price')
df.columns = ['date', 'close']
df['date'] = pandas.to_datetime(df['date'], format=TIME_FMT)
df.set_index('date', inplace=True)
df[stock_code] = ( df['close'] - df['close'].shift(1) ) / df['close'].shift(1)
result = df[stock_code]
return result
def asset_beta(self, df:pandas.DataFrame, market_asset:str):
import numpy as np
beta_matrix = {}
for index, col in df.iteritems():
beta = df[[index, market_asset]].cov().iloc[0, 1] / df[market_asset].var()
beta_matrix[index] = beta
return beta_matrix
def sharpe_ratio(self, df:pandas.DataFrame, market_asset:str):
import numpy as np
sharpe_matrix = {}
for index, col in df.iteritems():
sharpe_ratio = np.sqrt(250)*( df[index].mean() - self.risk_free_rate/250 ) / df[index].std()
sharpe_matrix[index] = sharpe_ratio
return sharpe_matrix
def event_sharpe_analysis():
from dev_global.env import GLOBAL_HEADER
event = CAMP(GLOBAL_HEADER)
event.risk_free_rate = 0.03
print(event.risk_free_rate)
market_asset = event.market_asset
stock_pool = [market_asset]
stock_list = ['SH600000', 'SZ002230', 'SH601818']
for stock in stock_list:
df = event.get_stock_var(stock)
stock_pool.append(df)
asset_group = pandas.concat(stock_pool, axis=1)
beta = event.asset_beta(asset_group[-500:], 'SH000300')
print(beta)
from datetime import date
input_group = asset_group.loc[date(2017,1,1):date(2017,12,31),:]
sharpe = event.sharpe_ratio(input_group, 'SH000300')
print(sharpe)
class filterBase(StockBase):
def filter_roe(self, threshold=0.1):
"""
filter by ROE
"""
import pandas
today = '2020-03-31'
df = self.mysql.condition_select(
'finance_perspective', 'char_stock_code,float_roe', f"report_date='{today}'")
df.columns = ['stock', 'roe']
df = df[df['roe']>threshold]
result = df['stock'].to_json()
return df
def user_defined_pool(self, tag:str):
"""
Tag file format:
{ "stock": "XXXXXX" },
"""
import os
import json
from dev_global.env import SOFT_PATH
stock_pool = StockPool()
tag_file = SOFT_PATH + f"config/{tag}-tag.json"
if os.path.exists(tag_file):
with open(tag_file, 'r') as f:
file_content = f.read()
stock_json = json.loads(file_content)
stock_pool.pool(stock_json)
return stock_pool
class StockPool(object):
def __init__(self, pool_name=None):
self._name = ''
if isinstance(pool_name, str):
self.name = pool_name
self._pool = []
@property
def pool(self):
return self._pool
@pool.setter
def pool(self, value):
if isinstance(value, dict):
self._pool.append(value)
elif isinstance(value, list):
for stock in value:
if isinstance(stock, dict):
self._pool.append(stock)
def set_empty(self):
self._pool = []
class orderBase(object):
def __init__(self):
pass
def trade_record(self, stock_code, trade_time, trade_type, unit_cost, quantity, order_time=None, flag=None):
bid = {
"order": stock_code,
"order_time": order_time if not order_time else trade_time,
"trade_time": trade_time,
"trade_type": trade_type,
"unit_cost": unit_cost,
"quantity": quantity,
"fee": 0.0,
"cost": 0.0,
"flag": False
}
return bid
def order_deal(self, order):
if isinstance(order, dict):
order['flag'] = True
return order
class assetBase(object):
"""Docstring for asset. """
def __init__(self, code, start_time, name=None, cost=0.0, quantity=0):
"""TODO: to be defined. """
# stock code
self.code = code
# stock name could be null
self.name = name
# to be delete
self.unit_cost = cost
# quantity
self.quantity = quantity
# cost
self.cost = 0.0
# asset value
self.value = 0.0
self.start_time = start_time
self.trade_record = None
def order(self):
self.cost = self.quantity * self.unit_cost
return self.cost
def reset(self):
self.unit_cost = 0.0
self.quantity = 0.0
self.cost = 0.0
self.value = 0.0
#market event engine, running and generate event signal, broadcasting to market.
#date engine generate date series.
#data engine generate data to strategy.
class NoName(object):
def __init__(self):
pass
#build object stock,get stock price series
#run strategy checking, if cross, send signal
#recieve signal, generating order
#recieve order, record.
#calculate returns.
#evaluation, beta, sharpe etc.
import datetime
class DateTimeEngine(object):
def __init__(self):
self.START_DATE = datetime.date(1990,12,19)
def date_range(self):
# full date delta from start date to today.
n = datetime.date.today() - self.START_DATE
# generate date series.
date_series = [self.START_DATE + datetime.timedelta(days=i) for i in range(n.days + 1)]
return date_series
def holiday_from_stock(self, date_series):
from polaris.mysql8 import mysqlBase
from dev_global.env import GLOBAL_HEADER
mysql = mysqlBase(GLOBAL_HEADER)
result = mysql.select_values('SH000001', 'trade_date')
trade_date = list(result[0])
for dt in trade_date:
date_series.remove(dt)
return date_series
def holiday_from_file(self):
import datetime
import time
holiday = []
with open('/home/friederich/Documents/dev/neutrino/applications/config/holiday', 'r') as f:
dt = f.readline().strip()
while dt:
                # Assumes ISO formatted dates (YYYY-MM-DD) in the holiday file.
                holiday.append(datetime.datetime.strptime(dt, "%Y-%m-%d").date())
dt = f.readline().strip()
print(holiday)
if __name__ == "__main__":
from dev_global.env import GLOBAL_HEADER, TIME_FMT
dateEngine = DateTimeEngine()
date_list = dateEngine.date_range()
holiday = dateEngine.holiday_from_stock(date_list)
dateEngine.holiday_from_file() | bsd-3-clause | 7,995,254,481,442,483,000 | 29.328431 | 112 | 0.58466 | false | 3.477515 | false | false | false |
KleeTaurus/luojilab_toolbox | llt/url.py | 1 | 1642 | # -*- coding: utf-8 -*-
from urlparse import urlparse, urlunparse
import hashlib
from .utils import smart_str
def format_url(params):
"""
    Convert a dict object into a url query string (utf8 encoded)
    :param params: dict object
    :return: url string
"""
return '&'.join(['%s=%s' % (smart_str(k), smart_str(params[k])) for k in sorted(params)])
def encode_dict(params):
"""
    Convert the values of a dict to utf8 encoding and drop key/value pairs whose value is empty.
    :param params: dict object
    :return: dict object with utf8 encoded values
"""
return {k: smart_str(params[k]) for k in params if params[k]}
def sign_url(params, key_secret, key_name=None, sign_type='md5', upper_case=False):
"""
    Compute the signature of url parameters
    :param params: dict object to be signed
    :param key_secret: signing secret
    :param key_name: name of the key parameter
    :param sign_type: signature algorithm, md5/sha1
    :param upper_case: whether to convert the signature to upper case
    :return: signature value
"""
url = format_url(params)
url = '%s&%s=%s' % (url, key_name, key_secret) if key_name else '%s%s' % (url, key_secret)
if sign_type == 'md5':
digest = hashlib.md5(url).hexdigest()
elif sign_type == 'sha1':
digest = hashlib.sha1(url).hexdigest()
else:
raise NotImplementedError('Method %s is not supported' % sign_type)
return digest.upper() if upper_case else digest
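def _sign_url_example():
    """
    Added for illustration: sign a small parameter dict with an MD5 digest.
    The parameter names, secret and key name below are made-up placeholders.
    """
    params = {'appid': 'demo', 'total_fee': '1'}
    return sign_url(params, key_secret='secret', key_name='sign', upper_case=True)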
def append_params_to_url(url, params):
"""
    Append extra parameters to the query string of the target url
    :param url: target url
    :param params: parameters to append
    :return: the url with the parameters appended
    """
    scheme, netloc, path, url_params, query, fragment = urlparse(url)
    extra = format_url(params)
    query = '%s&%s' % (query, extra) if query else extra
    return urlunparse((scheme, netloc, path, url_params, query, fragment))
| apache-2.0 | 7,041,231,253,622,957,000 | 24.672727 | 94 | 0.623938 | false | 2.430293 | false | false | false |
kjagoo/wger_stark | wger/utils/resources.py | 2 | 1338 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
import logging
from tastypie.authorization import ReadOnlyAuthorization
logger = logging.getLogger(__name__)
class UserObjectsOnlyAuthorization(ReadOnlyAuthorization):
'''
Custom authorization class to limit the user's access to his own objects
'''
def read_detail(self, object_list, bundle):
# For models such as userprofile where we don't have an owner function
if hasattr(bundle.obj, 'user'):
return bundle.obj.user == bundle.request.user
try:
return bundle.obj.get_owner_object().user == bundle.request.user
# Objects without owner information can be accessed
except AttributeError:
return True
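# Usage sketch (added for illustration): with django-tastypie this class is normally
# attached to a resource's Meta; "WorkoutResource" and "Workout" are hypothetical names.
#
# class WorkoutResource(ModelResource):
#     class Meta:
#         queryset = Workout.objects.all()
#         authorization = UserObjectsOnlyAuthorization()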
| agpl-3.0 | -4,364,026,597,023,908,000 | 35.162162 | 78 | 0.7287 | false | 4.52027 | false | false | false |
quasipedia/googios | googios/calendars.py | 1 | 3716 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Interface with Google calendar service.
The module should really be called "calendar", not "calendars", but due to how
[poorly] imports are done by the Google python API, that would generate a name
conflict.
'''
import datetime
from collections import namedtuple
from utils import log, dtfy
# This hard limit prevents the query to Google from looping forever, in case there
# are "repeat forever" recurring events in the calendar
CACHE_SIZE_HARD_LIMIT = 666
Event = namedtuple('Event', 'start end fuzzy_name')
class Calendar(object):
'''
A Google calendar interface.
Arguments:
cid: The `CalendarId` to use
'''
def __init__(self, cid, service, min_end, max_start, all_day_offset=0):
self.cid = cid
self.service = service
self.min_end = min_end
self.max_start = max_start
self.all_day_offset = all_day_offset
self.__timezone = False # `None` may be a valid timezone setting
def __iter__(self):
'''Iterate on all the events in the calendar.'''
events = self.get_events(min_end=self.min_end)
for event in events:
            # get_events() returns Event namedtuples, so use attribute access.
            yield event.start, event.end, event.fuzzy_name
def get_events(self, min_end=None, max_start=None):
'''Retrieve a list of events for a given timespan
Arguments:
min_end: the minimum finishing ISO datetime for requested events.
max_start: the maximum starting ISO datetime for requested events.
'''
min_end = dtfy(min_end or self.min_end, as_iso_string=True)
max_start = dtfy(max_start or self.max_start, as_iso_string=True)
msg = 'Querying calendar for range: {} to {}'
log.debug(msg.format(min_end, max_start))
page_token = None
ret = []
while True:
log.debug('Issuing query with page_token = {}'.format(page_token))
events = self.service.events().list(
calendarId=self.cid,
singleEvents=True,
timeMin=min_end,
timeMax=max_start,
orderBy='startTime',
pageToken=page_token)
data = events.execute()
fix = self.fix_all_day_long_events
for event in data['items']:
ret.append(Event(fix(event['start']),
fix(event['end']),
event['summary']))
page_token = data.get('nextPageToken')
if not page_token or len(ret) >= CACHE_SIZE_HARD_LIMIT:
break
return ret
def fix_all_day_long_events(self, something):
'''Shift start date of "all day long" events to match correct start.'''
# All-day events have start and ending dates filed under the key 'date'
# rather than 'dateTime'.
        if something.get('dateTime') is not None:
return dtfy(something['dateTime'])
else:
date = dtfy(something['date'])
return date + datetime.timedelta(hours=self.all_day_offset)
@property
def timezone(self):
if self.__timezone is False:
tzone = self.service.settings().get(setting='timezone').execute()
self.__timezone = tzone['value']
return self.__timezone
def get_available_calendars(service):
'''Return a dictionary with all available calendars.'''
    log.debug('Retrieving available calendars...')
data = service.calendarList().list(showHidden=True).execute()
return {cal['id']: cal['summary'] for cal in data['items']}
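# Usage sketch (added for illustration): `service` is an authenticated Google Calendar
# API client built elsewhere in googios; the calendar id and dates are hypothetical.
#
# cal = Calendar('[email protected]', service,
#                min_end=datetime.datetime(2015, 1, 1),
#                max_start=datetime.datetime(2015, 2, 1))
# for start, end, fuzzy_name in cal:
#     print(start, end, fuzzy_name)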
| gpl-3.0 | 1,010,868,637,582,418,600 | 35.07767 | 79 | 0.593918 | false | 4.070099 | false | false | false |
samfu1994/cs838webpage | code/read.py | 1 | 4816 | import re
import csv
from os import listdir
from os.path import isfile, join
from extra_neg import extra_negative_instances
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
def main():
mypath = "/Users/fuhao/Development/cs838webpage/textFile/"
files = [f for f in listdir(mypath) if isfile(join(mypath, f)) and f[-4:] == ".txt"]
train_files = files[:200]
test_files = files[200:]
data = []
word = ""
state_set = ["Alabama", "Alaska", "Arizona", "Arkansas", "California", "Colorado", "Connecticut", "Delaware", "Florida", "Georgia",\
"Hawaii", "Idaho", "Illinois", "Indiana", "Iowa", "Kansas", "Kentucky", "Louisiana", "Maine", "Maryland", "Massachusetts", "Michigan", "Minnesota",\
"Mississippi", "Missouri", "Montana", "Nebraska", "Nevada", "Hampshire", "Jersey", "Mexico", "York", "Carolina", "Dakota",\
"Ohio", "Oklahoma", "Oregon", "Pennsylvania", "Rhode", "Tennessee", "Texas", "Utah", "Vermont", "Virginia", "Washington", "Wisconsin", "Wyoming"
]
for i in range(len(state_set)):
state_set[i] = state_set[i].strip().lower()
fieldsName = ['word', 'has_university', 'has_state_name', 'has_state_word', 'length', 'has_dash', 'all_capital', 'has_num', 'label']
with open("train_data.csv", 'w') as csvFile:
csvWriter = csv.DictWriter(csvFile, fieldnames=fieldsName)
csvWriter.writeheader()
for f in train_files:
with open(mypath + f) as file:
lines = file.readlines()
for line in lines: #each line
data = re.findall("<[pn].*?>", line)
l = len(data)
if l != 0:
for i in range(l):#each instance
label = 0
has_university = 0
has_state_name = 0
has_state_word = 0
length = 0
has_dash = 0
all_capital = 1
has_num = 0
cur_list = data[i].split()
tmp = cur_list[0]
tmp = tmp.strip()
if tmp == "<p1" or tmp == "<p2":
label = 1
origin_list = cur_list[1:-1]
cur_list = cur_list[1:-1]
for i in range(len(cur_list)):
cur_list[i] = cur_list[i].strip().lower()
if ("university" in cur_list) or ("college" in cur_list) or ("institute" in cur_list):
has_university = 1
if "state" in cur_list:
has_state_word = 1
word = ""
for ele in cur_list:
word += ele
length += len(ele)
if ele.find("-") != -1:
has_dash = 1
if hasNumbers(ele):
has_num = 1
if ele in state_set:
has_state_name = 1
if len(origin_list) == 1:
for i in range(len(origin_list[0])):
if origin_list[0][i] > 'Z' or origin_list[0][i] < 'A':
all_capital = 0
break
else:
all_capital = 0
row = {'word':word, 'has_university' : has_university, 'has_state_name' : has_state_name, 'has_state_word' : has_state_word,\
'length' : length, 'has_dash' : has_dash, 'all_capital' : all_capital, 'has_num' : has_num, 'label' : label}
csvWriter.writerow(row)
with open("test_data.csv", 'w') as csvFile:
csvWriter = csv.DictWriter(csvFile, fieldnames=fieldsName)
csvWriter.writeheader()
for f in test_files:
with open(mypath + f) as file:
lines = file.readlines()
for line in lines: #each line
data = re.findall("<[pn].*?>", line)
l = len(data)
if l != 0:
for i in range(l):#each instance
label = 0
has_university = 0
has_state_name = 0
has_state_word = 0
length = 0
has_dash = 0
all_capital = 1
has_num = 0
cur_list = data[i].split()
tmp = cur_list[0]
tmp = tmp.strip()
if tmp == "<p1" or tmp == "<p2":
label = 1
origin_list = cur_list[1:-1]
cur_list = cur_list[1:-1]
for i in range(len(cur_list)):
cur_list[i] = cur_list[i].strip().lower()
if ("university" in cur_list) or ("college" in cur_list) or ("institute" in cur_list):
has_university = 1
if "state" in cur_list:
has_state_word = 1
word = ""
for ele in cur_list:
word += ele
length += len(ele)
if ele.find("-") != -1:
has_dash = 1
if hasNumbers(ele):
has_num = 1
if ele in state_set:
has_state_name = 1
if len(origin_list) == 1:
for i in range(len(origin_list[0])):
if origin_list[0][i] > 'Z' or origin_list[0][i] < 'A':
all_capital = 0
break
else:
all_capital = 0
row = {'word':word, 'has_university' : has_university, 'has_state_name' : has_state_name, 'has_state_word' : has_state_word,\
'length' : length, 'has_dash' : has_dash, 'all_capital' : all_capital, 'has_num' : has_num, 'label' : label}
csvWriter.writerow(row)
if __name__ == "__main__":
main()
| mit | 9,044,423,941,146,742,000 | 34.153285 | 150 | 0.557724 | false | 2.771001 | false | false | false |
openprocurement/openprocurement.buildout | aws_startup.py | 1 | 1609 | import argparse
import urlparse
import os
import ConfigParser
import subprocess
from requests import Session
ZONE_TO_ID = {
'eu-west-1a': 'a',
'eu-west-1b': 'b',
'eu-west-1c': 'c'
}
cur_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser(description='------ AWS Startup Script ------')
parser.add_argument('api_dest', type=str, help='Destination to database')
params = parser.parse_args()
api_ini_file_path = os.path.join(cur_dir, 'etc/openprocurement.api.ini')
session = Session()
resp = session.get('http://169.254.169.254/latest/meta-data/placement/availability-zone')
if resp.status_code == 200:
    zone = resp.text
    zone_suffix = ZONE_TO_ID.get(zone, '')
    if zone_suffix:
        domain = '{}.{}'.format(zone_suffix, params.api_dest)
    else:
        domain = params.api_dest
else:
    # Fall back to the plain destination when the availability zone cannot be read.
    zone_suffix = ''
    domain = params.api_dest
if os.path.isfile(api_ini_file_path):
config = ConfigParser.ConfigParser()
config.read([api_ini_file_path])
for k in ['couchdb.url', 'couchdb.admin_url']:
value = config.get('app:api', k)
url = urlparse.urlparse(value)
if url.username:
url = url._replace(netloc='{}:{}@{}:{}'.format(url.username, url.password,
domain, url.port))
else:
url = url._replace(netloc='{}:{}'.format(domain, url.port))
config.set('app:api', k, url.geturl())
if zone_suffix:
config.set('app:api', 'id', zone_suffix)
with open(api_ini_file_path, 'wb') as configfile:
config.write(configfile)
| apache-2.0 | -726,926,914,793,573,000 | 34.755556 | 90 | 0.585457 | false | 3.490239 | true | false | false |
odoo-arg/odoo_l10n_ar | l10n_ar_account_payment/__manifest__.py | 1 | 1740 | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'l10n_ar_account_payment',
'version': '1.0',
'category': 'Accounting',
'summary': 'Recibos y ordenes de pago para Argentina',
'author': 'OPENPYME S.R.L',
'website': 'http://www.openpyme.com.ar',
'depends': [
'l10n_ar_point_of_sale',
],
'data': [
'views/account_payment_view.xml',
'views/account_payment_type_view.xml',
'wizard/account_register_payments_view.xml',
'views/menu.xml',
'data/account_journal.xml',
'security/ir.model.access.csv',
'data/security.xml',
],
'installable': True,
'auto_install': False,
'application': True,
'description': """
Recibos y ordenes de pago para Argentina
========================================
Talonarios
""",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,252,213,929,818,221,000 | 27.52459 | 78 | 0.572989 | false | 3.892617 | false | false | false |
lmotta/gimpselectionfeature_plugin | json2html.py | 1 | 1475 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Json2HTML
Description : Function to get dictionary(json) and create HTML how a list
Date : June, 2018
copyright : (C) 2018 by Luiz Motta
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
def getHtmlTreeMetadata(value, html):
if isinstance( value, dict ):
html += "<ul>"
for key, val in sorted( iter( value.items() ) ):
if not isinstance( val, dict ):
html += "<li>%s: %s</li> " % ( key, val )
else:
html += "<li>%s</li> " % key
html = getHtmlTreeMetadata( val, html )
html += "</ul>"
return html
return html
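# Example (added for illustration): render a small nested metadata dict as an HTML list.
if __name__ == "__main__":
  sample = { 'driver': 'GTiff', 'size': { 'width': 256, 'height': 256 } }
  print( getHtmlTreeMetadata( sample, '' ) )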
| gpl-2.0 | -8,841,037,792,462,765,000 | 45.09375 | 82 | 0.360678 | false | 5.363636 | false | false | false |
Eppie/sse-popcount | scripts/report.py | 1 | 6707 | import os
import os.path
import data
from table import Table
from codecs import open
from collections import OrderedDict
TIME_PATTERN = '%0.5f'
class Report(object):
def __init__(self, options):
self.options = options
with open(options.input, 'rt') as f:
self.data = data.ExperimentData(f)
def generate_rest(self):
params = {
'CSV_FILE' : self.options.input,
'ARCHITECTURE' : self.options.architecture,
'RUNS' : self.options.runs,
'CPU' : self.options.cpu,
'COMPILER' : self.options.compiler,
'DATE' : self.options.date,
'PROCEDURES' : self.generate_procedures_descriptions(),
'TIME_TABLE' : self.generate_time_table(),
'TIME_GRAPHS' : self.generate_time_graphs_per_size(),
'SPEEDUP_TABLE' : self.generate_speedup_table(),
}
pattern = self._load_file('main-pattern.rst')
return pattern % params
def generate_time_table(self):
table = Table()
# prepare header
header = ["procedure"]
for size in self.data.sizes:
header.append('%d B' % size)
table.set_header(header)
# get data
for procedure in self.data.procedures:
data = self.data.data_for_procedure(procedure)
row = [procedure]
for item in data:
fmt = TIME_PATTERN % item.time
if item.time == self.data.get_shortest_time(item.size):
row.append('**%s**' % fmt)
else:
row.append(fmt)
table.add_row(row)
return table
def generate_time_graphs_per_size(self):
pattern = self._load_file('detail-pattern.rst')
result = ''
for size in self.data.sizes:
params = {
'SIZE' : size,
'TABLE' : self.generate_time_table_for_size(size),
}
result += pattern % params
return result
def generate_time_table_for_size(self, size):
table = Table()
table.set_header(["procedure", "time [s]", "relative time (less is better)"])
chars = 50
data = self.data.data_for_size(size)
max_time = max(item.time for item in data)
for item in data:
time = TIME_PATTERN % item.time
bar = unicode_bar(item.time/max_time, chars)
table.add_row([item.procedure, time, bar])
return table
def generate_speedup_table(self):
table = Table()
# prepare header
header = ["procedure"]
for size in self.data.sizes:
header.append('%d B' % size)
table.set_header(header)
reference_time = {}
for size in self.data.sizes:
time = self.data.get(self.data.procedures[0], size)
reference_time[size] = time
# get data
for proc in self.data.procedures:
measurments = self.data.data_for_procedure(proc)
row = [proc]
for item in measurments:
speedup = reference_time[item.size] / item.time
row.append('%0.2f' % speedup)
table.add_row(row)
return table
def generate_procedures_descriptions(self):
definitions = self.__parse_cpp()
table = Table()
header = ["procedure", "description"]
table.set_header(header)
for proc, desc in definitions.iteritems():
if proc in self.data.procedures:
table.add_row([proc, desc])
return table
def __parse_cpp(self):
root = os.path.dirname(__file__)
src = os.path.join(root, "../function_registry.cpp")
with open(src) as f:
lines = [line.strip() for line in f]
start = lines.index("// definition start")
end = lines.index("// definition end")
definitions = lines[start + 1:end]
i = 0
L = OrderedDict()
while i < len(definitions):
line = definitions[i]
if line.startswith("add_trusted("):
name = line[len("add_trusted("):][1:-2]
description = definitions[i+1][1:-2]
L[name] = description
i += 2
elif line.startswith("add("):
name = line[len("add("):][1:-2]
description = definitions[i+1][1:-2]
L[name] = description
i += 2
else:
i += 1
return L
def _load_file(self, path):
root = os.path.dirname(__file__)
src = os.path.join(root, path)
with open(src, 'rt', encoding='utf-8') as f:
return f.read()
def unicode_bar(value, width):
fractions = (
'', # 0 - empty
u'\u258f', # 1/8
u'\u258e', # 2/8
u'\u258d', # 3/8
u'\u258c', # 4/8
u'\u258b', # 5/8
u'\u258a', # 6/8
u'\u2589', # 7/8
)
block = u'\u2588'
assert 0.0 <= value <= 1.0
k8 = int(value * width * 8)
k = k8 / 8
f = k8 % 8
return block * k + fractions[f]
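def _unicode_bar_example():
    # Added for illustration: a half-filled bar over 10 character cells is
    # exactly five full-block characters (the fractional part is empty).
    assert unicode_bar(0.5, 10) == u'\u2588' * 5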
def get_options():
import optparse
import sys
import time
current_date = time.strftime('%Y-%m-%d')
default_output = "report.rst"
opt = optparse.OptionParser()
opt.add_option("--csv", dest="input",
help="input CSV filename")
opt.add_option("--output", dest="output", default=default_output,
help="output RST filename [default: %s]" % default_output)
# experiment details
opt.add_option("--runs", dest="runs",
help="how many times measurments were repeated")
opt.add_option("--cpu", dest="cpu",
help="CPU details")
opt.add_option("--compiler", dest="compiler",
help="compiler version")
opt.add_option("--architecture", dest="architecture",
help="target architecture (SSE for -msse, AVX2 for -mavx2, etc.)")
# for archivists :)
opt.add_option("--date", dest="date", default=current_date,
help="date [default: %s]" % current_date)
options, _ = opt.parse_args()
return options
def main():
options = get_options()
report = Report(options)
with open(options.output, 'wt', encoding='utf-8') as out:
out.write(report.generate_rest())
print "%s generated" % options.output
if __name__ == '__main__':
main()
| bsd-2-clause | -4,509,866,872,130,011,600 | 24.697318 | 86 | 0.513642 | false | 3.931419 | false | false | false |
COSMOGRAIL/COSMOULINE | pipe/5_pymcs_psf_scripts/2b_facult_applymasks_NU.py | 1 | 2336 | # We look for the ds9 region files, read them, and mask corresponding regions in the sigma images.
execfile("../config.py")
from kirbybase import KirbyBase, KBError
from variousfct import *
import cosmics # used to read and write the fits files
import ds9reg
import glob
import numpy as np
import star
psfstars = star.readmancat(psfstarcat)
# We read the region files
for i, s in enumerate(psfstars):
print '---------------PSF STAR------------------'
print s.name
print '-----------------------------------------'
s.filenumber = (i+1)
possiblemaskfilepath = os.path.join(configdir, "%s_mask_%s.reg" % (psfkey, s.name))
print 'mask file path is: ',possiblemaskfilepath
if os.path.exists(possiblemaskfilepath):
s.reg = ds9reg.regions(64, 64) # hardcoded for now # Warning, can cause a lot of trouble when dealing with images other than ECAM
s.reg.readds9(possiblemaskfilepath, verbose=False)
s.reg.buildmask(verbose = False)
print "You masked %i pixels of star %s." % (np.sum(s.reg.mask), s.name)
else:
print "No mask file for star %s." % (s.name)
if not update:
proquest(askquestions)
# Select images to treat
db = KirbyBase()
if thisisatest :
print "This is a test run."
images = db.select(imgdb, ['gogogo', 'treatme', 'testlist',psfkeyflag], [True, True, True, True], returnType='dict', sortFields=['setname', 'mjd'])
elif update:
print "This is an update."
images = db.select(imgdb, ['gogogo', 'treatme', 'updating',psfkeyflag], [True, True, True, True], returnType='dict', sortFields=['setname', 'mjd'])
askquestions = False
else :
images = db.select(imgdb, ['gogogo', 'treatme',psfkeyflag], [True, True, True], returnType='dict', sortFields=['setname', 'mjd'])
print "Number of images to treat :", len(images)
proquest(askquestions)
for i, image in enumerate(images):
print "%i : %s" % (i+1, image['imgname'])
imgpsfdir = os.path.join(psfdir, image['imgname'])
os.chdir(os.path.join(imgpsfdir, "results"))
for s in psfstars:
if not hasattr(s, 'reg'): # If there is no mask for this star
continue
# We modify the sigma image
sigfilename = "starsig_%03i.fits" % s.filenumber
(sigarray, sigheader) = fromfits(sigfilename, verbose=False)
sigarray[s.reg.mask] = 1.0e8
tofits(sigfilename, sigarray, sigheader, verbose=False)
print 'saved !'
print "Done."
| gpl-3.0 | -2,536,765,267,724,173,000 | 29.337662 | 148 | 0.679366 | false | 2.956962 | false | false | false |
ErasRasmuson/LA | LogPrep/LogPrep.py | 1 | 15749 | # -*- coding: cp1252 -*-
"""
###############################################################################
HEADER: LogPrep.py
AUTHOR: Esa Heikkinen
DATE: 24.10.2014
DOCUMENT: -
VERSION: "$Id$"
REFERENCES: -
PURPOSE:
CHANGES: "$Log$"
###############################################################################
"""
import argparse
import os.path
import sys
import time
import re
from datetime import datetime, timedelta
from LogPrepColOpers import *
import glob
import errno
g_version = "$Id$"
output_lines = []
output_col_lines = {}
divide_col_values = {}
columns_new_list = []
#******************************************************************************
#
# CLASS: LogFile
#
#******************************************************************************
class LogFile:
global variables
global date
name = "Unknown"
#output_lines = []
columns_list = []
column_new_list = []
columns_oper = []
columns_new_oper = {}
line_csv = ""
def __init__(self,name):
self.name=name
self.output_lines = []
self.columns_list = []
self.column_new_list = []
self.columns_oper = []
self.columns_new_oper = {}
self.line_csv = ""
def check_conversions(self):
        # Go through the column conversions
counter = 0
for col_oper_output in self.columns_new_oper.keys():
counter += 1
col_oper = self.columns_new_oper[col_oper_output]
            # Apply the column conversion to the row
code_str = compile(col_oper,"<string>","eval")
try:
variables[col_oper_output] = eval(code_str)
except:
print("ERR: Executing: \"%s\"\n" % col_oper)
sys.exit()
#print("%3d: %-15s = %s = %s" % (counter,col_oper_output,col_oper,variables[col_oper_output]))
def set_columns_conversions(self,columns_list,columns_oper):
self.columns_list = columns_list
self.columns_oper = columns_oper
self.columns_new_oper = {}
        # Go through the column operators
for column_oper in self.columns_oper:
print("column_oper: %s" % column_oper)
columns_oper_list = column_oper.split("=")
columns_oper_list_len = len(columns_oper_list)
if columns_oper_list_len != 2:
print("ERR: in column_oper: %s" % column_oper)
continue
            # Separate the output variable (column) from its function and input variables
output_var = columns_oper_list[0]
oper_func_vars = columns_oper_list[1]
output_var = output_var.strip("<>")
            # Check whether this is a new column and, if so, add it to the variables and the column list
if not output_var in self.columns_list:
print("New column: %s" % output_var)
variables[output_var]=""
self.column_new_list.append(output_var)
            # Find the column (variable) names on the line,
            # enclosed between "<" and ">" characters
str_len = len(oper_func_vars)
start_ptr = 0
end_ptr = 0
new_str = oper_func_vars
while end_ptr < str_len:
start_ptr = new_str.find('<',end_ptr)
if start_ptr == -1:
#print("Not found: <")
break
start_ptr += 1
end_ptr = new_str.find('>',start_ptr)
if end_ptr == -1:
#print("Not found: >")
break
col_name = new_str[start_ptr:end_ptr]
print("col_name : %s" % (col_name) )
#print("str_len = %d, start_ptr=%d, end_ptr=%d" % (str_len,start_ptr,end_ptr))
                # Replace the column names with variable names
col_name_str = "<" + col_name + ">"
col_name_var_str = "variables[\"" + col_name + "\"]"
new_str = new_str.replace(col_name_str,col_name_var_str)
str_len = len(new_str)
self.columns_new_oper[output_var] = new_str
print("new_str = %s" % new_str)
def read_column_names(self,logfile_name,output_sep_char):
#print("LogFile: read_column_names: %s" % logfile_name)
cols_list = []
        # Read the first line of the log file
if os.path.isfile(logfile_name):
f = open(logfile_name, 'r')
line = f.readline()
            # Strip the newline and other trailing whitespace from the line
line = line.rstrip()
f.close()
if len(line) > 2:
cols_list = line.split(output_sep_char)
#print("read_column_names: cols_list: %s" % cols_list)
return cols_list
def read(self,logfile_name,regexps,output_sep_char,input_read_mode,output_files_divide_col):
print("")
vars_list_len = len(self.columns_list)
print("LogFile: read logfile_name: %s" % logfile_name)
        # Read the log file
if os.path.isfile(logfile_name):
f = open(logfile_name, 'r')
lines = f.readlines()
f.close()
line_counter = 0
line_sel_counter = 0
error_counter = 0
            # Go through the lines of the log file
for line in lines:
                # Discard empty lines
if len(line) < 2:
continue
                # Strip the newline and other trailing whitespace from the line
line = line.rstrip()
line_counter += 1
#print("LogFile: line: %5d: %s" % (line_counter,line))
                # If a regexp was given (values must be parsed from the line)
if len(regexps) > 0:
                    # Parse the line and store the values into temporary variables
p = re.compile(regexps)
m = p.match(line)
#print("m: %s" % (m))
if m != None:
line_sel_counter += 1
#print("")
for cnt in range(vars_list_len):
var_name = self.columns_list[cnt]
var_value = m.group(cnt+1)
variables[var_name]=var_value
#print("%5d: Var name: %-20s value: %s" % (cnt,var_name,var_value))
self.generate_new_line(variables,output_sep_char,output_files_divide_col)
                # Otherwise the values are already available on a CSV-style line
else:
                    # Skip the header line
if line_counter == 1:
continue
columns_value_list = line.split(output_sep_char)
vars_value_list_len = len(columns_value_list)
if vars_value_list_len != vars_list_len:
print("ERR: Number of columns: %s and %s are different in line: %s" %
(vars_value_list_len,vars_list_len,line,output_files_divide_col))
sys.exit()
line_sel_counter += 1
for cnt in range(vars_list_len):
var_name = self.columns_list[cnt]
var_value = columns_value_list[cnt]
variables[var_name]=var_value
#print("%5d: Var name: %-20s value: %s" % (cnt,var_name,var_value))
self.generate_new_line(variables,output_sep_char,output_files_divide_col)
print("LogFile: Msg-type = %s" % self.name)
print("LogFile: line_counter = %d" % line_counter)
print("LogFile: line_sel_counter = %d" % line_sel_counter)
else:
print("LogFile: ERR: Not found logfile: %s" % logfile_name)
def get(self):
print("")
return self.output_lines
def get_columns(self):
print("")
#print("self.columns_list = %s" % self.columns_list)
#print("self.column_new_list = %s" % self.column_new_list)
return self.columns_list + self.column_new_list
def generate_new_line(self,variables,output_sep_char,output_files_divide_col):
        # Perform any column conversions
self.check_conversions()
        # Go through the columns of the row
column_list_all = self.columns_list + self.column_new_list
self.line_csv = ""
for col_name in column_list_all:
col_val = variables[col_name]
            # Append the value to the end of the output (csv) line
self.line_csv = self.line_csv + output_sep_char + col_val
if output_files_divide_col == None:
            # Store the output file line
self.output_lines.append(self.line_csv)
else:
col_value = variables[output_files_divide_col]
try:
divide_col_values[col_value] += 1
except:
divide_col_values[col_value] = 1
            # Store the output line under the value of the given divide column
try:
output_col_lines[col_value].append(self.line_csv)
except:
output_col_lines[col_value] = [self.line_csv]
#******************************************************************************
#
# FUNCTION: make_dir_if_no_exist
#
#******************************************************************************
def make_dir_if_no_exist(file_path_name):
# Python3
#os.makedirs(os.path.dirname(file_path_name), exist_ok=True)
# Python2
if not os.path.exists(os.path.dirname(file_path_name)):
try:
os.makedirs(os.path.dirname(file_path_name))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
#******************************************************************************
#
# FUNCTION: write_output_file
#
#******************************************************************************
#def write_output_file(logfile_new_name,output_lines,column_name_prefix,output_sep_char,output_files_divide_col):
def write_output_file(output_path,logfile_new_name,column_name_prefix,output_sep_char,output_files_divide_col,combined_file_name,msg_type):
global output_lines
global output_col_lines
global divide_col_values
if output_files_divide_col == None:
line_cnt = 0
make_dir_if_no_exist(logfile_new_name)
f = open(logfile_new_name, 'w')
        # Header
f.writelines("%sCounter" % column_name_prefix)
for col_name in columns_new_list:
            # Add the prefix to the beginning of the column names
column_name_with_prefix = column_name_prefix + col_name
#f.writelines("\t" + col_name)
f.writelines(output_sep_char + column_name_with_prefix)
f.writelines("\n")
        # Rows
for output_line in output_lines:
line_cnt += 1
str = "%d %s\n" % (line_cnt,output_line)
#print("%s" % str)
f.writelines(str)
else:
file_cnt = 0
col_value_list = divide_col_values.keys()
for col_value in col_value_list:
line_cnt = 0
file_cnt += 1
logfile_new_name = output_path + combined_file_name + "_" + col_value + "_" + msg_type + ".csv"
print("writes: %5d: logfile = %s" % (file_cnt,logfile_new_name))
make_dir_if_no_exist(logfile_new_name)
f = open(logfile_new_name, 'w')
            # Header
f.writelines("%sCounter" % column_name_prefix)
for col_name in columns_new_list:
                # Add the prefix to the beginning of the column names
column_name_with_prefix = column_name_prefix + col_name
#f.writelines("\t" + col_name)
f.writelines(output_sep_char + column_name_with_prefix)
f.writelines("\n")
            # Rows
for output_line in output_col_lines[col_value]:
line_cnt += 1
str = "%d %s\n" % (line_cnt,output_line)
#print("%s" % str)
f.writelines(str)
f.close()
#******************************************************************************
#
# FUNCTION: main
#
#******************************************************************************
print("version: %s" % g_version)
start_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('-input_path','--input_path', dest='input_path', help='input_path')
parser.add_argument('-input_files','--input_files', dest='input_files', help='input_files')
parser.add_argument('-input_read_mode','--input_read_mode', dest='input_read_mode', help='input_read_mode')
parser.add_argument('-combined_file_name','--combined_file_name', dest='combined_file_name', help='combined_file_name')
parser.add_argument('-output_path','--output_path', dest='output_path', help='output_path')
parser.add_argument('-output_files_divide_col','--output_files_divide_col', dest='output_files_divide_col', help='output_files_divide_col')
parser.add_argument('-output_sep_char','--output_sep_char', dest='output_sep_char', help='output_sep_char')
parser.add_argument('-date','--date', dest='date', help='date')
parser.add_argument('-msg_type','--msg_type', dest='msg_type', help='msg_type')
parser.add_argument('-column_name_prefix','--column_name_prefix', dest='column_name_prefix', help='column_name_prefix')
parser.add_argument('-columns','--columns', dest='columns', help='columns')
parser.add_argument('-regexps','--regexps', dest='regexps', help='regexps')
parser.add_argument('-column_oper','--column_oper', action='append', dest='column_oper', default=[], help='column_oper')
args = parser.parse_args()
print("input_path : %s" % args.input_path)
print("input_files : %s" % args.input_files)
print("input_read_mode : %s" % args.input_read_mode)
print("combined_file_name : %s" % args.combined_file_name)
print("output_path : %s" % args.output_path)
print("output_files_divide_col : %s" % args.output_files_divide_col)
print("output_sep_char : \"%s\"" % args.output_sep_char)
print("date : %s" % args.date)
print("msg_type : %s" % args.msg_type)
print("column_name_prefix : %s" % args.column_name_prefix)
print("columns : %s" % args.columns)
print("regexps : %s" % args.regexps)
print("column_oper : %s" % args.column_oper)
print(".....")
# Build the list of input files with their full paths
logfile_name_list = []
input_files_list = args.input_files.split(",")
#print("input_files_list=%s" % input_files_list)
for input_file in input_files_list:
#print("input_file=%s" % input_file)
input_file_path_name_list = glob.glob(args.input_path + input_file)
#print("input_file_path_name_list=%s" % input_file_path_name_list)
for input_file_path_name in input_file_path_name_list:
print("input_file_path_name = %s" % input_file_path_name)
logfile_name_list.append(input_file_path_name)
print(".....")
#print("logfile_name_list = %s" % logfile_name_list)
print("\n")
date = args.date
# Go through the input file(s)
for logfile_name in logfile_name_list:
variables = {}
msg_type = args.msg_type
#print("msg_type = \"%s\"" % msg_type)
print("logfile_name = \"%s\"" % logfile_name)
# Output-file path and name
head, tail = os.path.split(logfile_name)
#print("head=%s, tail=%s" % (head,tail))
file_name, file_ext =tail.split(".")
logfile_new_name = args.output_path + file_name + "_" + msg_type + ".csv"
print("logfile_new_name = \"%s\"" % logfile_new_name)
#state_search_string = state_search_strings[msg_type]
regexps = args.regexps
#columns_list = state_search_string_variables[msg_type]
log_file = LogFile(msg_type)
    # If the column names were given on the command line
if len(args.columns) > 0:
columns_list = args.columns.split(",")
    # Otherwise get the column names from the first line of the file
else:
        # Get the column names from the first line of the file
columns_list = log_file.read_column_names(logfile_name,args.output_sep_char)
if len(columns_list) == 0:
print("ERR: Not found column names from parameter or file")
sys.exit()
#print("regexps = \"%s\"" % regexps)
#print("columns_list = \"%s\"" % columns_list)
log_file.set_columns_conversions(columns_list,args.column_oper)
#log_file.read(logfile_name,regexps,columns_list)
log_file.read(logfile_name,regexps,args.output_sep_char,args.input_read_mode,args.output_files_divide_col)
    # Get the updated column list (in case columns were added or removed)
columns_new_list = log_file.get_columns()
#print("columns_new_list = %s" % columns_new_list)
if args.input_read_mode == None:
        # Read the log data (possibly no longer needed?)
output_lines = log_file.get()
        # Write to the output file
write_output_file(args.output_path,logfile_new_name,args.column_name_prefix,
args.output_sep_char,args.output_files_divide_col,args.combined_file_name,args.msg_type)
elif args.input_read_mode == "COMBINE":
print("COMBINE")
output_lines += log_file.get()
else:
print("ERR: Unknown read mode: %s" % args.input_read_mode)
sys.exit()
if args.input_read_mode == "COMBINE":
logfile_new_name = args.output_path + args.combined_file_name + "_" + args.msg_type + ".csv"
    # Write to the output file
write_output_file(args.output_path,logfile_new_name,args.column_name_prefix,
args.output_sep_char,args.output_files_divide_col,args.combined_file_name,args.msg_type)
print(" Total execution time: %.3f seconds\n" % (time.time() - start_time))
| gpl-3.0 | -3,477,721,811,518,574,000 | 29.462282 | 139 | 0.621246 | false | 2.838681 | false | false | false |
dnguyen0304/mfit_service | mfit/mfit/__init__.py | 2 | 3612 | # -*- coding: utf-8 -*-
import datetime
import json
import logging.config
import os
import pytz
import redis
from . import protos
from . import enumerations
from . import models
__all__ = ['configuration', 'enumerations', 'models', 'protos']
def get_configuration(application_name):
configuration_file_path = os.environ[
application_name.upper() + '_CONFIGURATION_FILE_PATH']
with open(configuration_file_path, 'r') as file:
parsed_configuration = json.loads(file.read())
return parsed_configuration
configuration = get_configuration(application_name=__name__)
logging.config.dictConfig(config=configuration['logging'])
def add(habit_id, value):
redis_client = redis.StrictRedis(host=configuration['redis']['hostname'],
port=configuration['redis']['port'])
# 1. Create a new event object.
event = models.Event(topic=enumerations.EventTopic.LOG_ADDED)
event.arguments.attemptId = 1
event.arguments.habitId = habit_id
event.arguments.value = value
event.arguments.createdBy = 1
# 2. Serialize the new event object and add it to the queue.
redis_client.rpush('event:all', event.to_string())
# 3. Get the newest event from the queue and deserialize it.
event = models.Event.from_string(redis_client.lindex('event:all', -1))
# 4. Handle the event.
key = 'attempt:{}:summary'.format(event.arguments.attemptId)
    # Incrementing a value does not reset its key's expiration
# timeout.
time_to_live = redis_client.ttl(key)
redis_client.hincrbyfloat(key,
event.arguments.habitId,
event.arguments.value)
if time_to_live < 0:
timezone = pytz.timezone('America/New_York')
timestamp = _get_tomorrow_in_seconds(timezone=timezone)
redis_client.expireat(key, int(timestamp))
def _get_tomorrow(timezone):
"""
Get the start of tomorrow.
The datetime is computed with respect to the specified timezone
and returned converted into UTC.
Parameters
----------
timezone : pytz.tzinfo.DstTzInfo subclass
Returns
-------
datetime.datetime
"""
now = (datetime.datetime.utcnow()
.replace(tzinfo=pytz.utc)
.astimezone(tz=timezone))
offset = now + datetime.timedelta(days=1)
# The implementation of tzinfo in pytz differs from that of the
# standard library. With a couple exceptions, you should therefore
# be using the localize method instead of the tzinfo parameter.
tomorrow_start_naive = datetime.datetime(year=offset.year,
month=offset.month,
day=offset.day)
tomorrow_start = timezone.localize(dt=tomorrow_start_naive)
return tomorrow_start.astimezone(tz=pytz.utc)
def _get_tomorrow_in_seconds(timezone):
"""
Get the start of tomorrow in seconds (i.e. as a Unix timestamp).
Parameters
----------
timezone : pytz.tzinfo.DstTzInfo subclass
Returns
-------
float
"""
epoch = datetime.datetime(year=1970, month=1, day=1, tzinfo=pytz.utc)
tomorrow_start = _get_tomorrow(timezone=timezone)
seconds = (tomorrow_start - epoch).total_seconds()
return seconds
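def _expiry_example():
    # Added for illustration: the Unix timestamp for tomorrow midnight in
    # America/New_York, i.e. the value handed to redis EXPIREAT in add() above.
    return int(_get_tomorrow_in_seconds(pytz.timezone('America/New_York')))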
def get_all_from_today():
redis_client = redis.StrictRedis(host=configuration['redis']['hostname'],
port=configuration['redis']['port'])
summary = redis_client.hgetall('attempt:1:summary')
return summary
| mit | -3,276,830,519,192,619,000 | 27.440945 | 77 | 0.63732 | false | 4.067568 | true | false | false |
eng-tools/sfsimodels | docs/conf.py | 1 | 6217 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sfsimodels documentation build configuration file, created by
# sphinx-quickstart on Wed May 23 10:38:42 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from datetime import date
file_loc = os.path.split(__file__)[0]
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(file_loc), '.')))
import sfsimodels
# -- Project information -----------------------------------------------------
project = sfsimodels.__about__.__project__
author = sfsimodels.__about__.__author__
copyright = u'Copyright 2016 - {0} {1}'.format(date.today().year, author)
# The short X.Y version
version = sfsimodels.__about__.__version__
# The full version, including alpha/beta/rc tags
release = version
import mock
MOCK_MODULES = ['numpy', 'openpyxl']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
# 'sphinx_autodoc_typehints'
]
napoleon_use_param = True # to get type hinting working
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sfsimodels'
author = 'Maxim Millen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6.3'
# The full version, including alpha/beta/rc tags.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'nature' # Switch to an ECP theme
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sfsimodelsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sfsimodels.tex', 'sfsimodels Documentation',
'Maxim Millen', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sfsimodels', 'sfsimodels Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sfsimodels', 'sfsimodels Documentation',
author, 'sfsimodels', 'A set of standard models for assessing structural and geotechnical problems.',
'Science'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mit | -3,832,521,086,501,274,600 | 29.47549 | 106 | 0.671707 | false | 3.807103 | true | false | false |
a10networks/a10sdk-python | a10sdk/core/slb/slb_server_port_stats.py | 2 | 6162 | from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param es_resp_invalid_http: {"description": "Total non-http response", "format": "counter", "type": "number", "oid": "19", "optional": true, "size": "8"}
:param curr_req: {"description": "Current requests", "format": "counter", "type": "number", "oid": "2", "optional": true, "size": "8"}
:param total_rev_pkts_inspected_good_status_code: {"description": "Total reverse packets with good status code inspected", "format": "counter", "type": "number", "oid": "21", "optional": true, "size": "8"}
:param es_resp_count: {"description": "Total proxy response", "format": "counter", "type": "number", "oid": "18", "optional": true, "size": "8"}
:param total_fwd_bytes: {"description": "Forward bytes", "format": "counter", "type": "number", "oid": "5", "optional": true, "size": "8"}
:param es_resp_other: {"description": "Response status other", "format": "counter", "type": "number", "oid": "16", "optional": true, "size": "8"}
:param fastest_rsp_time: {"description": "Fastest response time", "format": "counter", "type": "number", "oid": "23", "optional": true, "size": "8"}
:param total_fwd_pkts: {"description": "Forward packets", "format": "counter", "type": "number", "oid": "6", "optional": true, "size": "8"}
:param es_req_count: {"description": "Total proxy request", "format": "counter", "type": "number", "oid": "17", "optional": true, "size": "8"}
:param es_resp_500: {"description": "Response status 500", "format": "counter", "type": "number", "oid": "15", "optional": true, "size": "8"}
:param peak_conn: {"description": "Peak connections", "format": "counter", "type": "number", "oid": "11", "optional": true, "size": "8"}
:param total_req: {"description": "Total Requests", "format": "counter", "type": "number", "oid": "3", "optional": true, "size": "8"}
:param es_resp_400: {"description": "Response status 400", "format": "counter", "type": "number", "oid": "14", "optional": true, "size": "8"}
:param es_resp_300: {"description": "Response status 300", "format": "counter", "type": "number", "oid": "13", "optional": true, "size": "8"}
:param curr_conn: {"description": "Current connections", "format": "counter", "type": "number", "oid": "1", "optional": true, "size": "8"}
:param es_resp_200: {"description": "Response status 200", "format": "counter", "type": "number", "oid": "12", "optional": true, "size": "8"}
:param total_rev_bytes: {"description": "Reverse bytes", "format": "counter", "type": "number", "oid": "7", "optional": true, "size": "8"}
:param response_time: {"description": "Response time", "format": "counter", "type": "number", "oid": "22", "optional": true, "size": "8"}
:param total_conn: {"description": "Total connections", "format": "counter", "type": "number", "oid": "9", "optional": true, "size": "8"}
:param total_rev_pkts: {"description": "Reverse packets", "format": "counter", "type": "number", "oid": "8", "optional": true, "size": "8"}
:param total_req_succ: {"description": "Total requests succ", "format": "counter", "type": "number", "oid": "4", "optional": true, "size": "8"}
:param last_total_conn: {"description": "Last total connections", "format": "counter", "type": "number", "oid": "10", "optional": true, "size": "8"}
:param total_rev_pkts_inspected: {"description": "Total reverse packets inspected", "format": "counter", "type": "number", "oid": "20", "optional": true, "size": "8"}
:param slowest_rsp_time: {"description": "Slowest response time", "format": "counter", "type": "number", "oid": "24", "optional": true, "size": "8"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "stats"
self.DeviceProxy = ""
self.es_resp_invalid_http = ""
self.curr_req = ""
self.total_rev_pkts_inspected_good_status_code = ""
self.es_resp_count = ""
self.total_fwd_bytes = ""
self.es_resp_other = ""
self.fastest_rsp_time = ""
self.total_fwd_pkts = ""
self.es_req_count = ""
self.es_resp_500 = ""
self.peak_conn = ""
self.total_req = ""
self.es_resp_400 = ""
self.es_resp_300 = ""
self.curr_conn = ""
self.es_resp_200 = ""
self.total_rev_bytes = ""
self.response_time = ""
self.total_conn = ""
self.total_rev_pkts = ""
self.total_req_succ = ""
self.last_total_conn = ""
self.total_rev_pkts_inspected = ""
self.slowest_rsp_time = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Port(A10BaseClass):
"""Class Description::
Statistics for the object port.
Class port supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param protocol: {"enum": ["tcp", "udp"], "description": "'tcp': TCP Port; 'udp': UDP Port; ", "format": "enum", "type": "string", "oid": "1002", "optional": false}
:param port_number: {"description": "Port Number", "format": "number", "optional": false, "oid": "1001", "maximum": 65534, "minimum": 0, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/slb/server/{name}/port/{port_number}+{protocol}/stats`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "port_number","protocol"]
self.b_key = "port"
self.a10_url="/axapi/v3/slb/server/{name}/port/{port_number}+{protocol}/stats"
self.DeviceProxy = ""
self.stats = {}
self.protocol = ""
self.port_number = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| apache-2.0 | -7,141,457,283,060,898,000 | 56.588785 | 209 | 0.591366 | false | 3.352557 | false | false | false |
dahakawang/svn-helper | common.py | 1 | 4418 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import types
import sys
from subprocess import call
from tempfile import NamedTemporaryFile
from abc import ABCMeta, abstractmethod
class STYLE:
NORMAL = "NORMAL"
BOLD = "BOLD"
UNDERLINE = "UNDERLINE"
class ANSICOLOR:
BLACK = "BLACK"
RED = "RED"
GREEN = "GREEN"
YELLOW = "YELLOW"
BLUE = "BLUE"
PURPLE = "PURPLE"
CYAN = "CYAN"
GRAY = "GRAY"
@staticmethod
def translate(color):
if (color == ANSICOLOR.BLACK):
return "30"
elif (color == ANSICOLOR.RED):
return "31"
elif (color == ANSICOLOR.GREEN):
return "32"
elif (color == ANSICOLOR.YELLOW):
return "33"
elif (color == ANSICOLOR.BLUE):
return "34"
elif (color == ANSICOLOR.PURPLE):
return "35"
elif (color == ANSICOLOR.CYAN):
return "36"
elif (color == ANSICOLOR.GRAY):
return "37"
else:
raise RuntimeError("unsupported ANSI color")
def _to_256(color):
if (color < 0 or color > 255):
raise RuntimeError("8bit color must be in range [0, 255]")
return "38;5;" + str(color)
def _normal_text():
return "\033[0m"
def _color_text(color, style):
text = '\033['
if (style == STYLE.NORMAL):
text += "0;"
elif (style == STYLE.BOLD):
text += "1;"
elif (style == STYLE.UNDERLINE):
text += "4;"
else:
raise RuntimeError("unsupported style")
if (isinstance(color, (types.IntType, types.LongType))):
text += _to_256(color)
else:
text += ANSICOLOR.translate(color)
text += "m";
return text;
class ColoredText:
_current_text = ""
@classmethod
def reset(clazz):
clazz._current_text = _normal_text()
sys.stdout.write(clazz._current_text)
@classmethod
def setup(clazz, color, style = STYLE.NORMAL):
clazz._current_text = _color_text(color, style)
sys.stdout.write(clazz._current_text)
@classmethod
def str(clazz, msg, color, style = STYLE.NORMAL):
return _color_text(color, style) + msg + clazz._current_text;
ColoredText.reset()
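# Illustrative usage sketch (added for clarity; not part of the original module).
# Assumes a terminal that understands ANSI escape codes; the messages below are
# arbitrary examples.
def _demo_colored_text():
    warning = ColoredText.str("[WARN] ", ANSICOLOR.RED, STYLE.BOLD)
    print(warning + "disk almost full")
    print(ColoredText.str("8-bit color works too", 208))  # any int in [0, 255]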
def system(cmd, rediret= True):
if rediret:
file = NamedTemporaryFile()
ret = call(cmd, shell = True, stdout = file, stderr = file)
file.seek(0)
return (ret, file)
else:
ret = call(cmd, shell = True)
return ret
class Application:
__metaclass__ = ABCMeta
def run(self):
try:
self.main()
except Exception, e:
print(ColoredText.str("[ERROR] ", ANSICOLOR.RED) + str(e))
@abstractmethod
def main(self):
pass
class Node:
def __init__(self, name = None, desc = None):
self.name = name
self.children = []
def _serialize(self, lastones):
str = ""
self.children = sorted(self.children, key=lambda x: x.name)
level = len(lastones)
if level > 0:
for i in range(level - 1):
if lastones[i]:
str += " "
else:
str += " │"
if lastones[-1]:
str += " └─"
else:
str += " ├─"
str += self.name
for i in range(len(self.children)):
str += "\n"
if i == len(self.children) - 1:
str += self.children[i]._serialize(lastones + [True])
else:
str += self.children[i]._serialize(lastones + [False])
return str
def str(self):
ret = ""
self.children = sorted(self.children, key=lambda x: x.name)
if self.name != None and self.name != "":
ret += self.name
for i in range(len(self.children)):
ret += "\n"
if i == len(self.children) - 1:
ret += self.children[i]._serialize([True])
else:
ret += self.children[i]._serialize([False])
else:
for i in range(len(self.children)):
if i != 0:
ret += "\n"
if i == len(self.children) - 1:
ret += self.children[i]._serialize([])
else:
ret += self.children[i]._serialize([])
return ret
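# Illustrative usage sketch (added for clarity; not part of the original module).
# Builds a small repository-like tree and prints it using the box-drawing
# characters produced by Node._serialize; the node names are arbitrary examples.
def _demo_node_tree():
    root = Node("repo")
    trunk = Node("trunk")
    trunk.children.append(Node("src"))
    root.children.append(trunk)
    root.children.append(Node("branches"))
    root.children.append(Node("tags"))
    print(root.str())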
| mit | 2,177,035,599,657,675,500 | 25.878049 | 70 | 0.508621 | false | 3.697987 | false | false | false |
sminez/ripl | ripl/prelude.py | 1 | 4320 | '''
Common LISPy / Haskelly functions to use inside RIPL
Std Lib Functional stuff:
https://docs.python.org/3.4/library/itertools.html
https://docs.python.org/3.4/library/functools.html
https://docs.python.org/3.4/library/operator.html
Some info on what haskell does:
https://wiki.haskell.org/Fold
http://learnyouahaskell.com/higher-order-functions
Clojure's core reference:
https://clojuredocs.org/clojure.core
https://clojuredocs.org/quickref
'''
import functools
import itertools
import operator as op
from types import GeneratorType
from .bases import RVector
def reverse(itr):
''' :: Itr[*T] -> Itr[*T]
Reverse an iterable
'''
return itr[::-1]
# gen_reverse = lambda x: reversed(x)
def product(cont):
''' :: Itr|Gen[a] -> a
Find the product of an iterable. Contents of the iterable must
implement __mul__
'''
return functools.reduce(op.mul, cont)
def foldl(func, acc, cont):
''' :: f(a, a) -> a, Itr|Gen[a] -> a
Fold a list with a given binary function from the left
'''
for val in cont:
acc = func(acc, val)
return acc
def foldr(func, acc, cont):
''' :: f(a, a) -> a, Itr|Gen[a] -> a
Fold a list with a given binary function from the right
WARNING: Right folds and scans will blow up for
infinite generators!
'''
if isinstance(cont, GeneratorType):
# Convert to iterator to pass to reduce
cont = [c for c in cont]
for val in cont[::-1]:
acc = func(val, acc)
return acc
def scanl(func, acc, cont):
''' :: f(a, a) -> a, Itr|Gen[a] -> List[a]
Use a given accumulator value to build a list of values obtained
by repeatedly applying acc = func(acc, next(list)) from the left.
'''
# yield acc
# for val in cont:
# acc = func(acc, val)
# yield acc
lst = [acc]
for val in cont:
acc = func(acc, val)
lst.append(acc)
return lst
def scanr(func, acc, cont):
''' :: f(a, a) -> a, Itr|Gen[a] -> List[a]
Use a given accumulator value to build a list of values obtained
by repeatedly applying acc = func(next(list), acc) from the right.
WARNING: Right folds and scans will blow up for
infinite generators!
'''
if isinstance(cont, GeneratorType):
# Convert to iterator to pass to reduce
cont = [c for c in cont]
# yield acc
# for val in cont:
# acc = func(val, acc)
# yield acc
lst = [acc]
for val in cont[::-1]:
acc = func(val, acc)
lst.append(acc)
return lst
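# Illustrative usage sketch (added for clarity; not part of the original module).
# The helper below only demonstrates the fold/scan functions defined above on a
# small list, using the operator module already imported as `op`.
def _demo_folds():
    assert foldl(op.add, 0, [1, 2, 3]) == 6            # ((0 + 1) + 2) + 3
    assert foldr(op.sub, 0, [1, 2, 3]) == 2            # 1 - (2 - (3 - 0))
    assert scanl(op.add, 0, [1, 2, 3]) == [0, 1, 3, 6]
    assert scanr(op.add, 0, [1, 2, 3]) == [0, 3, 5, 6]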
def take(num, cont):
''' :: Int, Itr|Gen[*T] -> List[*T]
Return up to the first `num` elements of an iterable or generator.
'''
try:
return cont[:num]
except TypeError:
# Taking from a generator
num_items = []
try:
for n in range(num):
num_items.append(next(cont))
return num_items
except StopIteration:
return num_items
def drop(num, cont):
''' :: Int, Itr|Gen[*T] -> List[*T]
Return everything but the first `num` elements of itr
'''
try:
items = cont[num:]
except TypeError:
items = []
for n in range(num):
# Fetch and drop the initial elements
try:
items.append(next(cont))
except StopIteration:
break
return items
def takeWhile(predicate, container):
''' :: Int, Itr|Gen[*T] -> Gen[*T]
The predicate needs to take a single argument and return a bool.
(takeWhile ~(< 3) '(1 2 3 4 5)) -> '(1 2)
'''
return itertools.takewhile(predicate, container)
def dropWhile(predicate, container):
''' :: Int, Itr|Gen[*T] -> Gen[*T]
The predicate needs to take a single argument and return a bool.
(dropWhile ~(< 3) '(1 2 3 4 5)) -> '(3 4 5)
'''
return itertools.dropwhile(predicate, container)
def flatten(lst):
''' :: Itr|Gen[*T] -> List[*T]
Flatten an arbitrarily nested list of lists down to a single list
'''
_list = ([x] if not isinstance(x, list) else flatten(x) for x in lst)
return sum(_list, [])
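# Illustrative example (added for clarity; not part of the original module):
# flatten removes arbitrary nesting from plain Python lists.
def _demo_flatten():
    assert flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]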
def drain(gen):
''' :: Gen[*T] -> List[*T]
Given a generator, convert it to a list (RVector)
'''
return RVector([elem for elem in gen])
| gpl-3.0 | 4,372,563,448,669,054,500 | 24.714286 | 73 | 0.590972 | false | 3.461538 | false | false | false |
MikeTheGreat/GLT | glt/Constants.py | 1 | 1906 | """File contains constants (like EnvOptions)"""
from enum import Enum
INSTRUCTOR_FEEDBACK_TAG_MSG = "Tag left here so the instructor's scripts know when feedback was uploaded"
class EnvOptions(str, Enum):
"""This lists the keys for the rcfile and command line arguments.
Note that this is a 'mix-in' enum, which means that saying
EnvOptions.INFILE automatically uses the str() value for that member
(i.e., EnvOptions.INFILE is "infile", without needing a .value on
the .INFILE"""
ACTION = "action" # Key used to store which command-line option (addStudents, etc) was chosen
# common options:
SERVER = 'server'
SERVER_IP_ADDR = 'server_ip'
USERNAME = 'username'
PASSWORD = 'password'
# command line option for listing projects
# This is mostly useful to check your configuration
# and make sure you can connect to the server
LIST_PROJECTS = "listProjects"
# command line args for creating student accounts
CREATE_STUDENTS = "addStudents"
INFILE = "infile"
INFILE_TYPE = "infile_type"
SECTION = "section"
DELETE_CLASS = "deleteClass"
# Adding a new homework project:
NEW_HOMEWORK = "addHomework"
HOMEWORK_NAME = 'homework_name'
HOMEWORK_DIR = 'homework_path'
DOWNLOAD_HOMEWORK = "download"
STUDENT_WORK_DIR = "student_work_dir"
COMMIT_FEEDBACK = "commitFeedback"
UPLOAD_FEEDBACK = "uploadFeedback"
FEEDBACK_PATTERN = "pattern"
FEEDBACK_PATTERN_DEFAULT = "grade"
GIT_TAG = "tag"
GIT_TAG_DEFAULT = "GradedByInstructor-V1"
GRADING_LIST = "gradeList"
GIT_DO = "gitdo"
GIT_COMMAND = "gitCommand"
# .gltrc file
KNOWN_GOOD_ACCOUNTS = "known_good_accounts"
DATA_DIR = "data_dir"
TEMP_DIR = "temp_dir"
SECTION_LIST = 'section_list'
HW_LOC = 'student_homework_location'
HW_LOC_DEFAULT = "SECTION/ASSIGNMENT/NAME_LAST, NAME_FIRST/"
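# Illustrative usage sketch (added for clarity; not part of the original module).
# Because EnvOptions mixes in str, members compare equal to their plain string
# values, so parsed arguments can be checked without touching .value.
def _demo_env_options():
    assert EnvOptions.INFILE == "infile"
    chosen_action = "addStudents"  # e.g. the value stored under EnvOptions.ACTION
    assert chosen_action == EnvOptions.CREATE_STUDENTS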
| gpl-3.0 | 6,785,624,960,861,628,000 | 29.741935 | 105 | 0.680483 | false | 3.497248 | false | false | false |
resmo/ansible | lib/ansible/modules/network/check_point/cp_mgmt_run_script.py | 20 | 2681 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Check Point Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_run_script
short_description: Executes the script on a given list of targets.
description:
- Executes the script on a given list of targets.
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
script_name:
description:
- Script name.
type: str
script:
description:
- Script body.
type: str
targets:
description:
- On what targets to execute this command. Targets may be identified by their name, or object unique identifier.
type: list
args:
description:
- Script arguments.
type: str
comments:
description:
- Comments string.
type: str
extends_documentation_fragment: checkpoint_commands
"""
EXAMPLES = """
- name: run-script
cp_mgmt_run_script:
script: ls -l /
script_name: 'Script Example: List files under / dir'
targets:
- corporate-gateway
"""
RETURN = """
cp_mgmt_run_script:
description: The checkpoint run-script output.
returned: always.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_commands, api_command
def main():
argument_spec = dict(
script_name=dict(type='str'),
script=dict(type='str'),
targets=dict(type='list'),
args=dict(type='str'),
comments=dict(type='str')
)
argument_spec.update(checkpoint_argument_spec_for_commands)
module = AnsibleModule(argument_spec=argument_spec)
command = "run-script"
result = api_command(module, command)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,036,465,472,737,189,000 | 25.81 | 118 | 0.680343 | false | 3.852011 | false | false | false |
fishroot/nemoa | nemoa/base/env.py | 1 | 24590 | # -*- coding: utf-8 -*-
"""Environmentan information and functions for filesystem operations.
.. References:
.. _appdirs:
http://github.com/ActiveState/appdirs
.. TODO::
* Add get_file for 'user_package_log', 'temp_log' etc.
"""
__author__ = 'Patrick Michl'
__email__ = '[email protected]'
__license__ = 'GPLv3'
__docformat__ = 'google'
import fnmatch
import os
import shutil
import string
import sys
import getpass
import locale
import platform
import re
from distutils import sysconfig
from pathlib import Path, PurePath
try:
from appdirs import AppDirs
except ImportError as err:
raise ImportError(
"requires package appdirs: "
"https://pypi.org/project/appdirs/") from err
from nemoa.base import check, this
from nemoa.types import Any, Iterable, IterAny, OptStrDict
from nemoa.types import PathLikeList, OptStr, OptStrOrBool, OptPathLike
from nemoa.types import PathLike, StrDict, Sequence, StrDictOfPaths, Union
#
# Structural Types
#
# Nested paths for tree structured path references
# TODO ([email protected]): currently (Python 3.7.1) recursive type
# definition is not fully supported by the typing module. When recursive type
# definition is available replace the following lines by their respective
# recursive definitions
PathLikeSeq = Sequence[PathLike]
PathLikeSeq2 = Sequence[Union[PathLike, PathLikeSeq]]
PathLikeSeq3 = Sequence[Union[PathLike, PathLikeSeq, PathLikeSeq2]]
NestPath = Union[PathLike, PathLikeSeq, PathLikeSeq2, PathLikeSeq3]
#NestPath = Sequence[Union[str, Path, 'NestPath']]
#
# Constants
#
_DEFAULT_APPNAME = 'nemoa'
_DEFAULT_APPAUTHOR = 'frootlab'
_RECURSION_LIMIT = sys.getrecursionlimit()
#
# Public Module Functions
#
def get_var(varname: str, *args: Any, **kwds: Any) -> OptStr:
"""Get environment or application variable.
Environment variables comprise static and runtime properties of the
operating system like 'username' or 'hostname'. Application variables in
turn, are intended to describe the application distribution by authorship
information, bibliographic information, status, formal conditions and notes
or warnings. For mor information see :PEP:`345`.
Args:
varname: Name of environment variable. Typical application variable
names are:
'name': The name of the distribution
'version': A string containing the distribution's version number
'status': Development status of the distributed application.
Typical values are 'Prototype', 'Development', or 'Production'
'description': A longer description of the distribution that can
run to several paragraphs.
'keywords': A list of additional keywords to be used to assist
searching for the distribution in a larger catalog.
'url': A string containing the URL for the distribution's
homepage.
'license': Text indicating the license covering the distribution
'copyright': Notice of statutorily prescribed form that informs
users of the distribution to published copyright ownership.
'author': A string containing the author's name at a minimum;
additional contact information may be provided.
'email': A string containing the author's e-mail address. It can
contain a name and e-mail address, as described in :rfc:`822`.
'maintainer': A string containing the maintainer's name at a
minimum; additional contact information may be provided.
'company': The company, which created or maintains the distribution.
            'organization': The organization, which created or maintains the
distribution.
'credits': list with strings, acknowledging further contributors,
Teams or supporting organizations.
*args: Optional arguments that specify the application, as required by
the function 'nemoa.base.env.update_vars'.
**kwds: Optional keyword arguments that specify the application, as
required by the function 'nemoa.base.env.update_vars'.
Returns:
String representing the value of the application variable.
"""
# Check type of 'varname'
check.has_type("'varname'", varname, str)
# Update variables if not present or if optional arguments are given
if not '_vars' in globals() or args or kwds:
update_vars(*args, **kwds)
appvars = globals().get('_vars', {})
return appvars.get(varname, None)
def get_vars(*args: Any, **kwds: Any) -> StrDict:
"""Get dictionary with environment and application variables.
Environment variables comprise static and runtime properties of the
operating system like 'username' or 'hostname'. Application variables in
turn, are intended to describe the application distribution by authorship
information, bibliographic information, status, formal conditions and notes
or warnings. For mor information see :PEP:`345`.
Args:
*args: Optional arguments that specify the application, as required by
:func:`~nemoa.base.env.update_vars`.
**kwds: Optional keyword arguments that specify the application, as
required by :func:`~nemoa.base.env.update_vars`.
Returns:
Dictionary containing application variables.
"""
# Update variables if not present or if optional arguments are given
if not '_vars' in globals() or args or kwds:
update_vars(*args, **kwds)
return globals().get('_vars', {}).copy()
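# Minimal usage sketch (added for illustration; not part of the original module).
# The variable names queried here are among those documented above; a top level
# module defining __version__/__author__ style attributes is assumed.
def _demo_get_vars() -> None:
    print(get_var('name'))      # distribution name of the current top level module
    print(get_var('username'))  # runtime property taken from the operating system
    print(sorted(get_vars()))   # all collected variable names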
def update_vars(filepath: OptPathLike = None) -> None:
"""Update environment and application variables.
Environment variables comprise static and runtime properties of the
operating system like 'username' or 'hostname'. Application variables in
turn, are intended to describe the application distribution by authorship
information, bibliographic information, status, formal conditions and notes
or warnings. For mor information see :PEP:`345`.
Args:
filepath: Valid filepath to python module, that contains the application
variables as module attributes. By default the current top level
module is used.
"""
# Get package specific environment variables by parsing a given file for
# module attributes. By default the file of the current top level module
# is taken. If name is not given, then use the name of the current top level
# module.
filepath = filepath or this.get_root().__file__
text = Path(filepath).read_text()
rekey = "__([a-zA-Z][a-zA-Z0-9_]*)__"
reval = r"['\"]([^'\"]*)['\"]"
pattern = f"^[ ]*{rekey}[ ]*=[ ]*{reval}"
info = {}
for match in re.finditer(pattern, text, re.M):
info[str(match.group(1))] = str(match.group(2))
info['name'] = info.get('name', this.get_module_name().split('.', 1)[0])
# Get plattform specific environment variables
info['encoding'] = get_encoding()
info['hostname'] = get_hostname()
info['osname'] = get_osname()
info['username'] = get_username()
# Update globals
globals()['_vars'] = info
def get_dir(dirname: str, *args: Any, **kwds: Any) -> Path:
"""Get application specific environmental directory by name.
This function returns application specific system directories by platform
independent names to allow platform independent storage for caching,
logging, configuration and permanent data storage.
Args:
dirname: Environmental directory name. Allowed values are:
:user_cache_dir: Cache directory of user
:user_config_dir: Configuration directory of user
:user_data_dir: Data directory of user
:user_log_dir: Logging directory of user
:site_config_dir: Site global configuration directory
:site_data_dir: Site global data directory
:site_package_dir: Site global package directory
:package_dir: Current package directory
:package_data_dir: Current package data directory
*args: Optional arguments that specify the application, as required by
the function 'nemoa.base.env.update_dirs'.
**kwds: Optional keyword arguments that specify the application, as
required by the function 'nemoa.base.env.update_dirs'.
Returns:
String containing path of environmental directory or None if the
pathname is not supported.
"""
# Check type of 'dirname'
check.has_type("argument 'dirname'", dirname, str)
# Update derectories if not present or if any optional arguments are given
if not '_dirs' in globals() or args or kwds:
update_dirs(*args, **kwds)
dirs = globals().get('_dirs', {})
# Check value of 'dirname'
if dirname not in dirs:
raise ValueError(f"directory name '{dirname}' is not valid")
return dirs[dirname]
def get_dirs(*args: Any, **kwds: Any) -> StrDict:
"""Get application specific environmental directories.
This function returns application specific system directories by platform
independent names to allow platform independent storage for caching,
logging, configuration and permanent data storage.
Args:
*args: Optional arguments that specify the application, as required by
the function 'nemoa.base.env.update_dirs'.
**kwds: Optional keyword arguments that specify the application, as
required by the function 'nemoa.base.env.update_dirs'.
Returns:
Dictionary containing paths of application specific environmental
directories.
"""
# Update appdirs if not present or if optional arguments are given
if not '_dirs' in globals() or args or kwds:
update_dirs(*args, **kwds)
return globals().get('_dirs', {}).copy()
def update_dirs(
appname: OptStr = None, appauthor: OptStrOrBool = None,
version: OptStr = None, **kwds: Any) -> None:
"""Update application specific directories from name, author and version.
This function retrieves application specific directories from the package
    `appdirs`_. Additionally the directory 'site_package_dir' is retrieved from
the standard library package distutils and 'package_dir' and
'package_data_dir' from the current top level module.
Args:
appname: is the name of application. If None, just the system directory
is returned.
appauthor: is the name of the appauthor or distributing body for this
application. Typically it is the owning company name. You may pass
False to disable it. Only applied in windows.
version: is an optional version path element to append to the path.
You might want to use this if you want multiple versions of your
app to be able to run independently. If used, this would typically
be "<major>.<minor>". Only applied when appname is present.
**kwds: Optional directory name specific keyword arguments. For more
information see `appdirs`_.
"""
dirs: StrDictOfPaths = {}
# Get system directories
dirs['home'] = get_home()
dirs['cwd'] = get_cwd()
# Get application directories from appdirs
appname = appname or get_var('name') or _DEFAULT_APPNAME
appauthor = appauthor or get_var('author') or _DEFAULT_APPAUTHOR
appdirs = AppDirs(
appname=appname, appauthor=appauthor, version=version, **kwds)
dirnames = [
'user_cache_dir', 'user_config_dir', 'user_data_dir',
'user_log_dir', 'site_config_dir', 'site_data_dir']
for dirname in dirnames:
dirs[dirname] = Path(getattr(appdirs, dirname))
# Get distribution directories from distutils
path = Path(sysconfig.get_python_lib(), appname)
dirs['site_package_dir'] = path
# Get current package directories from top level module
path = Path(this.get_root().__file__).parent
dirs['package_dir'] = path
dirs['package_data_dir'] = path / 'data'
globals()['_dirs'] = dirs
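# Minimal usage sketch (added for illustration; not part of the original module).
# Shows how the directory names accepted by get_dir() resolve once update_dirs()
# has been called; the application name and author mirror the module defaults.
def _demo_app_dirs() -> None:
    update_dirs(appname=_DEFAULT_APPNAME, appauthor=_DEFAULT_APPAUTHOR)
    print(get_dir('user_config_dir'))  # per-user configuration directory
    print(get_dir('site_data_dir'))    # machine-wide data directory
    print(get_dirs()['package_dir'])   # directory of the current package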
def get_encoding() -> str:
"""Get preferred encoding used for text data.
This is a wrapper function to the standard library function
:func:`locale.getpreferredencoding`. This function returns the encoding
used for text data, according to user preferences. User preferences are
expressed differently on different systems, and might not be available
programmatically on some systems, so this function only returns a guess.
Returns:
String representing the preferred encoding used for text data.
"""
return locale.getpreferredencoding(False)
def get_hostname() -> str:
"""Get hostname of the computer.
This is a wrapper function to the standard library function
:func:`platform.node`. This function returns the computer’s hostname. If
the value cannot be determined, an empty string is returned.
Returns:
String representing the computer’s hostname or None.
"""
return platform.node()
def get_osname() -> str:
"""Get name of the Operating System.
This is a wrapper function to the standard library function
:func:`platform.system`. This function returns the OS name, e.g. 'Linux',
'Windows', or 'Java'. If the value cannot be determined, an empty string is
returned.
Returns:
String representing the OS name or None.
"""
return platform.system()
def get_username() -> str:
"""Login name of the current user.
This is a wrapper function to the standard library function
:func:`getpass.getuser`. This function checks the environment variables
LOGNAME, USER, LNAME and USERNAME, in order, and returns the value of the
first one which is set to a non-empty string. If none are set, the login
name from the password database is returned on systems which support the
pwd module, otherwise, an exception is raised.
Returns:
String representing the login name of the current user.
"""
return getpass.getuser()
def get_cwd() -> Path:
"""Get path of current working directory.
Returns:
Path of current working directory.
"""
return Path.cwd()
def get_home() -> Path:
"""Get path of current users home directory.
Returns:
Path of current users home directory.
"""
return Path.home()
def clear_filename(fname: str) -> str:
r"""Clear filename from invalid characters.
Args:
fname: Arbitrary string, which is be cleared from invalid filename
characters.
Returns:
String containing valid path syntax.
Examples:
>>> clear_filename('3/\nE{$5}.e')
'3E5.e'
"""
valid = "-_.() " + string.ascii_letters + string.digits
fname = ''.join(c for c in fname if c in valid).replace(' ', '_')
return fname
def match_paths(paths: PathLikeList, pattern: str) -> PathLikeList:
"""Filter pathlist to matches with wildcard pattern.
Args:
paths: List of paths, which is filtered to matches with pattern.
pattern: String pattern, containing Unix shell-style wildcards:
'*': matches arbitrary strings
'?': matches single characters
[seq]: matches any character in seq
[!seq]: matches any character not in seq
Returns:
Filtered list of paths.
Examples:
>>> match_paths([Path('a.b'), Path('b.a')], '*.b')
[Path('a.b')]
"""
# Normalize path and pattern representation using POSIX standard
mapping = {PurePath(path).as_posix(): path for path in paths}
pattern = PurePath(pattern).as_posix()
# Match normalized paths with normalized pattern
names = list(mapping.keys())
matches = fnmatch.filter(names, pattern)
# Return original paths
return [mapping[name] for name in matches]
def join_path(*args: NestPath) -> Path:
r"""Join nested iterable path-like structure to single path object.
Args:
*args: Arguments containing nested iterable paths of strings and
PathLike objects.
Returns:
Single Path comprising all arguments.
Examples:
>>> join_path(('a', ('b', 'c')), 'd')
Path('a\\b\\c\\d')
"""
# Generate flat structure
def flatten(tower: Any) -> IterAny:
for token in tower:
if not isinstance(token, Iterable):
yield token
elif isinstance(token, str):
yield token
else:
yield from flatten(token)
flat = [token for token in flatten(args)]
# Create path from flat structure
try:
path = Path(*flat)
except TypeError as err:
raise TypeError(
"the tokens of nested paths require to be of types "
"str, bytes or path-like") from err
return path
def expand(
*args: NestPath, udict: OptStrDict = None,
envdirs: bool = True) -> Path:
r"""Expand path variables.
Args:
*args: Path like arguments, respectively given by a tree of strings,
which can be joined to a path.
udict: dictionary for user variables.
Thereby the keys in the dictionary are encapsulated
by the symbol '%'. The user variables may also include references.
        envdirs: Boolean value which determines if environmental path variables
are expanded. For a full list of valid environmental path variables
see 'nemoa.base.env.get_dirs'. Default is True
Returns:
String containing valid path syntax.
Examples:
>>> expand('%var1%/c', 'd', udict = {'var1': 'a/%var2%', 'var2': 'b'})
'a\\b\\c\\d'
"""
path = join_path(*args)
udict = udict or {}
# Create mapping with path variables
pvars = {}
if envdirs:
for key, val in get_dirs().items():
pvars[key] = str(val)
if udict:
for key, val in udict.items():
pvars[key] = str(join_path(val))
# Itereratively expand directories
update = True
i = 0
while update:
update = False
for key, val in pvars.items():
if '%' + key + '%' not in str(path):
continue
try:
path = Path(str(path).replace('%' + key + '%', val))
except TypeError:
del pvars[key]
update = True
i += 1
if i > _RECURSION_LIMIT:
raise RecursionError('cyclic dependency in variables detected')
path = Path(path)
# Expand unix style home path '~'
if envdirs:
path = path.expanduser()
return path
def get_dirname(*args: NestPath) -> str:
r"""Extract directory name from a path like structure.
Args:
*args: Path like arguments, respectively given by a tree of strings,
which can be joined to a path.
Returns:
String containing normalized directory path of file.
Examples:
>>> get_dirname(('a', ('b', 'c'), 'd'), 'base.ext')
'a\\b\\c\\d'
"""
path = expand(*args)
if path.is_dir():
return str(path)
return str(path.parent)
def filename(*args: NestPath) -> str:
"""Extract file name from a path like structure.
Args:
*args: Path like arguments, respectively given by a tree of strings,
which can be joined to a path.
Returns:
String containing normalized directory path of file.
Examples:
>>> filename(('a', ('b', 'c')), 'base.ext')
'base.ext'
"""
path = expand(*args)
if path.is_dir():
return ''
return str(path.name)
def basename(*args: NestPath) -> str:
"""Extract file basename from a path like structure.
Args:
*args: Path like arguments, respectively given by a tree of strings,
which can be joined to a path.
Returns:
String containing basename of file.
Examples:
        >>> basename(('a', ('b', 'c')), 'base.ext')
'base'
"""
path = expand(*args)
if path.is_dir():
return ''
return str(path.stem)
def fileext(*args: NestPath) -> str:
"""Fileextension of file.
Args:
*args: Path like arguments, respectively given by a tree of strings,
which can be joined to a path.
Returns:
String containing fileextension of file.
Examples:
>>> fileext(('a', ('b', 'c')), 'base.ext')
'ext'
"""
path = expand(*args)
if path.is_dir():
return ''
return str(path.suffix).lstrip('.')
def is_dir(path: NestPath) -> bool:
"""Determine if given path points to a directory.
Extends :meth:`pathlib.Path.is_dir` by nested paths and path variable
expansion.
Args:
path: Path like structure, which is expandable to a valid path
Returns:
        True if the path points to a directory (or a symbolic link pointing
        to a directory), False if it points to another kind of file.
"""
return expand(path).is_dir()
def is_file(path: NestPath) -> bool:
"""Determine if given path points to a file.
Extends :meth:`pathlib.Path.is_file` by nested paths and path variable
expansion.
Args:
path: Path like structure, which is expandable to a valid path.
Returns:
        True if the path points to a regular file (or a symbolic link pointing
        to a regular file), False if it points to another kind of file.
"""
return expand(path).is_file()
def copytree(source: NestPath, target: NestPath) -> None:
"""Copy directory structure from given source to target directory.
Args:
source: Path like structure, which comprises the path of a source folder
target: Path like structure, which comprises the path of a destination
folder
Returns:
True if the operation was successful.
"""
# Recursive copy function, that allows existing files
def copy(source: Path, target: Path) -> None:
if source.is_dir():
if not target.is_dir():
target.mkdir()
for each in source.glob('*'):
copy(each, target / each.name)
else:
shutil.copy(source, target)
copy(expand(source), expand(target))
def mkdir(*args: NestPath) -> bool:
"""Create directory.
Args:
*args: Path like structure, which comprises the path of a new directory
Returns:
True if the directory already exists, or the operation was successful.
"""
path = expand(*args)
if path.is_dir():
return True
try:
os.makedirs(path)
except Exception as err:
raise OSError("could not create directory") from err
return path.is_dir()
def rmdir(*args: NestPath) -> bool:
"""Remove directory.
Args:
*args: Path like structure, which identifies the path of a directory
Returns:
True if the directory could be deleted
"""
path = expand(*args)
if not path.is_dir():
return False
shutil.rmtree(str(path), ignore_errors=True)
return not path.exists()
def touch(
path: NestPath, parents: bool = True, mode: int = 0o666,
exist_ok: bool = True) -> bool:
"""Create an empty file at the specified path.
Args:
path: Nested :term:`path-like object`, which represents a valid filename
in the directory structure of the operating system.
parents: Boolean value, which determines if missing parents of the path
are created as needed.
mode: Integer value, which specifies the properties if the file. For
more information see :func:`os.chmod`.
exist_ok: Boolean value which determines, if the function returns False,
if the file already exists.
Returns:
True if the file could be created, else False.
"""
filepath = expand(path)
# Check type of 'filepath'
if not isinstance(filepath, Path):
return False
# Check if directory exists and optionally create it
dirpath = filepath.parent
if not dirpath.is_dir():
if not parents:
return False
dirpath.mkdir(parents=True, exist_ok=True)
if not dirpath.is_dir():
return False
# Check if file already exsists
if filepath.is_file() and not exist_ok:
return False
# Touch file with given
filepath.touch(mode=mode, exist_ok=exist_ok)
return filepath.is_file()
| gpl-3.0 | 3,866,459,277,916,991,000 | 32.224324 | 80 | 0.647238 | false | 4.324714 | false | false | false |
blckshrk/Weboob | weboob/applications/webcontentedit/webcontentedit.py | 1 | 6946 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import tempfile
import locale
import codecs
from weboob.core.bcall import CallErrors
from weboob.capabilities.content import ICapContent, Revision
from weboob.tools.application.repl import ReplApplication, defaultcount
__all__ = ['WebContentEdit']
class WebContentEdit(ReplApplication):
APPNAME = 'webcontentedit'
VERSION = '0.h'
COPYRIGHT = 'Copyright(C) 2010-2011 Romain Bignon'
DESCRIPTION = "Console application allowing to display and edit contents on various websites."
SHORT_DESCRIPTION = "manage websites content"
CAPS = ICapContent
def do_edit(self, line):
"""
edit ID [ID...]
Edit a content with $EDITOR, then push it on the website.
"""
contents = []
for id in line.split():
_id, backend_name = self.parse_id(id, unique_backend=True)
backend_names = (backend_name,) if backend_name is not None else self.enabled_backends
contents += [content for backend, content in self.do('get_content', _id, backends=backend_names) if content]
if len(contents) == 0:
print >>sys.stderr, 'No contents found'
return 3
if sys.stdin.isatty():
paths = {}
for content in contents:
tmpdir = os.path.join(tempfile.gettempdir(), "weboob")
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
with tempfile.NamedTemporaryFile(prefix='%s_' % content.id.replace(os.path.sep, '_'), dir=tmpdir, delete=False) as f:
data = content.content
if isinstance(data, unicode):
data = data.encode('utf-8')
elif data is None:
content.content = u''
data = ''
f.write(data)
paths[f.name.encode('utf-8')] = content
params = ''
editor = os.environ.get('EDITOR', 'vim')
if editor == 'vim':
params = '-p'
os.system("%s %s %s" % (editor, params, ' '.join(['"%s"' % path.replace('"', '\\"') for path in paths.iterkeys()])))
for path, content in paths.iteritems():
with open(path, 'r') as f:
data = f.read()
try:
data = data.decode('utf-8')
except UnicodeError:
pass
if content.content != data:
content.content = data
else:
contents.remove(content)
if len(contents) == 0:
print >>sys.stderr, 'No changes. Abort.'
return 1
print 'Contents changed:\n%s' % ('\n'.join(' * %s' % content.id for content in contents))
message = self.ask('Enter a commit message', default='')
minor = self.ask('Is this a minor edit?', default=False)
if not self.ask('Do you want to push?', default=True):
return
errors = CallErrors([])
for content in contents:
path = [path for path, c in paths.iteritems() if c == content][0]
sys.stdout.write('Pushing %s...' % content.id.encode('utf-8'))
sys.stdout.flush()
try:
self.do('push_content', content, message, minor=minor, backends=[content.backend]).wait()
except CallErrors as e:
errors.errors += e.errors
sys.stdout.write(' error (content saved in %s)\n' % path)
else:
sys.stdout.write(' done\n')
os.unlink(path)
else:
# stdin is not a tty
if len(contents) != 1:
print >>sys.stderr, "Multiple ids not supported with pipe"
return 2
message, minor = '', False
data = sys.stdin.read()
contents[0].content = data.decode(sys.stdin.encoding or locale.getpreferredencoding())
errors = CallErrors([])
for content in contents:
sys.stdout.write('Pushing %s...' % content.id.encode('utf-8'))
sys.stdout.flush()
try:
self.do('push_content', content, message, minor=minor, backends=[content.backend]).wait()
except CallErrors as e:
errors.errors += e.errors
sys.stdout.write(' error\n')
else:
sys.stdout.write(' done\n')
if len(errors.errors) > 0:
raise errors
@defaultcount(10)
def do_log(self, line):
"""
log ID
Display log of a page
"""
if not line:
print >>sys.stderr, 'Error: please give a page ID'
return 2
_id, backend_name = self.parse_id(line)
backend_names = (backend_name,) if backend_name is not None else self.enabled_backends
_id = _id.encode('utf-8')
self.start_format()
for backend, revision in self.do('iter_revisions', _id, backends=backend_names):
self.format(revision)
def do_get(self, line):
"""
get ID [revision]
Get page contents
"""
if not line:
print >>sys.stderr, 'Error: please give a page ID'
return 2
line = line.rsplit(' ', 1)
if len(line) > 1:
revision = Revision(line[1])
else:
revision = None
_id, backend_name = self.parse_id(line[0])
backend_names = (backend_name,) if backend_name is not None else self.enabled_backends
_id = _id.encode('utf-8')
output = codecs.getwriter(sys.stdout.encoding or locale.getpreferredencoding())(sys.stdout)
for contents in [content for backend, content in self.do('get_content', _id, revision, backends=backend_names) if content]:
output.write(contents.content)
# add a newline unless we are writing
# in a file or in a pipe
if os.isatty(output.fileno()):
output.write('\n')
| agpl-3.0 | 7,499,032,155,930,786,000 | 35.366492 | 133 | 0.547653 | false | 4.295609 | false | false | false |
fbradyirl/home-assistant | tests/components/history_graph/test_init.py | 4 | 1171 | """The tests the Graph component."""
import unittest
from homeassistant.setup import setup_component
from tests.common import init_recorder_component, get_test_home_assistant
class TestGraph(unittest.TestCase):
"""Test the Google component."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component(self):
"""Test setup component."""
self.init_recorder()
config = {"history": {}, "history_graph": {"name_1": {"entities": "test.test"}}}
assert setup_component(self.hass, "history_graph", config)
assert dict(self.hass.states.get("history_graph.name_1").attributes) == {
"entity_id": ["test.test"],
"friendly_name": "name_1",
"hours_to_show": 24,
"refresh": 0,
}
def init_recorder(self):
"""Initialize the recorder."""
init_recorder_component(self.hass)
self.hass.start()
| apache-2.0 | -1,391,276,814,122,048,000 | 31.527778 | 88 | 0.611443 | false | 4.010274 | true | false | false |
timkofu/gitstars | stars/models.py | 1 | 1223 |
from django.db import models
class ProgrammingLanguage(models.Model):
name = models.CharField(max_length=64)
def __str__(self):
return self.name
class Meta:
indexes = [models.Index(fields=['name'])]
verbose_name_plural = "Programming Languages"
class Project(models.Model):
name = models.CharField(max_length=255) # .name
full_name = models.CharField(max_length=255, unique=True) # .full_name
description = models.TextField() # .description
url = models.URLField(unique=True) # .html_url
initial_stars = models.IntegerField() # .stargazers_count
current_stars = models.IntegerField(default=0) # .stargazers_count
language = models.ForeignKey(
ProgrammingLanguage, related_name="projects", on_delete=models.CASCADE
) # .language
add_date = models.DateField(auto_now_add=True)
notes = models.TextField(null=True, blank=True)
def __str__(self):
return "{}".format(
self.name
)
class Meta:
verbose_name_plural = "Stars"
indexes = [
models.Index(fields=[
'name',
'full_name',
'description'
])
]
| mit | -5,217,552,569,242,024,000 | 26.795455 | 78 | 0.604252 | false | 3.845912 | false | false | false |
chop-dbhi/prov-extractor | prov_extractor/sources/filesystem.py | 1 | 3692 | import os
import fnmatch
from datetime import datetime
from . import base
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
class Client(base.Client):
name = 'Directory'
description = '''
Generator for a filesystem.
'''
options = {
'required': ['path'],
'properties': {
'path': {
'description': 'A local filesystem directory.',
'type': 'string',
},
'recurse': {
'description': 'If true, directories will be recursed into.',
'type': 'boolean',
'default': True,
},
'pattern': {
'description': 'Glob pattern for directories and files.',
'type': 'string',
'default': '*',
},
'hidden': {
'description': 'If true, hidden files and directories will be included.', # noqa
'type': 'boolean',
'default': False,
},
'depth': {
'description': 'The maximum depth to recurse into.',
'type': 'integer',
}
}
}
def parse_directory(self, path):
path_id = os.path.relpath(path, self.options.path)
return {
'origins:ident': path_id,
'prov:type': 'Directory',
'prov:label': path_id,
'path': path_id,
}
def parse_file(self, path):
path_id = os.path.relpath(path, self.options.path)
stats = os.stat(path)
# Convert into datetime from timestamp floats
atime = datetime.fromtimestamp(stats.st_atime)
mtime = datetime.fromtimestamp(stats.st_mtime)
if hasattr(stats, 'st_birthtime'):
create_time = stats.st_birthtime
else:
create_time = stats.st_ctime
ctime = datetime.fromtimestamp(create_time)
return {
'origins:ident': path_id,
'prov:type': 'File',
'prov:label': path_id,
'path': path_id,
'mode': stats.st_mode,
'uid': stats.st_uid,
'gid': stats.st_gid,
'size': stats.st_size,
'accessed': atime.strftime(DATETIME_FORMAT),
'modified': mtime.strftime(DATETIME_FORMAT),
'created': ctime.strftime(DATETIME_FORMAT),
}
def parse(self):
base_path = self.options.path
for root, dirs, names in os.walk(base_path):
if self.options.depth is not None:
curpath = os.path.relpath(root, base_path)
if curpath == '.':
depth = 0
else:
depth = len(curpath.split(os.path.sep))
# Remove all subdirectories from traversal once the
# desired depth has been reached. Note a `break` does
# not work since this would stop processing sibling
# directories as well.
for dirname in dirs[:]:
                    if depth >= self.options.depth:
dirs.pop()
elif not self.options.hidden and dirname.startswith('.'):
dirs.pop()
directory = self.parse_directory(root)
self.document.add('entity', directory)
for f in fnmatch.filter(names, self.options.pattern):
if not self.options.hidden and f.startswith('.'):
continue
path = os.path.join(root, f)
_file = self.parse_file(path)
_file['directory'] = directory
self.document.add('entity', _file)
| bsd-2-clause | 3,052,093,645,219,340,000 | 29.262295 | 97 | 0.489166 | false | 4.541205 | false | false | false |
bcso/351SYDE | neko.py | 1 | 2216 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from math import tan, cos, sin, pi
from scipy.integrate import odeint, simps, cumtrapz
##############
## y0 = yk
## y1 = theta
## y2 = px
## y3 = py
##############
def model(y, t):
yk, theta, vx, vy = y
# constants
m = 0.206 # Mass of pendulum
k = 10 # Stiffness of Spring
b = 0.3 # Torsional Resistance
R = 1.5 # Friction
L = 0.61 # Length of Pendulum
g = 9.81 # Gravitional acceleration
Y = 0 # Equilibrium position
# in between terms
disp = (yk - Y)
d_yk = vy + ((tan(theta) * vx))
d_theta = vx / (L * cos(theta))
d_vy = g + (( -R * d_yk - k * yk)/m)
# the derivative causality is resolved here, so adding some in between
# terms for easier debugging
e_21 = tan(theta) * (d_vy - g) # comes from the left side of bg
e_24 = d_theta * b # torsional resistance
e_22 = d_theta * tan(theta) * vx / (12 * (cos(theta)**2))
factor = 1 / (1 + (1 / ( 12 * (cos(theta)**2))))
d_vx = factor * (e_21 - e_22 - e_24)
return [d_yk, d_theta, d_vx, d_vy]
time = np.linspace(0.0, 8.0, 10000)
# Initial condition parameters
# yinit = [vertical spring displacement, pendulum angle relative to vertical, horizontal velocity, vertical velocity]
yinit = [0, pi/4, 0, 0]
y = odeint(model, yinit, time)
# the state equations give us velocity
# integrate again to get displacement
# our variable of interest
ped_y = cumtrapz(y[:,3], time, initial=0)
ped_x = cumtrapz(y[:,2], time, initial=0)
plt.figure(1)
plt.subplot(311)
plt.plot(time, y[:,0])
plt.xlabel('t [s]')
plt.ylabel('Displacement [m]')
plt.title('Displacement of Spring in Y')
plt.grid()
plt.legend()
# plt.subplot(312)
# plt.plot(time, y[:,1])
# plt.xlabel('t [s]')
# plt.ylabel('Displacement [rad]')
# plt.title('Angle of rotation')
# plt.legend()
plt.subplot(312)
plt.plot(time, ped_x)
plt.xlabel('t [s]')
plt.ylabel('Displacement [m]')
plt.title('Displacement of Pendulum in X')
plt.grid()
plt.legend()
plt.subplot(313)
plt.plot(time, ped_y)
plt.xlabel('t [s]')
plt.ylabel('Displacement [m]')
plt.title('Displacement of Pendulum in Y')
plt.grid()
plt.legend()
plt.tight_layout()
plt.show() | mit | -332,666,820,275,739,650 | 23.910112 | 108 | 0.622744 | false | 2.752795 | false | false | false |
peteashton/symbionts.org | tools/importGenomes.py | 1 | 4415 | import os
from pymongo import MongoClient
import argparse
parser = argparse.ArgumentParser(description="Python script for going through steps of adding a new genome to the Symbionts database.")
# STEP 1: Before running this script download .gb files from NCBI. If there are any plasmids concatenate these with main chromosome file.
# Place all files in folder and use this folder name as input for this script.
parser.add_argument("dataFolder",
nargs=1,
type=str,
help="Path to the folder which contains the genome files.")
parser.add_argument("toolsFolder",
nargs=1,
type=str,
help="Path to the folder which contains the python scripts (e.g. tools folder in symbionts.org")
parser.add_argument("--host",
type=str,
default='localhost',
help="Hostname for the MongoDB database (default=localhost)")
parser.add_argument("--port",
type=str,
default=27017,
help="Port where MongoDB is listening (default=27017)")
parser.add_argument("--database",
type=str,
default='symbiont',
help="Name of the database to store the data in (default=symbiont)")
parser.add_argument("--blastHitscollection",
type=str,
default='internal_blast_hits',
help="Name of the collection containing internal blast hits (default = internal_blast_hits")
parser.add_argument("--orthologuescollection",
type=str,
default='orthologues',
help="Name of the collection containing orthologues data (default=orthologues)")
args = parser.parse_args()
client = MongoClient(args.host, args.port)
db = client[args.database]
internal_blast_hits = db[args.blastHitscollection]
orthologues = db[args.orthologuescollection]
# STEP 2: Import the genomes into the MongoDB database using importSequence.py script
dataFolder = args.dataFolder[0]
toolsFolder = args.toolsFolder[0]
# for filename in os.listdir(dataFolder):
# print filename
# s = toolsFolder + "/importSequence.py "+dataFolder + "/" + filename
# os.system("python "+s)
# STEP 3: Create a FASTA file for all the CDS features in the database using dumpBlastDB.py (without the split file flag)
# s = toolsFolder+"/dumpBlastDB.py "+dataFolder+ "/symbionts_proteins.fasta"
# os.system("python "+s)
# STEP 4: Run blast query - every CDS in database against every other CDS in database.
# On server: makeblastdb -in 'symbionts_proteins.fasta' -dbtype prot
# blastp -db 'symbionts_proteins.fasta' -query 'symbionts_proteins.fasta' -evalue 1e-10 -outfmt 7 -out 'selfmatches.txt'
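# Illustrative sketch (added for clarity; not part of the original pipeline).
# STEP 4 could also be driven from this script, assuming the BLAST+ binaries
# makeblastdb and blastp are available on the PATH of the machine running it.
def run_self_blast(fasta_path, out_path="selfmatches.txt"):
    os.system("makeblastdb -in '%s' -dbtype prot" % fasta_path)
    os.system("blastp -db '%s' -query '%s' -evalue 1e-10 -outfmt 7 -out '%s'"
              % (fasta_path, fasta_path, out_path))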
# STEP 5: delete current MongoDB collections - drop the internal_blast_hits and orthologues collections
db.internal_blast_hits.drop()
db.orthologues.drop()
#STEP 6: import new blast hits into the internal_blast_hits collection using importBLASTselfmatch.py
s = toolsFolder + "/importBLASTselfmatch.py " + dataFolder + "/selfmatches.txt --host " + args.host
os.system("python "+s)
#STEP 7: create new orthologues collection using createOrthologuesCollection.py
s = toolsFolder+"createOrthologuesCollection.py --host "+args.host
os.system("python "+s)
#STEP 8: Create FASTA files for the new genomes and plasmids using dumpBlastDB.py (with the split file flag)
s = toolsFolder+"dumpBlastDB.py "+dataFolder+ "/symbionts_proteins --split"
# os.system("python "+s)
# #STEP 9: run KAAS queries using FASTA files from each genome and plasmid (http://www.genome.jp/kaas-bin/kaas_main) and save output from each as text file.
# #STEP 10: add KO details to CDS features in database by running addKONumbers.py with the FASTA file as input1 and KONumbers.txt as input2
# newFolder = "/Users/kn675/Python/NewGenomesFasta/KAASoutput"
# for filename in os.listdir(newFolder):
# print filename
# s = toolsFolder + "addKONumbers.py "+folder + "/" + filename + " "+ toolsFolder +"/KONumbers.txt"+ "--host "+args.hostcreat
# os.system("python " +s)
# STEP 11: add multifun numbers to CDS features
# s = toolsFolder + "addMultiFunNumbers.py "+toolsFolder +"/uniprot_to_go_mapping.txt " + +toolsFolder +"/go_to_multifun_mapping.txt "+ "--host "+args.host
# os.system("python" +s)
| mit | 4,802,950,537,591,274,000 | 42.712871 | 158 | 0.680861 | false | 3.529177 | false | false | false |
lightopa/Aiopa-Battles | lib/raven/breadcrumbs.py | 2 | 11256 | from __future__ import absolute_import
import time
import logging
from types import FunctionType
from raven._compat import iteritems, get_code, text_type, string_types
from raven.utils import once
special_logging_handlers = []
special_logger_handlers = {}
logger = logging.getLogger('raven')
def event_payload_considered_equal(a, b):
return (
a['type'] == b['type'] and
a['level'] == b['level'] and
a['message'] == b['message'] and
a['category'] == b['category'] and
a['data'] == b['data']
)
class BreadcrumbBuffer(object):
def __init__(self, limit=100):
self.buffer = []
self.limit = limit
def record(self, timestamp=None, level=None, message=None,
category=None, data=None, type=None, processor=None):
if not (message or data or processor):
raise ValueError('You must pass either `message`, `data`, '
'or `processor`')
if timestamp is None:
timestamp = time.time()
self.buffer.append(({
'type': type or 'default',
'timestamp': timestamp,
'level': level,
'message': message,
'category': category,
'data': data,
}, processor))
del self.buffer[:-self.limit]
def clear(self):
del self.buffer[:]
def get_buffer(self):
rv = []
for idx, (payload, processor) in enumerate(self.buffer):
if processor is not None:
try:
processor(payload)
except Exception:
logger.exception('Failed to process breadcrumbs. Ignored')
payload = None
self.buffer[idx] = (payload, None)
if payload is not None and \
(not rv or not event_payload_considered_equal(rv[-1], payload)):
rv.append(payload)
return rv
class BlackholeBreadcrumbBuffer(BreadcrumbBuffer):
def record(self, *args, **kwargs):
pass
def make_buffer(enabled=True):
if enabled:
return BreadcrumbBuffer()
return BlackholeBreadcrumbBuffer()
def record_breadcrumb(type, *args, **kwargs):
# Legacy alias
kwargs['type'] = type
return record(*args, **kwargs)
def record(message=None, timestamp=None, level=None, category=None,
data=None, type=None, processor=None):
"""Records a breadcrumb for all active clients. This is what integration
code should use rather than invoking the `captureBreadcrumb` method
on a specific client.
"""
if timestamp is None:
timestamp = time.time()
for ctx in raven.context.get_active_contexts():
ctx.breadcrumbs.record(timestamp, level, message, category,
data, type, processor)
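# A small usage sketch (illustrative values; the category and data below are made up):
# integration code calls this module-level function instead of talking to a client, e.g.
#
#     from raven import breadcrumbs
#     breadcrumbs.record(message='cache miss', category='cache',
#                        level='info', data={'key': 'user:42'})
#
# Every active context stores the crumb, so it can be attached to the next captured event.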
def _record_log_breadcrumb(logger, level, msg, *args, **kwargs):
for handler in special_logging_handlers:
rv = handler(logger, level, msg, args, kwargs)
if rv:
return
handler = special_logger_handlers.get(logger.name)
if handler is not None:
rv = handler(logger, level, msg, args, kwargs)
if rv:
return
def processor(data):
formatted_msg = msg
# If people log bad things, this can happen. Then just don't do
# anything.
try:
formatted_msg = text_type(msg)
if args:
formatted_msg = msg % args
except Exception:
pass
# We do not want to include exc_info as argument because it often
# lies (set to a constant value like 1 or True) or even if it's a
# tuple it will not be particularly useful for us as we cannot
# process it anyways.
kwargs.pop('exc_info', None)
data.update({
'message': formatted_msg,
'category': logger.name,
'level': logging.getLevelName(level).lower(),
'data': kwargs,
})
record(processor=processor)
def _wrap_logging_method(meth, level=None):
if not isinstance(meth, FunctionType):
func = meth.im_func
else:
func = meth
# We were patched for raven before
if getattr(func, '__patched_for_raven__', False):
return
if level is None:
args = ('level', 'msg')
fwd = 'level, msg'
else:
args = ('msg',)
fwd = '%d, msg' % level
code = get_code(func)
# This requires a bit of explanation why we're doing this. Due to how
# logging itself works we need to pretend that the method actually was
# created within the logging module. There are a few ways to detect
# this and we fake all of them: we use the same function globals (the
# one from the logging module), we create it entirely there which
# means that also the filename is set correctly. This fools the
# detection code in logging and it makes logging itself skip past our
# code when determining the code location.
#
# Because we point the globals to the logging module we now need to
# refer to our own functions (original and the crumb recording
# function) through a closure instead of the global scope.
#
# We also add a lot of newlines in front of the code so that the
# code location lines up again in case someone runs inspect.getsource
# on the function.
ns = {}
eval(compile('''%(offset)sif 1:
def factory(original, record_crumb):
def %(name)s(self, %(args)s, *args, **kwargs):
record_crumb(self, %(fwd)s, *args, **kwargs)
return original(self, %(args)s, *args, **kwargs)
return %(name)s
\n''' % {
'offset': '\n' * (code.co_firstlineno - 3),
'name': func.__name__,
'args': ', '.join(args),
'fwd': fwd,
'level': level,
}, logging._srcfile, 'exec'), logging.__dict__, ns)
new_func = ns['factory'](meth, _record_log_breadcrumb)
new_func.__doc__ = func.__doc__
assert code.co_firstlineno == get_code(func).co_firstlineno
assert new_func.__module__ == func.__module__
assert new_func.__name__ == func.__name__
new_func.__patched_for_raven__ = True
return new_func
def _patch_logger():
cls = logging.Logger
methods = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'warn': logging.WARN,
'error': logging.ERROR,
'exception': logging.ERROR,
'critical': logging.CRITICAL,
'fatal': logging.FATAL
}
for method_name, level in iteritems(methods):
new_func = _wrap_logging_method(
getattr(cls, method_name), level)
setattr(logging.Logger, method_name, new_func)
logging.Logger.log = _wrap_logging_method(
logging.Logger.log)
@once
def install_logging_hook():
"""Installs the logging hook if it was not installed yet. Otherwise
does nothing.
"""
_patch_logger()
def ignore_logger(name_or_logger, allow_level=None):
"""Ignores a logger for the regular breadcrumb code. This is useful
for framework integration code where some log messages should be
specially handled.
"""
def handler(logger, level, msg, args, kwargs):
if allow_level is not None and \
level >= allow_level:
return False
return True
register_special_log_handler(name_or_logger, handler)
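# Illustrative sketch ('noisy.lib' is a made-up logger name):
#
#     ignore_logger('noisy.lib')                               # record no breadcrumbs from it
#     ignore_logger('noisy.lib', allow_level=logging.WARNING)  # still record WARNING and above
#
# because the handler above only claims records whose level is below allow_level.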
def register_special_log_handler(name_or_logger, callback):
"""Registers a callback for log handling. The callback is invoked
    with five arguments: `logger`, `level`, `msg`, `args` and `kwargs`
which are the values passed to the logging system. If the callback
returns `True` the default handling is disabled.
"""
if isinstance(name_or_logger, string_types):
name = name_or_logger
else:
name = name_or_logger.name
special_logger_handlers[name] = callback
def register_logging_handler(callback):
"""Registers a callback for log handling. The callback is invoked
    with five arguments: `logger`, `level`, `msg`, `args` and `kwargs`
which are the values passed to the logging system. If the callback
returns `True` the default handling is disabled.
"""
special_logging_handlers.append(callback)
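# Sketch of a callback for the hook above (purely illustrative names): returning True
# suppresses the default breadcrumb handling for that log record.
#
#     def drop_debug(logger, level, msg, args, kwargs):
#         return level <= logging.DEBUG
#
#     register_logging_handler(drop_debug)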
hooked_libraries = {}
def libraryhook(name):
def decorator(f):
f = once(f)
hooked_libraries[name] = f
return f
return decorator
@libraryhook('requests')
def _hook_requests():
try:
from requests.sessions import Session
except ImportError:
return
real_send = Session.send
def send(self, request, *args, **kwargs):
def _record_request(response):
record(type='http', category='requests', data={
'url': request.url,
'method': request.method,
'status_code': response and response.status_code or None,
'reason': response and response.reason or None,
})
try:
resp = real_send(self, request, *args, **kwargs)
except Exception:
_record_request(None)
raise
else:
_record_request(resp)
return resp
Session.send = send
ignore_logger('requests.packages.urllib3.connectionpool',
allow_level=logging.WARNING)
@libraryhook('httplib')
def _install_httplib():
try:
from httplib import HTTPConnection
except ImportError:
from http.client import HTTPConnection
real_putrequest = HTTPConnection.putrequest
real_getresponse = HTTPConnection.getresponse
def putrequest(self, method, url, *args, **kwargs):
self._raven_status_dict = status = {}
host = self.host
port = self.port
default_port = self.default_port
def processor(data):
real_url = url
if not real_url.startswith(('http://', 'https://')):
real_url = '%s://%s%s%s' % (
default_port == 443 and 'https' or 'http',
host,
port != default_port and ':%s' % port or '',
url,
)
data['data'] = {
'url': real_url,
'method': method,
}
data['data'].update(status)
return data
record(type='http', category='requests', processor=processor)
return real_putrequest(self, method, url, *args, **kwargs)
def getresponse(self, *args, **kwargs):
rv = real_getresponse(self, *args, **kwargs)
status = getattr(self, '_raven_status_dict', None)
if status is not None and 'status_code' not in status:
status['status_code'] = rv.status
status['reason'] = rv.reason
return rv
HTTPConnection.putrequest = putrequest
HTTPConnection.getresponse = getresponse
def hook_libraries(libraries):
if libraries is None:
libraries = hooked_libraries.keys()
for lib in libraries:
func = hooked_libraries.get(lib)
if func is None:
raise RuntimeError('Unknown library %r for hooking' % lib)
func()
import raven.context
| mit | -3,019,579,466,877,571,600 | 29.923077 | 79 | 0.593106 | false | 4.120059 | false | false | false |
operatorequals/gatheros | gatheros/execute/command_function.py | 1 | 2703 | import socket
import paramiko
import os, sys
import getpass
from time import sleep
client = None
ssh = None
address = None
def runSocketCommand( comm ) :
canc_rand = os.urandom(4).encode('hex')
compl_rand = os.urandom(4).encode('hex')
command = ' ' + comm + ' && echo %s || echo %s \n' % ( compl_rand, canc_rand )
# print "> " + command,
# try :
client.send( command )
# client.sendto( command, address )
resp = ''
while compl_rand not in resp and canc_rand not in resp :
sleep(0.1)
resp += client.recvfrom( 4096 * 4 )[0]
resp = resp.strip()
if compl_rand in resp :
return resp.replace( compl_rand, '' )
if canc_rand in resp :
return ''
return resp
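# Note on the protocol above (illustrative): runSocketCommand("id") actually sends
# " id && echo <done-marker> || echo <fail-marker>\n" and keeps reading until one of the
# two random markers shows up, so the caller gets the command output with the marker
# stripped, or '' when the command failed.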
def runLocalhostCommand( comm ) :
return os.popen( " " + comm ).read()
def runSSHCommand( comm ) :
stdin, stdout, stderr = ssh.exec_command( comm )
out = stdout.read()
if not out :
return stderr.read()
return out
def get_command_execute ( args ) :
global client
global ssh
global address
if args.command == "bind" :
if args.udp :
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
else :
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
address = (args.IP, args.port )
client.connect( address )
runCommand = runSocketCommand
elif args.command == "reverse" :
if args.udp :
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client = server
ip, port = raw_input("IP and port of the remote host [IP:address] : ").strip().split(':')
address = ( ip.strip(), int( port.strip()) )
else :
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind( ("0.0.0.0", args.port ) )
print "Waiting for the Reverse Shell at port %d" % args.port
try :
if not args.udp :
server.listen(5)
client, address = server.accept()
except KeyboardInterrupt :
print "Aborted by user..."
sys.exit(-2)
runCommand = runSocketCommand
elif args.command == "local" :
runCommand = runLocalhostCommand
elif args.command == "ssh" :
user, host = args.connection.split('@')[:2]
password = args.password
if not password :
password = getpass.getpass("(SSH) Password for user '%s': " % user)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy( paramiko.AutoAddPolicy() )
try :
ssh.connect( host , username = user, password = password, port = args.port )
except paramiko.ssh_exception.AuthenticationException :
print "Authentication Failed"
sys.exit(-1)
except paramiko.ssh_exception.NoValidConnectionsError :
print "No SSH server found on port %s:%d" % (host, args.port)
sys.exit(-2)
runCommand = runSSHCommand
return runCommand | bsd-3-clause | -5,620,650,275,908,728,000 | 23.807339 | 92 | 0.670736 | false | 3.061155 | false | false | false |
BoldingBruggeman/gotm | gui.py/xmlplot/data/hdf4.py | 1 | 7559 | import numpy
import xmlstore.util
import xmlplot.common
datatypes = {3:numpy.ubyte,
4:numpy.byte,
5:numpy.float32,
6:numpy.float64,
20:numpy.int8,
21:numpy.uint8,
22:numpy.int16,
23:numpy.uint16,
24:numpy.int32,
25:numpy.uint32}
class HDF4Store(xmlplot.common.VariableStore,xmlstore.util.referencedobject):
class Variable(xmlplot.common.Variable):
def __init__(self,store,hdfvar):
xmlplot.common.Variable.__init__(self,store)
self.hdfvar = hdfvar
self.info = self.hdfvar.info()
def getName_raw(self):
return self.info[0]
def getDimensions_raw(self):
dimnames = []
for idim in range(self.info[1]):
dim = self.hdfvar.dim(idim)
dimnames.append(dim.info()[0])
return dimnames
def getLongName(self):
atts = self.getProperties()
if 'long_name' in atts: return atts['long_name']
return xmlplot.common.Variable.getLongName(self)
def getUnit(self):
atts = self.getProperties()
if 'units' in atts: return atts['units']
return xmlplot.common.Variable.getUnit(self)
def getShape(self):
shape = self.info[2]
if isinstance(shape,int): shape = (shape,)
return shape
def getDataType(self):
return datatypes.get(self.info[3],None)
def getProperties(self):
return self.hdfvar.attributes()
def getSlice(self,bounds=None,dataonly=False,transfercoordinatemask=True):
dimnames = self.getDimensions_raw()
shape = self.getShape()
# Determine final slice
if bounds is None: bounds = (Ellipsis,)
newbounds = []
for bound,dimlen,dimname in zip(xmlplot.common.processEllipsis(bounds,shape),shape,dimnames):
if isinstance(bound,int):
# Integer value provided as index.
assert bound>=-dimlen, 'Slice index %i lies below the lowest possible index for dimension %s (%i).' % (bound,dimname,-dimlen )
assert bound< dimlen, 'Slice index %i exceeds the highest possible index for dimension %s (%i).' % (bound,dimname, dimlen-1)
if bound<0: bound += dimlen
elif isinstance(bound,slice):
start,stop,step = bound.indices(dimlen)
bound = slice(start,stop,step)
newbounds.append(bound)
bounds = tuple(newbounds)
# Get data
dat = numpy.asarray(self.hdfvar[bounds])
# Mask fill value
fillvalue = self.hdfvar.attributes().get('_FillValue',None)
if fillvalue is None: fillvalue = self.hdfvar.attributes().get('Fill',None)
if fillvalue is not None: dat = numpy.ma.array(dat,mask=(dat==fillvalue),copy=False)
            # Determine scale factor and offset, and cast data to an accommodating type if needed.
            scale = self.hdfvar.attributes().get('scale_factor',None)
            offset = self.hdfvar.attributes().get('add_offset', None)
            if (scale is not None or offset is not None) and dat.dtype!=numpy.float:
dat = dat.astype(numpy.float)
if scale is not None: dat *= scale
if offset is not None: dat += offset
if dataonly: return dat
newdimnames = [d for d,b in zip(dimnames,bounds) if not isinstance(b,int)]
varslice = self.Slice(newdimnames)
            varslice.data = dat
            # Track the combined mask of the data; coordinate masks may be merged in below.
            datamask = numpy.ma.getmask(dat)
            inewdim = 0
for dimname,bound in zip(dimnames,bounds):
# Get the coordinate variable
coordvar = self.store.getVariable_raw(dimname)
if coordvar is None:
# No coordinate variable available: use indices
if not isinstance(bound,slice): continue
coorddims = [dimname]
coords = numpy.arange(bound.start,bound.stop,bound.step,dtype=numpy.float)
else:
# Coordinate variable present: use it.
coorddims = list(coordvar.getDimensions())
# Debug check: see if all coordinate dimensions are also used by the variable.
for cd in coorddims:
assert cd in dimnames, 'Coordinate dimension %s is not used by this variable (it uses %s).' % (cd,', '.join(dimnames))
# Get coordinate values
coordslice = [bounds[dimnames.index(cd)] for cd in coorddims]
coords = coordvar.getSlice(coordslice, dataonly=True)
# Get the list of coordinate dimensions after the ones with single index have been sliced out.
newcoorddims = [cd for cd in coorddims if isinstance(bounds[dimnames.index(cd)],slice)]
# Transfer the coordinate mask to the data if desired.
coordmask = numpy.ma.getmask(coords)
if transfercoordinatemask and coordmask is not numpy.ma.nomask:
coordmask = xmlplot.common.broadcastSelective(coordmask,newcoorddims,dat.shape,newdimnames)
if datamask is numpy.ma.nomask:
datamask = coordmask
else:
datamask |= coordmask
# If we take a single index for this dimension, it will not be included in the output.
if not isinstance(bound,slice): continue
# Coordinates should not have a mask - undo the masking.
if coordmask is not numpy.ma.nomask:
coords = numpy.ma.getdata(coords)
# Auto-generate staggered coordinates
coords_stag = xmlplot.common.stagger(coords)
# Insert data dimensions where they are lacking in coordinate
coords = xmlplot.common.broadcastSelective(coords, (dimname,),dat.shape, newdimnames)
coords_stag = xmlplot.common.broadcastSelective(coords_stag,(dimname,),[l+1 for l in dat.shape],newdimnames)
# Assign coordinate values
varslice.coords [inewdim] = coords
varslice.coords_stag[inewdim] = coords_stag
inewdim += 1
            # Merge any transferred coordinate masks back into the returned data.
            if datamask is not numpy.ma.nomask:
                varslice.data = numpy.ma.masked_where(datamask,varslice.data,copy=False)
            return varslice
def __init__(self,path):
xmlplot.common.VariableStore.__init__(self)
xmlstore.util.referencedobject.__init__(self)
from pyhdf.SD import SD, SDC
self.file = SD(str(path),SDC.READ)
def getVariable_raw(self,varname):
"""Returns a Variable object for the given original short variable name.
The method must be implemented by derived classes.
"""
if varname not in self.file.datasets().keys(): return None
return self.Variable(self,self.file.select(varname))
def getVariableNames_raw(self):
"""Returns a list of original short names for all variables present in the store.
The method must be implemented by derived classes.
"""
return self.file.datasets().keys()
def getProperties(self):
return self.file.attributes()
def unlink(self):
self.file.end() | gpl-2.0 | 8,525,952,295,102,005,000 | 42.448276 | 147 | 0.566477 | false | 4.399884 | false | false | false |
bmmalone/pymisc-utils | pyllars/hyperparameter_utils.py | 1 | 3377 | """
This module contains very high-level helpers for selecting hyperparameters
for machine learning models using a train-validation-test strategy. Typical
usage looks as follows:
```
# create the hyperparameter grid
hp_grid = sklearn.model_selection.ParameterGrid({
...
})
# create an iterator over the hyperparameter grid and folds
hp_fold_it = hp_utils.get_hp_fold_iterator(hp_grid, num_folds)
# distribute training to the dask cluster
f_res = dask_utils.apply_iter(
hp_fold_it,
dask_client,
hp_utils.evaluate_hyperparameters_helper,
args=args,
...,
return_futures=True
)
# collect the results from dask
all_res = dask_utils.collect_results(f_res)
# parse the results
df_results = hp_utils.get_hp_results(all_res)
# select the best hyperparameters using the validation set results
evaluation_metric = 'micro_roc_auc_score'
best_val_hps = hp_utils.get_best_hyperparameters(
df_results,
evaluation_metric=evaluation_metric,
selection_function=np.argmax # **this depends on the evaluation metric**
)
# pull out the results on those folds
m_val_best = (df_results['hyperparameters_str'] == val_best)
```
"""
import logging
logger = logging.getLogger(__name__)
import itertools
import json
import pandas as pd
import pyllars.ml_utils as ml_utils
import pyllars.pandas_utils as pd_utils
def get_hp_fold_iterator(hp_grid, num_folds):
""" Create an iterator over all combinations of hyperparameters and folds
"""
    hp_grid = list(hp_grid)
folds = list(range(num_folds))
    # one entry per (hyperparameters, validation fold, test fold) combination, as
    # unpacked by evaluate_hyperparameters_helper below
    hp_fold_it = itertools.product(hp_grid, folds, folds)
hp_fold_it = list(hp_fold_it)
return hp_fold_it
def evaluate_hyperparameters_helper(hv, *args, **kwargs):
# these come from our iterator
hyperparameters = hv[0]
validation_folds = hv[1]
test_folds = hv[2]
res = ml_utils.evaluate_hyperparameters(
hyperparameters=hyperparameters,
validation_folds=validation_folds,
test_folds=test_folds,
*args,
**kwargs
)
return res
def _get_res(res):
ret_val = {
'validation_{}'.format(k): v
for k,v in res.metrics_val.items()
}
ret_test = {
'test_{}'.format(k): v
for k,v in res.metrics_test.items()
}
ret = ret_val
ret.update(ret_test)
hp_string = json.dumps(res.hyperparameters)
ret['hyperparameters_str'] = hp_string
ret['hyperparameters'] = res.hyperparameters
ret['validation_fold'] = res.fold_val
ret['test_fold'] = res.fold_test
return ret
def get_hp_results(all_res):
""" Create the results data frame
"""
results = [
_get_res(res) for res in all_res
]
df_results = pd.DataFrame(results)
return df_results
def get_best_hyperparameters(df_results, evaluation_metric, selection_function):
""" Based on the performance on the validation, select the best hyperparameters
"""
hp_groups = df_results.groupby('hyperparameters_str')
validation_evaluation_metric = "validation_{}".format(evaluation_metric)
test_evaluation_metric = "test_{}".format(evaluation_metric)
# find the mean of each set of hp's across all folds
val_performance = hp_groups[validation_evaluation_metric].mean()
# now, select the best
val_best = selection_function(val_performance)
return val_best | mit | -5,544,677,781,735,353,000 | 25.390625 | 83 | 0.674859 | false | 3.569767 | true | false | false |
sbinet-staging/pyrame | bus/cmd_serial/get_dev_linux.py | 1 | 3233 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Frédéric Magniette, Miguel Rubio-Roy
# This file is part of Pyrame.
#
# Pyrame is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrame is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrame. If not, see <http://www.gnu.org/licenses/>
import sys,os,subprocess,re
if len(sys.argv)<3:
print("usage %s vendor product [serial_number]"%(sys.argv[0]))
sys.exit(1)
vendor = sys.argv[1]
product = sys.argv[2]
if len(sys.argv)>3 and sys.argv[3]!="undef":
serialnum = sys.argv[3]
else:
serialnum = None
result = subprocess.Popen(["/usr/bin/lsusb"],stdout=subprocess.PIPE)
res,_ = result.communicate()
if result.returncode!=0:
print("error getting USB list: %s"%(res))
sys.exit(1)
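# lsusb prints lines such as "Bus 001 Device 004: ID 0403:6001 FTDI ..." (example values),
# so for vendor=0403 and product=6001 the regexp below captures the ('001', '004')
# bus/device pair that is later used to build the /dev/bus/usb path.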
buses_devs=re.findall("Bus (.*?) Device (.*?): ID %s:%s"%(vendor,product),res)
if len(buses_devs)==0:
print("vendor and/or product id's not found")
sys.exit(1)
sys.stderr.write("found %d devices\n"%(len(buses_devs)))
if not serialnum and len(buses_devs)!=1:
print("multiple devices with same vendor and product id and serial number not provided")
sys.exit(1)
devnames=[]
errors=[]
for bus_dev in buses_devs:
result = subprocess.Popen(("udevadm info -q path -n /dev/bus/usb/%s/%s"%(bus_dev[0],bus_dev[1])).split(" "),stdout=subprocess.PIPE)
res,_ = result.communicate()
if result.returncode!=0:
errors.append("error getting USB device path for bus %s dev %s"%(bus_dev[0],bus_dev[1]))
sys.stderr.write(errors[-1]+"\n")
continue
path = "/sys"+res.strip()
sys.stderr.write("\nchecking out %s\n"%(path))
result = subprocess.Popen(("find %s -name tty*"%(path)).split(" "),stdout=subprocess.PIPE)
res,_ = result.communicate()
if result.returncode!=0 or res.strip()=="":
errors.append("error getting ttyUSB device path for %s"%(path))
sys.stderr.write(errors[-1]+"\n")
continue
if serialnum:
if os.path.exists(path+"/serial"):
with open(path+"/serial","r") as f: s = f.read()
if s.strip()!=serialnum:
errors.append("invalid serial number for %s"%(path))
sys.stderr.write(errors[-1]+"\n")
continue
else:
errors.append("no serial number on %s"%(path))
sys.stderr.write(errors[-1]+"\n")
continue
devnames.append("/dev/"+res.split("\n")[0].split("/")[-1])
sys.stderr.write("found device at %s\n"%(devnames[-1]))
sys.stderr.write("\n")
if len(devnames)>1:
print("multiple matches found")
if len(errors)!=0:
print(":"+";".join(errors))
sys.exit(1)
if len(devnames)==0:
print("no device found")
sys.exit(1)
print(devnames[0])
sys.exit(0)
| lgpl-3.0 | 4,265,908,501,331,774,500 | 33.37234 | 135 | 0.641597 | false | 3.283537 | false | false | false |
ajurcevic/calibre-web | cps/web.py | 1 | 121607 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from pydrive.auth import GoogleAuth
from googleapiclient.errors import HttpError
gdrive_support = True
except ImportError:
gdrive_support = False
import mimetypes
import logging
from logging.handlers import RotatingFileHandler
import textwrap
from flask import (Flask, render_template, request, Response, redirect,
url_for, send_from_directory, make_response, g, flash,
abort, Markup, stream_with_context)
from flask import __version__ as flaskVersion
import ub
from ub import config
import helper
import os
import errno
from sqlalchemy.sql.expression import func
from sqlalchemy.sql.expression import false
from sqlalchemy.exc import IntegrityError
from sqlalchemy import __version__ as sqlalchemyVersion
from math import ceil
from flask_login import (LoginManager, login_user, logout_user,
login_required, current_user)
from flask_principal import Principal
from flask_principal import __version__ as flask_principalVersion
from flask_babel import Babel
from flask_babel import gettext as _
import requests
import zipfile
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.datastructures import Headers
from babel import Locale as LC
from babel import negotiate_locale
from babel import __version__ as babelVersion
from babel.dates import format_date
from functools import wraps
import base64
from sqlalchemy.sql import *
import json
import datetime
from iso639 import languages as isoLanguages
from iso639 import __version__ as iso639Version
from uuid import uuid4
import os.path
import sys
import subprocess
import re
import db
from shutil import move, copyfile
from tornado.ioloop import IOLoop
import shutil
import gdriveutils
import tempfile
import hashlib
from tornado import version as tornadoVersion
try:
from urllib.parse import quote
from imp import reload
except ImportError:
from urllib import quote
try:
from flask_login import __version__ as flask_loginVersion
except ImportError:
from flask_login.__about__ import __version__ as flask_loginVersion
import time
current_milli_time = lambda: int(round(time.time() * 1000))
try:
from wand.image import Image
use_generic_pdf_cover = False
except ImportError:
use_generic_pdf_cover = True
# Global variables
gdrive_watch_callback_token = 'target=calibreweb-watch_files'
global_task = None
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'epub', 'mobi', 'azw', 'azw3', 'cbr', 'cbz', 'cbt', 'djvu', 'prc', 'doc', 'docx', 'fb2'])
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
class Singleton:
"""
A non-thread-safe helper class to ease implementing singletons.
This should be used as a decorator -- not a metaclass -- to the
class that should be a singleton.
The decorated class can define one `__init__` function that
takes only the `self` argument. Also, the decorated class cannot be
inherited from. Other than that, there are no restrictions that apply
to the decorated class.
To get the singleton instance, use the `Instance` method. Trying
to use `__call__` will result in a `TypeError` being raised.
"""
def __init__(self, decorated):
self._decorated = decorated
def Instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
def __call__(self):
raise TypeError('Singletons must be accessed through `Instance()`.')
def __instancecheck__(self, inst):
return isinstance(inst, self._decorated)
@Singleton
class Gauth:
def __init__(self):
self.auth = GoogleAuth(settings_file='settings.yaml')
@Singleton
class Gdrive:
def __init__(self):
self.drive = gdriveutils.getDrive(Gauth.Instance().auth)
class ReverseProxied(object):
"""Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
Code courtesy of: http://flask.pocoo.org/snippets/35/
In nginx:
location /myprefix {
proxy_pass http://127.0.0.1:8083;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
"""
def __init__(self, application):
self.app = application
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ.get('PATH_INFO', '')
if path_info and path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
server = environ.get('HTTP_X_FORWARDED_SERVER', '')
if server:
environ['HTTP_HOST'] = server
return self.app(environ, start_response)
# Main code
mimetypes.init()
mimetypes.add_type('application/xhtml+xml', '.xhtml')
mimetypes.add_type('application/epub+zip', '.epub')
mimetypes.add_type('application/x-mobipocket-ebook', '.mobi')
mimetypes.add_type('application/x-mobipocket-ebook', '.prc')
mimetypes.add_type('application/vnd.amazon.ebook', '.azw')
mimetypes.add_type('application/x-cbr', '.cbr')
mimetypes.add_type('application/x-cbz', '.cbz')
mimetypes.add_type('application/x-cbt', '.cbt')
mimetypes.add_type('image/vnd.djvu', '.djvu')
app = (Flask(__name__))
app.wsgi_app = ReverseProxied(app.wsgi_app)
gevent_server = None
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
file_handler = RotatingFileHandler(os.path.join(config.get_main_dir, "calibre-web.log"), maxBytes=50000, backupCount=2)
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
app.logger.setLevel(config.config_log_level)
app.logger.info('Starting Calibre Web...')
logging.getLogger("book_formats").addHandler(file_handler)
logging.getLogger("book_formats").setLevel(config.config_log_level)
Principal(app)
babel = Babel(app)
import uploader
lm = LoginManager(app)
lm.init_app(app)
lm.login_view = 'login'
lm.anonymous_user = ub.Anonymous
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
db.setup_db()
if config.config_log_level == logging.DEBUG:
logging.getLogger("sqlalchemy.engine").addHandler(file_handler)
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
logging.getLogger("sqlalchemy.pool").addHandler(file_handler)
logging.getLogger("sqlalchemy.pool").setLevel(config.config_log_level)
logging.getLogger("sqlalchemy.orm").addHandler(file_handler)
logging.getLogger("sqlalchemy.orm").setLevel(config.config_log_level)
def is_gdrive_ready():
return os.path.exists('settings.yaml') and os.path.exists('gdrive_credentials')
@babel.localeselector
def get_locale():
# if a user is logged in, use the locale from the user settings
user = getattr(g, 'user', None)
if user is not None and hasattr(user, "locale"):
return user.locale
translations = [item.language for item in babel.list_translations()] + ['en']
preferred = [x.replace('-', '_') for x in request.accept_languages.values()]
return negotiate_locale(preferred, translations)
@babel.timezoneselector
def get_timezone():
user = getattr(g, 'user', None)
if user is not None:
return user.timezone
@lm.user_loader
def load_user(user_id):
return ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
@lm.header_loader
def load_user_from_header(header_val):
if header_val.startswith('Basic '):
header_val = header_val.replace('Basic ', '', 1)
basic_username = basic_password = ''
try:
header_val = base64.b64decode(header_val)
basic_username = header_val.split(':')[0]
basic_password = header_val.split(':')[1]
except TypeError:
pass
user = ub.session.query(ub.User).filter(ub.User.nickname == basic_username).first()
if user and check_password_hash(user.password, basic_password):
return user
return
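# For example, a client sending "Authorization: Basic dXNlcjpwYXNz" reaches this loader
# with header_val == 'Basic dXNlcjpwYXNz', which decodes above to the nickname/password
# pair "user"/"pass" before the password hash is checked.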
def check_auth(username, password):
user = ub.session.query(ub.User).filter(ub.User.nickname == username).first()
return bool(user and check_password_hash(user.password, password))
def authenticate():
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def updateGdriveCalibreFromLocal():
gdriveutils.backupCalibreDbAndOptionalDownload(Gdrive.Instance().drive)
gdriveutils.copyToDrive(Gdrive.Instance().drive, config.config_calibre_dir, False, True)
for x in os.listdir(config.config_calibre_dir):
if os.path.isdir(os.path.join(config.config_calibre_dir, x)):
shutil.rmtree(os.path.join(config.config_calibre_dir, x))
def requires_basic_auth_if_no_ano(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if config.config_anonbrowse != 1:
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
# simple pagination for the feed
class Pagination(object):
def __init__(self, page, per_page, total_count):
self.page = int(page)
self.per_page = int(per_page)
self.total_count = int(total_count)
@property
def next_offset(self):
return int(self.page * self.per_page)
@property
def previous_offset(self):
return int((self.page - 2) * self.per_page)
@property
def last_offset(self):
last = int(self.total_count) - int(self.per_page)
if last < 0:
last = 0
return int(last)
@property
def pages(self):
return int(ceil(self.total_count / float(self.per_page)))
@property
def has_prev(self):
return self.page > 1
@property
def has_next(self):
return self.page < self.pages
def iter_pages(self, left_edge=2, left_current=2,
right_current=5, right_edge=2):
last = 0
        for num in range(1, self.pages + 1): # ToDo: can be simplified
if num <= left_edge or (num > self.page - left_current - 1 and num < self.page + right_current) \
or num > self.pages - right_edge:
if last + 1 != num:
yield None
yield num
last = num
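# For example, Pagination(page=3, per_page=25, total_count=110) yields pages == 5,
# previous_offset == 25, next_offset == 75 and last_offset == 85, which the OPDS feed
# templates below can use for their prev/next/last paging links.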
# pagination links in jinja
def url_for_other_page(page):
args = request.view_args.copy()
args['page'] = page
return url_for(request.endpoint, **args)
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
def login_required_if_no_ano(func):
if config.config_anonbrowse == 1:
return func
return login_required(func)
# custom jinja filters
@app.template_filter('shortentitle')
def shortentitle_filter(s):
if len(s) > 60:
s = s.split(':', 1)[0]
if len(s) > 60:
s = textwrap.wrap(s, 60, break_long_words=False)[0] + ' [...]'
return s
@app.template_filter('mimetype')
def mimetype_filter(val):
try:
s = mimetypes.types_map['.' + val]
except Exception:
s = 'application/octet-stream'
return s
@app.template_filter('formatdate')
def formatdate(val):
conformed_timestamp = re.sub(r"[:]|([-](?!((\d{2}[:]\d{2})|(\d{4}))$))", '', val)
formatdate = datetime.datetime.strptime(conformed_timestamp[:15], "%Y%m%d %H%M%S")
return format_date(formatdate, format='medium', locale=get_locale())
@app.template_filter('strftime')
def timestamptodate(date, fmt=None):
date = datetime.datetime.fromtimestamp(
int(date)/1000
)
native = date.replace(tzinfo=None)
if fmt:
time_format = fmt
else:
        time_format = '%d %m %Y - %H:%M'
return native.strftime(time_format)
def admin_required(f):
"""
Checks if current_user.role == 1
"""
@wraps(f)
def inner(*args, **kwargs):
if current_user.role_admin():
return f(*args, **kwargs)
abort(403)
return inner
def unconfigured(f):
"""
Checks if current_user.role == 1
"""
@wraps(f)
def inner(*args, **kwargs):
if not config.db_configured:
return f(*args, **kwargs)
abort(403)
return inner
def download_required(f):
@wraps(f)
def inner(*args, **kwargs):
if current_user.role_download() or current_user.role_admin():
return f(*args, **kwargs)
abort(403)
return inner
def upload_required(f):
@wraps(f)
def inner(*args, **kwargs):
if current_user.role_upload() or current_user.role_admin():
return f(*args, **kwargs)
abort(403)
return inner
def edit_required(f):
@wraps(f)
def inner(*args, **kwargs):
if current_user.role_edit() or current_user.role_admin():
return f(*args, **kwargs)
abort(403)
return inner
# Fill indexpage with all requested data from database
def fill_indexpage(page, database, db_filter, order):
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
if current_user.show_detail_random():
random = db.session.query(db.Books).filter(lang_filter).order_by(func.random()).limit(config.config_random_books)
else:
random = false
off = int(int(config.config_books_per_page) * (page - 1))
pagination = Pagination(page, config.config_books_per_page,
len(db.session.query(database).filter(db_filter).filter(lang_filter).all()))
entries = db.session.query(database).filter(db_filter).filter(lang_filter).order_by(order).offset(off).limit(
config.config_books_per_page)
return entries, random, pagination
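# Illustrative behaviour of the helper above: with config_books_per_page = 60, a request
# for page 2 skips the first 60 matching rows and returns the next 60 in the requested
# order, plus a Pagination object and, when the user has show_detail_random() enabled,
# config_random_books randomly picked titles.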
def modify_database_object(input_elements, db_book_object, db_object, db_session, db_type):
input_elements = [x for x in input_elements if x != '']
# we have all input element (authors, series, tags) names now
# 1. search for elements to remove
del_elements = []
for c_elements in db_book_object:
found = False
if db_type == 'languages':
type_elements = c_elements.lang_code
elif db_type == 'custom':
type_elements = c_elements.value
else:
type_elements = c_elements.name
for inp_element in input_elements:
if inp_element == type_elements:
found = True
break
# if the element was not found in the new list, add it to remove list
if not found:
del_elements.append(c_elements)
# 2. search for elements that need to be added
add_elements = []
for inp_element in input_elements:
found = False
for c_elements in db_book_object:
if db_type == 'languages':
type_elements = c_elements.lang_code
elif db_type == 'custom':
type_elements = c_elements.value
else:
type_elements = c_elements.name
if inp_element == type_elements:
found = True
break
if not found:
add_elements.append(inp_element)
# if there are elements to remove, we remove them now
if len(del_elements) > 0:
for del_element in del_elements:
db_book_object.remove(del_element)
if len(del_element.books) == 0:
db_session.delete(del_element)
# if there are elements to add, we add them now!
if len(add_elements) > 0:
if db_type == 'languages':
db_filter = db_object.lang_code
elif db_type == 'custom':
db_filter = db_object.value
else:
db_filter = db_object.name
for add_element in add_elements:
# check if a element with that name exists
new_element = db_session.query(db_object).filter(db_filter == add_element).first()
# if no element is found add it
if new_element is None:
if db_type == 'author':
new_element = db_object(add_element, add_element, "")
elif db_type == 'series':
new_element = db_object(add_element, add_element)
elif db_type == 'custom':
new_element = db_object(value=add_element)
else: # db_type should be tag, or languages
new_element = db_object(add_element)
db_session.add(new_element)
new_element = db.session.query(db_object).filter(db_filter == add_element).first()
# add element to book
db_book_object.append(new_element)
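# Example walk-through (illustrative values): if a book currently carries the tags
# ['SciFi', 'Space'] and the edit form submits input_elements ['SciFi', 'Aliens'],
# the function above removes 'Space' (deleting that Tags row if no other book still
# uses it) and appends 'Aliens', creating the Tags row first when it does not exist yet.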
def render_title_template(*args, **kwargs):
return render_template(instance=config.config_calibre_web_title, *args, **kwargs)
@app.before_request
def before_request():
if ub.DEVELOPMENT:
reload(ub)
g.user = current_user
g.allow_registration = config.config_public_reg
g.allow_upload = config.config_uploading
g.public_shelfes = ub.session.query(ub.Shelf).filter(ub.Shelf.is_public == 1).all()
if not config.db_configured and request.endpoint not in ('basic_configuration', 'login') and '/static/' not in request.path:
return redirect(url_for('basic_configuration'))
# Routing functions
@app.route("/opds")
@requires_basic_auth_if_no_ano
def feed_index():
xml = render_title_template('index.xml')
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/osd")
@requires_basic_auth_if_no_ano
def feed_osd():
xml = render_title_template('osd.xml', lang='de-DE')
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/search/<query>")
@requires_basic_auth_if_no_ano
def feed_cc_search(query):
return feed_search(query.strip())
@app.route("/opds/search", methods=["GET"])
@requires_basic_auth_if_no_ano
def feed_normal_search():
return feed_search(request.args.get("query").strip())
def feed_search(term):
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
if term:
entries = db.session.query(db.Books).filter(db.or_(db.Books.tags.any(db.Tags.name.like("%" + term + "%")),
db.Books.series.any(db.Series.name.like("%" + term + "%")),
db.Books.authors.any(db.Authors.name.like("%" + term + "%")),
db.Books.publishers.any(db.Publishers.name.like("%" + term + "%")),
db.Books.title.like("%" + term + "%"))).filter(lang_filter).all()
entriescount = len(entries) if len(entries) > 0 else 1
pagination = Pagination(1, entriescount, entriescount)
xml = render_title_template('feed.xml', searchterm=term, entries=entries, pagination=pagination)
else:
xml = render_title_template('feed.xml', searchterm="")
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/new")
@requires_basic_auth_if_no_ano
def feed_new():
off = request.args.get("offset")
if not off:
off = 0
entries, __, pagination = fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1),
db.Books, True, db.Books.timestamp.desc())
xml = render_title_template('feed.xml', entries=entries, pagination=pagination)
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/discover")
@requires_basic_auth_if_no_ano
def feed_discover():
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Books).filter(lang_filter).order_by(func.random()).limit(config.config_books_per_page)
pagination = Pagination(1, config.config_books_per_page, int(config.config_books_per_page))
xml = render_title_template('feed.xml', entries=entries, pagination=pagination)
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/rated")
@requires_basic_auth_if_no_ano
def feed_best_rated():
off = request.args.get("offset")
if not off:
off = 0
entries, __, pagination = fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1),
db.Books, db.Books.ratings.any(db.Ratings.rating > 9), db.Books.timestamp.desc())
xml = render_title_template('feed.xml', entries=entries, pagination=pagination)
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/hot")
@requires_basic_auth_if_no_ano
def feed_hot():
off = request.args.get("offset")
if not off:
off = 0
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
all_books = ub.session.query(ub.Downloads, ub.func.count(ub.Downloads.book_id)).order_by(
ub.func.count(ub.Downloads.book_id).desc()).group_by(ub.Downloads.book_id)
hot_books = all_books.offset(off).limit(config.config_books_per_page)
entries = list()
for book in hot_books:
downloadBook = db.session.query(db.Books).filter(db.Books.id == book.Downloads.book_id).first()
if downloadBook:
entries.append(
db.session.query(db.Books).filter(lang_filter).filter(db.Books.id == book.Downloads.book_id).first())
else:
ub.session.query(ub.Downloads).filter(book.Downloads.book_id == ub.Downloads.book_id).delete()
ub.session.commit()
numBooks = entries.__len__()
pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page, numBooks)
xml = render_title_template('feed.xml', entries=entries, pagination=pagination)
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/author")
@requires_basic_auth_if_no_ano
def feed_authorindex():
off = request.args.get("offset")
if not off:
off = 0
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Authors).join(db.books_authors_link).join(db.Books).filter(lang_filter)\
.group_by('books_authors_link.author').order_by(db.Authors.sort).limit(config.config_books_per_page).offset(off)
pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page,
len(db.session.query(db.Authors).all()))
xml = render_title_template('feed.xml', listelements=entries, folder='feed_author', pagination=pagination)
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/author/<int:book_id>")
@requires_basic_auth_if_no_ano
def feed_author(book_id):
off = request.args.get("offset")
if not off:
off = 0
entries, random, pagination = fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1),
db.Books, db.Books.authors.any(db.Authors.id == book_id), db.Books.timestamp.desc())
xml = render_title_template('feed.xml', entries=entries, pagination=pagination)
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/category")
@requires_basic_auth_if_no_ano
def feed_categoryindex():
off = request.args.get("offset")
if not off:
off = 0
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Tags).join(db.books_tags_link).join(db.Books).filter(lang_filter).\
group_by('books_tags_link.tag').order_by(db.Tags.name).offset(off).limit(config.config_books_per_page)
pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page,
len(db.session.query(db.Tags).all()))
xml = render_title_template('feed.xml', listelements=entries, folder='feed_category', pagination=pagination)
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/category/<int:book_id>")
@requires_basic_auth_if_no_ano
def feed_category(book_id):
off = request.args.get("offset")
if not off:
off = 0
entries, random, pagination = fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1),
db.Books, db.Books.tags.any(db.Tags.id == book_id), db.Books.timestamp.desc())
xml = render_title_template('feed.xml', entries=entries, pagination=pagination)
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/series")
@requires_basic_auth_if_no_ano
def feed_seriesindex():
off = request.args.get("offset")
if not off:
off = 0
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Series).join(db.books_series_link).join(db.Books).filter(lang_filter).\
group_by('books_series_link.series').order_by(db.Series.sort).offset(off).all()
pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page,
len(db.session.query(db.Series).all()))
xml = render_title_template('feed.xml', listelements=entries, folder='feed_series', pagination=pagination)
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
@app.route("/opds/series/<int:book_id>")
@requires_basic_auth_if_no_ano
def feed_series(book_id):
off = request.args.get("offset")
if not off:
off = 0
entries, random, pagination = fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1),
db.Books, db.Books.series.any(db.Series.id == book_id),db.Books.series_index)
xml = render_title_template('feed.xml', entries=entries, pagination=pagination)
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
def partial(total_byte_len, part_size_limit):
s = []
for p in range(0, total_byte_len, part_size_limit):
last = min(total_byte_len - 1, p + part_size_limit - 1)
s.append([p, last])
return s
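# For instance, partial(2500000, 1024 * 1024) returns the byte ranges
# [[0, 1048575], [1048576, 2097151], [2097152, 2499999]], which do_gdrive_download
# below turns into consecutive HTTP Range requests against Google Drive.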
def do_gdrive_download(df, headers):
total_size = int(df.metadata.get('fileSize'))
download_url = df.metadata.get('downloadUrl')
    s = partial(total_size, 1024 * 1024)  # download in 1 MB chunks via successive Range requests
def stream():
for byte in s:
headers = {"Range": 'bytes=%s-%s' % (byte[0], byte[1])}
resp, content = df.auth.Get_Http_Object().request(download_url, headers=headers)
if resp.status == 206:
yield content
else:
app.logger.info('An error occurred: %s' % resp)
return
return Response(stream_with_context(stream()), headers=headers)
@app.route("/opds/download/<book_id>/<book_format>/")
@requires_basic_auth_if_no_ano
@download_required
def get_opds_download_link(book_id, book_format):
startTime = time.time()
book_format = book_format.split(".")[0]
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
data = db.session.query(db.Data).filter(db.Data.book == book.id).filter(db.Data.format == book_format.upper()).first()
app.logger.info(data.name)
if current_user.is_authenticated:
helper.update_download(book_id, int(current_user.id))
file_name = book.title
if len(book.authors) > 0:
file_name = book.authors[0].name + '_' + file_name
file_name = helper.get_valid_filename(file_name)
headers = Headers()
headers["Content-Disposition"] = "attachment; filename*=UTF-8''%s.%s" % (quote(file_name.encode('utf8')), book_format)
app.logger.info(time.time()-startTime)
startTime = time.time()
if config.config_use_google_drive:
app.logger.info(time.time() - startTime)
df = gdriveutils.getFileFromEbooksFolder(Gdrive.Instance().drive, book.path, data.name + "." + book_format)
return do_gdrive_download(df, headers)
else:
response = make_response(send_from_directory(os.path.join(config.config_calibre_dir, book.path), data.name + "." + book_format))
response.headers = headers
return response
@app.route("/ajax/book/<string:uuid>")
@requires_basic_auth_if_no_ano
def get_metadata_calibre_companion(uuid):
entry = db.session.query(db.Books).filter(db.Books.uuid.like("%" + uuid + "%")).first()
if entry is not None:
js = render_template('json.txt', entry=entry)
response = make_response(js)
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
else:
return ""
@app.route("/get_authors_json", methods=['GET', 'POST'])
@login_required_if_no_ano
def get_authors_json():
if request.method == "GET":
query = request.args.get('q')
# entries = db.session.execute("select name from authors where name like '%" + query + "%'")
entries = db.session.query(db.Authors).filter(db.Authors.name.like("%" + query + "%")).all()
json_dumps = json.dumps([dict(name=r.name) for r in entries])
return json_dumps
@app.route("/get_tags_json", methods=['GET', 'POST'])
@login_required_if_no_ano
def get_tags_json():
if request.method == "GET":
query = request.args.get('q')
# entries = db.session.execute("select name from tags where name like '%" + query + "%'")
entries = db.session.query(db.Tags).filter(db.Tags.name.like("%" + query + "%")).all()
# for x in entries:
# alfa = dict(name=x.name)
json_dumps = json.dumps([dict(name=r.name) for r in entries])
return json_dumps
@app.route("/get_update_status", methods=['GET'])
@login_required_if_no_ano
def get_update_status():
status = {}
if request.method == "GET":
# should be automatically replaced by git with current commit hash
commit_id = '$Format:%H$'
commit = requests.get('https://api.github.com/repos/ajurcevic/calibre-web/git/refs/heads/master').json()
if "object" in commit and commit['object']['sha'] != commit_id:
status['status'] = True
commitdate = requests.get('https://api.github.com/repos/ajurcevic/calibre-web/git/commits/'+commit['object']['sha']).json()
if "committer" in commitdate:
status['commit'] = commitdate['committer']['date']
else:
status['commit'] = u'Unknown'
else:
status['status'] = False
return json.dumps(status)
@app.route("/get_updater_status", methods=['GET', 'POST'])
@login_required
@admin_required
def get_updater_status():
status = {}
if request.method == "POST":
commit = request.form.to_dict()
if "start" in commit and commit['start'] == 'True':
text = {
"1": _(u'Requesting update package'),
"2": _(u'Downloading update package'),
"3": _(u'Unzipping update package'),
"4": _(u'Files are replaced'),
"5": _(u'Database connections are closed'),
"6": _(u'Server is stopped'),
"7": _(u'Update finished, please press okay and reload page')
}
status['text']=text
helper.updater_thread = helper.Updater()
helper.updater_thread.start()
status['status']=helper.updater_thread.get_update_status()
elif request.method == "GET":
try:
status['status']=helper.updater_thread.get_update_status()
except Exception:
status['status'] = 7
return json.dumps(status)
@app.route("/get_languages_json", methods=['GET', 'POST'])
@login_required_if_no_ano
def get_languages_json():
if request.method == "GET":
query = request.args.get('q').lower()
languages = db.session.query(db.Languages).all()
for lang in languages:
try:
cur_l = LC.parse(lang.lang_code)
lang.name = cur_l.get_language_name(get_locale())
except Exception:
lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
entries = [s for s in languages if query in s.name.lower()]
json_dumps = json.dumps([dict(name=r.name) for r in entries])
return json_dumps
@app.route("/get_series_json", methods=['GET', 'POST'])
@login_required_if_no_ano
def get_series_json():
if request.method == "GET":
query = request.args.get('q')
entries = db.session.query(db.Series).filter(db.Series.name.like("%" + query + "%")).all()
# entries = db.session.execute("select name from series where name like '%" + query + "%'")
json_dumps = json.dumps([dict(name=r.name) for r in entries])
return json_dumps
@app.route("/get_matching_tags", methods=['GET', 'POST'])
@login_required_if_no_ano
def get_matching_tags():
tag_dict = {'tags': []}
if request.method == "GET":
q = db.session.query(db.Books)
author_input = request.args.get('author_name')
title_input = request.args.get('book_title')
include_tag_inputs = request.args.getlist('include_tag')
exclude_tag_inputs = request.args.getlist('exclude_tag')
q = q.filter(db.Books.authors.any(db.Authors.name.like("%" + author_input + "%")),
db.Books.title.like("%" + title_input + "%"))
if len(include_tag_inputs) > 0:
for tag in include_tag_inputs:
q = q.filter(db.Books.tags.any(db.Tags.id == tag))
if len(exclude_tag_inputs) > 0:
for tag in exclude_tag_inputs:
q = q.filter(not_(db.Books.tags.any(db.Tags.id == tag)))
for book in q:
for tag in book.tags:
if tag.id not in tag_dict['tags']:
tag_dict['tags'].append(tag.id)
json_dumps = json.dumps(tag_dict)
return json_dumps
@app.route("/", defaults={'page': 1})
@app.route('/page/<int:page>')
@login_required_if_no_ano
def index(page):
entries, random, pagination = fill_indexpage(page, db.Books, True, db.Books.timestamp.desc())
return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
title=_(u"Latest Books"))
@app.route("/hot", defaults={'page': 1})
@app.route('/hot/page/<int:page>')
@login_required_if_no_ano
def hot_books(page):
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
if current_user.show_detail_random():
random = db.session.query(db.Books).filter(lang_filter).order_by(func.random()).limit(config.config_random_books)
else:
random = false
off = int(int(config.config_books_per_page) * (page - 1))
all_books = ub.session.query(ub.Downloads, ub.func.count(ub.Downloads.book_id)).order_by(
ub.func.count(ub.Downloads.book_id).desc()).group_by(ub.Downloads.book_id)
hot_books = all_books.offset(off).limit(config.config_books_per_page)
entries = list()
for book in hot_books:
downloadBook = db.session.query(db.Books).filter(db.Books.id == book.Downloads.book_id).first()
if downloadBook:
entries.append(
db.session.query(db.Books).filter(lang_filter).filter(db.Books.id == book.Downloads.book_id).first())
else:
ub.session.query(ub.Downloads).filter(book.Downloads.book_id == ub.Downloads.book_id).delete()
ub.session.commit()
numBooks = entries.__len__()
pagination = Pagination(page, config.config_books_per_page, numBooks)
return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
title=_(u"Hot Books (most downloaded)"))
@app.route("/rated", defaults={'page': 1})
@app.route('/rated/page/<int:page>')
@login_required_if_no_ano
def best_rated_books(page):
entries, random, pagination = fill_indexpage(page, db.Books, db.Books.ratings.any(db.Ratings.rating > 9),
db.Books.timestamp.desc())
return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
title=_(u"Best rated books"))
@app.route("/discover", defaults={'page': 1})
@app.route('/discover/page/<int:page>')
@login_required_if_no_ano
def discover(page):
entries, __, pagination = fill_indexpage(page, db.Books, True, func.randomblob(2))
pagination = Pagination(1, config.config_books_per_page,config.config_books_per_page)
return render_title_template('discover.html', entries=entries, pagination=pagination, title=_(u"Random Books"))
@app.route("/author")
@login_required_if_no_ano
def author_list():
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Authors, func.count('books_authors_link.book').label('count')).join(
db.books_authors_link).join(db.Books).filter(
lang_filter).group_by('books_authors_link.author').order_by(db.Authors.sort).all()
return render_title_template('list.html', entries=entries, folder='author', title=_(u"Author list"))
@app.route("/author/<int:book_id>", defaults={'page': 1})
@app.route("/author/<int:book_id>/<int:page>'")
@login_required_if_no_ano
def author(book_id, page):
entries, random, pagination = fill_indexpage(page, db.Books, db.Books.authors.any(db.Authors.id == book_id),
db.Books.timestamp.desc())
name = db.session.query(db.Authors).filter(db.Authors.id == book_id).first().name
if entries:
return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
title=_(u"Author: %(name)s", name=name))
else:
flash(_(u"Error opening eBook. File does not exist or file is not accessible:"), category="error")
return redirect(url_for("index"))
@app.route("/series")
@login_required_if_no_ano
def series_list():
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Series, func.count('books_series_link.book').label('count')).join(
db.books_series_link).join(db.Books).filter(
lang_filter).group_by('books_series_link.series').order_by(db.Series.sort).all()
return render_title_template('list.html', entries=entries, folder='series', title=_(u"Series list"))
@app.route("/series/<int:book_id>/", defaults={'page': 1})
@app.route("/series/<int:book_id>/<int:page>'")
@login_required_if_no_ano
def series(book_id, page):
entries, random, pagination = fill_indexpage(page, db.Books, db.Books.series.any(db.Series.id == book_id),
db.Books.series_index)
name = db.session.query(db.Series).filter(db.Series.id == book_id).first().name
if entries:
return render_title_template('index.html', random=random, pagination=pagination, entries=entries,
title=_(u"Series: %(serie)s", serie=name))
else:
flash(_(u"Error opening eBook. File does not exist or file is not accessible:"), category="error")
return redirect(url_for("index"))
@app.route("/language")
@login_required_if_no_ano
def language_overview():
if current_user.filter_language() == u"all":
languages = db.session.query(db.Languages).all()
for lang in languages:
try:
cur_l = LC.parse(lang.lang_code)
lang.name = cur_l.get_language_name(get_locale())
except Exception:
lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
else:
try:
langfound = 1
cur_l = LC.parse(current_user.filter_language())
except Exception:
langfound = 0
languages = db.session.query(db.Languages).filter(
db.Languages.lang_code == current_user.filter_language()).all()
if langfound:
languages[0].name = cur_l.get_language_name(get_locale())
else:
languages[0].name = _(isoLanguages.get(part3=languages[0].lang_code).name)
lang_counter = db.session.query(db.books_languages_link,
func.count('books_languages_link.book').label('bookcount')).group_by(
'books_languages_link.lang_code').all()
return render_title_template('languages.html', languages=languages, lang_counter=lang_counter,
title=_(u"Available languages"))
@app.route("/language/<name>", defaults={'page': 1})
@app.route('/language/<name>/page/<int:page>')
@login_required_if_no_ano
def language(name, page):
entries, random, pagination = fill_indexpage(page, db.Books, db.Books.languages.any(db.Languages.lang_code == name),
db.Books.timestamp.desc())
try:
cur_l = LC.parse(name)
name = cur_l.get_language_name(get_locale())
except Exception:
name = _(isoLanguages.get(part3=name).name)
return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
title=_(u"Language: %(name)s", name=name))
@app.route("/category")
@login_required_if_no_ano
def category_list():
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Tags, func.count('books_tags_link.book').label('count')).join(
db.books_tags_link).join(db.Books).filter(
lang_filter).group_by('books_tags_link.tag').all()
return render_title_template('list.html', entries=entries, folder='category', title=_(u"Category list"))
@app.route("/category/<int:book_id>", defaults={'page': 1})
@app.route('/category/<int:book_id>/<int:page>')
@login_required_if_no_ano
def category(book_id, page):
entries, random, pagination = fill_indexpage(page, db.Books, db.Books.tags.any(db.Tags.id == book_id),
db.Books.timestamp.desc())
name = db.session.query(db.Tags).filter(db.Tags.id == book_id).first().name
return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
title=_(u"Category: %(name)s", name=name))
@app.route("/ajax/toggleread/<int:book_id>", methods=['POST'])
@login_required
def toggle_read(book_id):
book = ub.session.query(ub.ReadBook).filter(ub.and_(ub.ReadBook.user_id == int(current_user.id),
ub.ReadBook.book_id == book_id)).first()
if book:
book.is_read = not book.is_read
else:
readBook = ub.ReadBook()
readBook.user_id = int(current_user.id)
readBook.book_id = book_id
readBook.is_read = True
book = readBook
ub.session.merge(book)
ub.session.commit()
return ""
@app.route("/book/<int:book_id>")
@login_required_if_no_ano
def show_book(book_id):
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Books).filter(db.Books.id == book_id).filter(lang_filter).first()
if entries:
for index in range(0, len(entries.languages)):
try:
entries.languages[index].language_name = LC.parse(entries.languages[index].lang_code).get_language_name(
get_locale())
except Exception:
entries.languages[index].language_name = _(
isoLanguages.get(part3=entries.languages[index].lang_code).name)
tmpcc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
if config.config_columns_to_ignore:
cc = []
for col in tmpcc:
r = re.compile(config.config_columns_to_ignore)
if r.match(col.label):
cc.append(col)
else:
cc = tmpcc
book_in_shelfs = []
shelfs = ub.session.query(ub.BookShelf).filter(ub.BookShelf.book_id == book_id).all()
for entry in shelfs:
book_in_shelfs.append(entry.shelf)
if not current_user.is_anonymous():
matching_have_read_book = ub.session.query(ub.ReadBook).filter(ub.and_(ub.ReadBook.user_id == int(current_user.id),
ub.ReadBook.book_id == book_id)).all()
have_read = len(matching_have_read_book) > 0 and matching_have_read_book[0].is_read
else:
have_read = None
return render_title_template('detail.html', entry=entries, cc=cc,
title=entries.title, books_shelfs=book_in_shelfs, have_read=have_read)
else:
flash(_(u"Error opening eBook. File does not exist or file is not accessible:"), category="error")
return redirect(url_for("index"))
@app.route("/admin")
@login_required
def admin_forbidden():
abort(403)
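# Statistics page: library counts plus the versions of the converter tool and the main Python dependencies.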
@app.route("/stats")
@login_required
def stats():
counter = db.session.query(db.Books).count()
authors = db.session.query(db.Authors).count()
categorys = db.session.query(db.Tags).count()
series = db.session.query(db.Series).count()
versions = uploader.book_formats.get_versions()
vendorpath = os.path.join(config.get_main_dir, "vendor")
if sys.platform == "win32":
kindlegen = os.path.join(vendorpath, u"kindlegen.exe")
else:
kindlegen = os.path.join(vendorpath, u"kindlegen")
versions['KindlegenVersion'] = _('not installed')
if os.path.exists(kindlegen):
p = subprocess.Popen(kindlegen, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
for lines in p.stdout.readlines():
if isinstance(lines, bytes):
lines = lines.decode('utf-8')
if re.search(r'Amazon kindlegen\(', lines):
versions['KindlegenVersion'] = lines
versions['PythonVersion'] = sys.version
versions['babel'] = babelVersion
versions['sqlalchemy'] = sqlalchemyVersion
versions['flask'] = flaskVersion
versions['flasklogin'] = flask_loginVersion
versions['flask_principal'] = flask_principalVersion
versions['tornado'] = tornadoVersion
versions['iso639'] = iso639Version
versions['requests'] = requests.__version__
versions['pysqlite'] = db.engine.dialect.dbapi.version
versions['sqlite'] = db.engine.dialect.dbapi.sqlite_version
return render_title_template('stats.html', bookcounter=counter, authorcounter=authors, versions=versions,
categorycounter=categorys, seriecounter=series, title=_(u"Statistics"))
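# Deleting a book also removes now-orphaned links: authors, tags, series, languages, publishers and
# custom-column values are cleaned up via modify_database_object() with an empty value list.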
@app.route("/delete/<int:book_id>/")
@login_required
def delete_book(book_id):
if current_user.role_delete_books():
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
if book:
if config.config_use_google_drive:
helper.delete_book_gdrive(book) # ToDo really delete file
else:
helper.delete_book(book,config.config_calibre_dir)
# check if only this book links to:
# author, language, series, tags, custom columns
modify_database_object([u''], book.authors, db.Authors, db.session, 'author')
modify_database_object([u''], book.tags, db.Tags, db.session, 'tags')
modify_database_object([u''], book.series, db.Series, db.session, 'series')
modify_database_object([u''], book.languages, db.Languages, db.session, 'languages')
modify_database_object([u''], book.publishers, db.Publishers, db.session, 'publishers')
cc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
for c in cc:
cc_string = "custom_column_" + str(c.id)
if not c.is_multiple:
if len(getattr(book, cc_string)) > 0:
if c.datatype == 'bool':
del_cc = getattr(book, cc_string)[0]
getattr(book, cc_string).remove(del_cc)
db.session.delete(del_cc)
elif c.datatype == 'rating':
del_cc = getattr(book, cc_string)[0]
getattr(book, cc_string).remove(del_cc)
if len(del_cc.books) == 0:
db.session.delete(del_cc)
else:
del_cc = getattr(book, cc_string)[0]
getattr(book, cc_string).remove(del_cc)
db.session.delete(del_cc)
else:
modify_database_object([u''], getattr(book, cc_string),db.cc_classes[c.id], db.session, 'custom')
db.session.query(db.Books).filter(db.Books.id == book_id).delete()
db.session.commit()
else:
# book not found
app.logger.info('Book with id "' + str(book_id) + '" could not be deleted')
return redirect(url_for('index'))
@app.route("/gdrive/authenticate")
@login_required
@admin_required
def authenticate_google_drive():
authUrl = Gauth.Instance().auth.GetAuthUrl()
return redirect(authUrl)
@app.route("/gdrive/callback")
def google_drive_callback():
auth_code = request.args.get('code')
credentials = Gauth.Instance().auth.flow.step2_exchange(auth_code)
with open('gdrive_credentials', 'w') as f:
f.write(credentials.to_json())
return redirect(url_for('configuration'))
@app.route("/gdrive/watch/subscribe")
@login_required
@admin_required
def watch_gdrive():
if not config.config_google_drive_watch_changes_response:
address = '%sgdrive/watch/callback' % config.config_google_drive_calibre_url_base
notification_id = str(uuid4())
result = gdriveutils.watchChange(Gdrive.Instance().drive, notification_id,
'web_hook', address, gdrive_watch_callback_token, current_milli_time() + 604800*1000)
app.logger.info(result)
settings = ub.session.query(ub.Settings).first()
settings.config_google_drive_watch_changes_response = json.dumps(result)
ub.session.merge(settings)
ub.session.commit()
settings = ub.session.query(ub.Settings).first()
config.loadSettings()
app.logger.info(settings.config_google_drive_watch_changes_response)
return redirect(url_for('configuration'))
@app.route("/gdrive/watch/revoke")
@login_required
@admin_required
def revoke_watch_gdrive():
last_watch_response = config.config_google_drive_watch_changes_response
if last_watch_response:
try:
gdriveutils.stopChannel(Gdrive.Instance().drive, last_watch_response['id'], last_watch_response['resourceId'])
except HttpError:
pass
settings = ub.session.query(ub.Settings).first()
settings.config_google_drive_watch_changes_response = None
ub.session.merge(settings)
ub.session.commit()
config.loadSettings()
return redirect(url_for('configuration'))
@app.route("/gdrive/watch/callback", methods=['GET', 'POST'])
def on_received_watch_confirmation():
app.logger.info(request.headers)
if request.headers.get('X-Goog-Channel-Token') == gdrive_watch_callback_token \
and request.headers.get('X-Goog-Resource-State') == 'change' \
and request.data:
data = request.data
def updateMetaData():
app.logger.info('Change received from gdrive')
app.logger.info(data)
try:
j = json.loads(data)
app.logger.info('Getting change details')
response = gdriveutils.getChangeById(Gdrive.Instance().drive, j['id'])
app.logger.info(response)
if response:
dbpath = os.path.join(config.config_calibre_dir, "metadata.db")
if not response['deleted'] and response['file']['title'] == 'metadata.db' and response['file']['md5Checksum'] != md5(dbpath):
tmpDir = tempfile.gettempdir()
app.logger.info('Database file updated')
copyfile(dbpath, os.path.join(tmpDir, "metadata.db_" + str(current_milli_time())))
app.logger.info('Backing up existing and downloading updated metadata.db')
gdriveutils.downloadFile(Gdrive.Instance().drive, None, "metadata.db", os.path.join(tmpDir, "tmp_metadata.db"))
app.logger.info('Setting up new DB')
os.rename(os.path.join(tmpDir, "tmp_metadata.db"), dbpath)
db.setup_db()
except Exception as e:
app.logger.exception(e)
updateMetaData()
return ''
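# Shutdown endpoint: parameter 0 restarts the server, 1 shuts it down (both stop the Tornado IOLoop),
# 2 only closes and reopens the Calibre database connection.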
@app.route("/shutdown")
@login_required
@admin_required
def shutdown():
# global global_task
task = int(request.args.get("parameter").strip())
helper.global_task = task
if task == 1 or task == 0:  # valid commands received
# close all database connections
db.session.close()
db.engine.dispose()
ub.session.close()
ub.engine.dispose()
# stop tornado server
server = IOLoop.instance()
server.add_callback(server.stop)
showtext = {}
if task == 0:
showtext['text'] = _(u'Server restarted, please reload page')
else:
showtext['text'] = _(u'Performing shutdown of server, please close window')
return json.dumps(showtext)
else:
if task == 2:
db.session.close()
db.engine.dispose()
db.setup_db()
return json.dumps({})
abort(404)
@app.route("/update")
@login_required
@admin_required
def update():
helper.updater_thread = helper.Updater()
flash(_(u"Update done"), category="info")
return abort(404)
@app.route("/search", methods=["GET"])
@login_required_if_no_ano
def search():
term = request.args.get("query").strip()
if term:
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
entries = db.session.query(db.Books).filter(db.or_(db.Books.tags.any(db.Tags.name.like("%" + term + "%")),
db.Books.series.any(db.Series.name.like("%" + term + "%")),
db.Books.authors.any(db.Authors.name.like("%" + term + "%")),
db.Books.publishers.any(db.Publishers.name.like("%" + term + "%")),
db.Books.title.like("%" + term + "%"))).filter(lang_filter).all()
return render_title_template('search.html', searchterm=term, entries=entries)
else:
return render_title_template('search.html', searchterm="")
@app.route("/advanced_search", methods=["GET"])
@login_required_if_no_ano
def advanced_search():
if request.method == 'GET':
q = db.session.query(db.Books)
include_tag_inputs = request.args.getlist('include_tag')
exclude_tag_inputs = request.args.getlist('exclude_tag')
include_series_inputs = request.args.getlist('include_serie')
exclude_series_inputs = request.args.getlist('exclude_serie')
include_languages_inputs = request.args.getlist('include_language')
exclude_languages_inputs = request.args.getlist('exclude_language')
author_name = request.args.get("author_name")
book_title = request.args.get("book_title")
publisher = request.args.get("publisher")
if author_name: author_name = author_name.strip()
if book_title: book_title = book_title.strip()
if publisher: publisher = publisher.strip()
if include_tag_inputs or exclude_tag_inputs or include_series_inputs or exclude_series_inputs or \
include_languages_inputs or exclude_languages_inputs or author_name or book_title or publisher:
searchterm = []
searchterm.extend((author_name, book_title, publisher))
tag_names = db.session.query(db.Tags).filter(db.Tags.id.in_(include_tag_inputs)).all()
searchterm.extend(tag.name for tag in tag_names)
# searchterm = " + ".join(filter(None, searchterm))
serie_names = db.session.query(db.Series).filter(db.Series.id.in_(include_series_inputs)).all()
searchterm.extend(serie.name for serie in serie_names)
language_names = db.session.query(db.Languages).filter(db.Languages.id.in_(include_languages_inputs)).all()
for lang in language_names:
try:
cur_l = LC.parse(lang.lang_code)
lang.name = cur_l.get_language_name(get_locale())
except Exception:
lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
searchterm.extend(language.name for language in language_names)
searchterm = " + ".join(filter(None, searchterm))
q = q.filter(db.Books.authors.any(db.Authors.name.like("%" + author_name + "%")),
db.Books.title.like("%" + book_title + "%"),
db.Books.publishers.any(db.Publishers.name.like("%" + publisher + "%")))
for tag in include_tag_inputs:
q = q.filter(db.Books.tags.any(db.Tags.id == tag))
for tag in exclude_tag_inputs:
q = q.filter(not_(db.Books.tags.any(db.Tags.id == tag)))
for serie in include_series_inputs:
q = q.filter(db.Books.series.any(db.Series.id == serie))
for serie in exclude_series_inputs:
q = q.filter(not_(db.Books.series.any(db.Series.id == serie)))
if current_user.filter_language() != "all":
q = q.filter(db.Books.languages.any(db.Languages.lang_code == current_user.filter_language()))
else:
for language in include_languages_inputs:
q = q.filter(db.Books.languages.any(db.Languages.id == language))
for language in exclude_languages_inputs:
q = q.filter(not_(db.Books.languages.any(db.Languages.id == language)))
q = q.all()
return render_title_template('search.html', searchterm=searchterm, entries=q, title=_(u"search"))
tags = db.session.query(db.Tags).order_by(db.Tags.name).all()
series = db.session.query(db.Series).order_by(db.Series.name).all()
if current_user.filter_language() == u"all":
languages = db.session.query(db.Languages).all()
for lang in languages:
try:
cur_l = LC.parse(lang.lang_code)
lang.name = cur_l.get_language_name(get_locale())
except Exception:
lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
else:
languages = None
return render_title_template('search_form.html', tags=tags, languages=languages, series=series, title=_(u"search"))
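# Covers served from Google Drive need a public "anyone with the link" read permission; it is added once per
# file and remembered in the PermissionAdded table so the API call is not repeated.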
def get_cover_via_gdrive(cover_path):
df = gdriveutils.getFileFromEbooksFolder(Gdrive.Instance().drive, cover_path, 'cover.jpg')
if not gdriveutils.session.query(gdriveutils.PermissionAdded).filter(gdriveutils.PermissionAdded.gdrive_id == df['id']).first():
df.GetPermissions()
df.InsertPermission({
'type': 'anyone',
'value': 'anyone',
'role': 'reader',
'withLink': True})
permissionAdded = gdriveutils.PermissionAdded()
permissionAdded.gdrive_id = df['id']
gdriveutils.session.add(permissionAdded)
gdriveutils.session.commit()
return df.metadata.get('webContentLink')
@app.route("/cover/<path:cover_path>")
@login_required_if_no_ano
def get_cover(cover_path):
if config.config_use_google_drive:
return redirect(get_cover_via_gdrive(cover_path))
else:
return send_from_directory(os.path.join(config.config_calibre_dir, cover_path), "cover.jpg")
@app.route("/opds/thumb_240_240/<path:book_id>")
@app.route("/opds/cover_240_240/<path:book_id>")
@app.route("/opds/cover_90_90/<path:book_id>")
@app.route("/opds/cover/<path:book_id>")
@requires_basic_auth_if_no_ano
def feed_get_cover(book_id):
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
if config.config_use_google_drive:
return redirect(get_cover_via_gdrive(book.path))
else:
return send_from_directory(os.path.join(config.config_calibre_dir, book.path), "cover.jpg")
def render_read_books(page, are_read, as_xml=False):
readBooks = ub.session.query(ub.ReadBook).filter(ub.ReadBook.user_id == int(current_user.id)).filter(ub.ReadBook.is_read == True).all()
readBookIds = [x.book_id for x in readBooks]
if are_read:
db_filter = db.Books.id.in_(readBookIds)
else:
db_filter = ~db.Books.id.in_(readBookIds)
entries, random, pagination = fill_indexpage(page, db.Books,
db_filter, db.Books.timestamp.desc())
if as_xml:
xml = render_title_template('feed.xml', entries=entries, pagination=pagination)
response = make_response(xml)
response.headers["Content-Type"] = "application/xml"
return response
else:
name = _(u'Read Books') if are_read else _(u'Unread Books')
return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
title=name)
@app.route("/opds/readbooks/")
@login_required_if_no_ano
def feed_read_books():
off = request.args.get("offset")
if not off:
off = 0
return render_read_books(int(off) // int(config.config_books_per_page) + 1, True, True)
@app.route("/readbooks/", defaults={'page': 1})
@app.route("/readbooks/<int:page>'")
@login_required_if_no_ano
def read_books(page):
return render_read_books(page, True)
@app.route("/opds/unreadbooks/")
@login_required_if_no_ano
def feed_unread_books():
off = request.args.get("offset")
if not off:
off = 0
return render_read_books(int(off) // int(config.config_books_per_page) + 1, False, True)
@app.route("/unreadbooks/", defaults={'page': 1})
@app.route("/unreadbooks/<int:page>'")
@login_required_if_no_ano
def unread_books(page):
return render_read_books(page, False)
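# In-browser reader: epubs are unpacked into cps/static/<book_id> on first access, pdf/txt/cbr files are
# copied there, and the matching reader template is rendered.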
@app.route("/read/<int:book_id>/<book_format>")
@login_required_if_no_ano
def read_book(book_id, book_format):
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
if book:
book_dir = os.path.join(config.get_main_dir, "cps", "static", str(book_id))
if not os.path.exists(book_dir):
os.mkdir(book_dir)
if book_format.lower() == "epub":
# check if the mimetype file exists (i.e. the epub has already been extracted)
mime_file = str(book_id) + "/mimetype"
if not os.path.exists(mime_file):
epub_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + ".epub"
if not os.path.isfile(epub_file):
raise ValueError('Error opening eBook. File does not exist: ', epub_file)
zfile = zipfile.ZipFile(epub_file)
for name in zfile.namelist():
(dirName, fileName) = os.path.split(name)
newDir = os.path.join(book_dir, dirName)
if not os.path.exists(newDir):
try:
os.makedirs(newDir)
except OSError as exception:
if not exception.errno == errno.EEXIST:
raise
if fileName:
fd = open(os.path.join(newDir, fileName), "wb")
fd.write(zfile.read(name))
fd.close()
zfile.close()
return render_title_template('read.html', bookid=book_id, title=_(u"Read a Book"))
elif book_format.lower() == "pdf":
all_name = str(book_id) + "/" + book.data[0].name + ".pdf"
tmp_file = os.path.join(book_dir, book.data[0].name) + ".pdf"
if not os.path.exists(tmp_file):
pdf_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + ".pdf"
copyfile(pdf_file, tmp_file)
return render_title_template('readpdf.html', pdffile=all_name, title=_(u"Read a Book"))
elif book_format.lower() == "txt":
all_name = str(book_id) + "/" + book.data[0].name + ".txt"
tmp_file = os.path.join(book_dir, book.data[0].name) + ".txt"
if not os.path.exists(tmp_file):
txt_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + ".txt"
copyfile(txt_file, tmp_file)
return render_title_template('readtxt.html', txtfile=all_name, title=_(u"Read a Book"))
elif book_format.lower() == "cbr":
all_name = str(book_id) + "/" + book.data[0].name + ".cbr"
tmp_file = os.path.join(book_dir, book.data[0].name) + ".cbr"
if not os.path.exists(tmp_file):
cbr_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + ".cbr"
copyfile(cbr_file, tmp_file)
return render_title_template('readcbr.html', comicfile=all_name, title=_(u"Read a Book"))
else:
flash(_(u"Error opening eBook. File does not exist or file is not accessible:"), category="error")
return redirect(url_for("index"))
@app.route("/download/<int:book_id>/<book_format>")
@login_required_if_no_ano
@download_required
def get_download_link(book_id, book_format):
book_format = book_format.split(".")[0]
book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
data = db.session.query(db.Data).filter(db.Data.book == book.id).filter(db.Data.format == book_format.upper()).first()
if data:
# collect downloaded books only for registered user and not for anonymous user
if current_user.is_authenticated:
helper.update_download(book_id, int(current_user.id))
file_name = book.title
if len(book.authors) > 0:
file_name = book.authors[0].name + '_' + file_name
file_name = helper.get_valid_filename(file_name)
headers = Headers()
try:
headers["Content-Type"] = mimetypes.types_map['.' + book_format]
except KeyError:
headers["Content-Type"] = "application/octet-stream"
headers["Content-Disposition"] = "attachment; filename*=UTF-8''%s.%s" % (quote(file_name.encode('utf-8')), book_format)
if config.config_use_google_drive:
df = gdriveutils.getFileFromEbooksFolder(Gdrive.Instance().drive, book.path, '%s.%s' % (data.name, book_format))
return do_gdrive_download(df, headers)
else:
response = make_response(send_from_directory(os.path.join(config.config_calibre_dir, book.path), data.name + "." + book_format))
response.headers = headers
return response
else:
abort(404)
@app.route("/download/<int:book_id>/<book_format>/<anyname>")
@login_required_if_no_ano
@download_required
def get_download_link_ext(book_id, book_format, anyname):
return get_download_link(book_id, book_format)
@app.route('/register', methods=['GET', 'POST'])
def register():
if not config.config_public_reg:
abort(404)
if current_user is not None and current_user.is_authenticated:
return redirect(url_for('index'))
if request.method == "POST":
to_save = request.form.to_dict()
if not to_save["nickname"] or not to_save["email"] or not to_save["password"]:
flash(_(u"Please fill out all fields!"), category="error")
return render_title_template('register.html', title=_(u"register"))
existing_user = ub.session.query(ub.User).filter(ub.User.nickname == to_save["nickname"]).first()
existing_email = ub.session.query(ub.User).filter(ub.User.email == to_save["email"]).first()
if not existing_user and not existing_email:
content = ub.User()
content.password = generate_password_hash(to_save["password"])
content.nickname = to_save["nickname"]
content.email = to_save["email"]
content.role = config.config_default_role
try:
ub.session.add(content)
ub.session.commit()
except Exception:
ub.session.rollback()
flash(_(u"An unknown error occured. Please try again later."), category="error")
return render_title_template('register.html', title=_(u"register"))
flash("Your account has been created. Please login.", category="success")
return redirect(url_for('login'))
else:
flash(_(u"This username or email address is already in use."), category="error")
return render_title_template('register.html', title=_(u"register"))
return render_title_template('register.html', title=_(u"register"))
@app.route('/login', methods=['GET', 'POST'])
def login():
if not config.db_configured:
return redirect(url_for('basic_configuration'))
if current_user is not None and current_user.is_authenticated:
return redirect(url_for('index'))
if request.method == "POST":
form = request.form.to_dict()
user = ub.session.query(ub.User).filter(ub.User.nickname == form['username'].strip()).first()
if user and check_password_hash(user.password, form['password']):
login_user(user, remember=True)
flash(_(u"you are now logged in as: '%(nickname)s'", nickname=user.nickname), category="success")
return redirect(url_for("index"))
else:
app.logger.info('Login failed for user "'+form['username']+'"')
flash(_(u"Wrong Username or Password"), category="error")
return render_title_template('login.html', title=_(u"login"))
@app.route('/logout')
@login_required
def logout():
if current_user is not None and current_user.is_authenticated:
logout_user()
return redirect(url_for('login'))
@app.route('/send/<int:book_id>')
@login_required
@download_required
def send_to_kindle(book_id):
settings = ub.get_mail_settings()
if settings.get("mail_server", "mail.example.com") == "mail.example.com":
flash(_(u"Please configure the SMTP mail settings first..."), category="error")
elif current_user.kindle_mail:
result = helper.send_mail(book_id, current_user.kindle_mail, config.config_calibre_dir)
if result is None:
flash(_(u"Book successfully send to %(kindlemail)s", kindlemail=current_user.kindle_mail),
category="success")
helper.update_download(book_id, int(current_user.id))
else:
flash(_(u"There was an error sending this book: %(res)s", res=result), category="error")
else:
flash(_(u"Please configure your kindle email address first..."), category="error")
return redirect(request.environ["HTTP_REFERER"])
@app.route("/shelf/add/<int:shelf_id>/<int:book_id>")
@login_required
def add_to_shelf(shelf_id, book_id):
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
if not shelf.is_public and not shelf.user_id == int(current_user.id):
app.logger.info("Sorry you are not allowed to add a book to the the shelf: %s" % shelf.name)
return redirect(url_for('index'))
maxOrder = ub.session.query(func.max(ub.BookShelf.order)).filter(ub.BookShelf.shelf == shelf_id).first()
book_in_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id,
ub.BookShelf.book_id == book_id).first()
if book_in_shelf:
app.logger.info("Book is already part of the shelf: %s" % shelf.name)
return redirect(url_for('index'))
if maxOrder[0] is None:
maxOrder = 0
else:
maxOrder = maxOrder[0]
if (shelf.is_public and current_user.role_edit_shelfs()) or not shelf.is_public:
ins = ub.BookShelf(shelf=shelf.id, book_id=book_id, order=maxOrder + 1)
ub.session.add(ins)
ub.session.commit()
flash(_(u"Book has been added to shelf: %(sname)s", sname=shelf.name), category="success")
return redirect(request.environ["HTTP_REFERER"])
else:
app.logger.info("User is not allowed to edit public shelfs")
return redirect(url_for('index'))
@app.route("/shelf/remove/<int:shelf_id>/<int:book_id>")
@login_required
def remove_from_shelf(shelf_id, book_id):
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
# deny access if the shelf is private and not owned by the user, or public but the user lacks shelf-edit rights
if (not shelf.is_public and shelf.user_id != int(current_user.id)) \
or (shelf.is_public and not current_user.role_edit_shelfs()):
app.logger.info("Sorry you are not allowed to remove a book from this shelf: %s" % shelf.name)
return redirect(url_for('index'))
book_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id,
ub.BookShelf.book_id == book_id).first()
ub.session.delete(book_shelf)
ub.session.commit()
flash(_(u"Book has been removed from shelf: %(sname)s", sname=shelf.name), category="success")
return redirect(request.environ["HTTP_REFERER"])
@app.route("/shelf/create", methods=["GET", "POST"])
@login_required
def create_shelf():
shelf = ub.Shelf()
if request.method == "POST":
to_save = request.form.to_dict()
if "is_public" in to_save:
shelf.is_public = 1
shelf.name = to_save["title"]
shelf.user_id = int(current_user.id)
existing_shelf = ub.session.query(ub.Shelf).filter(
or_((ub.Shelf.name == to_save["title"]) & (ub.Shelf.is_public == 1),
(ub.Shelf.name == to_save["title"]) & (ub.Shelf.user_id == int(current_user.id)))).first()
if existing_shelf:
flash(_(u"A shelf with the name '%(title)s' already exists.", title=to_save["title"]), category="error")
else:
try:
ub.session.add(shelf)
ub.session.commit()
flash(_(u"Shelf %(title)s created", title=to_save["title"]), category="success")
except Exception:
flash(_(u"There was an error"), category="error")
return render_title_template('shelf_edit.html', shelf=shelf, title=_(u"create a shelf"))
else:
return render_title_template('shelf_edit.html', shelf=shelf, title=_(u"create a shelf"))
@app.route("/shelf/edit/<int:shelf_id>", methods=["GET", "POST"])
@login_required
def edit_shelf(shelf_id):
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
if request.method == "POST":
to_save = request.form.to_dict()
existing_shelf = ub.session.query(ub.Shelf).filter(
or_((ub.Shelf.name == to_save["title"]) & (ub.Shelf.is_public == 1),
(ub.Shelf.name == to_save["title"]) & (ub.Shelf.user_id == int(current_user.id)))).filter(
ub.Shelf.id != shelf_id).first()
if existing_shelf:
flash(_(u"A shelf with the name '%(title)s' already exists.", title=to_save["title"]), category="error")
else:
shelf.name = to_save["title"]
if "is_public" in to_save:
shelf.is_public = 1
else:
shelf.is_public = 0
try:
ub.session.commit()
flash(_(u"Shelf %(title)s changed", title=to_save["title"]), category="success")
except Exception:
flash(_(u"There was an error"), category="error")
return render_title_template('shelf_edit.html', shelf=shelf, title=_(u"Edit a shelf"))
else:
return render_title_template('shelf_edit.html', shelf=shelf, title=_(u"Edit a shelf"))
@app.route("/shelf/delete/<int:shelf_id>")
@login_required
def delete_shelf(shelf_id):
cur_shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first()
if current_user.role_admin():
deleted = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).delete()
else:
if (not cur_shelf.is_public and cur_shelf.user_id == int(current_user.id)) \
or (cur_shelf.is_public and current_user.role_edit_shelfs()):
deleted = ub.session.query(ub.Shelf).filter(ub.or_(ub.and_(ub.Shelf.user_id == int(current_user.id),
ub.Shelf.id == shelf_id),
ub.and_(ub.Shelf.is_public == 1,
ub.Shelf.id == shelf_id))).delete()
if deleted:
ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id).delete()
ub.session.commit()
app.logger.info(_(u"successfully deleted shelf %(name)s", name=cur_shelf.name, category="success"))
return redirect(url_for('index'))
@app.route("/shelf/<int:shelf_id>")
@login_required_if_no_ano
def show_shelf(shelf_id):
if current_user.is_anonymous():
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.is_public == 1, ub.Shelf.id == shelf_id).first()
else:
shelf = ub.session.query(ub.Shelf).filter(ub.or_(ub.and_(ub.Shelf.user_id == int(current_user.id),
ub.Shelf.id == shelf_id),
ub.and_(ub.Shelf.is_public == 1,
ub.Shelf.id == shelf_id))).first()
result = list()
if shelf:
books_in_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id).order_by(
ub.BookShelf.order.asc()).all()
for book in books_in_shelf:
cur_book = db.session.query(db.Books).filter(db.Books.id == book.book_id).first()
result.append(cur_book)
return render_title_template('shelf.html', entries=result, title=_(u"Shelf: '%(name)s'", name=shelf.name),
shelf=shelf)
@app.route("/shelf/order/<int:shelf_id>", methods=["GET", "POST"])
@login_required
def order_shelf(shelf_id):
if request.method == "POST":
to_save = request.form.to_dict()
books_in_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id).order_by(
ub.BookShelf.order.asc()).all()
counter = 0
for book in books_in_shelf:
setattr(book, 'order', to_save[str(book.book_id)])
counter += 1
ub.session.commit()
if current_user.is_anonymous():
shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.is_public == 1, ub.Shelf.id == shelf_id).first()
else:
shelf = ub.session.query(ub.Shelf).filter(ub.or_(ub.and_(ub.Shelf.user_id == int(current_user.id),
ub.Shelf.id == shelf_id),
ub.and_(ub.Shelf.is_public == 1,
ub.Shelf.id == shelf_id))).first()
result = list()
if shelf:
books_in_shelf2 = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id) \
.order_by(ub.BookShelf.order.asc()).all()
for book in books_in_shelf2:
cur_book = db.session.query(db.Books).filter(db.Books.id == book.book_id).first()
result.append(cur_book)
return render_title_template('shelf_order.html', entries=result,
title=_(u"Change order of Shelf: '%(name)s'", name=shelf.name), shelf=shelf)
@app.route("/me", methods=["GET", "POST"])
@login_required
def profile():
content = ub.session.query(ub.User).filter(ub.User.id == int(current_user.id)).first()
downloads = list()
languages = db.session.query(db.Languages).all()
for lang in languages:
try:
cur_l = LC.parse(lang.lang_code)
lang.name = cur_l.get_language_name(get_locale())
except Exception:
lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
translations = babel.list_translations() + [LC('en')]
for book in content.downloads:
downloadBook = db.session.query(db.Books).filter(db.Books.id == book.book_id).first()
if downloadBook:
downloads.append(db.session.query(db.Books).filter(db.Books.id == book.book_id).first())
else:
ub.session.query(ub.Downloads).filter(book.book_id == ub.Downloads.book_id).delete()
ub.session.commit()
if request.method == "POST":
to_save = request.form.to_dict()
content.random_books = 0
if current_user.role_passwd() or current_user.role_admin():
if to_save["password"]:
content.password = generate_password_hash(to_save["password"])
if "kindle_mail" in to_save and to_save["kindle_mail"] != content.kindle_mail:
content.kindle_mail = to_save["kindle_mail"]
if to_save["email"] and to_save["email"] != content.email:
content.email = to_save["email"]
if "show_random" in to_save and to_save["show_random"] == "on":
content.random_books = 1
if "default_language" in to_save:
content.default_language = to_save["default_language"]
if to_save["locale"]:
content.locale = to_save["locale"]
content.sidebar_view = 0
if "show_random" in to_save:
content.sidebar_view += ub.SIDEBAR_RANDOM
if "show_language" in to_save:
content.sidebar_view += ub.SIDEBAR_LANGUAGE
if "show_series" in to_save:
content.sidebar_view += ub.SIDEBAR_SERIES
if "show_category" in to_save:
content.sidebar_view += ub.SIDEBAR_CATEGORY
if "show_hot" in to_save:
content.sidebar_view += ub.SIDEBAR_HOT
if "show_best_rated" in to_save:
content.sidebar_view += ub.SIDEBAR_BEST_RATED
if "show_author" in to_save:
content.sidebar_view += ub.SIDEBAR_AUTHOR
if "show_read_and_unread" in to_save:
content.sidebar_view += ub.SIDEBAR_READ_AND_UNREAD
if "show_detail_random" in to_save:
content.sidebar_view += ub.DETAIL_RANDOM
if "default_language" in to_save:
content.default_language = to_save["default_language"]
try:
ub.session.commit()
except IntegrityError:
ub.session.rollback()
flash(_(u"Found an existing account for this email address."), category="error")
return render_title_template("user_edit.html", content=content, downloads=downloads,
title=_(u"%(name)s's profile", name=current_user.nickname))
flash(_(u"Profile updated"), category="success")
return render_title_template("user_edit.html", translations=translations, profile=1, languages=languages,
content=content,
downloads=downloads, title=_(u"%(name)s's profile", name=current_user.nickname))
@app.route("/admin/view")
@login_required
@admin_required
def admin():
commit = '$Format:%cI$'
content = ub.session.query(ub.User).all()
settings = ub.session.query(ub.Settings).first()
return render_title_template("admin.html", content=content, email=settings, config=config, commit=commit,
development=ub.DEVELOPMENT, title=_(u"Admin page"))
@app.route("/admin/config", methods=["GET", "POST"])
@login_required
@admin_required
def configuration():
return configuration_helper(0)
@app.route("/config", methods=["GET", "POST"])
@unconfigured
def basic_configuration():
return configuration_helper(1)
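# configuration_helper() serves both the admin configuration page (origin=0) and the initial setup page
# (origin=1); changing the Calibre directory or the Google Drive options triggers a database reload, while
# changing the port or the title regex requires a server restart.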
def configuration_helper(origin):
# global global_task
reboot_required = False
db_change = False
success = False
if request.method == "POST":
to_save = request.form.to_dict()
content = ub.session.query(ub.Settings).first()
if "config_calibre_dir" in to_save:
if content.config_calibre_dir != to_save["config_calibre_dir"]:
content.config_calibre_dir = to_save["config_calibre_dir"]
db_change = True
# Google drive setup
create_new_yaml = False
if "config_google_drive_client_id" in to_save:
if content.config_google_drive_client_id != to_save["config_google_drive_client_id"]:
content.config_google_drive_client_id = to_save["config_google_drive_client_id"]
create_new_yaml = True
if "config_google_drive_client_secret" in to_save:
if content.config_google_drive_client_secret != to_save["config_google_drive_client_secret"]:
content.config_google_drive_client_secret = to_save["config_google_drive_client_secret"]
create_new_yaml = True
if "config_google_drive_calibre_url_base" in to_save:
if content.config_google_drive_calibre_url_base != to_save["config_google_drive_calibre_url_base"]:
content.config_google_drive_calibre_url_base = to_save["config_google_drive_calibre_url_base"]
create_new_yaml = True
if ("config_use_google_drive" in to_save and not content.config_use_google_drive) or ("config_use_google_drive" not in to_save and content.config_use_google_drive):
content.config_use_google_drive = "config_use_google_drive" in to_save
db_change = True
if not content.config_use_google_drive:
create_new_yaml = False
if create_new_yaml:
with open('settings.yaml', 'w') as f:
with open('gdrive_template.yaml', 'r') as t:
f.write(t.read() % {'client_id': content.config_google_drive_client_id, 'client_secret': content.config_google_drive_client_secret,
"redirect_uri": content.config_google_drive_calibre_url_base + 'gdrive/callback'})
if "config_google_drive_folder" in to_save:
if content.config_google_drive_folder != to_save["config_google_drive_folder"]:
content.config_google_drive_folder = to_save["config_google_drive_folder"]
db_change = True
##
if "config_port" in to_save:
if content.config_port != int(to_save["config_port"]):
content.config_port = int(to_save["config_port"])
reboot_required = True
if "config_calibre_web_title" in to_save:
content.config_calibre_web_title = to_save["config_calibre_web_title"]
if "config_columns_to_ignore" in to_save:
content.config_columns_to_ignore = to_save["config_columns_to_ignore"]
if "config_title_regex" in to_save:
if content.config_title_regex != to_save["config_title_regex"]:
content.config_title_regex = to_save["config_title_regex"]
reboot_required = True
if "config_log_level" in to_save:
content.config_log_level = int(to_save["config_log_level"])
if "config_random_books" in to_save:
content.config_random_books = int(to_save["config_random_books"])
if "config_books_per_page" in to_save:
content.config_books_per_page = int(to_save["config_books_per_page"])
content.config_uploading = 0
content.config_anonbrowse = 0
content.config_public_reg = 0
if "config_uploading" in to_save and to_save["config_uploading"] == "on":
content.config_uploading = 1
if "config_anonbrowse" in to_save and to_save["config_anonbrowse"] == "on":
content.config_anonbrowse = 1
if "config_public_reg" in to_save and to_save["config_public_reg"] == "on":
content.config_public_reg = 1
content.config_default_role = 0
if "admin_role" in to_save:
content.config_default_role = content.config_default_role + ub.ROLE_ADMIN
if "download_role" in to_save:
content.config_default_role = content.config_default_role + ub.ROLE_DOWNLOAD
if "upload_role" in to_save:
content.config_default_role = content.config_default_role + ub.ROLE_UPLOAD
if "edit_role" in to_save:
content.config_default_role = content.config_default_role + ub.ROLE_EDIT
if "delete_role" in to_save:
content.config_default_role = content.config_default_role + ub.ROLE_DELETE_BOOKS
if "passwd_role" in to_save:
content.config_default_role = content.config_default_role + ub.ROLE_PASSWD
if "passwd_role" in to_save:
content.config_default_role = content.config_default_role + ub.ROLE_EDIT_SHELFS
try:
if content.config_use_google_drive and is_gdrive_ready() and not os.path.exists(config.config_calibre_dir + "/metadata.db"):
gdriveutils.downloadFile(Gdrive.Instance().drive, None, "metadata.db", config.config_calibre_dir + "/metadata.db")
if db_change:
if config.db_configured:
db.session.close()
db.engine.dispose()
ub.session.commit()
flash(_(u"Calibre-web configuration updated"), category="success")
config.loadSettings()
app.logger.setLevel(config.config_log_level)
logging.getLogger("book_formats").setLevel(config.config_log_level)
except Exception as e:
flash(e, category="error")
return render_title_template("config_edit.html", content=config, origin=origin, gdrive=gdrive_support,
title=_(u"Basic Configuration"))
if db_change:
reload(db)
if not db.setup_db():
flash(_(u'DB location is not valid, please enter correct path'), category="error")
return render_title_template("config_edit.html", content=config, origin=origin, gdrive=gdrive_support,
title=_(u"Basic Configuration"))
if reboot_required:
# db.engine.dispose() # ToDo verify correct
ub.session.close()
ub.engine.dispose()
# stop tornado server
server = IOLoop.instance()
server.add_callback(server.stop)
helper.global_task = 0
app.logger.info('Reboot required, restarting')
if origin:
success = True
return render_title_template("config_edit.html", origin=origin, success=success, content=config,
show_authenticate_google_drive=not is_gdrive_ready(), gdrive=gdrive_support,
title=_(u"Basic Configuration"))
@app.route("/admin/user/new", methods=["GET", "POST"])
@login_required
@admin_required
def new_user():
content = ub.User()
languages = db.session.query(db.Languages).all()
for lang in languages:
try:
cur_l = LC.parse(lang.lang_code)
lang.name = cur_l.get_language_name(get_locale())
except Exception:
lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
translations = [LC('en')] + babel.list_translations()
if request.method == "POST":
to_save = request.form.to_dict()
if not to_save["nickname"] or not to_save["email"] or not to_save["password"]:
flash(_(u"Please fill out all fields!"), category="error")
return render_title_template("user_edit.html", new_user=1, content=content, translations=translations,
title=_(u"Add new user"))
content.password = generate_password_hash(to_save["password"])
content.nickname = to_save["nickname"]
content.email = to_save["email"]
content.default_language = to_save["default_language"]
if "locale" in to_save:
content.locale = to_save["locale"]
content.sidebar_view = 0
if "show_random" in to_save:
content.sidebar_view += ub.SIDEBAR_RANDOM
if "show_language" in to_save:
content.sidebar_view += ub.SIDEBAR_LANGUAGE
if "show_series" in to_save:
content.sidebar_view += ub.SIDEBAR_SERIES
if "show_category" in to_save:
content.sidebar_view += ub.SIDEBAR_CATEGORY
if "show_hot" in to_save:
content.sidebar_view += ub.SIDEBAR_HOT
if "show_read_and_unread" in to_save:
content.sidebar_view += ub.SIDEBAR_READ_AND_UNREAD
if "show_best_rated" in to_save:
content.sidebar_view += ub.SIDEBAR_BEST_RATED
if "show_author" in to_save:
content.sidebar_view += ub.SIDEBAR_AUTHOR
if "show_detail_random" in to_save:
content.sidebar_view += ub.DETAIL_RANDOM
content.role = 0
if "admin_role" in to_save:
content.role = content.role + ub.ROLE_ADMIN
if "download_role" in to_save:
content.role = content.role + ub.ROLE_DOWNLOAD
if "upload_role" in to_save:
content.role = content.role + ub.ROLE_UPLOAD
if "edit_role" in to_save:
content.role = content.role + ub.ROLE_EDIT
if "delete_role" in to_save:
content.role = content.role + ub.ROLE_DELETE_BOOKS
if "passwd_role" in to_save:
content.role = content.role + ub.ROLE_PASSWD
if "edit_shelf_role" in to_save:
content.role = content.role + ub.ROLE_EDIT_SHELFS
try:
ub.session.add(content)
ub.session.commit()
flash(_(u"User '%(user)s' created", user=content.nickname), category="success")
return redirect(url_for('admin'))
except IntegrityError:
ub.session.rollback()
flash(_(u"Found an existing account for this email address or nickname."), category="error")
else:
content.role = config.config_default_role
return render_title_template("user_edit.html", new_user=1, content=content, translations=translations,
languages=languages, title=_(u"Add new user"))
@app.route("/admin/mailsettings", methods=["GET", "POST"])
@login_required
@admin_required
def edit_mailsettings():
content = ub.session.query(ub.Settings).first()
if request.method == "POST":
to_save = request.form.to_dict()
content.mail_server = to_save["mail_server"]
content.mail_port = int(to_save["mail_port"])
content.mail_login = to_save["mail_login"]
content.mail_password = to_save["mail_password"]
content.mail_from = to_save["mail_from"]
content.mail_use_ssl = int(to_save["mail_use_ssl"])
try:
ub.session.commit()
flash(_(u"Mail settings updated"), category="success")
except Exception as e:
flash(e, category="error")
if "test" in to_save and to_save["test"]:
if current_user.kindle_mail:
result = helper.send_test_mail(current_user.kindle_mail)
if result is None:
flash(_(u"Test E-Mail successfully send to %(kindlemail)s", kindlemail=current_user.kindle_mail),
category="success")
else:
flash(_(u"There was an error sending the Test E-Mail: %(res)s", res=result), category="error")
else:
flash(_(u"Please configure your kindle email address first..."), category="error")
else:
flash(_(u"E-Mail settings updated"), category="success")
return render_title_template("email_edit.html", content=content, title=_(u"Edit mail settings"))
@app.route("/admin/user/<int:user_id>", methods=["GET", "POST"])
@login_required
@admin_required
def edit_user(user_id):
content = ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()
downloads = list()
languages = db.session.query(db.Languages).all()
for lang in languages:
try:
cur_l = LC.parse(lang.lang_code)
lang.name = cur_l.get_language_name(get_locale())
except Exception:
lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
translations = babel.list_translations() + [LC('en')]
for book in content.downloads:
downloadBook = db.session.query(db.Books).filter(db.Books.id == book.book_id).first()
if downloadBook:
downloads.append(db.session.query(db.Books).filter(db.Books.id == book.book_id).first())
else:
ub.session.query(ub.Downloads).filter(book.book_id == ub.Downloads.book_id).delete()
ub.session.commit()
if request.method == "POST":
to_save = request.form.to_dict()
if "delete" in to_save:
ub.session.delete(content)
flash(_(u"User '%(nick)s' deleted", nick=content.nickname), category="success")
return redirect(url_for('admin'))
else:
if "password" in to_save and to_save["password"]:
content.password = generate_password_hash(to_save["password"])
if "admin_role" in to_save and not content.role_admin():
content.role = content.role + ub.ROLE_ADMIN
elif "admin_role" not in to_save and content.role_admin():
content.role = content.role - ub.ROLE_ADMIN
if "download_role" in to_save and not content.role_download():
content.role = content.role + ub.ROLE_DOWNLOAD
elif "download_role" not in to_save and content.role_download():
content.role = content.role - ub.ROLE_DOWNLOAD
if "upload_role" in to_save and not content.role_upload():
content.role = content.role + ub.ROLE_UPLOAD
elif "upload_role" not in to_save and content.role_upload():
content.role = content.role - ub.ROLE_UPLOAD
if "edit_role" in to_save and not content.role_edit():
content.role = content.role + ub.ROLE_EDIT
elif "edit_role" not in to_save and content.role_edit():
content.role = content.role - ub.ROLE_EDIT
if "delete_role" in to_save and not content.role_delete_books():
content.role = content.role + ub.ROLE_DELETE_BOOKS
elif "delete_role" not in to_save and content.role_delete_books():
content.role = content.role - ub.ROLE_DELETE_BOOKS
if "passwd_role" in to_save and not content.role_passwd():
content.role = content.role + ub.ROLE_PASSWD
elif "passwd_role" not in to_save and content.role_passwd():
content.role = content.role - ub.ROLE_PASSWD
if "edit_shelf_role" in to_save and not content.role_edit_shelfs():
content.role = content.role + ub.ROLE_EDIT_SHELFS
elif "edit_shelf_role" not in to_save and content.role_edit_shelfs():
content.role = content.role - ub.ROLE_EDIT_SHELFS
if "show_random" in to_save and not content.show_random_books():
content.sidebar_view += ub.SIDEBAR_RANDOM
elif "show_random" not in to_save and content.show_random_books():
content.sidebar_view -= ub.SIDEBAR_RANDOM
if "show_language" in to_save and not content.show_language():
content.sidebar_view += ub.SIDEBAR_LANGUAGE
elif "show_language" not in to_save and content.show_language():
content.sidebar_view -= ub.SIDEBAR_LANGUAGE
if "show_series" in to_save and not content.show_series():
content.sidebar_view += ub.SIDEBAR_SERIES
elif "show_series" not in to_save and content.show_series():
content.sidebar_view -= ub.SIDEBAR_SERIES
if "show_category" in to_save and not content.show_category():
content.sidebar_view += ub.SIDEBAR_CATEGORY
elif "show_category" not in to_save and content.show_category():
content.sidebar_view -= ub.SIDEBAR_CATEGORY
if "show_hot" in to_save and not content.show_hot_books():
content.sidebar_view += ub.SIDEBAR_HOT
elif "show_hot" not in to_save and content.show_hot_books():
content.sidebar_view -= ub.SIDEBAR_HOT
if "show_best_rated" in to_save and not content.show_best_rated_books():
content.sidebar_view += ub.SIDEBAR_BEST_RATED
elif "show_best_rated" not in to_save and content.show_best_rated_books():
content.sidebar_view -= ub.SIDEBAR_BEST_RATED
if "show_read_and_unread" in to_save and not content.show_read_and_unread():
content.sidebar_view += ub.SIDEBAR_READ_AND_UNREAD
elif "show_read_and_unread" not in to_save and content.show_read_and_unread():
content.sidebar_view -= ub.SIDEBAR_READ_AND_UNREAD
if "show_author" in to_save and not content.show_author():
content.sidebar_view += ub.SIDEBAR_AUTHOR
elif "show_author" not in to_save and content.show_author():
content.sidebar_view -= ub.SIDEBAR_AUTHOR
if "show_detail_random" in to_save and not content.show_detail_random():
content.sidebar_view += ub.DETAIL_RANDOM
elif "show_detail_random" not in to_save and content.show_detail_random():
content.sidebar_view -= ub.DETAIL_RANDOM
if "default_language" in to_save:
content.default_language = to_save["default_language"]
if "locale" in to_save and to_save["locale"]:
content.locale = to_save["locale"]
if to_save["email"] and to_save["email"] != content.email:
content.email = to_save["email"]
if "kindle_mail" in to_save and to_save["kindle_mail"] != content.kindle_mail:
content.kindle_mail = to_save["kindle_mail"]
try:
ub.session.commit()
flash(_(u"User '%(nick)s' updated", nick=content.nickname), category="success")
except IntegrityError:
ub.session.rollback()
flash(_(u"An unknown error occured."), category="error")
return render_title_template("user_edit.html", translations=translations, languages=languages, new_user=0,
content=content, downloads=downloads,
title=_(u"Edit User %(nick)s", nick=content.nickname))
@app.route("/admin/book/<int:book_id>", methods=['GET', 'POST'])
@login_required_if_no_ano
@edit_required
def edit_book(book_id):
# create the function for sorting...
db.session.connection().connection.connection.create_function("title_sort", 1, db.title_sort)
cc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
if current_user.filter_language() != "all":
lang_filter = db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())
else:
lang_filter = True
book = db.session.query(db.Books).filter(db.Books.id == book_id).filter(lang_filter).first()
author_names = []
if book:
for index in range(0, len(book.languages)):
try:
book.languages[index].language_name = LC.parse(book.languages[index].lang_code).get_language_name(
get_locale())
except Exception:
book.languages[index].language_name = _(isoLanguages.get(part3=book.languages[index].lang_code).name)
for author in book.authors:
author_names.append(author.name)
if request.method == 'POST':
edited_books_id = set()
to_save = request.form.to_dict()
if book.title != to_save["book_title"]:
book.title = to_save["book_title"]
edited_books_id.add(book.id)
input_authors = to_save["author_name"].split('&')
input_authors = [it.strip() for it in input_authors]
# we have all author names now
if input_authors == ['']:
input_authors = [_(u'unknown')] # prevent empty Author
if book.authors:
author0_before_edit = book.authors[0].name
else:
author0_before_edit = db.Authors(_(u'unknown'),'',0)
modify_database_object(input_authors, book.authors, db.Authors, db.session, 'author')
if book.authors:
if author0_before_edit != book.authors[0].name:
edited_books_id.add(book.id)
book.author_sort = helper.get_sorted_author(input_authors[0])
if to_save["cover_url"] and os.path.splitext(to_save["cover_url"])[1].lower() == ".jpg":
img = requests.get(to_save["cover_url"])
if config.config_use_google_drive:
tmpDir = tempfile.gettempdir()
f = open(os.path.join(tmpDir, "uploaded_cover.jpg"), "wb")
f.write(img.content)
f.close()
gdriveutils.uploadFileToEbooksFolder(Gdrive.Instance().drive, os.path.join(book.path, 'cover.jpg'), os.path.join(tmpDir, f.name))
else:
f = open(os.path.join(config.config_calibre_dir, book.path, "cover.jpg"), "wb")
f.write(img.content)
f.close()
book.has_cover = 1
if book.series_index != to_save["series_index"]:
book.series_index = to_save["series_index"]
if len(book.comments):
book.comments[0].text = to_save["description"]
else:
book.comments.append(db.Comments(text=to_save["description"], book=book.id))
input_tags = to_save["tags"].split(',')
input_tags = [it.strip() for it in input_tags]
modify_database_object(input_tags, book.tags, db.Tags, db.session, 'tags')
input_series = [to_save["series"].strip()]
input_series = [x for x in input_series if x != '']
modify_database_object(input_series, book.series, db.Series, db.session, 'series')
input_languages = to_save["languages"].split(',')
input_languages = [it.strip().lower() for it in input_languages]
# retranslate displayed text to language codes
languages = db.session.query(db.Languages).all()
input_l = []
for lang in languages:
try:
lang.name = LC.parse(lang.lang_code).get_language_name(get_locale()).lower()
except Exception:
lang.name = _(isoLanguages.get(part3=lang.lang_code).name).lower()
for inp_lang in input_languages:
if inp_lang == lang.name:
input_l.append(lang.lang_code)
modify_database_object(input_l, book.languages, db.Languages, db.session, 'languages')
if to_save["rating"].strip():
old_rating = False
if len(book.ratings) > 0:
old_rating = book.ratings[0].rating
ratingx2 = int(float(to_save["rating"]) * 2)
if ratingx2 != old_rating:
is_rating = db.session.query(db.Ratings).filter(db.Ratings.rating == ratingx2).first()
if is_rating:
book.ratings.append(is_rating)
else:
new_rating = db.Ratings(rating=ratingx2)
book.ratings.append(new_rating)
if old_rating:
book.ratings.remove(book.ratings[0])
else:
if len(book.ratings) > 0:
book.ratings.remove(book.ratings[0])
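            # Sync custom columns: single-value columns (bool, rating, plain values)
            # are added, replaced or removed to match the submitted form value;
            # multi-value columns are synced like tags via modify_database_object().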
for c in cc:
cc_string = "custom_column_" + str(c.id)
if not c.is_multiple:
if len(getattr(book, cc_string)) > 0:
cc_db_value = getattr(book, cc_string)[0].value
else:
cc_db_value = None
if to_save[cc_string].strip():
if c.datatype == 'bool':
if to_save[cc_string] == 'None':
to_save[cc_string] = None
else:
to_save[cc_string] = 1 if to_save[cc_string] == 'True' else 0
if to_save[cc_string] != cc_db_value:
if cc_db_value is not None:
if to_save[cc_string] is not None:
setattr(getattr(book, cc_string)[0], 'value', to_save[cc_string])
else:
del_cc = getattr(book, cc_string)[0]
getattr(book, cc_string).remove(del_cc)
db.session.delete(del_cc)
else:
cc_class = db.cc_classes[c.id]
new_cc = cc_class(value=to_save[cc_string], book=book_id)
db.session.add(new_cc)
else:
if c.datatype == 'rating':
to_save[cc_string] = str(int(float(to_save[cc_string]) * 2))
if to_save[cc_string].strip() != cc_db_value:
if cc_db_value is not None:
# remove old cc_val
del_cc = getattr(book, cc_string)[0]
getattr(book, cc_string).remove(del_cc)
if len(del_cc.books) == 0:
db.session.delete(del_cc)
cc_class = db.cc_classes[c.id]
new_cc = db.session.query(cc_class).filter(
cc_class.value == to_save[cc_string].strip()).first()
# if no cc val is found add it
if new_cc is None:
new_cc = cc_class(value=to_save[cc_string].strip())
db.session.add(new_cc)
new_cc = db.session.query(cc_class).filter(
cc_class.value == to_save[cc_string].strip()).first()
# add cc value to book
getattr(book, cc_string).append(new_cc)
else:
if cc_db_value is not None:
# remove old cc_val
del_cc = getattr(book, cc_string)[0]
getattr(book, cc_string).remove(del_cc)
if len(del_cc.books) == 0:
db.session.delete(del_cc)
else:
input_tags = to_save[cc_string].split(',')
input_tags = map(lambda it: it.strip(), input_tags)
modify_database_object(input_tags, getattr(book, cc_string),db.cc_classes[c.id], db.session, 'custom')
db.session.commit()
author_names = []
for author in book.authors:
author_names.append(author.name)
for b in edited_books_id:
if config.config_use_google_drive:
helper.update_dir_structure_gdrive(b)
else:
helper.update_dir_stucture(b, config.config_calibre_dir)
if config.config_use_google_drive:
updateGdriveCalibreFromLocal()
if "detail_view" in to_save:
return redirect(url_for('show_book', book_id=book.id))
else:
return render_title_template('book_edit.html', book=book, authors=author_names, cc=cc,
title=_(u"edit metadata"))
else:
return render_title_template('book_edit.html', book=book, authors=author_names, cc=cc,
title=_(u"edit metadata"))
else:
flash(_(u"Error opening eBook. File does not exist or file is not accessible:"), category="error")
return redirect(url_for("index"))
@app.route("/upload", methods=["GET", "POST"])
@login_required_if_no_ano
@upload_required
def upload():
if not config.config_uploading:
abort(404)
# create the function for sorting...
db.session.connection().connection.connection.create_function("title_sort", 1, db.title_sort)
db.session.connection().connection.connection.create_function('uuid4', 0, lambda: str(uuid4()))
if request.method == 'POST' and 'btn-upload' in request.files:
requested_file = request.files['btn-upload']
if '.' in requested_file.filename:
file_ext = requested_file.filename.rsplit('.', 1)[-1].lower()
if file_ext not in ALLOWED_EXTENSIONS:
flash(
_('File extension "%s" is not allowed to be uploaded to this server' %
file_ext),
category="error"
)
return redirect(url_for('index'))
else:
flash(_('File to be uploaded must have an extension'), category="error")
return redirect(url_for('index'))
meta = uploader.upload(requested_file)
title = meta.title
author = meta.author
title_dir = helper.get_valid_filename(title, False)
author_dir = helper.get_valid_filename(author, False)
data_name = title_dir
filepath = config.config_calibre_dir + os.sep + author_dir + os.sep + title_dir
saved_filename = filepath + os.sep + data_name + meta.extension
if not os.path.exists(filepath):
try:
os.makedirs(filepath)
except OSError:
flash(_(u"Failed to create path %s (Permission denied)." % filepath), category="error")
return redirect(url_for('index'))
try:
copyfile(meta.file_path, saved_filename)
except OSError:
flash(_(u"Failed to store file %s (Permission denied)." % saved_filename), category="error")
return redirect(url_for('index'))
try:
os.unlink(meta.file_path)
except OSError:
flash(_(u"Failed to delete file %s (Permission denied)." % meta.file_path), category="warning")
file_size = os.path.getsize(saved_filename)
if meta.cover is None:
has_cover = 0
basedir = os.path.dirname(__file__)
copyfile(os.path.join(basedir, "static/generic_cover.jpg"), os.path.join(filepath, "cover.jpg"))
else:
has_cover = 1
move(meta.cover, os.path.join(filepath, "cover.jpg"))
is_author = db.session.query(db.Authors).filter(db.Authors.name == author).first()
if is_author:
db_author = is_author
else:
db_author = db.Authors(author, helper.get_sorted_author(author), "")
db.session.add(db_author)
        # add language (meta.languages actually holds a single value here)
input_language = meta.languages
db_language = None
if input_language != "":
input_language = isoLanguages.get(name=input_language).part3
hasLanguage = db.session.query(db.Languages).filter(db.Languages.lang_code == input_language).first()
if hasLanguage:
db_language = hasLanguage
else:
db_language = db.Languages(input_language)
db.session.add(db_language)
# combine path and normalize path from windows systems
path = os.path.join(author_dir, title_dir).replace('\\', '/')
db_book = db.Books(title, "", db_author.sort, datetime.datetime.now(), datetime.datetime(101, 1, 1), 1,
datetime.datetime.now(), path, has_cover, db_author, [], db_language)
db_book.authors.append(db_author)
if db_language is not None:
db_book.languages.append(db_language)
db_data = db.Data(db_book, meta.extension.upper()[1:], file_size, data_name)
db_book.data.append(db_data)
db.session.add(db_book)
        db.session.flush()  # flush so that db_book.id becomes available
# add comment
upload_comment = Markup(meta.description).unescape()
if upload_comment != "":
db.session.add(db.Comments(upload_comment, db_book.id))
db.session.commit()
if db_language is not None: # display Full name instead of iso639.part3
db_book.languages[0].language_name = _(meta.languages)
author_names = []
for author in db_book.authors:
author_names.append(author.name)
if config.config_use_google_drive:
updateGdriveCalibreFromLocal()
cc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
if current_user.role_edit() or current_user.role_admin():
return render_title_template('book_edit.html', book=db_book, authors=author_names, cc=cc,
title=_(u"edit metadata"))
book_in_shelfs = []
return render_title_template('detail.html', entry=db_book, cc=cc, title=db_book.title,
books_shelfs=book_in_shelfs, )
else:
return redirect(url_for("index"))
def start_gevent():
from gevent.wsgi import WSGIServer
global gevent_server
gevent_server = WSGIServer(('', ub.config.config_port), app)
gevent_server.serve_forever() | gpl-3.0 | -8,819,939,616,448,573,000 | 43.092821 | 172 | 0.597984 | false | 3.660436 | true | false | false |
mcs07/mongodb-chemistry | mchem/postgres.py | 1 | 6525 | # -*- coding: utf-8 -*-
"""
mchem.postgres
~~~~~~~~~~~~~~
Functions to build and benchmark PostgreSQL database for comparison.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import logging
import time
import click
import numpy as np
import psycopg2
from psycopg2.extensions import AsIs
log = logging.getLogger(__name__)
# Start by creating the database and loading the chembl dump via the command line:
# createdb chembl
# psql chembl < chembl_19.pgdump.sql
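# A minimal usage sketch (the executable name is an assumption; the `cli` group
# below must be wired to it, while the options and sub-commands are real):
#
#   mchem-postgres --db chembl --user postgres load
#   mchem-postgres --db chembl --user postgres profile -s sample_ids.txt -f m2 -t 0.8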
@click.group()
@click.option('--db', '-d', default='mchem', envvar='MCHEM_POSTGRES_DB', help='PostgreSQL database name (default: mchem).')
@click.option('--user', '-u', default='root', envvar='MCHEM_POSTGRES_USER', help='PostgreSQL username (default: root).')
@click.option('--password', '-p', default=None, envvar='MCHEM_POSTGRES_PASSWORD', help='PostgreSQL password.')
@click.option('--verbose', '-v', is_flag=True, help='Verbose debug logging.')
@click.help_option('--help', '-h')
@click.pass_context
def cli(ctx, db, user, password, verbose):
"""PostgreSQL command line interface."""
click.echo('Connecting %s@%s' % (user, db))
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO, format='%(levelname)s: %(message)s')
ctx.obj = psycopg2.connect(database=db, user=user, password=password)
@cli.command()
@click.pass_obj
def load(conn):
"""Build PostgreSQL database."""
cur = conn.cursor()
cur.execute('create extension if not exists rdkit;')
cur.execute('create schema rdk;')
cur.execute('drop table if exists biotherapeutics, drug_mechanism, activities, assays, assay_parameters, compound_records, compound_properties, molecule_hierarchy, ligand_eff, predicted_binding_domains, molecule_synonyms, docs, formulations, molecule_atc_classification cascade;')
cur.execute('select * into rdk.mols from (select molregno,mol_from_ctab(molfile::cstring) m from compound_structures) tmp where m is not null;')
cur.execute('create index molidx on rdk.mols using gist(m);')
cur.execute('alter table rdk.mols add primary key (molregno);')
cur.execute('select molregno, m into rdk.fps from rdk.mols;')
cur.execute('alter table rdk.fps add column m2l512 bfp;')
cur.execute('alter table rdk.fps add column m2l2048 bfp;')
cur.execute('alter table rdk.fps add column m2 sfp;')
cur.execute('alter table rdk.fps add column m3 sfp;')
cur.execute('update rdk.fps set m2 = morgan_fp(m);')
cur.execute('update rdk.fps set m3 = morgan_fp(m, 3);')
cur.execute('set rdkit.morgan_fp_size=2048;')
cur.execute('update rdk.fps set m2l2048 = morganbv_fp(m);')
cur.execute('set rdkit.morgan_fp_size=512;')
cur.execute('update rdk.fps set m2l512 = morganbv_fp(m);')
cur.execute('alter table rdk.fps drop column m;')
cur.execute('create index fps_m2_idx on rdk.fps using gist(m2);')
cur.execute('create index fps_m3_idx on rdk.fps using gist(m3);')
cur.execute('create index fps_m2l2048_idx on rdk.fps using gist(m2l2048);')
cur.execute('create index fps_m2l512_idx on rdk.fps using gist(m2l512);')
cur.execute('alter table rdk.fps add primary key (molregno);')
conn.commit()
cur.close()
conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', '-f', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', '-t', default=0.8, help='Tanimoto threshold (default: 0.8).')
@click.pass_obj
def profile(conn, sample, fp, threshold):
cur = conn.cursor()
mol_ids = sample.read().strip().split('\n')
times = []
cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
for i, mol_id in enumerate(mol_ids[:100]):
log.debug('Query molecule %s of %s: %s' % (i+1, len(mol_ids), mol_id))
# ARGH! The CHEMBL ID vs. molregno thing is a nightmare
cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
molregno = cur.fetchone()[0]
#cur.execute("select m from rdk.mols where molregno = %s", (molregno,))
#smiles = cur.fetchone()[0]
cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
qfp = cur.fetchone()[0]
log.debug(mol_id)
start = time.time()
cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
#cur.execute("select molregno from rdk.fps where %s%%morganbv_fp(%s)", (fp, smiles,)) # using smiles
results = cur.fetchall()
end = time.time()
times.append(end - start)
# Save results
result = {
'median_time': np.median(times),
'mean_time': np.mean(times),
'fp': fp,
'threshold': threshold
}
log.info(result)
cur.close()
conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', default=0.8, help='Similarity search threshold (default 0.8).')
@click.pass_obj
def samplesim(conn, sample, threshold, fp):
"""Perform a similarity search on every molecule in sample and print results."""
click.echo('Fingerprint: %s, Threshold: %s' % (fp, threshold))
cur = conn.cursor()
mol_ids = sample.read().strip().split('\n')
cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
for i, mol_id in enumerate(mol_ids[:100]):
click.echo('Query: %s (%s of %s)' % (mol_id, i+1, len(mol_ids)))
cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
molregno = cur.fetchone()[0]
cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
qfp = cur.fetchone()[0]
cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
results = [r[0] for r in cur.fetchall()]
chembl_ids = []
for mrn in results:
cur.execute("select chembl_id from chembl_id_lookup where entity_id = %s and entity_type = 'COMPOUND'", (mrn,))
chembl_ids.append(cur.fetchone()[0])
click.echo(chembl_ids)
cur.close()
conn.close()
| mit | 5,491,135,645,863,577,000 | 44.950704 | 284 | 0.656398 | false | 3.126497 | false | false | false |
toastdriven/alligator | alligator/backends/sqlite_backend.py | 1 | 4414 | import sqlite3
import time
class Client(object):
def __init__(self, conn_string):
"""
A SQLite-based ``Client``.
Args:
conn_string (str): The DSN. The host/port/db are parsed out of it.
Should be of the format ``sqlite:///path/to/db/file.db``
"""
# This is actually the filepath to the DB file.
self.conn_string = conn_string
# Kill the 'sqlite://' portion.
path = self.conn_string.split("://", 1)[1]
self.conn = sqlite3.connect(path)
def _run_query(self, query, args):
cur = self.conn.cursor()
if not args:
cur.execute(query)
else:
cur.execute(query, args)
self.conn.commit()
return cur
def setup_tables(self, queue_name="all"):
"""
Allows for manual creation of the needed tables.
Args:
queue_name (str): Optional. The name of the queue. Default is
`all`.
"""
# For manually creating the tables...
query = (
"CREATE TABLE `queue_{}` "
"(task_id text, data text, delay_until integer)"
).format(queue_name)
self._run_query(query, None)
def len(self, queue_name):
"""
Returns the length of the queue.
Args:
queue_name (str): The name of the queue. Usually handled by the
                ``Gator`` instance.
Returns:
int: The length of the queue
"""
query = "SELECT COUNT(task_id) FROM `queue_{}`".format(queue_name)
cur = self._run_query(query, [])
res = cur.fetchone()
return res[0]
def drop_all(self, queue_name):
"""
        Drops all the tasks in the queue.
Args:
queue_name (str): The name of the queue. Usually handled by the
``Gator`` instance.
"""
query = "DELETE FROM `queue_{}`".format(queue_name)
self._run_query(query, [])
def push(self, queue_name, task_id, data, delay_until=None):
"""
Pushes a task onto the queue.
Args:
queue_name (str): The name of the queue. Usually handled by the
``Gator`` instance.
task_id (str): The identifier of the task.
data (str): The relevant data for the task.
delay_until (float): Optional. The Unix timestamp to delay
processing of the task until. Default is `None`.
Returns:
str: The task ID.
"""
if delay_until is None:
delay_until = time.time()
query = (
"INSERT INTO `queue_{}` "
"(task_id, data, delay_until) "
"VALUES (?, ?, ?)"
).format(queue_name)
self._run_query(query, [task_id, data, int(delay_until)])
return task_id
def pop(self, queue_name):
"""
Pops a task off the queue.
Args:
queue_name (str): The name of the queue. Usually handled by the
``Gator`` instance.
Returns:
str: The data for the task.
"""
now = int(time.time())
query = (
"SELECT task_id, data, delay_until "
"FROM `queue_{}` "
"WHERE delay_until <= ? "
"LIMIT 1"
).format(queue_name)
cur = self._run_query(query, [now])
res = cur.fetchone()
if res:
query = "DELETE FROM `queue_{}` WHERE task_id = ?".format(
queue_name
)
self._run_query(query, [res[0]])
return res[1]
def get(self, queue_name, task_id):
"""
Pops a specific task off the queue by identifier.
Args:
queue_name (str): The name of the queue. Usually handled by the
``Gator`` instance.
task_id (str): The identifier of the task.
Returns:
str: The data for the task.
"""
# fmt: off
query = (
"SELECT task_id, data "
"FROM `queue_{}` "
"WHERE task_id = ?"
).format(queue_name)
# fmt: on
cur = self._run_query(query, [task_id])
res = cur.fetchone()
query = "DELETE FROM `queue_{}` WHERE task_id = ?".format(queue_name)
self._run_query(query, [task_id])
return res[1]
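# A minimal usage sketch (not part of the original module). The DSN format and
# method names follow the docstrings above; the temporary path and task data
# are illustrative only.
if __name__ == "__main__":
    import os
    import tempfile

    db_path = os.path.join(tempfile.mkdtemp(), "alligator-example.db")
    client = Client("sqlite://" + db_path)
    client.setup_tables("all")                         # creates table queue_all
    client.push("all", "task-1", '{"fn": "example"}')
    print(client.len("all"))                           # -> 1
    print(client.pop("all"))                           # -> '{"fn": "example"}'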
| bsd-3-clause | 9,057,854,631,460,570,000 | 27.849673 | 78 | 0.500453 | false | 4.060718 | false | false | false |
RylanGotto/web-dash | websterton/user_manager/views.py | 1 | 2711 | # -*- coding: utf-8 -*-
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session)
from flask.ext.login import login_required
from websterton.user.models import User
from random import randint
from forismatic import Forismatic
import os
import praw
import json
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
blueprint = Blueprint("user_manager", __name__, url_prefix='/user_manager',
static_folder="../static")
@blueprint.route("/get_new_background")
@login_required
def get_new_background():
user = load_user(session['user_id'])
theme = user.current_theme
path = os.path.join(SITE_ROOT, "../static", theme)
print path
backgrounds = os.listdir(path)[1:]
new_background_num = randint(0,len(backgrounds)-1)
return url_for('static', filename='%s/%s' % (theme, backgrounds[new_background_num]))
@blueprint.route("/save_user_settings", methods=["GET", "POST"])
@login_required
def save_user_settings():
user_info = request.args.to_dict()
user = load_user(session['user_id'])
news_feed = {}
user.current_theme = user_info.pop('theme')
user.location = user_info.pop('location')
print user_info
for i, k in user_info.iteritems():
news_feed.update({i:k})
user.news_feed = news_feed
user.save()
return "theme changed"
@blueprint.route("/save_new_reddit", methods=["GET", "POST"])
@login_required
def save_new_reddit():
info = request.args.to_dict()
user = load_user(session['user_id'])
new_key = ""
for i, k in info.iteritems():
new_key = i
upvote_limit = k
monitored_reddits = json.loads(user.monitored_reddits)
if monitored_reddits.has_key(new_key) and upvote_limit > 0:
return "failed", 404
else:
for i, k in info.iteritems():
monitored_reddits.update({i : k})
user.monitored_reddits = json.dumps(monitored_reddits)
user.save()
return "success"
@blueprint.route("/remove_reddit", methods=["GET", "POST"])
@login_required
def remove_reddit():
info = request.args.to_dict()
user = load_user(session['user_id'])
monitored_reddits = json.loads(user.monitored_reddits)
for i, k in info.iteritems():
monitored_reddits.pop(i.strip())
user.monitored_reddits = json.dumps(monitored_reddits)
user.save()
return "deleted"
@blueprint.route("/get_user_location", methods=["GET", "POST"])
@login_required
def get_user_location():
return load_user(session['user_id']).location
@blueprint.route("/get_quote", methods=["GET", "POST"])
@login_required
def get_quote():
# Initializing manager
f = Forismatic()
q = f.get_quote()
quote = {'quote':q.quote, 'author': q.author}
print quote
return json.dumps(quote)
def load_user(id):
return User.get_by_id(int(id))
| bsd-3-clause | -1,981,855,954,654,157,800 | 25.067308 | 86 | 0.687938 | false | 2.99558 | false | false | false |
chungjjang80/FRETBursts | fretbursts/dataload/pytables_array_list.py | 2 | 3688 | #
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2014 Antonino Ingargiola <[email protected]>
#
"""
This module implements a list of arrays stored into a file with pytables.
The list is created empty (if the file does not exist) and must be populated
with the `append()` method.
If the file-name exists the list is populated with arrays stored
in the file.
Each list element is a reference to a pytable array. To read the array in
memory use the slicing notation (like pytable_array[:]).
"""
from __future__ import print_function
from builtins import range, zip
import os
import tables
_default_compression = dict(complevel=6, complib='blosc')
class PyTablesList(list):
def __init__(self, file, overwrite=False, parent_node='/',
group_name='array_list', group_descr='List of arrays',
prefix='data', compression=_default_compression,
load_array=False):
"""List of arrays stored in a pytables file.
        The list is initialized empty and populated with `.append()`.
Arguments:
load_array (bool): if True, read the data and put numpy arrays
in the list. If False, put only pytable arrays.
`group_descr`, `prefix`, `compression` are only used if a new group is
created (for example for a new file).
"""
super(PyTablesList, self).__init__()
self.parent_node = parent_node
self.group_name = group_name
self.load_array = load_array
# Ignored if group exist
self.size = 0
self.prefix = prefix
self.compression = compression
        ## Retrieve the file reference
if type(file) is tables.file.File:
self.data_file = file
elif os.path.exists(file) and not overwrite:
self.data_file = tables.open_file(file, mode = "a")
else:
self.data_file = tables.open_file(file, mode = "w",
title = "Container for lists of arrays")
## Create the group if not existent
if group_name not in self.data_file.get_node(parent_node):
self.data_file.create_group(parent_node, group_name,
title=group_descr)
self.group = self.data_file.get_node(parent_node, group_name)
if 'size' in self.group._v_attrs:
## If the group was already present read the data
self.size = self.group._v_attrs.size
self.prefix = self.group._v_attrs.prefix
for i in range(self.group._v_attrs.size):
array_ = self.group._f_get_child(self.get_name(i))
if self.load_array:
array_ = array_[:]
super(PyTablesList, self).append(array_)
else:
## If a new group save some metadata
self.group._v_attrs.size = self.size
self.group._v_attrs.prefix = self.prefix
self.group._v_attrs.load_array = self.load_array
def get_name(self, i=None):
if i is None:
i = self.size
return self.prefix + str(i)
def append(self, ndarray):
name = self.get_name()
comp_filter = tables.Filters(**self.compression)
tarray = self.data_file.create_carray(self.group, name, obj=ndarray,
filters=comp_filter)
self.data_file.flush()
super(PyTablesList, self).append(tarray)
#print(self.prefix+str(self.size), ndarray)
self.size += 1
self.group._v_attrs.size = self.size
def get_array_list(self):
return [array_[:] for array_ in self] | gpl-2.0 | 4,568,916,660,779,588,000 | 35.524752 | 78 | 0.596258 | false | 3.915074 | false | false | false |
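# A minimal usage sketch for the PyTablesList class above (not part of the
# original module); the file name and array contents are illustrative only.
if __name__ == "__main__":
    import numpy as np

    plist = PyTablesList("example_list.h5", overwrite=True)
    plist.append(np.arange(5))
    plist.append(np.zeros(3))
    print(len(plist))              # -> 2
    print(plist[0][:])             # slicing reads the first array into memory
    print(plist.get_array_list())  # all arrays as in-memory copies
    plist.data_file.close()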
jdeligt/Genetics | FixFreebayesHeader.py | 3 | 1824 | #!/usr/local/bin/python
#fix freebayes header
import os
import glob
from optparse import OptionParser
# -------------------------------------------------
parser = OptionParser()
parser.add_option("--vcfdir", dest="vcfdir", help="Path to directory containing VCF files", default=False)
(options, args) = parser.parse_args()
# -------------------------------------------------
SAMPLEHEADER="#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT %s %s\n"
# -------------------------------------------------
# CHECK AND GENERATE GZ AND TBI
def fix_header(vcffile):
freader = open(vcffile, 'r')
fwriter = open(vcffile.replace(".vcf","_fixed.vcf"), 'w')
samples = []
header = False
for line in freader:
if line.startswith("##"):
fwriter.write(line)
if line.startswith("##commandline="):
##commandline="/home/cog/pprins/run6/bin/freebayes -f /hpc/cog_bioinf/GENOMES/human_GATK_GRCh37/GRCh37_gatk.fasta -C 3 -t /hpc/cog_bioinf/ENRICH/kinome_design_SS_V2_110811.bed --pooled-discrete --genotype-qualities --min-coverage 5 --no-indels --no-mnps --no-complex /home/cog/pprins/run6/data/freebayes/merged_MBC019R_F3_20130528_rmdup_kinome_design_SS_V2_110811.bam /home/cog/pprins/run6/data/freebayes/merged_MBC019T_F3_20130528_rmdup_kinome_design_SS_V2_110811.bam""
items = line.strip().split(" ")[-2:]
#print items
samples = [k.split("_")[1] for k in items]
#print samples
elif not header:
fwriter.write(SAMPLEHEADER%(samples[0], samples[1]))
header=True
else:
fwriter.write(line)
freader.close()
fwriter.close()
# -------------------------------------------------
file_list = glob.glob(os.path.join(options.vcfdir, "*.vcf"))
for vcf_file in file_list:
fix_header(vcf_file)
os.system("mkdir fixed")
os.system("mv *_fixed.vcf fixed")
# -------------------------------------------------
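# Example invocation (the directory path is hypothetical):
#   python FixFreebayesHeader.py --vcfdir /path/to/freebayes_vcfs
# Every *.vcf in that directory gets a *_fixed.vcf copy whose sample column
# names are taken from the merged BAM file names on the ##commandline= header
# line; the fixed files are then moved into ./fixed.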
| mit | -654,241,141,323,404,800 | 35.48 | 474 | 0.609101 | false | 2.937198 | false | false | false |
ddico/odoo | addons/resource/models/res_company.py | 29 | 1286 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class ResCompany(models.Model):
_inherit = 'res.company'
resource_calendar_ids = fields.One2many(
'resource.calendar', 'company_id', 'Working Hours')
resource_calendar_id = fields.Many2one(
'resource.calendar', 'Default Working Hours', ondelete='restrict')
@api.model
def _init_data_resource_calendar(self):
self.search([('resource_calendar_id', '=', False)])._create_resource_calendar()
def _create_resource_calendar(self):
for company in self:
company.resource_calendar_id = self.env['resource.calendar'].create({
'name': _('Standard 40 hours/week'),
'company_id': company.id
}).id
@api.model
def create(self, values):
company = super(ResCompany, self).create(values)
if not company.resource_calendar_id:
company.sudo()._create_resource_calendar()
# calendar created from form view: no company_id set because record was still not created
if not company.resource_calendar_id.company_id:
company.resource_calendar_id.company_id = company.id
return company
| agpl-3.0 | -3,491,781,130,162,775,000 | 36.823529 | 97 | 0.641524 | false | 4.06962 | false | false | false |
smartcities-livinglab-udg/IOT-SensorNetwork | PythonMQTT/sendMetadata.py | 1 | 2316 | #!/usr/bin/env python
'''
* Description: This code is a bridge between Arduino and Linux in an Intel Galileo Gen 1 board
used for send data via MQTT. The user must specify the host and topic in order to send data.
* Author: Gustavo Adrián Jiménez González (Ruxaxup)
* Date: 03 - 27 - 2017
* Version: 1.0.0
* Contact: [email protected]
'''
import sys
import os
import paho.mqtt.publish as publish
import errno
from socket import error as socket_error
host = ""
topic = ""
idSensor = {4:"temperature",5:"pressure",6:"light",3:"noise",0:"power",2:"gas",1:"humidity"}
readings = {"temperature":"C",
"pressure":"kPa",
"light":"lumens",
"noise":"dB",
"power":"W",
"gas":"ppm",
"humidity":"%"}
gases = {0:"NH3",1:"CO",2:"NO2",3:"C3H8",4:"C4H10",5:"CH4",6:"H2",7:"C2H5OH"}
statusDict = {"U":"update","S":"start"}
deli = ';'
def buildMetaData():
binary = open('binary.txt','w')
binario = str(bin(int(sys.argv[2]))[2:])
binary.write(binario)
binary.close()
binSize = len(binario)
diferencia = 7 - binSize
	# Pad the string with zeros
for i in range(0,diferencia):
binario = '0' + binario
print "Binary string: " + binario
sensorsString = ""
for x in range(0,7):
print str(x) + " " + idSensor[x] + " -- " + binario[x]
if binario[x] != '0':
if idSensor[x] == "gas":
sensorsString = sensorsString + "setIni" + deli
for gas in range(0,8):
sensorsString = sensorsString + gases[gas] + deli + readings[idSensor[x]] + deli
sensorsString = sensorsString + "setEnd" + deli
else:
sensorsString = sensorsString + idSensor[x] + deli + readings[idSensor[x]] + deli
return sensorsString
if len(sys.argv) == 3:
mensaje = buildMetaData()
f = open('/sys/class/net/eth0/address','r')
mac = f.readline()
mensaje = mensaje + "macAddress" + deli + mac.strip('\n') + deli + "status" + deli + statusDict[sys.argv[1]]
print mensaje
f.close()
try:
publish.single(topic, mensaje, hostname=host)
except socket_error as serr:
print "No internet connection."
else:
print "3 arguments are needed."
| lgpl-2.1 | -2,066,873,295,816,355,000 | 33.014706 | 116 | 0.579767 | false | 3.067639 | false | false | false |
cshallue/models | research/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.py | 2 | 3250 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV1 PPN features."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import mobilenet_v1
slim = tf.contrib.slim
class SSDMobileNetV1PpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV1 PPN features."""
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(
is_training=None, regularize_depthwise=True)):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams
else context_manager.IdentityContextManager()):
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=0,
num_layers=6,
image_features={
'image_features': image_features['Conv2d_11_pointwise']
})
return feature_maps.values()
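# Shape sketch (illustrative, not part of the original file): with a 300x300
# input and depth_multiplier=1.0, Conv2d_11_pointwise comes out at roughly
# 19x19, and the pooling pyramid then yields 6 maps of about 19x19, 10x10,
# 5x5, 3x3, 2x2 and 1x1; preprocess() simply rescales pixel values into [-1, 1].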
| apache-2.0 | 5,200,068,533,454,063,000 | 37.690476 | 80 | 0.667077 | false | 4.037267 | false | false | false |
burzumishi/XCompWMaker | CompMgrs/compton/bin/compton-convgen.py | 1 | 3785 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fileencoding=utf-8
import math, argparse
class CGError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class CGBadArg(CGError): pass
class CGInternal(CGError): pass
def mbuild(width, height):
"""Build a NxN matrix filled with 0."""
result = list()
for i in range(height):
result.append(list())
for j in range(width):
result[i].append(0.0)
return result
def mdump(matrix):
"""Dump a matrix in natural format."""
for col in matrix:
print("[ ", end = '');
for ele in col:
print(format(ele, "13.6g") + ", ", end = " ")
print("],")
def mdumpcompton(matrix):
"""Dump a matrix in compton's format."""
width = len(matrix[0])
height = len(matrix)
print("{},{},".format(width, height), end = '')
for i in range(height):
for j in range(width):
if int(height / 2) == i and int(width / 2) == j:
                continue
print(format(matrix[i][j], ".6f"), end = ",")
print()
def mnormalize(matrix):
"""Scale a matrix according to the value in the center."""
width = len(matrix[0])
height = len(matrix)
factor = 1.0 / matrix[int(height / 2)][int(width / 2)]
if 1.0 == factor: return matrix
for i in range(height):
for j in range(width):
matrix[i][j] *= factor
return matrix
def mmirror4(matrix):
"""Do a 4-way mirroring on a matrix from top-left corner."""
width = len(matrix[0])
height = len(matrix)
for i in range(height):
for j in range(width):
x = min(i, height - 1 - i)
y = min(j, width - 1 - j)
matrix[i][j] = matrix[x][y]
return matrix
def gen_gaussian(width, height, factors):
"""Build a Gaussian blur kernel."""
if width != height:
raise CGBadArg("Cannot build an uneven Gaussian blur kernel.")
size = width
sigma = float(factors.get('sigma', 0.84089642))
result = mbuild(size, size)
for i in range(int(size / 2) + 1):
for j in range(int(size / 2) + 1):
diffx = i - int(size / 2);
diffy = j - int(size / 2);
result[i][j] = 1.0 / (2 * math.pi * sigma) * pow(math.e, - (diffx * diffx + diffy * diffy) / (2 * sigma * sigma))
mnormalize(result)
mmirror4(result)
return result
def gen_box(width, height, factors):
"""Build a box blur kernel."""
result = mbuild(width, height)
for i in range(height):
for j in range(width):
result[i][j] = 1.0
return result
def gen_invalid(width, height, factors):
raise CGBadArg("Unknown kernel type.")
def args_readfactors(lst):
"""Parse the factor arguments."""
factors = dict()
if lst:
for s in lst:
res = s.partition('=')
if not res[0]:
raise CGBadArg("Factor has no key.")
if not res[2]:
raise CGBadArg("Factor has no value.")
factors[res[0]] = float(res[2])
return factors
parser = argparse.ArgumentParser(description='Build a convolution kernel.')
parser.add_argument('type', help='Type of convolution kernel. May be "gaussian" (factor sigma = 0.84089642) or "box".')
parser.add_argument('width', type=int, help='Width of convolution kernel. Must be an odd number.')
parser.add_argument('height', nargs='?', type=int, help='Height of convolution kernel. Must be an odd number. Equals to width if omitted.')
parser.add_argument('-f', '--factor', nargs='+', help='Factors of the convolution kernel, in name=value format.')
parser.add_argument('--dump-compton', action='store_true', help='Dump in compton format.')
args = parser.parse_args()
width = args.width
height = args.height
if not height:
height = width
if not (width > 0 and height > 0):
raise CGBadArg("Invalid width/height.")
factors = args_readfactors(args.factor)
funcs = dict(gaussian = gen_gaussian, box = gen_box)
matrix = (funcs.get(args.type, gen_invalid))(width, height, factors)
if args.dump_compton:
mdumpcompton(matrix)
else:
mdump(matrix)
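# Example invocations:
#
#   ./compton-convgen.py gaussian 7 --factor sigma=0.84089642
#   ./compton-convgen.py box 3 5 --dump-compton
#
# The first prints a 7x7 Gaussian kernel in the natural matrix format; the
# second prints a 3x5 box kernel in compton's "width,height,..." format.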
| gpl-2.0 | -4,534,203,917,627,179,000 | 27.674242 | 139 | 0.658388 | false | 2.950117 | false | false | false |
WillCusick/HashRace | analyzer.py | 1 | 4267 | import pika
import json
import redis
from datetime import timedelta, datetime
from consumer import HRConsumer
red = None
expiryTTL = timedelta(minutes=5)
candidate_dict = None
party_dict = None
def callback(ch, method, properties, body):
data = json.loads(body)
geoCoords = None
if 'coordinates' in data and data['coordinates'] is not None:
geoCoords = {'type': 'Point',
'coordinates': data['coordinates']['coordinates']}
elif 'place' in data and data['place'] is not None:
if 'bounding_box' in data['place'] and data['place']['bounding_box'] is not None:
coordinates = data['place']['bounding_box']['coordinates'][0]
num_c = len(coordinates)
coords = [0.0, 0.0]
# Note: faster to do one div at the end but may lose
# some precision because floating-points are more
# accurate closer to 0
for c in coordinates:
coords[0] += c[0]
coords[1] += c[1]
coords[0] /= num_c
coords[1] /= num_c
geoCoords = {'type':'Point', 'coordinates':coords}
if geoCoords is not None:
tweet = {'geometry': geoCoords,
'properties': categorize(data)}
# Ignore people with no direct hashtags, very rare
if bool(tweet['properties']):
tweet['properties']['id'] = data['id'].encode('ascii')
store(tweet)
def categorize(data):
dict = {}
for hash in data['hashtags']:
if hash['text'].lower() in candidate_dict:
dict['candidate'] = candidate_dict[hash['text'].lower()]
if hash['text'].lower() in party_dict:
dict['party'] = party_dict[hash['text'].lower()]
return dict
def store(tweet):
datastring = str(tweet) + ":\\:" + str(datetime.now()+expiryTTL)
red.sadd("tweets", datastring)
if __name__ == "__main__":
red = redis.StrictRedis(host='localhost', port=6379, db=0)
candidate_dict = {'hillary2016': 'Hillary',
'hillaryforpresident': 'Hillary',
'clinton2016': 'Hillary',
'imwithher': 'Hillary',
'bernie2016': 'Bernie',
'bernieforpresident': 'Bernie',
'sanders2016': 'Bernie',
'voteberniesanders': 'Bernie',
'feelthebern': 'Bernie',
'debatewithbernie': 'Bernie',
'trump2016': 'Trump',
'donaldtrumpforpresident': 'Trump',
'trumpforpresident2016': 'Trump',
'votetrump2016': 'Trump',
'votetrump': 'Trump',
'makeamericagreatagain': 'Trump',
'bencarsonforprez': 'Carson',
'carson2016': 'Carson',
'omalley2016': 'OMalley',
'newleadership': 'OMalley',
'actionsnotwords': 'OMalley'}
party_dict = {'hillary2016': 'democrat',
'hillaryforpresident': 'democrat',
'clinton2016': 'democrat',
'imwithher': 'democrat',
'bernie2016': 'democrat',
'bernieforpresident': 'democrat',
'sanders2016': 'democrat',
'voteberniesanders': 'democrat',
'feelthebern':'democrat',
'debatewithbernie': 'democrat',
'omalley2016': 'democrat',
'newleadership': 'democrat',
'actionsnotwords': 'democrat',
'donaldtrumpforpresident': 'republican',
'trump2016': 'republican',
'trumpforpresident2016': 'republican',
'votetrump2016': 'republican',
'votetrump': 'republican',
'makeamericagreatagain': 'republican',
'bencarsonforprez': 'republican',
'carson2016': 'republican'}
rmq_connection = pika.BlockingConnection(
pika.ConnectionParameters('localhost'))
rmq_consumer = HRConsumer(rmq_connection, callback)
rmq_consumer.consume()
| apache-2.0 | -5,391,561,238,380,275,000 | 39.254717 | 89 | 0.515585 | false | 3.925483 | false | false | false |
wmaciel/mbtc-api | mbtcapi/public.py | 1 | 3394 | # -*- coding: utf-8 *-*
import httplib
import json
import common
def getTicker(coin):
""" Return the ticker information of the current state of the exchange.
The ticker contains a summary of the current state of the exchange for a
given coin.
This information is given as a dict in the following arrangement:
{
"ticker": {
"high": Highest traded price (BRL) today,
"low": Lowest traded price (BRL) today,
"vol": Amount of coins (LTC or BTC) traded today,
"last": Price (BRL) of the last transaction,
"buy": Current highest price (BRL) offered by people buying,
"sell": Current lowest price (BRL) offered by people selling,
"date": timestamp of the last ticker update
}
}
Arguments:
coin -- "btc" or "ltc", defines which coin the info is about
"""
headers = {"Content-type": "application/x-www-form-urlencoded"}
connection = httplib.HTTPSConnection(common.mbDomain)
address = "/api/ticker/"
if coin == 'ltc':
address = address[:-1] + "_litecoin" + address[-1:]
connection.request("GET", address, "", headers)
response = connection.getresponse()
output = json.load(response)
return common.convert(output)
def getOrderBook(coin):
"""Return the active orders for the given coin
The orders are given as a dict of lists of lists in the following
arrangement
{
"asks": list of the selling offers available.
"bids": list of the buying offers available.
}
Where each offer is a list of two elements [price per unit, amount]
Arguments:
coin -- "btc" or "ltc", defines which coin the info is about
"""
headers = {"Content-type": "application/x-www-form-urlencoded"}
connection = httplib.HTTPSConnection(common.mbDomain)
address = "/api/orderbook/"
if coin == 'ltc':
address = address[:-1] + "_litecoin" + address[-1:]
connection.request("GET", address, "", headers)
response = connection.getresponse()
output = json.load(response)
return common.convert(output)
def getTrades(coin, timeBegin=None, timeEnd=None):
""" Return the history of trades of a given coin in a period of time
The history of the transactions is given as a list of dicts in the
following arrangement:
[
{
"date": Timestamp of the transaction,
"price": Price (BRL) per unit of coin (LTC or BTC),
"amount": Amount of coin (LTC or BTC),
"tid": Transaction ID,
"type": 'buy' or 'sell'
}
]
Arguments:
coin -- "btc" or "ltc", defines which coin the info is about
timeBegin -- (optional) Timestamp of the beginning of the wanted history
timeEnd -- (optional) Timestamp of the end of the wanted history
"""
headers = {"Content-type": "application/x-www-form-urlencoded"}
connection = httplib.HTTPSConnection(common.mbDomain)
address = "/api/trades/"
if coin == 'ltc':
address = address[:-1] + "_litecoin" + address[-1:]
if timeBegin is not None:
address = address[:-1] + str(timeBegin) + address[-1:]
if timeEnd is not None:
address = address[:-1] + str(timeEnd) + address[-1:]
connection.request("GET", address, "", headers)
response = connection.getresponse()
output = json.load(response)
return common.convert(output)
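# A minimal usage sketch (not part of the original module); it assumes network
# access to the exchange API that common.mbDomain points at.
if __name__ == "__main__":
    ticker = getTicker("btc")
    print(ticker["ticker"]["last"])   # price (BRL) of the last BTC trade
    book = getOrderBook("btc")
    print(book["asks"][0])            # best sell offer: [price per unit, amount]
    trades = getTrades("ltc")
    print(trades[0]["price"])         # price of one LTC trade in the returned history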
| mit | -7,228,638,762,171,353,000 | 30.137615 | 76 | 0.63789 | false | 3.923699 | false | false | false |
sysopfb/Malware_Scripts | qakbot/blzpack.py | 1 | 2649 | """
Example using brieflz
I have another example on a fork of BriefLz with test python scripts here: https://github.com/sysopfb/brieflz
"""
from ctypes import *
import binascii
import zlib
import struct
try:
brieflz = cdll.LoadLibrary('./blzpack_lib.so')
except OSError:
brieflz = cdll.LoadLibrary('./qakbot/blzpack_lib.so')
DEFAULT_BLOCK_SIZE = 1024 * 1024
#MAX_BLOCK_SIZE = (0xFFFFFFFFUL - 0xFFFFFFFFUL / 9UL - 64UL)
def compress_data(data, blocksize, level):
compressed_data = ""
while len(data) > 0:
buf = create_string_buffer(data[:blocksize])
cb = c_int(len(buf))
cbOut = brieflz.blz_max_packed_size(blocksize)
packed = create_string_buffer(cbOut)
workmem = create_string_buffer(brieflz.blz_workmem_size_level(blocksize,1))
cbOut = c_int(cbOut)
retval = brieflz.blz_pack_level(byref(buf), byref(packed), cb, byref(workmem), level)
if retval > 0:
temp = packed.raw[:retval]
tempret = struct.pack(">IIIIII", 1651276314, level, len(temp), zlib.crc32(temp) % (1<<32), len(buf), zlib.crc32(data[:blocksize])%(1<<32)) + temp
compressed_data += tempret
else:
print("Compression Error")
return None
data = data[blocksize:]
return compressed_data
def decompress_data(data, blocksize=DEFAULT_BLOCK_SIZE, level=1):
decompressed_data = ""
max_packed_size = brieflz.blz_max_packed_size(blocksize);
(magic,level,packedsize,crc,hdr_depackedsize,crc2) = struct.unpack_from('>IIIIII', data)
data = data[24:]
while magic == 0x626C7A1A and len(data) > 0:
compressed_data = create_string_buffer(data[:packedsize])
workdata = create_string_buffer(blocksize)
depackedsize = brieflz.blz_depack(byref(compressed_data), byref(workdata), c_int(hdr_depackedsize))
if depackedsize != hdr_depackedsize:
print("Decompression error")
print("DepackedSize: "+str(depackedsize) + "\nHdrVal: "+str(hdr_depackedsize))
return None
decompressed_data += workdata.raw[:depackedsize]
data = data[packedsize:]
if len(data) > 0:
(magic,level,packedsize,crc,hdr_depackedsize,crc2) = struct.unpack_from('>IIIIII', data)
data = data[24:]
else:
break
return decompressed_data
def main():
#blocksize = DEFAULT_BLOCK_SIZE
blocksize = 100
level = 1
data = "This is a test of brieflz compression"*100
retval = compress_data(data, blocksize, level)
if retval != None:
print("Compression SUCCESS!\nCompressed Data: ")
print(binascii.hexlify(retval))
retval = decompress_data(retval, blocksize, level)
if retval != None and retval == data:
print("Decompress SUCCESS!\nDecompress Data: ")
print(retval)
if __name__ == "__main__":
main()
| mit | -6,509,980,660,653,067,000 | 31.703704 | 169 | 0.697244 | false | 2.953177 | false | false | false |
Cnidarias/al-go-rithms | graphsearch/topological_sorting/python/topological_sorting.py | 5 | 1347 | graph_tasks = { "wash the dishes" : ["have lunch"],
"cook food" : ["have lunch"],
"have lunch" : [],
"wash laundry" : ["dry laundry"],
"dry laundry" : ["fold laundry"],
"fold laundry" : [] }
def dfs_topsort(graph): # recursive dfs with
L = [] # additional list for order of nodes
color = { u : "white" for u in graph }
found_cycle = [False]
for u in graph:
if color[u] == "white":
dfs_visit(graph, u, color, L, found_cycle)
if found_cycle[0]:
break
if found_cycle[0]: # if there is a cycle,
L = [] # then return an empty list
L.reverse() # reverse the list
return L # L contains the topological sort
def dfs_visit(graph, u, color, L, found_cycle):
if found_cycle[0]:
return
color[u] = "gray"
for v in graph[u]:
if color[v] == "gray":
found_cycle[0] = True
return
if color[v] == "white":
dfs_visit(graph, v, color, L, found_cycle)
color[u] = "black" # when we're done with u,
L.append(u) # add u to list (reverse it later!)
order = dfs_topsort(graph_tasks)
for task in order:
print(task)
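# One possible output (the exact order depends on dict iteration order, but a
# prerequisite always appears before the tasks that need it):
#   wash laundry
#   dry laundry
#   fold laundry
#   cook food
#   wash the dishes
#   have lunch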
| mit | 6,308,431,731,876,907,000 | 30.325581 | 68 | 0.481069 | false | 3.516971 | false | false | false |
silveregg/moto | tests/test_rds/test_rds.py | 2 | 10465 | from __future__ import unicode_literals
import boto.rds
import boto.vpc
from boto.exception import BotoServerError
import sure # noqa
from moto import mock_ec2, mock_rds
from tests.helpers import disable_on_py3
@disable_on_py3()
@mock_rds
def test_create_database():
conn = boto.rds.connect_to_region("us-west-2")
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
security_groups=["my_sg"])
database.status.should.equal('available')
database.id.should.equal("db-master-1")
database.allocated_storage.should.equal(10)
database.instance_class.should.equal("db.m1.small")
database.master_username.should.equal("root")
database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306))
database.security_groups[0].name.should.equal('my_sg')
@disable_on_py3()
@mock_rds
def test_get_databases():
conn = boto.rds.connect_to_region("us-west-2")
list(conn.get_all_dbinstances()).should.have.length_of(0)
conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
conn.create_dbinstance("db-master-2", 10, 'db.m1.small', 'root', 'hunter2')
list(conn.get_all_dbinstances()).should.have.length_of(2)
databases = conn.get_all_dbinstances("db-master-1")
list(databases).should.have.length_of(1)
databases[0].id.should.equal("db-master-1")
@mock_rds
def test_describe_non_existant_database():
conn = boto.rds.connect_to_region("us-west-2")
conn.get_all_dbinstances.when.called_with("not-a-db").should.throw(BotoServerError)
@disable_on_py3()
@mock_rds
def test_delete_database():
conn = boto.rds.connect_to_region("us-west-2")
list(conn.get_all_dbinstances()).should.have.length_of(0)
conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
list(conn.get_all_dbinstances()).should.have.length_of(1)
conn.delete_dbinstance("db-master-1")
list(conn.get_all_dbinstances()).should.have.length_of(0)
@mock_rds
def test_delete_non_existant_database():
conn = boto.rds.connect_to_region("us-west-2")
conn.delete_dbinstance.when.called_with("not-a-db").should.throw(BotoServerError)
@mock_rds
def test_create_database_security_group():
conn = boto.rds.connect_to_region("us-west-2")
security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
security_group.name.should.equal('db_sg')
security_group.description.should.equal("DB Security Group")
list(security_group.ip_ranges).should.equal([])
@mock_rds
def test_get_security_groups():
conn = boto.rds.connect_to_region("us-west-2")
list(conn.get_all_dbsecurity_groups()).should.have.length_of(0)
conn.create_dbsecurity_group('db_sg1', 'DB Security Group')
conn.create_dbsecurity_group('db_sg2', 'DB Security Group')
list(conn.get_all_dbsecurity_groups()).should.have.length_of(2)
databases = conn.get_all_dbsecurity_groups("db_sg1")
list(databases).should.have.length_of(1)
databases[0].name.should.equal("db_sg1")
@mock_rds
def test_get_non_existant_security_group():
conn = boto.rds.connect_to_region("us-west-2")
conn.get_all_dbsecurity_groups.when.called_with("not-a-sg").should.throw(BotoServerError)
@mock_rds
def test_delete_database_security_group():
conn = boto.rds.connect_to_region("us-west-2")
conn.create_dbsecurity_group('db_sg', 'DB Security Group')
list(conn.get_all_dbsecurity_groups()).should.have.length_of(1)
conn.delete_dbsecurity_group("db_sg")
list(conn.get_all_dbsecurity_groups()).should.have.length_of(0)
@mock_rds
def test_delete_non_existant_security_group():
conn = boto.rds.connect_to_region("us-west-2")
conn.delete_dbsecurity_group.when.called_with("not-a-db").should.throw(BotoServerError)
@disable_on_py3()
@mock_rds
def test_security_group_authorize():
conn = boto.rds.connect_to_region("us-west-2")
security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
list(security_group.ip_ranges).should.equal([])
security_group.authorize(cidr_ip='10.3.2.45/32')
security_group = conn.get_all_dbsecurity_groups()[0]
list(security_group.ip_ranges).should.have.length_of(1)
security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32')
@disable_on_py3()
@mock_rds
def test_add_security_group_to_database():
conn = boto.rds.connect_to_region("us-west-2")
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
database.modify(security_groups=[security_group])
database = conn.get_all_dbinstances()[0]
list(database.security_groups).should.have.length_of(1)
database.security_groups[0].name.should.equal("db_sg")
@mock_ec2
@mock_rds
def test_add_database_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24")
subnet_ids = [subnet1.id, subnet2.id]
conn = boto.rds.connect_to_region("us-west-2")
subnet_group = conn.create_db_subnet_group("db_subnet", "my db subnet", subnet_ids)
subnet_group.name.should.equal('db_subnet')
subnet_group.description.should.equal("my db subnet")
list(subnet_group.subnet_ids).should.equal(subnet_ids)
@mock_ec2
@mock_rds
def test_describe_database_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
conn = boto.rds.connect_to_region("us-west-2")
conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id])
list(conn.get_all_db_subnet_groups()).should.have.length_of(2)
list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1)
conn.get_all_db_subnet_groups.when.called_with("not-a-subnet").should.throw(BotoServerError)
@mock_ec2
@mock_rds
def test_delete_database_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
conn = boto.rds.connect_to_region("us-west-2")
conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
list(conn.get_all_db_subnet_groups()).should.have.length_of(1)
conn.delete_db_subnet_group("db_subnet1")
list(conn.get_all_db_subnet_groups()).should.have.length_of(0)
conn.delete_db_subnet_group.when.called_with("db_subnet1").should.throw(BotoServerError)
@disable_on_py3()
@mock_ec2
@mock_rds
def test_create_database_in_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
conn = boto.rds.connect_to_region("us-west-2")
conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small',
'root', 'hunter2', db_subnet_group_name="db_subnet1")
database = conn.get_all_dbinstances("db-master-1")[0]
database.subnet_group.name.should.equal("db_subnet1")
@disable_on_py3()
@mock_rds
def test_create_database_replica():
conn = boto.rds.connect_to_region("us-west-2")
primary = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
replica = conn.create_dbinstance_read_replica("replica", "db-master-1", "db.m1.small")
replica.id.should.equal("replica")
replica.instance_class.should.equal("db.m1.small")
status_info = replica.status_infos[0]
status_info.normal.should.equal(True)
status_info.status_type.should.equal('read replication')
status_info.status.should.equal('replicating')
primary = conn.get_all_dbinstances("db-master-1")[0]
primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
conn.delete_dbinstance("replica")
primary = conn.get_all_dbinstances("db-master-1")[0]
list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@disable_on_py3()
@mock_rds
def test_create_cross_region_database_replica():
west_1_conn = boto.rds.connect_to_region("us-west-1")
west_2_conn = boto.rds.connect_to_region("us-west-2")
primary = west_1_conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1"
replica = west_2_conn.create_dbinstance_read_replica(
"replica",
primary_arn,
"db.m1.small",
)
primary = west_1_conn.get_all_dbinstances("db-master-1")[0]
primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
replica = west_2_conn.get_all_dbinstances("replica")[0]
replica.instance_class.should.equal("db.m1.small")
west_2_conn.delete_dbinstance("replica")
primary = west_1_conn.get_all_dbinstances("db-master-1")[0]
list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@disable_on_py3()
@mock_rds
def test_connecting_to_us_east_1():
# boto does not use us-east-1 in the URL for RDS,
# and that broke moto in the past:
# https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285
conn = boto.rds.connect_to_region("us-east-1")
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
security_groups=["my_sg"])
database.status.should.equal('available')
database.id.should.equal("db-master-1")
database.allocated_storage.should.equal(10)
database.instance_class.should.equal("db.m1.small")
database.master_username.should.equal("root")
database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306))
database.security_groups[0].name.should.equal('my_sg')
@disable_on_py3()
@mock_rds
def test_create_database_with_iops():
conn = boto.rds.connect_to_region("us-west-2")
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000)
database.status.should.equal('available')
database.iops.should.equal(6000)
# boto>2.36.0 may change the following property name to `storage_type`
database.StorageType.should.equal('io1')
| apache-2.0 | -9,063,545,334,461,242,000 | 34.11745 | 105 | 0.693454 | false | 2.878955 | true | false | false |
jayhetee/dask | dask/imperative.py | 2 | 9914 | import operator
from functools import partial, wraps
from itertools import chain, count
from collections import Iterator
from toolz import merge, unique, curry
from .optimize import cull, fuse
from .utils import concrete
from . import base
from .compatibility import apply
from . import threaded
__all__ = ['compute', 'do', 'value', 'Value']
def flat_unique(ls):
"""Flatten ``ls``, filter by unique id, and return a list"""
return list(unique(chain.from_iterable(ls), key=id))
def unzip(ls, nout):
"""Unzip a list of lists into ``nout`` outputs."""
out = list(zip(*ls))
if not out:
out = [()] * nout
return out
def to_task_dasks(expr):
"""Normalize a python object and extract all sub-dasks.
- Replace ``Values`` with their keys
- Convert literals to things the schedulers can handle
- Extract dasks from all enclosed values
Parameters
----------
expr : object
The object to be normalized. This function knows how to handle
``Value``s, as well as most builtin python types.
Returns
-------
task : normalized task to be run
dasks : list of dasks that form the dag for this task
Examples
--------
>>> a = value(1, 'a')
>>> b = value(2, 'b')
>>> task, dasks = to_task_dasks([a, b, 3])
>>> task # doctest: +SKIP
(list, ['a', 'b', 3])
>>> dasks # doctest: +SKIP
[{'a': 1}, {'b': 2}]
>>> task, dasks = to_task_dasks({a: 1, b: 2})
>>> task # doctest: +SKIP
(dict, (list, [(list, ['a', 1]), (list, ['b', 2])]))
>>> dasks # doctest: +SKIP
[{'a': 1}, {'b': 2}]
"""
if isinstance(expr, Value):
return expr.key, expr._dasks
elif isinstance(expr, base.Base):
name = tokenize(str(expr), True)
keys = expr._keys()
dsk = expr._optimize(expr.dask, keys)
dsk[name] = (expr._finalize, expr, (concrete, keys))
return name, [dsk]
elif isinstance(expr, (Iterator, list, tuple, set)):
args, dasks = unzip(map(to_task_dasks, expr), 2)
args = list(args)
dasks = flat_unique(dasks)
# Ensure output type matches input type
if isinstance(expr, (list, tuple, set)):
return (type(expr), args), dasks
else:
return args, dasks
elif isinstance(expr, dict):
args, dasks = to_task_dasks(list([k, v] for k, v in expr.items()))
return (dict, args), dasks
else:
return expr, []
tokens = ('_{0}'.format(i) for i in count(1))
def tokenize(v, pure=False):
"""Mapping function from task -> consistent name.
Parameters
----------
v : object
Any python object (or tuple of objects) that summarize the task.
pure : boolean, optional
If True, a consistent hash function is tried on the input. If this
fails, then a unique identifier is used. If False (default), then a
unique identifier is always used.
"""
# TODO: May have hash collisions...
if pure:
try:
return str(hash(v))
except TypeError:
pass
return next(tokens)
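# Illustrative note (added, not part of the original module): with pure=True,
# hashable inputs get a stable name, so repeated calls agree, while the
# default (pure=False) always mints a fresh '_<n>' identifier, e.g.
#   tokenize(('add', 1, 2), pure=True) == tokenize(('add', 1, 2), pure=True)  # True
#   tokenize(('add', 1, 2)) != tokenize(('add', 1, 2))                        # True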
def applyfunc(func, args, kwargs, pure=False):
"""Create a Value by applying a function to args.
Given a function and arguments, return a Value that represents the result
of that computation."""
args, dasks = unzip(map(to_task_dasks, args), 2)
dasks = flat_unique(dasks)
name = tokenize((func, args, frozenset(kwargs.items())), pure)
if kwargs:
func = partial(func, **kwargs)
dasks.append({name: (func,) + args})
return Value(name, dasks)
@curry
def do(func, pure=False):
"""Wraps a function so that it outputs a ``Value``.
Examples
--------
Can be used as a decorator:
>>> @do
... def add(a, b):
... return a + b
>>> res = add(1, 2)
>>> type(res) == Value
True
>>> res.compute()
3
For other cases, it may be cleaner to call ``do`` on a function at call
time:
>>> res2 = do(sum)([res, 2, 3])
>>> res2.compute()
8
``do`` also accepts an optional keyword ``pure``. If False (default), then
subsequent calls will always produce a different ``Value``. This is useful
for non-pure functions (such as ``time`` or ``random``).
>>> from random import random
>>> out1 = do(random)()
>>> out2 = do(random)()
>>> out1.key == out2.key
False
If you know a function is pure (output only depends on the input, with no
global state), then you can set ``pure=True``. This will attempt to apply a
    consistent name to the output, but will fall back to the same behavior as
``pure=False`` if this fails.
>>> @do(pure=True)
... def add(a, b):
... return a + b
>>> out1 = add(1, 2)
>>> out2 = add(1, 2)
>>> out1.key == out2.key
True
"""
@wraps(func)
def _dfunc(*args, **kwargs):
return applyfunc(func, args, kwargs, pure=pure)
return _dfunc
def optimize(dsk, keys):
dsk2 = cull(dsk, keys)
return fuse(dsk2)
def compute(*args, **kwargs):
"""Evaluate several ``Value``s at once.
Note that the only difference between this function and
``dask.base.compute`` is that this implicitly converts python objects to
``Value``s, allowing for collections of dask objects to be computed.
Examples
--------
>>> a = value(1)
>>> b = a + 2
>>> c = a + 3
>>> compute(b, c) # Compute both simultaneously
(3, 4)
>>> compute(a, [b, c]) # Works for lists of Values
(1, [3, 4])
"""
args = [value(a) for a in args]
return base.compute(*args, **kwargs)
def right(method):
"""Wrapper to create 'right' version of operator given left version"""
def _inner(self, other):
return method(other, self)
return _inner
class Value(base.Base):
"""Represents a value to be computed by dask.
Equivalent to the output from a single key in a dask graph.
"""
__slots__ = ('_key', '_dasks')
_optimize = staticmethod(optimize)
_finalize = staticmethod(lambda a, r: r[0])
_default_get = staticmethod(threaded.get)
def __init__(self, name, dasks):
object.__setattr__(self, '_key', name)
object.__setattr__(self, '_dasks', dasks)
@property
def dask(self):
return merge(*self._dasks)
@property
def key(self):
return self._key
def _keys(self):
return [self.key]
def __repr__(self):
return "Value({0})".format(repr(self.key))
def __hash__(self):
return hash(self.key)
def __dir__(self):
return list(self.__dict__.keys())
def __getattr__(self, attr):
if not attr.startswith('_'):
return do(getattr, True)(self, attr)
else:
raise AttributeError("Attribute {0} not found".format(attr))
def __setattr__(self, attr, val):
raise TypeError("Value objects are immutable")
def __setitem__(self, index, val):
raise TypeError("Value objects are immutable")
def __iter__(self):
raise TypeError("Value objects are not iterable")
def __call__(self, *args, **kwargs):
return do(apply)(self, args, kwargs)
def __bool__(self):
raise TypeError("Truth of Value objects is not supported")
__nonzero__ = __bool__
__abs__ = do(operator.abs, True)
__add__ = do(operator.add, True)
__and__ = do(operator.and_, True)
__div__ = do(operator.floordiv, True)
__eq__ = do(operator.eq, True)
__floordiv__ = do(operator.floordiv, True)
__ge__ = do(operator.ge, True)
__getitem__ = do(operator.getitem, True)
__gt__ = do(operator.gt, True)
__index__ = do(operator.index, True)
__invert__ = do(operator.invert, True)
__le__ = do(operator.le, True)
__lshift__ = do(operator.lshift, True)
__lt__ = do(operator.lt, True)
__mod__ = do(operator.mod, True)
__mul__ = do(operator.mul, True)
__ne__ = do(operator.ne, True)
__neg__ = do(operator.neg, True)
__or__ = do(operator.or_, True)
__pos__ = do(operator.pos, True)
__pow__ = do(operator.pow, True)
__radd__ = do(right(operator.add), True)
__rand__ = do(right(operator.and_), True)
__rdiv__ = do(right(operator.floordiv), True)
__rfloordiv__ = do(right(operator.floordiv), True)
__rlshift__ = do(right(operator.lshift), True)
__rmod__ = do(right(operator.mod), True)
__rmul__ = do(right(operator.mul), True)
__ror__ = do(right(operator.or_), True)
__rpow__ = do(right(operator.pow), True)
__rrshift__ = do(right(operator.rshift), True)
__rshift__ = do(operator.rshift, True)
__rsub__ = do(right(operator.sub), True)
__rtruediv__ = do(right(operator.truediv), True)
__rxor__ = do(right(operator.xor), True)
__sub__ = do(operator.sub, True)
__truediv__ = do(operator.truediv, True)
__xor__ = do(operator.xor, True)
def value(val, name=None):
"""Create a ``Value`` from a python object.
Parameters
----------
val : object
Object to be wrapped.
name : string, optional
Name to be used in the resulting dask.
Examples
--------
>>> a = value([1, 2, 3])
>>> a.compute()
[1, 2, 3]
Values can act as a proxy to the underlying object. Many operators are
supported:
>>> (a + [1, 2]).compute()
[1, 2, 3, 1, 2]
>>> a[1].compute()
2
Method and attribute access also works:
>>> a.count(2).compute()
1
Note that if a method doesn't exist, no error will be thrown until runtime:
>>> res = a.not_a_real_method()
>>> res.compute() # doctest: +SKIP
AttributeError("'list' object has no attribute 'not_a_real_method'")
"""
if isinstance(val, Value):
return val
name = name or tokenize(val, True)
task, dasks = to_task_dasks(val)
dasks.append({name: task})
return Value(name, dasks)
| bsd-3-clause | -7,175,269,820,245,830,000 | 27.164773 | 79 | 0.58029 | false | 3.511867 | false | false | false |
koduj-z-klasa/python101 | docs/podstawy/przyklady/06_slownik_csv.py | 1 | 2677 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os # module providing the isfile() function
import csv # module for handling the csv format
slownik = {} # empty dictionary
sFile = "slownik.csv" # name of the file containing the words and their translations
def otworz(plik):
    if os.path.isfile(plik): # does the dictionary file exist?
        with open(plik, newline='') as plikcsv: # open the file for reading
            tresc = csv.reader(plikcsv)
            for linia in tresc: # go through the lines one by one
                slownik[linia[0]] = linia[1:]
    return len(slownik) # return the number of entries in the dictionary
def zapisz(slownik):
    # open the file for writing; an existing file will be overwritten(!)
with open(sFile, "w", newline='') as plikcsv:
tresc = csv.writer(plikcsv)
for wobcy in slownik:
            # write the foreign word followed by its meanings, without
            # mutating the list stored in the dictionary
            tresc.writerow([wobcy] + slownik[wobcy])
def oczysc(str):
    str = str.strip() # strip leading and trailing whitespace
    str = str.lower() # convert to lowercase
return str
def main(args):
print("""Podaj dane w formacie:
wyraz obcy: znaczenie1, znaczenie2
Aby zakończyć wprowadzanie danych, podaj 0.
""")
    # wobce = set() # empty set of foreign words
    # flag indicating that the user has added to or changed the dictionary
nowy = False
ileWyrazow = otworz(sFile)
print("Wpisów w bazie:", ileWyrazow)
    # main program loop
while True:
dane = input("Podaj dane: ")
t = dane.split(":")
        wobcy = t[0].strip().lower() # does the same as the oczysc() function
if wobcy == 'koniec':
break
        elif dane.count(":") == 1: # check that the input is well-formed
if wobcy in slownik:
print("Wyraz", wobcy, " i jego znaczenia są już w słowniku.")
op = input("Zastąpić wpis (t/n)? ")
            # is the word not in the dictionary yet, or do we want to replace it?
if wobcy not in slownik or op == "t":
                znaczenia = t[1].split(",") # store the meanings in a list
                znaczenia = list(map(oczysc, znaczenia)) # clean up the list
slownik[wobcy] = znaczenia
nowy = True
else:
print("Błędny format!")
if nowy:
zapisz(slownik)
print("=" * 50)
print("{0: <15}{1: <40}".format("Wyraz obcy", "Znaczenia"))
print("=" * 50)
for wobcy in slownik:
print("{0: <15}{1: <40}".format(wobcy, ",".join(slownik[wobcy])))
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| mit | -2,994,548,995,392,500,700 | 31.395062 | 77 | 0.587652 | false | 2.418433 | false | false | false |
bruceyou/NewsBlur | apps/profile/forms.py | 1 | 8675 | # -*- encoding: utf-8 -*-
import re
import requests
from django import forms
from vendor.zebra.forms import StripePaymentForm
from django.utils.safestring import mark_safe
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from apps.profile.models import change_password, blank_authenticate
from apps.social.models import MSocialProfile
PLANS = [
("newsblur-premium-12", mark_safe("$12 / year <span class='NB-small'>($1/month)</span>")),
("newsblur-premium-24", mark_safe("$24 / year <span class='NB-small'>($2/month)</span>")),
("newsblur-premium-36", mark_safe("$36 / year <span class='NB-small'>($3/month)</span>")),
]
class HorizRadioRenderer(forms.RadioSelect.renderer):
""" this overrides widget method to put radio buttons horizontally
instead of vertically.
"""
def render(self):
"""Outputs radios"""
choices = '\n'.join(['%s\n' % w for w in self])
return mark_safe('<div class="NB-stripe-plan-choice">%s</div>' % choices)
class StripePlusPaymentForm(StripePaymentForm):
def __init__(self, *args, **kwargs):
email = kwargs.pop('email')
plan = kwargs.pop('plan', '')
super(StripePlusPaymentForm, self).__init__(*args, **kwargs)
self.fields['email'].initial = email
if plan:
self.fields['plan'].initial = plan
email = forms.EmailField(widget=forms.TextInput(attrs=dict(maxlength=75)),
label='邮件地址',
required=False)
plan = forms.ChoiceField(required=False, widget=forms.RadioSelect(renderer=HorizRadioRenderer),
choices=PLANS, label='Plan')
class DeleteAccountForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput(),
label="确认密码",
required=False)
confirm = forms.CharField(label="请输入“Delete”以确认",
widget=forms.TextInput(),
required=False)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(DeleteAccountForm, self).__init__(*args, **kwargs)
def clean_password(self):
user_auth = authenticate(username=self.user.username,
password=self.cleaned_data['password'])
if not user_auth:
user_auth = blank_authenticate(username=self.user.username)
if not user_auth:
raise forms.ValidationError('你的密码不匹配。')
return self.cleaned_data['password']
def clean_confirm(self):
if self.cleaned_data.get('confirm', "").lower() != "delete":
raise forms.ValidationError('请输入“Delete”以确认删除。')
return self.cleaned_data['confirm']
class ForgotPasswordForm(forms.Form):
email = forms.CharField(widget=forms.TextInput(),
label="你的邮件地址",
required=False)
def __init__(self, *args, **kwargs):
super(ForgotPasswordForm, self).__init__(*args, **kwargs)
def clean_email(self):
if not self.cleaned_data['email']:
raise forms.ValidationError('请输入邮件地址。')
try:
User.objects.get(email__iexact=self.cleaned_data['email'])
except User.MultipleObjectsReturned:
pass
except User.DoesNotExist:
raise forms.ValidationError('没有用户使用此邮件地址。')
return self.cleaned_data['email']
class ForgotPasswordReturnForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput(),
label="你的新密码",
required=True)
class AccountSettingsForm(forms.Form):
username = forms.RegexField(regex=r'^[a-zA-Z0-9]+$',
max_length=30,
widget=forms.TextInput(attrs={'class': 'NB-input'}),
label='用户名',
required=False,
error_messages={
'invalid': "用户名只能包含字母或数字"
})
email = forms.EmailField(widget=forms.TextInput(attrs={'maxlength': 75, 'class': 'NB-input'}),
label='邮件地址',
required=True,
error_messages={'required': '请输入邮件地址。'})
new_password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'NB-input'}),
label='密码',
required=False,
error_messages={'required': '请输入密码。'})
old_password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'NB-input'}),
label='密码',
required=False,
error_messages={'required': '请输入密码。'})
def __init__(self, user, *args, **kwargs):
self.user = user
super(AccountSettingsForm, self).__init__(*args, **kwargs)
def clean_username(self):
username = self.cleaned_data['username']
return username
def clean_password(self):
if not self.cleaned_data['password']:
return ""
return self.cleaned_data['password']
def clean_email(self):
return self.cleaned_data['email']
def clean(self):
username = self.cleaned_data.get('username', '')
new_password = self.cleaned_data.get('new_password', '')
old_password = self.cleaned_data.get('old_password', '')
email = self.cleaned_data.get('email', None)
if username and self.user.username != username:
try:
User.objects.get(username__iexact=username)
except User.DoesNotExist:
pass
else:
raise forms.ValidationError("此用户名已被使用,请尝试其他用户名。")
if self.user.email != email:
if email and User.objects.filter(email__iexact=email).count():
raise forms.ValidationError("此邮件地址已被其他帐户使用,请尝试其他邮件地址。")
if old_password or new_password:
code = change_password(self.user, old_password, new_password, only_check=True)
if code <= 0:
raise forms.ValidationError("你的旧密码不正确。")
return self.cleaned_data
def save(self, profile_callback=None):
username = self.cleaned_data['username']
new_password = self.cleaned_data.get('new_password', None)
old_password = self.cleaned_data.get('old_password', None)
email = self.cleaned_data.get('email', None)
if username and self.user.username != username:
change_password(self.user, self.user.username, username)
self.user.username = username
self.user.save()
social_profile = MSocialProfile.get_user(self.user.pk)
social_profile.username = username
social_profile.save()
if self.user.email != email:
self.user.email = email
self.user.save()
if old_password or new_password:
change_password(self.user, old_password, new_password)
class RedeemCodeForm(forms.Form):
gift_code = forms.CharField(widget=forms.TextInput(),
label="Gift code",
required=True)
def clean_gift_code(self):
gift_code = self.cleaned_data['gift_code']
gift_code = re.sub(r'[^a-zA-Z0-9]', '', gift_code).lower()
if len(gift_code) != 12:
raise forms.ValidationError('Your gift code should be 12 characters long.')
req = requests.get('https://www.thinkup.com/join/api/bundle/', params={'code': gift_code})
response = req.json()
is_valid = response.get('is_valid', None)
if is_valid:
return gift_code
elif is_valid == False:
raise forms.ValidationError('Your gift code is invalid. Check it for errors.')
elif response.get('error', None):
raise forms.ValidationError('Your gift code is invalid, says the server: %s' % response['error'])
return gift_code
| mit | -5,822,965,316,974,157,000 | 39.558252 | 109 | 0.555356 | false | 4.028447 | false | false | false |
rbarrois/django-runtests | doc/conf.py | 1 | 8326 | # -*- coding: utf-8 -*-
#
# Python Lib Template documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 6 19:31:34 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django runtests'
copyright = u'2012, Raphaël Barrois'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
root_dir = os.path.abspath(os.path.dirname(__file__))
def get_version(package_name):
import re
version_re = re.compile(r"^__version__ = [\"']([\w_.-]+)[\"']$")
package_components = package_name.split('.')
path_components = package_components + ['__init__.py']
with open(os.path.join(root_dir, os.pardir, *path_components)) as f:
for line in f:
match = version_re.match(line[:-1])
if match:
return match.groups()[0]
return '0.1.0'
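# Illustrative note (added): given a line such as __version__ = '0.3.1' in
# django_runtests/__init__.py, get_version('django_runtests') returns '0.3.1';
# the '0.1.0' above is only a fallback when no such line is found.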
release = get_version('django_runtests')
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonLibTemplatedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PythonLibTemplate.tex', u'Python Lib Template Documentation',
u'Raphaël Barrois', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pythonlibtemplate', u'Python Lib Template Documentation',
[u'Raphaël Barrois'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PythonLibTemplate', u'Python Lib Template Documentation',
u'Raphaël Barrois', 'PythonLibTemplate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit | -317,534,893,427,430,600 | 31.635294 | 80 | 0.697789 | false | 3.762206 | true | false | false |
mzc/restapi | restapi/oauth_conn.py | 1 | 3075 | # Copyright 2011 Max Z. Chao
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oauth2 as oauth
import re
from error import RestAPIError
class OAuthConn(object):
def __init__(self, consumer_key, consumer_secret, token_key=None, token_secret=None):
self._consumer = oauth.Consumer(consumer_key, consumer_secret)
if token_key and token_secret:
self._token_key = token_key
self._token_secret = token_secret
token = oauth.Token(token_key, token_secret)
self._client = oauth.Client(self._consumer, token)
else:
self._client = oauth.Client(self._consumer)
def update(self, token_key, token_secret):
token = oauth.Token(token_key, token_secret)
self._token_key = token_key
self._token_secret = token_secret
self._client = oauth.Client(self._consumer, token)
return self
@property
def token_key(self):
return self._token_key
@property
def token_secret(self):
return self._token_secret
def request(self, url, method):
return self._client.request(url, method)
class OAuthOOB(object):
def __init__(self, request_token_url, authenticate_url, access_token_url):
self._request_token_url = request_token_url
self._authenticate_url = authenticate_url
self._access_token_url = access_token_url
def _parse_token(self, content):
return re.findall('oauth_token=([^&]+)&oauth_token_secret=([^&]+)', content)[0]
def get_temp_credentials(self, oauth_conn):
resp, content = oauth_conn.request(self._request_token_url, method = 'GET')
if resp.status != 200:
raise RestAPIError('Failed to get Temp Credentials: ' + str(resp.status) + ' ' + resp.reason)
self._temp_credentials_url = self._authenticate_url + '?' + content
token_key, token_secret = self._parse_token(content)
return oauth_conn.update(token_key, token_secret)
@property
def temp_credentials_url(self):
return self._temp_credentials_url
def get_credentials(self, oauth_conn, pin_code):
access_token_pin_code_url = self._access_token_url + '?oauth_verifier=' + pin_code
resp, content = oauth_conn.request(access_token_pin_code_url, method = 'GET')
if resp.status != 200:
raise RestAPIError('Failed to get Credentials: ' + str(resp.status) + ' ' + resp.reason)
token_key, token_secret = self._parse_token(content)
return oauth_conn.update(token_key, token_secret)
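# Illustrative sketch (added; not part of the original module) of the intended
# out-of-band (PIN) flow; the key/secret/URL values below are placeholders:
#
#   conn = OAuthConn(CONSUMER_KEY, CONSUMER_SECRET)
#   oob = OAuthOOB(REQUEST_TOKEN_URL, AUTHENTICATE_URL, ACCESS_TOKEN_URL)
#   conn = oob.get_temp_credentials(conn)      # fetch temporary credentials
#   print(oob.temp_credentials_url)            # user authorizes here and gets a PIN
#   conn = oob.get_credentials(conn, pin_code) # exchange the PIN for access tokens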
| apache-2.0 | 4,999,563,005,065,049,000 | 38.423077 | 105 | 0.657561 | false | 3.819876 | false | false | false |
ttm/gmaneLegacy | tests/taggerTrain.py | 1 | 4092 | import nltk as k, pickle
tagger=k.data.load('taggers/maxent_treebank_pos_tagger/english.pickle')
# took a very long time, returned:
# tagger.evaluate(k.corpus.brown.tagged_sents())
# 0.5952331741865255
# why aren't the tags the same?
# the Brill recipe from nltk's own class
from nltk.tbl.template import Template
from nltk.tag.brill import Pos, Word
from nltk.tag import RegexpTagger, BrillTaggerTrainer
from nltk.corpus import treebank
training_data = treebank.tagged_sents()[:100]
baseline_data = treebank.tagged_sents()[100:200]
gold_data = treebank.tagged_sents()[200:300]
#testing_data = [untag(s) for s in gold_data]
testing_data = [[ss[0] for ss in s] for s in gold_data]
backoff = RegexpTagger([
(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers
(r'(The|the|A|a|An|an)$', 'AT'), # articles
(r'.*able$', 'JJ'), # adjectives
(r'.*ness$', 'NN'), # nouns formed from adjectives
(r'.*ly$', 'RB'), # adverbs
(r'.*s$', 'NNS'), # plural nouns
(r'.*ing$', 'VBG'), # gerunds
(r'.*ed$', 'VBD'), # past tense verbs
(r'.*', 'NN') # nouns (default)
])
baseline = backoff
baseline.evaluate(gold_data)
Template._cleartemplates() #clear any templates created in earlier tests
templates = [Template(Pos([-1])), Template(Pos([-1]), Word([0]))]
tt = BrillTaggerTrainer(baseline, templates, trace=3)
tagger1 = tt.train(training_data, max_rules=10)
tagger1.rules()[1:3]
train_stats = tagger1.train_stats()
tagger1.print_template_statistics(printunused=False)
tagger1.evaluate(gold_data)
tagged, test_stats = tagger1.batch_tag_incremental(testing_data, gold_data)
tagger2 = tt.train(training_data, max_rules=10, min_acc=0.99)
print(tagger2.evaluate(gold_data)) # doctest: +ELLIPSIS
tagger2.rules()[2:4]
#nn_cd_tagger = k.tag.RegexpTagger([(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')])
nn_cd_tagger = baseline
#tagged_data = k.corpus.treebank.tagged_sents()
tagged_data = k.corpus.treebank.tagged_sents(tagset="universal")
tagged_data2 = k.corpus.brown.tagged_sents(tagset="universal")
num_sents=len(tagged_data)
num_sents2=len(tagged_data2)
train=0.8
cutoff = int(num_sents *train)
cutoff2 = int(num_sents2*train)
training_data = tagged_data[:cutoff]+tagged_data2[:cutoff2]
gold_data = tagged_data[cutoff:]+tagged_data2[cutoff2:]
testing_data = [[t[0] for t in sent] for sent in gold_data]
print("Done loading.")
unigram_tagger = k.tag.UnigramTagger(training_data,backoff=nn_cd_tagger)
bigram_tagger = k.tag.BigramTagger(training_data,
backoff=unigram_tagger)
##templates = [
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (1,1)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (2,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (1,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (1,3)),
##
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (1,1)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (2,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (1,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (1,3)),
##
## k.tag.brill.ProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (-1, -1), (1,1)),
## k.tag.brill.ProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (-1, -1), (1,1)),
## ]
trace=5
trainer = k.tag.BrillTaggerTrainer(bigram_tagger, templates, 0)
#trainer = k.tag.BrillTaggerTrainer(bigram_tagger, templates, 2)
#trainer = k.tag.brill.BrillTaggerTrainer(bigram_tagger, trace)
##trainer = brill.BrillTaggerTrainer(u, templates, trace)
max_rules=40000
min_score=2
#brill_tagger = trainer.train(training_data, max_rules, min_score)
brill_tagger = trainer.train(training_data, max_rules, 1)
f=open("./pickledir/brill_tagger5", 'wb')
pickle.dump(brill_tagger,f,-1)
f.close()
# accuracy: 0.9180
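# Illustrative follow-up (added; the sentence is just an example): the pickled
# tagger can be reloaded and applied to a tokenized sentence, e.g.
# f = open("./pickledir/brill_tagger5", 'rb')
# brill_tagger = pickle.load(f)
# f.close()
# print(brill_tagger.tag(k.word_tokenize("The quick brown fox jumps over the lazy dog")))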
| unlicense | -2,526,869,134,886,263,000 | 40.30303 | 122 | 0.691612 | false | 2.66558 | true | false | false |
dionysio/django_upwork_portfolio | base/migrations/0001_initial.py | 1 | 1282 | # Generated by Django 2.0.2 on 2018-02-23 04:36
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('project_id', models.AutoField(primary_key=True, serialize=False)),
('title', models.TextField()),
('image', models.ImageField(upload_to='projects/')),
('description', models.TextField()),
('url', models.URLField(blank=True, null=True)),
('order', models.IntegerField(default=0)),
],
options={
'ordering': ['-order'],
},
),
migrations.CreateModel(
name='School',
fields=[
('school_id', models.AutoField(primary_key=True, serialize=False)),
('title', models.TextField()),
('started', models.DateField()),
('finished', models.DateField()),
('description', models.TextField()),
('major', models.TextField()),
],
options={
'ordering': ['-started'],
},
),
]
| mit | 2,744,757,639,075,758,600 | 29.52381 | 84 | 0.478159 | false | 5.087302 | false | false | false |
willingc/oh-mainline | mysite/customs/management/commands/customs_debugger.py | 15 | 3664 | # This file is part of OpenHatch.
# Copyright (C) 2010 Jack Grigg
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
from django.core.management.base import BaseCommand
from mysite.search.models import Bug
class Command(BaseCommand):
help = "A bunch of tools for checking and cleaning the Bug database."
def list_old_bugs(self, days, hours=0):
count = 0
x_days_ago = (datetime.datetime.now() -
datetime.timedelta(days=days, hours=hours))
for bug in Bug.all_bugs.filter(last_polled__lt=x_days_ago):
count += 1
print "%d - %s" % (count, str(bug))
print "There are a total of %d Bug objects that are %d days %d hours old." % (count, days, hours)
def list_closed_bugs(self):
count = 0
for bug in Bug.all_bugs.filter(looks_closed=True):
count += 1
print "%d - %s" % (count, str(bug))
print "There are a total of %d closed Bug objects." % count
def delete_old_bugs(self, days, hours=0):
x_days_ago = (datetime.datetime.now() -
datetime.timedelta(days=days, hours=hours))
Bug.all_bugs.filter(last_polled__lt=x_days_ago).delete()
def delete_closed_bugs(self):
Bug.all_bugs.filter(looks_closed=True).delete()
def delete_all_bugs(self):
Bug.all_bugs.all().delete()
def show_usage(self):
print """
usage: ./manage.py customs_debugger COMMAND
The following commands are available:
list_old_bugs List all Bug objects older than one day plus one hour.
list_very_old_bugs List all Bug objects older than two days.
list_closed_bugs List all Bug objects that look closed.
delete_old_bugs Delete all Bug objects older than one day plus one hour.
delete_very_old_bugs Delete all Bug objects older than two days.
delete_closed_bugs Delete all Bug objects that look closed.
delete_all_bugs Delete ALL Bug objects. Period. Useful if you want to
test a bug import from scratch. Not so useful on a
production server.
NOTE: These commands are executed immediately, so make sure you are
executing what you want, especially with the deleting commands."""
def handle(self, *args, **options):
if len(args) > 1:
self.show_usage()
elif 'list_old_bugs' in args:
self.list_old_bugs(days=1, hours=1)
elif 'list_very_old_bugs' in args:
self.list_old_bugs(days=2)
elif 'list_closed_bugs' in args:
self.list_closed_bugs()
elif 'delete_old_bugs' in args:
self.delete_old_bugs(days=1, hours=1)
elif 'delete_very_old_bugs' in args:
self.delete_old_bugs(days=2)
elif 'delete_closed_bugs' in args:
self.delete_closed_bugs()
elif 'delete_all_bugs' in args:
self.delete_all_bugs()
else:
self.show_usage()
| agpl-3.0 | -4,301,763,859,955,644,000 | 39.263736 | 105 | 0.637828 | false | 3.816667 | false | false | false |
Haunter17/MIR_SU17 | exp4/exp4_rnn.py | 1 | 6465 | '''
A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import h5py
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
SMALL_FLAG = 0
print('==> Experiment 4 RNN')
filepath = '/pylon2/ci560sp/haunter/exp3_taylorswift_d15_1s_C1C8.mat'
if SMALL_FLAG:
filepath = '/pylon2/ci560sp/haunter/exp3_small.mat'
print('==> Loading data from {}...'.format(filepath))
# benchmark
t_start = time.time()
# ==============================================
# reading data
# ==============================================
f = h5py.File(filepath)
X_train = np.array(f.get('trainingFeatures'))
y_train = np.array(f.get('trainingLabels'))
X_val = np.array(f.get('validationFeatures'))
y_val = np.array(f.get('validationLabels'))
t_end = time.time()
print('--Time elapsed for loading data: {t:.2f} \
seconds'.format(t = t_end - t_start))
del f
print('-- Number of training samples: {}'.format(X_train.shape[0]))
print('-- Number of validation samples: {}'.format(X_val.shape[0]))
# ==============================================
# RNN configs
# ==============================================
# Network Parameters
num_training_vec, total_features = X_train.shape
num_freq = 169
num_frames = int(total_features / num_freq)
max_iter = 300
print_freq = 10
if SMALL_FLAG:
max_iter = 10
print_freq = 1
batch_size = 1000
learning_rate = 0.001
n_input = num_freq # number of sequences (rows)
n_steps = num_frames # size of each sequence (number of columns), timesteps
n_hidden = 512 # hidden layer num of features
n_classes = int(max(y_train.max(), y_val.max()) + 1)
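# Added note: each input row holds num_freq * num_frames spectrogram values;
# assuming the flattened layout is frame-major, it is reshaped below to
# (batch, n_steps, n_input) = (batch, num_frames, num_freq) before entering the LSTM.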
# ==============================================
# RNN architecture
# ==============================================
# Transform labels into on-hot encoding form
y_train_OHEnc = tf.one_hot(y_train.copy(), n_classes)
y_val_OHEnc = tf.one_hot(y_val.copy(), n_classes)
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([n_classes]))
}
def RNN(x, weights, biases):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, n_steps, n_input)
# Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
# Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
x = tf.unstack(x, n_steps, 1)
# Define a lstm cell with tensorflow
lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = RNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# evaluation metrics
train_acc_list = []
val_acc_list = []
train_err_list = []
val_err_list = []
# ==============================================
# RNN training
# ==============================================
# Launch the graph
with tf.Session() as sess:
sess.run(init)
y_train = sess.run(y_train_OHEnc)[:, 0, :]
y_val = sess.run(y_val_OHEnc)[:, 0, :]
print('==> Training the full network...')
t_start = time.time()
# Keep training until reach max iterations
for epoch in range(max_iter):
for i in range(0, num_training_vec, batch_size):
end_ind = min(i + batch_size, num_training_vec)
batch_x = X_train[i : end_ind]
batch_y = y_train[i : end_ind]
# Reshape data to get 28 seq of 28 elements
batch_x = batch_x.reshape((-1, n_steps, n_input))
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
if (epoch + 1) % print_freq == 0:
train_acc = accuracy.eval(feed_dict={x: X_train.reshape((-1, n_steps, n_input)),\
y: y_train})
train_acc_list.append(train_acc)
val_acc = accuracy.eval(feed_dict={x: X_val.reshape((-1, n_steps, n_input)),\
y: y_val})
val_acc_list.append(val_acc)
train_err = cost.eval(feed_dict={x: X_train.reshape((-1, n_steps, n_input)),\
y: y_train})
train_err_list.append(train_err)
val_err = cost.eval(feed_dict={x: X_val.reshape((-1, n_steps, n_input)),\
y: y_val})
val_err_list.append(val_err)
print("-- epoch: %d, training error %g"%(epoch + 1, train_err))
t_end = time.time()
print('--Time elapsed for training: {t:.2f} \
seconds'.format(t = t_end - t_start))
# ==============================================
# RNN Evaluation
# ==============================================
# Reports
print('-- Training accuracy: {:.4f}'.format(train_acc_list[-1]))
print('-- Validation accuracy: {:.4f}'.format(val_acc_list[-1]))
print('-- Training error: {:.4E}'.format(train_err_list[-1]))
print('-- Validation error: {:.4E}'.format(val_err_list[-1]))
print('==> Generating error plot...')
x_list = range(0, print_freq * len(train_acc_list), print_freq)
train_err_plot = plt.plot(x_list, train_err_list, 'b-', label='training')
val_err_plot = plt.plot(x_list, val_err_list, '-', color='orange', label='validation')
plt.xlabel('Number of epochs')
plt.ylabel('Cross-Entropy Error')
plt.title('Error vs Number of Epochs with {} Hidden Units'.format(n_hidden))
plt.legend(loc='best')
plt.savefig('rnn_{}.png'.format(n_hidden), format='png')
plt.close()
print('==> Finished!')
| mit | -3,723,938,204,682,870,300 | 34.521978 | 98 | 0.59768 | false | 3.283393 | false | false | false |
rbuffat/pyidf | tests/test_hvactemplatezoneunitary.py | 1 | 7994 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.hvac_templates import HvactemplateZoneUnitary
log = logging.getLogger(__name__)
class TestHvactemplateZoneUnitary(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_hvactemplatezoneunitary(self):
pyidf.validation_level = ValidationLevel.error
obj = HvactemplateZoneUnitary()
# object-list
var_zone_name = "object-list|Zone Name"
obj.zone_name = var_zone_name
# object-list
var_template_unitary_system_name = "object-list|Template Unitary System Name"
obj.template_unitary_system_name = var_template_unitary_system_name
# object-list
var_template_thermostat_name = "object-list|Template Thermostat Name"
obj.template_thermostat_name = var_template_thermostat_name
# real
var_supply_air_maximum_flow_rate = 4.4
obj.supply_air_maximum_flow_rate = var_supply_air_maximum_flow_rate
# real
var_zone_heating_sizing_factor = 0.0
obj.zone_heating_sizing_factor = var_zone_heating_sizing_factor
# real
var_zone_cooling_sizing_factor = 0.0
obj.zone_cooling_sizing_factor = var_zone_cooling_sizing_factor
# alpha
var_outdoor_air_method = "Flow/Person"
obj.outdoor_air_method = var_outdoor_air_method
# real
var_outdoor_air_flow_rate_per_person = 8.8
obj.outdoor_air_flow_rate_per_person = var_outdoor_air_flow_rate_per_person
# real
var_outdoor_air_flow_rate_per_zone_floor_area = 9.9
obj.outdoor_air_flow_rate_per_zone_floor_area = var_outdoor_air_flow_rate_per_zone_floor_area
# real
var_outdoor_air_flow_rate_per_zone = 10.1
obj.outdoor_air_flow_rate_per_zone = var_outdoor_air_flow_rate_per_zone
# object-list
var_supply_plenum_name = "object-list|Supply Plenum Name"
obj.supply_plenum_name = var_supply_plenum_name
# object-list
var_return_plenum_name = "object-list|Return Plenum Name"
obj.return_plenum_name = var_return_plenum_name
# alpha
var_baseboard_heating_type = "HotWater"
obj.baseboard_heating_type = var_baseboard_heating_type
# object-list
var_baseboard_heating_availability_schedule_name = "object-list|Baseboard Heating Availability Schedule Name"
obj.baseboard_heating_availability_schedule_name = var_baseboard_heating_availability_schedule_name
# real
var_baseboard_heating_capacity = 15.15
obj.baseboard_heating_capacity = var_baseboard_heating_capacity
# alpha
var_zone_cooling_design_supply_air_temperature_input_method = "SupplyAirTemperature"
obj.zone_cooling_design_supply_air_temperature_input_method = var_zone_cooling_design_supply_air_temperature_input_method
# real
var_zone_cooling_design_supply_air_temperature = 17.17
obj.zone_cooling_design_supply_air_temperature = var_zone_cooling_design_supply_air_temperature
# real
var_zone_cooling_design_supply_air_temperature_difference = 18.18
obj.zone_cooling_design_supply_air_temperature_difference = var_zone_cooling_design_supply_air_temperature_difference
# alpha
var_zone_heating_design_supply_air_temperature_input_method = "SupplyAirTemperature"
obj.zone_heating_design_supply_air_temperature_input_method = var_zone_heating_design_supply_air_temperature_input_method
# real
var_zone_heating_design_supply_air_temperature = 20.2
obj.zone_heating_design_supply_air_temperature = var_zone_heating_design_supply_air_temperature
# real
var_zone_heating_design_supply_air_temperature_difference = 21.21
obj.zone_heating_design_supply_air_temperature_difference = var_zone_heating_design_supply_air_temperature_difference
# object-list
var_design_specification_outdoor_air_object_name = "object-list|Design Specification Outdoor Air Object Name"
obj.design_specification_outdoor_air_object_name = var_design_specification_outdoor_air_object_name
# object-list
var_design_specification_zone_air_distribution_object_name = "object-list|Design Specification Zone Air Distribution Object Name"
obj.design_specification_zone_air_distribution_object_name = var_design_specification_zone_air_distribution_object_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].zone_name, var_zone_name)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].template_unitary_system_name, var_template_unitary_system_name)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].template_thermostat_name, var_template_thermostat_name)
self.assertAlmostEqual(idf2.hvactemplatezoneunitarys[0].supply_air_maximum_flow_rate, var_supply_air_maximum_flow_rate)
self.assertAlmostEqual(idf2.hvactemplatezoneunitarys[0].zone_heating_sizing_factor, var_zone_heating_sizing_factor)
self.assertAlmostEqual(idf2.hvactemplatezoneunitarys[0].zone_cooling_sizing_factor, var_zone_cooling_sizing_factor)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].outdoor_air_method, var_outdoor_air_method)
self.assertAlmostEqual(idf2.hvactemplatezoneunitarys[0].outdoor_air_flow_rate_per_person, var_outdoor_air_flow_rate_per_person)
self.assertAlmostEqual(idf2.hvactemplatezoneunitarys[0].outdoor_air_flow_rate_per_zone_floor_area, var_outdoor_air_flow_rate_per_zone_floor_area)
self.assertAlmostEqual(idf2.hvactemplatezoneunitarys[0].outdoor_air_flow_rate_per_zone, var_outdoor_air_flow_rate_per_zone)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].supply_plenum_name, var_supply_plenum_name)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].return_plenum_name, var_return_plenum_name)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].baseboard_heating_type, var_baseboard_heating_type)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].baseboard_heating_availability_schedule_name, var_baseboard_heating_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatezoneunitarys[0].baseboard_heating_capacity, var_baseboard_heating_capacity)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].zone_cooling_design_supply_air_temperature_input_method, var_zone_cooling_design_supply_air_temperature_input_method)
self.assertAlmostEqual(idf2.hvactemplatezoneunitarys[0].zone_cooling_design_supply_air_temperature, var_zone_cooling_design_supply_air_temperature)
self.assertAlmostEqual(idf2.hvactemplatezoneunitarys[0].zone_cooling_design_supply_air_temperature_difference, var_zone_cooling_design_supply_air_temperature_difference)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].zone_heating_design_supply_air_temperature_input_method, var_zone_heating_design_supply_air_temperature_input_method)
self.assertAlmostEqual(idf2.hvactemplatezoneunitarys[0].zone_heating_design_supply_air_temperature, var_zone_heating_design_supply_air_temperature)
self.assertAlmostEqual(idf2.hvactemplatezoneunitarys[0].zone_heating_design_supply_air_temperature_difference, var_zone_heating_design_supply_air_temperature_difference)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].design_specification_outdoor_air_object_name, var_design_specification_outdoor_air_object_name)
self.assertEqual(idf2.hvactemplatezoneunitarys[0].design_specification_zone_air_distribution_object_name, var_design_specification_zone_air_distribution_object_name) | apache-2.0 | -8,063,789,658,988,243,000 | 62.452381 | 177 | 0.734176 | false | 3.325291 | false | false | false |
frederic-michaud/chibre | bin/base_jeu/donne.py | 1 | 2455 | # coding: utf8
from rang import *
from couleur import *
import numpy as np
from hand import *
from joueur import *
class donne:
def __init__(self,partie):
"""fonction qui commence une nouvelle donne en distribuant les cartes et en choississant l'atout"""
carte_jouee = []
self.partie = partie
self.distribue()
self.atout = partie.joueur_atout.decide_atout()
self.points = [0,0]
self.joue_donne()
self.fin_donne()
def distribue(self):
"""fonction qui distribue les cartes de manières aléatoires aux quatre joueurs"""
permutations = (np.random.permutation(36) +1).reshape(4,9)
hands = [hand(perm = i) for i in permutations]
for i in range(4):
self.partie.get_joueur(i+1).hand = hands[i]
self.atout = self.partie.joueur_atout.decide_atout()
def compatibilise(self,equipe_gagnant,cartes):
""" fonction qui détermine le nombre de points gagnés dans une plie"""
score = sum([carte.valeur_point(self) for carte in cartes])
#equipe_gagnant.points+=score
self.points[equipe_gagnant.ide -1] += score
def joue_donne(self):
"""fonction qui définit comment on joue chaque donne"""
joueur_commence = self.partie.joueur_atout
for i in range (1,10):
(joueur_gagnant, cartes_jouees) = self.joue_plie(joueur_commence)
self.compatibilise(self.partie.get_equipe(joueur = joueur_gagnant), cartes_jouees)
joueur_commence = joueur_gagnant
self.points[self.partie.get_equipe(joueur = joueur_gagnant).ide -1] += 5
def joue_plie(self,joueur_commence):
"""fonction qui définit comment on joue chaque plie"""
cartes_jouees = []
for i in range(4):
cartes_jouees.append(self.partie.get_joueur(i + joueur_commence.ide).joue(self,cartes_jouees))
joueur_gagnant = self.determine_gagnant(cartes_jouees,joueur_commence)
print cartes_jouees
return (joueur_gagnant, cartes_jouees)
def determine_gagnant(self,cartes_jouees,joueur_commence):
"""fonction qui determine le combientième joueur a gagné la plie"""
couleur_plie = cartes_jouees[0].couleur
values = [carte.valeur_force(self.atout,couleur_plie) for carte in cartes_jouees]
print values
joueur_gagnant = values.index(max(values))
return self.partie.get_joueur(joueur_commence.ide + joueur_gagnant)
def fin_donne(self):
print "equipe 1: " + str(self.points[0])
print "equipe 2: " + str(self.points[1])
        self.partie.get_equipe(1).points += self.points[0]
        self.partie.get_equipe(2).points += self.points[1]
| gpl-3.0 | -8,207,348,853,094,176,000 | 34.985294 | 101 | 0.716796 | false | 2.364251 | false | false | false |
KWARC/mwetoolkit | bin/libs/base/word.py | 1 | 20032 | #!/usr/bin/python
# -*- coding:UTF-8 -*-
################################################################################
#
# Copyright 2010-2014 Carlos Ramisch, Vitor De Araujo, Silvio Ricardo Cordeiro,
# Sandra Castellanos
#
# word.py is part of mwetoolkit
#
# mwetoolkit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mwetoolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mwetoolkit. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
"""
This module provides the `Word` class. This class represents an orthographic
word (as in mwetoolkit-corpus.dtd, mwetoolkit-patterns.dtd and
mwetoolkit-candidates.dtd) defined by a surface form, a lemma and a POS tag.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from xml.sax.saxutils import quoteattr
from .. import util
from .feature import FeatureSet
from .__common import WILDCARD, SEPARATOR
# List of valid word attributes. Must appear in the same order as the
# arguments for the Word class constructor.
WORD_ATTRIBUTES = ["surface", "lemma", "pos", "syn"]
################################################################################
class Word(object):
"""
An orthographic word (in languages for which words are separated from
each other by a space) is the simplest lexical unit recognisable by a
native speaker, and it is characterized by its surface form, its lemma
and its Part Of Speech tag.
"""
################################################################################
def __init__(self, surface=WILDCARD, lemma=WILDCARD,
pos=WILDCARD, syn=WILDCARD, freqs=None):
"""
Instantiates a new `Word`. A Word might be one of: a token in a
corpus, in which case it will probably have at least a defined
surface form (mwetoolkit-corpus.dtd); a part of a pattern, in which
case it will probably contain some `WILDCARD`s; a part of a
reference or gold standard entry, in which case it will have at
least a defined lemma (mwetoolkit-patterns.dtd); a part of an n-gram
in a candidates list, in which case most of the parts should be
defined (mwetoolkit-candidates.dtd). Besides the surface form, the
lemma and the Part Of Speech tag, a word also contains a list of
`Frequency`ies, each one corresponding to its number of occurrences
in a given corpus.
@param surface A string corresponding to the surface form of the
word, i.e. the form in which it occurs in the corpus. A surface form
might include morphological inflection such as plural and gender
marks, conjugation for verbs, etc. For example, "went", "going",
"go", "gone", are all different surface forms for a same lemma, the
verb "(to) go".
@param lemma A string corresponding to the lemma of the word, i.e.
the normalized non-inflected form of the word. A lemma is generally
the preferred simplest form of a word as it appears in a dictionary,
like infinitive for verbs or singular for nouns. Notice that a lemma
is a well formed linguistic word, and thus differs from a root or
a stem. For example, the lemma of the noun "preprocessing" is
"preprocessing" while the root (without prefixes and suffixes) is
"process". Analagously, the lemma of the verb "studied" is "(to)
study" whereas a stem would be "stud-", which is not an English
word.
@param pos A string corresponding to a Part Of Speech tag of the
word. A POS tag is a morphosyntactic class like "Noun", "Adjective"
or "Verb". You should use a POS tagger system to tag your corpus
before you use mwetoolkit. The tag set, i.e. the set of valid POS
tags, is your choice. You can use a very simple set that
distinguishes only top-level classes ("N", "A", "V") or a fine-
grained classification, e.g. "NN" is a proper noun, "NNS" a proper
noun in plural form, etc.
        @param syn A string corresponding to the syntactic information of the
        word. As the jungle of syntactic formalisms is wild, we assume that
each word has a string that encodes the syntactic information. If
you use a dependency parser, for instance, you might encode the
syntax information as "rel:>index" where "rel" is the type of
syntactic relation (object, subject, det, etc.) and the "index" is
the index of the word on which this word depends. An example can be
found in the corpus DTD file.
@param freqs A dict of `corpus_name`->`Frequency` corresponding to counts of
occurrences of this word in a certain corpus. Please notice that
the frequencies correspond to occurrences of a SINGLE word in a
corpus. Joint `Ngram` frequencies are attached to the corresponding
`Ngram` object that contains this `Word`, if any.
"""
self.surface = surface
self.lemma = lemma
self.pos = pos
self.syn = syn
assert freqs is None or isinstance(freqs, FeatureSet), freqs
self.freqs = freqs or FeatureSet("freq", lambda x,y: x+y)
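    # Illustrative usage sketch (not part of the original code): a corpus
    # token normally defines all four linguistic fields, while a pattern
    # word leaves the unconstrained ones as `WILDCARD`s. The tag "V" and
    # the dependency string "obj:3" below are invented examples; the actual
    # tag set and syntax encoding depend on your tagger/parser.
    #
    #     >>> token   = Word(surface="went", lemma="go", pos="V", syn="obj:3")
    #     >>> pattern = Word(lemma="go", pos="V")   # surface and syn stay WILDCARD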
################################################################################
def copy(self):
r"""Return a copy of this Word."""
return Word(self.surface, self.lemma, self.pos, self.syn, self.freqs.copy())
################################################################################
def lemma_or_surface(self):
r"""Return lemma if it is defined; otherwise, return surface."""
if self.lemma != WILDCARD:
return self.lemma
if self.surface != WILDCARD:
return self.surface
return None
################################################################################
def add_frequency( self, freq ) :
"""
Add a `Frequency` to the list of frequencies of the word.
@param freq `Frequency` that corresponds to a count of this word in
a corpus. No test is performed in order to verify whether this is a
repeated frequency in the list.
"""
self.freqs.add(freq.name, freq.value)
################################################################################
def to_string( self ) :
"""
Converts this word to an internal string representation where each
part of the word is separated with a special `SEPARATOR`. This is
only used internally by the scripts and is of little use to the
        user because of reduced legibility. The inverse conversion is
        performed by the function `from_string`.
@return A string with a special internal representation of the
word.
"""
return SEPARATOR.join((self.surface, self.lemma, self.pos))
################################################################################
def from_string( self, s ) :
"""
        Instantiates the current word from an internal string
        representation where each part of the word is separated with a
        special `SEPARATOR`. This is only used internally by the scripts
        and is of little use to the user because of reduced legibility.
        The inverse conversion is performed by the function `to_string`.
@param s A string with a special internal representation of
the word, as generated by the function `to_string`
"""
[ self.surface, self.lemma, self.pos ] = s.split( SEPARATOR )
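    # Hedged round-trip sketch for the internal string format (assuming
    # `SEPARATOR` never occurs inside the word parts):
    #
    #     >>> w = Word(surface="went", lemma="go", pos="V")
    #     >>> s = w.to_string()          # "went" + SEPARATOR + "go" + SEPARATOR + "V"
    #     >>> w2 = Word(); w2.from_string(s)
    #     >>> (w2.surface, w2.lemma, w2.pos)
    #     ('went', 'go', 'V')
    #
    # Note that `syn` and `freqs` are not preserved by this round trip.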
################################################################################
def to_html( self, wid ) :
"""
        Provides an HTML representation of this word: an <a> element whose
        text is the surface form, with nested <span> elements carrying the
        word id, lemma, POS tag and syntax annotation.
        @param wid Integer identifier of the word, embedded in the markup.
        @return A string with the HTML markup for this word.
"""
# TODO: properly escape this stuff
wtempl = "<a href=\"#\" class=\"word\">%(surface)s" \
"<span class=\"wid\">%(wid)d</span>" \
"<span class=\"lps\">%(lemma)s%(pos)s%(syn)s</span></a>"
templ = lambda x: "<span class=\"%s\">%s</span>" % (x, getattr(self,x))
        attr_map = [(x, templ(x)) for x in WORD_ATTRIBUTES] + [("wid", wid)]  # list comprehension works on both Python 2 and 3, unlike map() + list
return wtempl % dict(attr_map)
################################################################################
def to_xml(self, **kwargs):
"""
Provides an XML string representation of the current object,
including internal variables. The printed attributes of the word
depend on the boolean parameters.
@param print_surface If print_surface is True, will include the
`surface` of the word in the XML <w> element, otherwise the surface
form will not be printed. Default True.
@param print_lemma If print_lemma is True, will include the `lemma`
of the word in the XML <w> element, otherwise the lemma will not be
printed. Default True.
@param print_pos If print_pos is True, will include the `pos` of the
word in the XML <w> element, otherwise the Part Of Speech will not
be printed. Default True.
@param print_freqs If print_freqs is True, will include the `freqs`
of the word as children of the XML <w> element, otherwise the word
frequencies will not be printed. Default True.
@return A string containing the XML element <w> with its attributes
and internal structure, according to mwetoolkit-candidates.dtd,
mwetoolkit-patterns.dtd and mwetoolkit-corpus.dtd and
depending on the input flags.
"""
ret = []
        self._to_xml_into(ret, **kwargs)
return "".join(ret)
def _to_xml_into(self, output, print_surface=True, print_lemma=True,
print_pos=True, print_syn=True, print_freqs=True):
output.append("<w")
if self.surface != WILDCARD and print_surface:
output.append(" surface=")
output.append(quoteattr(self.surface))
if self.lemma != WILDCARD and print_lemma:
output.append(" lemma=")
output.append(quoteattr(self.lemma))
if self.pos != WILDCARD and print_pos:
output.append(" pos=")
output.append(quoteattr(self.pos))
if self.syn != WILDCARD and print_syn:
output.append(" syn=")
output.append(quoteattr(self.syn))
if not self.freqs or not print_freqs:
output.append(" />")
else:
output.append(" >")
self.freqs._to_xml_into(output)
output.append("</w>")
################################################################################
def __eq__( self, a_word ) :
"""
Equivalent to match( w )
"""
return self.match( a_word )
################################################################################
def __len__( self ) :
"""
        Returns the number of characters in a word, choosing among the
        available fields in priority order surface > lemma > pos.
@return The number of characters in this word. Zero if this is an
empty word (or all fields are wildcards)
"""
if self.surface != WILDCARD :
return len( self.surface )
elif self.lemma != WILDCARD :
return len( self.lemma )
elif self.pos != WILDCARD :
return len( self.pos )
else :
return 0
################################################################################
def compare( self, s1, s2, ignore_case ) :
"""
        Compares two strings for equality; the type of comparison (case
        sensitive or insensitive) is controlled by the boolean argument
        `ignore_case`.
@param s1 A string to compare.
@param s2 Another string to compare.
@param ignore_case True if comparison should be case insensitive,
        False if the comparison should be case sensitive.
@return True if the strings are identical, False if they are
different.
"""
if ignore_case :
return s1.lower() == s2.lower()
else :
return s1 == s2
################################################################################
def match( self, w, ignore_case=False, lemma_or_surface=False ) :
"""
A simple matching algorithm that returns true if the parts of the
current word match the parts of the given word. The matching at the
word level considers only the parts that are defined, for example,
POS tags for candidate extraction or lemmas for automatic gold
        standard evaluation. A match on a part of the current word is True
        when this part equals the corresponding part of `w` or when the
        part of the current word is not defined (i.e. equals `WILDCARD`).
        All three parts (surface, lemma and pos) need to match for the
        whole word to match. If ANY of these three word parts does not
        match the corresponding part of the given word `w`, this function
        returns False.
        @param w A `Word` against which we would like to compare the current
        word. In general, the current word contains the `WILDCARD`s while
        `w` has all the parts (surface, lemma, pos) with a defined value.
        @param ignore_case If True, string comparisons are case insensitive.
        @param lemma_or_surface If True, lemma and surface are considered
        interchangeable, i.e. the match succeeds if any lemma/surface
        combination between the two words is equal.
        @return Will return True if ALL the word parts of `w` match ALL
        the word parts of the current pattern (i.e. they have the same
        values for all the defined parts). Will return False if
        ANY of the three word parts does not match the corresponding part of
        the given word `w`.
"""
if self.pos!=WILDCARD and not self.compare(self.pos, w.pos, ignore_case):
return False
if lemma_or_surface:
return ((self.compare(self.lemma, w.lemma, ignore_case)
or (self.compare(self.lemma, w.surface, ignore_case))
or (self.compare(self.surface, w.lemma, ignore_case))
or (self.compare(self.surface, w.surface, ignore_case))))
else:
return ((self.surface==WILDCARD or self.compare(self.surface, w.surface, ignore_case))
and (self.lemma==WILDCARD or self.compare(self.lemma, w.lemma, ignore_case)))
#return ((self.surface != WILDCARD and self.compare( self.surface,w.surface,ignore_case)) or \
# self.surface == WILDCARD) and \
# ((self.lemma != WILDCARD and self.compare( self.lemma, w.lemma, ignore_case ) ) or \
# self.lemma == WILDCARD) and \
# ((self.pos != WILDCARD and self.compare( self.pos, w.pos, ignore_case ) ) or \
# self.pos == WILDCARD)
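    # Hedged illustration of the matching semantics: parts left as WILDCARD
    # in the pattern word match anything in the target word.
    #
    #     >>> token = Word(surface="went", lemma="go", pos="V")
    #     >>> Word(pos="V").match(token)                 # surface/lemma undefined
    #     True
    #     >>> Word(lemma="run", pos="V").match(token)    # lemma differs
    #     False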
################################################################################
def get_case_class( self, s_or_l="surface" ) :
"""
For a given word (surface form), assigns a class that can be:
* lowercase - All characters are lowercase
* UPPERCASE - All characters are uppercase
* Firstupper - All characters are lowercase except for the first
* MiXeD - This token contains mixed lowercase and uppercase characters
* ? - This token contains non-alphabetic characters
@param s_or_l Surface or lemma? Default value is "surface" but set it
to "lemma" if you want to know the class based on the lemma.
@return A string that describes the case class according to the list
above.
"""
form = getattr( self, s_or_l )
if form != WILDCARD :
token_list = list( form )
else :
token_list = []
case_class = "?"
for letter_i in range( len( token_list ) ) :
letter = token_list[ letter_i ]
if letter.isupper() :
if letter_i > 0 :
if case_class == "lowercase" or case_class == "Firstupper" :
case_class = "MiXeD"
elif case_class == "?" :
case_class = "UPPERCASE"
else :
case_class = "UPPERCASE"
elif letter.islower() :
if letter_i > 0 :
if case_class == "UPPERCASE" :
if letter_i == 1 :
case_class = "Firstupper"
else :
case_class = "MiXeD"
elif case_class == "?" :
case_class = "lowercase"
else :
case_class = "lowercase"
return case_class
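    # Hedged examples of the case classes computed above (surface-based):
    #
    #     >>> Word(surface="house").get_case_class()
    #     'lowercase'
    #     >>> Word(surface="NATO").get_case_class()
    #     'UPPERCASE'
    #     >>> Word(surface="London").get_case_class()
    #     'Firstupper'
    #     >>> Word(surface="McDonald").get_case_class()
    #     'MiXeD'
    #     >>> Word(surface="42").get_case_class()
    #     '?'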
################################################################################
def get_freq_value( self, freq_name ) :
"""
Returns the value of a `Frequency` in the frequencies list. The
frequency is identified by the frequency name provided as input to
this function. If two frequencies have the same name, only the first
value found will be returned.
@param freq_name A string that identifies the `Frequency` of the
candidate for which you would like to know the value.
@return Value of the searched frequency. If there is no frequency
with this name, then it will return 0.
"""
for freq in self.freqs :
if freq.name == freq_name :
return freq.value
return 0
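    # Hedged sketch of the frequency API (assuming a Frequency-like object
    # with `name` and `value` attributes, as expected by add_frequency()
    # above; the "bnc" corpus name and Frequency constructor are invented
    # examples):
    #
    #     >>> w = Word(surface="went")
    #     >>> w.add_frequency(Frequency("bnc", 42))
    #     >>> w.get_freq_value("bnc")
    #     42
    #     >>> w.get_freq_value("unknown_corpus")
    #     0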
################################################################################
def syn_iter(self):
r"""Yield pairs (synrel, index) based on `self.syn`."""
if self.syn != WILDCARD and self.syn != "":
for syn_pair in self.syn.split(";"):
try:
a, b = syn_pair.split(":")
except ValueError:
util.warn("Bad colon-separated syn pair: {pair!r}", pair=syn_pair)
else:
try:
b = int(b) - 1
except ValueError:
util.warn("Bad syn index reference: {index!r}", index=b)
else:
yield (a, b)
################################################################################
@staticmethod
def syn_encode(syn_pairs):
r"""Return a representation of the
list of (synrel, index) pairs `syn_pairs`.
The result can be assigned to a Word's `syn` attribute.
"""
return ";".join("{}:{}".format(rel, index+1)
for (rel, index) in syn_pairs)
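    # Hedged round-trip sketch for the dependency encoding: syn_encode()
    # stores 1-based head indices in the string, while syn_iter() yields
    # them back 0-based.
    #
    #     >>> w = Word(surface="cat")
    #     >>> w.syn = Word.syn_encode([("det", 0), ("subj", 2)])
    #     >>> w.syn
    #     'det:1;subj:3'
    #     >>> list(w.syn_iter())
    #     [('det', 0), ('subj', 2)]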
| gpl-3.0 | 4,862,443,331,162,582,000 | 44.01573 | 102 | 0.534295 | false | 4.598714 | false | false | false |
Rademade/taiga-back | tests/integration/resources_permissions/test_projects_choices_resources.py | 3 | 80358 | from django.core.urlresolvers import reverse
from taiga.base.utils import json
from taiga.projects import choices as project_choices
from taiga.projects import serializers
from taiga.users.serializers import RoleSerializer
from taiga.permissions.permissions import MEMBERS_PERMISSIONS
from tests import factories as f
from tests.utils import helper_test_http_method
import pytest
pytestmark = pytest.mark.django_db
@pytest.fixture
def data():
m = type("Models", (object,), {})
m.registered_user = f.UserFactory.create()
m.project_member_with_perms = f.UserFactory.create()
m.project_member_without_perms = f.UserFactory.create()
m.project_owner = f.UserFactory.create()
m.other_user = f.UserFactory.create()
m.superuser = f.UserFactory.create(is_superuser=True)
m.public_project = f.ProjectFactory(is_private=False,
anon_permissions=['view_project'],
public_permissions=['view_project'],
owner=m.project_owner)
m.private_project1 = f.ProjectFactory(is_private=True,
anon_permissions=['view_project'],
public_permissions=['view_project'],
owner=m.project_owner)
m.private_project2 = f.ProjectFactory(is_private=True,
anon_permissions=[],
public_permissions=[],
owner=m.project_owner)
m.blocked_project = f.ProjectFactory(is_private=True,
anon_permissions=[],
public_permissions=[],
owner=m.project_owner,
blocked_code=project_choices.BLOCKED_BY_STAFF)
m.public_membership = f.MembershipFactory(project=m.public_project,
user=m.project_member_with_perms,
email=m.project_member_with_perms.email,
role__project=m.public_project,
role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
m.private_membership1 = f.MembershipFactory(project=m.private_project1,
user=m.project_member_with_perms,
email=m.project_member_with_perms.email,
role__project=m.private_project1,
role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
f.MembershipFactory(project=m.private_project1,
user=m.project_member_without_perms,
email=m.project_member_without_perms.email,
role__project=m.private_project1,
role__permissions=[])
m.private_membership2 = f.MembershipFactory(project=m.private_project2,
user=m.project_member_with_perms,
email=m.project_member_with_perms.email,
role__project=m.private_project2,
role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
f.MembershipFactory(project=m.private_project2,
user=m.project_member_without_perms,
email=m.project_member_without_perms.email,
role__project=m.private_project2,
role__permissions=[])
m.blocked_membership = f.MembershipFactory(project=m.blocked_project,
user=m.project_member_with_perms,
role__project=m.blocked_project,
role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
f.MembershipFactory(project=m.blocked_project,
user=m.project_member_without_perms,
role__project=m.blocked_project,
role__permissions=[])
f.MembershipFactory(project=m.public_project,
user=m.project_owner,
is_admin=True)
f.MembershipFactory(project=m.private_project1,
user=m.project_owner,
is_admin=True)
f.MembershipFactory(project=m.private_project2,
user=m.project_owner,
is_admin=True)
f.MembershipFactory(project=m.blocked_project,
user=m.project_owner,
is_admin=True)
m.public_points = f.PointsFactory(project=m.public_project)
m.private_points1 = f.PointsFactory(project=m.private_project1)
m.private_points2 = f.PointsFactory(project=m.private_project2)
m.blocked_points = f.PointsFactory(project=m.blocked_project)
m.public_user_story_status = f.UserStoryStatusFactory(project=m.public_project)
m.private_user_story_status1 = f.UserStoryStatusFactory(project=m.private_project1)
m.private_user_story_status2 = f.UserStoryStatusFactory(project=m.private_project2)
m.blocked_user_story_status = f.UserStoryStatusFactory(project=m.blocked_project)
m.public_task_status = f.TaskStatusFactory(project=m.public_project)
m.private_task_status1 = f.TaskStatusFactory(project=m.private_project1)
m.private_task_status2 = f.TaskStatusFactory(project=m.private_project2)
m.blocked_task_status = f.TaskStatusFactory(project=m.blocked_project)
m.public_issue_status = f.IssueStatusFactory(project=m.public_project)
m.private_issue_status1 = f.IssueStatusFactory(project=m.private_project1)
m.private_issue_status2 = f.IssueStatusFactory(project=m.private_project2)
m.blocked_issue_status = f.IssueStatusFactory(project=m.blocked_project)
m.public_issue_type = f.IssueTypeFactory(project=m.public_project)
m.private_issue_type1 = f.IssueTypeFactory(project=m.private_project1)
m.private_issue_type2 = f.IssueTypeFactory(project=m.private_project2)
m.blocked_issue_type = f.IssueTypeFactory(project=m.blocked_project)
m.public_priority = f.PriorityFactory(project=m.public_project)
m.private_priority1 = f.PriorityFactory(project=m.private_project1)
m.private_priority2 = f.PriorityFactory(project=m.private_project2)
m.blocked_priority = f.PriorityFactory(project=m.blocked_project)
m.public_severity = f.SeverityFactory(project=m.public_project)
m.private_severity1 = f.SeverityFactory(project=m.private_project1)
m.private_severity2 = f.SeverityFactory(project=m.private_project2)
m.blocked_severity = f.SeverityFactory(project=m.blocked_project)
m.project_template = m.public_project.creation_template
return m
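# Note on the tests below (explanatory comment, not part of the original
# file): the `users` list is always ordered as [anonymous, registered user,
# member without permissions, member with permissions, project owner], and
# helper_test_http_method returns one HTTP status code per user in that same
# order. An expected list such as [401, 403, 403, 403, 200] therefore means
# "anonymous is unauthenticated, every non-admin member is forbidden and only
# the owner succeeds"; write operations on the staff-blocked project answer
# 451 even for the owner.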
def test_roles_retrieve(client, data):
public_url = reverse('roles-detail', kwargs={"pk": data.public_project.roles.all()[0].pk})
private1_url = reverse('roles-detail', kwargs={"pk": data.private_project1.roles.all()[0].pk})
private2_url = reverse('roles-detail', kwargs={"pk": data.private_project2.roles.all()[0].pk})
blocked_url = reverse('roles-detail', kwargs={"pk": data.blocked_project.roles.all()[0].pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'get', public_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private1_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private2_url, None, users)
assert results == [401, 403, 403, 200, 200]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [401, 403, 403, 200, 200]
def test_roles_update(client, data):
public_url = reverse('roles-detail', kwargs={"pk": data.public_project.roles.all()[0].pk})
private1_url = reverse('roles-detail', kwargs={"pk": data.private_project1.roles.all()[0].pk})
private2_url = reverse('roles-detail', kwargs={"pk": data.private_project2.roles.all()[0].pk})
blocked_url = reverse('roles-detail', kwargs={"pk": data.blocked_project.roles.all()[0].pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
role_data = RoleSerializer(data.public_project.roles.all()[0]).data
role_data["name"] = "test"
role_data = json.dumps(role_data)
results = helper_test_http_method(client, 'put', public_url, role_data, users)
assert results == [401, 403, 403, 403, 200]
role_data = RoleSerializer(data.private_project1.roles.all()[0]).data
role_data["name"] = "test"
role_data = json.dumps(role_data)
results = helper_test_http_method(client, 'put', private1_url, role_data, users)
assert results == [401, 403, 403, 403, 200]
role_data = RoleSerializer(data.private_project2.roles.all()[0]).data
role_data["name"] = "test"
role_data = json.dumps(role_data)
results = helper_test_http_method(client, 'put', private2_url, role_data, users)
assert results == [401, 403, 403, 403, 200]
role_data = RoleSerializer(data.blocked_project.roles.all()[0]).data
role_data["name"] = "test"
role_data = json.dumps(role_data)
results = helper_test_http_method(client, 'put', blocked_url, role_data, users)
assert results == [401, 403, 403, 403, 451]
def test_roles_delete(client, data):
public_url = reverse('roles-detail', kwargs={"pk": data.public_project.roles.all()[0].pk})
private1_url = reverse('roles-detail', kwargs={"pk": data.private_project1.roles.all()[0].pk})
private2_url = reverse('roles-detail', kwargs={"pk": data.private_project2.roles.all()[0].pk})
blocked_url = reverse('roles-detail', kwargs={"pk": data.blocked_project.roles.all()[0].pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'delete', public_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private1_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private2_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [401, 403, 403, 403, 451]
def test_roles_list(client, data):
url = reverse('roles-list')
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 3
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 3
assert response.status_code == 200
client.login(data.project_member_without_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 3
assert response.status_code == 200
client.login(data.project_member_with_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 7
assert response.status_code == 200
client.login(data.project_owner)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 7
assert response.status_code == 200
def test_roles_patch(client, data):
public_url = reverse('roles-detail', kwargs={"pk": data.public_project.roles.all()[0].pk})
private1_url = reverse('roles-detail', kwargs={"pk": data.private_project1.roles.all()[0].pk})
private2_url = reverse('roles-detail', kwargs={"pk": data.private_project2.roles.all()[0].pk})
blocked_url = reverse('roles-detail', kwargs={"pk": data.blocked_project.roles.all()[0].pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'patch', public_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private1_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private2_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', blocked_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 451]
def test_points_retrieve(client, data):
public_url = reverse('points-detail', kwargs={"pk": data.public_points.pk})
private1_url = reverse('points-detail', kwargs={"pk": data.private_points1.pk})
private2_url = reverse('points-detail', kwargs={"pk": data.private_points2.pk})
blocked_url = reverse('points-detail', kwargs={"pk": data.blocked_points.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'get', public_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private1_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private2_url, None, users)
assert results == [401, 403, 403, 200, 200]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [401, 403, 403, 200, 200]
def test_points_update(client, data):
public_url = reverse('points-detail', kwargs={"pk": data.public_points.pk})
private1_url = reverse('points-detail', kwargs={"pk": data.private_points1.pk})
private2_url = reverse('points-detail', kwargs={"pk": data.private_points2.pk})
blocked_url = reverse('points-detail', kwargs={"pk": data.blocked_points.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
points_data = serializers.PointsSerializer(data.public_points).data
points_data["name"] = "test"
points_data = json.dumps(points_data)
results = helper_test_http_method(client, 'put', public_url, points_data, users)
assert results == [401, 403, 403, 403, 200]
points_data = serializers.PointsSerializer(data.private_points1).data
points_data["name"] = "test"
points_data = json.dumps(points_data)
results = helper_test_http_method(client, 'put', private1_url, points_data, users)
assert results == [401, 403, 403, 403, 200]
points_data = serializers.PointsSerializer(data.private_points2).data
points_data["name"] = "test"
points_data = json.dumps(points_data)
results = helper_test_http_method(client, 'put', private2_url, points_data, users)
assert results == [401, 403, 403, 403, 200]
points_data = serializers.PointsSerializer(data.blocked_points).data
points_data["name"] = "test"
points_data = json.dumps(points_data)
results = helper_test_http_method(client, 'put', blocked_url, points_data, users)
assert results == [401, 403, 403, 403, 451]
def test_points_delete(client, data):
public_url = reverse('points-detail', kwargs={"pk": data.public_points.pk})
private1_url = reverse('points-detail', kwargs={"pk": data.private_points1.pk})
private2_url = reverse('points-detail', kwargs={"pk": data.private_points2.pk})
blocked_url = reverse('points-detail', kwargs={"pk": data.blocked_points.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'delete', public_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private1_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private2_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [401, 403, 403, 403, 451]
def test_points_list(client, data):
url = reverse('points-list')
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_without_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_with_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
client.login(data.project_owner)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
def test_points_patch(client, data):
public_url = reverse('points-detail', kwargs={"pk": data.public_points.pk})
private1_url = reverse('points-detail', kwargs={"pk": data.private_points1.pk})
private2_url = reverse('points-detail', kwargs={"pk": data.private_points2.pk})
blocked_url = reverse('points-detail', kwargs={"pk": data.blocked_points.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'patch', public_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private1_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private2_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', blocked_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 451]
def test_points_action_bulk_update_order(client, data):
url = reverse('points-bulk-update-order')
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
post_data = json.dumps({
"bulk_points": [(1, 2)],
"project": data.public_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_points": [(1, 2)],
"project": data.private_project1.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_points": [(1, 2)],
"project": data.private_project2.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_points": [(1, 2)],
"project": data.blocked_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 451]
def test_user_story_status_retrieve(client, data):
public_url = reverse('userstory-statuses-detail', kwargs={"pk": data.public_user_story_status.pk})
private1_url = reverse('userstory-statuses-detail', kwargs={"pk": data.private_user_story_status1.pk})
private2_url = reverse('userstory-statuses-detail', kwargs={"pk": data.private_user_story_status2.pk})
blocked_url = reverse('userstory-statuses-detail', kwargs={"pk": data.blocked_user_story_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'get', public_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private1_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private2_url, None, users)
assert results == [401, 403, 403, 200, 200]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [401, 403, 403, 200, 200]
def test_user_story_status_update(client, data):
public_url = reverse('userstory-statuses-detail', kwargs={"pk": data.public_user_story_status.pk})
private1_url = reverse('userstory-statuses-detail', kwargs={"pk": data.private_user_story_status1.pk})
private2_url = reverse('userstory-statuses-detail', kwargs={"pk": data.private_user_story_status2.pk})
blocked_url = reverse('userstory-statuses-detail', kwargs={"pk": data.blocked_user_story_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
user_story_status_data = serializers.UserStoryStatusSerializer(data.public_user_story_status).data
user_story_status_data["name"] = "test"
user_story_status_data = json.dumps(user_story_status_data)
results = helper_test_http_method(client, 'put', public_url, user_story_status_data, users)
assert results == [401, 403, 403, 403, 200]
user_story_status_data = serializers.UserStoryStatusSerializer(data.private_user_story_status1).data
user_story_status_data["name"] = "test"
user_story_status_data = json.dumps(user_story_status_data)
results = helper_test_http_method(client, 'put', private1_url, user_story_status_data, users)
assert results == [401, 403, 403, 403, 200]
user_story_status_data = serializers.UserStoryStatusSerializer(data.private_user_story_status2).data
user_story_status_data["name"] = "test"
user_story_status_data = json.dumps(user_story_status_data)
results = helper_test_http_method(client, 'put', private2_url, user_story_status_data, users)
assert results == [401, 403, 403, 403, 200]
user_story_status_data = serializers.UserStoryStatusSerializer(data.blocked_user_story_status).data
user_story_status_data["name"] = "test"
user_story_status_data = json.dumps(user_story_status_data)
results = helper_test_http_method(client, 'put', blocked_url, user_story_status_data, users)
assert results == [401, 403, 403, 403, 451]
def test_user_story_status_delete(client, data):
public_url = reverse('userstory-statuses-detail', kwargs={"pk": data.public_user_story_status.pk})
private1_url = reverse('userstory-statuses-detail', kwargs={"pk": data.private_user_story_status1.pk})
private2_url = reverse('userstory-statuses-detail', kwargs={"pk": data.private_user_story_status2.pk})
blocked_url = reverse('userstory-statuses-detail', kwargs={"pk": data.blocked_user_story_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'delete', public_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private1_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private2_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [401, 403, 403, 403, 451]
def test_user_story_status_list(client, data):
url = reverse('userstory-statuses-list')
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_without_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_with_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
client.login(data.project_owner)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
def test_user_story_status_patch(client, data):
public_url = reverse('userstory-statuses-detail', kwargs={"pk": data.public_user_story_status.pk})
private1_url = reverse('userstory-statuses-detail', kwargs={"pk": data.private_user_story_status1.pk})
private2_url = reverse('userstory-statuses-detail', kwargs={"pk": data.private_user_story_status2.pk})
blocked_url = reverse('userstory-statuses-detail', kwargs={"pk": data.blocked_user_story_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'patch', public_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private1_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private2_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', blocked_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 451]
def test_user_story_status_action_bulk_update_order(client, data):
url = reverse('userstory-statuses-bulk-update-order')
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
post_data = json.dumps({
"bulk_userstory_statuses": [(1, 2)],
"project": data.public_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_userstory_statuses": [(1, 2)],
"project": data.private_project1.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_userstory_statuses": [(1, 2)],
"project": data.private_project2.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_userstory_statuses": [(1, 2)],
"project": data.blocked_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 451]
def test_task_status_retrieve(client, data):
public_url = reverse('task-statuses-detail', kwargs={"pk": data.public_task_status.pk})
private1_url = reverse('task-statuses-detail', kwargs={"pk": data.private_task_status1.pk})
private2_url = reverse('task-statuses-detail', kwargs={"pk": data.private_task_status2.pk})
blocked_url = reverse('task-statuses-detail', kwargs={"pk": data.blocked_task_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'get', public_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private1_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private2_url, None, users)
assert results == [401, 403, 403, 200, 200]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [401, 403, 403, 200, 200]
def test_task_status_update(client, data):
public_url = reverse('task-statuses-detail', kwargs={"pk": data.public_task_status.pk})
private1_url = reverse('task-statuses-detail', kwargs={"pk": data.private_task_status1.pk})
private2_url = reverse('task-statuses-detail', kwargs={"pk": data.private_task_status2.pk})
blocked_url = reverse('task-statuses-detail', kwargs={"pk": data.blocked_task_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
task_status_data = serializers.TaskStatusSerializer(data.public_task_status).data
task_status_data["name"] = "test"
task_status_data = json.dumps(task_status_data)
results = helper_test_http_method(client, 'put', public_url, task_status_data, users)
assert results == [401, 403, 403, 403, 200]
task_status_data = serializers.TaskStatusSerializer(data.private_task_status1).data
task_status_data["name"] = "test"
task_status_data = json.dumps(task_status_data)
results = helper_test_http_method(client, 'put', private1_url, task_status_data, users)
assert results == [401, 403, 403, 403, 200]
task_status_data = serializers.TaskStatusSerializer(data.private_task_status2).data
task_status_data["name"] = "test"
task_status_data = json.dumps(task_status_data)
results = helper_test_http_method(client, 'put', private2_url, task_status_data, users)
assert results == [401, 403, 403, 403, 200]
task_status_data = serializers.TaskStatusSerializer(data.blocked_task_status).data
task_status_data["name"] = "test"
task_status_data = json.dumps(task_status_data)
results = helper_test_http_method(client, 'put', blocked_url, task_status_data, users)
assert results == [401, 403, 403, 403, 451]
def test_task_status_delete(client, data):
public_url = reverse('task-statuses-detail', kwargs={"pk": data.public_task_status.pk})
private1_url = reverse('task-statuses-detail', kwargs={"pk": data.private_task_status1.pk})
private2_url = reverse('task-statuses-detail', kwargs={"pk": data.private_task_status2.pk})
blocked_url = reverse('task-statuses-detail', kwargs={"pk": data.blocked_task_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'delete', public_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private1_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private2_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [401, 403, 403, 403, 451]
def test_task_status_list(client, data):
url = reverse('task-statuses-list')
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_without_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_with_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
client.login(data.project_owner)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
def test_task_status_patch(client, data):
public_url = reverse('task-statuses-detail', kwargs={"pk": data.public_task_status.pk})
private1_url = reverse('task-statuses-detail', kwargs={"pk": data.private_task_status1.pk})
private2_url = reverse('task-statuses-detail', kwargs={"pk": data.private_task_status2.pk})
blocked_url = reverse('task-statuses-detail', kwargs={"pk": data.blocked_task_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'patch', public_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private1_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private2_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', blocked_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 451]
def test_task_status_action_bulk_update_order(client, data):
url = reverse('task-statuses-bulk-update-order')
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
post_data = json.dumps({
"bulk_task_statuses": [(1, 2)],
"project": data.public_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_task_statuses": [(1, 2)],
"project": data.private_project1.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_task_statuses": [(1, 2)],
"project": data.private_project2.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_task_statuses": [(1, 2)],
"project": data.blocked_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 451]
def test_issue_status_retrieve(client, data):
public_url = reverse('issue-statuses-detail', kwargs={"pk": data.public_issue_status.pk})
private1_url = reverse('issue-statuses-detail', kwargs={"pk": data.private_issue_status1.pk})
private2_url = reverse('issue-statuses-detail', kwargs={"pk": data.private_issue_status2.pk})
blocked_url = reverse('issue-statuses-detail', kwargs={"pk": data.blocked_issue_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'get', public_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private1_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private2_url, None, users)
assert results == [401, 403, 403, 200, 200]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [401, 403, 403, 200, 200]
def test_issue_status_update(client, data):
public_url = reverse('issue-statuses-detail', kwargs={"pk": data.public_issue_status.pk})
private1_url = reverse('issue-statuses-detail', kwargs={"pk": data.private_issue_status1.pk})
private2_url = reverse('issue-statuses-detail', kwargs={"pk": data.private_issue_status2.pk})
blocked_url = reverse('issue-statuses-detail', kwargs={"pk": data.blocked_issue_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
issue_status_data = serializers.IssueStatusSerializer(data.public_issue_status).data
issue_status_data["name"] = "test"
issue_status_data = json.dumps(issue_status_data)
results = helper_test_http_method(client, 'put', public_url, issue_status_data, users)
assert results == [401, 403, 403, 403, 200]
issue_status_data = serializers.IssueStatusSerializer(data.private_issue_status1).data
issue_status_data["name"] = "test"
issue_status_data = json.dumps(issue_status_data)
results = helper_test_http_method(client, 'put', private1_url, issue_status_data, users)
assert results == [401, 403, 403, 403, 200]
issue_status_data = serializers.IssueStatusSerializer(data.private_issue_status2).data
issue_status_data["name"] = "test"
issue_status_data = json.dumps(issue_status_data)
results = helper_test_http_method(client, 'put', private2_url, issue_status_data, users)
assert results == [401, 403, 403, 403, 200]
issue_status_data = serializers.IssueStatusSerializer(data.blocked_issue_status).data
issue_status_data["name"] = "test"
issue_status_data = json.dumps(issue_status_data)
results = helper_test_http_method(client, 'put', blocked_url, issue_status_data, users)
assert results == [401, 403, 403, 403, 451]
def test_issue_status_delete(client, data):
public_url = reverse('issue-statuses-detail', kwargs={"pk": data.public_issue_status.pk})
private1_url = reverse('issue-statuses-detail', kwargs={"pk": data.private_issue_status1.pk})
private2_url = reverse('issue-statuses-detail', kwargs={"pk": data.private_issue_status2.pk})
blocked_url = reverse('issue-statuses-detail', kwargs={"pk": data.blocked_issue_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'delete', public_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private1_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private2_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [401, 403, 403, 403, 451]
def test_issue_status_list(client, data):
url = reverse('issue-statuses-list')
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_without_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_with_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
client.login(data.project_owner)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
def test_issue_status_patch(client, data):
public_url = reverse('issue-statuses-detail', kwargs={"pk": data.public_issue_status.pk})
private1_url = reverse('issue-statuses-detail', kwargs={"pk": data.private_issue_status1.pk})
private2_url = reverse('issue-statuses-detail', kwargs={"pk": data.private_issue_status2.pk})
blocked_url = reverse('issue-statuses-detail', kwargs={"pk": data.blocked_issue_status.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'patch', public_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private1_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private2_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', blocked_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 451]
def test_issue_status_action_bulk_update_order(client, data):
url = reverse('issue-statuses-bulk-update-order')
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
post_data = json.dumps({
"bulk_issue_statuses": [(1, 2)],
"project": data.public_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_issue_statuses": [(1, 2)],
"project": data.private_project1.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_issue_statuses": [(1, 2)],
"project": data.private_project2.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_issue_statuses": [(1, 2)],
"project": data.blocked_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 451]
def test_issue_type_retrieve(client, data):
public_url = reverse('issue-types-detail', kwargs={"pk": data.public_issue_type.pk})
private1_url = reverse('issue-types-detail', kwargs={"pk": data.private_issue_type1.pk})
private2_url = reverse('issue-types-detail', kwargs={"pk": data.private_issue_type2.pk})
blocked_url = reverse('issue-types-detail', kwargs={"pk": data.blocked_issue_type.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'get', public_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private1_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private2_url, None, users)
assert results == [401, 403, 403, 200, 200]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [401, 403, 403, 200, 200]
def test_issue_type_update(client, data):
public_url = reverse('issue-types-detail', kwargs={"pk": data.public_issue_type.pk})
private1_url = reverse('issue-types-detail', kwargs={"pk": data.private_issue_type1.pk})
private2_url = reverse('issue-types-detail', kwargs={"pk": data.private_issue_type2.pk})
blocked_url = reverse('issue-types-detail', kwargs={"pk": data.blocked_issue_type.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
issue_type_data = serializers.IssueTypeSerializer(data.public_issue_type).data
issue_type_data["name"] = "test"
issue_type_data = json.dumps(issue_type_data)
results = helper_test_http_method(client, 'put', public_url, issue_type_data, users)
assert results == [401, 403, 403, 403, 200]
issue_type_data = serializers.IssueTypeSerializer(data.private_issue_type1).data
issue_type_data["name"] = "test"
issue_type_data = json.dumps(issue_type_data)
results = helper_test_http_method(client, 'put', private1_url, issue_type_data, users)
assert results == [401, 403, 403, 403, 200]
issue_type_data = serializers.IssueTypeSerializer(data.private_issue_type2).data
issue_type_data["name"] = "test"
issue_type_data = json.dumps(issue_type_data)
results = helper_test_http_method(client, 'put', private2_url, issue_type_data, users)
assert results == [401, 403, 403, 403, 200]
issue_type_data = serializers.IssueTypeSerializer(data.blocked_issue_type).data
issue_type_data["name"] = "test"
issue_type_data = json.dumps(issue_type_data)
results = helper_test_http_method(client, 'put', blocked_url, issue_type_data, users)
assert results == [401, 403, 403, 403, 451]
def test_issue_type_delete(client, data):
public_url = reverse('issue-types-detail', kwargs={"pk": data.public_issue_type.pk})
private1_url = reverse('issue-types-detail', kwargs={"pk": data.private_issue_type1.pk})
private2_url = reverse('issue-types-detail', kwargs={"pk": data.private_issue_type2.pk})
blocked_url = reverse('issue-types-detail', kwargs={"pk": data.blocked_issue_type.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'delete', public_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private1_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private2_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [401, 403, 403, 403, 451]
def test_issue_type_list(client, data):
url = reverse('issue-types-list')
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_without_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_with_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
client.login(data.project_owner)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
def test_issue_type_patch(client, data):
public_url = reverse('issue-types-detail', kwargs={"pk": data.public_issue_type.pk})
private1_url = reverse('issue-types-detail', kwargs={"pk": data.private_issue_type1.pk})
private2_url = reverse('issue-types-detail', kwargs={"pk": data.private_issue_type2.pk})
blocked_url = reverse('issue-types-detail', kwargs={"pk": data.blocked_issue_type.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'patch', public_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private1_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private2_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', blocked_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 451]
def test_issue_type_action_bulk_update_order(client, data):
url = reverse('issue-types-bulk-update-order')
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
post_data = json.dumps({
"bulk_issue_types": [(1, 2)],
"project": data.public_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_issue_types": [(1, 2)],
"project": data.private_project1.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_issue_types": [(1, 2)],
"project": data.private_project2.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_issue_types": [(1, 2)],
"project": data.blocked_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 451]
def test_priority_retrieve(client, data):
public_url = reverse('priorities-detail', kwargs={"pk": data.public_priority.pk})
private1_url = reverse('priorities-detail', kwargs={"pk": data.private_priority1.pk})
private2_url = reverse('priorities-detail', kwargs={"pk": data.private_priority2.pk})
blocked_url = reverse('priorities-detail', kwargs={"pk": data.blocked_priority.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'get', public_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private1_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private2_url, None, users)
assert results == [401, 403, 403, 200, 200]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [401, 403, 403, 200, 200]
def test_priority_update(client, data):
public_url = reverse('priorities-detail', kwargs={"pk": data.public_priority.pk})
private1_url = reverse('priorities-detail', kwargs={"pk": data.private_priority1.pk})
private2_url = reverse('priorities-detail', kwargs={"pk": data.private_priority2.pk})
blocked_url = reverse('priorities-detail', kwargs={"pk": data.blocked_priority.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
priority_data = serializers.PrioritySerializer(data.public_priority).data
priority_data["name"] = "test"
priority_data = json.dumps(priority_data)
results = helper_test_http_method(client, 'put', public_url, priority_data, users)
assert results == [401, 403, 403, 403, 200]
priority_data = serializers.PrioritySerializer(data.private_priority1).data
priority_data["name"] = "test"
priority_data = json.dumps(priority_data)
results = helper_test_http_method(client, 'put', private1_url, priority_data, users)
assert results == [401, 403, 403, 403, 200]
priority_data = serializers.PrioritySerializer(data.private_priority2).data
priority_data["name"] = "test"
priority_data = json.dumps(priority_data)
results = helper_test_http_method(client, 'put', private2_url, priority_data, users)
assert results == [401, 403, 403, 403, 200]
priority_data = serializers.PrioritySerializer(data.blocked_priority).data
priority_data["name"] = "test"
priority_data = json.dumps(priority_data)
results = helper_test_http_method(client, 'put', blocked_url, priority_data, users)
assert results == [401, 403, 403, 403, 451]
def test_priority_delete(client, data):
public_url = reverse('priorities-detail', kwargs={"pk": data.public_priority.pk})
private1_url = reverse('priorities-detail', kwargs={"pk": data.private_priority1.pk})
private2_url = reverse('priorities-detail', kwargs={"pk": data.private_priority2.pk})
blocked_url = reverse('priorities-detail', kwargs={"pk": data.blocked_priority.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'delete', public_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private1_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private2_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [401, 403, 403, 403, 451]
def test_priority_list(client, data):
url = reverse('priorities-list')
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_without_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_with_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
client.login(data.project_owner)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
def test_priority_patch(client, data):
public_url = reverse('priorities-detail', kwargs={"pk": data.public_priority.pk})
private1_url = reverse('priorities-detail', kwargs={"pk": data.private_priority1.pk})
private2_url = reverse('priorities-detail', kwargs={"pk": data.private_priority2.pk})
blocked_url = reverse('priorities-detail', kwargs={"pk": data.blocked_priority.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'patch', public_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private1_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private2_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', blocked_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 451]
def test_priority_action_bulk_update_order(client, data):
url = reverse('priorities-bulk-update-order')
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
post_data = json.dumps({
"bulk_priorities": [(1, 2)],
"project": data.public_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_priorities": [(1, 2)],
"project": data.private_project1.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_priorities": [(1, 2)],
"project": data.private_project2.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_priorities": [(1, 2)],
"project": data.blocked_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 451]
def test_severity_retrieve(client, data):
public_url = reverse('severities-detail', kwargs={"pk": data.public_severity.pk})
private1_url = reverse('severities-detail', kwargs={"pk": data.private_severity1.pk})
private2_url = reverse('severities-detail', kwargs={"pk": data.private_severity2.pk})
blocked_url = reverse('severities-detail', kwargs={"pk": data.blocked_severity.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'get', public_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private1_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private2_url, None, users)
assert results == [401, 403, 403, 200, 200]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [401, 403, 403, 200, 200]
def test_severity_update(client, data):
public_url = reverse('severities-detail', kwargs={"pk": data.public_severity.pk})
private1_url = reverse('severities-detail', kwargs={"pk": data.private_severity1.pk})
private2_url = reverse('severities-detail', kwargs={"pk": data.private_severity2.pk})
blocked_url = reverse('severities-detail', kwargs={"pk": data.blocked_severity.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
severity_data = serializers.SeveritySerializer(data.public_severity).data
severity_data["name"] = "test"
severity_data = json.dumps(severity_data)
results = helper_test_http_method(client, 'put', public_url, severity_data, users)
assert results == [401, 403, 403, 403, 200]
severity_data = serializers.SeveritySerializer(data.private_severity1).data
severity_data["name"] = "test"
severity_data = json.dumps(severity_data)
results = helper_test_http_method(client, 'put', private1_url, severity_data, users)
assert results == [401, 403, 403, 403, 200]
severity_data = serializers.SeveritySerializer(data.private_severity2).data
severity_data["name"] = "test"
severity_data = json.dumps(severity_data)
results = helper_test_http_method(client, 'put', private2_url, severity_data, users)
assert results == [401, 403, 403, 403, 200]
severity_data = serializers.SeveritySerializer(data.blocked_severity).data
severity_data["name"] = "test"
severity_data = json.dumps(severity_data)
results = helper_test_http_method(client, 'put', blocked_url, severity_data, users)
assert results == [401, 403, 403, 403, 451]
def test_severity_delete(client, data):
public_url = reverse('severities-detail', kwargs={"pk": data.public_severity.pk})
private1_url = reverse('severities-detail', kwargs={"pk": data.private_severity1.pk})
private2_url = reverse('severities-detail', kwargs={"pk": data.private_severity2.pk})
blocked_url = reverse('severities-detail', kwargs={"pk": data.blocked_severity.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'delete', public_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private1_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private2_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [401, 403, 403, 403, 451]
def test_severity_list(client, data):
url = reverse('severities-list')
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_without_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 2
assert response.status_code == 200
client.login(data.project_member_with_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
client.login(data.project_owner)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 4
assert response.status_code == 200
def test_severity_patch(client, data):
public_url = reverse('severities-detail', kwargs={"pk": data.public_severity.pk})
private1_url = reverse('severities-detail', kwargs={"pk": data.private_severity1.pk})
private2_url = reverse('severities-detail', kwargs={"pk": data.private_severity2.pk})
blocked_url = reverse('severities-detail', kwargs={"pk": data.blocked_severity.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'patch', public_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private1_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private2_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', blocked_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 451]
def test_severity_action_bulk_update_order(client, data):
url = reverse('severities-bulk-update-order')
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
post_data = json.dumps({
"bulk_severities": [(1, 2)],
"project": data.public_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_severities": [(1, 2)],
"project": data.private_project1.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_severities": [(1, 2)],
"project": data.private_project2.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 204]
post_data = json.dumps({
"bulk_severities": [(1, 2)],
"project": data.blocked_project.pk
})
results = helper_test_http_method(client, 'post', url, post_data, users)
assert results == [401, 403, 403, 403, 451]
def test_membership_retrieve(client, data):
public_url = reverse('memberships-detail', kwargs={"pk": data.public_membership.pk})
private1_url = reverse('memberships-detail', kwargs={"pk": data.private_membership1.pk})
private2_url = reverse('memberships-detail', kwargs={"pk": data.private_membership2.pk})
blocked_url = reverse('memberships-detail', kwargs={"pk": data.blocked_membership.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'get', public_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private1_url, None, users)
assert results == [200, 200, 200, 200, 200]
results = helper_test_http_method(client, 'get', private2_url, None, users)
assert results == [401, 403, 403, 200, 200]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [401, 403, 403, 200, 200]
def test_membership_update(client, data):
public_url = reverse('memberships-detail', kwargs={"pk": data.public_membership.pk})
private1_url = reverse('memberships-detail', kwargs={"pk": data.private_membership1.pk})
private2_url = reverse('memberships-detail', kwargs={"pk": data.private_membership2.pk})
blocked_url = reverse('memberships-detail', kwargs={"pk": data.blocked_membership.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
membership_data = serializers.MembershipSerializer(data.public_membership).data
membership_data["token"] = "test"
membership_data = json.dumps(membership_data)
results = helper_test_http_method(client, 'put', public_url, membership_data, users)
assert results == [401, 403, 403, 403, 200]
membership_data = serializers.MembershipSerializer(data.private_membership1).data
membership_data["token"] = "test"
membership_data = json.dumps(membership_data)
results = helper_test_http_method(client, 'put', private1_url, membership_data, users)
assert results == [401, 403, 403, 403, 200]
membership_data = serializers.MembershipSerializer(data.private_membership2).data
membership_data["token"] = "test"
membership_data = json.dumps(membership_data)
results = helper_test_http_method(client, 'put', private2_url, membership_data, users)
assert results == [401, 403, 403, 403, 200]
membership_data = serializers.MembershipSerializer(data.blocked_membership).data
membership_data["token"] = "test"
membership_data = json.dumps(membership_data)
results = helper_test_http_method(client, 'put', blocked_url, membership_data, users)
assert results == [401, 403, 403, 403, 451]
def test_membership_delete(client, data):
public_url = reverse('memberships-detail', kwargs={"pk": data.public_membership.pk})
private1_url = reverse('memberships-detail', kwargs={"pk": data.private_membership1.pk})
private2_url = reverse('memberships-detail', kwargs={"pk": data.private_membership2.pk})
blocked_url = reverse('memberships-detail', kwargs={"pk": data.blocked_membership.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'delete', public_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private1_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', private2_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [401, 403, 403, 403, 451]
def test_membership_list(client, data):
url = reverse('memberships-list')
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 5
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 5
assert response.status_code == 200
client.login(data.project_member_without_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 5
assert response.status_code == 200
client.login(data.project_member_with_perms)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 11
assert response.status_code == 200
client.login(data.project_owner)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 11
assert response.status_code == 200
def test_membership_patch(client, data):
public_url = reverse('memberships-detail', kwargs={"pk": data.public_membership.pk})
private1_url = reverse('memberships-detail', kwargs={"pk": data.private_membership1.pk})
private2_url = reverse('memberships-detail', kwargs={"pk": data.private_membership2.pk})
blocked_url = reverse('memberships-detail', kwargs={"pk": data.blocked_membership.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'patch', public_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private1_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', private2_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'patch', blocked_url, '{"name": "Test"}', users)
assert results == [401, 403, 403, 403, 451]
def test_membership_create(client, data):
url = reverse('memberships-list')
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
membership_data = serializers.MembershipSerializer(data.public_membership).data
membership_data["id"] = None
membership_data["email"] = "[email protected]"
membership_data = json.dumps(membership_data)
results = helper_test_http_method(client, 'post', url, membership_data, users)
assert results == [401, 403, 403, 403, 201]
membership_data = serializers.MembershipSerializer(data.private_membership1).data
membership_data["id"] = None
membership_data["email"] = "[email protected]"
membership_data = json.dumps(membership_data)
results = helper_test_http_method(client, 'post', url, membership_data, users)
assert results == [401, 403, 403, 403, 201]
membership_data = serializers.MembershipSerializer(data.private_membership2).data
membership_data["id"] = None
membership_data["email"] = "[email protected]"
membership_data = json.dumps(membership_data)
results = helper_test_http_method(client, 'post', url, membership_data, users)
assert results == [401, 403, 403, 403, 201]
membership_data = serializers.MembershipSerializer(data.blocked_membership).data
membership_data["id"] = None
membership_data["email"] = "[email protected]"
membership_data = json.dumps(membership_data)
results = helper_test_http_method(client, 'post', url, membership_data, users)
assert results == [401, 403, 403, 403, 451]
def test_membership_action_bulk_create(client, data):
url = reverse('memberships-bulk-create')
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
bulk_data = {
"project_id": data.public_project.id,
"bulk_memberships": [
{"role_id": data.public_membership.role.pk, "email": "[email protected]"},
{"role_id": data.public_membership.role.pk, "email": "[email protected]"},
]
}
bulk_data = json.dumps(bulk_data)
results = helper_test_http_method(client, 'post', url, bulk_data, users)
assert results == [401, 403, 403, 403, 200]
bulk_data = {
"project_id": data.private_project1.id,
"bulk_memberships": [
{"role_id": data.private_membership1.role.pk, "email": "[email protected]"},
{"role_id": data.private_membership1.role.pk, "email": "[email protected]"},
]
}
bulk_data = json.dumps(bulk_data)
results = helper_test_http_method(client, 'post', url, bulk_data, users)
assert results == [401, 403, 403, 403, 200]
bulk_data = {
"project_id": data.private_project2.id,
"bulk_memberships": [
{"role_id": data.private_membership2.role.pk, "email": "[email protected]"},
{"role_id": data.private_membership2.role.pk, "email": "[email protected]"},
]
}
bulk_data = json.dumps(bulk_data)
results = helper_test_http_method(client, 'post', url, bulk_data, users)
assert results == [401, 403, 403, 403, 200]
bulk_data = {
"project_id": data.blocked_project.id,
"bulk_memberships": [
{"role_id": data.private_membership2.role.pk, "email": "[email protected]"},
{"role_id": data.private_membership2.role.pk, "email": "[email protected]"},
]
}
bulk_data = json.dumps(bulk_data)
results = helper_test_http_method(client, 'post', url, bulk_data, users)
assert results == [401, 403, 403, 403, 451]
def test_membership_action_resend_invitation(client, data):
public_invitation = f.InvitationFactory(project=data.public_project, role__project=data.public_project)
private_invitation1 = f.InvitationFactory(project=data.private_project1, role__project=data.private_project1)
private_invitation2 = f.InvitationFactory(project=data.private_project2, role__project=data.private_project2)
blocked_invitation = f.InvitationFactory(project=data.blocked_project, role__project=data.blocked_project)
public_url = reverse('memberships-resend-invitation', kwargs={"pk": public_invitation.pk})
private1_url = reverse('memberships-resend-invitation', kwargs={"pk": private_invitation1.pk})
private2_url = reverse('memberships-resend-invitation', kwargs={"pk": private_invitation2.pk})
blocked_url = reverse('memberships-resend-invitation', kwargs={"pk": blocked_invitation.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'post', public_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'post', private1_url, None, users)
assert results == [401, 403, 403, 403, 204]
results = helper_test_http_method(client, 'post', private2_url, None, users)
assert results == [404, 404, 404, 403, 204]
results = helper_test_http_method(client, 'post', blocked_url, None, users)
assert results == [404, 404, 404, 403, 451]
def test_project_template_retrieve(client, data):
url = reverse('project-templates-detail', kwargs={"pk": data.project_template.pk})
users = [
None,
data.registered_user,
data.superuser,
]
results = helper_test_http_method(client, 'get', url, None, users)
assert results == [200, 200, 200]
def test_project_template_update(client, data):
url = reverse('project-templates-detail', kwargs={"pk": data.project_template.pk})
users = [
None,
data.registered_user,
data.superuser,
]
project_template_data = serializers.ProjectTemplateSerializer(data.project_template).data
project_template_data["default_owner_role"] = "test"
project_template_data = json.dumps(project_template_data)
results = helper_test_http_method(client, 'put', url, project_template_data, users)
assert results == [401, 403, 200]
def test_project_template_delete(client, data):
url = reverse('project-templates-detail', kwargs={"pk": data.project_template.pk})
users = [
None,
data.registered_user,
data.superuser,
]
results = helper_test_http_method(client, 'delete', url, None, users)
assert results == [401, 403, 204]
def test_project_template_list(client, data):
url = reverse('project-templates-list')
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 1
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 1
assert response.status_code == 200
client.login(data.superuser)
response = client.get(url)
projects_data = json.loads(response.content.decode('utf-8'))
assert len(projects_data) == 1
assert response.status_code == 200
def test_project_template_patch(client, data):
url = reverse('project-templates-detail', kwargs={"pk": data.project_template.pk})
users = [
None,
data.registered_user,
data.superuser,
]
results = helper_test_http_method(client, 'patch', url, '{"name": "Test"}', users)
assert results == [401, 403, 200]
| agpl-3.0 | -8,109,246,578,482,246,000 | 41.028243 | 113 | 0.652343 | false | 3.490033 | true | false | false |
jdevera/imex | src/imex/metadataeditor.py | 1 | 10950 | import imex
from imex.metadata import Tag, ImageMetadata
class MetadataEditor(object):
def __init__(self, rules, keep_timestamps = True, **kwargs):
"""
Supported keyword arguments:
* debug
* dry_run
"""
self._keep_timestamps = keep_timestamps
self._debug = kwargs.pop('debug', False)
self._dry_run = kwargs.pop('dry_run', False)
self._rules = rules
    def apply_rule(self, image_metadata, rule):
        """
        Apply a single rule (a mapping of new tag names to their value
        specifications) to the given image metadata. Return True if any tag
        value was changed.
        """
        log = imex.log
changed = False
for new_tag_name in rule:
new_tag_value = rule[new_tag_name]
if not new_tag_name in image_metadata:
changed = True
image_metadata[new_tag_name] = Tag(new_tag_name)
new_tag = image_metadata[new_tag_name] # Just a convenience alias.
if new_tag.repeatable:
# Separate the values to be added from the values to be deleted
add_list, del_list = self._rules.parse_repeatable_tag_values(new_tag_value)
# # -------------------------------------------------------------------------
# # Deferred deletion of value new_tag_value for new_tag_name:
# # If the new tag is the same as the matching tag and its matching value was
# # set for deletion, add this value to the list of values to delete.
# # -------------------------------------------------------------------------
# if new_tag_name == search_tag_name and rules.must_remove(search_tag_name, search_tag_value):
# del_list.append(search_tag_value)
# log.qdebug(' Deferred removal of value \'{0}\' for tag {1}'.format(search_tag_value, search_tag_name))
if add_list:
log.qdebug(' Adding values \'{0}\' to tag {1}'.format(', '.join(add_list), new_tag_name))
if del_list:
log.qdebug(' Deleting values \'{0}\' from tag {1}'.format(', '.join(del_list), new_tag_name))
# Add and delete (in this order) the new values from the current rule
if new_tag.combine_raw_values(add_list, del_list):
changed = True
log.dump()
else:
log.clear()
else:
# For non-repeatable tags, simply set the new value (this will take care of
# deferred removal, too).
new_adjusted_tag_value = [new_tag_value] if new_tag.is_iptc() else new_tag_value
if new_tag.raw_value != new_adjusted_tag_value:
log.dump()
log.debug(' Setting new value \'{0}\' for tag {1}'.format(new_tag_value, new_tag_name))
new_tag.raw_value = new_adjusted_tag_value
changed = True
log.clear()
return changed
def process_image(self, image_filename, rules):
"""
Find all matching tags in an image's metadata and apply changes according
to the given set of rules.
This is the structure of a rule:
search_tag_name : search_tag_value : (new_tag_name : new_tag_value)
And it is read as: if the *search_tag_name* tag is found on the image with
a value of *search_tag_value*, then set the value of each *new_tag_name* to
its corresponding *new_tag_value*
A search_tag_value can be set for removal once it has been found.
"""
log = imex.log
log.info('Processing {0}'.format(image_filename))
imd = ImageMetadata(image_filename)
imd.read()
log.qdebug(' Applying default assignment')
need_write = self.apply_rule(imd, rules.default_rule)
# Tags that are present in the current image and have an associated rule
matching_tags = rules.get_matching_tags(imd)
for search_tag_name in matching_tags:
for search_tag_value in rules.get_search_tag_values(search_tag_name):
# --------------------------------------------------------------------------------
# Skip this search_tag_value if it is not one of the values of the search_tag_name
# tag in the current image
# --------------------------------------------------------------------------------
if not imd[search_tag_name].has_raw_value(search_tag_value):
continue
log.debug(' Found match: value \'{0}\' for tag {1}'.format(search_tag_value, search_tag_name))
# --------------------------------------------------------------------------------
# The current search_tag_value can be marked for removal in the rules.
#
# We will normally delete the value right away, but if the same search_tag_name is
# going to be modified as part of this rule, defer this deletion.
#
# In the case of a non-repeatable tag, the value will simply be replaced with the
# new one. If it is a repeatable tag, we'll simply add search_tag_value to the
# list of values to delete
# --------------------------------------------------------------------------------
if rules.must_remove(search_tag_name, search_tag_value):
# Remove now if we are not touching this search_tag_name in
# the current rule
if search_tag_name not in rules.get_new_tag_names(search_tag_name, search_tag_value):
if imd[search_tag_name].repeatable:
# If the list is empty, the tag will be deleted when
# the metadata is written
imd[search_tag_name].combine_raw_values([], [search_tag_value])
else:
del imd[search_tag_name]
log.debug(' Removed value \'{0}\' for tag {1}'.format(search_tag_value, search_tag_name))
# ------------------------------------------------------------------------------
# The current image has a search_tag_name tag and its value is search_tag_value,
# now set all new_tag_names to their corresponding new_tag_values
# ------------------------------------------------------------------------------
for new_tag_name in rules.get_new_tag_names(search_tag_name, search_tag_value):
# Track any changes, only then we will need to run the rules again
changed = False
new_tag_value = rules.get_new_tag_value(search_tag_name, search_tag_value, new_tag_name)
# Add the new tag if it is not already present in the image. We will set it's
# value later.
if not new_tag_name in imd:
changed = True
imd[new_tag_name] = Tag(new_tag_name)
new_tag = imd[new_tag_name] # Just a convenience alias.
if new_tag.repeatable:
# Separate the values to be added from the values to be deleted
add_list, del_list = rules.parse_repeatable_tag_values(new_tag_value)
# -------------------------------------------------------------------------
# Deferred deletion of value new_tag_value for new_tag_name:
# If the new tag is the same as the matching tag and its matching value was
# set for deletion, add this value to the list of values to delete.
# -------------------------------------------------------------------------
if new_tag_name == search_tag_name and rules.must_remove(search_tag_name, search_tag_value):
del_list.append(search_tag_value)
log.qdebug(' Deferred removal of value \'{0}\' for tag {1}'.format(search_tag_value,
search_tag_name))
if add_list:
log.qdebug(' Adding values \'{0}\' to tag {1}'.format(', '.join(add_list),
new_tag_name))
if del_list:
log.qdebug(' Deleting values \'{0}\' from tag {1}'.format(', '.join(del_list),
new_tag_name))
# Add and delete (in this order) the new values from the current rule
if new_tag.combine_raw_values(add_list, del_list):
changed = True
log.dump()
else:
log.clear()
else:
# For non-repeatable tags, simply set the new value (this will take care of
# deferred removal, too).
if new_tag.raw_value != new_tag_value:
log.debug(' Setting new value \'{0}\' for tag {1}'.format(new_tag_value, new_tag_name))
new_tag.raw_value = [new_tag_value] if new_tag.is_iptc() else new_tag_value
changed = True
if changed:
need_write = True
# ------------------------------------------------------------------------
# The current tag has changed, if there are any rules that have the
# current new_tag_name as their search_tag_name, then we need to apply the
# rules for that tag again, since some of their search_tag_value could
# match the new values.
# ------------------------------------------------------------------------
if new_tag_name in rules:
matching_tags.append(new_tag_name) # Extend the outermost for loop
log.debug(' **A matching tag has been modified. Revisiting all rules**')
# for new_tag_name
# for search_tag_value
# for search_tag_name
if need_write:
if self._dry_run:
log.debug(' Changes detected. File not saved (dry-run)')
else:
imd.write(self._keep_timestamps)
log.debug(' Changes saved')
else:
log.debug(' No changes detected')
log.debug('')
| mit | 4,928,579,276,155,820,000 | 50.408451 | 127 | 0.458813 | false | 4.828042 | false | false | false |
jonnybazookatone/adsws | adsws/api/discoverer/views.py | 1 | 3273 | from flask import request, current_app
from flask.ext.restful import Resource
from flask.ext.consulate import ConsulService
from urlparse import urljoin
import requests
import json
class ProxyView(Resource):
"""Proxies a request to a remote webservice"""
def __init__(self, endpoint, service_uri, deploy_path):
self.endpoint = endpoint
self.service_uri = service_uri
self.deploy_path = deploy_path
self.cs = None
if service_uri.startswith('consul://'):
self.cs = ConsulService(
service_uri,
nameservers=[current_app.config.get("CONSUL_DNS", "172.17.42.1")]
)
self.session = self.cs
else:
self.session = requests.Session()
@staticmethod
def get_body_data(request):
"""
Returns the correct payload data coming from the flask.Request object
"""
payload = request.get_json(silent=True)
if payload:
return json.dumps(payload)
return request.form or request.data
def dispatcher(self, **kwargs):
"""
Having a dispatch based on request.method solves being able to set up
ProxyViews on the same resource for different routes. However, it
limits the ability to scope a resouce on a per-method basis
"""
path = request.full_path.replace(self.deploy_path, '', 1)
path = path[1:] if path.startswith('/') else path
if self.cs is None:
ep = urljoin(self.service_uri, path)
else:
ep = path
resp = self.__getattribute__(request.method.lower())(ep, request)
        headers = {}
        if resp.headers:
            headers = {
                key: resp.headers[key]
                for key in current_app.config['REMOTE_PROXY_ALLOWED_HEADERS']
                if key in resp.headers
            }
if headers:
return resp.text, resp.status_code, headers
else:
return resp.text, resp.status_code
def get(self, ep, request):
"""
Proxy to remote GET endpoint, should be invoked via self.dispatcher()
"""
return self.session.get(ep, headers=request.headers)
def post(self, ep, request):
"""
Proxy to remote POST endpoint, should be invoked via self.dispatcher()
"""
if not isinstance(request.data, basestring):
request.data = json.dumps(request.data)
return self.session.post(
ep, data=ProxyView.get_body_data(request), headers=request.headers
)
def put(self, ep, request):
"""
Proxy to remote PUT endpoint, should be invoked via self.dispatcher()
"""
if not isinstance(request.data, basestring):
request.data = json.dumps(request.data)
return self.session.put(
ep, data=ProxyView.get_body_data(request), headers=request.headers
)
def delete(self, ep, request):
"""
        Proxy to remote DELETE endpoint, should be invoked via self.dispatcher()
"""
if not isinstance(request.data, basestring):
request.data = json.dumps(request.data)
return self.session.delete(
ep, data=ProxyView.get_body_data(request), headers=request.headers
)
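
# Minimal registration sketch (an assumption for illustration only -- the real
# adsws bootstrap wires these views up from its webservices configuration):
#
#     proxy = ProxyView(endpoint='example',
#                       service_uri='http://localhost:4000/',
#                       deploy_path='/example')
#     app.add_url_rule('/example/<path:path>', endpoint='example',
#                      view_func=proxy.dispatcher,
#                      methods=['GET', 'POST', 'PUT', 'DELETE'])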
| gpl-2.0 | -7,048,274,146,685,120,000 | 34.193548 | 139 | 0.606172 | false | 4.261719 | false | false | false |
digris/openbroadcast.org | website/apps/statistics/utils/xls_output_label.py | 2 | 4859 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import calendar
import xlsxwriter
from django.utils import timezone
from django.conf import settings
SITE_URL = getattr(settings, "SITE_URL")
ISRC_HINT_TEXT = """Please be aware that collecting societies will only distribute the earnings properly if an ISRC code is present."""
log = logging.getLogger(__name__)
def label_statistics_as_xls(label, years, title=None, output=None):
    ROW_LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
title = title or "Airplay Statistics: open broadcast radio"
output = output or "Airplay statistics - {}.xlsx".format(label.name)
log.info("output to: {}".format(output))
###################################################################
# workbook preparation
###################################################################
workbook = xlsxwriter.Workbook(output, {"in_memory": True})
workbook.set_properties(
{
"title": title,
"subject": title,
"author": "digris AG",
"company": "digris AG",
"created": timezone.now(),
}
)
###################################################################
# workbook style definitions
###################################################################
bold = workbook.add_format({"bold": True})
border_top = workbook.add_format({"bold": 1, "top": 1})
border_bottom = workbook.add_format({"bold": 1, "bottom": 1})
small = workbook.add_format({"font_size": 9, "italic": 1})
isrc_hint = workbook.add_format({"color": "red"})
###################################################################
# add statistics as sheet per year
###################################################################
for year in years:
start = year.get("start")
end = year.get("end")
objects = year.get("objects")
first_row = 7
last_row = len(objects) - 1 + first_row
total_events = sum([i.num_events for i in objects])
worksheet = workbook.add_worksheet("{}".format(start.year))
# Widen the first columns
worksheet.set_column("A:C", 32)
worksheet.set_column("D:D", 18)
        worksheet.set_row(0, 200)  # xlsxwriter rows are zero-indexed integers
worksheet.merge_range(
"A1:C1", "{} - {:%Y-%m-%d} - {:%Y-%m-%d}".format(title, start, end), bold
)
worksheet.merge_range("A2:C2", "Label: {}".format(label.name), bold)
worksheet.merge_range("A3:C3", "Total: {}".format(total_events), bold)
worksheet.merge_range("A4:C4", "{}".format(ISRC_HINT_TEXT), isrc_hint)
worksheet.merge_range("A5:C5", "File created: {}".format(timezone.now()), small)
worksheet.write("A{}".format(first_row), "Title", border_bottom)
worksheet.write("B{}".format(first_row), "Artist", border_bottom)
worksheet.write("C{}".format(first_row), "Release", border_bottom)
worksheet.write("D{}".format(first_row), "ISRC", border_bottom)
try:
header = [
calendar.month_name[dt.month]
for dt in [i[0] for i in objects[0].time_series]
]
except IndexError:
header = []
# write date (month) headers
for index, item in enumerate(header, start=4):
worksheet.write(first_row - 1, index, item, border_bottom)
# set column width
worksheet.set_column(index, index, 14)
# write entries
for index, item in enumerate(objects, start=first_row):
worksheet.write(index, 0, item.name)
worksheet.write(index, 1, item.artist.name)
worksheet.write(index, 2, item.release.name)
if item.isrc:
worksheet.write(index, 3, item.isrc)
else:
worksheet.write_url(
index,
3,
"{}{}".format(SITE_URL, item.get_edit_url()),
string="Add ISRC",
)
# add monthly numbers
for ts_index, ts_item in enumerate(
[ts[1] for ts in item.time_series], start=4
):
worksheet.write(index, ts_index, ts_item)
# add summs / formula
worksheet.merge_range(
"A{}:D{}".format(last_row + 2, last_row + 2), "Total", border_top
)
for index, item in enumerate(header, start=4):
letter = ROW_LETTERS[index]
formula = "=SUM({}{}:{}{})".format(
letter, first_row + 1, letter, last_row + 1
)
worksheet.write_formula(last_row + 1, index, formula, border_top)
# worksheet.merge_range('A{}:C{}'.format(last_row + 4, last_row + 4), '{}'.format(timezone.now()), small)
workbook.close()
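
# Illustrative call with hypothetical values; each entry in `years` is expected
# to carry a datetime range plus the aggregated media objects for that range:
#
#     years = [{
#         'start': datetime(2019, 1, 1),
#         'end': datetime(2019, 12, 31),
#         'objects': qs,  # items exposing name, artist, release, isrc,
#                         # num_events and a monthly time_series
#     }]
#     label_statistics_as_xls(label, years, output='/tmp/stats.xlsx')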
| gpl-3.0 | -7,084,791,290,015,005,000 | 33.707143 | 135 | 0.512657 | false | 4.009076 | false | false | false |
spcs/synaps | synaps/db/__init__.py | 1 | 33852 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012, 2013 Samsung SDS Co., LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
import pycassa
from datetime import datetime, timedelta
from pycassa import (types, create_index_clause, create_index_expression, EQ,
GT, GTE, LT, LTE)
import struct
import json
import pickle
from collections import OrderedDict
from synaps import flags
from synaps import log as logging
from synaps import utils
from synaps import exception
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class Cassandra(object):
STATISTICS = ["Sum", "SampleCount", "Average", "Minimum", "Maximum"]
def __init__(self, keyspace=None):
self.statistics_ttl = FLAGS.get('statistics_ttl')
self.ARCHIVE = map(lambda x: int(x) * 60,
FLAGS.get('statistics_archives'))
if not keyspace:
keyspace = FLAGS.get("cassandra_keyspace", "synaps_test")
serverlist = FLAGS.get("cassandra_server_list")
cassandra_timeout = FLAGS.get("cassandra_timeout")
self.pool = pycassa.ConnectionPool(keyspace, server_list=serverlist,
timeout=cassandra_timeout)
self.cf_metric = pycassa.ColumnFamily(self.pool, 'Metric')
self.scf_stat_archive = pycassa.ColumnFamily(self.pool, 'StatArchive')
self.cf_metric_alarm = pycassa.ColumnFamily(self.pool, 'MetricAlarm')
self.cf_alarm_history = pycassa.ColumnFamily(self.pool,
'AlarmHistory')
self.cf_alarm_counter = pycassa.ColumnFamily(self.pool,
'AlarmCounter')
self.cf_notification_group = pycassa.ColumnFamily(self.pool,
'NotificationGroup')
def delete_metric_alarm(self, alarm_key, project_id=None):
try:
if not project_id:
alarm = self.cf_metric_alarm.get(alarm_key)
project_id = alarm.get('project_id')
self.cf_metric_alarm.remove(alarm_key)
self.cf_alarm_counter.add(project_id, 'alarm_counter', -1)
except pycassa.NotFoundException:
LOG.info(_("alarm key %s is not deleted" % alarm_key))
def _describe_alarms_by_names(self, project_id, alarm_names):
for alarm_name in alarm_names:
expr_list = [
pycassa.create_index_expression("project_id", project_id),
pycassa.create_index_expression("alarm_name", alarm_name)
]
index_clause = pycassa.create_index_clause(expr_list)
items = self.cf_metric_alarm.get_indexed_slices(index_clause)
for k, v in items:
yield k, v
def get_alarm_by_name(self, project_id, alarm_name):
alarms = list(self._describe_alarms_by_names(project_id, [alarm_name]))
if alarms:
return alarms[0]
else:
return None
def describe_alarms(self, project_id, action_prefix=None,
alarm_name_prefix=None, alarm_names=None,
max_records=100, next_token=None, state_value=None):
"""
params:
project_id: string
action_prefix: TODO: not implemented yet.
alarm_name_prefix: string
alarm_names: string list
max_records: integer
next_token: string (uuid type)
state_value: string (OK | ALARM | INSUFFICIENT_DATA)
"""
if alarm_names:
return self._describe_alarms_by_names(project_id, alarm_names)
next_token = uuid.UUID(next_token) if next_token else ''
expr_list = []
prj_expr = create_index_expression("project_id", project_id)
expr_list.append(prj_expr)
if alarm_name_prefix:
expr_s = create_index_expression("alarm_name", alarm_name_prefix,
GTE)
expr_e = create_index_expression("alarm_name",
utils.prefix_end(alarm_name_prefix),
LT)
expr_list.append(expr_s)
expr_list.append(expr_e)
if state_value:
expr = create_index_expression("state_value", state_value)
expr_list.append(expr)
LOG.info("expr %s" % expr_list)
index_clause = create_index_clause(expr_list=expr_list,
start_key=next_token,
count=max_records)
items = self.cf_metric_alarm.get_indexed_slices(index_clause)
return items
def describe_alarms_for_metric(self, project_id, namespace, metric_name,
dimensions=None, period=None,
statistic=None, unit=None):
metric_key = self.get_metric_key(project_id, namespace, metric_name,
dimensions)
if not metric_key:
raise exception.InvalidParameterValue("no metric")
expr_list = [create_index_expression("metric_key", metric_key)]
if period:
expr = create_index_expression("period", int(period))
expr_list.append(expr)
if statistic:
expr = create_index_expression("statistic", statistic)
expr_list.append(expr)
if unit:
expr = create_index_expression("unit", unit)
expr_list.append(expr)
LOG.info("expr %s" % expr_list)
index_clause = pycassa.create_index_clause(expr_list)
items = self.cf_metric_alarm.get_indexed_slices(index_clause)
return items
def get_alarms_per_metric_count(self, project_id, namespace, metric_name,
dimensions=None):
alarms = self.describe_alarms_for_metric(project_id, namespace,
metric_name, dimensions)
return sum(1 for a in alarms)
def describe_alarm_history(self, project_id, alarm_name=None,
end_date=None, history_item_type=None,
max_records=100, next_token=None,
start_date=None):
"""
params:
project_id: string
alarm_name: string
end_date: datetime
history_item_type: string (ConfigurationUpdate | StateUpdate |
Action)
max_records: integer
next_token: string (uuid type)
start_date: datetime
"""
next_token = uuid.UUID(next_token) if next_token else ''
expr_list = [
pycassa.create_index_expression("project_id", project_id),
]
if alarm_name:
expr = create_index_expression("alarm_name", alarm_name)
expr_list.append(expr)
if end_date:
expr = create_index_expression("timestamp", end_date, LTE)
expr_list.append(expr)
if start_date:
expr = create_index_expression("timestamp", start_date, GTE)
expr_list.append(expr)
if history_item_type:
expr = create_index_expression("history_item_type",
history_item_type)
expr_list.append(expr)
index_clause = pycassa.create_index_clause(expr_list=expr_list,
start_key=next_token,
count=max_records)
items = self.cf_alarm_history.get_indexed_slices(index_clause)
return items
def get_metric_alarm_key(self, project_id, alarm_name):
expr_list = [
pycassa.create_index_expression("project_id", project_id),
pycassa.create_index_expression("alarm_name", alarm_name)
]
index_clause = pycassa.create_index_clause(expr_list)
items = self.cf_metric_alarm.get_indexed_slices(index_clause)
for k, v in items:
return k
return None
def get_metric_alarm(self, alarm_key):
ret = None
try:
ret = self.cf_metric_alarm.get(alarm_key)
except pycassa.NotFoundException:
pass
return ret
def delete_metric(self, key):
try:
expr_list = [create_index_expression("metric_key", key)]
index_clause = pycassa.create_index_clause(expr_list)
items = self.cf_metric_alarm.get_indexed_slices(index_clause)
for k, v in items:
project_id = v.get('project_id')
self.delete_metric_alarm(k, project_id)
self.scf_stat_archive.remove(key)
self.cf_metric.remove(key)
LOG.debug("metric is deleted(%s)" % str(key))
except pycassa.NotFoundException:
LOG.error("failed to delete metric(%s)" % str(key))
def get_metric_key(self, project_id, namespace, metric_name, dimensions):
dimensions = utils.pack_dimensions(dimensions)
expr_list = [
pycassa.create_index_expression("project_id", project_id),
pycassa.create_index_expression("name", metric_name),
pycassa.create_index_expression("namespace", namespace),
pycassa.create_index_expression("dimensions", dimensions)
]
index_clause = pycassa.create_index_clause(expr_list)
items = self.cf_metric.get_indexed_slices(index_clause)
for k, v in items:
return k
else:
return None
def get_metric_key_or_create(self, project_id, namespace, metric_name,
dimensions, unit='None'):
# get metric key
key = None
try:
key = self.get_metric_key(project_id, namespace, metric_name,
dimensions)
except Exception as e:
LOG.exception(e)
# or create metric
if not key:
json_dim = utils.pack_dimensions(dimensions)
key = utils.generate_metric_key(project_id, namespace, metric_name,
dimensions)
columns = {'project_id': project_id, 'namespace': namespace,
'name': metric_name, 'dimensions': json_dim,
'unit': unit or 'None',
'updated_timestamp': datetime.utcnow(),
'created_timestamp': datetime.utcnow()}
self.cf_metric.insert(key=key, columns=columns)
LOG.info("New metric is created (%s, %s)" % (key, columns))
return key
def get_metric_statistics(self, project_id, namespace, metric_name,
start_time, end_time, period, statistics,
dimensions=None):
        """
        Return the archived datapoints for each requested statistic of a
        metric as a list of dicts (one per statistic), keyed by timestamp.
        """
        def get_stat(key, super_column, column_start, column_end):
stat = {}
count = (column_end - column_start).total_seconds() / 60
try:
stat = self.scf_stat_archive.get(key,
super_column=super_column,
column_start=column_start,
column_finish=column_end,
column_count=count)
except pycassa.NotFoundException:
LOG.debug("data not found - %s %s %s %s" % (key, super_column,
column_start,
column_end))
return stat
# get metric key
key = self.get_metric_key(project_id, namespace, metric_name,
dimensions)
# or return {}
if not key:
return {}
statistics = map(utils.to_ascii, statistics)
stats = map(lambda x: get_stat(key, x, start_time, end_time),
statistics)
return stats
def get_metric_statistics_for_key(self, key, time_idx):
def get_stat(key, super_column, column_start, column_end):
stat = {}
try:
stat = self.scf_stat_archive.get(key,
super_column=super_column,
column_start=column_start,
column_finish=column_end,
column_count=1440)
except pycassa.NotFoundException:
LOG.info("not found data - %s %s %s %s" % (key, super_column,
column_start,
column_end))
return stat
if not key:
return {}
stats = map(lambda x: get_stat(key, x, time_idx, time_idx),
self.STATISTICS)
return stats
def get_metric_unit(self, metric_key):
try:
metric = self.cf_metric.get(key=metric_key)
except pycassa.NotFoundException:
return "None"
return metric.get('unit', "None")
def insert_stat(self, metric_key, stat, ttl=None):
LOG.debug("scf_stat_archive.insert (%s, %s)" % (metric_key, stat))
ttl = ttl if ttl else self.statistics_ttl
self.scf_stat_archive.insert(metric_key, stat, ttl=ttl)
def insert_alarm_history(self, key, column, ttl=None):
LOG.debug("cf_alarm_history.insert (%s, %s)" % (key, column))
ttl = ttl or self.statistics_ttl
self.cf_alarm_history.insert(key, column, ttl=ttl)
def update_alarm_state(self, alarmkey, state, reason, reason_data,
timestamp):
state_info = {'state_value': state, 'state_reason': reason,
'state_reason_data': reason_data,
'state_updated_timestamp':timestamp}
self.cf_metric_alarm.insert(alarmkey, state_info)
LOG.debug("cf_metric_alarm.insert (%s, %s)" % (str(alarmkey),
str(state_info)))
def list_metrics(self, project_id, namespace=None, metric_name=None,
dimensions=None, next_token=""):
def parse_filter(filter_dict):
if not filter_dict:
return None
full_filter, name_filter, value_filter = [], [], []
for k, v in filter_dict.iteritems():
k, v = utils.utf8(k), utils.utf8(v)
if k and v:
full_filter.append((k, v))
elif k and not v:
name_filter.append(k)
elif not k and v:
value_filter.append(v)
else:
msg = "Invalid dimension filter - both name and value "\
"can not be empty."
raise exception.InvalidRequest(msg)
return full_filter, name_filter, value_filter
filters = parse_filter(dimensions)
LOG.info("parse filter: %s", filters)
ret = []
skip_first = False
while True:
metrics, new_next_token, next_skip_first = self._list_metrics(
project_id, namespace, metric_name, filters, next_token)
if skip_first and metrics:
ret = ret + metrics[1:]
else:
ret = ret + metrics
skip_first = next_skip_first
if len(ret) > 500:
last_key, last_value = ret[500]
next_token = str(last_key) if last_key else None
break
elif new_next_token == next_token:
next_token = None
break
else:
next_token = new_next_token
LOG.info("next token: %s", next_token)
return ret[:500], next_token
def _list_metrics(self, project_id, namespace=None, metric_name=None,
filters=None, next_token=""):
def to_dict(v):
return {'project_id': v['project_id'],
'dimensions': json.loads(v['dimensions']),
'name': v['name'],
'namespace': v['namespace']}
def apply_filter(metric, filters):
if not filters:
return True
dimensions = metric.get('dimensions')
dimensions = json.loads(dimensions) if dimensions else {}
full_filter, name_filter, value_filter = filters
if full_filter:
if not set(full_filter).issubset(set(dimensions.items())):
return False
if name_filter:
if set(dimensions.keys()) != set(name_filter):
return False
if value_filter:
for v_in_dim in dimensions.values():
for v in value_filter:
if v in utils.utf8(v_in_dim):
return True
return False
return True
next_token = uuid.UUID(next_token) if next_token else ''
new_next_token = None
expr_list = [pycassa.create_index_expression("project_id",
project_id), ]
if namespace:
expr = pycassa.create_index_expression("namespace", namespace)
expr_list.append(expr)
if metric_name:
expr = pycassa.create_index_expression("name", metric_name)
expr_list.append(expr)
index_clause = pycassa.create_index_clause(expr_list, count=501,
start_key=next_token)
items = self.cf_metric.get_indexed_slices(index_clause,
column_count=100)
last_token = None
metrics = []
for key, value in items:
new_next_token = key
if value and apply_filter(value, filters):
last_token = key
metrics.append((key, to_dict(value)))
skip_first = last_token and last_token == new_next_token
LOG.info("%s %s %s", next_token, new_next_token, last_token)
new_next_token = str(new_next_token) if new_next_token \
else new_next_token
return metrics, new_next_token, skip_first
def get_all_metrics(self):
return self.cf_metric.get_range()
def get_all_alarms(self):
return self.cf_metric_alarm.get_range()
def get_metric(self, metric_key):
try:
data = self.cf_metric.get(metric_key)
except pycassa.NotFoundException:
data = {}
return data
def update_metric(self, metric_key, columns):
try:
data = self.cf_metric.get(metric_key)
except pycassa.NotFoundException:
LOG.debug("Metric Not Found %s" % str(metric_key))
else:
data.update(columns)
self.cf_metric.insert(key=metric_key, columns=data)
    def load_metric_data(self, metric_key):
        # NOTE: self.cf_metric_archive is not initialized in __init__, so this
        # helper only works if that column family handle is attached elsewhere.
        try:
            data = self.cf_metric_archive.get(metric_key, column_count=1440)
        except pycassa.NotFoundException:
            data = {}
        return data
def load_statistics(self, metric_key, start, finish):
def get_stat(statistic):
datapoints = self.scf_stat_archive.get(metric_key,
super_column=statistic,
column_start=start,
column_finish=finish)
return statistic, datapoints
try:
stat = dict([get_stat(statistic)
for statistic in self.STATISTICS])
except pycassa.NotFoundException:
stat = {}
return stat
def load_alarms(self, metric_key):
expr_list = [
pycassa.create_index_expression("metric_key", metric_key),
]
index_clause = pycassa.create_index_clause(expr_list)
try:
items = self.cf_metric_alarm.get_indexed_slices(index_clause)
except pycassa.NotFoundException:
LOG.debug("no alarm found")
items = {}
return items
def put_metric_alarm(self, alarm_key, metricalarm):
"""
update MetricAlarm CF
"""
LOG.debug("cf_metric_alarm.insert (%s, %s)" % (alarm_key, metricalarm))
project_id = metricalarm.get('project_id')
self.cf_metric_alarm.insert(key=alarm_key, columns=metricalarm)
self.cf_alarm_counter.add(project_id, 'alarm_counter', 1)
return alarm_key
    def restructed_stats(self, stat):
        # Collapse per-statistic time series into (timestamp, {statistic: value})
        # pairs; assumes every statistic carries the same set of timestamps.
        def get_stat(timestamp):
            ret = {}
            for key in stat.keys():
                ret[key] = stat[key][timestamp]
            return ret
ret = []
timestamps = reduce(lambda x, y: x if x == y else None,
map(lambda x: x.keys(), stat.values()))
for timestamp in timestamps:
ret.append((timestamp, get_stat(timestamp)))
return ret
def reset_alarm_counter(self):
counter = {}
for k, v in self.cf_metric_alarm.get_range():
project_id = v.get('project_id')
if counter.has_key(project_id):
counter[project_id] += 1
else:
counter[project_id] = 1
# reset counter
for k in counter:
self.cf_alarm_counter.remove_counter(k, 'alarm_counter')
rows = {k: {'alarm_counter': v} for k, v in counter.iteritems()}
self.cf_alarm_counter.batch_insert(rows)
def get_alarm_count(self, project_id):
try:
counter = self.cf_alarm_counter.get(project_id)
except:
return 0
return counter.get('alarm_counter', 0)
def get_notification_group(self, name):
try:
values = self.cf_notification_group.get(name)
        except Exception:
return []
return values.keys()
@staticmethod
def syncdb(keyspace=None):
"""
Create Cassandra keyspace, CF, SCF
"""
if not keyspace:
keyspace = FLAGS.get("cassandra_keyspace", "synaps_test")
serverlist = FLAGS.get("cassandra_server_list")
replication_factor = FLAGS.get("cassandra_replication_factor")
manager = pycassa.SystemManager(server=serverlist[0])
strategy_options = {'replication_factor':replication_factor}
# create keyspace
LOG.info(_("cassandra syncdb is started for keyspace(%s)" % keyspace))
if keyspace not in manager.list_keyspaces():
LOG.info(_("cassandra keyspace %s does not exist.") % keyspace)
manager.create_keyspace(keyspace, strategy_options=strategy_options)
LOG.info(_("cassandra keyspace %s is created.") % keyspace)
else:
property = manager.get_keyspace_properties(keyspace)
# check strategy_option
if not (strategy_options == property.get('strategy_options')):
manager.alter_keyspace(keyspace,
strategy_options=strategy_options)
LOG.info(_("cassandra keyspace strategy options is updated - %s"
% str(strategy_options)))
# create CF, SCF
column_families = manager.get_keyspace_column_families(keyspace)
if 'Metric' not in column_families.keys():
manager.create_column_family(
keyspace=keyspace,
name='Metric',
comparator_type=pycassa.ASCII_TYPE,
key_validation_class=pycassa.LEXICAL_UUID_TYPE,
column_validation_classes={
'project_id': pycassa.UTF8_TYPE,
'name': pycassa.UTF8_TYPE,
'namespace': pycassa.UTF8_TYPE,
'unit': pycassa.UTF8_TYPE,
'dimensions': pycassa.UTF8_TYPE,
'updated_timestamp': pycassa.DATE_TYPE,
'created_timestamp': pycassa.DATE_TYPE
}
)
manager.create_index(keyspace=keyspace, column_family='Metric',
column='project_id',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace, column_family='Metric',
column='name',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace, column_family='Metric',
column='namespace',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace, column_family='Metric',
column='dimensions',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace, column_family='Metric',
column='updated_timestamp',
value_type=types.DateType())
manager.create_index(keyspace=keyspace, column_family='Metric',
column='created_timestamp',
value_type=types.DateType())
if 'StatArchive' not in column_families.keys():
manager.create_column_family(
keyspace=keyspace,
name='StatArchive', super=True,
key_validation_class=pycassa.LEXICAL_UUID_TYPE,
comparator_type=pycassa.ASCII_TYPE,
subcomparator_type=pycassa.DATE_TYPE,
default_validation_class=pycassa.DOUBLE_TYPE
)
if 'MetricAlarm' not in column_families.keys():
manager.create_column_family(
keyspace=keyspace,
name='MetricAlarm',
key_validation_class=pycassa.LEXICAL_UUID_TYPE,
comparator_type=pycassa.ASCII_TYPE,
column_validation_classes={
'metric_key': pycassa.LEXICAL_UUID_TYPE,
'project_id': pycassa.UTF8_TYPE,
'actions_enabled': pycassa.BOOLEAN_TYPE,
'alarm_actions': pycassa.UTF8_TYPE,
'alarm_arn': pycassa.UTF8_TYPE,
'alarm_configuration_updated_timestamp': pycassa.DATE_TYPE,
'alarm_description': pycassa.UTF8_TYPE,
'alarm_name': pycassa.UTF8_TYPE,
'comparison_operator': pycassa.UTF8_TYPE,
                    'dimensions': pycassa.UTF8_TYPE,
                    'evaluation_periods': pycassa.INT_TYPE,
                    'insufficient_data_actions': pycassa.UTF8_TYPE,
                    'metric_name': pycassa.UTF8_TYPE,
                    'namespace': pycassa.UTF8_TYPE,
                    'ok_actions': pycassa.UTF8_TYPE,
                    'period': pycassa.INT_TYPE,
                    'state_reason': pycassa.UTF8_TYPE,
                    'state_reason_data': pycassa.UTF8_TYPE,
                    'state_updated_timestamp': pycassa.DATE_TYPE,
                    'state_value': pycassa.UTF8_TYPE,
                    'statistic': pycassa.UTF8_TYPE,
                    'threshold': pycassa.DOUBLE_TYPE,
                    'unit': pycassa.UTF8_TYPE
}
)
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='project_id',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='metric_key',
value_type=types.LexicalUUIDType())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='alarm_name',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='state_updated_timestamp',
value_type=types.DateType())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='alarm_configuration_updated_timestamp',
value_type=types.DateType())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='state_value',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='period',
value_type=types.IntegerType())
manager.create_index(keyspace=keyspace,
column_family='MetricAlarm',
column='statistic',
value_type=types.UTF8Type())
if 'AlarmHistory' not in column_families.keys():
manager.create_column_family(
keyspace=keyspace,
name='AlarmHistory',
key_validation_class=pycassa.LEXICAL_UUID_TYPE,
comparator_type=pycassa.ASCII_TYPE,
column_validation_classes={
'project_id': pycassa.UTF8_TYPE,
'alarm_key': pycassa.LEXICAL_UUID_TYPE,
'alarm_name': pycassa.UTF8_TYPE,
'history_data': pycassa.UTF8_TYPE,
'history_item_type': pycassa.UTF8_TYPE,
'history_summary': pycassa.UTF8_TYPE,
'timestamp': pycassa.DATE_TYPE,
}
)
manager.create_index(keyspace=keyspace,
column_family='AlarmHistory',
column='project_id',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='AlarmHistory',
column='alarm_key',
value_type=types.LexicalUUIDType())
manager.create_index(keyspace=keyspace,
column_family='AlarmHistory',
column='alarm_name',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='AlarmHistory',
column='history_item_type',
value_type=types.UTF8Type())
manager.create_index(keyspace=keyspace,
column_family='AlarmHistory',
column='timestamp',
value_type=types.DateType())
if 'AlarmCounter' not in column_families.keys():
manager.create_column_family(keyspace=keyspace,
name='AlarmCounter',
default_validation_class=pycassa.COUNTER_COLUMN_TYPE,
key_validation_class=pycassa.UTF8_TYPE)
if 'NotificationGroup' not in column_families.keys():
manager.create_column_family(keyspace=keyspace,
name='NotificationGroup',
key_validation_class=pycassa.UTF8_TYPE,
comparator_type=pycassa.UTF8_TYPE,
default_validation_class=pycassa.UTF8_TYPE)
LOG.info(_("cassandra syncdb has finished"))
| apache-2.0 | 4,337,725,965,951,864,300 | 39.785542 | 81 | 0.502363 | false | 4.577688 | false | false | false |
minj/foxtrick | maintainer/locale/Hattrick/Parsers/CHPPHolderParser.py | 1 | 2538 | #/Club/Players/?TeamID=818875
import sys
if sys.version > '3':
import html.parser as HTMLParser
else:
import HTMLParser
import re
# Parses chpp holders
# CatzHoek
class CHPPHolderParser(HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.users = []
self.currentUser = {}
self.currentUser['appNames'] = []
self.currentAppname = ""
#in relevant area?
self.in_creator_paragraph = False;
self.in_approvedApplications = False;
self.in_approvedApplicationsSubDivCount = 0
def getUserIdFromUrl(self, url):
pattern = re.compile("\/Club\/Manager\/\?userId=(\d+)")
match = re.match(pattern, url)
if match and match.group(1):
return int(match.group(1))
def handle_starttag(self, tag, attrs):
if tag == 'p':
for name, value in attrs:
if name == 'id' and value == 'creator':
self.in_creator_paragraph = True;
if tag == 'a' and self.in_creator_paragraph:
for key, value in attrs:
if key == 'title':
self.currentUser["name"] = value
if key == 'href':
try:
id = self.getUserIdFromUrl( value )
self.currentUser["id"] = id
except Exception:
pass
if tag == 'div':
if self.in_approvedApplications:
self.in_approvedApplicationsSubDivCount += 1
if self.in_approvedApplicationsSubDivCount == 1:
for name, value in attrs:
if name == "title":
self.currentAppname = value
#print value.encode('utf-8')
return
for name, value in attrs:
if name == 'id' and value == 'approvedApplications':
self.in_approvedApplications = True
def handle_endtag(self, tag):
if tag == 'div' and self.in_approvedApplications:
if self.in_approvedApplicationsSubDivCount == 0:
self.in_approvedApplications = False
else:
self.in_approvedApplicationsSubDivCount -= 1
if tag == 'p':
if self.in_creator_paragraph:
found = False
for u in self.users:
if u['id'] == self.currentUser['id']:
found = True
if not found:
self.currentUser["appNames"].append(self.currentAppname)
self.users.append(self.currentUser)
else:
#print "already in there"
for u in self.users:
if u['id'] == self.currentUser['id']:
u['appNames'].append(self.currentAppname)
self.currentUser = {}
self.currentUser['appNames'] = []
self.in_creator_paragraph = False; #no nested divs in playerinfo, this is okay
def get(self):
for u in self.users:
u['appNames'] = sorted(u['appNames'])
return self.users
| gpl-3.0 | 3,078,703,800,076,418,000 | 25.715789 | 81 | 0.648542 | false | 3.168539 | false | false | false |
untitaker/python-webuntis | tests/utils/test_remote.py | 2 | 1228 | import webuntis
import mock
from webuntis.utils.third_party import json
from .. import WebUntisTestCase, BytesIO
class BasicUsage(WebUntisTestCase):
def test_parse_result(self):
x = webuntis.utils.remote._parse_result
a = {'id': 2}
b = {'id': 3}
self.assertRaisesRegex(webuntis.errors.RemoteError,
'Request ID', x, a, b)
a = b = {'id': 2}
self.assertRaisesRegex(webuntis.errors.RemoteError,
'no information', x, a, b)
a = {'id': 2}
b = {'id': 2, 'result': 'YESSIR'}
assert x(a, b) == 'YESSIR'
def test_parse_error_code(self):
x = webuntis.utils.remote._parse_error_code
a = b = {}
self.assertRaisesRegex(webuntis.errors.RemoteError,
'no information', x, a, b)
b = {'error': {'code': 0, 'message': 'hello world'}}
self.assertRaisesRegex(webuntis.errors.RemoteError,
'hello world', x, a, b)
for code, exc in webuntis.utils.remote._errorcodes.items():
self.assertRaises(exc, x, a, {
'error': {'code': code, 'message': 'hello'}
})
| bsd-3-clause | 2,690,891,607,397,577,000 | 31.315789 | 67 | 0.527687 | false | 3.721212 | false | false | false |
FlannelFox/FlannelFox | flannelfox/datasources/common.py | 1 | 4132 | #-------------------------------------------------------------------------------
# Name: Settings
# Purpose: Contains the settings for the application and threads
#
# TODO: Move the reading of config xml into this file
# Move some setting into external xml file
# Move the config files to ~/.flannelfox
#-------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# System Includes
import datetime, json, math, time, os
from flannelfox import logging
def getConfigFiles(directory):
logger = logging.getLogger(__name__)
configFiles = []
if os.path.isdir(directory):
for configFile in os.listdir(directory):
configFilePath = os.path.join(directory,configFile)
try:
if configFile.endswith('.json'):
configFileJson = __readConfigFile(configFilePath)
					if configFileJson is not None:
configFiles.append((configFilePath, configFileJson))
except Exception as e:
				logger.warning('There was a problem reading a config file\n{}\n{}'.format(
configFilePath,
e
))
continue
return configFiles
def __readConfigFile(file):
# Try to read in the rss lists
logger = logging.getLogger(__name__)
try:
logger.debug('Reading RSS config file: {0}'.format(file))
with open(file) as rssJson:
return json.load(rssJson)
except Exception as e:
logger.error('There was a problem reading the rss config file\n{0}'.format(e))
return []
def __getModificationDate(filename):
'''
Checks the modification time of the file it is given
filename: The full path of the file to return the timestamp of.
Returns the timestamp in seconds since epoch
'''
logger = logging.getLogger(__name__)
try:
return int(datetime.datetime.fromtimestamp(os.path.getmtime(filename)).strftime('%s'))
except Exception:
logger.error('There was a problem getting the timestamp for:\n{0}'.format(filename))
return -1
def isCacheStillValid(force=False, cacheFileName=None, frequency=360):
'''
Used to determine if a cachefile needs to be updated
force: force an update
cacheFileName: The full path of the file to check
frequency: how often the file should be updated in minutes
Returns Boolean
'''
logger = logging.getLogger(__name__)
try:
if not os.path.exists(os.path.dirname(cacheFileName)):
try:
os.makedirs(os.path.dirname(cacheFileName))
except OSError: # Guard against race condition
pass
lastModified = __getModificationDate(cacheFileName)
if lastModified == -1:
return False
logger.debug('Checking cache: {0} {1}:{2}'.format(cacheFileName, frequency, math.ceil((time.time()/60 - lastModified/60))))
difference = math.ceil((time.time()/60 - lastModified/60))
if difference >= frequency:
logger.debug('Cache update needed')
return False
else:
logger.debug('Cache update not needed')
return True
except Exception:
logger.error('Cache validity for {0} could not be determined'.format(cacheFileName))
return False
def readCacheFile(cacheFileName):
logger = logging.getLogger(__name__)
try:
logger.debug('Reading cache file for [{0}]'.format(cacheFileName))
with open(cacheFileName) as cacheFile:
return json.load(cacheFile)
except Exception as e:
logger.error('There was a problem reading a lastfm list cache file: {0}'.format(e))
return []
def updateCacheFile(force=False, cacheFileName=None, data=None):
'''
	Used to update cache files for api calls. This is needed so we do not keep
	asking the api servers for the same information on a frequent basis. The
	default frequency is to ask once an hour.
	force: perform the update regardless of frequency
	cacheFileName: the full path of the cache file to write
	data: the data to serialize into the cache file as JSON
'''
directory = os.path.dirname(cacheFileName)
if not os.path.exists(directory):
os.makedirs(directory)
logger = logging.getLogger(__name__)
try:
logger.debug('Cache update for {0} needed'.format(cacheFileName))
with open(cacheFileName, 'w') as cache:
cache.write(json.dumps(data))
except Exception as e:
logger.error('There was a problem writing a cache file {0}: {1}'.format(cacheFileName, e))
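# Illustrative sketch (added commentary, not part of the original module):
# the read-through cache flow these helpers are intended to support. The
# cache path and the fetch_fresh_data callable are placeholders for this
# example, not names defined elsewhere in flannelfox.
def _example_cache_flow(fetch_fresh_data):
	cacheFileName = os.path.expanduser('~/.flannelfox/cache/example.json')
	if isCacheStillValid(cacheFileName=cacheFileName, frequency=60):
		return readCacheFile(cacheFileName)
	data = fetch_fresh_data()
	updateCacheFile(cacheFileName=cacheFileName, data=data)
	return data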
| mit | -8,668,762,496,566,740,000 | 26.364238 | 125 | 0.694095 | false | 3.663121 | true | false | false |
Conan-Kudo/bodhi | bodhi/server/graphql_schemas.py | 2 | 2055 | # Copyright © 2020 Red Hat Inc., and others.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Defines schemas related to GraphQL objects."""
from graphene import relay, Field, String
from graphene_sqlalchemy import SQLAlchemyObjectType
from bodhi.server.models import (
Release as ReleaseModel,
Update as UpdateModel,
BuildrootOverride as BuildrootOverrideModel
)
class Release(SQLAlchemyObjectType):
"""Type object representing a distribution release from bodhi.server.models like Fedora 27."""
class Meta:
"""Allow to set different options to the class."""
model = ReleaseModel
interfaces = (relay.Node, )
state = Field(String)
package_manager = Field(String)
class Update(SQLAlchemyObjectType):
"""Type object representing an update from bodhi.server.models."""
class Meta:
"""Allow to set different options to the class."""
model = UpdateModel
interfaces = (relay.Node, )
status = Field(String)
request = Field(String)
date_approved = Field(String)
class BuildrootOverride(SQLAlchemyObjectType):
"""Type object representing an update from bodhi.server.models."""
class Meta:
"""Allow to set different options to the class."""
model = BuildrootOverrideModel
interfaces = (relay.Node, )
submitter = Field(String)
| gpl-2.0 | 5,457,646,105,845,578,000 | 32.129032 | 98 | 0.718111 | false | 4.2881 | false | false | false |
sitn/crdppf_core | crdppf/views/ogcproxy.py | 2 | 1321 | # -*- coding: UTF-8 -*-
from pyramid.response import Response
from pyramid.view import view_config
import httplib2
from urllib.parse import urlencode
from urllib.parse import urlparse
from crdppf.lib.wfsparsing import is_get_feature, limit_featurecollection
@view_config(route_name='ogcproxy', renderer='json')
def ogcproxy(request):
params = dict(request.params)
params_encoded = {}
for k, v in params.items():
if k == 'callback':
continue
params_encoded[k] = v
query_string = urlencode(params_encoded)
if len(params_encoded) > 0:
_url = '?' + query_string
else:
_url = ''
method = request.method
url = request.registry.settings['crdppf_wms']
h = dict(request.headers)
if urlparse(url).hostname != 'localhost':
h.pop("Host", h)
body = None
if method in ("POST", "PUT"):
body = request.body
url += _url
http = httplib2.Http()
resp, content = http.request(url, method=method, body=body, headers=h)
if method == "POST" and is_get_feature(body):
content = limit_featurecollection(content, limit=4)
headers = {"Content-Type": resp["content-type"]}
return Response(content, status=resp.status, headers=headers)
| gpl-3.0 | 2,165,775,917,155,989,800 | 22.018182 | 74 | 0.614686 | false | 3.806916 | false | false | false |
deepmind/open_spiel | open_spiel/python/examples/response_graph_ucb_2x2_game.py | 1 | 2279 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of ResponseGraphUCB run on a 2x2 game."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import matplotlib.pyplot as plt
import numpy as np
from open_spiel.python.algorithms import response_graph_ucb
from open_spiel.python.algorithms import response_graph_ucb_utils
def get_example_2x2_payoffs():
mean_payoffs = np.random.uniform(-1, 1, size=(2, 2, 2))
mean_payoffs[0, :, :] = np.asarray([[0.5, 0.85], [0.15, 0.5]])
mean_payoffs[1, :, :] = 1 - mean_payoffs[0, :, :]
return mean_payoffs
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
mean_payoffs = get_example_2x2_payoffs()
game = response_graph_ucb_utils.BernoulliGameSampler(
[2, 2], mean_payoffs, payoff_bounds=[-1., 1.])
game.p_max = mean_payoffs
game.means = mean_payoffs
print('Game means:\n', game.means)
exploration_strategy = 'uniform-exhaustive'
confidence_method = 'ucb-standard'
r_ucb = response_graph_ucb.ResponseGraphUCB(
game,
exploration_strategy=exploration_strategy,
confidence_method=confidence_method,
delta=0.1)
results = r_ucb.run()
# Plotting
print('Number of total samples: {}'.format(np.sum(r_ucb.count[0])))
r_ucb.visualise_2x2x2(real_values=game.means, graph=results['graph'])
r_ucb.visualise_count_history(figsize=(5, 3))
plt.gca().xaxis.label.set_fontsize(15)
plt.gca().yaxis.label.set_fontsize(15)
# Compare to ground truth graph
real_graph = r_ucb.construct_real_graph()
r_ucb.plot_graph(real_graph)
plt.show()
if __name__ == '__main__':
app.run(main)
| apache-2.0 | -9,097,289,864,187,538,000 | 32.028986 | 74 | 0.709961 | false | 3.228045 | false | false | false |
berndporr/comedi2py | Thermocouple.py | 1 | 3656 | import sys
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
import PyQt4.Qwt5.anynumpy as np
# Thermocouple application: channel 0 has the thermocouple
# connected to it, and channel 1 receives the temperature of the cold junction.
# check out http://www.linux-usb-daq.co.uk/howto2/thermocouple/
class DAQThermo(Qt.QWidget):
def __init__(self, *args):
Qt.QWidget.__init__(self, *args)
self.thermo = Qwt.QwtThermo(self)
self.thermo.setOrientation(Qt.Qt.Vertical,Qwt.QwtThermo.LeftScale)
self.thermo.setFillColor(Qt.Qt.green)
label = Qt.QLabel("Temperature", self)
label.setAlignment(Qt.Qt.AlignCenter)
layout = Qt.QVBoxLayout(self)
layout.setMargin(0)
layout.addWidget(self.thermo)
layout.addWidget(label)
self.setFixedWidth(3*label.sizeHint().width())
# __init__()
def setValue(self, value):
self.thermo.setValue(value)
# setValue()
def setRange(self,mi,ma):
self.thermo.setRange(mi,ma)
# this is taken from the QWT demos and slightly modified
# to get this scrolling plot
class ScrollingPlot(Qwt.QwtPlot):
def __init__(self, *args):
Qwt.QwtPlot.__init__(self, *args)
def initPlotwindow(self,y,samplingrate):
self.samplingrate = samplingrate;
# set axis titles
self.setAxisTitle(Qwt.QwtPlot.xBottom, 't/sec -->')
self.setAxisTitle(Qwt.QwtPlot.yLeft, 'temperature/C -->')
# insert a few curves
self.cData = Qwt.QwtPlotCurve('y = temperature')
self.cData.setPen(Qt.QPen(Qt.Qt.red))
self.cData.attach(self)
# make a Numeric array for the horizontal data
self.x = np.arange(0.0, 500, 1)
self.x = self.x / samplingrate;
# sneaky way of creating an array of just zeroes
self.y = self.x * 0 + y
# initialize the data
self.cData.setData(self.x,self.y)
# insert a horizontal marker at y = 0
mY = Qwt.QwtPlotMarker()
mY.setLineStyle(Qwt.QwtPlotMarker.HLine)
mY.setYValue(0.0)
mY.attach(self)
# replot
self.replot()
# __init__()
def new_data(self,d):
# shift the data to create a scrolling dataplotx
self.y = np.concatenate( ([d], self.y[0:-1] ) )
self.cData.setData(self.x,self.y)
self.replot()
# class Plot
# calculate the temperature
def calcTemperature(voltageTheormocouple,temperatureLM35):
# gain of the instrumentation amplifier INA126
GAIN_INSTR_AMP=((5+80/0.456))
# zero offset of the instrumentation amplifier
ZERO_INSTR_AMP=(-0.05365)
return (((voltageTheormocouple-ZERO_INSTR_AMP)/GAIN_INSTR_AMP)/39E-6) + temperatureLM35
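# Illustrative sketch (added commentary, not part of the original file): a
# rough worked example of calcTemperature(). The 39E-6 divisor above is the
# assumed thermocouple sensitivity in volts per degree C (about 39 uV/C,
# typical of a type K junction); the gain and offset belong to the INA126
# stage as noted. With roughly 0.30 V on channel 0 and a 20 C cold junction:
#     calcTemperature(0.30, 20.0) ~= (0.30 + 0.05365) / 180.44 / 39e-6 + 20
# which works out to about 70 C.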
def makePlot(samplingrate):
scrollplot = ScrollingPlot()
scrollplot.initPlotwindow(0,samplingrate)
scrollplot.resize(500, 300)
scrollplot.show()
return scrollplot
def makeThermo():
thermo = DAQThermo()
thermo.resize(100,400)
thermo.setRange(-20,300)
thermo.show()
return thermo
#########################################################
# functions called by comedi2py
# called once with the samplingrate in Hz
def comedistart(samplingrate,minValue,maxValue):
global scrollplot
global thermo
scrollplot = makePlot(samplingrate)
thermo = makeThermo()
# called every sample
def comedidata(a):
global scrollplot
global thermo
voltage_thermo = a[0]
temperature_lm35 = a[1] / 10E-3
temperature = calcTemperature(voltage_thermo,temperature_lm35)
scrollplot.new_data(temperature);
thermo.setValue(temperature);
# called at the end
def comedistop():
print "\n"
| gpl-2.0 | -8,846,850,269,125,298,000 | 26.283582 | 91 | 0.650438 | false | 3.305606 | false | false | false |
kzfm/ashioto | ashioto/nameserver.py | 1 | 2682 | # -*- encoding:utf-8 -*-
from datetime import datetime
import socket
import re
from twython import Twython
import os
import json
home = os.path.expanduser("~")
twitter_conf_file = os.path.join(home, '.ashioto', 'twitter.json')
tc = json.load(open(twitter_conf_file))
CONSUMER_KEY = tc["CONSUMER_KEY"]
CONSUMER_SECRET = tc["CONSUMER_SECRET"]
ACCESS_TOKEN = tc["ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = tc["ACCESS_TOKEN_SECRET"]
class NameServer(object):
def __init__(self, host="localhost", port=8000, buffer_size=8192, timeout=1):
self.host = host
self.port = port
self.buffer_size = buffer_size
self.timeout = timeout
self.conn = None
self.connected = False
self.twitter = Twython(app_key=CONSUMER_KEY,
app_secret=CONSUMER_SECRET,
oauth_token=ACCESS_TOKEN,
oauth_token_secret=ACCESS_TOKEN_SECRET)
def response_ok(self):
self.conn.send('HTTP/1.0 200 OK\r\n\r\n')
def tweet(self, name, title):
songinfo = '♪ "{}" ({})'.format(title, name)
print songinfo
self.twitter.update_status(status=songinfo)
def run(self):
cue = SongCue(callback=self.tweet)
artist_title_re = re.compile("ARTIST=(.*)TITLE=(.*)vorbis")
print "NameServer start at {}:{}".format(self.host, self.port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((self.host, self.port))
s.listen(1)
conn, addr = s.accept()
print 'Connected by', addr
self.conn = conn
while 1:
data = conn.recv(8192)
if not data: break
if not self.connected:
self.response_ok()
self.connected = True
at = artist_title_re.search(data)
if at:
name = at.group(1)
title = at.group(2)
cue.add(name, title)
cue.noop()
self.conn.close()
print "NameServer stop"
class SongCue(object):
def __init__(self, bytime=60, callback=None):
self.bytime = bytime
self.callback = callback
self.new = {}
def add(self, name, title):
self.new["title"] = title
self.new["name"] = name
self.new["time"] = datetime.now()
def noop(self):
if "time" in self.new:
dur = datetime.now() - self.new["time"]
if dur.seconds > self.bytime:
self.fire()
def fire(self):
self.callback(self.new["name"], self.new["title"])
self.new = {}
if __name__ == '__main__':
NameServer().run()
| mit | -4,532,109,098,526,120,000 | 29.11236 | 81 | 0.553358 | false | 3.597315 | false | false | false |
openstack/powervc-driver | common-powervc/powervc/common/netutils.py | 1 | 2910 | # Copyright 2013 IBM Corp.
import json
import socket
import urllib2
import urlparse
def is_ipv4_address(ip_or_host):
"""Determines if a netloc is an IPv4 address.
:param ip_or_host: the host/ip to check
"""
try:
socket.inet_aton(ip_or_host)
return True
except:
return False
def hostname_url(url):
"""Converts the URL into its FQHN form.
This requires DNS to be setup on the OS or the hosts table
to be updated.
:param url: the url to convert to FQHN form
"""
frags = urlparse.urlsplit(url)
if is_ipv4_address(frags.hostname) is True:
return url
try:
fqhn, alist, ip = socket.gethostbyaddr(frags.hostname)
except:
        # likely no DNS configured, return initial url
return url
port_str = ''
if frags.port is not None:
port_str = ':' + str(frags.port)
return frags.scheme + '://' + fqhn + port_str + frags.path
def extract_url_segment(url, needles):
"""searches the url segments for the 1st occurence
of an element in the list of search keys.
:param url: the url or uri to search
:param needles: the keys to search for
"""
for seg in reversed(url.split('/')):
if seg in needles:
return seg
return None
class JSONRESTClient(object):
"""a simple json rest client
"""
def __init__(self, token):
self.token = token
def get(self, url):
"""perform a http GET on the url
:param url: the url to GET
"""
return self._rest_call(url)
def post(self, url, json_body):
"""perform a http POST on the url
:param url: the url to POST
:param json_body: the body to POST
"""
return self._rest_call(url, 'POST', json_body)
def put(self, url, json_body):
"""perform a http PUT on the url
:param url: the url to PUT
:param json_body: the body to PUT
"""
return self._rest_call(url, 'PUT', json_body)
def delete(self, url):
"""perform an http DELETE on the url
:param url: the url to DELETE
"""
return self._rest_call(url, 'DELETE')
def _rest_call(self, url, method='GET', json_body=None):
request = urllib2.Request(url)
request.add_header('Content-Type', 'application/json;charset=utf8')
request.add_header('Accept', 'application/json')
request.add_header('User-Agent', 'python-client')
if self.token:
request.add_header('X-Auth-Token', self.token)
if json_body:
request.add_data(json.dumps(json_body))
request.get_method = lambda: method
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError as e:
if e.code == 300:
return json.loads(e.read())
raise e
return json.loads(response.read())
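# Illustrative sketch (added commentary, not part of the original module):
# minimal use of JSONRESTClient. The endpoint URL and token below are
# placeholders for this example, not values defined anywhere in this package.
def _example_json_rest_client_usage():
    client = JSONRESTClient('example-auth-token')
    servers = client.get('http://localhost:8774/v2/demo/servers')
    client.post('http://localhost:8774/v2/demo/servers',
                {'server': {'name': 'demo-instance'}})
    return servers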
| apache-2.0 | 6,568,505,798,722,032,000 | 26.196262 | 75 | 0.591409 | false | 3.745174 | false | false | false |
MicrosoftGenomics/LEAP | leap/regression/leapUtils.py | 1 | 5785 | import numpy as np
from optparse import OptionParser
import scipy.linalg as la
import scipy.linalg.blas as blas
import csv
import time
import fastlmm.util.VertexCut as vc
from pysnptools.snpreader.bed import Bed
import pysnptools.util as pstutil
import pysnptools.util.pheno as phenoUtils
np.set_printoptions(precision=3, linewidth=200)
def loadData(bfile, extractSim, phenoFile, missingPhenotype='-9', loadSNPs=False, standardize=True):
bed = Bed(bfile)
if (extractSim is not None):
f = open(extractSim)
csvReader = csv.reader(f)
extractSnpsSet = set([])
for l in csvReader: extractSnpsSet.add(l[0])
f.close()
keepSnpsInds = [i for i in xrange(bed.sid.shape[0]) if bed.sid[i] in extractSnpsSet]
bed = bed[:, keepSnpsInds]
phe = None
if (phenoFile is not None): bed, phe = loadPheno(bed, phenoFile, missingPhenotype)
if (loadSNPs):
bed = bed.read()
if (standardize): bed = bed.standardize()
return bed, phe
def loadPheno(bed, phenoFile, missingPhenotype='-9', keepDict=False):
pheno = phenoUtils.loadOnePhen(phenoFile, missing=missingPhenotype, vectorize=True)
checkIntersection(bed, pheno, 'phenotypes')
bed, pheno = pstutil.intersect_apply([bed, pheno])
if (not keepDict): pheno = pheno['vals']
return bed, pheno
def checkIntersection(bed, fileDict, fileStr, checkSuperSet=False):
bedSet = set((b[0], b[1]) for b in bed.iid)
fileSet = set((b[0], b[1]) for b in fileDict['iid'])
if checkSuperSet:
if (not fileSet.issuperset(bedSet)): raise Exception(fileStr + " file does not include all individuals in the bfile")
intersectSet = bedSet.intersection(fileSet)
if (len(intersectSet) != len (bedSet)):
print len(intersectSet), 'individuals appear in both the plink file and the', fileStr, 'file'
def symmetrize(a):
return a + a.T - np.diag(a.diagonal())
def loadRelatedFile(bed, relFile):
relatedDict = phenoUtils.loadOnePhen(relFile, vectorize=True)
checkIntersection(bed, relatedDict, 'relatedness', checkSuperSet=True)
_, relatedDict = pstutil.intersect_apply([bed, relatedDict])
related = relatedDict['vals']
keepArr = (related < 0.5)
print np.sum(~keepArr), 'individuals will be removed due to high relatedness'
return keepArr
def findRelated(bed, cutoff):
print 'Computing kinship matrix...'
t0 = time.time()
XXT = symmetrize(blas.dsyrk(1.0, bed.val, lower=1) / bed.val.shape[1])
print 'Done in %0.2f'%(time.time()-t0), 'seconds'
#Find related individuals
removeSet = set(np.sort(vc.VertexCut().work(XXT, cutoff))) #These are the indexes of the IIDs to remove
print 'Marking', len(removeSet), 'individuals to be removed due to high relatedness'
#keepArr = np.array([(1 if iid in keepSet else 0) for iid in bed.iid], dtype=bool)
keepArr = np.ones(bed.iid.shape[0], dtype=bool)
for i in removeSet: keepArr[i] = False
return keepArr
def eigenDecompose(XXT):
t0 = time.time()
print 'Computing eigendecomposition...'
s,U = la.eigh(XXT)
if (np.min(s) < -1e-4): raise Exception('Negative eigenvalues found')
s[s<0]=0
ind = np.argsort(s)
ind = ind[s>1e-12]
U = U[:, ind]
s = s[ind]
print 'Done in %0.2f'%(time.time()-t0), 'seconds'
return s,U
def loadCovars(bed, covarFile):
covarsDict = phenoUtils.loadOnePhen(covarFile, vectorize=False)
checkIntersection(bed, covarsDict, 'covariates', checkSuperSet=True)
_, covarsDict = pstutil.intersect_apply([bed, covarsDict])
covar = covarsDict['vals']
covar -= np.mean(covar, axis=0)
covar /= np.std(covar, axis=0)
return covar
def getSNPCovarsMatrix(bed, resfile, pthresh, mindist):
snpNameToNumDict = dict([])
for i,s in enumerate(bed.sid): snpNameToNumDict[s] = i
f = open(resfile)
csvReader = csv.reader(f, delimiter="\t")
csvReader.next()
significantSNPs = []
significantSNPNames = []
lastPval = 0
featuresPosList = []
for l in csvReader:
snpName, pVal = l[0], float(l[4])
		if (pVal < lastPval): raise Exception('P-values are not sorted in ascending order: ' + str(pVal) + "<" + str(lastPval))
lastPval = pVal
if (pVal > pthresh): break
if (snpName not in snpNameToNumDict): continue
significantSNPNames.append(snpName)
if (mindist == 0):
significantSNPs.append(snpNameToNumDict[snpName])
print 'Using SNP', snpName, 'with p<%0.2e'%pVal, 'as a fixed effect'
else:
posArr = bed.pos[snpNameToNumDict[snpName]]
chrom, pos = posArr[0], int(posArr[2])
addSNP = True
for (c,p) in featuresPosList:
if (chrom == c and abs(pos-p) < mindist):
addSNP = False
break
if addSNP:
significantSNPs.append(snpNameToNumDict[snpName])
featuresPosList.append((chrom, pos))
print 'Using SNP', snpName, '('+str(int(chrom))+':'+str(pos)+') with p<%0.2e'%pVal, 'as a fixed effect'
f.close()
snpCovarsMat = bed.val[:, significantSNPs]
return snpCovarsMat
def getExcludedChromosome(bfile, chrom):
bed = Bed(bfile)
indsToKeep = (bed.pos[:,0] != chrom)
bed = bed[:, indsToKeep]
return bed.read().standardize()
def getChromosome(bfile, chrom):
bed = Bed(bfile)
indsToKeep = (bed.pos[:,0] == chrom)
bed = bed[:, indsToKeep]
return bed.read().standardize()
def _fixupBedAndPheno(bed, pheno, missingPhenotype='-9'):
bed = _fixupBed(bed)
bed, pheno = _fixup_pheno(pheno, bed, missingPhenotype)
return bed, pheno
def _fixupBed(bed):
if isinstance(bed, str):
return Bed(bed).read().standardize()
else: return bed
def _fixup_pheno(pheno, bed=None, missingPhenotype='-9'):
if (isinstance(pheno, str)):
if (bed is not None):
bed, pheno = loadPheno(bed, pheno, missingPhenotype, keepDict=True)
return bed, pheno
else:
phenoDict = phenoUtils.loadOnePhen(pheno, missing=missingPhenotype, vectorize=True)
return phenoDict
else:
if (bed is not None): return bed, pheno
else: return pheno
| apache-2.0 | 3,901,861,025,153,968,000 | 29.771277 | 122 | 0.700605 | false | 2.668358 | false | false | false |
delfick/bespin | bespin/option_spec/deployment_check.py | 2 | 6720 | from bespin.errors import BadDeployment, BadStack, BadOption
from bespin import helpers as hp
from input_algorithms.spec_base import NotSpecified
from input_algorithms.dictobj import dictobj
import requests
import fnmatch
import logging
log = logging.getLogger("bespin.option_spec.deployment")
class UrlChecker(dictobj):
fields = {
"expect": "The value we expect for a successful deployment"
, "endpoint": "The domain of the url to hit"
, "check_url": "The path of the url to hit"
, "timeout_after": "Stop waiting after this many seconds"
}
def wait(self, environment):
endpoint = self.endpoint().resolve()
while endpoint.endswith("/"):
endpoint = endpoint[:-1]
while endpoint.endswith("."):
endpoint = endpoint[:-1]
while self.check_url.startswith("/"):
self.check_url = self.check_url[1:]
url = endpoint + '/' + self.check_url
expected = self.expect.format(**environment)
log.info("Asking server for version till we match %s", expected)
for _ in hp.until(self.timeout_after, step=15):
log.info("Asking %s", url)
try:
res = requests.get(url)
result = res.text
status = res.status_code
except requests.exceptions.ConnectionError as error:
log.warning("Failed to ask server\terror=%s", error)
else:
log.info("\tgot back (%s) '%s'", status, result)
if fnmatch.fnmatch(result, expected):
log.info("Deployment successful!")
return
raise BadStack("Timedout waiting for the app to give back the correct version")
class SNSConfirmation(dictobj):
fields = {
"version_message": "The expected version that indicates successful deployment"
, "deployment_queue": "The sqs queue to check for messages"
, ("timeout", 300): "Stop waiting after this amount of time"
}
def wait(self, instances, environment, sqs):
version_message = self.version_message.format(**environment)
deployment_queue = self.deployment_queue.format(**environment)
failed = []
success = []
attempt = 0
log.info("Checking sqs for %s", version_message)
log.info("Checking for message for instances [%s]", ",".join(instances))
for _ in hp.until(timeout=self.timeout, step=5, action="Checking for valid deployment actions"):
messages = sqs.get_all_deployment_messages(deployment_queue)
# Look for success and failure in the messages
for message in messages:
log.info("Message received for instance %s with content [%s]", message.instance_id, message.output)
# Ignore the messages for instances outside this deployment
if message.instance_id in instances:
if fnmatch.fnmatch(message.output, version_message):
log.info("Deployed instance %s", message.instance_id)
success.append(message.instance_id)
else:
log.info("Failed to deploy instance %s", message.instance_id)
log.info("Failure Message: %s", message.output)
failed.append(message.instance_id)
# Stop trying if we have all the instances
if set(failed + success) == set(instances):
break
# Record the iteration of checking for a valid deployment
attempt += 1
log.info("Completed attempt %s of checking for a valid deployment state", attempt)
if success:
log.info("Succeeded to deploy %s", success)
if failed:
log.error("Failed to deploy %s", failed)
raise BadDeployment(failed=failed)
if not success and not failed:
log.error("Failed to receive any messages")
raise BadDeployment("Failed to receive any messages")
log.info("All instances have been confirmed to be deployed with version_message [%s]!", version_message)
class ConfirmDeployment(dictobj):
fields = {
"deploys_s3_path": "A list of s3 paths that we expect to be created as part of the deployment"
, "zero_instances_is_ok": "Don't do deployment confirmation if the scaling group has no instances"
, "auto_scaling_group_name": "The name of the auto scaling group that has the instances to be checked"
, "url_checker": "Check an endpoint on our instances for a particular version message"
, "sns_confirmation": "Check an sqs queue for messages our Running instances produced"
}
def instances(self, stack):
auto_scaling_group_name = self.auto_scaling_group_name
asg_physical_id = stack.cloudformation.map_logical_to_physical_resource_id(auto_scaling_group_name)
return stack.ec2.get_instances_in_asg_by_lifecycle_state(asg_physical_id, lifecycle_state="InService")
def confirm(self, stack, environment, start=None):
instances = []
if self.auto_scaling_group_name is not NotSpecified:
instances = self.instances(stack)
            if len(instances) == 0:
if self.zero_instances_is_ok:
log.info("No instances to check, but config says that's ok!")
return
else:
raise BadDeployment("No instances are InService in the auto scaling group!", stack=stack.name, auto_scaling_group_name=self.auto_scaling_group_name)
else:
if any(item is not NotSpecified for item in (self.sns_confirmation, self.url_checker)):
raise BadOption("Auto_scaling_group_name must be specified if sns_confirmation or url_checker are specified")
for checker in (self.check_sns, self.check_url, self.check_deployed_s3_paths):
checker(stack, instances, environment, start)
def check_sns(self, stack, instances, environment, start=None):
if self.sns_confirmation is not NotSpecified:
self.sns_confirmation.wait(instances, environment, stack.sqs)
def check_url(self, stack, instances, environment, start=None):
if self.url_checker is not NotSpecified:
self.url_checker.wait(environment)
def check_deployed_s3_paths(self, stack, instances, environment, start=None):
if self.deploys_s3_path is not NotSpecified:
for path in self.deploys_s3_path:
stack.s3.wait_for(path.bucket.format(**environment), path.key.format(**environment), path.timeout, start=start)
| mit | 468,202,991,382,302,340 | 44.405405 | 168 | 0.625149 | false | 4.327109 | false | false | false |
ghxandsky/zstack-utility | virtualrouter/virtualrouter/virtualrouter.py | 3 | 3204 | '''
@author: Frank
'''
from zstacklib.utils import plugin
from zstacklib.utils import log
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import shell
from zstacklib.utils import daemon
from zstacklib.utils import iptables
import os.path
import traceback
import pprint
import functools
class VRAgent(plugin.Plugin):
pass
class VirtualRouterError(Exception):
    '''virtual router error'''
logger = log.get_logger(__name__)
class AgentResponse(object):
def __init__(self, success=True, error=None):
self.success = success
self.error = error if error else ''
class AgentCommand(object):
def __init__(self):
pass
class InitRsp(AgentResponse):
def __init__(self):
super(InitRsp, self).__init__()
class PingRsp(AgentResponse):
def __init__(self):
super(PingRsp, self).__init__()
self.uuid = None
def replyerror(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
content = traceback.format_exc()
err = '%s\n%s\nargs:%s' % (str(e), content, pprint.pformat([args, kwargs]))
rsp = AgentResponse()
rsp.success = False
rsp.error = str(e)
logger.warn(err)
return jsonobject.dumps(rsp)
return wrap
class VirtualRouter(object):
http_server = http.HttpServer(port=7272)
http_server.logfile_path = log.get_logfile_path()
PLUGIN_PATH = "plugin_path"
INIT_PATH = "/init"
PING_PATH = "/ping"
def __init__(self, config={}):
self.config = config
plugin_path = self.config.get(self.PLUGIN_PATH, None)
if not plugin_path:
plugin_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plugins')
self.plugin_path = plugin_path
self.plugin_rgty = plugin.PluginRegistry(self.plugin_path)
self.init_command = None
self.uuid = None
@replyerror
def init(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
self.init_command = cmd
        self.uuid = cmd.uuid
return jsonobject.dumps(InitRsp())
@replyerror
    def ping(self, req):
rsp = PingRsp()
rsp.uuid = self.uuid
return jsonobject.dumps(rsp)
def start(self, in_thread=True):
self.plugin_rgty.configure_plugins(self)
self.plugin_rgty.start_plugins()
self.http_server.register_async_uri(self.INIT_PATH, self.init)
self.http_server.register_async_uri(self.PING_PATH, self.ping)
if in_thread:
self.http_server.start_in_thread()
else:
self.http_server.start()
def stop(self):
self.plugin_rgty.stop_plugins()
self.http_server.stop()
class VirutalRouterDaemon(daemon.Daemon):
def __init__(self, pidfile):
super(VirutalRouterDaemon, self).__init__(pidfile)
def run(self):
self.agent = VirtualRouter()
self.agent.start(False) | apache-2.0 | 5,194,251,650,686,618,000 | 25.637931 | 94 | 0.597066 | false | 3.747368 | false | false | false |
ritchiewilson/majormajor | tests/ot/test_expand_deletion_range.py | 1 | 4109 | # MajorMajor - Collaborative Document Editing Library
# Copyright (C) 2013 Ritchie Wilson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Some expanded deletion ranges overlapping other deletion ranges
"""
from majormajor.document import Document
from tests.test_utils import build_changesets_from_tuples
class TestExpandDeletionRange:
def test_expand_deletion_range(self):
doc = Document(snapshot='HjpRFtZXW5')
doc.HAS_EVENT_LOOP = False
css_data = [
('si', 7, 'OeI', ['root'], 'c3c'), # HjpRFtZ OeI XW5
('sd', 2, 5, ['c3c'], '950'), # delete pRFtZ
('si', 2, 'Qx', ['950'], 'bf0'), # Hj Qx OeIXW5
('sd', 2, 4, ['bf0'], '4c5'), # delete QxOe
('si', 6, 'U6', ['4c5'], '61a'), # HjIXW5 U6
('si', 3, 'AG', ['61a'], '1f0'), # HjI AG XW5U6
('si', 3, 'qwEg', ['1f0'], '393'), # HjI qwEg AGXW5U6
('si', 9, 'vsY', ['393'], '18d'), # HjIqwEgAG vsY XW5U6
('si', 0, 'MiNV', ['18d'], '688'), # MiNV HjIqwEgAGvsYXW5U6
('si', 20, 'L4n', ['688'], '796'), # MiNVHjIqwEgAGvsYXW5U L4n 6
('si', 5, '9l', ['796'], 'b29'), # MiNVH 9l jIqwEgAGvsYXW5UL4n6
('si', 1, 'k0Jf', ['b29'], 'e1a'),
# M k0Jf iNVH9ljIqwEgAGvsYXW5UL4n6
('si', 8, 'd', ['e1a'], 'a23'),
# Mk0JfiNV d H9ljIqwEgAGvsYXW5UL4n6
('sd', 3, 1, ['1f0'], '47a'), # delete A
('sd', 0, 3, ['47a'], 'cc0'), # delete HjI
('si', 4, 'K1DT', ['cc0'], 'd32'), # GXW5 K1DT U6
('si', 5, 'b3oS', ['d32'], '175'), # GXW5K b3oS 1DTU6
('si', 3, 'hm8z', ['175'], 'd28'), # GXW hm8z 5Kb3oS1DTU6
('sd', 0, 5, ['1f0'], '997'), # delete HjIAG
('si', 0, 'rBya', ['997'], '17a'), # rBya XW5U6
('sd', 7, 1, ['17a'], '592'), # delete U
('si', 8, 'cPu', ['592'], '893'), # rByaXW56 cPu
('si', 1, 'C72', ['d28', '893'], 'b20'),
# r C72 ByaXWhm8z5Kb3oS1DT6cPu
('sd', 37, 3, ['a23', 'b20'], '9e0'), # delete 6cP
]
self.css = build_changesets_from_tuples(css_data, doc)
get_cs = self.get_cs
for i in self.css[:13]:
doc.receive_changeset(i)
assert doc.get_snapshot() == 'Mk0JfiNVdH9ljIqwEgAGvsYXW5UL4n6'
for i in self.css[13:18]:
doc.receive_changeset(i)
assert doc.get_snapshot() == 'Mk0JfiNVdqwEgGvsYXWhm8z5Kb3oS1DTUL4n6'
cs = get_cs('997')
doc.receive_changeset(cs)
assert doc.get_snapshot() == 'Mk0JfiNVdvsYXWhm8z5Kb3oS1DTUL4n6'
cs = get_cs('17a')
doc.receive_changeset(cs)
assert doc.get_snapshot() == 'Mk0JfiNVdvsYrByaXWhm8z5Kb3oS1DTUL4n6'
cs = get_cs('592')
doc.receive_changeset(cs)
assert doc.get_snapshot() == 'Mk0JfiNVdvsYrByaXWhm8z5Kb3oS1DTL4n6'
cs = get_cs('893')
doc.receive_changeset(cs)
assert doc.get_snapshot() == 'Mk0JfiNVdvsYrByaXWhm8z5Kb3oS1DTL4n6cPu'
cs = get_cs('b20')
doc.receive_changeset(cs)
assert doc.get_snapshot() == \
'Mk0JfiNVdvsYrC72ByaXWhm8z5Kb3oS1DTL4n6cPu'
cs = get_cs('9e0')
doc.receive_changeset(cs)
assert doc.get_snapshot() == 'Mk0JfiNVdvsYrC72ByaXWhm8z5Kb3oS1DTL4nu'
def get_cs(self, _id):
for cs in self.css:
if cs.get_short_id() == _id:
return cs
raise Exception("wrong id, jerk", _id)
| gpl-3.0 | 4,469,329,829,727,452,700 | 37.401869 | 77 | 0.558287 | false | 2.650968 | false | false | false |
heartsucker/diceware | cli.py | 1 | 4963 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import argparse
import os
from diceware_cli import subcommands
from diceware_cli.persistence import word_states
from os import path
from sys import argv
diceware_dir = path.dirname(path.abspath(__file__))
os.chdir(diceware_dir)
def get_args(cli_args):
parser = argparse.ArgumentParser(prog=path.basename(__file__),
allow_abbrev=False,
)
subparsers = parser.add_subparsers()
subparsers.required = True
subparsers.dest = 'command'
load_db_subparser = subparsers.add_parser('load-db',
help='Load words into the database',
allow_abbrev=False,
)
load_db_subparser.add_argument('-l',
'--language',
help='The language of the wordlist',
type=str,
required=True,
)
load_db_subparser.add_argument('-f',
'--file',
help='A file to load into the db. Use \'-\' for stdin.'
'Repeat this argument for multiple files.',
action='append',
dest='files',
type=argparse.FileType('r'),
required=True,
)
load_db_subparser.add_argument('-s',
'--state',
help='The initial state for the loaded words',
type=str,
default='pending',
choices=word_states,
)
load_db_subparser.add_argument('--allow-updates',
help='Allow words in the DB to have their state updated.'
'Default behavior is insert only.',
dest='allow_updates',
action='store_true',
)
load_db_subparser.set_defaults(func=subcommands.load_db)
clean_subparser = subparsers.add_parser('clean',
help='Clean the project',
allow_abbrev=False,
)
clean_subparser.set_defaults(func=subcommands.clean_project)
finalize_subparser = subparsers.add_parser('finalize',
help='Run checks and generate enumerated wordlists',
allow_abbrev=False,
)
finalize_subparser.set_defaults(func=subcommands.finalize)
select_words_subparser = subparsers.add_parser('select-words',
help='Iterate through the DB and select or reject words',
allow_abbrev=False,
)
select_words_subparser.add_argument('-l',
'--language',
help='The language of the wordlist',
type=str,
required=True,
)
select_words_subparser.add_argument('--include-skipped',
help='Re-evaluated words that were previously skipped',
dest='include_skipped',
action='store_true',
)
select_words_subparser.set_defaults(func=subcommands.select_words)
dump_db_subparser = subparsers.add_parser('dump-db',
help='Dump the contents of the sqlite db to disk',
allow_abbrev=False,
)
dump_db_subparser.set_defaults(func=subcommands.dump_sqlite)
db_state_subparser = subparsers.add_parser('db-state',
help='Get the state of the db',
allow_abbrev=False,
)
db_state_subparser.set_defaults(func=subcommands.db_state)
return parser.parse_args(cli_args)
if __name__ == '__main__':
try:
args = get_args(argv[1:])
args.func(args)
except KeyboardInterrupt:
print('') # for a pretty newline
exit(1)
| mit | -1,152,191,218,769,473,500 | 45.383178 | 108 | 0.408624 | false | 5.880332 | false | false | false |
ThunderDynamics/tdic | forms.py | 1 | 3566 | import os
from sys import getsizeof
import models
from flask_wtf import Form
from wtforms import StringField, PasswordField, TextAreaField, BooleanField, FileField
from wtforms.validators import ValidationError, DataRequired, regexp, Email, EqualTo, Length
from flask_bcrypt import check_password_hash
if 'HEROKU' in os.environ:
AUTH_PASS = os.environ['auth_pass']
else:
AUTH_PASS = 'gjdfskghl'
def username_exists(form, field):
print(form)
try:
models.User.get(models.User.username ** field.data)
except models.DoesNotExist:
pass
else:
raise ValidationError('User with that username already exists')
def email_exists(form, field):
print(form)
try:
models.User.get(models.User.email ** field.data)
except models.DoesNotExist:
pass
else:
raise ValidationError('User with that email already exists')
def auth_matches(form, field):
print(form)
if 'HEROKU' in os.environ:
if check_password_hash(AUTH_PASS, field.data):
pass
else:
raise ValidationError('Special Password Incorrect')
def valid_image(form, field):
print(form)
if field.data:
ext = os.path.splitext(field.data.filename)[1].strip(".")
if ext in ['jpeg', 'jpg', 'png', 'psd', 'gif', 'bmp', 'exif', 'tif', 'tiff']:
file_u = field.data
if getsizeof(file_u) <= 3000000:
pass
else:
raise ValidationError('Avatar is bigger than 3 mb.')
else:
raise ValidationError('Avatar is not an image.')
else:
pass
class SignUpForm(Form):
username = StringField(
'Username',
validators=[
DataRequired(),
username_exists,
regexp(r'^[a-z0-9]{3,10}$',
message='Username can only be lowercase letters & numbers, '
'and length can only be 3-10 characters long')
]
)
email = StringField(
'Email',
validators=[
DataRequired(),
email_exists,
Email()
]
)
first_name = StringField(
'First Name',
validators=[
DataRequired(),
regexp(r'[A-Z][a-z]+', message='Name can only be uppercase first letter and lowercase proceeding letters')
]
)
last_name = StringField(
'Last Name',
validators=[
DataRequired(),
regexp(r'[A-Z][a-z]+', message='Name can only be uppercase first letter and lowercase proceeding letters')
]
)
password = PasswordField(
'Password',
validators=[
DataRequired(),
EqualTo('password2', message='Passwords must match'),
]
)
password2 = PasswordField(
'Confirm Password',
validators=[DataRequired()]
)
auth = PasswordField(
'Special Password',
validators=[
DataRequired(),
auth_matches
]
)
class SignInForm(Form):
name_email = StringField('Username or Email', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember Me')
class PostForm(Form):
content = TextAreaField('What do you have to say?', validators=[Length(1, 255)],
render_kw={'class': 'materialize-textarea',
'data-length': '255'})
image = FileField('Optional Image (Up to 3 MB)', validators=[valid_image])
| apache-2.0 | -6,980,047,512,911,187,000 | 27.301587 | 118 | 0.581884 | false | 4.317191 | false | false | false |
pmonta/GNSS-DSP-tools | gnsstools/glonass/p.py | 1 | 1030 | # GLONASS P code construction
#
# Copyright 2014 Peter Monta
import numpy as np
chip_rate = 5110000
code_length = 5110000
def glonass_p_shift(x):
return [x[24]^x[2]] + x[0:24]
def make_glonass_p():
n = code_length
x = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
y = np.zeros(n)
for i in range(n):
y[i] = x[9]
x = glonass_p_shift(x)
return y
c = make_glonass_p()
def p_code():
return c
def code(chips,frac,incr,n):
idx = (chips%code_length) + frac + incr*np.arange(n)
idx = np.floor(idx).astype('int')
idx = np.mod(idx,code_length)
x = c[idx]
return 1.0 - 2.0*x
try:
from numba import jit
except ImportError:
def jit(**kwargs):
return lambda x: x
@jit(nopython=True)
def correlate(x,chips,frac,incr,c):
n = len(x)
p = 0.0j
cp = (chips+frac)%code_length
for i in range(n):
p += x[i]*(1.0-2.0*c[int(cp)])
cp += incr
if cp>=code_length:
cp -= code_length
return p
#
# testing: print out a small sample of the code
#
if __name__=='__main__':
print(c[0:100])
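  # Illustrative sketch (added commentary, not in the original file): sample
  # the P code at an assumed 10.23 MHz receiver rate and correlate the samples
  # against the code itself; at zero lag the sum equals the number of samples.
  # Note the full code period is code_length/chip_rate = 1 second.
  fs = 10230000.0
  nsamp = 2000
  incr = chip_rate / fs
  samples = code(0, 0.0, incr, nsamp)
  print(correlate(samples, 0, 0.0, incr, c))   # expect (nsamp + 0j)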
| mit | 6,232,927,010,712,315,000 | 17.070175 | 57 | 0.595146 | false | 2.263736 | false | false | false |
bmwiedemann/linuxcnc-mirror | lib/python/gladevcp/hal_gremlin.py | 1 | 11812 | #!/usr/bin/env python
# vim: sts=4 sw=4 et
# GladeVcp Widgets
#
# Copyright (c) 2010 Pavel Shramov <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# 2014 Steffen Noack
# add property 'mouse_btn_mode'
# 0 = default: left rotate, middle move, right zoom
# 1 = left zoom, middle move, right rotate
# 2 = left move, middle rotate, right zoom
# 3 = left zoom, middle rotate, right move
# 4 = left move, middle zoom, right rotate
# 5 = left rotate, middle zoom, right move
import os
import gtk, gobject
import linuxcnc
import gremlin
import rs274.glcanon
import gcode
from hal_actions import _EMC_ActionBase
from hal_glib import GStat
class HAL_Gremlin(gremlin.Gremlin, _EMC_ActionBase):
__gtype_name__ = "HAL_Gremlin"
__gsignals__ = {
'line-clicked': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_INT,)),
'gcode_error': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)),
}
__gproperties__ = {
'view' : ( gobject.TYPE_STRING, 'View type', 'Default view: p, x, y, y2, z, z2',
'p', gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'enable_dro' : ( gobject.TYPE_BOOLEAN, 'Enable DRO', 'Show DRO on graphics',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'metric_units' : ( gobject.TYPE_BOOLEAN, 'Use Metric Units', 'Show DRO in metric or imperial units',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_relative' : ( gobject.TYPE_BOOLEAN, 'Show Relative', 'Show DRO relative to active system or machine origin',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_commanded' : ( gobject.TYPE_BOOLEAN, 'Show Commanded', 'Show commanded or actual position',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_extents_option' : ( gobject.TYPE_BOOLEAN, 'Show Extents', 'Show machine extents',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_limits' : ( gobject.TYPE_BOOLEAN, 'Show limits', 'Show machine limits',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_live_plot' : ( gobject.TYPE_BOOLEAN, 'Show live plot', 'Show machine plot',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_velocity' : ( gobject.TYPE_BOOLEAN, 'Show tool speed', 'Show tool velocity',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_program' : ( gobject.TYPE_BOOLEAN, 'Show program', 'Show program',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_rapids' : ( gobject.TYPE_BOOLEAN, 'Show rapids', 'Show rapid moves',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_tool' : ( gobject.TYPE_BOOLEAN, 'Show tool', 'Show tool',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_dtg' : ( gobject.TYPE_BOOLEAN, 'Show DTG', 'Show Distance To Go',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_lathe_radius' : ( gobject.TYPE_BOOLEAN, 'Show Lathe Radius', 'Show X axis in Radius',
False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'grid_size' : ( gobject.TYPE_FLOAT, 'Grid Size', 'Grid Size',
0, 100, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_joints_mode' : ( gobject.TYPE_BOOLEAN, 'Use joints mode', 'Use joints mode',
False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_default_controls' : ( gobject.TYPE_BOOLEAN, 'Use Default Mouse Controls', 'Use Default Mouse Controls',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'mouse_btn_mode' : ( gobject.TYPE_INT, 'Mouse Button Mode',
('Mousebutton assignment, l means left, m middle, r right \n'
'0 = default: l-rotate, m-move, r-zoom \n'
'1 = l-zoom, m-move, r-rotate\n'
'2 = l-move, m-rotate, r-zoom\n'
'3 = l-zoom, m-rotate, r-move\n'
'4 = l-move, m-zoom, r-rotate\n'
'5 = l-rotate, m-zoom, r-move\n'
'6 = l-move, m-zoom, r-zoom'),
0, 6, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
}
__gproperties = __gproperties__
def __init__(self, *a, **kw):
gobject.GObject.__init__(self)
inifile = os.environ.get('INI_FILE_NAME', '/dev/null')
inifile = linuxcnc.ini(inifile)
gremlin.Gremlin.__init__(self, inifile)
self._reload_filename = None
self.gstat = GStat()
self.gstat.connect('file-loaded', self.fileloaded)
self.gstat.connect('reload-display', self.reloadfile)
self.show()
def reloadfile(self,w):
try:
self.fileloaded(None,self._reload_filename)
except:
pass
def fileloaded(self,w,f):
self._reload_filename=f
try:
self._load(f)
except AttributeError,detail:
#AttributeError: 'NoneType' object has no attribute 'gl_end'
print 'hal_gremlin: continuing after',detail
def do_get_property(self, property):
name = property.name.replace('-', '_')
if name == 'view':
return self.current_view
elif name in self.__gproperties.keys():
return getattr(self, name)
else:
raise AttributeError('unknown property %s' % property.name)
def do_set_property(self, property, value):
name = property.name.replace('-', '_')
if name == 'view':
view = value.lower()
if self.lathe_option:
if view not in ['p','y','y2']:
return False
elif view not in ['p', 'x', 'y', 'z', 'z2']:
return False
self.current_view = view
if self.initialised:
self.set_current_view()
elif name == 'enable_dro':
self.enable_dro = value
elif name == 'metric_units':
self.metric_units = value
elif name in self.__gproperties.keys():
setattr(self, name, value)
else:
raise AttributeError('unknown property %s' % property.name)
self.queue_draw()
return True
    # This overrides the glcanon.py method so we can change the DRO
def dro_format(self,s,spd,dtg,limit,homed,positions,axisdtg,g5x_offset,g92_offset,tlo_offset):
if not self.enable_dro:
return limit, homed, [''], ['']
if self.metric_units:
format = "% 6s:% 9.3f"
if self.show_dtg:
droformat = " " + format + " DTG %1s:% 9.3f"
else:
droformat = " " + format
offsetformat = "% 5s %1s:% 9.3f G92 %1s:% 9.3f"
rotformat = "% 5s %1s:% 9.3f"
else:
format = "% 6s:% 9.4f"
if self.show_dtg:
droformat = " " + format + " DTG %1s:% 9.4f"
else:
droformat = " " + format
offsetformat = "% 5s %1s:% 9.4f G92 %1s:% 9.4f"
rotformat = "% 5s %1s:% 9.4f"
diaformat = " " + format
posstrs = []
droposstrs = []
for i in range(9):
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
posstrs.append(format % (a, positions[i]))
if self.show_dtg:
droposstrs.append(droformat % (a, positions[i], a, axisdtg[i]))
else:
droposstrs.append(droformat % (a, positions[i]))
droposstrs.append("")
for i in range(9):
index = s.g5x_index
if index<7:
label = "G5%d" % (index+3)
else:
label = "G59.%d" % (index-6)
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
droposstrs.append(offsetformat % (label, a, g5x_offset[i], a, g92_offset[i]))
droposstrs.append(rotformat % (label, 'R', s.rotation_xy))
droposstrs.append("")
for i in range(9):
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
droposstrs.append(rotformat % ("TLO", a, tlo_offset[i]))
# if its a lathe only show radius or diameter as per property
# we have to adjust the homing icon to line up:
if self.is_lathe():
if homed[0]:
homed.pop(0)
homed.pop(0)
homed.insert(0,1)
homed.insert(0,0)
posstrs[0] = ""
if self.show_lathe_radius:
posstrs.insert(1, format % ("Rad", positions[0]))
else:
posstrs.insert(1, format % ("Dia", positions[0]*2.0))
droposstrs[0] = ""
if self.show_dtg:
if self.show_lathe_radius:
droposstrs.insert(1, droformat % ("Rad", positions[0], "R", axisdtg[0]))
else:
droposstrs.insert(1, droformat % ("Dia", positions[0]*2.0, "D", axisdtg[0]*2.0))
else:
if self.show_lathe_radius:
droposstrs.insert(1, droformat % ("Rad", positions[0]))
else:
droposstrs.insert(1, diaformat % ("Dia", positions[0]*2.0))
if self.show_velocity:
posstrs.append(format % ("Vel", spd))
pos=0
for i in range(9):
if s.axis_mask & (1<<i): pos +=1
            if self.is_lathe():  # call the method; the bare attribute is always truthy
pos +=1
droposstrs.insert(pos, " " + format % ("Vel", spd))
if self.show_dtg:
posstrs.append(format % ("DTG", dtg))
return limit, homed, posstrs, droposstrs
    # Override gremlin's / glcanon.py function so we can emit a GObject signal
def update_highlight_variable(self,line):
self.highlight_line = line
        if line is None:
line = -1
self.emit('line-clicked', line)
def realize(self, widget):
gremlin.Gremlin.realize(self, widget)
@rs274.glcanon.with_context
def _load(self, filename):
return self.load(filename)
def report_gcode_error(self, result, seq, filename):
error_str = gcode.strerror(result)
errortext = "G-Code error in " + os.path.basename(filename) + "\n" + "Near line " \
+ str(seq) + " of\n" + filename + "\n" + error_str + "\n"
print(errortext)
self.emit("gcode-error", errortext)
| lgpl-2.1 | 1,227,178,718,324,151,000 | 44.083969 | 121 | 0.526414 | false | 3.729713 | false | false | false |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/examples/preprocessing/plot_xdawn_denoising.py | 8 | 2719 | """
================
XDAWN Denoising
================
XDAWN filters are trained from epochs, signal is projected in the sources
space and then projected back in the sensor space using only the first two
XDAWN components. The process is similar to an ICA, but is
supervised in order to maximize the signal to signal + noise ratio of the
evoked response.
WARNING: As this denoising method exploits the known events to
maximize SNR of the contrast between conditions it can lead to overfit.
To avoid a statistical analysis problem you should split epochs used
in fit with the ones used in apply method.
References
----------
[1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
algorithm to enhance evoked potentials: application to brain-computer
interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
[2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
August). Theoretical analysis of xDAWN algorithm: application to an
efficient sensor selection in a P300 BCI. In Signal Processing Conference,
2011 19th European (pp. 1382-1386). IEEE.
"""
# Authors: Alexandre Barachant <[email protected]>
#
# License: BSD (3-clause)
from mne import (io, compute_raw_covariance, read_events, pick_types,
Epochs)
from mne.datasets import sample
from mne.preprocessing import Xdawn
from mne.viz import plot_epochs_image
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(vis_r=4)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(1, 20, method='iir') # replace baselining with high-pass
events = read_events(event_fname)
raw.info['bads'] = ['MEG 2443'] # set bad channels
picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
exclude='bads')
# Epoching
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
add_eeg_ref=False, verbose=False)
# Plot image epoch before xdawn
plot_epochs_image(epochs['vis_r'], picks=[230], vmin=-500, vmax=500)
# Estimates signal covariance
signal_cov = compute_raw_covariance(raw, picks=picks)
# Xdawn instance
xd = Xdawn(n_components=2, signal_cov=signal_cov)
# Fit xdawn
xd.fit(epochs)
# Denoise epochs
epochs_denoised = xd.apply(epochs)
# Plot image epoch after xdawn
plot_epochs_image(epochs_denoised['vis_r'], picks=[230], vmin=-500, vmax=500)
| bsd-3-clause | 5,871,426,801,680,955,000 | 32.9875 | 79 | 0.69327 | false | 3.143353 | false | false | false |
lbt/boss-launcher-webhook | src/webhook_launcher/app/models.py | 1 | 20150 | # Copyright (C) 2013 Jolla Ltd.
# Contact: Islam Amer <[email protected]>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to
# the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import re
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from webhook_launcher.app.boss import launch_notify, launch_build
from webhook_launcher.app.misc import get_or_none, giturlparse
# FIXME: All null=True + blank=True text fields
# Unless it is intentional that a text field can be set to either NULL or ''
# (empty string), it is recommended not to use null=True, to avoid the
# situation where the field has two possible values for empty. That can be
# problematic, for example in lookups where NULL and '' behave differently
class BuildService(models.Model):
namespace = models.CharField(
max_length=50,
unique=True,
help_text="This is also used to identify the OBS alias "
"in BOSS processes",
)
apiurl = models.CharField(
max_length=250,
unique=True,
)
weburl = models.CharField(
max_length=250,
unique=True,
)
def __unicode__(self):
return self.weburl
class VCSService(models.Model):
name = models.CharField(
max_length=50,
unique=True,
help_text="Friendly name of this VCS hosting service",
)
netloc = models.CharField(
max_length=200,
unique=True,
help_text="Network location from payload "
"(for example: [email protected]:1234)",
)
ips = models.TextField(
blank=True,
null=True,
help_text="Known IP adresses of this service (optional)",
)
def __unicode__(self):
return self.netloc
class VCSNameSpace(models.Model):
service = models.ForeignKey(
VCSService,
help_text="VCS service where this namespace is hosted",
)
path = models.CharField(
max_length=200,
help_text="the network path "
"(gitlab group or github organization eg. /mer-core)",
)
default_project = models.ForeignKey(
"Project",
blank=True,
null=True,
help_text="Default project for webhook placeholder creation",
)
def __unicode__(self):
return "%s%s" % (self.service, self.path)
@staticmethod
def find(repourl):
url = giturlparse(repourl)
return get_or_none(
VCSNameSpace,
service__netloc=url.netloc,
path=os.path.dirname(url.path)
)
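    # Example (sketch, not part of the original model): find() maps a clone
    # URL to a configured namespace by matching the service netloc and the
    # parent path of the repo, e.g. (assuming such rows exist):
    #   ns = VCSNameSpace.find('https://github.com/mer-core/libzypp')
    # would return the VCSNameSpace with path '/mer-core' on the github
    # service, or None when nothing matches.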
class Project(models.Model):
name = models.CharField(
max_length=250,
help_text="The OBS project name. eg nemo:mw",
)
obs = models.ForeignKey(
BuildService,
)
official = models.BooleanField(
default=True,
help_text="If set then only valid namespaces can be used for the "
"git repo",
)
allowed = models.BooleanField(
default=True,
help_text="If not set then webhooks are not allowed for this project. "
"This is useful for projects which should only have "
"specific versions of packages promoted to them.",
)
gated = models.BooleanField(
default=False,
help_text="If set then webhooks pointing at this project will be "
"triggered to a side project instead and then "
"an autopromotion attempted. This is useful for projects "
"which apply formal entry checks and/or QA.",
)
groups = models.ManyToManyField(
Group,
blank=True,
)
vcsnamespaces = models.ManyToManyField(
VCSNameSpace,
blank=True,
)
match = models.CharField(
max_length=250,
blank=True,
null=True,
help_text="If set then used as well as name to re.match() "
"project names",
)
class Meta:
unique_together = (("name", "obs"),)
def __unicode__(self):
return "%s on %s" % (self.name, self.obs)
def is_repourl_allowed(self, repourl):
repourl = giturlparse(repourl)
netloc = repourl.netloc
path = repourl.path.rsplit("/", 1)[1]
if self.vcsnamespaces.count():
return self.vcsnamespaces.filter(
path=path,
service__netloc=netloc,
).count()
else:
return True
def is_user_allowed(self, user):
user_groups = set(user.groups.all())
groups = set(self.groups.all())
if groups and (user_groups & groups):
return True
else:
return False
def matches(self, proj_name):
# TODO Update if/when
# https://pypi.python.org/pypi/django-regex-field/0.1.4 is used
if proj_name == self.name:
return True
if self.match:
# this is optimised to a cache in regex-field
reg = re.compile(self.match)
if reg.match(proj_name):
return True
return False
class WebHookMapping(models.Model):
# If any fields are added/removed then ensure they are handled
# correctly in to_fields and the webhook_diff.py
repourl = models.CharField(
max_length=200,
help_text="url of git repo to clone from. Should be a remote http[s]",
)
branch = models.CharField(
max_length=100,
default="master",
help_text="name of branch to use. If not specified default branch "
"(or currently checked out one) will be used",
)
project = models.CharField(
max_length=250,
default=settings.DEFAULT_PROJECT,
help_text="name of an existing project under which to create "
"or update the package",
)
package = models.CharField(
max_length=250,
help_text="name of the package to create or update in OBS",
)
token = models.CharField(
max_length=100,
default="",
null=True,
blank=True,
help_text="a token that should exist in tag names and "
"changelog entry headers to enable handling them",
)
debian = models.CharField(
max_length=2,
default="",
null=True,
blank=True,
choices=(
('N', 'N'),
('Y', 'Y'),
),
help_text="Choose Y to turn on debian packaging support",
)
dumb = models.CharField(
max_length=2,
default="",
null=True,
blank=True,
choices=(
('N', 'N'),
('Y', 'Y'),
),
help_text="Choose Y to take content of revision as-is without "
"automatic processing (example: tarballs in git)",
)
notify = models.BooleanField(
default=True,
help_text="Enable IRC notifications of events",
)
build = models.BooleanField(
default=True,
help_text="Enable OBS build triggering",
)
comment = models.TextField(
blank=True,
null=True,
default="",
)
user = models.ForeignKey(
User,
editable=False,
)
obs = models.ForeignKey(
BuildService,
)
class Meta:
unique_together = (("project", "package", "obs"),)
def __unicode__(self):
return "%s/%s -> %s/%s" % (
self.repourl, self.branch, self.project, self.package
)
@property
def tag(self):
lsr = self.lsr
if lsr:
return lsr.tag
@property
def revision(self):
lsr = self.lsr
if lsr:
return lsr.revision
@property
def lsr(self):
# TODO: refactor the WebHookMapping and LastSeenRevision relation
if not hasattr(self, '_lsr'):
if self.pk:
self._lsr, _ = LastSeenRevision.objects.get_or_create(
mapping=self
)
else:
return None
return self._lsr
@property
def mapped(self):
return self.project and self.package
@property
def rev_or_head(self):
return self.revision or self.branch
@property
def project_disabled(self):
# Just search all Projects for a match
for project in Project.objects.all():
if project.matches(self.project):
print "Project disable check: %s matches rules in %s" % (
self.project, project.name
)
if project and not project.allowed:
# Disabled if Project is marked not-allowed
return True
if project and project.official:
# Disabled if Project is official and namespace is not
# valid
repourl = giturlparse(self.repourl)
service = get_or_none(
VCSService,
netloc=repourl.netloc,
)
if not service:
return True
namespace = get_or_none(
VCSNameSpace,
service=service,
path=os.path.dirname(repourl.path),
)
if not namespace:
return True
return False
def clean(self, exclude=None):
self.repourl = self.repourl.strip()
self.branch = self.branch.strip()
self.project = self.project.strip()
self.package = self.package.strip()
if WebHookMapping.objects.exclude(pk=self.pk).filter(
project=self.project,
package=self.package,
obs=self.obs
).count():
raise ValidationError(
'A mapping object with the same parameters already exists'
)
repourl = giturlparse(self.repourl)
service = get_or_none(VCSService, netloc=repourl.netloc)
if settings.SERVICE_WHITELIST and service is None:
raise ValidationError(
'%s is not an allowed service' % repourl.netloc
)
project = get_or_none(Project, name=self.project)
if project and not project.allowed:
raise ValidationError(
'Project %s does not allow mappings' % project
)
if project and project.official:
namespace = get_or_none(
VCSNameSpace,
service=service,
path=os.path.dirname(repourl.path),
)
if not service or not namespace:
raise ValidationError(
'Official project %s allows mapping from known service '
'namespaces only' % project
)
if settings.STRICT_MAPPINGS:
if project and not project.is_repourl_allowed(self.repourl):
raise ValidationError(
"Webhook mapping repourl is not allowed by %s's "
"strict rules" % project
)
if project and not project.is_user_allowed(self.user):
raise ValidationError(
"Webhook mapping to %s not allowed for %s" %
(project, self.user)
)
if (
not self.project.startswith("home:%s" % self.user.username) and
not self.user.is_superuser
):
raise ValidationError(
"Webhook mapping to %s not allowed for %s" %
(project, self.user)
)
def trigger_build(self, user=None, tag=None, force=False):
if not self.pk:
raise RuntimeError(
"trigger_build() on unsaved WebHookMapping"
)
# Only fire for projects which allow webhooks. We can't just
# rely on validation since a Project may forbid hooks after
# the hook was created
if self.project_disabled:
print "Project has build disabled"
return
handled = self.lsr.handled and self.lsr.tag == tag and not force
if handled:
print "build already handled, skipping"
build = self.build and self.mapped and not handled
qp = None
if user is None:
user = self.user.username
if build:
if tag:
self.lsr.tag = tag
# Find possible queue period objects
qps = QueuePeriod.objects.filter(
projects__name=self.project,
projects__obs=self.obs,
)
for qp in qps:
if qp.delay() and not qp.override(webuser=user):
print "Build trigger for %s delayed by %s" % (self, qp)
print qp.comment
build = False
break
else:
qp = None
message = self._get_build_message(user, force, handled, qp)
fields = self.to_fields()
fields['msg'] = message
if self.notify:
launch_notify(fields)
if build:
fields = self.to_fields()
launch_build(fields)
self.lsr.handled = True
self.lsr.save()
return message
def _get_build_message(self, user, force=None, handled=False, qp=None):
parts = []
if force:
parts.append("Forced build trigger:")
if self.tag:
parts.append("Tag %s" % self.tag)
else:
parts.append(self.revision)
parts.append(
"by %s in %s branch of %s" % (
user, self.branch, self.repourl,
)
)
if not self.mapped:
parts.append("- which is not mapped yet. Please map it.")
elif self.build:
parts.append(
"- which will trigger build in project %s package "
"%s (%s/package/show/%s/%s)" % (
self.project, self.package, self.obs.weburl,
self.package, self.project,
)
)
elif handled:
parts.append("- which was already handled; skipping")
elif qp:
parts.append("- which will be delayed by %s" % qp)
if qp.comment:
parts.append("(%s)" % qp.comment)
return " ".join(parts)
def handle_commit(self, user=None, notify=None):
if not self.pk:
raise RuntimeError(
"handle_commit() on unsaved WebHookMapping"
)
if user is None:
user = self.user.username
if notify is None:
notify = self.notify
self.lsr.tag = ""
self.lsr.handled = False
self.lsr.save()
if not notify:
return
message = "Commit(s) pushed by %s to %s branch of %s" % (
user, self.branch, self.repourl
)
if not self.mapped:
message = "%s, which is not mapped yet. Please map it." % message
fields = self.to_fields()
fields['msg'] = message
print message
launch_notify(fields)
def to_fields(self):
fields = {}
fields['repourl'] = self.repourl
fields['branch'] = self.branch
fields['pk'] = self.pk
if self.project:
fields['project'] = self.project
fields['package'] = self.package
fields['ev'] = {
'namespace': self.obs.namespace
}
if self.token:
fields['token'] = self.token
if self.debian:
fields['debian'] = self.debian
if self.dumb:
fields['dumb'] = self.dumb
if self.revision:
fields['revision'] = self.revision
if self.tag:
fields['tag'] = self.tag
return fields
class LastSeenRevision(models.Model):
mapping = models.ForeignKey(
WebHookMapping,
)
revision = models.CharField(
max_length=250,
)
tag = models.CharField(
max_length=50,
blank=True,
null=True
)
handled = models.BooleanField(
default=False,
editable=False,
)
timestamp = models.DateTimeField(
auto_now=True,
)
emails = models.TextField(
blank=True,
null=True,
editable=False,
)
payload = models.TextField(
blank=True,
null=True,
editable=False,
)
def __unicode__(self):
return "%s @ %s/%s" % (
self.revision, self.mapping.repourl, self.mapping.branch
)
class QueuePeriod(models.Model):
start_time = models.TimeField(
default=timezone.now,
)
end_time = models.TimeField(
default=timezone.now,
)
start_date = models.DateField(
blank=True,
null=True,
)
end_date = models.DateField(
blank=True,
null=True,
)
recurring = models.BooleanField(
default=False,
)
comment = models.TextField(
blank=True,
null=True,
)
projects = models.ManyToManyField(
Project,
)
class Meta:
permissions = (
("can_override_queueperiod", "Can override queue periods"),
)
def __unicode__(self):
return "Queue period from %s %s to %s %s for %s" % (
self.start_date or "", self.start_time, self.end_date or "",
self.end_time,
",".join([str(prj) for prj in self.projects.all()])
)
def override(self, user):
if not user:
return False
if user.has_perm("app.can_override_queueperiod"):
return True
    def delay(self, dto=None):
        # evaluate "now" at call time; a timezone.now() default argument
        # would be frozen at class definition time
        if dto is None:
            dto = timezone.now()
if self.start_time <= self.end_time:
if not (self.start_time <= dto.time() <= self.end_time):
# wrong time of day
return False
if self.start_time >= self.end_time:
if (self.start_time >= dto.time() >= self.end_time):
# wrong time of day
return False
if self.start_date and (dto.date() < self.start_date):
# not started yet
return False
if self.end_date and (dto.date() > self.end_date):
# already ended
return False
return True
class RelayTarget(models.Model):
active = models.BooleanField(
default=True,
help_text="Whether this relay will fire on matching events",
)
name = models.CharField(
max_length=50,
help_text="Friendly name of recipient, for example: Organization name",
)
url = models.CharField(
max_length=200,
help_text="HTTP(S) endpoint which will receive POST of GIT events "
"(for example http://webhook.example.com/webhook/)",
)
verify_SSL = models.BooleanField(
default=True,
help_text="Turn on SSL certificate verification",
)
sources = models.ManyToManyField(
VCSNameSpace,
help_text="List of VCS namespaces "
"(for example github organization or gitlab groups)",
)
def __unicode__(self):
return "%s webhook relay" % self.name
| gpl-2.0 | 3,786,978,616,088,420,000 | 28.807692 | 79 | 0.548486 | false | 4.351112 | false | false | false |
gengwg/leetcode | 099_recover_bst.py | 1 | 2263 | # -*- coding: utf-8 -*-
"""
99. Recover Binary Search Tree
Two elements of a binary search tree (BST) are swapped by mistake.
Recover the tree without changing its structure.
Note:
A solution using O(n) space is pretty straight forward. Could you devise a constant space solution?
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def __repr__(self):
if self:
serial = []
queue = [self]
while queue:
cur = queue[0]
if cur:
serial.append(cur.val)
queue.append(cur.left)
queue.append(cur.right)
else:
serial.append("#")
queue = queue[1:]
while serial[-1] == "#":
serial.pop()
return repr(serial)
else:
return None
class Solution(object):
def recoverTree(self, root):
"""
        Approach 1: the idea is simple. An in-order traversal of a binary
        search tree should be ascending; because two nodes were swapped, the
        in-order traversal of this broken BST is not. We only need to restore
        the order and write the values back.
        Keep two lists: `list` stores the node values seen during the
        traversal and `listp` stores references to the visited nodes.
        Sort `list`, then assign the sorted values back through the node
        references kept in `listp`.
:type root: TreeNode
:rtype: void Do not return anything, modify root in-place instead.
"""
list = []
listp = []
self.inorder(root, list, listp)
list.sort()
for i in range(len(list)):
listp[i].val = list[i]
return root
def inorder(self, root, list, listp):
if root:
self.inorder(root.left, list, listp)
list.append(root.val)
listp.append(root)
self.inorder(root.right, list, listp)
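# A follow-up sketch (not part of the original solution): the same recovery
# with O(1) extra space apart from the recursion stack. Track the previous
# node during the in-order traversal, remember the two nodes that break the
# ascending order, and swap their values at the end. A true O(1) solution
# would replace the recursion with a Morris traversal.
class SolutionConstantSpace(object):
    def recoverTree(self, root):
        self.first = self.second = self.prev = None
        self._inorder(root)
        if self.first and self.second:
            self.first.val, self.second.val = self.second.val, self.first.val
        return root
    def _inorder(self, node):
        if not node:
            return
        self._inorder(node.left)
        if self.prev and self.prev.val > node.val:
            if not self.first:
                self.first = self.prev
            self.second = node
        self.prev = node
        self._inorder(node.right)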
if __name__ == "__main__":
root = TreeNode(0)
root.left = TreeNode(1)
print root
print Solution().recoverTree(root)
| apache-2.0 | -6,397,628,704,604,629,000 | 23.3875 | 99 | 0.536135 | false | 2.747887 | false | false | false |
desihub/desisim | py/desisim/scripts/pixsim.py | 1 | 4345 | """
desisim.scripts.pixsim
======================
This is a module.
"""
from __future__ import absolute_import, division, print_function
import os,sys
import os.path
import shutil
import random
from time import asctime
import numpy as np
import desimodel.io
from desiutil.log import get_logger
import desispec.io
from desispec.parallel import stdouterr_redirected
from ..pixsim import simulate_exposure
from .. import io
log = get_logger()
def expand_args(args):
    '''expand camera string into a list of cameras and derive default
    simspec, rawfile and simpixfile paths from --night and --expid
    '''
if args.simspec is None:
if args.night is None or args.expid is None:
msg = 'Must set --simspec or both --night and --expid'
log.error(msg)
raise ValueError(msg)
args.simspec = io.findfile('simspec', args.night, args.expid)
#- expand camera list
if args.cameras is not None:
args.cameras = args.cameras.split(',')
#- write to same directory as simspec
if args.rawfile is None:
rawfile = os.path.basename(desispec.io.findfile('raw', args.night, args.expid))
args.rawfile = os.path.join(os.path.dirname(args.simspec), rawfile)
if args.simpixfile is None:
outdir = os.path.dirname(os.path.abspath(args.rawfile))
args.simpixfile = io.findfile(
'simpix', night=args.night, expid=args.expid, outdir=outdir)
#-------------------------------------------------------------------------
#- Parse options
def parse(options=None):
import argparse
parser = argparse.ArgumentParser(
description = 'Generates simulated DESI pixel-level raw data',
)
#- Inputs
parser.add_argument("--simspec", type=str, help="input simspec file")
parser.add_argument("--psf", type=str, help="PSF filename")
parser.add_argument("--cosmics", action="store_true", help="Add cosmics")
# parser.add_argument("--cosmics_dir", type=str,
# help="Input directory with cosmics templates")
# parser.add_argument("--cosmics_file", type=str,
# help="Input file with cosmics templates")
#- Outputs
parser.add_argument("--rawfile", type=str, help="output raw data file")
parser.add_argument("--simpixfile", type=str,
help="output truth image file")
#- Alternately derive inputs/outputs from night, expid, and cameras
parser.add_argument("--night", type=str, help="YEARMMDD")
parser.add_argument("--expid", type=int, help="exposure id")
parser.add_argument("--cameras", type=str, help="cameras, e.g. b0,r5,z9")
parser.add_argument("--ccd_npix_x", type=int,
help="for testing; number of x (columns) to include in output",
default=None)
parser.add_argument("--ccd_npix_y", type=int,
help="for testing; number of y (rows) to include in output",
default=None)
parser.add_argument("--verbose", action="store_true",
help="Include debug log info")
parser.add_argument("--overwrite", action="store_true",
help="Overwrite existing raw and simpix files")
#- Not yet supported so don't pretend it is
### parser.add_argument("--seed", type=int, help="random number seed")
parser.add_argument("--ncpu", type=int,
help="Number of cpu cores per thread to use", default=0)
parser.add_argument("--wavemin", type=float,
help="Minimum wavelength to simulate")
parser.add_argument("--wavemax", type=float,
help="Maximum wavelength to simulate")
parser.add_argument("--nspec", type=int,
help="Number of spectra to simulate per camera")
if options is None:
args = parser.parse_args()
else:
options = [str(x) for x in options]
args = parser.parse_args(options)
expand_args(args)
return args
def main(args, comm=None):
if args.verbose:
import logging
log.setLevel(logging.DEBUG)
if comm is None or comm.rank == 0:
log.info('Starting pixsim at {}'.format(asctime()))
if args.overwrite and os.path.exists(args.rawfile):
log.debug('Removing {}'.format(args.rawfile))
os.remove(args.rawfile)
simulate_exposure(args.simspec, args.rawfile, cameras=args.cameras,
simpixfile=args.simpixfile, addcosmics=args.cosmics,
nspec=args.nspec, wavemin=args.wavemin, wavemax=args.wavemax,
comm=comm)
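# Example (sketch, not part of the original module): driving the simulator
# directly for a single exposure, assuming the simspec file for that
# night/expid already exists so expand_args() can locate it:
#
#   args = parse(['--night', '20200101', '--expid', '42', '--cameras', 'b0'])
#   main(args)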
| bsd-3-clause | -9,126,274,683,811,697,000 | 32.945313 | 87 | 0.640046 | false | 3.599834 | false | false | false |
tBaxter/tango-admin | tango_admin/admin_actions.py | 1 | 4475 | from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.models import Session
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from .forms import BlacklistForm
from .models import Blacklist
def nuke_users(modeladmin, request, queryset):
"""
Deactivates user, removes their comments, deletes their session,
and leaves a record of what they did to get nuked.
This action can be used from user or comment admin.
If you would like to use it in other model admins,
you'll need to add appropriate content type handling.
"""
users = None
form = BlacklistForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
contenttype = ContentType.objects.get_for_model(queryset.model)
# Because we want this action available from comments or user admin lists, sort out content type
ctype_as_string = unicode(contenttype)
if ctype_as_string == 'user':
users = queryset
if ctype_as_string == 'comment':
# build list of unique users within comment list.
users = []
for comment in queryset:
if not comment.user in users:
users.append(comment.user)
if ctype_as_string == 'contact':
# build list of unique users from contact list.
users = []
for c in queryset:
if c.user and c.user not in users:
users.append(c.user)
if not users:
# we haven't built out a content-type appropriate user list.
return HttpResponse("Error finding content type: %s" % contenttype)
if 'apply_blacklist' in request.POST: # we're returning from the intermediate page and are ready to do some work.
form = BlacklistForm(request.POST)
if form.is_valid():
reason = form.cleaned_data['reason']
spammer = form.cleaned_data['is_spammer']
for user in users:
# Deactivate user accounts
# Note: Update is more efficient,
# but we can't use it because we may have a list (from comments)
# rather than a proper queryset.
user.is_active = False
user.save()
for c in user.comment_comments.all(): # remove their comments from public view.
if spammer:
c.delete()
else:
c.is_public = False
c.is_removed = True
c.save()
for c in user.contact_set.all(): # and contact messages
if spammer:
c.delete()
else:
c.publish = False
c.save()
# remove their session. -- Is there a more efficient way than looping through all sessions? That can become a mighty big table.
for s in Session.objects.all():
decoded_session = s.get_decoded()
if '_auth_user_id' in decoded_session and decoded_session['_auth_user_id'] == user.id:
s.delete()
# and add them to the blacklist
blacklist = Blacklist(
user = user,
blacklister = request.user,
reason = reason,
)
blacklist.save()
if spammer:
resp_str = 'Any related accounts will still be visible, but related comments have been deleted.'
else:
resp_str = 'Any related accounts and comments will still be visible in the admin.'
count = len(users)
if count == 1:
modeladmin.message_user(request, "%s was removed and blocked from the site. %s" % (users[0].username, resp_str))
else:
modeladmin.message_user(request, "%s users were removed and blocked from the site. %s" % (count, resp_str))
return HttpResponseRedirect(request.get_full_path())
else:
return HttpResponse("error!")
# We haven't captured intermediate page data. Go there...
return render(request, 'admin/blacklist.html', {'users': users, 'form': form})
nuke_users.short_description = "Blacklist Users"
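# Example (sketch, not part of the original module): exposing the action on a
# custom admin so it appears in the user change list:
#
#   from django.contrib.auth.admin import UserAdmin
#
#   class BlacklistingUserAdmin(UserAdmin):
#       actions = [nuke_users]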
| mit | -4,542,085,057,113,488,400 | 44.20202 | 144 | 0.570503 | false | 4.720464 | false | false | false |
Onager/plaso | tests/cli/helpers/analysis_plugins.py | 2 | 2103 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the analysis plugins CLI arguments helper."""
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import analysis_plugins
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class AnalysisPluginsArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the analysis plugins CLI arguments helper."""
# pylint: disable=no-member,protected-access
_EXPECTED_OUTPUT = """\
usage: cli_helper.py [--analysis PLUGIN_LIST]
Test argument parser.
optional arguments:
--analysis PLUGIN_LIST
A comma separated list of analysis plugin names to be
loaded or "--analysis list" to see a list of available
plugins.
"""
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(
prog='cli_helper.py', description='Test argument parser.',
add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
analysis_plugins.AnalysisPluginsArgumentsHelper.AddArguments(
argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
options.analysis_plugins = 'tagging'
test_tool = tools.CLITool()
analysis_plugins.AnalysisPluginsArgumentsHelper.ParseOptions(
options, test_tool)
self.assertEqual(test_tool._analysis_plugins, ['tagging'])
with self.assertRaises(errors.BadConfigObject):
analysis_plugins.AnalysisPluginsArgumentsHelper.ParseOptions(
options, None)
options.analysis_plugins = 'bogus'
with self.assertRaises(errors.BadConfigOption):
analysis_plugins.AnalysisPluginsArgumentsHelper.ParseOptions(
options, test_tool)
# TODO: add test for '--analysis list'
# TODO: improve test coverage.
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,826,347,112,232,595,000 | 28.619718 | 78 | 0.700903 | false | 4.206 | true | false | false |
coolharsh55/hdd-indexer | movie_metadata/organize.py | 1 | 9443 | """organizer
organize movies by:
release date:
current decade - by year: 2015, 2014, 2013...
        previous decades - by decade: 2000, 1990, 1980
imdb rating:
5.0 and below
7.0 and below
7.5, 8.0, 8.5 ... (0.5 increments)
"""
# TODO: propagate error to browser if thread fails
from datetime import datetime
from math import floor
from ntpath import splitext
from os import makedirs
from os import path
from os import walk
from shutil import move
from shutil import rmtree
from threading import Thread
from hdd_settings.models import HDDRoot
from hdd_settings.models import MovieFolder
from movie_metadata.models import Movie
import logging
log = logging.getLogger('organize')
log.info(72 * '-')
log.info('organize module loaded')
def organizer_status(key=None, value=None):
"""Organizer status
Args:
key(dict key): key to search in dict
value(dict value): value to assign to key
    Returns:
        the stored value for key when only key is given, otherwise the
        organizer state as bool: True for ON, False for OFF
"""
if 'status' not in organizer_status.__dict__:
organizer_status.status = {
'STATUS': False,
'FILES_EVALUATED': 0,
}
_ORGANIZER = organizer_status.status
if _ORGANIZER.get(key) is not None:
if value is not None:
log.info('organizer status: %s -> %s' % (key, value))
# TODO: check if key and value are valid
_ORGANIZER[key] = value
else:
return _ORGANIZER[key]
return _ORGANIZER['STATUS']
def make_fname(title, relpath):
"""creates a new filename for the movie from its title
Uses the movie title saved in database along with the original
file extension to create a new and correct filename
Args:
title(str): title of the movie
relpath(str): path stored in database
Returns:
str: new filename.ext
Raises:
None
"""
# TODO: validate relpath contains filename.ext
extension = splitext(relpath)[1]
# TODO: validate that this is a valid/legal filename
return title + extension
def _criterion_tools(criterion):
"""select the organization criteria
Attaches functions based on user choice for organization criterion.
Supported criterions are: release date, imdb score
Args:
criterion(str): choice selected by user
Returns:
None
Raises:
ValueError: invalid criterion
"""
assert type(criterion) == str or type(criterion) == unicode
log.info('organization criterion: %s' % criterion)
if criterion == 'release':
_get_folder = _folder_by_release_date
_field_exists = lambda m: m.release is not None
elif criterion == 'imdb_score':
_get_folder = _folder_by_imdb_score
_field_exists = lambda m: m.imdb_score is not None
else:
raise ValueError('Invalid organization criterion: %s' % criterion)
return _get_folder, _field_exists
def _organize(criterion):
"""organize movies on disk/database by provided criterion
Selects all movies and updates their filenames based on their
metadata titles. Moves their files to organized folders whose
name and hierarchy are based on criterion selected.
Args:
criterion(str): user choice of organization criterion
Returns:
None
Raises:
None
"""
def create_folder(folder):
""" creates a folder if it does not exist
Args:
folder(str): path of the folder
Returns:
None
Raises:
None
"""
# TODO: check if path is valid
if not path.exists(path.join(destination, folder)):
log.info('created directory %s' % folder)
makedirs(path.join(destination, folder))
# functions for selected criterion
_get_folder, _field_exists = _criterion_tools(criterion)
# temporary folder for holding created folders
tempname = 'tmp'
log.debug('temporary folder set to ./%s' % tempname)
uncategorized = 'uncategorized'
log.debug('uncategorized folder set to ./%s/%s' % (
tempname, uncategorized))
parentpath = path.join(
HDDRoot.get_solo().path,
MovieFolder.get_solo().relpath)
destination = path.join(parentpath, tempname)
create_folder(destination)
movies = Movie.objects.all()
for movie in movies:
# parent folder for the movie file
if _field_exists(movie):
folder = _get_folder(movie)
else:
folder = uncategorized
log.debug('folder: %s' % folder)
create_folder(folder)
# create new filename -> title with extension
fname = make_fname(movie.title, movie.relpath)
# move the file to its new location
newpath = path.join(
path.join(destination, folder),
fname)
oldpath = path.join(parentpath, movie.relpath)
move(oldpath, newpath)
log.debug('%s moved from %s to %s' % (
movie.title, movie.relpath, newpath))
# update movie path to the newpath
movie.relpath = path.join(folder, fname)
# save updated movie to database
movie.save()
# move other files from movie_folder to new folder
other_files = path.join(destination, 'other_files')
create_folder(other_files)
for root, directory, files in walk(parentpath):
# don't go into the temporary folder
if not root.startswith(destination):
for somefile in files:
move(
path.join(root, somefile),
path.join(other_files, somefile))
log.info('moved other files into %s' % other_files)
# remove all directories from movie folder
for directory in walk(parentpath).next()[1]:
if directory != tempname:
rmtree(path.join(parentpath, directory))
log.info('removed all directories from movie folder')
# move all new folders into movie folder directory
for directory in walk(destination).next()[1]:
move(
path.join(destination, directory),
path.join(parentpath, directory))
# delete temporary directory
rmtree(destination)
# update status of organizer
organizer_status('STATUS', False)
def _folder_by_release_date(movie):
"""identifies the correct folder from movie release date
If the movie's release date is in the current decade, it assigns
the release year as its folder name. Otherwise, the decade year
is assigned as its folder name.
E.g. release dates in 2015 (now) will be stored in '2015'
release dates (2001, 2006, ...) will be stored in '2000'
Args:
movie(Movie): movie object from database
Returns:
str: foldername for the movie file
Raises:
None
"""
# TODO: check if movie is a valid Movie object
# TODO: check if movie has a valid release date
if 'this_decade' not in _folder_by_release_date.__dict__:
_folder_by_release_date.this_decade = \
datetime.now().year - datetime.now().year % 10
if 'get_decade' not in _folder_by_release_date.__dict__:
_folder_by_release_date.get_decade = lambda year: year - year % 10
if movie.release.year < _folder_by_release_date.this_decade:
folder = _folder_by_release_date.get_decade(movie.release.year)
else:
folder = movie.release.year
return str(folder)
def _folder_by_imdb_score(movie):
"""identifies the correct folder from movie score
If the movie's score is below a certain threshold, dumps all such
movies together. Otherwise saves each movie in folder based on
IMDb score with 0.5 incrememnts.
For e.g. movie with score 4.5, 3.2, ... go into 'below 5.0'
movie with score 5.1, 6.2, 6.9, ... go into 'below 7.0'
movie with score 7.3 go into '7.0', 7.8 go into '7.5'
Args:
movie(Movie): movie object from database
Returns:
str: foldername for movie file
Raises:
None
"""
imdb_score = movie.imdb_score
# movies rated 5.0 and below
if imdb_score < 5.0:
folder = 'below 5.0'
# movies rated 7.0 and below
elif imdb_score < 7.0:
folder = 'below 7.0'
else:
# 8.2 -> 8.2 + 0.5 -> floor(8.7) -> 8.0 -> 8.0
# 8.7 -> 8.7 + 0.5 -> floot(9.2) -> 9.0 -> 8.5
base = floor(imdb_score + 0.5)
# movie is rated something like x.y
if imdb_score < base:
# y > 0.5, e.g. score:8.7 -> folder:8.5
folder = str(base - 0.5) + ' and above'
else:
# y < 0.5 e.g. score:8.2 -> folder:8.0
folder = str(base) + ' and above'
return folder
def start_organizer(criterion='release'):
"""Start the organizer
Args:
criterion(str): specifies organization structure
Returns:
None
Raises:
None
"""
log.info('Started organizer with criterion: %s' % criterion)
thread = Thread(target=_organize, args=(criterion, ))
thread.daemon = True
thread.start()
log.info('organizer started on daemon thread')
organizer_status('STATUS', True)
def stop_organizer():
"""Stop the organizer
Args:
None
Returns:
None
Raises:
None
"""
log.info('Stopped organizer')
organizer_status('STATUS', False)
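# Example (sketch, not part of the original module): kicking off a background
# organization run by IMDb score and polling its state from a view:
#
#   start_organizer(criterion='imdb_score')
#   running = organizer_status('STATUS')
#   evaluated = organizer_status('FILES_EVALUATED')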
| mit | 4,160,380,292,026,635,000 | 28.509375 | 74 | 0.618024 | false | 3.954355 | false | false | false |
carolinux/QGIS | python/plugins/processing/algs/taudem/TauDEMUtils.py | 21 | 4504 | # -*- coding: utf-8 -*-
"""
***************************************************************************
TauDEMUtils.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import subprocess
from PyQt4.QtCore import QCoreApplication
from qgis.core import QgsApplication
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.tools.system import isMac
class TauDEMUtils:
TAUDEM_FOLDER = 'TAUDEM_FOLDER'
TAUDEM_MULTIFILE_FOLDER = 'TAUDEM_MULTIFILE_FOLDER'
TAUDEM_USE_SINGLEFILE = 'TAUDEM_USE_SINGLEFILE'
TAUDEM_USE_MULTIFILE = 'TAUDEM_USE_MULTIFILE'
MPIEXEC_FOLDER = 'MPIEXEC_FOLDER'
MPI_PROCESSES = 'MPI_PROCESSES'
@staticmethod
def taudemPath():
folder = ProcessingConfig.getSetting(TauDEMUtils.TAUDEM_FOLDER)
if folder is None:
folder = ''
if isMac():
testfolder = os.path.join(QgsApplication.prefixPath(), 'bin')
if os.path.exists(os.path.join(testfolder, 'slopearea')):
folder = testfolder
else:
testfolder = '/usr/local/bin'
if os.path.exists(os.path.join(testfolder, 'slopearea')):
folder = testfolder
return folder
@staticmethod
def taudemMultifilePath():
folder = ProcessingConfig.getSetting(TauDEMUtils.TAUDEM_MULTIFILE_FOLDER)
if folder is None:
folder = ''
if isMac():
testfolder = os.path.join(QgsApplication.prefixPath(), 'bin')
if os.path.exists(os.path.join(testfolder, 'slopearea')):
folder = testfolder
else:
testfolder = '/usr/local/bin'
if os.path.exists(os.path.join(testfolder, 'slopearea')):
folder = testfolder
return folder
@staticmethod
def mpiexecPath():
folder = ProcessingConfig.getSetting(TauDEMUtils.MPIEXEC_FOLDER)
if folder is None:
folder = ''
if isMac():
testfolder = os.path.join(QgsApplication.prefixPath(), 'bin')
if os.path.exists(os.path.join(testfolder, 'mpiexec')):
folder = testfolder
else:
testfolder = '/usr/local/bin'
if os.path.exists(os.path.join(testfolder, 'mpiexec')):
folder = testfolder
return folder
@staticmethod
def taudemDescriptionPath():
return os.path.normpath(
os.path.join(os.path.dirname(__file__), 'description'))
@staticmethod
def executeTauDEM(command, progress):
loglines = []
loglines.append(TauDEMUtils.tr('TauDEM execution console output'))
fused_command = ''.join(['"%s" ' % c for c in command])
progress.setInfo(TauDEMUtils.tr('TauDEM command:'))
progress.setCommand(fused_command.replace('" "', ' ').strip('"'))
proc = subprocess.Popen(
fused_command,
shell=True,
stdout=subprocess.PIPE,
stdin=open(os.devnull),
stderr=subprocess.STDOUT,
universal_newlines=True,
).stdout
for line in iter(proc.readline, ''):
progress.setConsoleInfo(line)
loglines.append(line)
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
@staticmethod
def tr(string, context=''):
if context == '':
context = 'TauDEMUtils'
return QCoreApplication.translate(context, string)
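# Example (sketch, not part of the original module): assembling a command line
# the way the algorithm wrappers do and running it through executeTauDEM(),
# assuming `progress` provides the setInfo/setCommand/setConsoleInfo methods:
#
#   command = [os.path.join(TauDEMUtils.mpiexecPath(), 'mpiexec'), '-n', '4',
#              os.path.join(TauDEMUtils.taudemPath(), 'pitremove'),
#              '-z', 'dem.tif', '-fel', 'dem_fel.tif']
#   TauDEMUtils.executeTauDEM(command, progress)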
| gpl-2.0 | -1,296,424,911,571,098,400 | 35.032 | 81 | 0.547069 | false | 4.273245 | true | false | false |
kvalo/pwcli | cmdtests/cmdtestlib.py | 1 | 6311 | #!/usr/bin/env python3
#
# Copyright (c) 2016, The Linux Foundation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import pexpect
import sys
import os
import logging
import stubs
import email.header
# logging
logging.basicConfig()
logger = logging.getLogger('cmdtestlib')
# uncomment to get debug logs
# logger.setLevel(logging.DEBUG)
# Note: these prompts are regexps, escape accordingly!
PROMPT = 'master@data >'
# there's some odd word wrapping happening (pexpect?) so had to cut this
PROMPT_REVIEW_STATE = 'aPplicable/rFc/aBort\? '
PROMPT_REVIEW_REASON = 'Reason \(RET for no mail\): '
PROMPT_COMMIT_ALL = 'commit All/aBort\?'
# there's some odd word wrapping happening (pexpect?) so had to cut this
PROMPT_COMMIT_ACCEPT = 'aPplicable/rFc/aBort\? '
PROMPT_REPLY = 'Send/Edit/aBort?'
PROMPT_REPLY_RETRY = 'Retry/aBort?'
PROMPT_REVIEW_ACCEPT = 'Apply \d+ patches to the pending branch\? \[Apply/Skip/aBort\]'
PROMPT_ACCEPT_CONFIRM = 'Are you sure want to ACCEPT these patches [y/N]: '
PROMPT_UNDER_REVIEW_CONFIRM = 'Are you sure want to set these patches to UNDER REVIEW? [y/N]: '
# the toplevel source directory
srcdir = os.environ['SRCDIR']
# the directory where the tests can store temporary data
testdatadir = os.environ['DATADIR']
stubsdir = os.path.join(srcdir, 'stubs')
logger.debug('srcdir=%r' % (srcdir))
logger.debug('testdatadir=%r' % (testdatadir))
logger.debug('stubsdir=%r' % (stubsdir))
def decode_mime_encoded_words(text):
# Yeah, I know this looks stupid but couldn't figure out a better way
return str(email.header.make_header(email.header.decode_header(text)))
class StubContext():
def __init__(self, start=False, debug=False, stgit=False, builder='builder'):
self.debug = debug
self.git = stubs.GitStub()
if stgit:
self.stgit = stubs.StgStub()
else:
self.stgit = None
self.smtpd = stubs.SmtpdStub()
self.patchwork = stubs.PatchworkStub()
self.editor = stubs.EditorStub()
self.pwcli = None
self.builder = stubs.BuilderStub()
self.builder_cmd = builder
# move to the fake git repository before starting pwcli
os.chdir(testdatadir)
if start:
self.start()
@staticmethod
def run_test(func, stgit=False, builder='builder'):
ctxt = StubContext(start=True, stgit=stgit, builder=builder)
pwcli = ctxt.pwcli
try:
func(ctxt, pwcli)
except Exception as e:
print(e)
ctxt.stop_and_cleanup()
def start(self):
stgit = False
try:
self.git.start()
if self.stgit:
stgit = True
self.stgit.start()
self.smtpd.start()
self.patchwork.start()
# FIXME: should this be start()?
self.editor.stop()
# must be instiated only after daemon stubs are running,
# as this immediately starts pwcli
self.pwcli = PwcliSpawn(debug=self.debug, stgit=stgit,
builder=self.builder_cmd)
except Exception as e:
print('Failed to start stubs: %s' % (e))
self.stop_and_cleanup()
sys.exit(1)
def stop(self):
self.git.stop()
if self.stgit:
self.stgit = self.stgit.stop()
self.smtpd.stop()
self.patchwork.stop()
self.editor.stop()
def cleanup(self):
if self.pwcli:
self.pwcli.cleanup()
if self.git:
self.git.cleanup()
if self.stgit:
self.stgit = self.stgit.cleanup()
if self.smtpd:
self.smtpd.cleanup()
if self.patchwork:
self.patchwork.cleanup()
if self.editor:
self.editor.cleanup()
if self.builder:
self.builder.cleanup()
def stop_and_cleanup(self):
self.stop()
self.cleanup()
class PwcliSpawn(pexpect.spawn):
def __init__(self, debug=False, stgit=False, builder='builder',
signature='Sent by pwcli\n$URL\n'):
cmd = 'pwcli'
if debug:
cmd += ' --debug'
self.pwcli_wrapper = stubs.PwcliWrapper(stgit=stgit, builder=builder,
signature=signature)
self.pwcli_wrapper.write_config()
# use short timeout so that failures don't take too long to detect
super(PwcliSpawn, self).__init__(os.path.join(srcdir, cmd),
timeout=3,
logfile=sys.stdout,
encoding='utf-8')
def cleanup(self):
self.pwcli_wrapper.cleanup()
def expect_prompt(self):
return super(PwcliSpawn, self).expect(PROMPT)
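# Example (sketch, not part of the original module): a minimal cmdtest built
# on the helpers above; run_test() starts the stubs, passes the spawned pwcli
# to the test function and cleans up afterwards:
#
#   def test_list(ctxt, pwcli):
#       pwcli.expect_prompt()
#       pwcli.sendline('list')
#       pwcli.expect_prompt()
#
#   if __name__ == '__main__':
#       StubContext.run_test(test_list)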
| bsd-3-clause | -531,545,003,178,729,100 | 30.08867 | 95 | 0.636666 | false | 3.881304 | true | false | false |
thusoy/pwm | pwm/encoding.py | 1 | 3106 | from ._compat import ord_byte
import math
import string
from logging import getLogger
_logger = getLogger('pwm.encoding')
# 'full' repeats digits twice, to increase the probability of a digit appearing in a default 16
# character password, for sites that suck at estimating entropy and require digits to be present
PRESETS = {
'full': string.ascii_letters + 2 * string.digits + '!#$%&()*+,-./:;=?@[]^_|~',
'alpha': string.ascii_letters,
'numeric': string.digits,
'alphanumeric': string.ascii_letters + string.digits,
}
def ceildiv(dividend, divisor):
''' integer ceiling division '''
return (dividend + divisor - 1) // divisor
def calc_chunklen(alph_len):
'''
computes the ideal conversion ratio for the given alphabet.
    A ratio is considered ideal when the number of output symbols needed for
    one input chunk is as close to a whole number as possible, i.e. as few
    bits as possible are wasted in the conversion.
'''
binlen, enclen = min([
(i, i*8 / math.log(alph_len, 2))
for i in range(1, 7)
], key=lambda k: k[1] % 1)
return binlen, int(enclen)
class Encoder(object):
'''
general-purpose encoder. Encodes arbitrary binary data with a given
specific base ("alphabet").
'''
def __init__(self, alphabet):
self.alphabet = alphabet
self.chunklen = calc_chunklen(len(alphabet))
def encode(self, digest, total_len):
nchunks = ceildiv(len(digest), self.chunklen[0])
binstr = digest.ljust(nchunks * self.chunklen[0], b'\0')
return ''.join([
self._encode_chunk(binstr, i) for i in range(0, nchunks)
])[:total_len]
def _encode_chunk(self, data, index):
'''
gets a chunk from the input data, converts it to a number and
encodes that number
'''
chunk = self._get_chunk(data, index)
return self._encode_long(self._chunk_to_long(chunk))
def _encode_long(self, val):
'''
encodes an integer of 8*self.chunklen[0] bits using the specified
alphabet
'''
return ''.join([
self.alphabet[(val//len(self.alphabet)**i) % len(self.alphabet)]
for i in reversed(range(self.chunklen[1]))
])
def _chunk_to_long(self, chunk):
'''
parses a chunk of bytes to integer using big-endian representation
'''
return sum([
256**(self.chunklen[0]-1-i) * ord_byte(chunk[i])
for i in range(self.chunklen[0])
])
def _get_chunk(self, data, index):
'''
partition the data into chunks and retrieve the chunk at the given index
'''
return data[index*self.chunklen[0]:(index+1)*self.chunklen[0]]
def lookup_alphabet(charset):
'''
retrieves a named charset or treats the input as a custom alphabet and use that
'''
if charset in PRESETS:
return PRESETS[charset]
if len(charset) < 16:
_logger.warning('very small alphabet in use, possibly a failed lookup?')
return charset
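# Example (sketch, not part of the original module): turning a binary digest
# into a 16 character password with a named preset alphabet:
#
#   import hashlib
#   alphabet = lookup_alphabet('alphanumeric')
#   digest = hashlib.sha256(b'some derived key material').digest()
#   password = Encoder(alphabet).encode(digest, 16)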
| mit | 5,288,213,428,455,828,000 | 31.020619 | 97 | 0.597875 | false | 3.867995 | false | false | false |
dwang159/oncall | src/oncall/app.py | 1 | 5191 | # Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from __future__ import absolute_import
from urllib import unquote_plus
from importlib import import_module
import falcon
import re
from beaker.middleware import SessionMiddleware
from falcon_cors import CORS
from . import db, constants, iris, auth
import logging
logger = logging.getLogger('oncall.app')
security_headers = [
('X-Frame-Options', 'SAMEORIGIN'),
('X-Content-Type-Options', 'nosniff'),
('X-XSS-Protection', '1; mode=block'),
('Strict-Transport-Security', 'max-age=31536000; includeSubDomains'),
]
def json_error_serializer(req, resp, exception):
resp.body = exception.to_json()
resp.content_type = 'application/json'
class SecurityHeaderMiddleware(object):
def process_request(self, req, resp):
resp.set_headers(security_headers)
class ReqBodyMiddleware(object):
'''
Falcon's req object has a stream that we read to obtain the post body. However, we can only read this once, and
we often need the post body twice (once for authentication and once in the handler method). To avoid this
problem, we read the post body into the request context and access it from there.
IMPORTANT NOTE: Because we use stream.read() here, all other uses of this method will return '', not the post body.
'''
def process_request(self, req, resp):
req.context['body'] = req.stream.read()
class AuthMiddleware(object):
def process_resource(self, req, resp, resource, params):
try:
if resource.allow_no_auth:
return
except AttributeError:
pass
auth_token = req.get_header('AUTHORIZATION')
if auth_token:
auth.authenticate_application(auth_token, req)
else:
auth.authenticate_user(req)
application = None
def init_falcon_api(config):
global application
cors = CORS(allow_origins_list=config.get('allow_origins_list', []))
middlewares = [
SecurityHeaderMiddleware(),
ReqBodyMiddleware(),
cors.middleware
]
if config.get('require_auth'):
middlewares.append(AuthMiddleware())
application = falcon.API(middleware=middlewares)
application.req_options.auto_parse_form_urlencoded = False
application.set_error_serializer(json_error_serializer)
from .auth import init as init_auth
init_auth(application, config['auth'])
from .ui import init as init_ui
init_ui(application, config)
from .api import init as init_api
init_api(application, config)
from .healthcheck import init as init_hc
init_hc(application, config)
for hook in config.get('post_init_hook', []):
try:
logger.debug('loading post init hook <%s>', hook)
getattr(import_module(hook), 'init')(application, config)
except:
logger.exception('Failed loading post init hook <%s>', hook)
return application
class RawPathPatcher(object):
slash_re = re.compile(r'%2[Ff]')
def __init__(self, app):
self.app = app
def __call__(self, env, start_response):
"""
Patch PATH_INFO wsgi variable so that '/api/v0/teams/foo%2Fbar' is not
treated as '/api/v0/teams/foo/bar'
List of extensions for raw URI:
* REQUEST_URI (uwsgi)
* RAW_URI (gunicorn)
"""
raw_path = env.get('REQUEST_URI', env.get('RAW_URI')).split('?', 1)[0]
env['PATH_INFO'] = unquote_plus(self.slash_re.sub('%252F', raw_path))
return self.app(env, start_response)
def init(config):
db.init(config['db'])
constants.init(config)
if 'iris_plan_integration' in config:
iris.init(config['iris_plan_integration'])
if not config.get('debug', False):
security_headers.append(
("Content-Security-Policy",
# unsafe-eval is required for handlebars without precompiled templates
"default-src 'self' %s 'unsafe-eval' ; "
"font-src 'self' data: blob; img-src data: uri https: http:; "
"style-src 'unsafe-inline' https: http:;" %
config.get('iris_plan_integration', {}).get('api_host', '')))
logging.basicConfig(level=logging.INFO)
logger.info('%s', security_headers)
else:
logging.basicConfig(level=logging.DEBUG)
init_falcon_api(config)
global application
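    # Wrap the Falcon app in cookie-based session middleware, with the
    # raw-path patcher as the outermost WSGI layer.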
session_opts = {
'session.type': 'cookie',
'session.cookie_expires': True,
'session.key': 'oncall-auth',
'session.encrypt_key': config['session']['encrypt_key'],
'session.validate_key': config['session']['sign_key'],
'session.secure': not (config.get('debug', False) or config.get('allow_http', False)),
'session.httponly': True,
'session.crypto_type': 'cryptography'
}
application = SessionMiddleware(application, session_opts)
application = RawPathPatcher(application)
def get_wsgi_app():
import sys
from . import utils
init(utils.read_config(sys.argv[1]))
return application
| bsd-2-clause | -2,143,582,619,884,856,300 | 30.846626 | 119 | 0.647467 | false | 3.828171 | true | false | false |
mrunge/horizon | openstack_dashboard/dashboards/admin/info/tables.py | 11 | 6531 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon.utils import filters as utils_filters
SERVICE_ENABLED = "enabled"
SERVICE_DISABLED = "disabled"
SERVICE_STATUS_DISPLAY_CHOICES = (
(SERVICE_ENABLED, _("Enabled")),
(SERVICE_DISABLED, _("Disabled")),
)
class ServiceFilterAction(tables.FilterAction):
filter_field = 'type'
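    # Subclasses narrow the match by overriding filter_field
    # (see SubServiceFilterAction, which filters on 'binary').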
def filter(self, table, services, filter_string):
q = filter_string.lower()
def comp(service):
attr = getattr(service, self.filter_field, '')
if attr is not None and q in attr.lower():
return True
return False
return filter(comp, services)
class SubServiceFilterAction(ServiceFilterAction):
filter_field = 'binary'
def get_stats(service):
return template.loader.render_to_string('admin/services/_stats.html',
{'service': service})
def get_status(service):
# if not configured in this region, neither option makes sense
if service.host:
return SERVICE_ENABLED if not service.disabled else SERVICE_DISABLED
return None
class ServicesTable(tables.DataTable):
id = tables.Column('id', hidden=True)
name = tables.Column("name", verbose_name=_('Name'))
service_type = tables.Column('__unicode__', verbose_name=_('Service'))
host = tables.Column('host', verbose_name=_('Host'))
status = tables.Column(get_status,
verbose_name=_('Status'),
status=True,
display_choices=SERVICE_STATUS_DISPLAY_CHOICES)
class Meta:
name = "services"
verbose_name = _("Services")
table_actions = (ServiceFilterAction,)
multi_select = False
status_columns = ["status"]
def get_available(zone):
return zone.zoneState['available']
def get_nova_agent_status(agent):
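    # Render the status together with any disable reason through a small
    # template so both appear in the table cell.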
template_name = 'admin/info/_cell_status.html'
context = {
'status': agent.status,
'disabled_reason': agent.disabled_reason
}
return template.loader.render_to_string(template_name, context)
class NovaServicesTable(tables.DataTable):
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
zone = tables.Column('zone', verbose_name=_('Zone'))
status = tables.Column(get_nova_agent_status, verbose_name=_('Status'))
state = tables.Column('state', verbose_name=_('State'),
filters=(filters.title,))
updated_at = tables.Column('updated_at',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
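        # Service records expose no unique id here, so build a composite key.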
return "%s-%s-%s" % (obj.binary, obj.host, obj.zone)
class Meta:
name = "nova_services"
verbose_name = _("Compute Services")
table_actions = (SubServiceFilterAction,)
multi_select = False
class CinderServicesTable(tables.DataTable):
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
zone = tables.Column('zone', verbose_name=_('Zone'))
status = tables.Column('status', verbose_name=_('Status'),
filters=(filters.title, ))
state = tables.Column('state', verbose_name=_('State'),
filters=(filters.title, ))
updated_at = tables.Column('updated_at',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return "%s-%s-%s" % (obj.binary, obj.host, obj.zone)
class Meta:
name = "cinder_services"
verbose_name = _("Block Storage Services")
table_actions = (SubServiceFilterAction,)
multi_select = False
class NetworkAgentsFilterAction(tables.FilterAction):
def filter(self, table, agents, filter_string):
q = filter_string.lower()
def comp(agent):
if q in agent.agent_type.lower():
return True
return False
return filter(comp, agents)
def get_network_agent_status(agent):
if agent.admin_state_up:
return _('Enabled')
return _('Disabled')
def get_network_agent_state(agent):
if agent.alive:
return _('Up')
return _('Down')
class NetworkAgentsTable(tables.DataTable):
agent_type = tables.Column('agent_type', verbose_name=_('Type'))
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
status = tables.Column(get_network_agent_status, verbose_name=_('Status'))
state = tables.Column(get_network_agent_state, verbose_name=_('State'))
heartbeat_timestamp = tables.Column('heartbeat_timestamp',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return "%s-%s" % (obj.binary, obj.host)
class Meta:
name = "network_agents"
verbose_name = _("Network Agents")
table_actions = (NetworkAgentsFilterAction,)
multi_select = False
| apache-2.0 | -279,604,993,973,928,320 | 33.739362 | 78 | 0.593324 | false | 4.403911 | false | false | false |
450W16/MODACT | src/characters/enemy.py | 1 | 4046 | import pygame
from directions import Directions
from utils import *
class Enemy(pygame.sprite.Sprite):
def __init__(self, width, height, x, y):
pygame.sprite.Sprite.__init__(self)
if width <= BLOCK_WIDTH:
#print "WARNING WIDTH MAY CAUSE PROBLEMS"
pass
self.image = pygame.Surface((width, height))
self.image.fill((255,0,0))
self.rect = self.image.get_rect()
self.delta_y = 0
self.delta_x = 0
self.rect.x = x
self.rect.y = y
self.aggroRange = 300
# Default heading
self.heading = Directions.Right
# Sprite animation counter
self.curr_sprite_index = 0
self.frame_counter = 0
self.frames_per_sprite = 4
def checkAggro(self, c, default):
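        # Return True when the player is within aggroRange of this enemy;
        # if `default` is set, also turn and walk toward the player.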
# Check aggro
dist = c.player.rect.x - self.rect.x
if abs(dist) < self.aggroRange:
# Close enough, set direction
if default:
if dist > 0:
self.dir = "R"
self.delta_x += self.speed
else:
self.dir = "L"
self.delta_x -= self.speed
return True
return False
# Basic left right mob update
def update(self, c):
self.update_sprites()
self.gravity()
# Check aggro
if not self.checkAggro(c, True):
if self.dir == "R":
self.delta_x += self.speed
else: # self.dir = "L"
self.delta_x -= self.speed
pl = c.lvl_current.platform_list
# collision detection in y
# check first so mob is positioned properly on top of platform
self.rect.y += self.delta_y
collide_list = pygame.sprite.spritecollide(self, pl, False)
for platform in collide_list:
if self.delta_y > 0:
self.rect.bottom = platform.rect.top
elif self.delta_y < 0:
self.rect.top = platform.rect.bottom
self.delta_y = 0
# Check to see if mob will fall off
# Find platform mob is standing on
p_cand = None
# If right, check right of rectangle against platforms
if self.dir == "R":
for platform in pl:
if platform.rect.left < self.rect.right \
and platform.rect.right >= self.rect.right \
and self.rect.bottom == platform.rect.top:
p_cand = platform
# min_dist = self.rect.bottom - platform.rect.top
else: # dir = "L" check left of rectangle against platforms
for platform in pl:
if platform.rect.right > self.rect.left \
and platform.rect.left <= self.rect.left \
and self.rect.bottom == platform.rect.top:
p_cand = platform
# Error: falling
if p_cand == None:
return
p_found = False
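        # Look for an adjacent platform at the same height in the direction of
        # travel; if none is found, the mob is standing at an edge.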
if self.dir == "R":
for platform in pl:
if platform.rect.left == p_cand.rect.right and platform.rect.top == p_cand.rect.top:
p_found = True
break
else: # dir = "L"
for platform in pl:
if platform.rect.right == p_cand.rect.left and platform.rect.top == p_cand.rect.top:
p_found = True
break
# Reverse directions if at edge
if not p_found:
if self.dir == 'R':
if self.rect.right >= p_cand.rect.right:
self.dir = 'L'
self.delta_x = 0
else:
if self.rect.left <= p_cand.rect.left:
self.dir = 'R'
self.delta_x = 0
# collision detection in x
# If collide with wall, reverse direction
self.rect.x += self.delta_x
collide_list = pygame.sprite.spritecollide(self, pl, False)
for platform in collide_list:
if self.delta_x > 0: # dir = "R"
self.rect.right = platform.rect.left
self.dir = "L"
elif self.delta_x < 0: # dir = "L"
self.rect.left = platform.rect.right
self.dir = "R"
self.delta_x = 0
def get_sprites(self):
raise NotImplementedError("Please implement this method")
def gravity(self):
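        # Constant downward acceleration; delta_y is zeroed again when a
        # platform collision is resolved in update().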
if self.delta_y == 0:
self.delta_y = 1
else:
self.delta_y += 1
# check if we're on the ground
#if self.rect.y >= SCREEN_HEIGHT - self.rect.height and self.delta_y >= 0:
# self.delta_y = 0
# self.rect.y = SCREEN_HEIGHT - self.rect.height
def update_sprites(self):
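        # Advance the animation one frame every `frames_per_sprite` ticks,
        # wrapping around the sprite list.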
if self.get_sprites():
self.frame_counter = (self.frame_counter + 1) % self.frames_per_sprite
if self.frame_counter == 0:
self.curr_sprite_index = (self.curr_sprite_index + 1) % len(self.get_sprites())
self.image = self.get_sprites()[self.curr_sprite_index]
| apache-2.0 | 3,313,382,168,279,339,500 | 25.973333 | 88 | 0.6478 | false | 2.841292 | false | false | false |