repo_name (stringlengths 5 to 92) | path (stringlengths 4 to 232) | copies (stringclasses, 19 values) | size (stringlengths 4 to 7) | content (stringlengths 721 to 1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
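The rows that follow are source files described by the schema above. A minimal sketch of filtering such a dump, assuming it has been exported as JSON lines (the file name `dump.jsonl` is hypothetical; the column names come from the header):

    import json

    # Keep human-written MIT-licensed files and report their size.
    with open('dump.jsonl') as f:
        for line in f:
            row = json.loads(line)
            if row['license'] == 'mit' and not row['autogenerated']:
                print(row['repo_name'], row['path'], len(row['content']))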
florentx/stringformat | setup.py | 1 | 1694 | # -*- coding: utf-8 -*-
"""
StringFormat
============
StringFormat is an independent port of the Python 3 advanced string
formatting, compatible with Python >= 2.4.
This implementation is pure Python.
Add the method ``str.format`` only if it is missing (Python < 2.6)::
import stringformat
stringformat.init()
Enable auto-numbered fields (``"{} {}"`` instead of ``"{0} {1}"``) on
Python 2.6 as well::
import sys
import stringformat
if sys.version_info < (2, 7):
stringformat.init(True)
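For example, after ``init(True)`` auto-numbered fields work on Python 2.6::
    u'{} {}'.format('spam', 'eggs')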
The advanced string formatting is officially included in the language
since Python 2.6.
http://docs.python.org/whatsnew/2.6.html#pep-3101-advanced-string-formatting
"""
from setuptools import setup
setup(
name='StringFormat',
version='0.5',
license='BSD',
url='http://github.com/florentx/stringformat',
author='Florent Xicluna',
author_email='[email protected]',
description='Advanced String Formatting for Python >= 2.4',
long_description=__doc__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Filters',
],
zip_safe=False,
platforms='any',
py_modules=['stringformat'],
test_suite='tests',
)
| bsd-3-clause | -3,078,432,857,128,992,000 | 25.888889 | 76 | 0.640496 | false |
mleist/gttg | base/migrations/0001_initial.py | 1 | 1751 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-16 16:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Audience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Gttg',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_time', models.DateTimeField(verbose_name='start date')),
],
),
migrations.CreateModel(
name='Para',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=2000)),
],
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('gttg', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Gttg')),
],
),
migrations.AddField(
model_name='audience',
name='gttg',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Gttg'),
),
]
| mit | -2,318,908,313,206,312,400 | 33.333333 | 114 | 0.5494 | false |
gocardless/gocardless-pro-python | gocardless_pro/resources/mandate_import_entry.py | 1 | 1417 | # WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
class MandateImportEntry(object):
"""A thin wrapper around a mandate_import_entry, providing easy access to its
attributes.
Example:
mandate_import_entry = client.mandate_import_entries.get()
mandate_import_entry.id
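    Nested link attributes are reached the same way:
        mandate_import_entry.links.customer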
"""
def __init__(self, attributes, api_response):
self.attributes = attributes
self.api_response = api_response
@property
def created_at(self):
return self.attributes.get('created_at')
@property
def links(self):
return self.Links(self.attributes.get('links'))
@property
def record_identifier(self):
return self.attributes.get('record_identifier')
class Links(object):
"""Wrapper for the response's 'links' attribute."""
def __init__(self, attributes):
self.attributes = attributes
@property
def customer(self):
return self.attributes.get('customer')
@property
def customer_bank_account(self):
return self.attributes.get('customer_bank_account')
@property
def mandate(self):
return self.attributes.get('mandate')
@property
def mandate_import(self):
return self.attributes.get('mandate_import')
| mit | -9,103,245,904,076,939,000 | 21.492063 | 81 | 0.606916 | false |
znick/anytask | anytask/api/tests.py | 1 | 25894 | from __future__ import unicode_literals
import base64
import json
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.test import TestCase
from courses.models import Course
from issues.model_issue_status import IssueStatus
from issues.models import Issue, IssueField, File
from tasks.models import Task
from users.models import Group
from years.models import Year
import api.views
class ApiTest(TestCase):
maxDiff = None
@classmethod
def clean_timestamps(cls, x):
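        # Recursively strip volatile timestamp fields so payload comparisons stay stable.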
if isinstance(x, list):
for y in x:
cls.clean_timestamps(y)
return
if not isinstance(x, dict):
return
x.pop("create_time", None)
x.pop("update_time", None)
x.pop("timestamp", None)
for k, v in x.iteritems():
cls.clean_timestamps(k)
cls.clean_timestamps(v)
def setUp(self):
self.anytask_password = "anytask"
self.anytask = User.objects.create_user(username='anytask',
password=self.anytask_password)
self.teacher_password = 'password1'
self.teacher = User.objects.create_user(username='teacher',
password=self.teacher_password)
self.teacher.first_name = 'teacher_name'
self.teacher.last_name = 'teacher_last_name'
self.teacher.save()
self.student_password = 'password2'
self.student = User.objects.create_user(username='student',
password=self.student_password)
self.student.first_name = 'student_name'
self.student.last_name = 'student_last_name'
self.student.save()
self.year = Year.objects.create(start_year=2016)
self.group = Group.objects.create(name='group_name',
year=self.year)
self.group.students = [self.student]
self.group.save()
self.course = Course.objects.create(name='course_name',
year=self.year)
self.course.groups = [self.group]
self.course.teachers = [self.teacher]
self.course.issue_fields = IssueField.objects.exclude(id=10).exclude(id=11)
self.course.issue_status_system.statuses = IssueStatus.objects.all()
self.course.save()
self.task1 = Task.objects.create(title='task_title1',
course=self.course,
score_max=10)
self.task2 = Task.objects.create(title='task_title2',
course=self.course,
score_max=20)
self.issue1 = Issue.objects.create(task_id=self.task1.id, student_id=self.student.id)
self.issue2 = Issue.objects.create(task_id=self.task2.id, student_id=self.student.id)
self.issue2.responsible = self.teacher
self.issue2.save()
event = self.issue1.add_comment("Test comment")
File.objects.create(file=SimpleUploadedFile('test_fail_rb.py', b'print "_failed_"'), event=event)
def _request(self, username, password, method=None, *args, **kwargs):
if method is None:
method = self.client.get
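        # Build an HTTP Basic authorization header from "username:password".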
http_authorization = "basic " + \
base64.b64encode("{}:{}".format(username, password))
kwargs.update({"HTTP_AUTHORIZATION": http_authorization})
return method(*args, **kwargs)
def test_get_issues(self):
issues_list = [
{
'status': {
'color': '#818A91',
'tag': 'new',
'id': 1,
'name': 'New'
},
'task': {
'id': 1,
'title': 'task_title1'
},
'responsible': None,
'mark': 0.0,
'followers': [],
'student': {
'username': 'student',
'first_name': 'student_name',
'last_name': 'student_last_name',
'middle_name': None,
'name': 'student_name student_last_name',
'id': 3
},
'id': 1
},
{
'status': {
'color': '#818A91',
'tag': 'new',
'id': 1,
'name': 'New'
},
'task': {
'id': 2,
'title': 'task_title2'
},
'responsible': {
'username': 'teacher',
'first_name': 'teacher_name',
'last_name': 'teacher_last_name',
'middle_name': None,
'name': 'teacher_name teacher_last_name',
'id': 2
},
'mark': 0.0,
'followers': [],
'student': {
'username': 'student',
'first_name': 'student_name',
'last_name': 'student_last_name',
'middle_name': None,
'name': 'student_name student_last_name',
'id': 3
},
'id': 2
}
]
response = self._request(self.teacher, self.teacher_password,
path=reverse(api.views.get_issues, kwargs={"course_id": self.course.id}))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.clean_timestamps(response_data)
self.assertListEqual(issues_list, response_data)
def test_get_issues__add_events(self):
issues_list = [
{
'status': {
'color': '#818A91',
'tag': 'new',
'id': 1,
'name': 'New'
},
'task': {
'id': 1,
'title': 'task_title1'
},
'responsible': None,
'id': 1,
'followers': [],
'student': {
'username': 'student',
'first_name': 'student_name',
'last_name': 'student_last_name',
'middle_name': None,
'name': 'student_name student_last_name',
'id': 3
},
'mark': 0.0,
'events': [
{
'files': [
{
'id': 1,
'filename': 'test_fail_rb.py'
}
],
'message': '<div class="contest-response-comment not-sanitize">Test comment</div>',
'id': 1,
'author': {
'username': 'anytask',
'first_name': '',
'last_name': '',
'middle_name': None,
'name': '',
'id': 1
}
}
]
},
{
'status': {
'color': '#818A91',
'tag': 'new',
'id': 1,
'name': 'New'
},
'task': {
'id': 2,
'title': 'task_title2'
},
'responsible': {
'username': 'teacher',
'first_name': 'teacher_name',
'last_name': 'teacher_last_name',
'middle_name': None,
'name': 'teacher_name teacher_last_name',
'id': 2
},
'id': 2,
'followers': [],
'student': {
'username': 'student',
'first_name': 'student_name',
'last_name': 'student_last_name',
'middle_name': None,
'name': 'student_name student_last_name',
'id': 3
},
'mark': 0.0,
'events': []
}
]
response = self._request(self.teacher, self.teacher_password,
path=reverse(api.views.get_issues,
kwargs={"course_id": self.course.id}) + "?add_events=1")
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.clean_timestamps(response_data)
self.clean_timestamps(issues_list)
url = response_data[0]['events'][0]['files'][0].pop("url")
path = response_data[0]['events'][0]['files'][0].pop("path")
self.assertIn("http", url)
self.assertIn("/media/", url)
self.assertIn("/media/", path)
self.assertEqual(issues_list, response_data)
def test_get_issues__not_teacher(self):
response = self._request(self.student, self.student_password,
path=reverse(api.views.get_issues, kwargs={"course_id": self.course.id}))
self.assertEqual(response.status_code, 403)
def test_get_issue(self, username=None, password=None):
if not username:
username = self.teacher
password = self.teacher_password
issue = {
'status': {
'color': '#818A91',
'tag': 'new',
'id': 1,
'name': 'New'
},
'task': {
'id': 1,
'title': 'task_title1'
},
'responsible': None,
'id': 1,
'followers': [],
'student': {
'username': 'student',
'first_name': 'student_name',
'last_name': 'student_last_name',
'middle_name': None,
'name': 'student_name student_last_name',
'id': 3
},
'mark': 0.0,
'events': [
{
'files': [
{
'id': 1,
'filename': 'test_fail_rb.py'
}
],
'message': '<div class="contest-response-comment not-sanitize">Test comment</div>',
'id': 1,
'author': {
'username': 'anytask',
'first_name': '',
'last_name': '',
'middle_name': None,
'name': '',
'id': 1
}
}
]
}
response = self._request(username, password,
path=reverse(api.views.get_or_post_issue, kwargs={"issue_id": self.issue1.id}))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.clean_timestamps(response_data)
url = response_data['events'][0]['files'][0].pop("url")
path = response_data['events'][0]['files'][0].pop("path")
self.assertIn("http", url)
self.assertIn("/media/", url)
self.assertIn("/media/", path)
self.assertDictEqual(issue, response_data)
response = self.client.get(url)
content = ''.join(response.streaming_content)
self.assertEqual('print "_failed_"', content)
def test_get_issue_no_access(self):
response = self._request(self.anytask, self.anytask_password,
path=reverse(api.views.get_issues, kwargs={"course_id": self.course.id}))
self.assertEqual(response.status_code, 403)
def test_get_issue_student_has_access(self):
self.test_get_issue(self.student, self.student_password)
def test_post_comment(self):
username = self.teacher
password = self.teacher_password
response = self._request(username, password,
path=reverse(api.views.add_comment, kwargs={"issue_id": self.issue1.id}),
method=self.client.post, data={"comment": "Hello from test"})
self.assertEqual(response.status_code, 201)
response = self._request(username, password,
path=reverse(api.views.get_or_post_issue, kwargs={"issue_id": self.issue1.id}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Hello from test")
def test_post_comment__no_access(self):
response = self._request(self.anytask, self.anytask_password,
path=reverse(api.views.add_comment, kwargs={"issue_id": self.issue1.id}),
method=self.client.post, data={"comment": "No access"})
self.assertEqual(response.status_code, 403)
response = self._request(self.teacher, self.teacher_password,
path=reverse(api.views.get_or_post_issue, kwargs={"issue_id": self.issue1.id}))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "No access")
def test_post_issue__comment(self):
username = self.teacher
password = self.teacher_password
issue_data = {
'status': {
'color': '#818A91',
'tag': 'new',
'id': 1,
'name': 'New'
},
'task': {
'id': 1,
'title': 'task_title1'
},
'responsible': None,
'id': 1,
'followers': [],
'student': {
'username': 'student',
'first_name': 'student_name',
'last_name': 'student_last_name',
'middle_name': None,
'name': 'student_name student_last_name',
'id': 3
},
'mark': 0.0,
'events': [
{
'files': [
{
'id': 1,
'filename': 'test_fail_rb.py'
}
],
'message': '<div class="contest-response-comment not-sanitize">Test comment</div>',
'id': 1,
'author': {
'username': 'anytask',
'first_name': '',
'last_name': '',
'middle_name': None,
'name': '',
'id': 1
}
},
{
'files': [],
'message': '<div class="contest-response-comment not-sanitize">Hello from test</div>',
'id': 2,
'author': {
'username': 'teacher',
'first_name': 'teacher_name',
'last_name': 'teacher_last_name',
'middle_name': None,
'name': 'teacher_name teacher_last_name',
'id': 2
}
}
]
}
response = self._request(username, password,
path=reverse(api.views.get_or_post_issue, kwargs={"issue_id": self.issue1.id}),
method=self.client.post, data={"comment": "Hello from test"})
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.clean_timestamps(response_data)
url = response_data['events'][0]['files'][0].pop("url")
path = response_data['events'][0]['files'][0].pop("path")
self.assertIn("http", url)
self.assertIn("/media/", url)
self.assertIn("/media/", path)
self.assertDictEqual(issue_data, response_data)
def test_post_issue__status(self, status=None):
username = self.teacher
password = self.teacher_password
issue_data = {
'status': {
'color': '#ACCD8C',
'tag': 'accepted_after_deadline',
'id': 7,
'name': 'Accepted after deadline'
},
'task': {
'id': 1,
'title': 'task_title1'
},
'responsible': None,
'id': 1,
'followers': [],
'student': {
'username': 'student',
'first_name': 'student_name',
'last_name': 'student_last_name',
'middle_name': None,
'name': 'student_name student_last_name',
'id': 3
},
'mark': 0.0,
'events': [
{
'files': [
{
'id': 1,
'filename': 'test_fail_rb.py'
}
],
'message': '<div class="contest-response-comment not-sanitize">Test comment</div>',
'id': 1,
'author': {
'username': 'anytask',
'first_name': '',
'last_name': '',
'middle_name': None,
'name': '',
'id': 1
}
},
{
'files': [],
'message': 'status_izmenen'
' \u0417\u0430\u0447\u0442\u0435\u043d\u043e \u043f\u043e\u0441\u043b\u0435'
' \u0434\u0435\u0434\u043b\u0430\u0439\u043d\u0430',
'id': 2,
'author': {
'username': 'teacher',
'first_name': 'teacher_name',
'last_name': 'teacher_last_name',
'middle_name': None,
'name': 'teacher_name teacher_last_name',
'id': 2
}
}
]
}
if status is None:
status = self.course.issue_status_system.statuses.all().order_by("-id")[0].id
response = self._request(username, password,
path=reverse(api.views.get_or_post_issue, kwargs={"issue_id": self.issue1.id}),
method=self.client.post, data={"status": status})
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.clean_timestamps(response_data)
url = response_data['events'][0]['files'][0].pop("url")
path = response_data['events'][0]['files'][0].pop("path")
self.assertIn("http", url)
self.assertIn("/media/", url)
self.assertIn("/media/", path)
self.assertDictEqual(issue_data, response_data)
def test_post_issue__mark(self):
username = self.teacher
password = self.teacher_password
issue_data = {
'status': {
'color': '#818A91',
'tag': 'new',
'id': 1,
'name': 'New'
},
'task': {
'id': 1,
'title': 'task_title1'
},
'responsible': None,
'id': 1,
'followers': [],
'student': {
'username': 'student',
'first_name': 'student_name',
'last_name': 'student_last_name',
'middle_name': None,
'name': 'student_name student_last_name',
'id': 3
},
'mark': 2.0,
'events': [
{
'files': [
{
'id': 1,
'filename': 'test_fail_rb.py'
}
],
'message': '<div class="contest-response-comment not-sanitize">Test comment</div>',
'id': 1,
'author': {
'username': 'anytask',
'first_name': '',
'last_name': '',
'middle_name': None,
'name': '',
'id': 1
}
}
]
}
response = self._request(username, password,
path=reverse(api.views.get_or_post_issue, kwargs={"issue_id": self.issue1.id}),
method=self.client.post, data={"mark": 2.0})
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.clean_timestamps(response_data)
url = response_data['events'][0]['files'][0].pop("url")
path = response_data['events'][0]['files'][0].pop("path")
self.assertIn("http", url)
self.assertIn("/media/", url)
self.assertIn("/media/", path)
self.assertDictEqual(issue_data, response_data)
def test_post_issue__status_tag(self):
self.test_post_issue__status(self.course.issue_status_system.statuses.all().order_by("-id")[0].tag)
def test_post_issue__no_access(self):
response = self._request(self.anytask, self.anytask_password,
path=reverse(api.views.get_or_post_issue, kwargs={"issue_id": self.issue1.id}),
method=self.client.post, data={"comment": "No access"})
self.assertEqual(response.status_code, 403)
status = self.course.issue_status_system.statuses.all().order_by("-id")[0]
response = self._request(self.anytask, self.anytask_password,
path=reverse(api.views.get_or_post_issue, kwargs={"issue_id": self.issue1.id}),
method=self.client.post, data={"status": status.id})
self.assertEqual(response.status_code, 403)
response = self._request(self.anytask, self.anytask_password,
path=reverse(api.views.get_or_post_issue, kwargs={"issue_id": self.issue1.id}),
method=self.client.post, data={"status": status.tag})
self.assertEqual(response.status_code, 403)
response = self._request(self.anytask, self.anytask_password,
path=reverse(api.views.get_or_post_issue, kwargs={"issue_id": self.issue1.id}),
method=self.client.post, data={"mark": 2.0})
self.assertEqual(response.status_code, 403)
response = self._request(self.teacher, self.teacher_password,
path=reverse(api.views.get_or_post_issue, kwargs={"issue_id": self.issue1.id}))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "No access")
def test_get_issue_statuses(self):
username = self.teacher
password = self.teacher_password
statuses = [
{
"color": "#818A91",
"tag": "new",
"id": 1,
"name": "New"
},
{
"color": "#818A91",
"tag": "auto_verification",
"id": 2,
"name": "Auto-checking"
},
{
"color": "#F0AD4E",
"tag": "verification",
"id": 3,
"name": "Checking"
},
{
"color": "#D9534F",
"tag": "rework",
"id": 4,
"name": "Revising"
},
{
"color": "#5CB85C",
"tag": "accepted",
"id": 5,
"name": "Accepted"
},
{
"color": "#5BC0DE",
"tag": "need_info",
"id": 6,
"name": "Need information"
},
{
"color": "#ACCD8C",
"tag": "accepted_after_deadline",
"id": 7,
"name": "Accepted after deadline"
}
]
response = self._request(username, password,
path=reverse(api.views.get_issue_statuses, kwargs={"course_id": self.course.id}))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertListEqual(statuses, response_data)
def test_get_issue_statuses__no_access(self):
response = self._request(self.anytask, self.anytask_password,
path=reverse(api.views.get_issue_statuses, kwargs={"course_id": self.course.id}))
self.assertEqual(response.status_code, 403)
response = self._request(self.teacher, self.teacher_password,
path=reverse(api.views.get_issue_statuses, kwargs={"course_id": self.course.id}))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "No access")
| mit | 7,359,099,332,820,531,000 | 36.912152 | 114 | 0.434773 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtGui/QAbstractItemView.py | 1 | 24092 | # encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python3/dist-packages/PyQt4/QtGui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
from .QAbstractScrollArea import QAbstractScrollArea
class QAbstractItemView(QAbstractScrollArea):
""" QAbstractItemView(QWidget parent=None) """
def activated(self, *args, **kwargs): # real signature unknown
""" QAbstractItemView.activated[QModelIndex] [signal] """
pass
def alternatingRowColors(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.alternatingRowColors() -> bool """
return False
def autoScrollMargin(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.autoScrollMargin() -> int """
return 0
def clearSelection(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.clearSelection() """
pass
def clicked(self, *args, **kwargs): # real signature unknown
""" QAbstractItemView.clicked[QModelIndex] [signal] """
pass
def closeEditor(self, QWidget, QAbstractItemDelegate_EndEditHint): # real signature unknown; restored from __doc__
""" QAbstractItemView.closeEditor(QWidget, QAbstractItemDelegate.EndEditHint) """
pass
def closePersistentEditor(self, QModelIndex): # real signature unknown; restored from __doc__
""" QAbstractItemView.closePersistentEditor(QModelIndex) """
pass
def commitData(self, QWidget): # real signature unknown; restored from __doc__
""" QAbstractItemView.commitData(QWidget) """
pass
def currentChanged(self, QModelIndex, QModelIndex_1): # real signature unknown; restored from __doc__
""" QAbstractItemView.currentChanged(QModelIndex, QModelIndex) """
pass
def currentIndex(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.currentIndex() -> QModelIndex """
pass
def dataChanged(self, QModelIndex, QModelIndex_1): # real signature unknown; restored from __doc__
""" QAbstractItemView.dataChanged(QModelIndex, QModelIndex) """
pass
def defaultDropAction(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.defaultDropAction() -> Qt.DropAction """
pass
def dirtyRegionOffset(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.dirtyRegionOffset() -> QPoint """
pass
def doubleClicked(self, *args, **kwargs): # real signature unknown
""" QAbstractItemView.doubleClicked[QModelIndex] [signal] """
pass
def dragDropMode(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.dragDropMode() -> QAbstractItemView.DragDropMode """
pass
def dragDropOverwriteMode(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.dragDropOverwriteMode() -> bool """
return False
def dragEnabled(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.dragEnabled() -> bool """
return False
def dragEnterEvent(self, QDragEnterEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.dragEnterEvent(QDragEnterEvent) """
pass
def dragLeaveEvent(self, QDragLeaveEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.dragLeaveEvent(QDragLeaveEvent) """
pass
def dragMoveEvent(self, QDragMoveEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.dragMoveEvent(QDragMoveEvent) """
pass
def dropEvent(self, QDropEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.dropEvent(QDropEvent) """
pass
def dropIndicatorPosition(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.dropIndicatorPosition() -> QAbstractItemView.DropIndicatorPosition """
pass
def edit(self, QModelIndex, QAbstractItemView_EditTrigger=None, QEvent=None): # real signature unknown; restored from __doc__ with multiple overloads
"""
QAbstractItemView.edit(QModelIndex)
QAbstractItemView.edit(QModelIndex, QAbstractItemView.EditTrigger, QEvent) -> bool
"""
return False
def editorDestroyed(self, QObject): # real signature unknown; restored from __doc__
""" QAbstractItemView.editorDestroyed(QObject) """
pass
def editTriggers(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.editTriggers() -> QAbstractItemView.EditTriggers """
pass
def entered(self, *args, **kwargs): # real signature unknown
""" QAbstractItemView.entered[QModelIndex] [signal] """
pass
def event(self, QEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.event(QEvent) -> bool """
return False
def executeDelayedItemsLayout(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.executeDelayedItemsLayout() """
pass
def focusInEvent(self, QFocusEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.focusInEvent(QFocusEvent) """
pass
def focusNextPrevChild(self, bool): # real signature unknown; restored from __doc__
""" QAbstractItemView.focusNextPrevChild(bool) -> bool """
return False
def focusOutEvent(self, QFocusEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.focusOutEvent(QFocusEvent) """
pass
def hasAutoScroll(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.hasAutoScroll() -> bool """
return False
def horizontalOffset(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.horizontalOffset() -> int """
return 0
def horizontalScrollbarAction(self, p_int): # real signature unknown; restored from __doc__
""" QAbstractItemView.horizontalScrollbarAction(int) """
pass
def horizontalScrollbarValueChanged(self, p_int): # real signature unknown; restored from __doc__
""" QAbstractItemView.horizontalScrollbarValueChanged(int) """
pass
def horizontalScrollMode(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.horizontalScrollMode() -> QAbstractItemView.ScrollMode """
pass
def horizontalStepsPerItem(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.horizontalStepsPerItem() -> int """
return 0
def iconSize(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.iconSize() -> QSize """
pass
def indexAt(self, QPoint): # real signature unknown; restored from __doc__
""" QAbstractItemView.indexAt(QPoint) -> QModelIndex """
pass
def indexWidget(self, QModelIndex): # real signature unknown; restored from __doc__
""" QAbstractItemView.indexWidget(QModelIndex) -> QWidget """
return QWidget
def inputMethodEvent(self, QInputMethodEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.inputMethodEvent(QInputMethodEvent) """
pass
def inputMethodQuery(self, Qt_InputMethodQuery): # real signature unknown; restored from __doc__
""" QAbstractItemView.inputMethodQuery(Qt.InputMethodQuery) -> object """
return object()
def isIndexHidden(self, QModelIndex): # real signature unknown; restored from __doc__
""" QAbstractItemView.isIndexHidden(QModelIndex) -> bool """
return False
def itemDelegate(self, QModelIndex=None): # real signature unknown; restored from __doc__ with multiple overloads
"""
QAbstractItemView.itemDelegate() -> QAbstractItemDelegate
QAbstractItemView.itemDelegate(QModelIndex) -> QAbstractItemDelegate
"""
return QAbstractItemDelegate
def itemDelegateForColumn(self, p_int): # real signature unknown; restored from __doc__
""" QAbstractItemView.itemDelegateForColumn(int) -> QAbstractItemDelegate """
return QAbstractItemDelegate
def itemDelegateForRow(self, p_int): # real signature unknown; restored from __doc__
""" QAbstractItemView.itemDelegateForRow(int) -> QAbstractItemDelegate """
return QAbstractItemDelegate
def keyboardSearch(self, p_str): # real signature unknown; restored from __doc__
""" QAbstractItemView.keyboardSearch(str) """
pass
def keyPressEvent(self, QKeyEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.keyPressEvent(QKeyEvent) """
pass
def model(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.model() -> QAbstractItemModel """
pass
def mouseDoubleClickEvent(self, QMouseEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.mouseDoubleClickEvent(QMouseEvent) """
pass
def mouseMoveEvent(self, QMouseEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.mouseMoveEvent(QMouseEvent) """
pass
def mousePressEvent(self, QMouseEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.mousePressEvent(QMouseEvent) """
pass
def mouseReleaseEvent(self, QMouseEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.mouseReleaseEvent(QMouseEvent) """
pass
def moveCursor(self, QAbstractItemView_CursorAction, Qt_KeyboardModifiers): # real signature unknown; restored from __doc__
""" QAbstractItemView.moveCursor(QAbstractItemView.CursorAction, Qt.KeyboardModifiers) -> QModelIndex """
pass
def openPersistentEditor(self, QModelIndex): # real signature unknown; restored from __doc__
""" QAbstractItemView.openPersistentEditor(QModelIndex) """
pass
def pressed(self, *args, **kwargs): # real signature unknown
""" QAbstractItemView.pressed[QModelIndex] [signal] """
pass
def reset(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.reset() """
pass
def resizeEvent(self, QResizeEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.resizeEvent(QResizeEvent) """
pass
def rootIndex(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.rootIndex() -> QModelIndex """
pass
def rowsAboutToBeRemoved(self, QModelIndex, p_int, p_int_1): # real signature unknown; restored from __doc__
""" QAbstractItemView.rowsAboutToBeRemoved(QModelIndex, int, int) """
pass
def rowsInserted(self, QModelIndex, p_int, p_int_1): # real signature unknown; restored from __doc__
""" QAbstractItemView.rowsInserted(QModelIndex, int, int) """
pass
def scheduleDelayedItemsLayout(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.scheduleDelayedItemsLayout() """
pass
def scrollDirtyRegion(self, p_int, p_int_1): # real signature unknown; restored from __doc__
""" QAbstractItemView.scrollDirtyRegion(int, int) """
pass
def scrollTo(self, QModelIndex, QAbstractItemView_ScrollHint_hint=None): # real signature unknown; restored from __doc__
""" QAbstractItemView.scrollTo(QModelIndex, QAbstractItemView.ScrollHint hint=QAbstractItemView.EnsureVisible) """
pass
def scrollToBottom(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.scrollToBottom() """
pass
def scrollToTop(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.scrollToTop() """
pass
def selectAll(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.selectAll() """
pass
def selectedIndexes(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.selectedIndexes() -> list-of-QModelIndex """
pass
def selectionBehavior(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.selectionBehavior() -> QAbstractItemView.SelectionBehavior """
pass
def selectionChanged(self, QItemSelection, QItemSelection_1): # real signature unknown; restored from __doc__
""" QAbstractItemView.selectionChanged(QItemSelection, QItemSelection) """
pass
def selectionCommand(self, QModelIndex, QEvent_event=None): # real signature unknown; restored from __doc__
""" QAbstractItemView.selectionCommand(QModelIndex, QEvent event=None) -> QItemSelectionModel.SelectionFlags """
pass
def selectionMode(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.selectionMode() -> QAbstractItemView.SelectionMode """
pass
def selectionModel(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.selectionModel() -> QItemSelectionModel """
return QItemSelectionModel
def setAlternatingRowColors(self, bool): # real signature unknown; restored from __doc__
""" QAbstractItemView.setAlternatingRowColors(bool) """
pass
def setAutoScroll(self, bool): # real signature unknown; restored from __doc__
""" QAbstractItemView.setAutoScroll(bool) """
pass
def setAutoScrollMargin(self, p_int): # real signature unknown; restored from __doc__
""" QAbstractItemView.setAutoScrollMargin(int) """
pass
def setCurrentIndex(self, QModelIndex): # real signature unknown; restored from __doc__
""" QAbstractItemView.setCurrentIndex(QModelIndex) """
pass
def setDefaultDropAction(self, Qt_DropAction): # real signature unknown; restored from __doc__
""" QAbstractItemView.setDefaultDropAction(Qt.DropAction) """
pass
def setDirtyRegion(self, QRegion): # real signature unknown; restored from __doc__
""" QAbstractItemView.setDirtyRegion(QRegion) """
pass
def setDragDropMode(self, QAbstractItemView_DragDropMode): # real signature unknown; restored from __doc__
""" QAbstractItemView.setDragDropMode(QAbstractItemView.DragDropMode) """
pass
def setDragDropOverwriteMode(self, bool): # real signature unknown; restored from __doc__
""" QAbstractItemView.setDragDropOverwriteMode(bool) """
pass
def setDragEnabled(self, bool): # real signature unknown; restored from __doc__
""" QAbstractItemView.setDragEnabled(bool) """
pass
def setDropIndicatorShown(self, bool): # real signature unknown; restored from __doc__
""" QAbstractItemView.setDropIndicatorShown(bool) """
pass
def setEditTriggers(self, QAbstractItemView_EditTriggers): # real signature unknown; restored from __doc__
""" QAbstractItemView.setEditTriggers(QAbstractItemView.EditTriggers) """
pass
def setHorizontalScrollMode(self, QAbstractItemView_ScrollMode): # real signature unknown; restored from __doc__
""" QAbstractItemView.setHorizontalScrollMode(QAbstractItemView.ScrollMode) """
pass
def setHorizontalStepsPerItem(self, p_int): # real signature unknown; restored from __doc__
""" QAbstractItemView.setHorizontalStepsPerItem(int) """
pass
def setIconSize(self, QSize): # real signature unknown; restored from __doc__
""" QAbstractItemView.setIconSize(QSize) """
pass
def setIndexWidget(self, QModelIndex, QWidget): # real signature unknown; restored from __doc__
""" QAbstractItemView.setIndexWidget(QModelIndex, QWidget) """
pass
def setItemDelegate(self, QAbstractItemDelegate): # real signature unknown; restored from __doc__
""" QAbstractItemView.setItemDelegate(QAbstractItemDelegate) """
pass
def setItemDelegateForColumn(self, p_int, QAbstractItemDelegate): # real signature unknown; restored from __doc__
""" QAbstractItemView.setItemDelegateForColumn(int, QAbstractItemDelegate) """
pass
def setItemDelegateForRow(self, p_int, QAbstractItemDelegate): # real signature unknown; restored from __doc__
""" QAbstractItemView.setItemDelegateForRow(int, QAbstractItemDelegate) """
pass
def setModel(self, QAbstractItemModel): # real signature unknown; restored from __doc__
""" QAbstractItemView.setModel(QAbstractItemModel) """
pass
def setRootIndex(self, QModelIndex): # real signature unknown; restored from __doc__
""" QAbstractItemView.setRootIndex(QModelIndex) """
pass
def setSelection(self, QRect, QItemSelectionModel_SelectionFlags): # real signature unknown; restored from __doc__
""" QAbstractItemView.setSelection(QRect, QItemSelectionModel.SelectionFlags) """
pass
def setSelectionBehavior(self, QAbstractItemView_SelectionBehavior): # real signature unknown; restored from __doc__
""" QAbstractItemView.setSelectionBehavior(QAbstractItemView.SelectionBehavior) """
pass
def setSelectionMode(self, QAbstractItemView_SelectionMode): # real signature unknown; restored from __doc__
""" QAbstractItemView.setSelectionMode(QAbstractItemView.SelectionMode) """
pass
def setSelectionModel(self, QItemSelectionModel): # real signature unknown; restored from __doc__
""" QAbstractItemView.setSelectionModel(QItemSelectionModel) """
pass
def setState(self, QAbstractItemView_State): # real signature unknown; restored from __doc__
""" QAbstractItemView.setState(QAbstractItemView.State) """
pass
def setTabKeyNavigation(self, bool): # real signature unknown; restored from __doc__
""" QAbstractItemView.setTabKeyNavigation(bool) """
pass
def setTextElideMode(self, Qt_TextElideMode): # real signature unknown; restored from __doc__
""" QAbstractItemView.setTextElideMode(Qt.TextElideMode) """
pass
def setVerticalScrollMode(self, QAbstractItemView_ScrollMode): # real signature unknown; restored from __doc__
""" QAbstractItemView.setVerticalScrollMode(QAbstractItemView.ScrollMode) """
pass
def setVerticalStepsPerItem(self, p_int): # real signature unknown; restored from __doc__
""" QAbstractItemView.setVerticalStepsPerItem(int) """
pass
def showDropIndicator(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.showDropIndicator() -> bool """
return False
def sizeHintForColumn(self, p_int): # real signature unknown; restored from __doc__
""" QAbstractItemView.sizeHintForColumn(int) -> int """
return 0
def sizeHintForIndex(self, QModelIndex): # real signature unknown; restored from __doc__
""" QAbstractItemView.sizeHintForIndex(QModelIndex) -> QSize """
pass
def sizeHintForRow(self, p_int): # real signature unknown; restored from __doc__
""" QAbstractItemView.sizeHintForRow(int) -> int """
return 0
def startDrag(self, Qt_DropActions): # real signature unknown; restored from __doc__
""" QAbstractItemView.startDrag(Qt.DropActions) """
pass
def state(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.state() -> QAbstractItemView.State """
pass
def tabKeyNavigation(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.tabKeyNavigation() -> bool """
return False
def textElideMode(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.textElideMode() -> Qt.TextElideMode """
pass
def timerEvent(self, QTimerEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.timerEvent(QTimerEvent) """
pass
def update(self, QModelIndex=None): # real signature unknown; restored from __doc__ with multiple overloads
"""
QAbstractItemView.update()
QAbstractItemView.update(QModelIndex)
"""
pass
def updateEditorData(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.updateEditorData() """
pass
def updateEditorGeometries(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.updateEditorGeometries() """
pass
def updateGeometries(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.updateGeometries() """
pass
def verticalOffset(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.verticalOffset() -> int """
return 0
def verticalScrollbarAction(self, p_int): # real signature unknown; restored from __doc__
""" QAbstractItemView.verticalScrollbarAction(int) """
pass
def verticalScrollbarValueChanged(self, p_int): # real signature unknown; restored from __doc__
""" QAbstractItemView.verticalScrollbarValueChanged(int) """
pass
def verticalScrollMode(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.verticalScrollMode() -> QAbstractItemView.ScrollMode """
pass
def verticalStepsPerItem(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.verticalStepsPerItem() -> int """
return 0
def viewOptions(self): # real signature unknown; restored from __doc__
""" QAbstractItemView.viewOptions() -> QStyleOptionViewItem """
return QStyleOptionViewItem
def viewportEntered(self, *args, **kwargs): # real signature unknown
""" QAbstractItemView.viewportEntered [signal] """
pass
def viewportEvent(self, QEvent): # real signature unknown; restored from __doc__
""" QAbstractItemView.viewportEvent(QEvent) -> bool """
return False
def visualRect(self, QModelIndex): # real signature unknown; restored from __doc__
""" QAbstractItemView.visualRect(QModelIndex) -> QRect """
pass
def visualRegionForSelection(self, QItemSelection): # real signature unknown; restored from __doc__
""" QAbstractItemView.visualRegionForSelection(QItemSelection) -> QRegion """
return QRegion
def __init__(self, QWidget_parent=None): # real signature unknown; restored from __doc__
pass
AboveItem = 1
AllEditTriggers = 31
AnimatingState = 6
AnyKeyPressed = 16
BelowItem = 2
CollapsingState = 5
ContiguousSelection = 4
CurrentChanged = 1
CursorAction = None # (!) real value is ''
DoubleClicked = 2
DragDrop = 3
DragDropMode = None # (!) real value is ''
DraggingState = 1
DragOnly = 1
DragSelectingState = 2
DropIndicatorPosition = None # (!) real value is ''
DropOnly = 2
EditingState = 3
EditKeyPressed = 8
EditTrigger = None # (!) real value is ''
EditTriggers = None # (!) real value is ''
EnsureVisible = 0
ExpandingState = 4
ExtendedSelection = 3
InternalMove = 4
MoveDown = 1
MoveEnd = 5
MoveHome = 4
MoveLeft = 2
MoveNext = 8
MovePageDown = 7
MovePageUp = 6
MovePrevious = 9
MoveRight = 3
MoveUp = 0
MultiSelection = 2
NoDragDrop = 0
NoEditTriggers = 0
NoSelection = 0
NoState = 0
OnItem = 0
OnViewport = 3
PositionAtBottom = 2
PositionAtCenter = 3
PositionAtTop = 1
ScrollHint = None # (!) real value is ''
ScrollMode = None # (!) real value is ''
ScrollPerItem = 0
ScrollPerPixel = 1
SelectColumns = 2
SelectedClicked = 4
SelectionBehavior = None # (!) real value is ''
SelectionMode = None # (!) real value is ''
SelectItems = 0
SelectRows = 1
SingleSelection = 1
State = None # (!) real value is ''
| gpl-2.0 | -695,891,328,013,643,000 | 40.182906 | 153 | 0.674581 | false |
peterhudec/liveandletdie | tests/test_all.py | 1 | 3686 | import os
import liveandletdie
HERE = os.path.dirname(__file__)
PROJECT_PATH = os.path.abspath(os.path.join(HERE, '..'))
SAMPLE_APPS_DIR = os.path.join(PROJECT_PATH, 'sample_apps')
class Base(object):
    def teardown(self):
liveandletdie.port_in_use(8001, kill=True)
def test_default_url(self, scheme='http', **kwargs):
app = self.class_(self.app_path, **kwargs)
assert app.default_url == '{0}://127.0.0.1:8001'.format(scheme)
app = self.class_(self.app_path, port=1234, **kwargs)
assert app.default_url == '{0}://127.0.0.1:1234'.format(scheme)
app = self.class_(self.app_path, host='123.456.789.123', **kwargs)
assert app.default_url == '{0}://123.456.789.123:8001'.format(scheme)
app = self.class_(self.app_path, host='123.456.789.123',
port=1234, **kwargs)
assert app.default_url == '{0}://123.456.789.123:1234'.format(scheme)
def test__normalize_check_url(self, scheme='http', **kwargs):
# Default url
app = self.class_(self.app_path, **kwargs)
        normalized = app._normalize_check_url('http://foo.bar.com')
        assert normalized == '{0}://foo.bar.com:8001'.format(scheme)
        normalized = app._normalize_check_url('https://foo.bar.com')
        assert normalized == '{0}://foo.bar.com:8001'.format(scheme)
        normalized = app._normalize_check_url('https://foo.bar.com:5555')
        assert normalized == '{0}://foo.bar.com:8001'.format(scheme)
        # Custom port
        app = self.class_(self.app_path, port=1234, **kwargs)
        normalized = app._normalize_check_url('http://foo.bar.com')
        assert normalized == '{0}://foo.bar.com:1234'.format(scheme)
        normalized = app._normalize_check_url('https://foo.bar.com')
        assert normalized == '{0}://foo.bar.com:1234'.format(scheme)
        normalized = app._normalize_check_url('https://foo.bar.com:5555')
        assert normalized == '{0}://foo.bar.com:1234'.format(scheme)
        # Custom host
        app = self.class_(self.app_path, host='123.456.789.123', **kwargs)
        normalized = app._normalize_check_url('http://foo.bar.com')
        assert normalized == '{0}://foo.bar.com:8001'.format(scheme)
        normalized = app._normalize_check_url('https://foo.bar.com')
        assert normalized == '{0}://foo.bar.com:8001'.format(scheme)
        normalized = app._normalize_check_url('https://foo.bar.com:5555')
        assert normalized == '{0}://foo.bar.com:8001'.format(scheme)
        # Custom host and port
        app = self.class_(self.app_path, host='123.456.789.123',
                          port=1234, **kwargs)
        normalized = app._normalize_check_url('http://foo.bar.com')
        assert normalized == '{0}://foo.bar.com:1234'.format(scheme)
        normalized = app._normalize_check_url('https://foo.bar.com')
        assert normalized == '{0}://foo.bar.com:1234'.format(scheme)
        normalized = app._normalize_check_url('https://foo.bar.com:5555')
        assert normalized == '{0}://foo.bar.com:1234'.format(scheme)
class SSLBase(Base):
def test_default_url(self):
super(SSLBase, self).test_default_url()
super(SSLBase, self).test_default_url('https', ssl=True)
def test__normalize_check_url(self):
super(SSLBase, self).test__normalize_check_url()
super(SSLBase, self).test__normalize_check_url('https', ssl=True)
class TestFlask(SSLBase):
app_path = os.path.join(SAMPLE_APPS_DIR, 'flask', 'main.py')
class_ = liveandletdie.Flask
class TestPyramid(SSLBase):
app_path = os.path.join(SAMPLE_APPS_DIR, 'pyramid', 'main.py')
class_ = liveandletdie.Pyramid
| mit | 3,447,240,475,609,438,000 | 39.065217 | 77 | 0.616386 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/registry/tests/test_karmacache_updater.py | 1 | 4326 | # Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type
import subprocess
import unittest
import transaction
from zope.component import getUtility
from lp.registry.interfaces.person import IPersonSet
from lp.registry.interfaces.product import IProductSet
from lp.registry.model.karma import KarmaCache
from lp.services.database.sqlbase import flush_database_caches
from lp.testing import (
ANONYMOUS,
login,
logout,
)
from lp.testing.layers import LaunchpadFunctionalLayer
class TestKarmaCacheUpdater(unittest.TestCase):
layer = LaunchpadFunctionalLayer
def setUp(self):
login(ANONYMOUS)
self.personset = getUtility(IPersonSet)
def tearDown(self):
logout()
# As the test performs DB changes in a subprocess, make sure
# the database is marked dirty.
self.layer.force_dirty_database()
def _getCacheEntriesByPerson(self, person):
return KarmaCache.selectBy(person=person)
def _runScript(self):
process = subprocess.Popen(
'cronscripts/foaf-update-karma-cache.py', shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = process.communicate()
self.failUnless(process.returncode == 0, (out, err))
    # This is quite a long test, but it's better this way because the
    # karmacache updater script takes quite a while to run and changes or
    # deletes all KarmaCache entries. In other words, if we split this test
    # it would take a lot longer to run and we would have to restore the
    # database after each run.
def test_karmacache_entries(self):
# Sample Person has some KarmaCache entries, but it's a long time
        # since we last updated this cache, and now the karma he earned long
# ago is not worth anything, so the karmacache-updater script will
# delete the cache entries for Sample Person.
sample_person = self.personset.getByName('name12')
cache_entries = self._getCacheEntriesByPerson(sample_person)
self.failUnless(not cache_entries.is_empty())
for cache in cache_entries:
self.failIf(cache.karmavalue <= 0)
# As we can see, Foo Bar already has some karmacache entries. We'll
# now add some fresh Karma entries for him and later we'll check that
# the cache-updater script simply updated the existing cache entries
# instead of creating new ones.
foobar = self.personset.getByName('name16')
cache_entries = self._getCacheEntriesByPerson(foobar)
foobar_original_entries_count = cache_entries.count()
self.failUnless(foobar_original_entries_count > 0)
for cache in cache_entries:
self.failIf(cache.karmavalue <= 0)
firefox = getUtility(IProductSet)['firefox']
foobar.assignKarma('bugcreated', firefox)
# In the case of No Priv, he has no KarmaCache entries, so if we add
# some fresh Karma entries to him, our cache-updater script will have
# to create new KarmaCache entries for him.
nopriv = self.personset.getByName('no-priv')
self.failUnless(self._getCacheEntriesByPerson(nopriv).count() == 0)
nopriv.assignKarma('bugcreated', firefox)
transaction.commit()
self._runScript()
# Need to flush our caches since things were updated behind our back.
flush_database_caches()
# Check that Sample Person has no KarmaCache entries at all
sample_person = self.personset.getByName('name12')
self.failUnless(
self._getCacheEntriesByPerson(sample_person).count() == 0)
# Check that Foo Bar had his KarmaCache entries updated.
entries_count = self._getCacheEntriesByPerson(foobar).count()
# The cache entries that would have their karmavalue updated to 0 are
# instead deleted from the DB; that's why the new count can be smaller
# than the original one.
self.failUnless(entries_count <= foobar_original_entries_count)
# And finally, ensure that No Priv got some new KarmaCache entries.
self.failUnless(not self._getCacheEntriesByPerson(nopriv).is_empty())
| agpl-3.0 | 1,961,686,788,335,937,500 | 40.2 | 78 | 0.68932 | false |
i2keita/mz_demo | Docs/subsidyData/toSql.py | 1 | 1555 | # -*- coding:utf-8 -*-
import os
sql_file = 'subsidy.sql'
def get_data(file_name):
res = {}
with open(file_name, mode='r') as f:
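        # Each data file stores one field per line, in this fixed order.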
        res['certificate_type'] = f.readline().strip()
        res['kind'] = f.readline().strip()
        res['level'] = f.readline().strip()
        res['money'] = f.readline().strip()
        res['title'] = f.readline().strip()
return res
def deal_with(file_name):
res = get_data(file_name)
    with open(sql_file, 'a+') as f:
        # Emit one INSERT per '、'-separated subtitle; `series` is intentionally empty.
        for subtitle in res['title'].split('、'):
            content = "INSERT INTO `mz_subsidy_standary` (`certificate_type`, `kind`, `level`, `money`, `series`, `title`) VALUES('{certificate_type}', '{kind}', '{level}', '{money}', '{series}', '{title}');\r\n".\
                format(certificate_type=res['certificate_type'], kind=res['kind'], level=res['level'], money=res['money'], series='', title=subtitle)
            f.write(content)
def main():
if os.path.exists(sql_file):
os.remove(sql_file)
for x in range(1,20):
deal_with('data%d.txt'% x)
# print 'data%d.txt'% x
if __name__ == '__main__':
main()
| gpl-3.0 | -3,724,475,210,995,640,300 | 36.825 | 213 | 0.540245 | false |
Clinical-Genomics/scout | scout/build/variant/variant.py | 1 | 15599 | # -*- coding: utf-8 -*-
import logging
from . import build_genotype, build_compound, build_gene, build_clnsig
LOG = logging.getLogger(__name__)
def build_variant(
variant, institute_id, gene_to_panels=None, hgncid_to_gene=None, sample_info=None
):
"""Build a variant object based on parsed information
Args:
variant(dict)
institute_id(str)
gene_to_panels(dict): A dictionary with
{<hgnc_id>: {
'panel_names': [<panel_name>, ..],
'disease_associated_transcripts': [<transcript_id>, ..]
}
.
.
}
hgncid_to_gene(dict): A dictionary with
{<hgnc_id>: <hgnc_gene info>
.
.
}
sample_info(dict): A dictionary with info about samples.
            Strictly for cancer cases, to tell which sample is the tumor
Returns:
variant_obj(dict)
variant = dict(
# document_id is a md5 string created by institute_genelist_caseid_variantid:
_id = str, # required, same as document_id
document_id = str, # required
# variant_id is a md5 string created by chrom_pos_ref_alt (simple_id)
variant_id = str, # required
# display name is variant_id (no md5)
display_name = str, # required
# chrom_pos_ref_alt
simple_id = str,
# The variant can be either research or clinical.
# For research variants we display all the available information while
# the clinical variants have limited annotation fields.
variant_type = str, # required, choices=('research', 'clinical'))
category = str, # choices=('sv', 'snv', 'str')
sub_category = str, # choices=('snv', 'indel', 'del', 'ins', 'dup', 'inv', 'cnv', 'bnd')
mate_id = str, # For SVs this identifies the other end
case_id = str, # case_id is a string like owner_caseid
chromosome = str, # required
position = int, # required
end = int, # required
length = int, # required
reference = str, # required
alternative = str, # required
rank_score = float, # required
variant_rank = int, # required
        rank_score_results = list, # list of dictionaries
variant_rank = int, # required
institute = str, # institute_id, required
sanger_ordered = bool,
validation = str, # Sanger validation, choices=('True positive', 'False positive')
quality = float,
filters = list, # list of strings
samples = list, # list of dictionaries that are <gt_calls>
genetic_models = list, # list of strings choices=GENETIC_MODELS
compounds = list, # sorted list of <compound> ordering='combined_score'
genes = list, # list with <gene>
dbsnp_id = str,
# Gene ids:
hgnc_ids = list, # list of hgnc ids (int)
hgnc_symbols = list, # list of hgnc symbols (str)
        panels = list, # list of panel names that the variant overlaps
# Frequencies:
thousand_genomes_frequency = float,
thousand_genomes_frequency_left = float,
thousand_genomes_frequency_right = float,
exac_frequency = float,
max_thousand_genomes_frequency = float,
max_exac_frequency = float,
local_frequency = float,
local_obs_old = int,
local_obs_hom_old = int,
local_obs_total_old = int, # default=638
# Predicted deleteriousness:
cadd_score = float,
revel_score = float,
clnsig = list, # list of <clinsig>
spidex = float,
missing_data = bool, # default False
        # STR specific information
        str_repid = str, # repeat id, generally corresponds to the gene symbol
        str_ru = str, # repeat unit, used e.g. in PanelApp naming of STRs
        str_ref = int, # reference copy number
        str_len = int, # number of repeats found in the case
        str_status = str, # indicates the severity of the expansion level
        str_normal_max = int, # max number of repeats to call an STR variant normal
        str_pathologic_min = int, # min number of repeats to call an STR variant pathologic
        str_disease = str, # associated disease name
        str_inheritance_mode = str, # STR disease mode of inheritance: "AD", "XR", "AR", "-"
        str_source = dict, # source dict with keys {"display": str, "type": str ("PubMed", "GeneReviews"), "id": str}
        str_swegen_mean = float, # STR normal population mean
        str_swegen_std = float, # STR normal population stdev
# Callers
gatk = str, # choices=VARIANT_CALL, default='Not Used'
samtools = str, # choices=VARIANT_CALL, default='Not Used'
freebayes = str, # choices=VARIANT_CALL, default='Not Used'
# Conservation:
phast_conservation = list, # list of str, choices=CONSERVATION
gerp_conservation = list, # list of str, choices=CONSERVATION
phylop_conservation = list, # list of str, choices=CONSERVATION
# Database options:
gene_lists = list,
manual_rank = int, # choices=[0, 1, 2, 3, 4, 5]
cancer_tier = str # choices=['1A', '1B', '2A', '2B', '3', '4']
dismiss_variant = list,
acmg_evaluation = str, # choices=ACMG_TERMS
)
"""
gene_to_panels = gene_to_panels or {}
hgncid_to_gene = hgncid_to_gene or {}
sample_info = sample_info or {}
# LOG.debug("Building variant %s", variant['ids']['document_id'])
variant_obj = dict(
_id=variant["ids"]["document_id"],
document_id=variant["ids"]["document_id"],
variant_id=variant["ids"]["variant_id"],
display_name=variant["ids"]["display_name"],
variant_type=variant["variant_type"],
case_id=variant["case_id"],
chromosome=variant["chromosome"],
reference=variant["reference"],
alternative=variant["alternative"],
institute=institute_id,
)
variant_obj["missing_data"] = False
variant_obj["position"] = int(variant["position"])
variant_obj["rank_score"] = float(variant["rank_score"])
end = variant.get("end")
if end:
variant_obj["end"] = int(end)
length = variant.get("length")
if length:
variant_obj["length"] = int(length)
variant_obj["simple_id"] = variant["ids"].get("simple_id")
variant_obj["quality"] = float(variant["quality"]) if variant["quality"] else None
variant_obj["filters"] = variant["filters"]
variant_obj["dbsnp_id"] = variant.get("dbsnp_id")
variant_obj["cosmic_ids"] = variant.get("cosmic_ids")
variant_obj["category"] = variant["category"]
variant_obj["sub_category"] = variant.get("sub_category")
if "mate_id" in variant:
variant_obj["mate_id"] = variant["mate_id"]
if "cytoband_start" in variant:
variant_obj["cytoband_start"] = variant["cytoband_start"]
if "cytoband_end" in variant:
variant_obj["cytoband_end"] = variant["cytoband_end"]
if "end_chrom" in variant:
variant_obj["end_chrom"] = variant["end_chrom"]
############ Str specific ############
if "str_ru" in variant:
variant_obj["str_ru"] = variant["str_ru"]
if "str_repid" in variant:
variant_obj["str_repid"] = variant["str_repid"]
if "str_ref" in variant:
variant_obj["str_ref"] = variant["str_ref"]
if "str_len" in variant:
variant_obj["str_len"] = variant["str_len"]
if "str_status" in variant:
variant_obj["str_status"] = variant["str_status"]
if "str_normal_max" in variant:
variant_obj["str_normal_max"] = variant["str_normal_max"]
if "str_pathologic_min" in variant:
variant_obj["str_pathologic_min"] = variant["str_pathologic_min"]
if "str_swegen_mean" in variant:
variant_obj["str_swegen_mean"] = (
float(variant["str_swegen_mean"]) if variant["str_swegen_mean"] else None
)
if "str_swegen_std" in variant:
variant_obj["str_swegen_std"] = (
float(variant["str_swegen_std"]) if variant["str_swegen_std"] else None
)
if "str_inheritance_mode" in variant:
variant_obj["str_inheritance_mode"] = variant["str_inheritance_mode"]
if "str_disease" in variant:
variant_obj["str_disease"] = variant["str_disease"]
if "str_source" in variant:
variant_obj["str_source"] = variant["str_source"]
# Mitochondria specific
if "mitomap_associated_diseases" in variant:
variant_obj["mitomap_associated_diseases"] = variant["mitomap_associated_diseases"]
if "hmtvar_variant_id" in variant:
variant_obj["hmtvar_variant_id"] = variant["hmtvar_variant_id"]
gt_types = []
for sample in variant.get("samples", []):
gt_call = build_genotype(sample)
gt_types.append(gt_call)
if sample_info:
sample_id = sample["individual_id"]
if sample_info[sample_id] == "case":
key = "tumor"
else:
key = "normal"
variant_obj[key] = {
"alt_depth": sample["alt_depth"],
"ref_depth": sample["ref_depth"],
"read_depth": sample["read_depth"],
"alt_freq": sample["alt_frequency"],
"ind_id": sample_id,
}
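            # Illustrative note (example ids are made up): with
            # sample_info = {"ACC1": "case", "ACC2": "control"}, the depths for
            # ACC1 end up under variant_obj["tumor"] and those for ACC2 under
            # variant_obj["normal"].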
variant_obj["samples"] = gt_types
if "genetic_models" in variant:
variant_obj["genetic_models"] = variant["genetic_models"]
##### Add the compounds #####
compounds = []
for compound in variant.get("compounds", []):
compound_obj = build_compound(compound)
compounds.append(compound_obj)
if compounds:
variant_obj["compounds"] = compounds
##### Add the genes with transcripts #####
genes = []
for index, gene in enumerate(variant.get("genes", [])):
if gene.get("hgnc_id"):
gene_obj = build_gene(gene, hgncid_to_gene)
genes.append(gene_obj)
if index > 30:
# avoid uploading too much data (specifically for SV variants)
# mark variant as missing data
variant_obj["missing_data"] = True
break
if genes:
variant_obj["genes"] = genes
# To make gene searches more effective
if "hgnc_ids" in variant:
variant_obj["hgnc_ids"] = [hgnc_id for hgnc_id in variant["hgnc_ids"] if hgnc_id]
# Add the hgnc symbols from the database genes
hgnc_symbols = []
for hgnc_id in variant_obj["hgnc_ids"]:
gene_obj = hgncid_to_gene.get(hgnc_id)
if gene_obj:
hgnc_symbols.append(gene_obj["hgnc_symbol"])
# else:
# LOG.warning("missing HGNC symbol for: %s", hgnc_id)
if hgnc_symbols:
variant_obj["hgnc_symbols"] = hgnc_symbols
##### link gene panels #####
panel_names = set()
for hgnc_id in variant_obj["hgnc_ids"]:
gene_panels = gene_to_panels.get(hgnc_id, set())
panel_names = panel_names.union(gene_panels)
if panel_names:
variant_obj["panels"] = list(panel_names)
##### Add the clnsig objects from clinvar #####
clnsig_objects = []
for entry in variant.get("clnsig", []):
clnsig_obj = build_clnsig(entry)
clnsig_objects.append(clnsig_obj)
if clnsig_objects:
variant_obj["clnsig"] = clnsig_objects
##### Add the callers #####
call_info = variant.get("callers", {})
for caller in call_info:
if call_info[caller]:
variant_obj[caller] = call_info[caller]
##### Add the conservation #####
conservation_info = variant.get("conservation", {})
if conservation_info.get("phast"):
variant_obj["phast_conservation"] = conservation_info["phast"]
if conservation_info.get("gerp"):
variant_obj["gerp_conservation"] = conservation_info["gerp"]
if conservation_info.get("phylop"):
variant_obj["phylop_conservation"] = conservation_info["phylop"]
##### Add autozygosity calls #####
if variant.get("azlength"):
variant_obj["azlength"] = variant["azlength"]
if variant.get("azqual"):
variant_obj["azqual"] = variant["azqual"]
if variant.get("custom"):
variant_obj["custom"] = variant["custom"]
if variant.get("somatic_score"):
variant_obj["somatic_score"] = variant["somatic_score"]
##### Add the frequencies #####
frequencies = variant.get("frequencies", {})
if frequencies.get("thousand_g"):
variant_obj["thousand_genomes_frequency"] = float(frequencies["thousand_g"])
if frequencies.get("thousand_g_max"):
variant_obj["max_thousand_genomes_frequency"] = float(frequencies["thousand_g_max"])
if frequencies.get("exac"):
variant_obj["exac_frequency"] = float(frequencies["exac"])
if frequencies.get("exac_max"):
variant_obj["max_exac_frequency"] = float(frequencies["exac_max"])
if frequencies.get("gnomad"):
variant_obj["gnomad_frequency"] = float(frequencies["gnomad"])
if frequencies.get("gnomad_max"):
variant_obj["max_gnomad_frequency"] = float(frequencies["gnomad_max"])
if frequencies.get("gnomad_mt_homoplasmic"):
variant_obj["gnomad_mt_homoplasmic_frequency"] = float(frequencies["gnomad_mt_homoplasmic"])
if frequencies.get("gnomad_mt_heteroplasmic"):
variant_obj["gnomad_mt_heteroplasmic_frequency"] = float(
frequencies["gnomad_mt_heteroplasmic"]
)
if frequencies.get("thousand_g_left"):
variant_obj["thousand_genomes_frequency_left"] = float(frequencies["thousand_g_left"])
if frequencies.get("thousand_g_right"):
variant_obj["thousand_genomes_frequency_right"] = float(frequencies["thousand_g_right"])
# add the local observation counts from the old archive
if variant.get("local_obs_old"):
variant_obj["local_obs_old"] = variant["local_obs_old"]
if variant.get("local_obs_hom_old"):
variant_obj["local_obs_hom_old"] = variant["local_obs_hom_old"]
# Add the sv counts:
if frequencies.get("clingen_benign"):
variant_obj["clingen_cgh_benign"] = frequencies["clingen_benign"]
if frequencies.get("clingen_pathogenic"):
variant_obj["clingen_cgh_pathogenic"] = frequencies["clingen_pathogenic"]
if frequencies.get("clingen_ngi"):
variant_obj["clingen_ngi"] = frequencies["clingen_ngi"]
if frequencies.get("swegen"):
variant_obj["swegen"] = frequencies["swegen"]
if frequencies.get("clingen_mip"):
variant_obj["clingen_mip"] = frequencies["clingen_mip"]
    # Decipher is never a frequency, it will only give 1 if the variant exists in Decipher
# Check if decipher exists
if frequencies.get("decipher"):
variant_obj["decipher"] = frequencies["decipher"]
##### Add the severity predictors #####
if variant.get("cadd_score"):
variant_obj["cadd_score"] = variant["cadd_score"]
if variant.get("revel_score"):
variant_obj["revel_score"] = variant["revel_score"]
if variant.get("spidex"):
variant_obj["spidex"] = variant["spidex"]
# Add the rank score results
rank_results = []
for category in variant.get("rank_result", []):
rank_result = {"category": category, "score": variant["rank_result"][category]}
rank_results.append(rank_result)
if rank_results:
variant_obj["rank_score_results"] = rank_results
# Cancer specific
if variant.get("mvl_tag"):
variant_obj["mvl_tag"] = True
return variant_obj
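
# Illustrative usage sketch (added for clarity; not part of the original
# module). The parsed-variant dict below is a minimal made-up example that
# exercises only the mandatory fields, and it assumes the enclosing builder
# above is exposed as build_variant.
if __name__ == "__main__":
    _demo_variant = {
        "ids": {
            "document_id": "doc_1",
            "variant_id": "var_1",
            "display_name": "1_880086_T_C",
            "simple_id": "1_880086",
        },
        "variant_type": "clinical",
        "case_id": "demo_case",
        "chromosome": "1",
        "position": "880086",
        "reference": "T",
        "alternative": "C",
        "rank_score": "17",
        "quality": "250.9",
        "filters": ["PASS"],
        "category": "snv",
        "hgnc_ids": [],
    }
    demo_obj = build_variant(_demo_variant, institute_id="demo_institute")
    assert demo_obj["position"] == 880086
    assert demo_obj["rank_score"] == 17.0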
| bsd-3-clause | -6,906,830,750,173,643,000 | 34.614155 | 119 | 0.601513 | false |
mediatum/mediatum | contenttypes/test/test_image.py | 1 | 4055 | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2016 by the mediaTUM authors
:license: GPL3, see COPYING for details
"""
from __future__ import absolute_import
import os
import tempfile
from PIL import Image as PILImage
import pytest
from core import config
from contenttypes.image import _create_zoom_tile_buffer, _create_zoom_archive, get_zoom_zip_filename
from contenttypes.test import fullpath_to_test_image
from contenttypes.test.asserts import assert_thumbnails_ok
from contenttypes.test.helpers import call_event_files_changed
from utils.testing import make_files_munch
def assert_zoom_ok(node):
zoom_image = node.files.filter_by(filetype=u"zoom").scalar()
assert zoom_image is not None
assert zoom_image.size > 1000
def assert_image_formats_ok(image_node):
mimetype = image_node._test_mimetype
files = make_files_munch(image_node)
assert "original" in files
assert "image" in files
if mimetype == u"image/tiff":
assert "image/png" in files.image
assert "image/tiff" in files.image
assert files.original.mimetype == "image/tiff"
assert files.original.path == files.image["image/tiff"].path
assert files.original.path.replace(".tif", ".png") == files.image["image/png"].path
elif mimetype == u"image/svg+xml":
assert "image/svg+xml" in files.image
assert "image/png" in files.image
assert files.original.mimetype == "image/svg+xml"
assert files.original.path == files.image["image/svg+xml"].path
assert files.original.path.replace(".svg", ".png") == files.image["image/png"].path
else:
assert files.original.path == files.image.path
assert files.original.mimetype == files.image.mimetype
@pytest.mark.slow
def test_image_generate_image_formats(image):
image._generate_image_formats()
assert_image_formats_ok(image)
@pytest.mark.slow
def test_image_generate_thumbnails(image):
image._generate_image_formats()
image._generate_thumbnails()
assert_thumbnails_ok(image)
def test_image_create_zoom_tile_buffer(image_png):
img_path = fullpath_to_test_image("png")
img = PILImage.open(img_path)
with _create_zoom_tile_buffer(img, 4, 256, 1, 0, 0) as buff:
val = buff.getvalue()
assert val
tile_img = PILImage.open(buff)
        assert 250 in tile_img.size
@pytest.mark.slow
def test_create_zoom_archive(image):
img_path = fullpath_to_test_image("png")
zip_name = get_zoom_zip_filename(image.id)
zip_path = os.path.join(config.get('paths.zoomdir'), zip_name)
_create_zoom_archive(256, img_path, zip_path)
assert os.stat(zip_path).st_size > 1000
def test_image_extract_metadata(image):
# for svg, the alternative png format is needed for extraction
if image._test_mimetype == "image/svg+xml":
image._generate_image_formats()
image._extract_metadata()
# SVG does not support Exif, GIF and PNG are not supported by our ancient exif lib
if image._test_mimetype in ("image/tiff",):
assert image.get("exif_Image_XResolution") == "400"
@pytest.mark.slow
def test_image_generate_zoom_archive(image):
image._generate_image_formats()
image._generate_zoom_archive()
assert_zoom_ok(image)
def _test_event_files_changed(image):
with call_event_files_changed(image):
assert_thumbnails_ok(image)
assert_image_formats_ok(image)
if image._test_mimetype != "image/svg+xml":
assert_zoom_ok(image)
@pytest.mark.slow
def test_event_files_changed_svg(image_svg):
_test_event_files_changed(image_svg)
@pytest.mark.slow
def test_event_files_changed_png(image_png):
_test_event_files_changed(image_png)
@pytest.mark.slow
def test_event_files_changed_gif(image_gif):
_test_event_files_changed(image_gif)
@pytest.mark.slow
def test_event_files_changed_jpeg(image_jpeg):
_test_event_files_changed(image_jpeg)
@pytest.mark.slow
def test_event_files_changed_tiff(image_tiff):
_test_event_files_changed(image_tiff)
| gpl-3.0 | -2,235,419,218,713,841,400 | 30.192308 | 100 | 0.693218 | false |
stefan-jonasson/home-assistant | homeassistant/components/input_datetime.py | 2 | 7579 | """
Component to offer a way to select a date and / or a time.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/input_datetime/
"""
import asyncio
import logging
import datetime
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_ICON, CONF_NAME, STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import async_get_last_state
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'input_datetime'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
CONF_HAS_DATE = 'has_date'
CONF_HAS_TIME = 'has_time'
CONF_INITIAL = 'initial'
ATTR_DATE = 'date'
ATTR_TIME = 'time'
SERVICE_SET_DATETIME = 'set_datetime'
SERVICE_SET_DATETIME_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_DATE): cv.date,
vol.Optional(ATTR_TIME): cv.time,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
cv.slug: vol.All({
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_HAS_DATE): cv.boolean,
vol.Required(CONF_HAS_TIME): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_INITIAL): cv.datetime,
}, cv.has_at_least_one_key_value((CONF_HAS_DATE, True),
(CONF_HAS_TIME, True)))})
}, extra=vol.ALLOW_EXTRA)
@asyncio.coroutine
def async_set_datetime(hass, entity_id, dt_value):
"""Set date and / or time of input_datetime."""
yield from hass.services.async_call(DOMAIN, SERVICE_SET_DATETIME, {
ATTR_ENTITY_ID: entity_id,
ATTR_DATE: dt_value.date(),
ATTR_TIME: dt_value.time()
})
@asyncio.coroutine
def async_setup(hass, config):
"""Set up an input datetime."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entities = []
for object_id, cfg in config[DOMAIN].items():
name = cfg.get(CONF_NAME)
has_time = cfg.get(CONF_HAS_TIME)
has_date = cfg.get(CONF_HAS_DATE)
icon = cfg.get(CONF_ICON)
initial = cfg.get(CONF_INITIAL)
entities.append(InputDatetime(object_id, name,
has_date, has_time, icon, initial))
if not entities:
return False
@asyncio.coroutine
def async_set_datetime_service(call):
"""Handle a call to the input datetime 'set datetime' service."""
target_inputs = component.async_extract_from_service(call)
tasks = []
for input_datetime in target_inputs:
time = call.data.get(ATTR_TIME)
date = call.data.get(ATTR_DATE)
if (input_datetime.has_date() and not date) or \
(input_datetime.has_time() and not time):
_LOGGER.error("Invalid service data for "
"input_datetime.set_datetime: %s",
str(call.data))
continue
tasks.append(input_datetime.async_set_datetime(date, time))
if tasks:
yield from asyncio.wait(tasks, loop=hass.loop)
hass.services.async_register(
DOMAIN, SERVICE_SET_DATETIME, async_set_datetime_service,
schema=SERVICE_SET_DATETIME_SCHEMA)
yield from component.async_add_entities(entities)
return True
class InputDatetime(Entity):
"""Representation of a datetime input."""
def __init__(self, object_id, name, has_date, has_time, icon, initial):
"""Initialize a select input."""
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self._name = name
self._has_date = has_date
self._has_time = has_time
self._icon = icon
self._initial = initial
self._current_datetime = None
@asyncio.coroutine
def async_added_to_hass(self):
"""Run when entity about to be added."""
restore_val = None
# Priority 1: Initial State
if self._initial is not None:
restore_val = self._initial
# Priority 2: Old state
if restore_val is None:
old_state = yield from async_get_last_state(self.hass,
self.entity_id)
if old_state is not None:
restore_val = dt_util.parse_datetime(old_state.state)
if restore_val is not None:
if not self._has_date:
self._current_datetime = restore_val.time()
elif not self._has_time:
self._current_datetime = restore_val.date()
else:
self._current_datetime = restore_val
def has_date(self):
"""Return whether the input datetime carries a date."""
return self._has_date
def has_time(self):
"""Return whether the input datetime carries a time."""
return self._has_time
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return the name of the select input."""
return self._name
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._icon
@property
def state(self):
"""Return the state of the component."""
if self._current_datetime is None:
return STATE_UNKNOWN
return self._current_datetime
@property
def state_attributes(self):
"""Return the state attributes."""
attrs = {
'has_date': self._has_date,
'has_time': self._has_time,
}
if self._current_datetime is None:
return attrs
if self._has_date and self._current_datetime is not None:
attrs['year'] = self._current_datetime.year
attrs['month'] = self._current_datetime.month
attrs['day'] = self._current_datetime.day
if self._has_time and self._current_datetime is not None:
attrs['hour'] = self._current_datetime.hour
attrs['minute'] = self._current_datetime.minute
attrs['second'] = self._current_datetime.second
if self._current_datetime is not None:
if not self._has_date:
attrs['timestamp'] = self._current_datetime.hour * 3600 + \
self._current_datetime.minute * 60 + \
self._current_datetime.second
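                # e.g. 07:30:15 -> 7 * 3600 + 30 * 60 + 15 = 27015 seconds past midnight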
elif not self._has_time:
extended = datetime.datetime.combine(self._current_datetime,
datetime.time(0, 0))
attrs['timestamp'] = extended.timestamp()
else:
attrs['timestamp'] = self._current_datetime.timestamp()
return attrs
@asyncio.coroutine
def async_set_datetime(self, date_val, time_val):
"""Set a new date / time."""
if self._has_date and self._has_time and date_val and time_val:
self._current_datetime = datetime.datetime.combine(date_val,
time_val)
elif self._has_date and not self._has_time and date_val:
self._current_datetime = date_val
if self._has_time and not self._has_date and time_val:
self._current_datetime = time_val
yield from self.async_update_ha_state()
| mit | -3,983,523,668,296,364,000 | 32.387665 | 76 | 0.586489 | false |
rtevans/tacc_stats_old | analyze/process_pickles/htrate.py | 1 | 2109 | #!/usr/bin/env python
import sys
sys.path.append('../../monitor')
import datetime, glob, job_stats, os, subprocess, time
import matplotlib
if not 'matplotlib.pyplot' in sys.modules:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
import scipy, scipy.stats
import argparse
import tspl, tspl_utils
class Colors:
def __init__(self):
self.colors=['b','g','r','c','m','y','k']
self.loc=0
def next(self):
if self.loc == len(self.colors):
self.loc=0
c=self.colors[self.loc]
self.loc+=1
return c
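# Illustrative behaviour of the cycler above (made-up call sequence):
#   c = Colors()
#   [c.next() for _ in range(8)]  # -> ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'b']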
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', help='Set full mode', action='store_true')
parser.add_argument('filearg', help='File, directory, or quoted'
' glob pattern', nargs='?',default='jobs')
n=parser.parse_args()
filelist=tspl_utils.getfilelist(n.filearg)
for file in filelist:
try:
full=''
ts=tspl.TSPLBase(file,['amd64_sock', 'amd64_sock', 'amd64_sock'],
['HT0', 'HT1', 'HT2'])
except tspl.TSPLException as e:
continue
if not tspl_utils.checkjob(ts,3600,16): # 1 hour, 16way only
continue
elif ts.numhosts < 2: # At least 2 hosts
print ts.j.id + ': 1 host'
continue
print ts.j.id
tmid=(ts.t[:-1]+ts.t[1:])/2.0
dt=numpy.diff(ts.t)
fig,ax=plt.subplots(1,1,figsize=(8,6),dpi=80)
ax.hold=True
xmin,xmax=[0.,0.]
c=Colors()
for k in ts.j.hosts.keys():
h=ts.j.hosts[k]
col=c.next()
for i in range(3):
for j in range(4):
rate=numpy.divide(numpy.diff(ts.data[i][k][j]),dt)
xmin,xmax=[min(xmin,min(rate)),max(xmax,max(rate))]
ax.plot(tmid/3600,rate,'-'+col)
if xmax > 2.0e9:
print ts.j.id + ' over limit: %(v)8.3f' % {'v' : xmax}
else:
plt.close()
continue
plt.suptitle(ts.title)
xmin,xmax=tspl_utils.expand_range(xmin,xmax,.1)
ax.set_ylim(bottom=xmin,top=xmax)
fname='_'.join(['graph',ts.j.id,'HT_rates'])
fig.savefig(fname)
plt.close()
if __name__ == '__main__':
main()
| lgpl-2.1 | 5,669,426,090,507,161,000 | 24.107143 | 71 | 0.588431 | false |
strahlex/pymachinetalk | setup.py | 1 | 1602 | #!/usr/bin/env python
# coding=utf-8
import sys
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "pymachinetalk" package.
try:
from setuptools import setup, Extension, find_packages
except ImportError:
try:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension, find_packages
except ImportError:
sys.stderr.write(
"Could not import setuptools; make sure you have setuptools or "
"ez_setup installed.\n"
)
raise
from distutils.command.clean import clean
if sys.version_info[0] == 3:
# Python 3
from distutils.command.build_py import build_py_2to3 as build_py
else:
# Python 2
from distutils.command.build_py import build_py as build_py
requirements = ['pyzmq', 'protobuf', 'machinetalk-protobuf', 'fysom', 'six']
if sys.version_info <= (3, 3):
requirements.append('zeroconf<=0.19.1') # freeze version
else:
requirements.append('zeroconf')
if __name__ == '__main__':
setup(
name="pymachinetalk",
version="0.10.1",
description="Python bindings for Machinetalk",
author="Alexander Roessler",
author_email="[email protected]",
url="https://github.com/machinekit/pymachinetalk",
namespace_packages=['pymachinetalk'],
packages=find_packages(),
install_requires=requirements,
extras_require={'dev': ['pytest', 'pytest-mock', 'pytest-pep8', 'pytest-cov']},
cmdclass={'clean': clean, 'build_py': build_py},
)
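# Illustrative install sketch (not part of the original file):
#   pip install .        # installs pymachinetalk plus the requirements above
#   pip install .[dev]   # additionally pulls the pytest tooling from extras_require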
| mit | -5,216,066,257,719,230,000 | 30.411765 | 87 | 0.650437 | false |
yuxans/badgirl | src/cht_codec.py | 1 | 87371 | # -*- coding:utf-8 -*-
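# Simplified-to-Traditional character map follows. A minimal conversion helper
# (an illustrative sketch, not part of the original module; it looks every
# character up in _mapToCht and falls back to the character itself):
def _to_cht_demo(text):
    return u"".join(_mapToCht.get(ch, ch) for ch in text)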
_mapToCht = {
u"啊": u"啊",
u"阿": u"阿",
u"埃": u"埃",
u"挨": u"挨",
u"哎": u"哎",
u"唉": u"唉",
u"哀": u"哀",
u"皑": u"皚",
u"癌": u"癌",
u"蔼": u"藹",
u"矮": u"矮",
u"艾": u"艾",
u"碍": u"礙",
u"爱": u"愛",
u"隘": u"隘",
u"鞍": u"鞍",
u"氨": u"氨",
u"安": u"安",
u"俺": u"俺",
u"按": u"按",
u"暗": u"暗",
u"岸": u"岸",
u"胺": u"胺",
u"案": u"案",
u"肮": u"骯",
u"昂": u"昂",
u"盎": u"盎",
u"凹": u"凹",
u"敖": u"敖",
u"熬": u"熬",
u"翱": u"翺",
u"袄": u"襖",
u"傲": u"傲",
u"奥": u"奧",
u"懊": u"懊",
u"澳": u"澳",
u"芭": u"芭",
u"捌": u"捌",
u"扒": u"扒",
u"叭": u"叭",
u"吧": u"吧",
u"笆": u"笆",
u"八": u"八",
u"疤": u"疤",
u"巴": u"巴",
u"拔": u"拔",
u"跋": u"跋",
u"靶": u"靶",
u"把": u"把",
u"耙": u"耙",
u"坝": u"壩",
u"霸": u"霸",
u"罢": u"罷",
u"爸": u"爸",
u"白": u"白",
u"柏": u"柏",
u"百": u"百",
u"摆": u"擺",
u"佰": u"佰",
u"败": u"敗",
u"拜": u"拜",
u"稗": u"稗",
u"斑": u"斑",
u"班": u"班",
u"搬": u"搬",
u"扳": u"扳",
u"般": u"般",
u"颁": u"頒",
u"板": u"板",
u"版": u"版",
u"扮": u"扮",
u"拌": u"拌",
u"伴": u"伴",
u"瓣": u"瓣",
u"半": u"半",
u"办": u"辦",
u"绊": u"絆",
u"邦": u"邦",
u"帮": u"幫",
u"梆": u"梆",
u"榜": u"榜",
u"膀": u"膀",
u"绑": u"綁",
u"棒": u"棒",
u"磅": u"磅",
u"蚌": u"蚌",
u"镑": u"鎊",
u"傍": u"傍",
u"谤": u"謗",
u"苞": u"苞",
u"胞": u"胞",
u"包": u"包",
u"褒": u"褒",
u"剥": u"剝",
u"薄": u"薄",
u"雹": u"雹",
u"保": u"保",
u"堡": u"堡",
u"饱": u"飽",
u"宝": u"寶",
u"抱": u"抱",
u"报": u"報",
u"暴": u"暴",
u"豹": u"豹",
u"鲍": u"鮑",
u"爆": u"爆",
u"杯": u"杯",
u"碑": u"碑",
u"悲": u"悲",
u"卑": u"卑",
u"北": u"北",
u"辈": u"輩",
u"背": u"背",
u"贝": u"貝",
u"钡": u"鋇",
u"倍": u"倍",
u"狈": u"狽",
u"备": u"備",
u"惫": u"憊",
u"焙": u"焙",
u"被": u"被",
u"奔": u"奔",
u"苯": u"苯",
u"本": u"本",
u"笨": u"笨",
u"崩": u"崩",
u"绷": u"繃",
u"甭": u"甭",
u"泵": u"泵",
u"蹦": u"蹦",
u"迸": u"迸",
u"逼": u"逼",
u"鼻": u"鼻",
u"比": u"比",
u"鄙": u"鄙",
u"笔": u"筆",
u"彼": u"彼",
u"碧": u"碧",
u"蓖": u"蓖",
u"蔽": u"蔽",
u"毕": u"畢",
u"毙": u"斃",
u"毖": u"毖",
u"币": u"幣",
u"庇": u"庇",
u"痹": u"痹",
u"闭": u"閉",
u"敝": u"敝",
u"弊": u"弊",
u"必": u"必",
u"辟": u"辟",
u"壁": u"壁",
u"臂": u"臂",
u"避": u"避",
u"陛": u"陛",
u"鞭": u"鞭",
u"边": u"邊",
u"编": u"編",
u"贬": u"貶",
u"扁": u"扁",
u"便": u"便",
u"变": u"變",
u"卞": u"卞",
u"辨": u"辨",
u"辩": u"辯",
u"辫": u"辮",
u"遍": u"遍",
u"标": u"標",
u"彪": u"彪",
u"膘": u"膘",
u"表": u"表",
u"鳖": u"鱉",
u"憋": u"憋",
u"别": u"別",
u"瘪": u"癟",
u"彬": u"彬",
u"斌": u"斌",
u"濒": u"瀕",
u"滨": u"濱",
u"宾": u"賓",
u"摈": u"擯",
u"兵": u"兵",
u"冰": u"冰",
u"柄": u"柄",
u"丙": u"丙",
u"秉": u"秉",
u"饼": u"餅",
u"炳": u"炳",
u"病": u"病",
u"并": u"並",
u"玻": u"玻",
u"菠": u"菠",
u"播": u"播",
u"拨": u"撥",
u"钵": u"缽",
u"波": u"波",
u"博": u"博",
u"勃": u"勃",
u"搏": u"搏",
u"铂": u"鉑",
u"箔": u"箔",
u"伯": u"伯",
u"帛": u"帛",
u"舶": u"舶",
u"脖": u"脖",
u"膊": u"膊",
u"渤": u"渤",
u"泊": u"泊",
u"驳": u"駁",
u"捕": u"捕",
u"卜": u"蔔",
u"哺": u"哺",
u"补": u"補",
u"埠": u"埠",
u"不": u"不",
u"布": u"布",
u"步": u"步",
u"簿": u"簿",
u"部": u"部",
u"怖": u"怖",
u"擦": u"擦",
u"猜": u"猜",
u"裁": u"裁",
u"材": u"材",
u"才": u"才",
u"财": u"財",
u"睬": u"睬",
u"踩": u"踩",
u"采": u"采",
u"彩": u"彩",
u"菜": u"菜",
u"蔡": u"蔡",
u"餐": u"餐",
u"参": u"參",
u"蚕": u"蠶",
u"残": u"殘",
u"惭": u"慚",
u"惨": u"慘",
u"灿": u"燦",
u"苍": u"蒼",
u"舱": u"艙",
u"仓": u"倉",
u"沧": u"滄",
u"藏": u"藏",
u"操": u"操",
u"糙": u"糙",
u"槽": u"槽",
u"曹": u"曹",
u"草": u"草",
u"厕": u"廁",
u"策": u"策",
u"侧": u"側",
u"册": u"冊",
u"测": u"測",
u"层": u"層",
u"蹭": u"蹭",
u"插": u"插",
u"叉": u"叉",
u"茬": u"茬",
u"茶": u"茶",
u"查": u"查",
u"碴": u"碴",
u"搽": u"搽",
u"察": u"察",
u"岔": u"岔",
u"差": u"差",
u"诧": u"詫",
u"拆": u"拆",
u"柴": u"柴",
u"豺": u"豺",
u"搀": u"攙",
u"掺": u"摻",
u"蝉": u"蟬",
u"馋": u"饞",
u"谗": u"讒",
u"缠": u"纏",
u"铲": u"鏟",
u"产": u"產",
u"阐": u"闡",
u"颤": u"顫",
u"昌": u"昌",
u"猖": u"猖",
u"场": u"場",
u"尝": u"嘗",
u"常": u"常",
u"长": u"長",
u"偿": u"償",
u"肠": u"腸",
u"厂": u"廠",
u"敞": u"敞",
u"畅": u"暢",
u"唱": u"唱",
u"倡": u"倡",
u"超": u"超",
u"抄": u"抄",
u"钞": u"鈔",
u"朝": u"朝",
u"嘲": u"嘲",
u"潮": u"潮",
u"巢": u"巢",
u"吵": u"吵",
u"炒": u"炒",
u"车": u"車",
u"扯": u"扯",
u"撤": u"撤",
u"掣": u"掣",
u"彻": u"徹",
u"澈": u"澈",
u"郴": u"郴",
u"臣": u"臣",
u"辰": u"辰",
u"尘": u"塵",
u"晨": u"晨",
u"忱": u"忱",
u"沉": u"沈",
u"陈": u"陳",
u"趁": u"趁",
u"衬": u"襯",
u"撑": u"撐",
u"称": u"稱",
u"城": u"城",
u"橙": u"橙",
u"成": u"成",
u"呈": u"呈",
u"乘": u"乘",
u"程": u"程",
u"惩": u"懲",
u"澄": u"澄",
u"诚": u"誠",
u"承": u"承",
u"逞": u"逞",
u"骋": u"騁",
u"秤": u"秤",
u"吃": u"吃",
u"痴": u"癡",
u"持": u"持",
u"匙": u"匙",
u"池": u"池",
u"迟": u"遲",
u"弛": u"弛",
u"驰": u"馳",
u"耻": u"恥",
u"齿": u"齒",
u"侈": u"侈",
u"尺": u"尺",
u"赤": u"赤",
u"翅": u"翅",
u"斥": u"斥",
u"炽": u"熾",
u"充": u"充",
u"冲": u"沖",
u"虫": u"蟲",
u"崇": u"崇",
u"宠": u"寵",
u"抽": u"抽",
u"酬": u"酬",
u"畴": u"疇",
u"踌": u"躊",
u"稠": u"稠",
u"愁": u"愁",
u"筹": u"籌",
u"仇": u"仇",
u"绸": u"綢",
u"瞅": u"瞅",
u"丑": u"醜",
u"臭": u"臭",
u"初": u"初",
u"出": u"出",
u"橱": u"櫥",
u"厨": u"廚",
u"躇": u"躇",
u"锄": u"鋤",
u"雏": u"雛",
u"滁": u"滁",
u"除": u"除",
u"楚": u"楚",
u"础": u"礎",
u"储": u"儲",
u"矗": u"矗",
u"搐": u"搐",
u"触": u"觸",
u"处": u"處",
u"揣": u"揣",
u"川": u"川",
u"穿": u"穿",
u"椽": u"椽",
u"传": u"傳",
u"船": u"船",
u"喘": u"喘",
u"串": u"串",
u"疮": u"瘡",
u"窗": u"窗",
u"幢": u"幢",
u"床": u"床",
u"闯": u"闖",
u"创": u"創",
u"吹": u"吹",
u"炊": u"炊",
u"捶": u"捶",
u"锤": u"錘",
u"垂": u"垂",
u"春": u"春",
u"椿": u"椿",
u"醇": u"醇",
u"唇": u"唇",
u"淳": u"淳",
u"纯": u"純",
u"蠢": u"蠢",
u"戳": u"戳",
u"绰": u"綽",
u"疵": u"疵",
u"茨": u"茨",
u"磁": u"磁",
u"雌": u"雌",
u"辞": u"辭",
u"慈": u"慈",
u"瓷": u"瓷",
u"词": u"詞",
u"此": u"此",
u"刺": u"刺",
u"赐": u"賜",
u"次": u"次",
u"聪": u"聰",
u"葱": u"蔥",
u"囱": u"囪",
u"匆": u"匆",
u"从": u"從",
u"丛": u"叢",
u"凑": u"湊",
u"粗": u"粗",
u"醋": u"醋",
u"簇": u"簇",
u"促": u"促",
u"蹿": u"躥",
u"篡": u"篡",
u"窜": u"竄",
u"摧": u"摧",
u"崔": u"崔",
u"催": u"催",
u"脆": u"脆",
u"瘁": u"瘁",
u"粹": u"粹",
u"淬": u"淬",
u"翠": u"翠",
u"村": u"村",
u"存": u"存",
u"寸": u"寸",
u"磋": u"磋",
u"撮": u"撮",
u"搓": u"搓",
u"措": u"措",
u"挫": u"挫",
u"错": u"錯",
u"搭": u"搭",
u"达": u"達",
u"答": u"答",
u"瘩": u"瘩",
u"打": u"打",
u"大": u"大",
u"呆": u"呆",
u"歹": u"歹",
u"傣": u"傣",
u"戴": u"戴",
u"带": u"帶",
u"殆": u"殆",
u"代": u"代",
u"贷": u"貸",
u"袋": u"袋",
u"待": u"待",
u"逮": u"逮",
u"怠": u"怠",
u"耽": u"耽",
u"担": u"擔",
u"丹": u"丹",
u"单": u"單",
u"郸": u"鄲",
u"掸": u"撣",
u"胆": u"膽",
u"旦": u"旦",
u"氮": u"氮",
u"但": u"但",
u"惮": u"憚",
u"淡": u"淡",
u"诞": u"誕",
u"弹": u"彈",
u"蛋": u"蛋",
u"当": u"當",
u"挡": u"擋",
u"党": u"黨",
u"荡": u"蕩",
u"档": u"檔",
u"刀": u"刀",
u"捣": u"搗",
u"蹈": u"蹈",
u"倒": u"倒",
u"岛": u"島",
u"祷": u"禱",
u"导": u"導",
u"到": u"到",
u"稻": u"稻",
u"悼": u"悼",
u"道": u"道",
u"盗": u"盜",
u"德": u"德",
u"得": u"得",
u"的": u"的",
u"蹬": u"蹬",
u"灯": u"燈",
u"登": u"登",
u"等": u"等",
u"瞪": u"瞪",
u"凳": u"凳",
u"邓": u"鄧",
u"堤": u"堤",
u"低": u"低",
u"滴": u"滴",
u"迪": u"迪",
u"敌": u"敵",
u"笛": u"笛",
u"狄": u"狄",
u"涤": u"滌",
u"翟": u"翟",
u"嫡": u"嫡",
u"抵": u"抵",
u"底": u"底",
u"地": u"地",
u"蒂": u"蒂",
u"第": u"第",
u"帝": u"帝",
u"弟": u"弟",
u"递": u"遞",
u"缔": u"締",
u"颠": u"顛",
u"掂": u"掂",
u"滇": u"滇",
u"碘": u"碘",
u"点": u"點",
u"典": u"典",
u"靛": u"靛",
u"垫": u"墊",
u"电": u"電",
u"佃": u"佃",
u"甸": u"甸",
u"店": u"店",
u"惦": u"惦",
u"奠": u"奠",
u"淀": u"澱",
u"殿": u"殿",
u"碉": u"碉",
u"叼": u"叼",
u"雕": u"雕",
u"凋": u"雕",
u"刁": u"刁",
u"掉": u"掉",
u"吊": u"吊",
u"钓": u"釣",
u"调": u"調",
u"跌": u"跌",
u"爹": u"爹",
u"碟": u"碟",
u"蝶": u"蝶",
u"迭": u"叠",
u"谍": u"諜",
u"叠": u"疊",
u"丁": u"丁",
u"盯": u"盯",
u"叮": u"叮",
u"钉": u"釘",
u"顶": u"頂",
u"鼎": u"鼎",
u"锭": u"錠",
u"定": u"定",
u"订": u"訂",
u"丢": u"丟",
u"东": u"東",
u"冬": u"冬",
u"董": u"董",
u"懂": u"懂",
u"动": u"動",
u"栋": u"棟",
u"侗": u"侗",
u"恫": u"恫",
u"冻": u"凍",
u"洞": u"洞",
u"兜": u"兜",
u"抖": u"抖",
u"斗": u"鬥",
u"陡": u"陡",
u"豆": u"豆",
u"逗": u"逗",
u"痘": u"痘",
u"都": u"都",
u"督": u"督",
u"毒": u"毒",
u"犊": u"犢",
u"独": u"獨",
u"读": u"讀",
u"堵": u"堵",
u"睹": u"睹",
u"赌": u"賭",
u"杜": u"杜",
u"镀": u"鍍",
u"肚": u"肚",
u"度": u"度",
u"渡": u"渡",
u"妒": u"妒",
u"端": u"端",
u"短": u"短",
u"锻": u"鍛",
u"段": u"段",
u"断": u"斷",
u"缎": u"緞",
u"堆": u"堆",
u"兑": u"兌",
u"队": u"隊",
u"对": u"對",
u"墩": u"墩",
u"吨": u"噸",
u"蹲": u"蹲",
u"敦": u"敦",
u"顿": u"頓",
u"囤": u"囤",
u"钝": u"鈍",
u"盾": u"盾",
u"遁": u"遁",
u"掇": u"掇",
u"哆": u"哆",
u"多": u"多",
u"夺": u"奪",
u"垛": u"垛",
u"躲": u"躲",
u"朵": u"朵",
u"跺": u"跺",
u"舵": u"舵",
u"剁": u"剁",
u"惰": u"惰",
u"堕": u"墮",
u"蛾": u"蛾",
u"峨": u"峨",
u"鹅": u"鵝",
u"俄": u"俄",
u"额": u"額",
u"讹": u"訛",
u"娥": u"娥",
u"恶": u"惡",
u"厄": u"厄",
u"扼": u"扼",
u"遏": u"遏",
u"鄂": u"鄂",
u"饿": u"餓",
u"恩": u"恩",
u"而": u"而",
u"儿": u"兒",
u"耳": u"耳",
u"尔": u"爾",
u"饵": u"餌",
u"洱": u"洱",
u"二": u"二",
u"贰": u"貳",
u"发": u"發",
u"罚": u"罰",
u"筏": u"筏",
u"伐": u"伐",
u"乏": u"乏",
u"阀": u"閥",
u"法": u"法",
u"珐": u"琺",
u"藩": u"藩",
u"帆": u"帆",
u"番": u"番",
u"翻": u"翻",
u"樊": u"樊",
u"矾": u"礬",
u"钒": u"釩",
u"繁": u"繁",
u"凡": u"凡",
u"烦": u"煩",
u"反": u"反",
u"返": u"返",
u"范": u"範",
u"贩": u"販",
u"犯": u"犯",
u"饭": u"飯",
u"泛": u"泛",
u"坊": u"坊",
u"芳": u"芳",
u"方": u"方",
u"肪": u"肪",
u"房": u"房",
u"防": u"防",
u"妨": u"妨",
u"仿": u"仿",
u"访": u"訪",
u"纺": u"紡",
u"放": u"放",
u"菲": u"菲",
u"非": u"非",
u"啡": u"啡",
u"飞": u"飛",
u"肥": u"肥",
u"匪": u"匪",
u"诽": u"誹",
u"吠": u"吠",
u"肺": u"肺",
u"废": u"廢",
u"沸": u"沸",
u"费": u"費",
u"芬": u"芬",
u"酚": u"酚",
u"吩": u"吩",
u"氛": u"氛",
u"分": u"分",
u"纷": u"紛",
u"坟": u"墳",
u"焚": u"焚",
u"汾": u"汾",
u"粉": u"粉",
u"奋": u"奮",
u"份": u"份",
u"忿": u"忿",
u"愤": u"憤",
u"粪": u"糞",
u"丰": u"豐",
u"封": u"封",
u"枫": u"楓",
u"蜂": u"蜂",
u"峰": u"峰",
u"锋": u"鋒",
u"风": u"風",
u"疯": u"瘋",
u"烽": u"烽",
u"逢": u"逢",
u"冯": u"馮",
u"缝": u"縫",
u"讽": u"諷",
u"奉": u"奉",
u"凤": u"鳳",
u"佛": u"佛",
u"否": u"否",
u"夫": u"夫",
u"敷": u"敷",
u"肤": u"膚",
u"孵": u"孵",
u"扶": u"扶",
u"拂": u"拂",
u"辐": u"輻",
u"幅": u"幅",
u"氟": u"氟",
u"符": u"符",
u"伏": u"伏",
u"俘": u"俘",
u"服": u"服",
u"浮": u"浮",
u"涪": u"涪",
u"福": u"福",
u"袱": u"袱",
u"弗": u"弗",
u"甫": u"甫",
u"抚": u"撫",
u"辅": u"輔",
u"俯": u"俯",
u"釜": u"釜",
u"斧": u"斧",
u"脯": u"脯",
u"腑": u"腑",
u"府": u"府",
u"腐": u"腐",
u"赴": u"赴",
u"副": u"副",
u"覆": u"覆",
u"赋": u"賦",
u"复": u"復",
u"傅": u"傅",
u"付": u"付",
u"阜": u"阜",
u"父": u"父",
u"腹": u"腹",
u"负": u"負",
u"富": u"富",
u"讣": u"訃",
u"附": u"附",
u"妇": u"婦",
u"缚": u"縛",
u"咐": u"咐",
u"噶": u"噶",
u"嘎": u"嘎",
u"该": u"該",
u"改": u"改",
u"概": u"概",
u"钙": u"鈣",
u"盖": u"蓋",
u"溉": u"溉",
u"干": u"幹",
u"甘": u"甘",
u"杆": u"桿",
u"柑": u"柑",
u"竿": u"竿",
u"肝": u"肝",
u"赶": u"趕",
u"感": u"感",
u"秆": u"稈",
u"敢": u"敢",
u"赣": u"贛",
u"冈": u"岡",
u"刚": u"剛",
u"钢": u"鋼",
u"缸": u"缸",
u"肛": u"肛",
u"纲": u"綱",
u"岗": u"崗",
u"港": u"港",
u"杠": u"杠",
u"篙": u"篙",
u"皋": u"臯",
u"高": u"高",
u"膏": u"膏",
u"羔": u"羔",
u"糕": u"糕",
u"搞": u"搞",
u"镐": u"鎬",
u"稿": u"稿",
u"告": u"告",
u"哥": u"哥",
u"歌": u"歌",
u"搁": u"擱",
u"戈": u"戈",
u"鸽": u"鴿",
u"胳": u"胳",
u"疙": u"疙",
u"割": u"割",
u"革": u"革",
u"葛": u"葛",
u"格": u"格",
u"蛤": u"蛤",
u"阁": u"閣",
u"隔": u"隔",
u"铬": u"鉻",
u"个": u"個",
u"各": u"各",
u"给": u"給",
u"根": u"根",
u"跟": u"跟",
u"耕": u"耕",
u"更": u"更",
u"庚": u"庚",
u"羹": u"羹",
u"埂": u"埂",
u"耿": u"耿",
u"梗": u"梗",
u"工": u"工",
u"攻": u"攻",
u"功": u"功",
u"恭": u"恭",
u"龚": u"龔",
u"供": u"供",
u"躬": u"躬",
u"公": u"公",
u"宫": u"宮",
u"弓": u"弓",
u"巩": u"鞏",
u"汞": u"汞",
u"拱": u"拱",
u"贡": u"貢",
u"共": u"共",
u"钩": u"鉤",
u"勾": u"勾",
u"沟": u"溝",
u"苟": u"茍",
u"狗": u"狗",
u"垢": u"垢",
u"构": u"構",
u"购": u"購",
u"够": u"夠",
u"辜": u"辜",
u"菇": u"菇",
u"咕": u"咕",
u"箍": u"箍",
u"估": u"估",
u"沽": u"沽",
u"孤": u"孤",
u"姑": u"姑",
u"鼓": u"鼓",
u"古": u"古",
u"蛊": u"蠱",
u"骨": u"骨",
u"谷": u"谷",
u"股": u"股",
u"故": u"故",
u"顾": u"顧",
u"固": u"固",
u"雇": u"雇",
u"刮": u"刮",
u"瓜": u"瓜",
u"剐": u"剮",
u"寡": u"寡",
u"挂": u"掛",
u"褂": u"褂",
u"乖": u"乖",
u"拐": u"拐",
u"怪": u"怪",
u"棺": u"棺",
u"关": u"關",
u"官": u"官",
u"冠": u"冠",
u"观": u"觀",
u"管": u"管",
u"馆": u"館",
u"罐": u"罐",
u"惯": u"慣",
u"灌": u"灌",
u"贯": u"貫",
u"光": u"光",
u"广": u"廣",
u"逛": u"逛",
u"瑰": u"瑰",
u"规": u"規",
u"圭": u"圭",
u"硅": u"矽",
u"归": u"歸",
u"龟": u"龜",
u"闺": u"閨",
u"轨": u"軌",
u"鬼": u"鬼",
u"诡": u"詭",
u"癸": u"癸",
u"桂": u"桂",
u"柜": u"櫃",
u"跪": u"跪",
u"贵": u"貴",
u"刽": u"劊",
u"辊": u"輥",
u"滚": u"滾",
u"棍": u"棍",
u"锅": u"鍋",
u"郭": u"郭",
u"国": u"國",
u"果": u"果",
u"裹": u"裹",
u"过": u"過",
u"哈": u"哈",
u"骸": u"骸",
u"孩": u"孩",
u"海": u"海",
u"氦": u"氦",
u"亥": u"亥",
u"害": u"害",
u"骇": u"駭",
u"酣": u"酣",
u"憨": u"憨",
u"邯": u"邯",
u"韩": u"韓",
u"含": u"含",
u"涵": u"涵",
u"寒": u"寒",
u"函": u"函",
u"喊": u"喊",
u"罕": u"罕",
u"翰": u"翰",
u"撼": u"撼",
u"捍": u"捍",
u"旱": u"旱",
u"憾": u"憾",
u"悍": u"悍",
u"焊": u"焊",
u"汗": u"汗",
u"汉": u"漢",
u"夯": u"夯",
u"杭": u"杭",
u"航": u"航",
u"壕": u"壕",
u"嚎": u"嚎",
u"豪": u"豪",
u"毫": u"毫",
u"郝": u"郝",
u"好": u"好",
u"耗": u"耗",
u"号": u"號",
u"浩": u"浩",
u"呵": u"呵",
u"喝": u"喝",
u"荷": u"荷",
u"菏": u"菏",
u"核": u"核",
u"禾": u"禾",
u"和": u"和",
u"何": u"何",
u"合": u"合",
u"盒": u"盒",
u"貉": u"貉",
u"阂": u"閡",
u"河": u"河",
u"涸": u"涸",
u"赫": u"赫",
u"褐": u"褐",
u"鹤": u"鶴",
u"贺": u"賀",
u"嘿": u"嘿",
u"黑": u"黑",
u"痕": u"痕",
u"很": u"很",
u"狠": u"狠",
u"恨": u"恨",
u"哼": u"哼",
u"亨": u"亨",
u"横": u"橫",
u"衡": u"衡",
u"恒": u"恒",
u"轰": u"轟",
u"哄": u"哄",
u"烘": u"烘",
u"虹": u"虹",
u"鸿": u"鴻",
u"洪": u"洪",
u"宏": u"宏",
u"弘": u"弘",
u"红": u"紅",
u"喉": u"喉",
u"侯": u"侯",
u"猴": u"猴",
u"吼": u"吼",
u"厚": u"厚",
u"候": u"候",
u"后": u"後",
u"呼": u"呼",
u"乎": u"乎",
u"忽": u"忽",
u"瑚": u"瑚",
u"壶": u"壺",
u"葫": u"葫",
u"胡": u"胡",
u"蝴": u"蝴",
u"狐": u"狐",
u"糊": u"糊",
u"湖": u"湖",
u"弧": u"弧",
u"虎": u"虎",
u"唬": u"唬",
u"护": u"護",
u"互": u"互",
u"沪": u"滬",
u"户": u"戶",
u"花": u"花",
u"哗": u"嘩",
u"华": u"華",
u"猾": u"猾",
u"滑": u"滑",
u"画": u"畫",
u"划": u"劃",
u"化": u"化",
u"话": u"話",
u"槐": u"槐",
u"徊": u"徊",
u"怀": u"懷",
u"淮": u"淮",
u"坏": u"壞",
u"欢": u"歡",
u"环": u"環",
u"桓": u"桓",
u"还": u"還",
u"缓": u"緩",
u"换": u"換",
u"患": u"患",
u"唤": u"喚",
u"痪": u"瘓",
u"豢": u"豢",
u"焕": u"煥",
u"涣": u"渙",
u"宦": u"宦",
u"幻": u"幻",
u"荒": u"荒",
u"慌": u"慌",
u"黄": u"黃",
u"磺": u"磺",
u"蝗": u"蝗",
u"簧": u"簧",
u"皇": u"皇",
u"凰": u"凰",
u"惶": u"惶",
u"煌": u"煌",
u"晃": u"晃",
u"幌": u"幌",
u"恍": u"恍",
u"谎": u"謊",
u"灰": u"灰",
u"挥": u"揮",
u"辉": u"輝",
u"徽": u"徽",
u"恢": u"恢",
u"蛔": u"蛔",
u"回": u"回",
u"毁": u"毀",
u"悔": u"悔",
u"慧": u"慧",
u"卉": u"卉",
u"惠": u"惠",
u"晦": u"晦",
u"贿": u"賄",
u"秽": u"穢",
u"会": u"會",
u"烩": u"燴",
u"汇": u"匯",
u"讳": u"諱",
u"诲": u"誨",
u"绘": u"繪",
u"荤": u"葷",
u"昏": u"昏",
u"婚": u"婚",
u"魂": u"魂",
u"浑": u"渾",
u"混": u"混",
u"豁": u"豁",
u"活": u"活",
u"伙": u"夥",
u"火": u"火",
u"获": u"獲",
u"或": u"或",
u"惑": u"惑",
u"霍": u"霍",
u"货": u"貨",
u"祸": u"禍",
u"击": u"擊",
u"圾": u"圾",
u"基": u"基",
u"机": u"機",
u"畸": u"畸",
u"稽": u"稽",
u"积": u"積",
u"箕": u"箕",
u"肌": u"肌",
u"饥": u"饑",
u"迹": u"跡",
u"激": u"激",
u"讥": u"譏",
u"鸡": u"雞",
u"姬": u"姬",
u"绩": u"績",
u"缉": u"緝",
u"吉": u"吉",
u"极": u"極",
u"棘": u"棘",
u"辑": u"輯",
u"籍": u"籍",
u"集": u"集",
u"及": u"及",
u"急": u"急",
u"疾": u"疾",
u"汲": u"汲",
u"即": u"即",
u"嫉": u"嫉",
u"级": u"級",
u"挤": u"擠",
u"几": u"幾",
u"脊": u"脊",
u"己": u"己",
u"蓟": u"薊",
u"技": u"技",
u"冀": u"冀",
u"季": u"季",
u"伎": u"伎",
u"祭": u"祭",
u"剂": u"劑",
u"悸": u"悸",
u"济": u"濟",
u"寄": u"寄",
u"寂": u"寂",
u"计": u"計",
u"记": u"記",
u"既": u"既",
u"忌": u"忌",
u"际": u"際",
u"继": u"繼",
u"纪": u"紀",
u"嘉": u"嘉",
u"枷": u"枷",
u"夹": u"夾",
u"佳": u"佳",
u"家": u"家",
u"加": u"加",
u"荚": u"莢",
u"颊": u"頰",
u"贾": u"賈",
u"甲": u"甲",
u"钾": u"鉀",
u"假": u"假",
u"稼": u"稼",
u"价": u"價",
u"架": u"架",
u"驾": u"駕",
u"嫁": u"嫁",
u"歼": u"殲",
u"监": u"監",
u"坚": u"堅",
u"尖": u"尖",
u"笺": u"箋",
u"间": u"間",
u"煎": u"煎",
u"兼": u"兼",
u"肩": u"肩",
u"艰": u"艱",
u"奸": u"奸",
u"缄": u"緘",
u"茧": u"繭",
u"检": u"檢",
u"柬": u"柬",
u"碱": u"堿",
u"硷": u"鹼",
u"拣": u"揀",
u"捡": u"撿",
u"简": u"簡",
u"俭": u"儉",
u"剪": u"剪",
u"减": u"減",
u"荐": u"薦",
u"槛": u"檻",
u"鉴": u"鑒",
u"践": u"踐",
u"贱": u"賤",
u"见": u"見",
u"键": u"鍵",
u"箭": u"箭",
u"件": u"件",
u"健": u"健",
u"舰": u"艦",
u"剑": u"劍",
u"饯": u"餞",
u"渐": u"漸",
u"溅": u"濺",
u"涧": u"澗",
u"建": u"建",
u"僵": u"僵",
u"姜": u"姜",
u"将": u"將",
u"浆": u"漿",
u"江": u"江",
u"疆": u"疆",
u"蒋": u"蔣",
u"桨": u"槳",
u"奖": u"獎",
u"讲": u"講",
u"匠": u"匠",
u"酱": u"醬",
u"降": u"降",
u"蕉": u"蕉",
u"椒": u"椒",
u"礁": u"礁",
u"焦": u"焦",
u"胶": u"膠",
u"交": u"交",
u"郊": u"郊",
u"浇": u"澆",
u"骄": u"驕",
u"娇": u"嬌",
u"嚼": u"嚼",
u"搅": u"攪",
u"铰": u"鉸",
u"矫": u"矯",
u"侥": u"僥",
u"脚": u"腳",
u"狡": u"狡",
u"角": u"角",
u"饺": u"餃",
u"缴": u"繳",
u"绞": u"絞",
u"剿": u"剿",
u"教": u"教",
u"酵": u"酵",
u"轿": u"轎",
u"较": u"較",
u"叫": u"叫",
u"窖": u"窖",
u"揭": u"揭",
u"接": u"接",
u"皆": u"皆",
u"秸": u"稭",
u"街": u"街",
u"阶": u"階",
u"截": u"截",
u"劫": u"劫",
u"节": u"節",
u"茎": u"莖",
u"睛": u"睛",
u"晶": u"晶",
u"鲸": u"鯨",
u"京": u"京",
u"惊": u"驚",
u"精": u"精",
u"粳": u"粳",
u"经": u"經",
u"井": u"井",
u"警": u"警",
u"景": u"景",
u"颈": u"頸",
u"静": u"靜",
u"境": u"境",
u"敬": u"敬",
u"镜": u"鏡",
u"径": u"徑",
u"痉": u"痙",
u"靖": u"靖",
u"竟": u"竟",
u"竞": u"競",
u"净": u"凈",
u"炯": u"炯",
u"窘": u"窘",
u"揪": u"揪",
u"究": u"究",
u"纠": u"糾",
u"玖": u"玖",
u"韭": u"韭",
u"久": u"久",
u"灸": u"灸",
u"九": u"九",
u"酒": u"酒",
u"厩": u"廄",
u"救": u"救",
u"旧": u"舊",
u"臼": u"臼",
u"舅": u"舅",
u"咎": u"咎",
u"就": u"就",
u"疚": u"疚",
u"鞠": u"鞠",
u"拘": u"拘",
u"狙": u"狙",
u"疽": u"疽",
u"居": u"居",
u"驹": u"駒",
u"菊": u"菊",
u"局": u"局",
u"咀": u"咀",
u"矩": u"矩",
u"举": u"舉",
u"沮": u"沮",
u"聚": u"聚",
u"拒": u"拒",
u"据": u"據",
u"巨": u"巨",
u"具": u"具",
u"距": u"距",
u"踞": u"踞",
u"锯": u"鋸",
u"俱": u"俱",
u"句": u"句",
u"惧": u"懼",
u"炬": u"炬",
u"剧": u"劇",
u"捐": u"捐",
u"鹃": u"鵑",
u"娟": u"娟",
u"倦": u"倦",
u"眷": u"眷",
u"卷": u"卷",
u"绢": u"絹",
u"撅": u"撅",
u"攫": u"攫",
u"抉": u"抉",
u"掘": u"掘",
u"倔": u"倔",
u"爵": u"爵",
u"桔": u"桔",
u"杰": u"傑",
u"捷": u"捷",
u"睫": u"睫",
u"竭": u"竭",
u"洁": u"潔",
u"结": u"結",
u"解": u"解",
u"姐": u"姐",
u"戒": u"戒",
u"藉": u"藉",
u"芥": u"芥",
u"界": u"界",
u"借": u"借",
u"介": u"介",
u"疥": u"疥",
u"诫": u"誡",
u"届": u"屆",
u"巾": u"巾",
u"筋": u"筋",
u"斤": u"斤",
u"金": u"金",
u"今": u"今",
u"津": u"津",
u"襟": u"襟",
u"紧": u"緊",
u"锦": u"錦",
u"仅": u"僅",
u"谨": u"謹",
u"进": u"進",
u"靳": u"靳",
u"晋": u"晉",
u"禁": u"禁",
u"近": u"近",
u"烬": u"燼",
u"浸": u"浸",
u"尽": u"盡",
u"劲": u"勁",
u"荆": u"荊",
u"兢": u"兢",
u"觉": u"覺",
u"决": u"決",
u"诀": u"訣",
u"绝": u"絕",
u"均": u"均",
u"菌": u"菌",
u"钧": u"鈞",
u"军": u"軍",
u"君": u"君",
u"峻": u"峻",
u"俊": u"俊",
u"竣": u"竣",
u"浚": u"浚",
u"郡": u"郡",
u"骏": u"駿",
u"喀": u"喀",
u"咖": u"咖",
u"卡": u"卡",
u"咯": u"咯",
u"开": u"開",
u"揩": u"揩",
u"楷": u"楷",
u"凯": u"凱",
u"慨": u"慨",
u"刊": u"刊",
u"堪": u"堪",
u"勘": u"勘",
u"坎": u"坎",
u"砍": u"砍",
u"看": u"看",
u"康": u"康",
u"慷": u"慷",
u"糠": u"糠",
u"扛": u"扛",
u"抗": u"抗",
u"亢": u"亢",
u"炕": u"炕",
u"考": u"考",
u"拷": u"拷",
u"烤": u"烤",
u"靠": u"靠",
u"坷": u"坷",
u"苛": u"苛",
u"柯": u"柯",
u"棵": u"棵",
u"磕": u"磕",
u"颗": u"顆",
u"科": u"科",
u"壳": u"殼",
u"咳": u"咳",
u"可": u"可",
u"渴": u"渴",
u"克": u"克",
u"刻": u"刻",
u"客": u"客",
u"课": u"課",
u"肯": u"肯",
u"啃": u"啃",
u"垦": u"墾",
u"恳": u"懇",
u"坑": u"坑",
u"吭": u"吭",
u"空": u"空",
u"恐": u"恐",
u"孔": u"孔",
u"控": u"控",
u"抠": u"摳",
u"口": u"口",
u"扣": u"扣",
u"寇": u"寇",
u"枯": u"枯",
u"哭": u"哭",
u"窟": u"窟",
u"苦": u"苦",
u"酷": u"酷",
u"库": u"庫",
u"裤": u"褲",
u"夸": u"誇",
u"垮": u"垮",
u"挎": u"挎",
u"跨": u"跨",
u"胯": u"胯",
u"块": u"塊",
u"筷": u"筷",
u"侩": u"儈",
u"快": u"快",
u"宽": u"寬",
u"款": u"款",
u"匡": u"匡",
u"筐": u"筐",
u"狂": u"狂",
u"框": u"框",
u"矿": u"礦",
u"眶": u"眶",
u"旷": u"曠",
u"况": u"況",
u"亏": u"虧",
u"盔": u"盔",
u"岿": u"巋",
u"窥": u"窺",
u"葵": u"葵",
u"奎": u"奎",
u"魁": u"魁",
u"傀": u"傀",
u"馈": u"饋",
u"愧": u"愧",
u"溃": u"潰",
u"坤": u"坤",
u"昆": u"昆",
u"捆": u"捆",
u"困": u"困",
u"括": u"括",
u"扩": u"擴",
u"廓": u"廓",
u"阔": u"闊",
u"垃": u"垃",
u"拉": u"拉",
u"喇": u"喇",
u"蜡": u"蠟",
u"腊": u"臘",
u"辣": u"辣",
u"啦": u"啦",
u"莱": u"萊",
u"来": u"來",
u"赖": u"賴",
u"蓝": u"藍",
u"婪": u"婪",
u"栏": u"欄",
u"拦": u"攔",
u"篮": u"籃",
u"阑": u"闌",
u"兰": u"蘭",
u"澜": u"瀾",
u"谰": u"讕",
u"揽": u"攬",
u"览": u"覽",
u"懒": u"懶",
u"缆": u"纜",
u"烂": u"爛",
u"滥": u"濫",
u"琅": u"瑯",
u"榔": u"榔",
u"狼": u"狼",
u"廊": u"廊",
u"郎": u"郎",
u"朗": u"朗",
u"浪": u"浪",
u"捞": u"撈",
u"劳": u"勞",
u"牢": u"牢",
u"老": u"老",
u"佬": u"佬",
u"姥": u"姥",
u"酪": u"酪",
u"烙": u"烙",
u"涝": u"澇",
u"勒": u"勒",
u"乐": u"樂",
u"雷": u"雷",
u"镭": u"鐳",
u"蕾": u"蕾",
u"磊": u"磊",
u"累": u"累",
u"儡": u"儡",
u"垒": u"壘",
u"擂": u"擂",
u"肋": u"肋",
u"类": u"類",
u"泪": u"淚",
u"棱": u"棱",
u"楞": u"楞",
u"冷": u"冷",
u"厘": u"厘",
u"梨": u"梨",
u"犁": u"犁",
u"黎": u"黎",
u"篱": u"籬",
u"狸": u"貍",
u"离": u"離",
u"漓": u"漓",
u"理": u"理",
u"李": u"李",
u"里": u"裏",
u"鲤": u"鯉",
u"礼": u"禮",
u"莉": u"莉",
u"荔": u"荔",
u"吏": u"吏",
u"栗": u"栗",
u"丽": u"麗",
u"厉": u"厲",
u"励": u"勵",
u"砾": u"礫",
u"历": u"歷",
u"利": u"利",
u"傈": u"傈",
u"例": u"例",
u"俐": u"俐",
u"痢": u"痢",
u"立": u"立",
u"粒": u"粒",
u"沥": u"瀝",
u"隶": u"隸",
u"力": u"力",
u"璃": u"璃",
u"哩": u"哩",
u"俩": u"倆",
u"联": u"聯",
u"莲": u"蓮",
u"连": u"連",
u"镰": u"鐮",
u"廉": u"廉",
u"怜": u"憐",
u"涟": u"漣",
u"帘": u"簾",
u"敛": u"斂",
u"脸": u"臉",
u"链": u"鏈",
u"恋": u"戀",
u"炼": u"煉",
u"练": u"練",
u"粮": u"糧",
u"凉": u"涼",
u"梁": u"梁",
u"粱": u"粱",
u"良": u"良",
u"两": u"兩",
u"辆": u"輛",
u"量": u"量",
u"晾": u"晾",
u"亮": u"亮",
u"谅": u"諒",
u"撩": u"撩",
u"聊": u"聊",
u"僚": u"僚",
u"疗": u"療",
u"燎": u"燎",
u"寥": u"寥",
u"辽": u"遼",
u"潦": u"潦",
u"了": u"了",
u"撂": u"撂",
u"镣": u"鐐",
u"廖": u"廖",
u"料": u"料",
u"列": u"列",
u"裂": u"裂",
u"烈": u"烈",
u"劣": u"劣",
u"猎": u"獵",
u"琳": u"琳",
u"林": u"林",
u"磷": u"磷",
u"霖": u"霖",
u"临": u"臨",
u"邻": u"鄰",
u"鳞": u"鱗",
u"淋": u"淋",
u"凛": u"凜",
u"赁": u"賃",
u"吝": u"吝",
u"拎": u"拎",
u"玲": u"玲",
u"菱": u"菱",
u"零": u"零",
u"龄": u"齡",
u"铃": u"鈴",
u"伶": u"伶",
u"羚": u"羚",
u"凌": u"淩",
u"灵": u"靈",
u"陵": u"陵",
u"岭": u"嶺",
u"领": u"領",
u"另": u"另",
u"令": u"令",
u"溜": u"溜",
u"琉": u"琉",
u"榴": u"榴",
u"硫": u"硫",
u"馏": u"餾",
u"留": u"留",
u"刘": u"劉",
u"瘤": u"瘤",
u"流": u"流",
u"柳": u"柳",
u"六": u"六",
u"龙": u"龍",
u"聋": u"聾",
u"咙": u"嚨",
u"笼": u"籠",
u"窿": u"窿",
u"隆": u"隆",
u"垄": u"壟",
u"拢": u"攏",
u"陇": u"隴",
u"楼": u"樓",
u"娄": u"婁",
u"搂": u"摟",
u"篓": u"簍",
u"漏": u"漏",
u"陋": u"陋",
u"芦": u"蘆",
u"卢": u"盧",
u"颅": u"顱",
u"庐": u"廬",
u"炉": u"爐",
u"掳": u"擄",
u"卤": u"鹵",
u"虏": u"虜",
u"鲁": u"魯",
u"麓": u"麓",
u"碌": u"碌",
u"露": u"露",
u"路": u"路",
u"赂": u"賂",
u"鹿": u"鹿",
u"潞": u"潞",
u"禄": u"祿",
u"录": u"錄",
u"陆": u"陸",
u"戮": u"戮",
u"驴": u"驢",
u"吕": u"呂",
u"铝": u"鋁",
u"侣": u"侶",
u"旅": u"旅",
u"履": u"履",
u"屡": u"屢",
u"缕": u"縷",
u"虑": u"慮",
u"氯": u"氯",
u"律": u"律",
u"率": u"率",
u"滤": u"濾",
u"绿": u"綠",
u"峦": u"巒",
u"挛": u"攣",
u"孪": u"孿",
u"滦": u"灤",
u"卵": u"卵",
u"乱": u"亂",
u"掠": u"掠",
u"略": u"略",
u"抡": u"掄",
u"轮": u"輪",
u"伦": u"倫",
u"仑": u"侖",
u"沦": u"淪",
u"纶": u"綸",
u"论": u"論",
u"萝": u"蘿",
u"螺": u"螺",
u"罗": u"羅",
u"逻": u"邏",
u"锣": u"鑼",
u"箩": u"籮",
u"骡": u"騾",
u"裸": u"裸",
u"落": u"落",
u"洛": u"洛",
u"骆": u"駱",
u"络": u"絡",
u"妈": u"媽",
u"麻": u"麻",
u"玛": u"瑪",
u"码": u"碼",
u"蚂": u"螞",
u"马": u"馬",
u"骂": u"罵",
u"嘛": u"嘛",
u"吗": u"嗎",
u"埋": u"埋",
u"买": u"買",
u"麦": u"麥",
u"卖": u"賣",
u"迈": u"邁",
u"脉": u"脈",
u"瞒": u"瞞",
u"馒": u"饅",
u"蛮": u"蠻",
u"满": u"滿",
u"蔓": u"蔓",
u"曼": u"曼",
u"慢": u"慢",
u"漫": u"漫",
u"谩": u"謾",
u"芒": u"芒",
u"茫": u"茫",
u"盲": u"盲",
u"氓": u"氓",
u"忙": u"忙",
u"莽": u"莽",
u"猫": u"貓",
u"茅": u"茅",
u"锚": u"錨",
u"毛": u"毛",
u"矛": u"矛",
u"铆": u"鉚",
u"卯": u"卯",
u"茂": u"茂",
u"冒": u"冒",
u"帽": u"帽",
u"貌": u"貌",
u"贸": u"貿",
u"么": u"麽",
u"玫": u"玫",
u"枚": u"枚",
u"梅": u"梅",
u"酶": u"酶",
u"霉": u"黴",
u"煤": u"煤",
u"没": u"沒",
u"眉": u"眉",
u"媒": u"媒",
u"镁": u"鎂",
u"每": u"每",
u"美": u"美",
u"昧": u"昧",
u"寐": u"寐",
u"妹": u"妹",
u"媚": u"媚",
u"门": u"門",
u"闷": u"悶",
u"们": u"們",
u"萌": u"萌",
u"蒙": u"蒙",
u"檬": u"檬",
u"盟": u"盟",
u"锰": u"錳",
u"猛": u"猛",
u"梦": u"夢",
u"孟": u"孟",
u"眯": u"瞇",
u"醚": u"醚",
u"靡": u"靡",
u"糜": u"糜",
u"迷": u"迷",
u"谜": u"謎",
u"弥": u"彌",
u"米": u"米",
u"秘": u"秘",
u"觅": u"覓",
u"泌": u"泌",
u"蜜": u"蜜",
u"密": u"密",
u"幂": u"冪",
u"棉": u"棉",
u"眠": u"眠",
u"绵": u"綿",
u"冕": u"冕",
u"免": u"免",
u"勉": u"勉",
u"娩": u"娩",
u"缅": u"緬",
u"面": u"面",
u"苗": u"苗",
u"描": u"描",
u"瞄": u"瞄",
u"藐": u"藐",
u"秒": u"秒",
u"渺": u"渺",
u"庙": u"廟",
u"妙": u"妙",
u"蔑": u"蔑",
u"灭": u"滅",
u"民": u"民",
u"抿": u"抿",
u"皿": u"皿",
u"敏": u"敏",
u"悯": u"憫",
u"闽": u"閩",
u"明": u"明",
u"螟": u"螟",
u"鸣": u"鳴",
u"铭": u"銘",
u"名": u"名",
u"命": u"命",
u"谬": u"謬",
u"摸": u"摸",
u"摹": u"摹",
u"蘑": u"蘑",
u"模": u"模",
u"膜": u"膜",
u"磨": u"磨",
u"摩": u"摩",
u"魔": u"魔",
u"抹": u"抹",
u"末": u"末",
u"莫": u"莫",
u"墨": u"墨",
u"默": u"默",
u"沫": u"沫",
u"漠": u"漠",
u"寞": u"寞",
u"陌": u"陌",
u"谋": u"謀",
u"牟": u"牟",
u"某": u"某",
u"拇": u"拇",
u"牡": u"牡",
u"亩": u"畝",
u"姆": u"姆",
u"母": u"母",
u"墓": u"墓",
u"暮": u"暮",
u"幕": u"幕",
u"募": u"募",
u"慕": u"慕",
u"木": u"木",
u"目": u"目",
u"睦": u"睦",
u"牧": u"牧",
u"穆": u"穆",
u"拿": u"拿",
u"哪": u"哪",
u"呐": u"吶",
u"钠": u"鈉",
u"那": u"那",
u"娜": u"娜",
u"纳": u"納",
u"氖": u"氖",
u"乃": u"乃",
u"奶": u"奶",
u"耐": u"耐",
u"奈": u"奈",
u"南": u"南",
u"男": u"男",
u"难": u"難",
u"囊": u"囊",
u"挠": u"撓",
u"脑": u"腦",
u"恼": u"惱",
u"闹": u"鬧",
u"淖": u"淖",
u"呢": u"呢",
u"馁": u"餒",
u"内": u"內",
u"嫩": u"嫩",
u"能": u"能",
u"妮": u"妮",
u"霓": u"霓",
u"倪": u"倪",
u"泥": u"泥",
u"尼": u"尼",
u"拟": u"擬",
u"你": u"妳",
u"匿": u"匿",
u"腻": u"膩",
u"逆": u"逆",
u"溺": u"溺",
u"蔫": u"蔫",
u"拈": u"拈",
u"年": u"年",
u"碾": u"碾",
u"撵": u"攆",
u"捻": u"撚",
u"念": u"念",
u"娘": u"娘",
u"酿": u"釀",
u"鸟": u"鳥",
u"尿": u"尿",
u"捏": u"捏",
u"聂": u"聶",
u"孽": u"孽",
u"啮": u"嚙",
u"镊": u"鑷",
u"镍": u"鎳",
u"涅": u"涅",
u"您": u"您",
u"柠": u"檸",
u"狞": u"獰",
u"凝": u"凝",
u"宁": u"寧",
u"拧": u"擰",
u"泞": u"濘",
u"牛": u"牛",
u"扭": u"扭",
u"钮": u"鈕",
u"纽": u"紐",
u"脓": u"膿",
u"浓": u"濃",
u"农": u"農",
u"弄": u"弄",
u"奴": u"奴",
u"努": u"努",
u"怒": u"怒",
u"女": u"女",
u"暖": u"暖",
u"虐": u"虐",
u"疟": u"瘧",
u"挪": u"挪",
u"懦": u"懦",
u"糯": u"糯",
u"诺": u"諾",
u"哦": u"哦",
u"欧": u"歐",
u"鸥": u"鷗",
u"殴": u"毆",
u"藕": u"藕",
u"呕": u"嘔",
u"偶": u"偶",
u"沤": u"漚",
u"啪": u"啪",
u"趴": u"趴",
u"爬": u"爬",
u"帕": u"帕",
u"怕": u"怕",
u"琶": u"琶",
u"拍": u"拍",
u"排": u"排",
u"牌": u"牌",
u"徘": u"徘",
u"湃": u"湃",
u"派": u"派",
u"攀": u"攀",
u"潘": u"潘",
u"盘": u"盤",
u"磐": u"磐",
u"盼": u"盼",
u"畔": u"畔",
u"判": u"判",
u"叛": u"叛",
u"乓": u"乓",
u"庞": u"龐",
u"旁": u"旁",
u"耪": u"耪",
u"胖": u"胖",
u"抛": u"拋",
u"咆": u"咆",
u"刨": u"刨",
u"炮": u"炮",
u"袍": u"袍",
u"跑": u"跑",
u"泡": u"泡",
u"呸": u"呸",
u"胚": u"胚",
u"培": u"培",
u"裴": u"裴",
u"赔": u"賠",
u"陪": u"陪",
u"配": u"配",
u"佩": u"佩",
u"沛": u"沛",
u"喷": u"噴",
u"盆": u"盆",
u"砰": u"砰",
u"抨": u"抨",
u"烹": u"烹",
u"澎": u"澎",
u"彭": u"彭",
u"蓬": u"蓬",
u"棚": u"棚",
u"硼": u"硼",
u"篷": u"篷",
u"膨": u"膨",
u"朋": u"朋",
u"鹏": u"鵬",
u"捧": u"捧",
u"碰": u"碰",
u"坯": u"坯",
u"砒": u"砒",
u"霹": u"霹",
u"批": u"批",
u"披": u"披",
u"劈": u"劈",
u"琵": u"琵",
u"毗": u"毗",
u"啤": u"啤",
u"脾": u"脾",
u"疲": u"疲",
u"皮": u"皮",
u"匹": u"匹",
u"痞": u"痞",
u"僻": u"僻",
u"屁": u"屁",
u"譬": u"譬",
u"篇": u"篇",
u"偏": u"偏",
u"片": u"片",
u"骗": u"騙",
u"飘": u"飄",
u"漂": u"漂",
u"瓢": u"瓢",
u"票": u"票",
u"撇": u"撇",
u"瞥": u"瞥",
u"拼": u"拼",
u"频": u"頻",
u"贫": u"貧",
u"品": u"品",
u"聘": u"聘",
u"乒": u"乒",
u"坪": u"坪",
u"苹": u"蘋",
u"萍": u"萍",
u"平": u"平",
u"凭": u"憑",
u"瓶": u"瓶",
u"评": u"評",
u"屏": u"屏",
u"坡": u"坡",
u"泼": u"潑",
u"颇": u"頗",
u"婆": u"婆",
u"破": u"破",
u"魄": u"魄",
u"迫": u"迫",
u"粕": u"粕",
u"剖": u"剖",
u"扑": u"撲",
u"铺": u"鋪",
u"仆": u"仆",
u"莆": u"莆",
u"葡": u"葡",
u"菩": u"菩",
u"蒲": u"蒲",
u"埔": u"埔",
u"朴": u"樸",
u"圃": u"圃",
u"普": u"普",
u"浦": u"浦",
u"谱": u"譜",
u"曝": u"曝",
u"瀑": u"瀑",
u"期": u"期",
u"欺": u"欺",
u"栖": u"棲",
u"戚": u"戚",
u"妻": u"妻",
u"七": u"七",
u"凄": u"淒",
u"漆": u"漆",
u"柒": u"柒",
u"沏": u"沏",
u"其": u"其",
u"棋": u"棋",
u"奇": u"奇",
u"歧": u"歧",
u"畦": u"畦",
u"崎": u"崎",
u"脐": u"臍",
u"齐": u"齊",
u"旗": u"旗",
u"祈": u"祈",
u"祁": u"祁",
u"骑": u"騎",
u"起": u"起",
u"岂": u"豈",
u"乞": u"乞",
u"企": u"企",
u"启": u"啟",
u"契": u"契",
u"砌": u"砌",
u"器": u"器",
u"气": u"氣",
u"迄": u"迄",
u"弃": u"棄",
u"汽": u"汽",
u"泣": u"泣",
u"讫": u"訖",
u"掐": u"掐",
u"洽": u"洽",
u"牵": u"牽",
u"扦": u"扡",
u"钎": u"釬",
u"铅": u"鉛",
u"千": u"千",
u"迁": u"遷",
u"签": u"簽",
u"仟": u"仟",
u"谦": u"謙",
u"乾": u"乾",
u"黔": u"黔",
u"钱": u"錢",
u"钳": u"鉗",
u"前": u"前",
u"潜": u"潛",
u"遣": u"遣",
u"浅": u"淺",
u"谴": u"譴",
u"堑": u"塹",
u"嵌": u"嵌",
u"欠": u"欠",
u"歉": u"歉",
u"枪": u"槍",
u"呛": u"嗆",
u"腔": u"腔",
u"羌": u"羌",
u"墙": u"墻",
u"蔷": u"薔",
u"强": u"強",
u"抢": u"搶",
u"橇": u"橇",
u"锹": u"鍬",
u"敲": u"敲",
u"悄": u"悄",
u"桥": u"橋",
u"瞧": u"瞧",
u"乔": u"喬",
u"侨": u"僑",
u"巧": u"巧",
u"鞘": u"鞘",
u"撬": u"撬",
u"翘": u"翹",
u"峭": u"峭",
u"俏": u"俏",
u"窍": u"竅",
u"切": u"切",
u"茄": u"茄",
u"且": u"且",
u"怯": u"怯",
u"窃": u"竊",
u"钦": u"欽",
u"侵": u"侵",
u"亲": u"親",
u"秦": u"秦",
u"琴": u"琴",
u"勤": u"勤",
u"芹": u"芹",
u"擒": u"擒",
u"禽": u"禽",
u"寝": u"寢",
u"沁": u"沁",
u"青": u"青",
u"轻": u"輕",
u"氢": u"氫",
u"倾": u"傾",
u"卿": u"卿",
u"清": u"清",
u"擎": u"擎",
u"晴": u"晴",
u"氰": u"氰",
u"情": u"情",
u"顷": u"頃",
u"请": u"請",
u"庆": u"慶",
u"琼": u"瓊",
u"穷": u"窮",
u"秋": u"秋",
u"丘": u"丘",
u"邱": u"邱",
u"球": u"球",
u"求": u"求",
u"囚": u"囚",
u"酋": u"酋",
u"泅": u"泅",
u"趋": u"趨",
u"区": u"區",
u"蛆": u"蛆",
u"曲": u"曲",
u"躯": u"軀",
u"屈": u"屈",
u"驱": u"驅",
u"渠": u"渠",
u"取": u"取",
u"娶": u"娶",
u"龋": u"齲",
u"趣": u"趣",
u"去": u"去",
u"圈": u"圈",
u"颧": u"顴",
u"权": u"權",
u"醛": u"醛",
u"泉": u"泉",
u"全": u"全",
u"痊": u"痊",
u"拳": u"拳",
u"犬": u"犬",
u"券": u"券",
u"劝": u"勸",
u"缺": u"缺",
u"炔": u"炔",
u"瘸": u"瘸",
u"却": u"卻",
u"鹊": u"鵲",
u"榷": u"榷",
u"确": u"確",
u"雀": u"雀",
u"裙": u"裙",
u"群": u"群",
u"然": u"然",
u"燃": u"燃",
u"冉": u"冉",
u"染": u"染",
u"瓤": u"瓤",
u"壤": u"壤",
u"攘": u"攘",
u"嚷": u"嚷",
u"让": u"讓",
u"饶": u"饒",
u"扰": u"擾",
u"绕": u"繞",
u"惹": u"惹",
u"热": u"熱",
u"壬": u"壬",
u"仁": u"仁",
u"人": u"人",
u"忍": u"忍",
u"韧": u"韌",
u"任": u"任",
u"认": u"認",
u"刃": u"刃",
u"妊": u"妊",
u"纫": u"紉",
u"扔": u"扔",
u"仍": u"仍",
u"日": u"日",
u"戎": u"戎",
u"茸": u"茸",
u"蓉": u"蓉",
u"荣": u"榮",
u"融": u"融",
u"熔": u"熔",
u"溶": u"溶",
u"容": u"容",
u"绒": u"絨",
u"冗": u"冗",
u"揉": u"揉",
u"柔": u"柔",
u"肉": u"肉",
u"茹": u"茹",
u"蠕": u"蠕",
u"儒": u"儒",
u"孺": u"孺",
u"如": u"如",
u"辱": u"辱",
u"乳": u"乳",
u"汝": u"汝",
u"入": u"入",
u"褥": u"褥",
u"软": u"軟",
u"阮": u"阮",
u"蕊": u"蕊",
u"瑞": u"瑞",
u"锐": u"銳",
u"闰": u"閏",
u"润": u"潤",
u"若": u"若",
u"弱": u"弱",
u"撒": u"撒",
u"洒": u"灑",
u"萨": u"薩",
u"腮": u"腮",
u"鳃": u"鰓",
u"塞": u"塞",
u"赛": u"賽",
u"三": u"三",
u"叁": u"三",
u"伞": u"傘",
u"散": u"散",
u"桑": u"桑",
u"嗓": u"嗓",
u"丧": u"喪",
u"搔": u"搔",
u"骚": u"騷",
u"扫": u"掃",
u"嫂": u"嫂",
u"瑟": u"瑟",
u"色": u"色",
u"涩": u"澀",
u"森": u"森",
u"僧": u"僧",
u"莎": u"莎",
u"砂": u"砂",
u"杀": u"殺",
u"刹": u"剎",
u"沙": u"沙",
u"纱": u"紗",
u"傻": u"傻",
u"啥": u"啥",
u"煞": u"煞",
u"筛": u"篩",
u"晒": u"曬",
u"珊": u"珊",
u"苫": u"苫",
u"杉": u"杉",
u"山": u"山",
u"删": u"刪",
u"煽": u"煽",
u"衫": u"衫",
u"闪": u"閃",
u"陕": u"陜",
u"擅": u"擅",
u"赡": u"贍",
u"膳": u"膳",
u"善": u"善",
u"汕": u"汕",
u"扇": u"扇",
u"缮": u"繕",
u"墒": u"墑",
u"伤": u"傷",
u"商": u"商",
u"赏": u"賞",
u"晌": u"晌",
u"上": u"上",
u"尚": u"尚",
u"裳": u"裳",
u"梢": u"梢",
u"捎": u"捎",
u"稍": u"稍",
u"烧": u"燒",
u"芍": u"芍",
u"勺": u"勺",
u"韶": u"韶",
u"少": u"少",
u"哨": u"哨",
u"邵": u"邵",
u"绍": u"紹",
u"奢": u"奢",
u"赊": u"賒",
u"蛇": u"蛇",
u"舌": u"舌",
u"舍": u"舍",
u"赦": u"赦",
u"摄": u"攝",
u"射": u"射",
u"慑": u"懾",
u"涉": u"涉",
u"社": u"社",
u"设": u"設",
u"砷": u"砷",
u"申": u"申",
u"呻": u"呻",
u"伸": u"伸",
u"身": u"身",
u"深": u"深",
u"娠": u"娠",
u"绅": u"紳",
u"神": u"神",
u"沈": u"沈",
u"审": u"審",
u"婶": u"嬸",
u"甚": u"甚",
u"肾": u"腎",
u"慎": u"慎",
u"渗": u"滲",
u"声": u"聲",
u"生": u"生",
u"甥": u"甥",
u"牲": u"牲",
u"升": u"升",
u"绳": u"繩",
u"省": u"省",
u"盛": u"盛",
u"剩": u"剩",
u"胜": u"勝",
u"圣": u"聖",
u"师": u"師",
u"失": u"失",
u"狮": u"獅",
u"施": u"施",
u"湿": u"濕",
u"诗": u"詩",
u"尸": u"屍",
u"虱": u"虱",
u"十": u"十",
u"石": u"石",
u"拾": u"拾",
u"时": u"時",
u"什": u"什",
u"食": u"食",
u"蚀": u"蝕",
u"实": u"實",
u"识": u"識",
u"史": u"史",
u"矢": u"矢",
u"使": u"使",
u"屎": u"屎",
u"驶": u"駛",
u"始": u"始",
u"式": u"式",
u"示": u"示",
u"士": u"士",
u"世": u"世",
u"柿": u"柿",
u"事": u"事",
u"拭": u"拭",
u"誓": u"誓",
u"逝": u"逝",
u"势": u"勢",
u"是": u"是",
u"嗜": u"嗜",
u"噬": u"噬",
u"适": u"適",
u"仕": u"仕",
u"侍": u"侍",
u"释": u"釋",
u"饰": u"飾",
u"氏": u"氏",
u"市": u"市",
u"恃": u"恃",
u"室": u"室",
u"视": u"視",
u"试": u"試",
u"收": u"收",
u"手": u"手",
u"首": u"首",
u"守": u"守",
u"寿": u"壽",
u"授": u"授",
u"售": u"售",
u"受": u"受",
u"瘦": u"瘦",
u"兽": u"獸",
u"蔬": u"蔬",
u"枢": u"樞",
u"梳": u"梳",
u"殊": u"殊",
u"抒": u"抒",
u"输": u"輸",
u"叔": u"叔",
u"舒": u"舒",
u"淑": u"淑",
u"疏": u"疏",
u"书": u"書",
u"赎": u"贖",
u"孰": u"孰",
u"熟": u"熟",
u"薯": u"薯",
u"暑": u"暑",
u"曙": u"曙",
u"署": u"署",
u"蜀": u"蜀",
u"黍": u"黍",
u"鼠": u"鼠",
u"属": u"屬",
u"术": u"術",
u"述": u"述",
u"树": u"樹",
u"束": u"束",
u"戍": u"戍",
u"竖": u"豎",
u"墅": u"墅",
u"庶": u"庶",
u"数": u"數",
u"漱": u"漱",
u"恕": u"恕",
u"刷": u"刷",
u"耍": u"耍",
u"摔": u"摔",
u"衰": u"衰",
u"甩": u"甩",
u"帅": u"帥",
u"栓": u"栓",
u"拴": u"拴",
u"霜": u"霜",
u"双": u"雙",
u"爽": u"爽",
u"谁": u"誰",
u"水": u"水",
u"睡": u"睡",
u"税": u"稅",
u"吮": u"吮",
u"瞬": u"瞬",
u"顺": u"順",
u"舜": u"舜",
u"说": u"說",
u"硕": u"碩",
u"朔": u"朔",
u"烁": u"爍",
u"斯": u"斯",
u"撕": u"撕",
u"嘶": u"嘶",
u"思": u"思",
u"私": u"私",
u"司": u"司",
u"丝": u"絲",
u"死": u"死",
u"肆": u"肆",
u"寺": u"寺",
u"嗣": u"嗣",
u"四": u"四",
u"伺": u"伺",
u"似": u"似",
u"饲": u"飼",
u"巳": u"巳",
u"松": u"松",
u"耸": u"聳",
u"怂": u"慫",
u"颂": u"頌",
u"送": u"送",
u"宋": u"宋",
u"讼": u"訟",
u"诵": u"誦",
u"搜": u"搜",
u"艘": u"艘",
u"擞": u"擻",
u"嗽": u"嗽",
u"苏": u"蘇",
u"酥": u"酥",
u"俗": u"俗",
u"素": u"素",
u"速": u"速",
u"粟": u"粟",
u"僳": u"僳",
u"塑": u"塑",
u"溯": u"溯",
u"宿": u"宿",
u"诉": u"訴",
u"肃": u"肅",
u"酸": u"酸",
u"蒜": u"蒜",
u"算": u"算",
u"虽": u"雖",
u"隋": u"隋",
u"随": u"隨",
u"绥": u"綏",
u"髓": u"髓",
u"碎": u"碎",
u"岁": u"歲",
u"穗": u"穗",
u"遂": u"遂",
u"隧": u"隧",
u"祟": u"祟",
u"孙": u"孫",
u"损": u"損",
u"笋": u"筍",
u"蓑": u"蓑",
u"梭": u"梭",
u"唆": u"唆",
u"缩": u"縮",
u"琐": u"瑣",
u"索": u"索",
u"锁": u"鎖",
u"所": u"所",
u"塌": u"塌",
u"他": u"他",
u"它": u"它",
u"她": u"她",
u"塔": u"塔",
u"獭": u"獺",
u"挞": u"撻",
u"蹋": u"蹋",
u"踏": u"踏",
u"胎": u"胎",
u"苔": u"苔",
u"抬": u"擡",
u"台": u"臺",
u"泰": u"泰",
u"酞": u"酞",
u"太": u"太",
u"态": u"態",
u"汰": u"汰",
u"坍": u"坍",
u"摊": u"攤",
u"贪": u"貪",
u"瘫": u"癱",
u"滩": u"灘",
u"坛": u"壇",
u"檀": u"檀",
u"痰": u"痰",
u"潭": u"潭",
u"谭": u"譚",
u"谈": u"談",
u"坦": u"坦",
u"毯": u"毯",
u"袒": u"袒",
u"碳": u"碳",
u"探": u"探",
u"叹": u"嘆",
u"炭": u"炭",
u"汤": u"湯",
u"塘": u"塘",
u"搪": u"搪",
u"堂": u"堂",
u"棠": u"棠",
u"膛": u"膛",
u"唐": u"唐",
u"糖": u"糖",
u"倘": u"倘",
u"躺": u"躺",
u"淌": u"淌",
u"趟": u"趟",
u"烫": u"燙",
u"掏": u"掏",
u"涛": u"濤",
u"滔": u"滔",
u"绦": u"絳",
u"萄": u"萄",
u"桃": u"桃",
u"逃": u"逃",
u"淘": u"淘",
u"陶": u"陶",
u"讨": u"討",
u"套": u"套",
u"特": u"特",
u"藤": u"藤",
u"腾": u"騰",
u"疼": u"疼",
u"誊": u"謄",
u"梯": u"梯",
u"剔": u"剔",
u"踢": u"踢",
u"锑": u"銻",
u"提": u"提",
u"题": u"題",
u"蹄": u"蹄",
u"啼": u"啼",
u"体": u"體",
u"替": u"替",
u"嚏": u"嚏",
u"惕": u"惕",
u"涕": u"涕",
u"剃": u"剃",
u"屉": u"屜",
u"天": u"天",
u"添": u"添",
u"填": u"填",
u"田": u"田",
u"甜": u"甜",
u"恬": u"恬",
u"舔": u"舔",
u"腆": u"腆",
u"挑": u"挑",
u"条": u"條",
u"迢": u"迢",
u"眺": u"眺",
u"跳": u"跳",
u"贴": u"貼",
u"铁": u"鐵",
u"帖": u"帖",
u"厅": u"廳",
u"听": u"聽",
u"烃": u"烴",
u"汀": u"汀",
u"廷": u"廷",
u"停": u"停",
u"亭": u"亭",
u"庭": u"庭",
u"挺": u"挺",
u"艇": u"艇",
u"通": u"通",
u"桐": u"桐",
u"酮": u"酮",
u"瞳": u"瞳",
u"同": u"同",
u"铜": u"銅",
u"彤": u"彤",
u"童": u"童",
u"桶": u"桶",
u"捅": u"捅",
u"筒": u"筒",
u"统": u"統",
u"痛": u"痛",
u"偷": u"偷",
u"投": u"投",
u"头": u"頭",
u"透": u"透",
u"凸": u"凸",
u"秃": u"禿",
u"突": u"突",
u"图": u"圖",
u"徒": u"徒",
u"途": u"途",
u"涂": u"塗",
u"屠": u"屠",
u"土": u"土",
u"吐": u"吐",
u"兔": u"兔",
u"湍": u"湍",
u"团": u"團",
u"推": u"推",
u"颓": u"頹",
u"腿": u"腿",
u"蜕": u"蛻",
u"褪": u"褪",
u"退": u"退",
u"吞": u"吞",
u"屯": u"屯",
u"臀": u"臀",
u"拖": u"拖",
u"托": u"托",
u"脱": u"脫",
u"鸵": u"鴕",
u"陀": u"陀",
u"驮": u"馱",
u"驼": u"駝",
u"椭": u"橢",
u"妥": u"妥",
u"拓": u"拓",
u"唾": u"唾",
u"挖": u"挖",
u"哇": u"哇",
u"蛙": u"蛙",
u"洼": u"窪",
u"娃": u"娃",
u"瓦": u"瓦",
u"袜": u"襪",
u"歪": u"歪",
u"外": u"外",
u"豌": u"豌",
u"弯": u"彎",
u"湾": u"灣",
u"玩": u"玩",
u"顽": u"頑",
u"丸": u"丸",
u"烷": u"烷",
u"完": u"完",
u"碗": u"碗",
u"挽": u"挽",
u"晚": u"晚",
u"皖": u"皖",
u"惋": u"惋",
u"宛": u"宛",
u"婉": u"婉",
u"万": u"萬",
u"腕": u"腕",
u"汪": u"汪",
u"王": u"王",
u"亡": u"亡",
u"枉": u"枉",
u"网": u"網",
u"往": u"往",
u"旺": u"旺",
u"望": u"望",
u"忘": u"忘",
u"妄": u"妄",
u"威": u"威",
u"巍": u"巍",
u"微": u"微",
u"危": u"危",
u"韦": u"韋",
u"违": u"違",
u"桅": u"桅",
u"围": u"圍",
u"唯": u"唯",
u"惟": u"惟",
u"为": u"為",
u"潍": u"濰",
u"维": u"維",
u"苇": u"葦",
u"萎": u"萎",
u"委": u"委",
u"伟": u"偉",
u"伪": u"偽",
u"尾": u"尾",
u"纬": u"緯",
u"未": u"未",
u"蔚": u"蔚",
u"味": u"味",
u"畏": u"畏",
u"胃": u"胃",
u"喂": u"餵",
u"魏": u"魏",
u"位": u"位",
u"渭": u"渭",
u"谓": u"謂",
u"尉": u"尉",
u"慰": u"慰",
u"卫": u"衛",
u"瘟": u"瘟",
u"温": u"溫",
u"蚊": u"蚊",
u"文": u"文",
u"闻": u"聞",
u"纹": u"紋",
u"吻": u"吻",
u"稳": u"穩",
u"紊": u"紊",
u"问": u"問",
u"嗡": u"嗡",
u"翁": u"翁",
u"瓮": u"甕",
u"挝": u"撾",
u"蜗": u"蝸",
u"涡": u"渦",
u"窝": u"窩",
u"我": u"我",
u"斡": u"斡",
u"卧": u"臥",
u"握": u"握",
u"沃": u"沃",
u"巫": u"巫",
u"呜": u"嗚",
u"钨": u"鎢",
u"乌": u"烏",
u"污": u"汙",
u"诬": u"誣",
u"屋": u"屋",
u"无": u"無",
u"芜": u"蕪",
u"梧": u"梧",
u"吾": u"吾",
u"吴": u"吳",
u"毋": u"毋",
u"武": u"武",
u"五": u"五",
u"捂": u"捂",
u"午": u"午",
u"舞": u"舞",
u"伍": u"伍",
u"侮": u"侮",
u"坞": u"塢",
u"戊": u"戊",
u"雾": u"霧",
u"晤": u"晤",
u"物": u"物",
u"勿": u"勿",
u"务": u"務",
u"悟": u"悟",
u"误": u"誤",
u"昔": u"昔",
u"熙": u"熙",
u"析": u"析",
u"西": u"西",
u"硒": u"硒",
u"矽": u"矽",
u"晰": u"晰",
u"嘻": u"嘻",
u"吸": u"吸",
u"锡": u"錫",
u"牺": u"犧",
u"稀": u"稀",
u"息": u"息",
u"希": u"希",
u"悉": u"悉",
u"膝": u"膝",
u"夕": u"夕",
u"惜": u"惜",
u"熄": u"熄",
u"烯": u"烯",
u"溪": u"溪",
u"汐": u"汐",
u"犀": u"犀",
u"檄": u"檄",
u"袭": u"襲",
u"席": u"席",
u"习": u"習",
u"媳": u"媳",
u"喜": u"喜",
u"铣": u"銑",
u"洗": u"洗",
u"系": u"系",
u"隙": u"隙",
u"戏": u"戲",
u"细": u"細",
u"瞎": u"瞎",
u"虾": u"蝦",
u"匣": u"匣",
u"霞": u"霞",
u"辖": u"轄",
u"暇": u"暇",
u"峡": u"峽",
u"侠": u"俠",
u"狭": u"狹",
u"下": u"下",
u"厦": u"廈",
u"夏": u"夏",
u"吓": u"嚇",
u"掀": u"掀",
u"锨": u"鍁",
u"先": u"先",
u"仙": u"仙",
u"鲜": u"鮮",
u"纤": u"纖",
u"咸": u"鹹",
u"贤": u"賢",
u"衔": u"銜",
u"舷": u"舷",
u"闲": u"閑",
u"涎": u"涎",
u"弦": u"弦",
u"嫌": u"嫌",
u"显": u"顯",
u"险": u"險",
u"现": u"現",
u"献": u"獻",
u"县": u"縣",
u"腺": u"腺",
u"馅": u"餡",
u"羡": u"羨",
u"宪": u"憲",
u"陷": u"陷",
u"限": u"限",
u"线": u"線",
u"相": u"相",
u"厢": u"廂",
u"镶": u"鑲",
u"香": u"香",
u"箱": u"箱",
u"襄": u"襄",
u"湘": u"湘",
u"乡": u"鄉",
u"翔": u"翔",
u"祥": u"祥",
u"详": u"詳",
u"想": u"想",
u"响": u"響",
u"享": u"享",
u"项": u"項",
u"巷": u"巷",
u"橡": u"橡",
u"像": u"像",
u"向": u"向",
u"象": u"象",
u"萧": u"蕭",
u"硝": u"硝",
u"霄": u"霄",
u"削": u"削",
u"哮": u"哮",
u"嚣": u"囂",
u"销": u"銷",
u"消": u"消",
u"宵": u"宵",
u"淆": u"淆",
u"晓": u"曉",
u"小": u"小",
u"孝": u"孝",
u"校": u"校",
u"肖": u"肖",
u"啸": u"嘯",
u"笑": u"笑",
u"效": u"效",
u"楔": u"楔",
u"些": u"些",
u"歇": u"歇",
u"蝎": u"蠍",
u"鞋": u"鞋",
u"协": u"協",
u"挟": u"挾",
u"携": u"攜",
u"邪": u"邪",
u"斜": u"斜",
u"胁": u"脅",
u"谐": u"諧",
u"写": u"寫",
u"械": u"械",
u"卸": u"卸",
u"蟹": u"蟹",
u"懈": u"懈",
u"泄": u"泄",
u"泻": u"瀉",
u"谢": u"謝",
u"屑": u"屑",
u"薪": u"薪",
u"芯": u"芯",
u"锌": u"鋅",
u"欣": u"欣",
u"辛": u"辛",
u"新": u"新",
u"忻": u"忻",
u"心": u"心",
u"信": u"信",
u"衅": u"釁",
u"星": u"星",
u"腥": u"腥",
u"猩": u"猩",
u"惺": u"惺",
u"兴": u"興",
u"刑": u"刑",
u"型": u"型",
u"形": u"形",
u"邢": u"邢",
u"行": u"行",
u"醒": u"醒",
u"幸": u"幸",
u"杏": u"杏",
u"性": u"性",
u"姓": u"姓",
u"兄": u"兄",
u"凶": u"兇",
u"胸": u"胸",
u"匈": u"匈",
u"汹": u"洶",
u"雄": u"雄",
u"熊": u"熊",
u"休": u"休",
u"修": u"修",
u"羞": u"羞",
u"朽": u"朽",
u"嗅": u"嗅",
u"锈": u"銹",
u"秀": u"秀",
u"袖": u"袖",
u"绣": u"繡",
u"墟": u"墟",
u"戌": u"戌",
u"需": u"需",
u"虚": u"虛",
u"嘘": u"噓",
u"须": u"須",
u"徐": u"徐",
u"许": u"許",
u"蓄": u"蓄",
u"酗": u"酗",
u"叙": u"敘",
u"旭": u"旭",
u"序": u"序",
u"畜": u"畜",
u"恤": u"恤",
u"絮": u"絮",
u"婿": u"婿",
u"绪": u"緒",
u"续": u"續",
u"轩": u"軒",
u"喧": u"喧",
u"宣": u"宣",
u"悬": u"懸",
u"旋": u"旋",
u"玄": u"玄",
u"选": u"選",
u"癣": u"癬",
u"眩": u"眩",
u"绚": u"絢",
u"靴": u"靴",
u"薛": u"薛",
u"学": u"學",
u"穴": u"穴",
u"雪": u"雪",
u"血": u"血",
u"勋": u"勛",
u"熏": u"熏",
u"循": u"循",
u"旬": u"旬",
u"询": u"詢",
u"寻": u"尋",
u"驯": u"馴",
u"巡": u"巡",
u"殉": u"殉",
u"汛": u"汛",
u"训": u"訓",
u"讯": u"訊",
u"逊": u"遜",
u"迅": u"迅",
u"压": u"壓",
u"押": u"押",
u"鸦": u"鴉",
u"鸭": u"鴨",
u"呀": u"呀",
u"丫": u"丫",
u"芽": u"芽",
u"牙": u"牙",
u"蚜": u"蚜",
u"崖": u"崖",
u"衙": u"衙",
u"涯": u"涯",
u"雅": u"雅",
u"哑": u"啞",
u"亚": u"亞",
u"讶": u"訝",
u"焉": u"焉",
u"咽": u"咽",
u"阉": u"閹",
u"烟": u"煙",
u"淹": u"淹",
u"盐": u"鹽",
u"严": u"嚴",
u"研": u"研",
u"蜒": u"蜒",
u"岩": u"巖",
u"延": u"延",
u"言": u"言",
u"颜": u"顏",
u"阎": u"閻",
u"炎": u"炎",
u"沿": u"沿",
u"奄": u"奄",
u"掩": u"掩",
u"眼": u"眼",
u"衍": u"衍",
u"演": u"演",
u"艳": u"艷",
u"堰": u"堰",
u"燕": u"燕",
u"厌": u"厭",
u"砚": u"硯",
u"雁": u"雁",
u"唁": u"唁",
u"彦": u"彥",
u"焰": u"焰",
u"宴": u"宴",
u"谚": u"諺",
u"验": u"驗",
u"殃": u"殃",
u"央": u"央",
u"鸯": u"鴦",
u"秧": u"秧",
u"杨": u"楊",
u"扬": u"揚",
u"佯": u"佯",
u"疡": u"瘍",
u"羊": u"羊",
u"洋": u"洋",
u"阳": u"陽",
u"氧": u"氧",
u"仰": u"仰",
u"痒": u"癢",
u"养": u"養",
u"样": u"樣",
u"漾": u"漾",
u"邀": u"邀",
u"腰": u"腰",
u"妖": u"妖",
u"瑶": u"瑤",
u"摇": u"搖",
u"尧": u"堯",
u"遥": u"遙",
u"窑": u"窯",
u"谣": u"謠",
u"姚": u"姚",
u"咬": u"咬",
u"舀": u"舀",
u"药": u"藥",
u"要": u"要",
u"耀": u"耀",
u"椰": u"椰",
u"噎": u"噎",
u"耶": u"耶",
u"爷": u"爺",
u"野": u"野",
u"冶": u"冶",
u"也": u"也",
u"页": u"頁",
u"掖": u"掖",
u"业": u"業",
u"叶": u"葉",
u"曳": u"曳",
u"腋": u"腋",
u"夜": u"夜",
u"液": u"液",
u"一": u"壹",
u"壹": u"壹",
u"医": u"醫",
u"揖": u"揖",
u"铱": u"銥",
u"依": u"依",
u"伊": u"伊",
u"衣": u"衣",
u"颐": u"頤",
u"夷": u"夷",
u"遗": u"遺",
u"移": u"移",
u"仪": u"儀",
u"胰": u"胰",
u"疑": u"疑",
u"沂": u"沂",
u"宜": u"宜",
u"姨": u"姨",
u"彝": u"彜",
u"椅": u"椅",
u"蚁": u"蟻",
u"倚": u"倚",
u"已": u"已",
u"乙": u"乙",
u"矣": u"矣",
u"以": u"以",
u"艺": u"藝",
u"抑": u"抑",
u"易": u"易",
u"邑": u"邑",
u"屹": u"屹",
u"亿": u"億",
u"役": u"役",
u"臆": u"臆",
u"逸": u"逸",
u"肄": u"肄",
u"疫": u"疫",
u"亦": u"亦",
u"裔": u"裔",
u"意": u"意",
u"毅": u"毅",
u"忆": u"憶",
u"义": u"義",
u"益": u"益",
u"溢": u"溢",
u"诣": u"詣",
u"议": u"議",
u"谊": u"誼",
u"译": u"譯",
u"异": u"異",
u"翼": u"翼",
u"翌": u"翌",
u"绎": u"繹",
u"茵": u"茵",
u"荫": u"蔭",
u"因": u"因",
u"殷": u"殷",
u"音": u"音",
u"阴": u"陰",
u"姻": u"姻",
u"吟": u"吟",
u"银": u"銀",
u"淫": u"淫",
u"寅": u"寅",
u"饮": u"飲",
u"尹": u"尹",
u"引": u"引",
u"隐": u"隱",
u"印": u"印",
u"英": u"英",
u"樱": u"櫻",
u"婴": u"嬰",
u"鹰": u"鷹",
u"应": u"應",
u"缨": u"纓",
u"莹": u"瑩",
u"萤": u"螢",
u"营": u"營",
u"荧": u"熒",
u"蝇": u"蠅",
u"迎": u"迎",
u"赢": u"贏",
u"盈": u"盈",
u"影": u"影",
u"颖": u"穎",
u"硬": u"硬",
u"映": u"映",
u"哟": u"喲",
u"拥": u"擁",
u"佣": u"傭",
u"臃": u"臃",
u"痈": u"癰",
u"庸": u"庸",
u"雍": u"雍",
u"踊": u"踴",
u"蛹": u"蛹",
u"咏": u"詠",
u"泳": u"泳",
u"涌": u"湧",
u"永": u"永",
u"恿": u"恿",
u"勇": u"勇",
u"用": u"用",
u"幽": u"幽",
u"优": u"優",
u"悠": u"悠",
u"忧": u"憂",
u"尤": u"尤",
u"由": u"由",
u"邮": u"郵",
u"铀": u"鈾",
u"犹": u"猶",
u"油": u"油",
u"游": u"遊",
u"酉": u"酉",
u"有": u"有",
u"友": u"友",
u"右": u"右",
u"佑": u"佑",
u"釉": u"釉",
u"诱": u"誘",
u"又": u"又",
u"幼": u"幼",
u"迂": u"迂",
u"淤": u"淤",
u"于": u"於",
u"盂": u"盂",
u"榆": u"榆",
u"虞": u"虞",
u"愚": u"愚",
u"舆": u"輿",
u"余": u"余",
u"俞": u"俞",
u"逾": u"逾",
u"鱼": u"魚",
u"愉": u"愉",
u"渝": u"渝",
u"渔": u"漁",
u"隅": u"隅",
u"予": u"予",
u"娱": u"娛",
u"雨": u"雨",
u"与": u"與",
u"屿": u"嶼",
u"禹": u"禹",
u"宇": u"宇",
u"语": u"語",
u"羽": u"羽",
u"玉": u"玉",
u"域": u"域",
u"芋": u"芋",
u"郁": u"郁",
u"吁": u"籲",
u"遇": u"遇",
u"喻": u"喻",
u"峪": u"峪",
u"御": u"禦",
u"愈": u"愈",
u"欲": u"欲",
u"狱": u"獄",
u"育": u"育",
u"誉": u"譽",
u"浴": u"浴",
u"寓": u"寓",
u"裕": u"裕",
u"预": u"預",
u"豫": u"豫",
u"驭": u"馭",
u"鸳": u"鴛",
u"渊": u"淵",
u"冤": u"冤",
u"元": u"元",
u"垣": u"垣",
u"袁": u"袁",
u"原": u"原",
u"援": u"援",
u"辕": u"轅",
u"园": u"園",
u"员": u"員",
u"圆": u"圓",
u"猿": u"猿",
u"源": u"源",
u"缘": u"緣",
u"远": u"遠",
u"苑": u"苑",
u"愿": u"願",
u"怨": u"怨",
u"院": u"院",
u"曰": u"曰",
u"约": u"約",
u"越": u"越",
u"跃": u"躍",
u"钥": u"鑰",
u"岳": u"嶽",
u"粤": u"粵",
u"月": u"月",
u"悦": u"悅",
u"阅": u"閱",
u"耘": u"耘",
u"云": u"雲",
u"郧": u"鄖",
u"匀": u"勻",
u"陨": u"隕",
u"允": u"允",
u"运": u"運",
u"蕴": u"蘊",
u"酝": u"醞",
u"晕": u"暈",
u"韵": u"韻",
u"孕": u"孕",
u"匝": u"匝",
u"砸": u"砸",
u"杂": u"雜",
u"栽": u"栽",
u"哉": u"哉",
u"灾": u"災",
u"宰": u"宰",
u"载": u"載",
u"再": u"再",
u"在": u"在",
u"咱": u"咱",
u"攒": u"攢",
u"暂": u"暫",
u"赞": u"贊",
u"赃": u"贓",
u"脏": u"臟",
u"葬": u"葬",
u"遭": u"遭",
u"糟": u"糟",
u"凿": u"鑿",
u"藻": u"藻",
u"枣": u"棗",
u"早": u"早",
u"澡": u"澡",
u"蚤": u"蚤",
u"躁": u"躁",
u"噪": u"噪",
u"造": u"造",
u"皂": u"皂",
u"灶": u"竈",
u"燥": u"燥",
u"责": u"責",
u"择": u"擇",
u"则": u"則",
u"泽": u"澤",
u"贼": u"賊",
u"怎": u"怎",
u"增": u"增",
u"憎": u"憎",
u"曾": u"曾",
u"赠": u"贈",
u"扎": u"紮",
u"喳": u"喳",
u"渣": u"渣",
u"札": u"劄",
u"轧": u"軋",
u"铡": u"鍘",
u"闸": u"閘",
u"眨": u"眨",
u"栅": u"柵",
u"榨": u"榨",
u"咋": u"咋",
u"乍": u"乍",
u"炸": u"炸",
u"诈": u"詐",
u"摘": u"摘",
u"斋": u"齋",
u"宅": u"宅",
u"窄": u"窄",
u"债": u"債",
u"寨": u"寨",
u"瞻": u"瞻",
u"毡": u"氈",
u"詹": u"詹",
u"粘": u"粘",
u"沾": u"沾",
u"盏": u"盞",
u"斩": u"斬",
u"辗": u"輾",
u"崭": u"嶄",
u"展": u"展",
u"蘸": u"蘸",
u"栈": u"棧",
u"占": u"占",
u"战": u"戰",
u"站": u"站",
u"湛": u"湛",
u"绽": u"綻",
u"樟": u"樟",
u"章": u"章",
u"彰": u"彰",
u"漳": u"漳",
u"张": u"張",
u"掌": u"掌",
u"涨": u"漲",
u"杖": u"杖",
u"丈": u"丈",
u"帐": u"帳",
u"账": u"賬",
u"仗": u"仗",
u"胀": u"脹",
u"瘴": u"瘴",
u"障": u"障",
u"招": u"招",
u"昭": u"昭",
u"找": u"找",
u"沼": u"沼",
u"赵": u"趙",
u"照": u"照",
u"罩": u"罩",
u"兆": u"兆",
u"肇": u"肇",
u"召": u"召",
u"遮": u"遮",
u"折": u"折",
u"哲": u"哲",
u"蛰": u"蟄",
u"辙": u"轍",
u"者": u"者",
u"锗": u"鍺",
u"蔗": u"蔗",
u"这": u"這",
u"浙": u"浙",
u"珍": u"珍",
u"斟": u"斟",
u"真": u"真",
u"甄": u"甄",
u"砧": u"砧",
u"臻": u"臻",
u"贞": u"貞",
u"针": u"針",
u"侦": u"偵",
u"枕": u"枕",
u"疹": u"疹",
u"诊": u"診",
u"震": u"震",
u"振": u"振",
u"镇": u"鎮",
u"阵": u"陣",
u"蒸": u"蒸",
u"挣": u"掙",
u"睁": u"睜",
u"征": u"征",
u"狰": u"猙",
u"争": u"爭",
u"怔": u"怔",
u"整": u"整",
u"拯": u"拯",
u"正": u"正",
u"政": u"政",
u"帧": u"幀",
u"症": u"癥",
u"郑": u"鄭",
u"证": u"證",
u"芝": u"芝",
u"枝": u"枝",
u"支": u"支",
u"吱": u"吱",
u"蜘": u"蜘",
u"知": u"知",
u"肢": u"肢",
u"脂": u"脂",
u"汁": u"汁",
u"之": u"之",
u"织": u"織",
u"职": u"職",
u"直": u"直",
u"植": u"植",
u"殖": u"殖",
u"执": u"執",
u"值": u"值",
u"侄": u"侄",
u"址": u"址",
u"指": u"指",
u"止": u"止",
u"趾": u"趾",
u"只": u"只",
u"旨": u"旨",
u"纸": u"紙",
u"志": u"誌",
u"挚": u"摯",
u"掷": u"擲",
u"至": u"至",
u"致": u"致",
u"置": u"置",
u"帜": u"幟",
u"峙": u"峙",
u"制": u"制",
u"智": u"智",
u"秩": u"秩",
u"稚": u"稚",
u"质": u"質",
u"炙": u"炙",
u"痔": u"痔",
u"滞": u"滯",
u"治": u"治",
u"窒": u"窒",
u"中": u"中",
u"盅": u"盅",
u"忠": u"忠",
u"钟": u"鐘",
u"衷": u"衷",
u"终": u"終",
u"种": u"種",
u"肿": u"腫",
u"重": u"重",
u"仲": u"仲",
u"众": u"眾",
u"舟": u"舟",
u"周": u"周",
u"州": u"州",
u"洲": u"洲",
u"诌": u"謅",
u"粥": u"粥",
u"轴": u"軸",
u"肘": u"肘",
u"帚": u"帚",
u"咒": u"咒",
u"皱": u"皺",
u"宙": u"宙",
u"昼": u"晝",
u"骤": u"驟",
u"珠": u"珠",
u"株": u"株",
u"蛛": u"蛛",
u"朱": u"朱",
u"猪": u"豬",
u"诸": u"諸",
u"诛": u"誅",
u"逐": u"逐",
u"竹": u"竹",
u"烛": u"燭",
u"煮": u"煮",
u"拄": u"拄",
u"瞩": u"矚",
u"嘱": u"囑",
u"主": u"主",
u"著": u"著",
u"柱": u"柱",
u"助": u"助",
u"蛀": u"蛀",
u"贮": u"貯",
u"铸": u"鑄",
u"筑": u"築",
u"住": u"住",
u"注": u"註",
u"祝": u"祝",
u"驻": u"駐",
u"抓": u"抓",
u"爪": u"爪",
u"拽": u"拽",
u"专": u"專",
u"砖": u"磚",
u"转": u"轉",
u"撰": u"撰",
u"赚": u"賺",
u"篆": u"篆",
u"桩": u"樁",
u"庄": u"莊",
u"装": u"裝",
u"妆": u"妝",
u"撞": u"撞",
u"壮": u"壯",
u"状": u"狀",
u"椎": u"椎",
u"锥": u"錐",
u"追": u"追",
u"赘": u"贅",
u"坠": u"墜",
u"缀": u"綴",
u"谆": u"諄",
u"准": u"準",
u"捉": u"捉",
u"拙": u"拙",
u"卓": u"卓",
u"桌": u"桌",
u"琢": u"琢",
u"茁": u"茁",
u"酌": u"酌",
u"啄": u"啄",
u"着": u"著",
u"灼": u"灼",
u"浊": u"濁",
u"兹": u"茲",
u"咨": u"咨",
u"资": u"資",
u"姿": u"姿",
u"滋": u"滋",
u"淄": u"淄",
u"孜": u"孜",
u"紫": u"紫",
u"仔": u"仔",
u"籽": u"籽",
u"滓": u"滓",
u"子": u"子",
u"自": u"自",
u"渍": u"漬",
u"字": u"字",
u"鬃": u"鬃",
u"棕": u"棕",
u"踪": u"蹤",
u"宗": u"宗",
u"综": u"綜",
u"总": u"總",
u"纵": u"縱",
u"邹": u"鄒",
u"走": u"走",
u"奏": u"奏",
u"揍": u"揍",
u"租": u"租",
u"足": u"足",
u"卒": u"卒",
u"族": u"族",
u"祖": u"祖",
u"诅": u"詛",
u"阻": u"阻",
u"组": u"組",
u"钻": u"鉆",
u"纂": u"纂",
u"嘴": u"嘴",
u"醉": u"醉",
u"最": u"最",
u"罪": u"罪",
u"尊": u"尊",
u"遵": u"遵",
u"昨": u"昨",
u"左": u"左",
u"佐": u"佐",
u"柞": u"柞",
u"做": u"做",
u"作": u"作",
u"坐": u"坐",
u"座": u"座",
u"锕": u"錒",
u"嗳": u"噯",
u"嫒": u"嬡",
u"瑷": u"璦",
u"暧": u"曖",
u"霭": u"靄",
u"谙": u"諳",
u"铵": u"銨",
u"鹌": u"鵪",
u"媪": u"媼",
u"骜": u"驁",
u"鳌": u"鰲",
u"钯": u"鈀",
u"呗": u"唄",
u"钣": u"鈑",
u"鸨": u"鴇",
u"龅": u"齙",
u"鹎": u"鵯",
u"贲": u"賁",
u"锛": u"錛",
u"荜": u"蓽",
u"哔": u"嗶",
u"滗": u"潷",
u"铋": u"鉍",
u"筚": u"篳",
u"跸": u"蹕",
u"苄": u"芐",
u"缏": u"緶",
u"笾": u"籩",
u"骠": u"驃",
u"飑": u"颮",
u"飙": u"飆",
u"镖": u"鏢",
u"镳": u"鑣",
u"鳔": u"鰾",
u"傧": u"儐",
u"缤": u"繽",
u"槟": u"檳",
u"殡": u"殯",
u"膑": u"臏",
u"镔": u"鑌",
u"髌": u"髕",
u"鬓": u"鬢",
u"禀": u"稟",
u"饽": u"餑",
u"钹": u"鈸",
u"鹁": u"鵓",
u"钸": u"鈽",
u"骖": u"驂",
u"黪": u"黲",
u"恻": u"惻",
u"锸": u"鍤",
u"侪": u"儕",
u"钗": u"釵",
u"冁": u"囅",
u"谄": u"諂",
u"谶": u"讖",
u"蒇": u"蕆",
u"忏": u"懺",
u"婵": u"嬋",
u"骣": u"驏",
u"觇": u"覘",
u"禅": u"禪",
u"镡": u"鐔",
u"伥": u"倀",
u"苌": u"萇",
u"怅": u"悵",
u"阊": u"閶",
u"鲳": u"鯧",
u"砗": u"硨",
u"伧": u"傖",
u"谌": u"諶",
u"榇": u"櫬",
u"碜": u"磣",
u"龀": u"齔",
u"枨": u"棖",
u"柽": u"檉",
u"铖": u"鋮",
u"铛": u"鐺",
u"饬": u"飭",
u"鸱": u"鴟",
u"铳": u"銃",
u"俦": u"儔",
u"帱": u"幬",
u"雠": u"讎",
u"刍": u"芻",
u"绌": u"絀",
u"蹰": u"躕",
u"钏": u"釧",
u"怆": u"愴",
u"缍": u"綞",
u"鹑": u"鶉",
u"辍": u"輟",
u"龊": u"齪",
u"鹚": u"鶿",
u"苁": u"蓯",
u"骢": u"驄",
u"枞": u"樅",
u"辏": u"輳",
u"撺": u"攛",
u"锉": u"銼",
u"鹾": u"鹺",
u"哒": u"噠",
u"鞑": u"韃",
u"骀": u"駘",
u"绐": u"紿",
u"殚": u"殫",
u"赕": u"賧",
u"瘅": u"癉",
u"箪": u"簞",
u"谠": u"讜",
u"砀": u"碭",
u"裆": u"襠",
u"焘": u"燾",
u"镫": u"鐙",
u"籴": u"糴",
u"诋": u"詆",
u"谛": u"諦",
u"绨": u"綈",
u"觌": u"覿",
u"镝": u"鏑",
u"巅": u"巔",
u"钿": u"鈿",
u"癫": u"癲",
u"铫": u"銚",
u"鲷": u"鯛",
u"鲽": u"鰈",
u"铤": u"鋌",
u"铥": u"銩",
u"岽": u"崠",
u"鸫": u"鶇",
u"窦": u"竇",
u"渎": u"瀆",
u"椟": u"櫝",
u"牍": u"牘",
u"笃": u"篤",
u"黩": u"黷",
u"簖": u"籪",
u"怼": u"懟",
u"镦": u"鐓",
u"炖": u"燉",
u"趸": u"躉",
u"铎": u"鐸",
u"谔": u"諤",
u"垩": u"堊",
u"阏": u"閼",
u"轭": u"軛",
u"锇": u"鋨",
u"锷": u"鍔",
u"鹗": u"鶚",
u"颚": u"顎",
u"颛": u"顓",
u"鳄": u"鱷",
u"诶": u"誒",
u"迩": u"邇",
u"铒": u"鉺",
u"鸸": u"鴯",
u"鲕": u"鮞",
u"钫": u"鈁",
u"鲂": u"魴",
u"绯": u"緋",
u"镄": u"鐨",
u"鲱": u"鯡",
u"偾": u"僨",
u"沣": u"灃",
u"凫": u"鳧",
u"驸": u"駙",
u"绂": u"紱",
u"绋": u"紼",
u"赙": u"賻",
u"麸": u"麩",
u"鲋": u"鮒",
u"鳆": u"鰒",
u"钆": u"釓",
u"赅": u"賅",
u"尴": u"尷",
u"擀": u"搟",
u"绀": u"紺",
u"戆": u"戇",
u"睾": u"睪",
u"诰": u"誥",
u"缟": u"縞",
u"锆": u"鋯",
u"纥": u"紇",
u"镉": u"鎘",
u"颍": u"潁",
u"亘": u"亙",
u"赓": u"賡",
u"绠": u"綆",
u"鲠": u"鯁",
u"诟": u"詬",
u"缑": u"緱",
u"觏": u"覯",
u"诂": u"詁",
u"毂": u"轂",
u"钴": u"鈷",
u"锢": u"錮",
u"鸪": u"鴣",
u"鹄": u"鵠",
u"鹘": u"鶻",
u"鸹": u"鴰",
u"掴": u"摑",
u"诖": u"詿",
u"掼": u"摜",
u"鹳": u"鸛",
u"鳏": u"鰥",
u"犷": u"獷",
u"匦": u"匭",
u"刿": u"劌",
u"妫": u"媯",
u"桧": u"檜",
u"鲑": u"鮭",
u"鳜": u"鱖",
u"衮": u"袞",
u"绲": u"緄",
u"鲧": u"鯀",
u"埚": u"堝",
u"呙": u"咼",
u"帼": u"幗",
u"椁": u"槨",
u"蝈": u"蟈",
u"铪": u"鉿",
u"阚": u"闞",
u"绗": u"絎",
u"颉": u"頡",
u"灏": u"灝",
u"颢": u"顥",
u"诃": u"訶",
u"阖": u"闔",
u"蛎": u"蠣",
u"黉": u"黌",
u"讧": u"訌",
u"荭": u"葒",
u"闳": u"閎",
u"鲎": u"鱟",
u"浒": u"滸",
u"鹕": u"鶘",
u"骅": u"驊",
u"桦": u"樺",
u"铧": u"鏵",
u"奂": u"奐",
u"缳": u"繯",
u"锾": u"鍰",
u"鲩": u"鯇",
u"鳇": u"鰉",
u"诙": u"詼",
u"荟": u"薈",
u"哕": u"噦",
u"浍": u"澮",
u"缋": u"繢",
u"珲": u"琿",
u"晖": u"暉",
u"诨": u"諢",
u"馄": u"餛",
u"阍": u"閽",
u"钬": u"鈥",
u"镬": u"鑊",
u"讦": u"訐",
u"诘": u"詰",
u"荠": u"薺",
u"叽": u"嘰",
u"哜": u"嚌",
u"骥": u"驥",
u"玑": u"璣",
u"觊": u"覬",
u"齑": u"齏",
u"矶": u"磯",
u"羁": u"羈",
u"虿": u"蠆",
u"跻": u"躋",
u"霁": u"霽",
u"鲚": u"鱭",
u"鲫": u"鯽",
u"郏": u"郟",
u"浃": u"浹",
u"铗": u"鋏",
u"镓": u"鎵",
u"蛲": u"蟯",
u"谏": u"諫",
u"缣": u"縑",
u"戋": u"戔",
u"戬": u"戩",
u"睑": u"瞼",
u"鹣": u"鶼",
u"笕": u"筧",
u"鲣": u"鰹",
u"鞯": u"韉",
u"绛": u"絳",
u"缰": u"韁",
u"挢": u"撟",
u"峤": u"嶠",
u"鹪": u"鷦",
u"鲛": u"鮫",
u"疖": u"癤",
u"颌": u"頜",
u"鲒": u"鮚",
u"卺": u"巹",
u"荩": u"藎",
u"馑": u"饉",
u"缙": u"縉",
u"赆": u"贐",
u"觐": u"覲",
u"刭": u"剄",
u"泾": u"涇",
u"迳": u"逕",
u"弪": u"弳",
u"胫": u"脛",
u"靓": u"靚",
u"阄": u"鬮",
u"鸠": u"鳩",
u"鹫": u"鷲",
u"讵": u"詎",
u"屦": u"屨",
u"榉": u"櫸",
u"飓": u"颶",
u"钜": u"鉅",
u"锔": u"鋦",
u"窭": u"窶",
u"龃": u"齟",
u"锩": u"錈",
u"镌": u"鐫",
u"隽": u"雋",
u"谲": u"譎",
u"珏": u"玨",
u"皲": u"皸",
u"剀": u"剴",
u"垲": u"塏",
u"忾": u"愾",
u"恺": u"愷",
u"铠": u"鎧",
u"锴": u"鍇",
u"龛": u"龕",
u"闶": u"閌",
u"钪": u"鈧",
u"铐": u"銬",
u"骒": u"騍",
u"缂": u"緙",
u"轲": u"軻",
u"钶": u"鈳",
u"锞": u"錁",
u"颔": u"頷",
u"龈": u"齦",
u"铿": u"鏗",
u"喾": u"嚳",
u"郐": u"鄶",
u"哙": u"噲",
u"脍": u"膾",
u"狯": u"獪",
u"髋": u"髖",
u"诓": u"誆",
u"诳": u"誑",
u"邝": u"鄺",
u"圹": u"壙",
u"纩": u"纊",
u"贶": u"貺",
u"匮": u"匱",
u"蒉": u"蕢",
u"愦": u"憒",
u"聩": u"聵",
u"篑": u"簣",
u"阃": u"閫",
u"锟": u"錕",
u"鲲": u"鯤",
u"蛴": u"蠐",
u"崃": u"崍",
u"徕": u"徠",
u"涞": u"淶",
u"濑": u"瀨",
u"赉": u"賚",
u"睐": u"睞",
u"铼": u"錸",
u"癞": u"癩",
u"籁": u"籟",
u"岚": u"嵐",
u"榄": u"欖",
u"斓": u"斕",
u"镧": u"鑭",
u"褴": u"襤",
u"阆": u"閬",
u"锒": u"鋃",
u"唠": u"嘮",
u"崂": u"嶗",
u"铑": u"銠",
u"铹": u"鐒",
u"痨": u"癆",
u"鳓": u"鰳",
u"诔": u"誄",
u"缧": u"縲",
u"俪": u"儷",
u"郦": u"酈",
u"坜": u"壢",
u"苈": u"藶",
u"莅": u"蒞",
u"蓠": u"蘺",
u"呖": u"嚦",
u"逦": u"邐",
u"骊": u"驪",
u"缡": u"縭",
u"枥": u"櫪",
u"栎": u"櫟",
u"轹": u"轢",
u"砺": u"礪",
u"锂": u"鋰",
u"鹂": u"鸝",
u"疠": u"癘",
u"粝": u"糲",
u"跞": u"躒",
u"雳": u"靂",
u"鲡": u"鱺",
u"鳢": u"鱧",
u"蔹": u"蘞",
u"奁": u"奩",
u"潋": u"瀲",
u"琏": u"璉",
u"殓": u"殮",
u"裢": u"褳",
u"裣": u"襝",
u"鲢": u"鰱",
u"魉": u"魎",
u"缭": u"繚",
u"钌": u"釕",
u"鹩": u"鷯",
u"蔺": u"藺",
u"廪": u"廩",
u"檩": u"檁",
u"辚": u"轔",
u"躏": u"躪",
u"绫": u"綾",
u"棂": u"欞",
u"蛏": u"蟶",
u"鲮": u"鯪",
u"浏": u"瀏",
u"骝": u"騮",
u"绺": u"綹",
u"镏": u"鎦",
u"鹨": u"鷚",
u"茏": u"蘢",
u"泷": u"瀧",
u"珑": u"瓏",
u"栊": u"櫳",
u"胧": u"朧",
u"砻": u"礱",
u"偻": u"僂",
u"蒌": u"蔞",
u"喽": u"嘍",
u"嵝": u"嶁",
u"镂": u"鏤",
u"瘘": u"瘺",
u"耧": u"耬",
u"蝼": u"螻",
u"髅": u"髏",
u"垆": u"壚",
u"撸": u"擼",
u"噜": u"嚕",
u"闾": u"閭",
u"泸": u"瀘",
u"渌": u"淥",
u"栌": u"櫨",
u"橹": u"櫓",
u"轳": u"轤",
u"辂": u"輅",
u"辘": u"轆",
u"氇": u"氌",
u"胪": u"臚",
u"鸬": u"鸕",
u"鹭": u"鷺",
u"舻": u"艫",
u"鲈": u"鱸",
u"脔": u"臠",
u"娈": u"孌",
u"栾": u"欒",
u"鸾": u"鸞",
u"銮": u"鑾",
u"囵": u"圇",
u"荦": u"犖",
u"猡": u"玀",
u"泺": u"濼",
u"椤": u"欏",
u"脶": u"腡",
u"镙": u"鏍",
u"榈": u"櫚",
u"褛": u"褸",
u"锊": u"鋝",
u"呒": u"嘸",
u"唛": u"嘜",
u"嬷": u"嬤",
u"杩": u"榪",
u"劢": u"勱",
u"缦": u"縵",
u"镘": u"鏝",
u"颡": u"顙",
u"鳗": u"鰻",
u"麽": u"麼",
u"扪": u"捫",
u"焖": u"燜",
u"懑": u"懣",
u"钔": u"鍆",
u"芈": u"羋",
u"谧": u"謐",
u"猕": u"獼",
u"祢": u"禰",
u"渑": u"澠",
u"腼": u"靦",
u"黾": u"黽",
u"缈": u"緲",
u"缪": u"繆",
u"闵": u"閔",
u"缗": u"緡",
u"谟": u"謨",
u"蓦": u"驀",
u"馍": u"饃",
u"殁": u"歿",
u"镆": u"鏌",
u"钼": u"鉬",
u"铙": u"鐃",
u"讷": u"訥",
u"铌": u"鈮",
u"鲵": u"鯢",
u"辇": u"輦",
u"鲶": u"鯰",
u"茑": u"蔦",
u"袅": u"裊",
u"陧": u"隉",
u"蘖": u"蘗",
u"嗫": u"囁",
u"颟": u"顢",
u"蹑": u"躡",
u"苎": u"苧",
u"咛": u"嚀",
u"聍": u"聹",
u"侬": u"儂",
u"哝": u"噥",
u"驽": u"駑",
u"钕": u"釹",
u"傩": u"儺",
u"讴": u"謳",
u"怄": u"慪",
u"瓯": u"甌",
u"蹒": u"蹣",
u"疱": u"皰",
u"辔": u"轡",
u"纰": u"紕",
u"罴": u"羆",
u"铍": u"鈹",
u"谝": u"諞",
u"骈": u"駢",
u"缥": u"縹",
u"嫔": u"嬪",
u"钋": u"釙",
u"镤": u"鏷",
u"镨": u"鐠",
u"蕲": u"蘄",
u"骐": u"騏",
u"绮": u"綺",
u"桤": u"榿",
u"碛": u"磧",
u"颀": u"頎",
u"颃": u"頏",
u"鳍": u"鰭",
u"佥": u"僉",
u"荨": u"蕁",
u"悭": u"慳",
u"骞": u"騫",
u"缱": u"繾",
u"椠": u"槧",
u"钤": u"鈐",
u"嫱": u"嬙",
u"樯": u"檣",
u"戗": u"戧",
u"炝": u"熗",
u"锖": u"錆",
u"锵": u"鏘",
u"镪": u"鏹",
u"羟": u"羥",
u"跄": u"蹌",
u"诮": u"誚",
u"谯": u"譙",
u"荞": u"蕎",
u"缲": u"繰",
u"硗": u"磽",
u"跷": u"蹺",
u"惬": u"愜",
u"锲": u"鍥",
u"箧": u"篋",
u"锓": u"鋟",
u"揿": u"撳",
u"鲭": u"鯖",
u"茕": u"煢",
u"蛱": u"蛺",
u"巯": u"巰",
u"赇": u"賕",
u"虮": u"蟣",
u"鳅": u"鰍",
u"诎": u"詘",
u"岖": u"嶇",
u"阒": u"闃",
u"觑": u"覷",
u"鸲": u"鴝",
u"诠": u"詮",
u"绻": u"綣",
u"辁": u"輇",
u"铨": u"銓",
u"阕": u"闋",
u"阙": u"闕",
u"悫": u"愨",
u"荛": u"蕘",
u"娆": u"嬈",
u"桡": u"橈",
u"饪": u"飪",
u"轫": u"軔",
u"嵘": u"嶸",
u"蝾": u"蠑",
u"缛": u"縟",
u"铷": u"銣",
u"颦": u"顰",
u"蚬": u"蜆",
u"飒": u"颯",
u"毵": u"毿",
u"糁": u"糝",
u"缫": u"繅",
u"啬": u"嗇",
u"铯": u"銫",
u"穑": u"穡",
u"铩": u"鎩",
u"鲨": u"鯊",
u"酾": u"釃",
u"讪": u"訕",
u"姗": u"姍",
u"骟": u"騸",
u"钐": u"釤",
u"鳝": u"鱔",
u"垧": u"坰",
u"殇": u"殤",
u"觞": u"觴",
u"厍": u"厙",
u"滠": u"灄",
u"畲": u"畬",
u"诜": u"詵",
u"谂": u"諗",
u"渖": u"瀋",
u"谥": u"謚",
u"埘": u"塒",
u"莳": u"蒔",
u"弑": u"弒",
u"轼": u"軾",
u"贳": u"貰",
u"铈": u"鈰",
u"鲥": u"鰣",
u"绶": u"綬",
u"摅": u"攄",
u"纾": u"紓",
u"闩": u"閂",
u"铄": u"鑠",
u"厮": u"廝",
u"驷": u"駟",
u"缌": u"緦",
u"锶": u"鍶",
u"鸶": u"鷥",
u"薮": u"藪",
u"馊": u"餿",
u"飕": u"颼",
u"锼": u"鎪",
u"谡": u"謖",
u"稣": u"穌",
u"谇": u"誶",
u"荪": u"蓀",
u"狲": u"猻",
u"唢": u"嗩",
u"睃": u"脧",
u"闼": u"闥",
u"铊": u"鉈",
u"鳎": u"鰨",
u"钛": u"鈦",
u"鲐": u"鮐",
u"昙": u"曇",
u"钽": u"鉭",
u"锬": u"錟",
u"顸": u"頇",
u"傥": u"儻",
u"饧": u"餳",
u"铴": u"鐋",
u"镗": u"鏜",
u"韬": u"韜",
u"铽": u"鋱",
u"缇": u"緹",
u"鹈": u"鵜",
u"阗": u"闐",
u"粜": u"糶",
u"龆": u"齠",
u"鲦": u"鰷",
u"恸": u"慟",
u"钭": u"鈄",
u"钍": u"釷",
u"抟": u"摶",
u"饨": u"飩",
u"箨": u"籜",
u"鼍": u"鼉",
u"娲": u"媧",
u"腽": u"膃",
u"纨": u"紈",
u"绾": u"綰",
u"辋": u"輞",
u"诿": u"諉",
u"帏": u"幃",
u"闱": u"闈",
u"沩": u"溈",
u"涠": u"潿",
u"玮": u"瑋",
u"韪": u"韙",
u"炜": u"煒",
u"鲔": u"鮪",
u"阌": u"閿",
u"莴": u"萵",
u"龌": u"齷",
u"邬": u"鄔",
u"庑": u"廡",
u"怃": u"憮",
u"妩": u"嫵",
u"骛": u"騖",
u"鹉": u"鵡",
u"鹜": u"鶩",
u"饩": u"餼",
u"阋": u"鬩",
u"玺": u"璽",
u"觋": u"覡",
u"硖": u"硤",
u"苋": u"莧",
u"莶": u"薟",
u"藓": u"蘚",
u"岘": u"峴",
u"猃": u"獫",
u"娴": u"嫻",
u"鹇": u"鷴",
u"痫": u"癇",
u"蚝": u"蠔",
u"籼": u"秈",
u"跹": u"躚",
u"芗": u"薌",
u"饷": u"餉",
u"骧": u"驤",
u"缃": u"緗",
u"飨": u"饗",
u"哓": u"嘵",
u"潇": u"瀟",
u"骁": u"驍",
u"绡": u"綃",
u"枭": u"梟",
u"箫": u"簫",
u"亵": u"褻",
u"撷": u"擷",
u"绁": u"紲",
u"缬": u"纈",
u"陉": u"陘",
u"荥": u"滎",
u"馐": u"饈",
u"鸺": u"鵂",
u"诩": u"詡",
u"顼": u"頊",
u"谖": u"諼",
u"铉": u"鉉",
u"镟": u"鏇",
u"谑": u"謔",
u"泶": u"澩",
u"鳕": u"鱈",
u"埙": u"塤",
u"浔": u"潯",
u"鲟": u"鱘",
u"垭": u"埡",
u"娅": u"婭",
u"桠": u"椏",
u"氩": u"氬",
u"厣": u"厴",
u"赝": u"贗",
u"俨": u"儼",
u"兖": u"兗",
u"谳": u"讞",
u"恹": u"懨",
u"闫": u"閆",
u"酽": u"釅",
u"魇": u"魘",
u"餍": u"饜",
u"鼹": u"鼴",
u"炀": u"煬",
u"轺": u"軺",
u"鹞": u"鷂",
u"鳐": u"鰩",
u"靥": u"靨",
u"谒": u"謁",
u"邺": u"鄴",
u"晔": u"曄",
u"烨": u"燁",
u"诒": u"詒",
u"呓": u"囈",
u"峄": u"嶧",
u"饴": u"飴",
u"怿": u"懌",
u"驿": u"驛",
u"缢": u"縊",
u"轶": u"軼",
u"贻": u"貽",
u"钇": u"釔",
u"镒": u"鎰",
u"镱": u"鐿",
u"瘗": u"瘞",
u"舣": u"艤",
u"铟": u"銦",
u"瘾": u"癮",
u"茔": u"塋",
u"莺": u"鶯",
u"萦": u"縈",
u"蓥": u"鎣",
u"撄": u"攖",
u"嘤": u"嚶",
u"滢": u"瀅",
u"潆": u"瀠",
u"璎": u"瓔",
u"鹦": u"鸚",
u"瘿": u"癭",
u"颏": u"頦",
u"罂": u"罌",
u"镛": u"鏞",
u"莸": u"蕕",
u"铕": u"銪",
u"鱿": u"魷",
u"伛": u"傴",
u"俣": u"俁",
u"谀": u"諛",
u"谕": u"諭",
u"蓣": u"蕷",
u"嵛": u"崳",
u"饫": u"飫",
u"阈": u"閾",
u"妪": u"嫗",
u"纡": u"紆",
u"觎": u"覦",
u"欤": u"歟",
u"钰": u"鈺",
u"鹆": u"鵒",
u"鹬": u"鷸",
u"龉": u"齬",
u"橼": u"櫞",
u"鸢": u"鳶",
u"鼋": u"黿",
u"钺": u"鉞",
u"郓": u"鄆",
u"芸": u"蕓",
u"恽": u"惲",
u"愠": u"慍",
u"纭": u"紜",
u"韫": u"韞",
u"殒": u"殞",
u"氲": u"氳",
u"瓒": u"瓚",
u"趱": u"趲",
u"錾": u"鏨",
u"驵": u"駔",
u"赜": u"賾",
u"啧": u"嘖",
u"帻": u"幘",
u"箦": u"簀",
u"谮": u"譖",
u"缯": u"繒",
u"谵": u"譫",
u"诏": u"詔",
u"钊": u"釗",
u"谪": u"謫",
u"辄": u"輒",
u"鹧": u"鷓",
u"浈": u"湞",
u"缜": u"縝",
u"桢": u"楨",
u"轸": u"軫",
u"赈": u"賑",
u"祯": u"禎",
u"鸩": u"鴆",
u"诤": u"諍",
u"峥": u"崢",
u"钲": u"鉦",
u"铮": u"錚",
u"筝": u"箏",
u"骘": u"騭",
u"栉": u"櫛",
u"栀": u"梔",
u"轵": u"軹",
u"轾": u"輊",
u"贽": u"贄",
u"鸷": u"鷙",
u"蛳": u"螄",
u"絷": u"縶",
u"踬": u"躓",
u"踯": u"躑",
u"觯": u"觶",
u"锺": u"鍾",
u"纣": u"紂",
u"绉": u"縐",
u"伫": u"佇",
u"槠": u"櫧",
u"铢": u"銖",
u"啭": u"囀",
u"馔": u"饌",
u"颞": u"顳",
u"骓": u"騅",
u"缒": u"縋",
u"诼": u"諑",
u"镯": u"鐲",
u"谘": u"諮",
u"缁": u"緇",
u"辎": u"輜",
u"赀": u"貲",
u"眦": u"眥",
u"锱": u"錙",
u"龇": u"齜",
u"鲻": u"鯔",
u"偬": u"傯",
u"诹": u"諏",
u"驺": u"騶",
u"鲰": u"鯫",
u"镞": u"鏃",
u"缵": u"纘",
u"躜": u"躦",
u"鳟": u"鱒",
u"讠": u"訁",
u"谫": u"譾",
u"郄": u"郤",
u"勐": u"猛",
u"凼": u"氹",
u"坂": u"阪",
u"垅": u"壟",
u"垴": u"堖",
u"埯": u"垵",
u"埝": u"墊",
u"苘": u"檾",
u"荬": u"蕒",
u"荮": u"葤",
u"莜": u"蓧",
u"莼": u"蒓",
u"菰": u"菇",
u"藁": u"槁",
u"揸": u"摣",
u"吒": u"咤",
u"吣": u"唚",
u"咔": u"哢",
u"咝": u"噝",
u"咴": u"噅",
u"噘": u"撅",
u"噼": u"劈",
u"嚯": u"謔",
u"幞": u"襆",
u"岙": u"嶴",
u"嵴": u"脊",
u"彷": u"仿",
u"徼": u"僥",
u"犸": u"獁",
u"狍": u"麅",
u"馀": u"餘",
u"馇": u"餷",
u"馓": u"饊",
u"馕": u"饢",
u"愣": u"楞",
u"憷": u"怵",
u"懔": u"懍",
u"丬": u"爿",
u"溆": u"漵",
u"滟": u"灩",
u"溷": u"混",
u"漤": u"濫",
u"潴": u"瀦",
u"澹": u"淡",
u"甯": u"寧",
u"纟": u"糸",
u"绔": u"絝",
u"绱": u"緔",
u"珉": u"瑉",
u"枧": u"梘",
u"桊": u"棬",
u"桉": u"案",
u"槔": u"橰",
u"橥": u"櫫",
u"轱": u"軲",
u"轷": u"軤",
u"赍": u"賫",
u"肷": u"膁",
u"胨": u"腖",
u"飚": u"飈",
u"煳": u"糊",
u"煅": u"煆",
u"熘": u"溜",
u"愍": u"湣",
u"淼": u"渺",
u"砜": u"碸",
u"磙": u"滾",
u"眍": u"瞘",
u"钚": u"鈈",
u"钷": u"鉕",
u"铘": u"鋣",
u"铞": u"銱",
u"锃": u"鋥",
u"锍": u"鋶",
u"锎": u"鐦",
u"锏": u"鐧",
u"锘": u"鍩",
u"锝": u"鍀",
u"锪": u"鍃",
u"锫": u"錇",
u"锿": u"鎄",
u"镅": u"鎇",
u"镎": u"鎿",
u"镢": u"鐝",
u"镥": u"鑥",
u"镩": u"鑹",
u"镲": u"鑔",
u"稆": u"穭",
u"鹋": u"鶓",
u"鹛": u"鶥",
u"鹱": u"鸌",
u"疬": u"癧",
u"疴": u"屙",
u"痖": u"瘂",
u"癯": u"臒",
u"裥": u"襇",
u"襁": u"繈",
u"耢": u"耮",
u"颥": u"顬",
u"螨": u"蟎",
u"麴": u"麯",
u"鲅": u"鮁",
u"鲆": u"鮃",
u"鲇": u"鮎",
u"鲞": u"鯗",
u"鲴": u"鯝",
u"鲺": u"鯴",
u"鲼": u"鱝",
u"鳊": u"鯿",
u"鳋": u"鰠",
u"鳘": u"鰵",
u"鳙": u"鱅",
u"鞒": u"鞽",
u"鞴": u"韝"
}
_mapFromCht = None
import codecs
from StringIO import StringIO
__all__ = ['cht_encode', 'cht_decode']
def cht_encode(input, errors='strict'):
    # `errors` is accepted for codec-API compatibility (Codec.encode below
    # passes it); the character map cannot fail, so it is ignored.
    s = StringIO()
    for i in (input if isinstance(input, unicode) else input.decode("UTF-8")):
        s.write(_mapToCht.get(i, i))
    return (s.getvalue(), len(input))
def cht_decode(input, errors='strict'):
    global _mapFromCht
    if not _mapFromCht:
        # Build the reverse (traditional -> simplified) map lazily.  Where
        # several simplified characters share one traditional form, the last
        # pair wins, so decoding is lossy for those characters.
        _mapFromCht = dict((v, k) for (k, v) in _mapToCht.iteritems())
    s = StringIO()
    for i in (input if isinstance(input, unicode) else input.decode("UTF-8")):
        s.write(_mapFromCht.get(i, i))
    return (s.getvalue(), len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return cht_encode(input, errors)
def decode(self, input, errors='strict'):
return cht_decode(input, errors)
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def search_function(encoding):
if encoding == "cht":
return (cht_encode, cht_decode, StreamReader, StreamWriter)
return None
codecs.register(search_function)
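# Usage sketch (illustrative addition, not part of the original module;
# Python 2 semantics, matching the code above).  The two functions simply
# transliterate character by character:
#
#     text, n = cht_encode(u"体题")   # -> (u"體題", 2)
#     back, _ = cht_decode(text)      # -> (u"体题", 2)
#
# Importing this module also registers the codec under the name "cht"
# (codecs.register above), so it can be looked up with codecs.lookup("cht").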
| gpl-2.0 | -7,163,460,249,237,643,000 | 12.095268 | 66 | 0.292067 | false |
brinkframework/brink | setup.py | 1 | 1259 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="brink",
version="0.3.0",
description="A simple real time web framework based on\
aiohttp and RethinkDB.",
long_description=long_description,
url="https://github.com/lohmander/brink",
author="CH Lohmander",
author_email="[email protected]",
license="BSD-3",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.6",
],
keywords="sample setuptools development",
packages=find_packages(exclude=["contrib", "docs", "tests"]),
install_requires=["aiohttp", "aiohttp_autoreload",
"rethinkdb", "inflection", "pycolor"],
setup_requires=["pytest-runner"],
tests_require=["pytest", "pytest-asyncio"],
extras_require={
"dev": ["check-manifest"],
"test": ["coverage"],
},
scripts=["bin/brink"],
)
| bsd-3-clause | 4,129,899,232,461,795,300 | 25.229167 | 65 | 0.615568 | false |
suhelhammoud/omr | src/Configuration.py | 1 | 4194 | import numpy as np
import math
class Section:
"""region"""
def __init__(self, x1, y1, x2, y2):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
def crop(self, img):
return img[self.y1: self.y2, self.x1: self.x2]
def coordinates(self):
return self.x1, self.y1, self.x2, self.y2
def translate(self, dx, dy):
'''returns new section transformed into new coordinates'''
return Section(self.x1 + dx, self.y1 + dy,
self.x2 + dx, self.y2 + dy)
def height(self):
return self.y2 - self.y1
@staticmethod
def of(section, shift=None):
x1, y1, x2, y2 = section.coordinates()
if shift is None:
return Section(x1, y1, x2, y2)
elif len(shift) == 2: # [dx,dy]
dx, dy = shift
return Section(x1 - dx, y1 - dy, x1 + dx, y2 + dy)
else: # [dx1, dy1, dx2, dy2]
return Section(x1 + shift[0], y1 + shift[1], x2 + shift[2], y2 + shift[3])
class OmrConfiguration:
rshape = [1000, 1500]
sec_id = Section(260, 35, 485, 333)
sec_type = Section(478, 35, 566, 246)
sec_answers = Section(15, 260, 500, 1270)
sec_one = Section(15, 260, 265, 1270)
sec_two = Section(260, 260, 500, 1270)
y_step = 20
y_window = 100
marker_x0_bound = 0
marker_x1_bound = 55
# sec_marker = Section(0, 0, marker_r_shift - marker_l_shift, rshape[1])
sec_marker_column = Section(marker_x0_bound, 0, marker_x1_bound, rshape[1])
num_markers = 63
marker_filter_median_blur = 3
marker_y_padding_top = 45
marker_y_padding_down = rshape[1] - 30
marker_smooth_window = 110
marker_threshold_spacing = 2
marker_height_range = range(3, 12)
marker_space_range = range(20, 25)
marker_width_range = range(7, 27)
# top_marker = Section(0, -5, 300, 15)
sec_marker = Section(0, -3, 70, 12)
sec_marker_shift = [0, -20, 237, 20]
marker_calibre_range = (195, 205)
conf = OmrConfiguration
class Marker:
def __init__(self, y0, y1, x0=None, x1=None, id=None):
assert y1 > y0
self.y0 = y0
self.y1 = y1
self.x0 = x0
self.x1 = x1
self.id = id
self.shift_y = 0
def set_id(self, id):
self.id = id
return self
def set_shift_y(self, dy):
self.shift_y = dy
def translate(self, dx, dy):
'''returns new section transformed into new coordinates'''
return Marker(self.y0 + dy, self.y1 + dy,
self.x0 + dx, self.x1 + dx, self.id)
def coordinates(self):
return self.x0, self.y0, self.x1, self.y1
def center_y(self):
return (self.y0 + self.y1) / 2
def height(self):
return self.y1 - self.y0
def is_in_h_range(self, h_r=conf.marker_height_range):
return (self.y1 - self.y0) in h_r
def is_lower_than(self, that):
return self.x0 > that.x1
def is_in_h_space(self, that, space=conf.marker_space_range):
upper, lower = Marker.upper_lower(self, that)
return (lower.y0 - upper.y0) in space \
and (lower.y1 - upper.y1) in space
def __repr__(self):
return 'Marker (id:{}, y0:{}, y1:{}, x0:{}, x1:{})' \
.format(self.id, self.y0, self.y1, self.x0, self.x1)
def y0_y1_shift(self):
return self.y0, self.y1, self.shift_y
def set_x0_x1(self, x0, x1):
self.x0 = x0
self.x1 = x1
def x0_x1(self):
return self.x0, self.x1
@staticmethod
def upper_lower(m1, m2):
if m2.is_lower_than(m1):
return m1, m2
else:
return m2, m1
@staticmethod
def can_acept(y0, y1):
return y0 > conf.marker_y_padding_top \
and y1 < conf.marker_y_padding_down \
and y1 - y0 in conf.marker_height_range
def is_valid_marker(marker):
    if marker.y0 < conf.marker_y_padding_top \
            or marker.y1 > conf.marker_y_padding_down:
        return False
    if marker.height() not in conf.marker_height_range:
        return False
    return True
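# Illustrative example (an addition, not part of the original module):
# cropping the answers region out of a hypothetical sheet image `img` that
# has already been resized to the reference shape (rshape[0] wide by
# rshape[1] tall):
#
#     answers = OmrConfiguration.sec_answers.crop(img)
#     shifted = OmrConfiguration.sec_answers.translate(10, 0)
#     x1, y1, x2, y2 = shifted.coordinates()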
| apache-2.0 | 3,492,936,413,018,460,700 | 26.233766 | 86 | 0.552933 | false |
stscieisenhamer/glue | glue/core/subset.py | 1 | 34912 | from __future__ import absolute_import, division, print_function
import uuid
import numbers
import operator
import numpy as np
from glue.external import six
from glue.external.six import PY3
from glue.core.roi import CategoricalROI
from glue.core.contracts import contract
from glue.core.util import split_component_view
from glue.core.registry import Registry
from glue.core.exceptions import IncompatibleAttribute
from glue.core.message import SubsetDeleteMessage, SubsetUpdateMessage
from glue.core.decorators import memoize
from glue.core.visual import VisualAttributes
from glue.config import settings
from glue.utils import view_shape, broadcast_to
__all__ = ['Subset', 'SubsetState', 'RoiSubsetState', 'CategoricalROISubsetState',
'RangeSubsetState', 'MultiRangeSubsetState', 'CompositeSubsetState',
'OrState', 'AndState', 'XorState', 'InvertState', 'MaskSubsetState', 'CategorySubsetState',
'ElementSubsetState', 'InequalitySubsetState', 'combine_multiple',
'CategoricalMultiRangeSubsetState', 'CategoricalROISubsetState2D']
OPSYM = {operator.ge: '>=', operator.gt: '>',
operator.le: '<=', operator.lt: '<',
operator.and_: '&', operator.or_: '|',
operator.xor: '^', operator.eq: '==',
operator.ne: '!='}
SYMOP = dict((v, k) for k, v in OPSYM.items())
class Subset(object):
"""Base class to handle subsets of data.
These objects both describe subsets of a dataset, and relay any
state changes to the hub that their parent data are assigned to.
    This base class only directly implements the logic that relays
state changes back to the hub. Subclasses implement the actual
description and manipulation of data subsets
:param data:
The dataset that this subset describes
:type data: :class:`~glue.core.data.Data`
"""
@contract(data='isinstance(Data)|None',
color='color',
alpha=float,
label='string|None')
def __init__(self, data, color=settings.SUBSET_COLORS[0], alpha=0.5, label=None):
""" Create a new subset object.
Note: the preferred way for creating subsets is
via DataCollection.new_subset_group. Manually-instantiated
subsets will probably *not* be represented properly by the UI
"""
self._broadcasting = False # must be first def
self.data = data
self.label = label # trigger disambiguation
self.subset_state = SubsetState() # calls proper setter method
self.style = VisualAttributes(parent=self)
self.style.markersize *= 1.5
self.style.color = color
self.style.alpha = alpha
# We assign a UUID which can then be used for example in equations
# for derived components - the idea is that this doesn't change over
# the life cycle of glue, so it is a more reliable way to refer to
# components in strings than using labels
self._uuid = str(uuid.uuid4())
@property
def uuid(self):
return self._uuid
@property
def subset_state(self):
return self._subset_state
@subset_state.setter
def subset_state(self, state):
if isinstance(state, np.ndarray):
if self.data.shape != state.shape:
raise ValueError("Shape of mask doesn't match shape of data")
cids = self.data.pixel_component_ids
state = MaskSubsetState(state, cids)
if not isinstance(state, SubsetState):
raise TypeError("State must be a SubsetState instance or array")
self._subset_state = state
@property
def style(self):
return self._style
@style.setter
@contract(value=VisualAttributes)
def style(self, value):
value.parent = self
self._style = value
@property
def label(self):
""" Convenience access to subset's label """
return self._label
@label.setter
def label(self, value):
"""Set the subset's label
Subset labels within a data object must be unique. The input
will be auto-disambiguated if necessary
"""
value = Registry().register(self, value, group=self.data)
self._label = value
@property
def attributes(self):
"""
Returns a tuple of the ComponentIDs that this subset
depends upon
"""
return self.subset_state.attributes
def register(self):
""" Register a subset to its data, and start broadcasting
state changes
"""
self.data.add_subset(self)
self.do_broadcast(True)
@contract(returns='array[N]')
def to_index_list(self):
"""
Convert the current subset to a list of indices. These index
the elements in the (flattened) data object that belong to the subset.
If x is the numpy array corresponding to some component.data,
the two following statements are equivalent::
x.flat[subset.to_index_list()]
x[subset.to_mask()]
Returns:
A numpy array, giving the indices of elements in the data that
belong to this subset.
Raises:
IncompatibleDataException: if an index list cannot be created
for the requested data set.
"""
try:
return self.subset_state.to_index_list(self.data)
except IncompatibleAttribute as exc:
try:
return self._to_index_list_join()
except IncompatibleAttribute:
raise exc
def _to_index_list_join(self):
return np.where(self._to_mask_join(None).flat)[0]
def _to_mask_join(self, view):
"""
Convert the subset to a mask through an entity join to another
dataset.
"""
for other, (cid1, cid2) in self.data._key_joins.items():
if getattr(other, '_recursing', False):
continue
try:
self.data._recursing = True
s2 = Subset(other)
s2.subset_state = self.subset_state
mask_right = s2.to_mask()
except IncompatibleAttribute:
continue
finally:
self.data._recursing = False
if len(cid1) == 1 and len(cid2) == 1:
key_left = self.data[cid1[0], view]
key_right = other[cid2[0], mask_right]
mask = np.in1d(key_left.ravel(), key_right.ravel())
return mask.reshape(key_left.shape)
elif len(cid1) == len(cid2):
key_left_all = []
key_right_all = []
for cid1_i, cid2_i in zip(cid1, cid2):
key_left_all.append(self.data[cid1_i, view].ravel())
key_right_all.append(other[cid2_i, mask_right].ravel())
# TODO: The following is slow because we are looping in Python.
# This could be made significantly faster by switching to
# C/Cython.
key_left_all = zip(*key_left_all)
key_right_all = set(zip(*key_right_all))
result = [key in key_right_all for key in key_left_all]
result = np.array(result)
return result.reshape(self.data[cid1_i, view].shape)
elif len(cid1) == 1:
key_left = self.data[cid1[0], view].ravel()
mask = np.zeros_like(key_left, dtype=bool)
for cid2_i in cid2:
key_right = other[cid2_i, mask_right].ravel()
mask |= np.in1d(key_left, key_right)
return mask.reshape(self.data[cid1[0], view].shape)
elif len(cid2) == 1:
key_right = other[cid2[0], mask_right].ravel()
mask = np.zeros_like(self.data[cid1[0], view].ravel(), dtype=bool)
for cid1_i in cid1:
key_left = self.data[cid1_i, view].ravel()
mask |= np.in1d(key_left, key_right)
return mask.reshape(self.data[cid1[0], view].shape)
else:
raise Exception("Either the number of components in the key join sets "
"should match, or one of the component sets should ",
"contain a single component.")
raise IncompatibleAttribute
@contract(view='array_view', returns='array')
def to_mask(self, view=None):
"""
Convert the current subset to a mask.
:param view: An optional view into the dataset (e.g. a slice)
If present, the mask will pertain to the view and not the
entire dataset.
        :returns:
            A boolean numpy array, the same shape as the data, that
            defines whether each element belongs to the subset.
"""
try:
mask = self.subset_state.to_mask(self.data, view)
return mask
except IncompatibleAttribute as exc:
return self._to_mask_join(view)
@contract(value=bool)
def do_broadcast(self, value):
"""
Set whether state changes to the subset are relayed to a hub.
        It can be useful to turn off broadcasting when modifying the
        subset in ways that don't impact any of the clients.

        :param value: Whether the subset should broadcast state changes (True/False)
"""
object.__setattr__(self, '_broadcasting', value)
@contract(attribute='string')
def broadcast(self, attribute):
"""
Explicitly broadcast a SubsetUpdateMessage to the hub
:param attribute:
The name of the attribute (if any) that should be
broadcast as updated.
:type attribute: ``str``
"""
if not hasattr(self, 'data') or not hasattr(self.data, 'hub'):
return
if self._broadcasting and self.data.hub:
msg = SubsetUpdateMessage(self, attribute=attribute)
self.data.hub.broadcast(msg)
def delete(self):
"""Broadcast a SubsetDeleteMessage to the hub, and stop broadcasting
Also removes subset reference from parent data's subsets list
"""
dobroad = self._broadcasting and self.data is not None and \
self.data.hub is not None
self.do_broadcast(False)
if self.data is not None and self in self.data.subsets:
self.data._subsets.remove(self)
if dobroad:
msg = SubsetDeleteMessage(self)
self.data.hub.broadcast(msg)
Registry().unregister(self, group=self.data)
@contract(file_name='string')
def write_mask(self, file_name, format="fits"):
""" Write a subset mask out to file
:param file_name: name of file to write to
:param format:
Name of format to write to. Currently, only "fits" is
supported
"""
mask = np.short(self.to_mask())
if format == 'fits':
from astropy.io import fits
try:
fits.writeto(file_name, mask, overwrite=True)
except TypeError:
fits.writeto(file_name, mask, clobber=True)
else:
raise AttributeError("format not supported: %s" % format)
@contract(file_name='string')
def read_mask(self, file_name):
try:
from astropy.io import fits
with fits.open(file_name) as hdulist:
mask = hdulist[0].data
except IOError:
raise IOError("Could not read %s (not a fits file?)" % file_name)
ind = np.where(mask.flat)[0]
state = ElementSubsetState(indices=ind)
self.subset_state = state
def __del__(self):
self.delete()
def __setattr__(self, attribute, value):
object.__setattr__(self, attribute, value)
if not attribute.startswith('_'):
self.broadcast(attribute)
def __getitem__(self, view):
""" Retrieve the elements from a data view within the subset
        :param view: View of the data. See data.__getitem__ for details
"""
c, v = split_component_view(view)
ma = self.to_mask(v)
return self.data[view][ma]
@contract(other_subset='isinstance(Subset)')
def paste(self, other_subset):
"""paste subset state from other_subset onto self """
state = other_subset.subset_state.copy()
self.subset_state = state
def __str__(self):
dlabel = "(no data)"
if self.data is not None:
dlabel = "(data: %s)" % self.data.label
slabel = "Subset: (no label)"
if self.label:
slabel = "Subset: %s" % self.label
return "%s %s" % (slabel, dlabel)
def __repr__(self):
return self.__str__()
@contract(other='isinstance(Subset)', returns='isinstance(Subset)')
def __or__(self, other):
return _combine([self, other], operator.or_)
@contract(other='isinstance(Subset)', returns='isinstance(Subset)')
def __and__(self, other):
return _combine([self, other], operator.and_)
@contract(returns='isinstance(Subset)')
def __invert__(self):
return _combine([self], operator.invert)
@contract(other='isinstance(Subset)', returns='isinstance(Subset)')
def __xor__(self, other):
return _combine([self, other], operator.xor)
def __eq__(self, other):
if not isinstance(other, Subset):
return False
# XXX need to add equality specification for subset states
if self is other:
return True
return (self.subset_state == other.subset_state and
self.style == other.style)
def state_as_mask(self):
"""
Convert the current SubsetState to a MaskSubsetState
"""
try:
m = self.to_mask()
except IncompatibleAttribute:
m = np.zeros(self.data.shape, dtype=np.bool)
cids = self.data.pixel_component_ids
return MaskSubsetState(m, cids)
# In Python 2 we need to do this explicitly
def __ne__(self, other):
return not self.__eq__(other)
# In Python 3, if __eq__ is defined, then __hash__ has to be re-defined
if PY3:
__hash__ = object.__hash__
# Provide convenient access to Data methods/properties that make sense
# here too.
def component_ids(self):
return self.data.component_ids()
@property
def components(self):
return self.data.components
@property
def derived_components(self):
return self.data.derived_components
@property
def primary_components(self):
return self.data.primary_components
@property
def visible_components(self):
return self.data.visible_components
@property
def pixel_component_ids(self):
return self.data.pixel_component_ids
@property
def world_component_ids(self):
return self.data.world_component_ids
@property
def ndim(self):
return self.data.ndim
@property
def shape(self):
return self.data.shape
@property
def size(self):
return self.data.size
@property
def hub(self):
return self.data.hub
class SubsetState(object):
def __init__(self):
pass
@property
def attributes(self):
return tuple()
@property
def subset_state(self): # convenience method, mimic interface of Subset
return self
@contract(data='isinstance(Data)')
def to_index_list(self, data):
return np.where(self.to_mask(data).flat)[0]
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
shp = view_shape(data.shape, view)
return np.zeros(shp, dtype=bool)
@contract(returns='isinstance(SubsetState)')
def copy(self):
return SubsetState()
@contract(other_state='isinstance(SubsetState)',
returns='isinstance(SubsetState)')
def __or__(self, other_state):
return OrState(self, other_state)
@contract(other_state='isinstance(SubsetState)',
returns='isinstance(SubsetState)')
def __and__(self, other_state):
return AndState(self, other_state)
@contract(returns='isinstance(SubsetState)')
def __invert__(self):
return InvertState(self)
@contract(other_state='isinstance(SubsetState)',
returns='isinstance(SubsetState)')
def __xor__(self, other_state):
return XorState(self, other_state)
class RoiSubsetState(SubsetState):
@contract(xatt='isinstance(ComponentID)', yatt='isinstance(ComponentID)')
def __init__(self, xatt=None, yatt=None, roi=None):
super(RoiSubsetState, self).__init__()
self.xatt = xatt
self.yatt = yatt
self.roi = roi
@property
def attributes(self):
return (self.xatt, self.yatt)
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
# TODO: make sure that pixel components don't actually take up much
# memory and are just views
x = data[self.xatt, view]
y = data[self.yatt, view]
if (x.ndim == data.ndim and
self.xatt in data.pixel_component_ids and
self.yatt in data.pixel_component_ids):
# This is a special case - the ROI is defined in pixel space, so we
# can apply it to a single slice and then broadcast it to all other
# dimensions. We start off by extracting a slice which takes only
# the first elements of all dimensions except the attributes in
# question, for which we take all the elements. We need to preserve
# the dimensionality of the array, hence the use of slice(0, 1).
# Note that we can only do this if the view (if present) preserved
# the dimensionality, which is why we checked that x.ndim == data.ndim
subset = []
for i in range(data.ndim):
if i == self.xatt.axis or i == self.yatt.axis:
subset.append(slice(None))
else:
subset.append(slice(0, 1))
x_slice = x[subset]
y_slice = y[subset]
if self.roi.defined():
result = self.roi.contains(x_slice, y_slice)
else:
result = np.zeros(x_slice.shape, dtype=bool)
result = broadcast_to(result, x.shape)
else:
if self.roi.defined():
result = self.roi.contains(x, y)
else:
result = np.zeros(x.shape, dtype=bool)
if result.shape != x.shape:
raise ValueError("Unexpected error: boolean mask has incorrect dimensions")
return result
def copy(self):
result = RoiSubsetState()
result.xatt = self.xatt
result.yatt = self.yatt
result.roi = self.roi
return result
class CategoricalROISubsetState(SubsetState):
def __init__(self, att=None, roi=None):
super(CategoricalROISubsetState, self).__init__()
self.att = att
self.roi = roi
@property
def attributes(self):
        return (self.att,)
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
x = data.get_component(self.att)._categorical_data[view]
result = self.roi.contains(x, None)
assert x.shape == result.shape
return result.ravel()
def copy(self):
result = CategoricalROISubsetState()
result.att = self.att
result.roi = self.roi
return result
@staticmethod
def from_range(component, att, lo, hi):
roi = CategoricalROI.from_range(component, lo, hi)
subset = CategoricalROISubsetState(roi=roi,
att=att)
return subset
def __gluestate__(self, context):
return dict(att=context.id(self.att),
roi=context.id(self.roi))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(att=context.object(rec['att']), roi=context.object(rec['roi']))
class RangeSubsetState(SubsetState):
def __init__(self, lo, hi, att=None):
super(RangeSubsetState, self).__init__()
self.lo = lo
self.hi = hi
self.att = att
@property
def attributes(self):
return (self.att,)
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
x = data[self.att, view]
result = (x >= self.lo) & (x <= self.hi)
return result
def copy(self):
return RangeSubsetState(self.lo, self.hi, self.att)
class MultiRangeSubsetState(SubsetState):
"""
A subset state defined by multiple discontinuous ranges
Parameters
----------
pairs : list
A list of (lo, hi) tuples
"""
def __init__(self, pairs, att=None):
super(MultiRangeSubsetState, self).__init__()
self.pairs = pairs
self.att = att
@property
def attributes(self):
return (self.att,)
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
x = data[self.att, view]
result = np.zeros_like(x, dtype=bool)
for lo, hi in self.pairs:
result |= (x >= lo) & (x <= hi)
return result
def copy(self):
return MultiRangeSubsetState(self.pairs, self.att)
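# Illustrative example (an addition, not part of the original module):
# selecting two discontinuous ranges of a hypothetical component id `cid`
# on a glue Data object `data`:
#
#     state = MultiRangeSubsetState([(0.0, 1.0), (5.0, 6.0)], att=cid)
#     mask = state.to_mask(data)   # True where 0 <= x <= 1 or 5 <= x <= 6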
class CategoricalROISubsetState2D(SubsetState):
"""
A 2D subset state where both attributes are categorical.
Parameters
----------
categories : dict
A dictionary containing for each label of one categorical component an
        iterable of labels for the other categorical component (using sets will
provide the best performance)
att1 : :class:`~glue.core.component_id.ComponentID`
The component ID matching the keys of the ``categories`` dictionary
att2 : :class:`~glue.core.component_id.ComponentID`
The component ID matching the values of the ``categories`` dictionary
"""
def __init__(self, categories, att1, att2):
self.categories = categories
self.att1 = att1
self.att2 = att2
@property
def attributes(self):
return (self.att1, self.att2)
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
# Extract categories and numerical values
labels1 = data.get_component(self.att1).labels
labels2 = data.get_component(self.att2).labels
if view is not None:
labels1 = labels1[view]
labels2 = labels2[view]
# Initialize empty mask
mask = np.zeros(labels1.shape, dtype=bool)
# A loop over all values here is actually reasonably efficient compared
# to alternatives. Any improved implementation, even vectorized, should
# ensure that it is more efficient for large numbers of categories and
# values.
for i in range(len(labels1)):
if labels1[i] in self.categories:
if labels2[i] in self.categories[labels1[i]]:
mask[i] = True
return mask
def copy(self):
result = CategoricalROISubsetState2D(self.categories,
self.att1, self.att2)
return result
def __gluestate__(self, context):
return dict(categories=self.categories,
att1=context.id(self.att1),
att2=context.id(self.att2))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(categories=rec['categories'],
att1=context.object(rec['att1']),
att2=context.object(rec['att2']))
class CategoricalMultiRangeSubsetState(SubsetState):
"""
A 2D subset state where one attribute is categorical and the other is
numerical, and where for each category, there are multiple possible subset
ranges.
Parameters
----------
ranges : dict
A dictionary containing for each category (key), a list of tuples
giving the ranges of values for the numerical attribute.
cat_att : :class:`~glue.core.component_id.ComponentID`
The component ID for the categorical attribute
num_att : :class:`~glue.core.component_id.ComponentID`
The component ID for the numerical attribute
"""
def __init__(self, ranges, cat_att, num_att):
self.ranges = ranges
self.cat_att = cat_att
self.num_att = num_att
@property
def attributes(self):
        return (self.cat_att, self.num_att)
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
# Extract categories and numerical values
labels = data.get_component(self.cat_att).labels
values = data[self.num_att]
if view is not None:
labels = labels[view]
values = values[view]
# Initialize empty mask
mask = np.zeros(values.shape, dtype=bool)
# A loop over all values here is actually reasonably efficient compared
# to alternatives. Any improved implementation, even vectorized, should
# ensure that it is more efficient for large numbers of categories and
# values. For example, using 10000 categories and 1000000 data points
# takes 1.2 seconds on a laptop.
for i in range(len(values)):
if labels[i] in self.ranges:
for lo, hi in self.ranges[labels[i]]:
if values[i] >= lo and values[i] <= hi:
mask[i] = True
break
return mask
def copy(self):
result = CategoricalMultiRangeSubsetState(self.ranges,
self.cat_att,
self.num_att)
return result
def __gluestate__(self, context):
return dict(ranges=self.ranges,
cat_att=context.id(self.cat_att),
num_att=context.id(self.num_att))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(ranges=rec['ranges'],
cat_att=context.object(rec['cat_att']),
num_att=context.object(rec['num_att']))
class CompositeSubsetState(SubsetState):
op = None
def __init__(self, state1, state2=None):
super(CompositeSubsetState, self).__init__()
self.state1 = state1.copy()
if state2:
state2 = state2.copy()
self.state2 = state2
def copy(self):
return type(self)(self.state1, self.state2)
@property
def attributes(self):
att = self.state1.attributes
if self.state2 is not None:
att += self.state2.attributes
return tuple(sorted(set(att)))
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
return self.op(self.state1.to_mask(data, view),
self.state2.to_mask(data, view))
def __str__(self):
sym = OPSYM.get(self.op, self.op)
return "(%s %s %s)" % (self.state1, sym, self.state2)
class OrState(CompositeSubsetState):
op = operator.or_
class AndState(CompositeSubsetState):
op = operator.and_
class XorState(CompositeSubsetState):
op = operator.xor
class InvertState(CompositeSubsetState):
@memoize
@contract(data='isinstance(Data)', view='array_view')
def to_mask(self, data, view=None):
return ~self.state1.to_mask(data, view)
def __str__(self):
return "(~%s)" % self.state1
class MaskSubsetState(SubsetState):
"""
A subset defined by boolean pixel mask
"""
def __init__(self, mask, cids):
"""
:param cids: List of ComponentIDs, defining the pixel coordinate space of the mask
:param mask: Boolean ndarray
"""
self.cids = cids
self.mask = np.asarray(mask, dtype=bool)
def copy(self):
return MaskSubsetState(self.mask, self.cids)
def to_mask(self, data, view=None):
view = view or slice(None)
# shortcut for data on the same pixel grid
if data.pixel_component_ids == self.cids:
return self.mask[view].copy()
# locate each element of data in the coordinate system of the mask
vals = [data[c, view].astype(np.int) for c in self.cids]
result = self.mask[vals]
for v, n in zip(vals, data.shape):
result &= ((v >= 0) & (v < n))
return result
def __gluestate__(self, context):
return dict(cids=[context.id(c) for c in self.cids],
mask=context.do(self.mask))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(context.object(rec['mask']),
[context.object(c) for c in rec['cids']])
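# Illustrative example (an addition, not part of the original module):
# wrapping a hypothetical boolean array `image_mask` (same shape as `data`)
# so it can be used as a subset state:
#
#     state = MaskSubsetState(image_mask, data.pixel_component_ids)
#     mask = state.to_mask(data)   # returns a copy of image_mask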
class CategorySubsetState(SubsetState):
def __init__(self, attribute, values):
super(CategorySubsetState, self).__init__()
self._attribute = attribute
self._values = np.asarray(values).ravel()
@memoize
def to_mask(self, data, view=None):
vals = data[self._attribute, view]
result = np.in1d(vals.ravel(), self._values)
return result.reshape(vals.shape)
def copy(self):
return CategorySubsetState(self._attribute, self._values.copy())
def __gluestate__(self, context):
return dict(att=context.id(self._attribute),
vals=context.do(self._values))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(context.object(rec['att']),
context.object(rec['vals']))
class ElementSubsetState(SubsetState):
def __init__(self, indices=None, data=None):
super(ElementSubsetState, self).__init__()
self._indices = indices
if data is None:
self._data_uuid = None
else:
self._data_uuid = data.uuid
@memoize
def to_mask(self, data, view=None):
if data.uuid == self._data_uuid or self._data_uuid is None:
# XXX this is inefficient for views
result = np.zeros(data.shape, dtype=bool)
if self._indices is not None:
try:
result.flat[self._indices] = True
except IndexError:
if self._data_uuid is None:
raise IncompatibleAttribute()
else:
raise
if view is not None:
result = result[view]
return result
else:
raise IncompatibleAttribute()
def copy(self):
state = ElementSubsetState(indices=self._indices)
state._data_uuid = self._data_uuid
return state
def __gluestate__(self, context):
return dict(indices=context.do(self._indices),
data_uuid=self._data_uuid)
@classmethod
def __setgluestate__(cls, rec, context):
state = cls(indices=context.object(rec['indices']))
try:
state._data_uuid = rec['data_uuid']
except KeyError: # BACKCOMPAT
pass
return state
class InequalitySubsetState(SubsetState):
def __init__(self, left, right, op):
from glue.core.component_link import ComponentLink
super(InequalitySubsetState, self).__init__()
from glue.core.data import ComponentID
valid_ops = [operator.gt, operator.ge,
operator.lt, operator.le,
operator.eq, operator.ne]
if op not in valid_ops:
raise TypeError("Invalid boolean operator: %s" % op)
if not isinstance(left, (ComponentID, numbers.Number,
ComponentLink, six.string_types)):
raise TypeError("Input must be ComponentID or NumberType or string: %s"
% type(left))
if not isinstance(right, (ComponentID, numbers.Number,
ComponentLink, six.string_types)):
raise TypeError("Input must be ComponentID or NumberType or string: %s"
% type(right))
self._left = left
self._right = right
self._operator = op
@property
def left(self):
return self._left
@property
def right(self):
return self._right
@property
def operator(self):
return self._operator
@memoize
def to_mask(self, data, view=None):
# FIXME: the default view in glue should be ... not None, because
# if x is a Numpy array, x[None] has one more dimension than x. For
# now we just fix this for the scope of this method.
if view is None:
view = Ellipsis
if isinstance(self._left, (numbers.Number, six.string_types)):
left = self._left
else:
try:
comp = data.get_component(self._left)
except IncompatibleAttribute:
left = data[self._left, view]
else:
if comp.categorical:
left = comp.labels[view]
else:
left = comp.data[view]
if isinstance(self._right, (numbers.Number, six.string_types)):
right = self._right
else:
try:
comp = data.get_component(self._right)
except IncompatibleAttribute:
right = data[self._right, view]
else:
if comp.categorical:
right = comp.labels[view]
else:
right = comp.data[view]
return self._operator(left, right)
def copy(self):
return InequalitySubsetState(self._left, self._right, self._operator)
def __str__(self):
sym = OPSYM.get(self._operator, self._operator)
return "(%s %s %s)" % (self._left, sym, self._right)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self)
@contract(subsets='list(isinstance(Subset))', returns=Subset)
def _combine(subsets, operator):
state = operator(*[s.subset_state for s in subsets])
result = Subset(None)
result.subset_state = state
return result
def combine_multiple(subsets, operator):
if len(subsets) == 0:
return SubsetState()
else:
combined = subsets[0]
for subset in subsets[1:]:
combined = operator(combined, subset)
return combined
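# Illustrative example (an addition, not part of the original module):
# subset states compose with the boolean operators defined above.  For two
# hypothetical states `s1` and `s2`:
#
#     combined = combine_multiple([s1, s2], operator.or_)   # same as s1 | s2
#     inverted = ~combined                                  # InvertState
#     mask = combined.to_mask(data)                         # boolean ndarray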
| bsd-3-clause | 5,609,667,963,165,240,000 | 30.452252 | 102 | 0.584212 | false |
naturalness/sensibility | sensibility/__init__.py | 1 | 1346 | #!/usr/bin/env python3
# Copyright 2017 Eddie Antonio Santos <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sensibility --- detect and fix syntax errors in source code.
"""
import pkg_resources
from .edit import Edit, Insertion, Deletion, Substitution
from .language import Language, current_language
from .lexical_analysis import Lexeme, Token, Location, Position
from .source_vector import SourceVector
from .vocabulary import Vocabulary, Vind
# Get the current version from setup.py
__version__ = pkg_resources.get_distribution(__name__).version
# XXX: Deprecated: this alias
language = current_language
__all__ = [
'Edit', 'Insertion', 'Deletion', 'Substitution',
'Language', 'language', 'current_language',
'Lexeme', 'Token', 'Location', 'Position',
'SourceVector',
'Vocabulary', 'Vind',
]
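# Illustrative usage for downstream code (an addition, not part of the
# original module):
#
#     from sensibility import Insertion, current_language
#
# (the bare `language` name is a deprecated alias for `current_language`).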
| apache-2.0 | -1,326,384,814,622,904,800 | 31.829268 | 74 | 0.739227 | false |
shrinidhi666/rbhus | rbhusUI/guiBin/selectRadioBox.py | 1 | 3836 | #!/usr/bin/python
from PyQt4 import QtCore, QtGui
import glob
import os
import sys
import datetime
import re
import argparse
dirSelf = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dirSelf.rstrip(os.sep).rstrip("guiBin").rstrip(os.sep) + os.sep + "lib")
import selectRadioBoxMod
sys.path.append(dirSelf.rstrip(os.sep).rstrip("guiBin").rstrip(os.sep).rstrip("rbhusUI").rstrip(os.sep) + os.sep +"rbhus")
parser = argparse.ArgumentParser()
parser.add_argument("-i","--input",dest='inputlist',help='comma seperated input list')
parser.add_argument("-d","--default",dest='defaultlist',help='comma seperated default checked list')
args = parser.parse_args()
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s


class Ui_Form(selectRadioBoxMod.Ui_selectRadioBox):

    def setupUi(self, Form):
        selectRadioBoxMod.Ui_selectRadioBox.setupUi(self, Form)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(dirSelf.rstrip(os.sep).rstrip("guiBin").rstrip(os.sep).rstrip("rbhusUI").rstrip(os.sep)+ os.sep +"etc/icons/rbhus.png")), QtGui.QIcon.Normal, QtGui.QIcon.On)
        Form.setWindowIcon(icon)
        self.inList = []
        self.defList = []
        self.updateLine = []
        if(args.inputlist):
            self.inList = args.inputlist.split(",")
        if(args.defaultlist):
            self.defList = args.defaultlist.split(",")
        self.radioButts = {}
        self.updateCheckBoxes()
        self.updateSelected()
        self.pushApply.clicked.connect(self.pApply)
        self.lineEditSearch.textChanged.connect(self.updateCheckBoxes)
        self.pushClearSearch.clicked.connect(self.lineEditSearch.clear)
        Form.closeEvent = self.closeEvent

    def closeEvent(self, event):
        print(",".join(self.defList))
        event.accept()

    def pApply(self):
        print(",".join(self.updateLine))
        QtCore.QCoreApplication.instance().quit()

    def updateCheckBoxes(self):
        findList = []
        for x in self.inList:
            if((x.lower()).find(str(self.lineEditSearch.text()).lower()) >= 0):
                findList.append(x)
        for x in self.inList:
            try:
                self.radioButts[x].setParent(None)
                self.radioButts[x].deleteLater()
                self.radioButts[x] = None
                del(self.radioButts[x])
            except:
                pass
        if(findList):
            for x in findList:
                self.radioButts[x] = QtGui.QRadioButton(self.scrollAreaWidgetContents)
                self.radioButts[x].setObjectName(_fromUtf8(x))
                self.verticalLayout.addWidget(self.radioButts[x])
                self.radioButts[x].setText(_fromUtf8(x))
                self.radioButts[x].toggled.connect(self.updateSelected)
                if(x in self.defList):
                    self.radioButts[x].setChecked(2)
        #self.defList = []

    def deselectall(self):
        for x in self.inList:
            self.radioButts[x].setChecked(0)

    def selectall(self):
        for x in self.inList:
            self.radioButts[x].setChecked(2)

    def updateSelected(self):
        self.updateLine = []
        #self.plainTextEditSelected.setReadOnly(False)
        self.plainTextEditSelected.clear()
        for x in self.radioButts.keys():
            #print(x + " : "+ str(self.radioButts[x].isChecked()))
            if(self.radioButts[x].isChecked()):
                self.updateLine.append(str(x))
        self.plainTextEditSelected.setPlainText(_fromUtf8(",".join(self.updateLine)))
        #self.plainTextEditSelected.setReadOnly(True)
        #self.checkBox_2 = QtGui.QCheckBox(self.scrollAreaWidgetContents)
        #self.checkBox_2.setObjectName(_fromUtf8("checkBox_2"))
        #self.verticalLayout.addWidget(self.checkBox_2)


if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    Form = QtGui.QMainWindow()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
| gpl-3.0 | 4,537,618,025,003,838,500 | 27.205882 | 200 | 0.658759 | false |
endvroy/PATlanguages | patFutures.py | 1 | 1571 | from pat import getPayload
from collections import Counter
from concurrent import futures
import requests, bs4
from tqdm import tqdm
def getPage(session, page):
    # print('getting page %d...' % page)
    url = 'http://www.patest.cn/contests/pat-b-practise/submissions?page=%d' % page
    res = session.get(url)
    try:
        res.raise_for_status()
    except requests.HTTPError as exc:
        if exc.response.status_code == 404:
            print('page {} encountered 404'.format(page))
        else:
            raise
    else:
        return res


def extractColumns(res):
    counter = Counter()
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    table = soup.select('table')[0]
    for row in table.find_all('tr')[1:]:
        cells = row.find_all('td')
        counter.update([cells[4].text])
    return counter


def getCount(payload, page):
    return extractColumns(getPage(payload, page))
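
# Illustrative only (added note; the result keys are hypothetical, since they
# come from the live PAT site): getCount fetches one submissions page with an
# authenticated session and tallies the per-row verdict column, e.g.
#     counts = getCount(session, 1)   # -> Counter({'Accepted': 37, ...})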

if __name__ == '__main__':
    payload = getPayload()
    executor = futures.ThreadPoolExecutor(max_workers=100)
    fs = set()
    with requests.Session() as session:
        session.post('http://www.patest.cn/users/sign_in', data=payload)
        for i in range(1, 1001):
            future = executor.submit(getCount, session, i)
            fs.add(future)
        results = futures.as_completed(fs)
        results = tqdm(results, total=len(fs))
        # results = executor.map(partial(getCount, payload), range(1, 1000, 100))
        counter = Counter()
        for future in results:
            counter.update(future.result())
        print(counter)
| mit | -1,742,330,523,643,958,000 | 28.092593 | 83 | 0.623806 | false |
nicolashainaux/mathmaker | mathmaker/lib/document/content/geometry/perimeter_polygon.py | 1 | 2795 | # -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <[email protected]>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from mathmaker.lib import shared
from mathmaker.lib.document.content import component
from mathmaker.lib.tools.wording import setup_wording_format_of
class sub_object(component.structure):
    def __init__(self, build_data, **options):
        super().setup('minimal', **options)
        level = int(build_data[5])
        # super().setup('numbers', nb=, **options)
        # super().setup('nb_variants', **options)
        super().setup('length_units', **options)
        # We know the wording will be in two lines:
        super().setup('polygon', polygon_data=build_data, wlines_nb=2)
        self.wording = {
            3: _(r'Perimeter of this triangle?\newline '
                 r'(length unit: {length_unit}) |hint:length_unit|'),
            4: _(r'Perimeter of this quadrilateral?\newline '
                 r'(length unit: {length_unit}) |hint:length_unit|'),
            5: _(r'Perimeter of this pentagon?\newline '
                 r'(length unit: {length_unit}) |hint:length_unit|'),
            6: _(r'Perimeter of this hexagon?\newline '
                 r'(length unit: {length_unit}) |hint:length_unit|')
        }[len(self.polygon.sides)]
        self.transduration = 12 + 3 * (level - 1)
        setup_wording_format_of(self)
        self.wording = self.wording.format(**self.wording_format)

    def q(self, **options):
        if self.slideshow:
            return '{}{}{}'.format(self.wording,
                                   shared.machine.addvspace(height='10pt'),
                                   self.polygon.drawn)
        else:
            return shared.machine.write_layout(
                (1, 2), [5, 8], [self.polygon.drawn, self.wording])

    def a(self, **options):
        # This is actually meant for self.preset == 'mental calculation'
        return self.polygon.lbl_perimeter.printed

    def js_a(self, **kwargs):
        return [self.polygon.lbl_perimeter.uiprinted]
| gpl-3.0 | -1,160,689,326,687,422,200 | 38.928571 | 76 | 0.633989 | false |
htwenhe/DJOA | env/Lib/site-packages/xlwt/UnicodeUtils.py | 1 | 5033 | # -*- coding: windows-1252 -*-
'''
From BIFF8 on, strings are always stored using UTF-16LE text encoding. The
character array is a sequence of 16-bit values. Additionally it is
possible to use a compressed format, which omits the high bytes of all
characters, if they are all zero.
The following tables describe the standard format of the entire string, but
in many records the strings differ from this format. This will be mentioned
separately. It is possible (but not required) to store Rich-Text formatting
information and Asian phonetic information inside a Unicode string. This
results in four different ways to store a string. The character array
is not zero-terminated.
The string consists of the character count (as usual an 8-bit value or
a 16-bit value), option flags, the character array and optional formatting
information. If the string is empty, sometimes the option flags field will
not occur. This is mentioned at the respective place.
Offset    Size    Contents
0         1 or 2  Length of the string (character count, ln)
1 or 2    1       Option flags:
                    Bit  Mask  Contents
                    0    01H   Character compression (ccompr):
                                 0 = Compressed (8-bit characters)
                                 1 = Uncompressed (16-bit characters)
                    2    04H   Asian phonetic settings (phonetic):
                                 0 = Does not contain Asian phonetic settings
                                 1 = Contains Asian phonetic settings
                    3    08H   Rich-Text settings (richtext):
                                 0 = Does not contain Rich-Text settings
                                 1 = Contains Rich-Text settings
[2 or 3]  2       (optional, only if richtext=1) Number of Rich-Text
                  formatting runs (rt)
[var.]    4       (optional, only if phonetic=1) Size of Asian phonetic
                  settings block (in bytes, sz)
var.      ln or
          2·ln    Character array (8-bit characters or 16-bit characters,
                  dependent on ccompr)
[var.]    4·rt    (optional, only if richtext=1) List of rt formatting runs
[var.]    sz      (optional, only if phonetic=1) Asian Phonetic Settings Block
'''
from .compat import unicode, unicode_type
from struct import pack
def upack2(s, encoding='ascii'):
    # If not unicode, make it so.
    if isinstance(s, unicode_type):
        us = s
    else:
        us = unicode(s, encoding)
    # Limit is based on number of content characters
    # (not on number of bytes in packed result)
    len_us = len(us)
    if len_us > 32767:
        raise Exception('String longer than 32767 characters')
    try:
        encs = us.encode('latin1')
        # Success here means all chars are in U+0000 to U+00FF
        # inclusive, meaning that we can use "compressed format".
        flag = 0
        n_items = len_us
    except UnicodeEncodeError:
        encs = us.encode('utf_16_le')
        flag = 1
        n_items = len(encs) // 2
        # n_items is the number of "double byte characters" i.e. MS C wchars
        # Can't use len(us).
        # len(u"\U0001D400") -> 1 on a wide-unicode build
        # and 2 on a narrow-unicode build.
        # We need n_items == 2 in this case.
    return pack('<HB', n_items, flag) + encs
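
# A hedged usage sketch (added comments, not original code) of the two
# encodings the module docstring describes -- a 16-bit count, one flag byte,
# then the character array:
#     upack2(u'abc')       # -> b'\x03\x00\x00abc' (all chars fit latin1,
#                          #    so the "compressed" 8-bit form, flag 0x00)
#     upack2(u'ab\u20ac')  # -> flag 0x01 and a UTF-16LE payload, counted
#                          #    as 3 double-byte characters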

def upack2rt(rt, encoding='ascii'):
    us = u''
    fr = b''
    offset = 0
    # convert rt strings to unicode if not already unicode
    # also generate the formatting run for the styles added
    for s, fontx in rt:
        if not isinstance(s, unicode_type):
            s = unicode(s, encoding)
        us += s
        if fontx is not None:
            # code in Rows.py ensures that
            # fontx can be None only for the first piece
            fr += pack('<HH', offset, fontx)
        # offset is the number of MS C wchar characters.
        # That is 1 if c <= u'\uFFFF' else 2
        offset += len(s.encode('utf_16_le')) // 2
    num_fr = len(fr) // 4  # ensure result is int
    if offset > 32767:
        raise Exception('String longer than 32767 characters')
    try:
        encs = us.encode('latin1')
        # Success here means all chars are in U+0000 to U+00FF
        # inclusive, meaning that we can use "compressed format".
        flag = 0 | 8
        n_items = len(encs)
    except UnicodeEncodeError:
        encs = us.encode('utf_16_le')
        flag = 1 | 8
        n_items = len(encs) // 2  # see comments in upack2 function above
    return pack('<HBH', n_items, flag, num_fr) + encs, fr


def upack1(s, encoding='ascii'):
    # Same as upack2(), but with a one-byte length field.
    if isinstance(s, unicode_type):
        us = s
    else:
        us = unicode(s, encoding)
    len_us = len(us)
    if len_us > 255:
        raise Exception('String longer than 255 characters')
    try:
        encs = us.encode('latin1')
        flag = 0
        n_items = len_us
    except UnicodeEncodeError:
        encs = us.encode('utf_16_le')
        flag = 1
        n_items = len(encs) // 2
    return pack('<BB', n_items, flag) + encs
| mit | -7,807,916,046,642,765,000 | 40.254098 | 99 | 0.606795 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/en/to_be_fixed/sitedown/tvbmoviez.py | 1 | 5493 | # -*- coding: UTF-8 -*-
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['best-moviez.ws']
self.base_link = 'http://www.best-moviez.ws'
self.search_link = '/search/%s/feed/rss2/'
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
c = client.parseDOM(post, 'content.+?')[0]
s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
s = s[0] if s else '0'
u = zip(client.parseDOM(c, 'a', ret='href'), client.parseDOM(c, 'a'))
u = [(i[1], i[0], s) for i in u]
items += u
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
| gpl-2.0 | 7,544,595,579,048,061,000 | 35.138158 | 172 | 0.446022 | false |
dpranke/typ | typ/runner.py | 3 | 35257 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import importlib
import inspect
import json
import os
import pdb
import sys
import unittest
import traceback
from collections import OrderedDict
# This ensures that absolute imports of typ modules will work when
# running typ/runner.py as a script even if typ is not installed.
# We need this entry in addition to the one in __main__.py to ensure
# that typ/runner.py works when invoked via subprocess on windows in
# _spawn_main().
path_to_file = os.path.realpath(__file__)
if path_to_file.endswith('.pyc'):  # pragma: no cover
    path_to_file = path_to_file[:-1]
dir_above_typ = os.path.dirname(os.path.dirname(path_to_file))
if dir_above_typ not in sys.path:  # pragma: no cover
    sys.path.append(dir_above_typ)

from typ import json_results
from typ.arg_parser import ArgumentParser
from typ.host import Host
from typ.pool import make_pool
from typ.stats import Stats
from typ.printer import Printer
from typ.test_case import TestCase as TypTestCase
from typ.version import VERSION

Result = json_results.Result
ResultSet = json_results.ResultSet
ResultType = json_results.ResultType


def main(argv=None, host=None, win_multiprocessing=None, **defaults):
    host = host or Host()
    runner = Runner(host=host)
    if win_multiprocessing is not None:
        runner.win_multiprocessing = win_multiprocessing
    return runner.main(argv, **defaults)
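
# An embedding sketch (hypothetical argv, added for illustration):
#     sys.exit(main(['--jobs', '4', '--list-only', 'tests']))
# would discover tests under ./tests and print their names without
# running them.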

class TestInput(object):

    def __init__(self, name, msg='', timeout=None, expected=None):
        self.name = name
        self.msg = msg
        self.timeout = timeout
        self.expected = expected


class TestSet(object):

    def __init__(self, parallel_tests=None, isolated_tests=None,
                 tests_to_skip=None):

        def promote(tests):
            tests = tests or []
            return [test if isinstance(test, TestInput) else TestInput(test)
                    for test in tests]

        self.parallel_tests = promote(parallel_tests)
        self.isolated_tests = promote(isolated_tests)
        self.tests_to_skip = promote(tests_to_skip)


class WinMultiprocessing(object):
    ignore = 'ignore'
    importable = 'importable'
    spawn = 'spawn'
    values = [ignore, importable, spawn]


class _AddTestsError(Exception):
    pass


class Runner(object):

    def __init__(self, host=None):
        self.args = None
        self.classifier = None
        self.cov = None
        self.context = None
        self.coverage_source = None
        self.host = host or Host()
        self.loader = unittest.loader.TestLoader()
        self.printer = None
        self.setup_fn = None
        self.stats = None
        self.teardown_fn = None
        self.top_level_dir = None
        self.top_level_dirs = []
        self.win_multiprocessing = WinMultiprocessing.spawn
        self.final_responses = []

        # initialize self.args to the defaults.
        parser = ArgumentParser(self.host)
        self.parse_args(parser, [])

    def main(self, argv=None, **defaults):
        parser = ArgumentParser(self.host)
        self.parse_args(parser, argv, **defaults)
        if parser.exit_status is not None:
            return parser.exit_status

        try:
            ret, _, _ = self.run()
            return ret
        except KeyboardInterrupt:
            self.print_("interrupted, exiting", stream=self.host.stderr)
            return 130

    def parse_args(self, parser, argv, **defaults):
        for attrname in defaults:
            if not hasattr(self.args, attrname):
                parser.error("Unknown default argument name '%s'" % attrname,
                             bailout=False)
                return
        parser.set_defaults(**defaults)
        self.args = parser.parse_args(args=argv)
        if parser.exit_status is not None:
            return

    def print_(self, msg='', end='\n', stream=None):
        self.host.print_(msg, end, stream=stream)

    def run(self, test_set=None):
        ret = 0
        h = self.host

        if self.args.version:
            self.print_(VERSION)
            return ret, None, None

        should_spawn = self._check_win_multiprocessing()
        if should_spawn:
            return self._spawn(test_set)

        ret = self._set_up_runner()
        if ret:
            return ret, None, None

        find_start = h.time()
        if self.cov:  # pragma: no cover
            self.cov.erase()
            self.cov.start()

        full_results = None
        result_set = ResultSet()
        if not test_set:
            ret, test_set = self.find_tests(self.args)
        find_end = h.time()

        if not ret:
            ret, full_results = self._run_tests(result_set, test_set)

        if self.cov:  # pragma: no cover
            self.cov.stop()
            self.cov.save()
        test_end = h.time()

        trace = self._trace_from_results(result_set)
        if full_results:
            self._summarize(full_results)
            self._write(self.args.write_full_results_to, full_results)
            upload_ret = self._upload(full_results)
            if not ret:
                ret = upload_ret
            reporting_end = h.time()
            self._add_trace_event(trace, 'run', find_start, reporting_end)
            self._add_trace_event(trace, 'discovery', find_start, find_end)
            self._add_trace_event(trace, 'testing', find_end, test_end)
            self._add_trace_event(trace, 'reporting', test_end, reporting_end)
            self._write(self.args.write_trace_to, trace)
            self.report_coverage()
        else:
            upload_ret = 0

        return ret, full_results, trace

    def _check_win_multiprocessing(self):
        wmp = self.win_multiprocessing

        ignore, importable, spawn = WinMultiprocessing.values

        if wmp not in WinMultiprocessing.values:
            raise ValueError('illegal value %s for win_multiprocessing' %
                             wmp)

        h = self.host
        if wmp == ignore and h.platform == 'win32':  # pragma: win32
            raise ValueError('Cannot use WinMultiprocessing.ignore for '
                             'win_multiprocessing when actually running '
                             'on Windows.')

        if wmp == ignore or self.args.jobs == 1:
            return False

        if wmp == importable:
            if self._main_is_importable():
                return False
            raise ValueError('The __main__ module (%s) '  # pragma: no cover
                             'may not be importable' %
                             sys.modules['__main__'].__file__)

        assert wmp == spawn
        return True

    def _main_is_importable(self):  # pragma: untested
        path = sys.modules['__main__'].__file__
        if not path:
            return False
        if path.endswith('.pyc'):
            path = path[:-1]
        if not path.endswith('.py'):
            return False
        if path.endswith('__main__.py'):
            # main modules are not directly importable.
            return False

        path = self.host.realpath(path)
        for d in sys.path:
            if path.startswith(self.host.realpath(d)):
                return True
        return False  # pragma: no cover

    def _spawn(self, test_set):
        # TODO: Handle picklable hooks, rather than requiring them to be None.
        assert self.classifier is None
        assert self.context is None
        assert self.setup_fn is None
        assert self.teardown_fn is None
        assert test_set is None
        h = self.host

        if self.args.write_trace_to:  # pragma: untested
            should_delete_trace = False
        else:
            should_delete_trace = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_trace_to = fp.name

        if self.args.write_full_results_to:  # pragma: untested
            should_delete_results = False
        else:
            should_delete_results = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_full_results_to = fp.name

        argv = ArgumentParser(h).argv_from_args(self.args)
        ret = h.call_inline([h.python_interpreter, path_to_file] + argv)

        trace = self._read_and_delete(self.args.write_trace_to,
                                      should_delete_trace)
        full_results = self._read_and_delete(self.args.write_full_results_to,
                                             should_delete_results)
        return ret, full_results, trace

    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(
            self.print_, args.overwrite, args.terminal_width)

        if self.args.top_level_dirs and self.args.top_level_dir:
            self.print_(
                'Cannot specify both --top-level-dir and --top-level-dirs',
                stream=h.stderr)
            return 1

        self.top_level_dirs = args.top_level_dirs
        if not self.top_level_dirs and args.top_level_dir:
            self.top_level_dirs = [args.top_level_dir]

        if not self.top_level_dirs:
            for test in [t for t in args.tests if h.exists(t)]:
                if h.isdir(test):
                    top_dir = test
                else:
                    top_dir = h.dirname(test)
                while h.exists(top_dir, '__init__.py'):
                    top_dir = h.dirname(top_dir)
                top_dir = h.realpath(top_dir)
                if not top_dir in self.top_level_dirs:
                    self.top_level_dirs.append(top_dir)
        if not self.top_level_dirs:
            top_dir = h.getcwd()
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            top_dir = h.realpath(top_dir)
            self.top_level_dirs.append(top_dir)

        if not self.top_level_dir and self.top_level_dirs:
            self.top_level_dir = self.top_level_dirs[0]

        for path in self.top_level_dirs:
            h.add_to_path(path)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                return 1

            source = self.args.coverage_source
            if not source:
                source = self.top_level_dirs + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()
        return 0

    def find_tests(self, args):
        test_set = TestSet()

        orig_skip = unittest.skip
        orig_skip_if = unittest.skipIf
        if args.all:
            unittest.skip = lambda reason: lambda x: x
            unittest.skipIf = lambda condition, reason: lambda x: x

        try:
            names = self._name_list_from_args(args)
            classifier = self.classifier or _default_classifier(args)

            for name in names:
                try:
                    self._add_tests_to_set(test_set, args.suffixes,
                                           self.top_level_dirs, classifier,
                                           name)
                except (AttributeError, ImportError, SyntaxError) as e:
                    ex_str = traceback.format_exc()
                    self.print_('Failed to load "%s" in find_tests: %s' %
                                (name, e))
                    self.print_(' %s' %
                                '\n '.join(ex_str.splitlines()))
                    self.print_(ex_str)
                    return 1, None
        except _AddTestsError as e:
            self.print_(str(e))
            return 1, None

            # TODO: Add support for discovering setupProcess/teardownProcess?

            shard_index = args.shard_index
            total_shards = args.total_shards
            assert total_shards >= 1
            assert shard_index >= 0 and shard_index < total_shards, (
                'shard_index (%d) must be >= 0 and < total_shards (%d)' %
                (shard_index, total_shards))
            test_set.parallel_tests = _sort_inputs(
                test_set.parallel_tests)[shard_index::total_shards]
            test_set.isolated_tests = _sort_inputs(
                test_set.isolated_tests)[shard_index::total_shards]
            test_set.tests_to_skip = _sort_inputs(
                test_set.tests_to_skip)[shard_index::total_shards]
            return 0, test_set
        finally:
            unittest.skip = orig_skip
            unittest.skipIf = orig_skip_if

    def _name_list_from_args(self, args):
        if args.tests:
            names = args.tests
        elif args.file_list:
            if args.file_list == '-':
                s = self.host.stdin.read()
            else:
                s = self.host.read_text_file(args.file_list)
            names = [line.strip() for line in s.splitlines()]
        else:
            names = self.top_level_dirs
        return names

    def _add_tests_to_set(self, test_set, suffixes, top_level_dirs, classifier,
                          name):
        h = self.host
        loader = self.loader
        add_tests = _test_adder(test_set, classifier)

        found = set()
        for d in top_level_dirs:
            if h.isfile(name):
                rpath = h.relpath(name, d)
                if rpath.startswith('..'):
                    continue
                if rpath.endswith('.py'):
                    rpath = rpath[:-3]
                module = rpath.replace(h.sep, '.')
                if module not in found:
                    found.add(module)
                    add_tests(loader.loadTestsFromName(module))
            elif h.isdir(name):
                rpath = h.relpath(name, d)
                if rpath.startswith('..'):
                    continue
                for suffix in suffixes:
                    if not name in found:
                        found.add(name + '/' + suffix)
                        add_tests(loader.discover(name, suffix, d))
            else:
                possible_dir = name.replace('.', h.sep)
                if h.isdir(d, possible_dir):
                    for suffix in suffixes:
                        path = h.join(d, possible_dir)
                        if not path in found:
                            found.add(path + '/' + suffix)
                            suite = loader.discover(path, suffix, d)
                            add_tests(suite)
                elif not name in found:
                    found.add(name)
                    add_tests(loader.loadTestsFromName(name))

        # pylint: disable=no-member
        if hasattr(loader, 'errors') and loader.errors:  # pragma: python3
            # In Python3's version of unittest, loader failures get converted
            # into failed test cases, rather than raising exceptions. However,
            # the errors also get recorded so you can err out immediately.
            raise ImportError(loader.errors)

    def _run_tests(self, result_set, test_set):
        h = self.host
        all_tests = [ti.name for ti in
                     _sort_inputs(test_set.parallel_tests +
                                  test_set.isolated_tests +
                                  test_set.tests_to_skip)]
        if self.args.list_only:
            self.print_('\n'.join(all_tests))
            return 0, None

        self._run_one_set(self.stats, result_set, test_set)

        failed_tests = sorted(json_results.failed_test_names(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and failed_tests:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(failed_tests)
            tests_to_retry = TestSet(isolated_tests=list(failed_tests))
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            failed_tests = json_results.failed_test_names(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()),
                                                      all_tests, result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)

    def _run_one_set(self, stats, result_set, test_set):
        stats.total = (len(test_set.parallel_tests) +
                       len(test_set.isolated_tests) +
                       len(test_set.tests_to_skip))
        self._skip_tests(stats, result_set, test_set.tests_to_skip)
        self._run_list(stats, result_set,
                       test_set.parallel_tests, self.args.jobs)
        self._run_list(stats, result_set,
                       test_set.isolated_tests, 1)

    def _skip_tests(self, stats, result_set, tests_to_skip):
        for test_input in tests_to_skip:
            last = self.host.time()
            stats.started += 1
            self._print_test_started(stats, test_input)
            now = self.host.time()
            result = Result(test_input.name, actual=ResultType.Skip,
                            started=last, took=(now - last), worker=0,
                            expected=[ResultType.Skip],
                            out=test_input.msg)
            result_set.add(result)
            stats.finished += 1
            self._print_test_finished(stats, result)

    def _run_list(self, stats, result_set, test_inputs, jobs):
        h = self.host
        running_jobs = set()

        jobs = min(len(test_inputs), jobs)
        if not jobs:
            return

        child = _Child(self)
        pool = make_pool(h, jobs, _run_one_test, child,
                         _setup_process, _teardown_process)
        try:
            while test_inputs or running_jobs:
                while test_inputs and (len(running_jobs) < self.args.jobs):
                    test_input = test_inputs.pop(0)
                    stats.started += 1
                    pool.send(test_input)
                    running_jobs.add(test_input.name)
                    self._print_test_started(stats, test_input)

                result = pool.get()
                running_jobs.remove(result.name)
                result_set.add(result)
                stats.finished += 1
                self._print_test_finished(stats, result)
            pool.close()
        finally:
            self.final_responses.extend(pool.join())

    def _print_test_started(self, stats, test_input):
        if self.args.quiet:
            # Print nothing when --quiet was passed.
            return

        # If -vvv was passed, print when the test is queued to be run.
        # We don't actually know when the test picked up to run, because
        # that is handled by the child process (where we can't easily
        # print things). Otherwise, only print when the test is started
        # if we know we can overwrite the line, so that we do not
        # get multiple lines of output as noise (in -vvv, we actually want
        # the noise).
        test_start_msg = stats.format() + test_input.name
        if self.args.verbose > 2:
            self.update(test_start_msg + ' queued', elide=False)
        if self.args.overwrite:
            self.update(test_start_msg, elide=(not self.args.verbose))

    def _print_test_finished(self, stats, result):
        stats.add_time()

        assert result.actual in [ResultType.Failure, ResultType.Skip,
                                 ResultType.Pass]
        if result.actual == ResultType.Failure:
            result_str = ' failed'
        elif result.actual == ResultType.Skip:
            result_str = ' was skipped'
        elif result.actual == ResultType.Pass:
            result_str = ' passed'

        if result.unexpected:
            result_str += ' unexpectedly'
        if self.args.timing:
            timing_str = ' %.4fs' % result.took
        else:
            timing_str = ''
        suffix = '%s%s' % (result_str, timing_str)
        out = result.out
        err = result.err
        if result.code:
            if out or err:
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix, elide=False)
            for l in out.splitlines():
                self.print_(' %s' % l)
            for l in err.splitlines():
                self.print_(' %s' % l)
        elif not self.args.quiet:
            if self.args.verbose > 1 and (out or err):
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix,
                        elide=(not self.args.verbose))
            if self.args.verbose > 1:
                for l in out.splitlines():
                    self.print_(' %s' % l)
                for l in err.splitlines():
                    self.print_(' %s' % l)
            if self.args.verbose:
                self.flush()

    def update(self, msg, elide):
        self.printer.update(msg, elide)

    def flush(self):
        self.printer.flush()

    def _summarize(self, full_results):
        num_passes = json_results.num_passes(full_results)
        num_failures = json_results.num_failures(full_results)
        num_skips = json_results.num_skips(full_results)

        if self.args.quiet and num_failures == 0:
            return

        if self.args.timing:
            timing_clause = ' in %.1fs' % (self.host.time() -
                                           self.stats.started_time)
        else:
            timing_clause = ''
        self.update('%d test%s passed%s, %d skipped, %d failure%s.' %
                    (num_passes,
                     '' if num_passes == 1 else 's',
                     timing_clause,
                     num_skips,
                     num_failures,
                     '' if num_failures == 1 else 's'), elide=False)
        self.print_()

    def _read_and_delete(self, path, delete):
        h = self.host
        obj = None
        if h.exists(path):
            contents = h.read_text_file(path)
            if contents:
                obj = json.loads(contents)
            if delete:
                h.remove(path)
        return obj

    def _write(self, path, obj):
        if path:
            self.host.write_text_file(path, json.dumps(obj, indent=2) + '\n')

    def _upload(self, full_results):
        h = self.host
        if not self.args.test_results_server:
            return 0

        url, content_type, data = json_results.make_upload_request(
            self.args.test_results_server, self.args.builder_name,
            self.args.master_name, self.args.test_type,
            full_results)

        try:
            h.fetch(url, data, {'Content-Type': content_type})
            return 0
        except Exception as e:
            h.print_('Uploading the JSON results raised "%s"' % str(e))
            return 1

    def report_coverage(self):
        if self.args.coverage:  # pragma: no cover
            self.host.print_()
            import coverage
            cov = coverage.coverage(data_suffix=True)
            cov.combine()
            cov.report(show_missing=self.args.coverage_show_missing,
                       omit=self.args.coverage_omit)
            if self.args.coverage_annotate:
                cov.annotate(omit=self.args.coverage_omit)

    def _add_trace_event(self, trace, name, start, end):
        event = {
            'name': name,
            'ts': int((start - self.stats.started_time) * 1000000),
            'dur': int((end - start) * 1000000),
            'ph': 'X',
            'pid': self.host.getpid(),
            'tid': 0,
        }
        trace['traceEvents'].append(event)

    def _trace_from_results(self, result_set):
        trace = OrderedDict()
        trace['traceEvents'] = []
        trace['otherData'] = {}
        for m in self.args.metadata:
            k, v = m.split('=')
            trace['otherData'][k] = v

        for result in result_set.results:
            started = int((result.started - self.stats.started_time) * 1000000)
            took = int(result.took * 1000000)
            event = OrderedDict()
            event['name'] = result.name
            event['dur'] = took
            event['ts'] = started
            event['ph'] = 'X'  # "Complete" events
            event['pid'] = result.pid
            event['tid'] = result.worker

            args = OrderedDict()
            args['expected'] = sorted(str(r) for r in result.expected)
            args['actual'] = str(result.actual)
            args['out'] = result.out
            args['err'] = result.err
            args['code'] = result.code
            args['unexpected'] = result.unexpected
            args['flaky'] = result.flaky
            event['args'] = args

            trace['traceEvents'].append(event)
        return trace


def _matches(name, globs):
    return any(fnmatch.fnmatch(name, glob) for glob in globs)


def _default_classifier(args):
    def default_classifier(test_set, test):
        name = test.id()
        if not args.all and _matches(name, args.skip):
            test_set.tests_to_skip.append(TestInput(name,
                                                    'skipped by request'))
        elif _matches(name, args.isolate):
            test_set.isolated_tests.append(TestInput(name))
        else:
            test_set.parallel_tests.append(TestInput(name))
    return default_classifier


def _test_adder(test_set, classifier):
    def add_tests(obj):
        if isinstance(obj, unittest.suite.TestSuite):
            for el in obj:
                add_tests(el)
        elif (obj.id().startswith('unittest.loader.LoadTestsFailure') or
              obj.id().startswith('unittest.loader.ModuleImportFailure')):
            # Access to protected member pylint: disable=W0212
            module_name = obj._testMethodName
            try:
                method = getattr(obj, obj._testMethodName)
                method()
            except Exception as e:
                if 'LoadTests' in obj.id():
                    raise _AddTestsError('%s.load_tests() failed: %s'
                                         % (module_name, str(e)))
                else:
                    raise _AddTestsError(str(e))
        else:
            assert isinstance(obj, unittest.TestCase)
            classifier(test_set, obj)
    return add_tests


class _Child(object):

    def __init__(self, parent):
        self.host = None
        self.worker_num = None
        self.all = parent.args.all
        self.debugger = parent.args.debugger
        self.coverage = parent.args.coverage and parent.args.jobs > 1
        self.coverage_source = parent.coverage_source
        self.dry_run = parent.args.dry_run
        self.loader = parent.loader
        self.passthrough = parent.args.passthrough
        self.context = parent.context
        self.setup_fn = parent.setup_fn
        self.teardown_fn = parent.teardown_fn
        self.context_after_setup = None
        self.top_level_dir = parent.top_level_dir
        self.top_level_dirs = parent.top_level_dirs
        self.loaded_suites = {}
        self.cov = None


def _setup_process(host, worker_num, child):
    child.host = host
    child.worker_num = worker_num
    # pylint: disable=protected-access

    if child.coverage:  # pragma: no cover
        import coverage
        child.cov = coverage.coverage(source=child.coverage_source,
                                      data_suffix=True)
        child.cov._warn_no_data = False
        child.cov.start()

    if child.setup_fn:
        child.context_after_setup = child.setup_fn(child, child.context)
    else:
        child.context_after_setup = child.context
    return child


def _teardown_process(child):
    res = None
    e = None
    if child.teardown_fn:
        try:
            res = child.teardown_fn(child, child.context_after_setup)
        except Exception as e:
            pass

    if child.cov:  # pragma: no cover
        child.cov.stop()
        child.cov.save()

    return (child.worker_num, res, e)


def _run_one_test(child, test_input):
    h = child.host
    pid = h.getpid()
    test_name = test_input.name

    start = h.time()

    # It is important to capture the output before loading the test
    # to ensure that
    # 1) the loader doesn't logs something we don't captured
    # 2) neither the loader nor the test case grab a reference to the
    #    uncaptured stdout or stderr that later is used when the test is run.
    # This comes up when using the FakeTestLoader and testing typ itself,
    # but could come up when testing non-typ code as well.
    h.capture_output(divert=not child.passthrough)
    ex_str = ''
    try:
        orig_skip = unittest.skip
        orig_skip_if = unittest.skipIf
        if child.all:
            unittest.skip = lambda reason: lambda x: x
            unittest.skipIf = lambda condition, reason: lambda x: x

        try:
            suite = child.loader.loadTestsFromName(test_name)
        except Exception as e:
            ex_str = ('loadTestsFromName("%s") failed: %s\n%s\n' %
                      (test_name, e, traceback.format_exc()))
            try:
                suite = _load_via_load_tests(child, test_name)
                ex_str += ('\nload_via_load_tests(\"%s\") returned %d tests\n' %
                           (test_name, len(list(suite))))
            except Exception as e:  # pragma: untested
                suite = []
                ex_str += ('\nload_via_load_tests("%s") failed: %s\n%s\n' %
                           (test_name, e, traceback.format_exc()))
    finally:
        unittest.skip = orig_skip
        unittest.skipIf = orig_skip_if

    tests = list(suite)
    if len(tests) != 1:
        err = 'Failed to load "%s" in run_one_test' % test_name
        if ex_str:  # pragma: untested
            err += '\n ' + '\n '.join(ex_str.splitlines())

        h.restore_output()
        return Result(test_name, ResultType.Failure, start, 0,
                      child.worker_num, unexpected=True, code=1,
                      err=err, pid=pid)

    test_case = tests[0]
    if isinstance(test_case, TypTestCase):
        test_case.child = child
        test_case.context = child.context_after_setup

    test_result = unittest.TestResult()
    out = ''
    err = ''
    try:
        if child.dry_run:
            pass
        elif child.debugger:  # pragma: no cover
            _run_under_debugger(h, test_case, suite, test_result)
        else:
            suite.run(test_result)
    finally:
        out, err = h.restore_output()

    took = h.time() - start
    return _result_from_test_result(test_result, test_name, start, took, out,
                                    err, child.worker_num, pid)


def _run_under_debugger(host, test_case, suite,
                        test_result):  # pragma: no cover
    # Access to protected member pylint: disable=W0212
    test_func = getattr(test_case, test_case._testMethodName)
    fname = inspect.getsourcefile(test_func)
    lineno = inspect.getsourcelines(test_func)[1] + 1
    dbg = pdb.Pdb(stdout=host.stdout.stream)
    dbg.set_break(fname, lineno)
    dbg.runcall(suite.run, test_result)


def _result_from_test_result(test_result, test_name, start, took, out, err,
                             worker_num, pid):
    flaky = False
    if test_result.failures:
        expected = [ResultType.Pass]
        actual = ResultType.Failure
        code = 1
        unexpected = True
        err = err + test_result.failures[0][1]
    elif test_result.errors:
        expected = [ResultType.Pass]
        actual = ResultType.Failure
        code = 1
        unexpected = True
        err = err + test_result.errors[0][1]
    elif test_result.skipped:
        expected = [ResultType.Skip]
        actual = ResultType.Skip
        err = err + test_result.skipped[0][1]
        code = 0
        unexpected = False
    elif test_result.expectedFailures:
        expected = [ResultType.Failure]
        actual = ResultType.Failure
        code = 1
        err = err + test_result.expectedFailures[0][1]
        unexpected = False
    elif test_result.unexpectedSuccesses:
        expected = [ResultType.Failure]
        actual = ResultType.Pass
        code = 0
        unexpected = True
    else:
        expected = [ResultType.Pass]
        actual = ResultType.Pass
        code = 0
        unexpected = False

    return Result(test_name, actual, start, took, worker_num,
                  expected, unexpected, flaky, code, out, err, pid)


def _load_via_load_tests(child, test_name):
    # If we couldn't import a test directly, the test may be only loadable
    # via unittest's load_tests protocol. See if we can find a load_tests
    # entry point that will work for this test.
    loader = child.loader
    comps = test_name.split('.')
    new_suite = unittest.TestSuite()

    while comps:
        name = '.'.join(comps)
        module = None
        suite = None
        if name not in child.loaded_suites:
            try:
                module = importlib.import_module(name)
            except ImportError:
                pass
            if module:
                suite = loader.loadTestsFromModule(module)
            child.loaded_suites[name] = suite
        suite = child.loaded_suites[name]
        if suite:
            for test_case in suite:
                assert isinstance(test_case, unittest.TestCase)
                if test_case.id() == test_name:  # pragma: untested
                    new_suite.addTest(test_case)
                    break
        comps.pop()
    return new_suite


def _sort_inputs(inps):
    return sorted(inps, key=lambda inp: inp.name)


if __name__ == '__main__':  # pragma: no cover
    sys.modules['__main__'].__file__ = path_to_file
    sys.exit(main(win_multiprocessing=WinMultiprocessing.importable))
| apache-2.0 | -6,514,229,973,386,140,000 | 34.505539 | 80 | 0.54653 | false |
OpenTechFund/WebApp | opentech/apply/review/forms.py | 1 | 4947 | from django import forms
from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.html import escape
from opentech.apply.review.options import NA
from opentech.apply.stream_forms.forms import StreamBaseForm
from .models import Review, ReviewOpinion
from .options import OPINION_CHOICES, PRIVATE
class MixedMetaClass(type(StreamBaseForm), type(forms.ModelForm)):
    pass


class ReviewModelForm(StreamBaseForm, forms.ModelForm, metaclass=MixedMetaClass):
    draft_button_name = "save_draft"

    class Meta:
        model = Review
        fields = ['recommendation', 'visibility', 'score', 'submission', 'author']
        widgets = {
            'recommendation': forms.HiddenInput(),
            'score': forms.HiddenInput(),
            'submission': forms.HiddenInput(),
            'author': forms.HiddenInput(),
            'visibility': forms.HiddenInput(),
        }
        error_messages = {
            NON_FIELD_ERRORS: {
                'unique_together': "You have already posted a review for this submission",
            }
        }

    def __init__(self, *args, user, submission, initial={}, instance=None, **kwargs):
        initial.update(submission=submission.id)
        initial.update(author=user.id)
        if instance:
            for key, value in instance.form_data.items():
                if key not in self._meta.fields:
                    initial[key] = value
        super().__init__(*args, initial=initial, instance=instance, **kwargs)
        for field in self._meta.widgets:
            self.fields[field].disabled = True
        if self.draft_button_name in self.data:
            for field in self.fields.values():
                field.required = False

    def clean(self):
        cleaned_data = super().clean()
        cleaned_data['form_data'] = {
            key: value
            for key, value in cleaned_data.items()
            if key not in self._meta.fields
        }
        return cleaned_data

    def save(self, commit=True):
        self.instance.score = self.calculate_score(self.cleaned_data)
        self.instance.recommendation = int(self.cleaned_data[self.instance.recommendation_field.id])
        self.instance.is_draft = self.draft_button_name in self.data
        # Old review forms do not have the required visibility field.
        # This will set visibility to PRIVATE by default.
        try:
            self.instance.visibility = self.cleaned_data[self.instance.visibility_field.id]
        except AttributeError:
            self.instance.visibility = PRIVATE
        self.instance.form_data = self.cleaned_data['form_data']
        if not self.instance.is_draft:
            # Capture the revision against which the user was reviewing
            self.instance.revision = self.instance.submission.live_revision
        return super().save(commit)

    def calculate_score(self, data):
        scores = list()
        for field in self.instance.score_fields:
            score = data.get(field.id)[1]
            if score != NA:
                scores.append(score)
        try:
            return sum(scores) / len(scores)
        except ZeroDivisionError:
            return NA
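
    # Worked example (added note): with score-field values yielding
    # (comment, score) pairs whose scores are [3, 4, NA], only the numeric
    # entries are averaged ((3 + 4) / 2 == 3.5); if every field is NA the
    # ZeroDivisionError branch returns NA itself.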

class SubmitButtonWidget(forms.Widget):
    def render(self, name, value, attrs=None):
        disabled = 'disabled' if attrs.get('disabled') else ''
        return '<input type="submit" name="{name}" value="{value}" class="button button--primary button--bottom-space" {disabled}>'.format(
            disabled=disabled,
            name=escape(name),
            value=escape(name.title()),
        )


class OpinionField(forms.IntegerField):
    def __init__(self, *args, opinion, **kwargs):
        kwargs["widget"] = SubmitButtonWidget
        self.opinion = opinion
        kwargs['label'] = ''
        super().__init__(*args, **kwargs)

    def clean(self, value):
        if value:
            return self.opinion


class ReviewOpinionForm(forms.ModelForm):
    opinion = forms.IntegerField(required=False, widget=forms.HiddenInput())

    class Meta:
        model = ReviewOpinion
        fields = ('opinion',)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for value, opinion in OPINION_CHOICES:
            self.fields[opinion.lower()] = OpinionField(
                label=opinion.title(),
                opinion=value,
                disabled=self.instance.opinion == value,
            )

    def clean(self):
        cleaned_data = super().clean()
        opinions = [cleaned_data.get(opinion.lower()) for _, opinion in OPINION_CHOICES]
        valid_opinions = [opinion for opinion in opinions if opinion is not None]
        if len(valid_opinions) > 1:
            self.add_error(None, "Can't submit both an agreement and disagreement")
        cleaned_data = {'opinion': valid_opinions[0]}
        return cleaned_data

    def save(self, *args, **kwargs):
        return super().save(*args, **kwargs)
| gpl-2.0 | -2,164,851,810,208,382,200 | 32.883562 | 139 | 0.610269 | false |
Pulgama/supriya | supriya/patterns/Pseed.py | 1 | 1073 | import inspect
from supriya.patterns.Pattern import Pattern
from supriya.patterns.RandomNumberGenerator import RandomNumberGenerator
class Pseed(Pattern):

    ### CLASS VARIABLES ###

    __slots__ = ("_pattern", "_seed")

    _file_path = __file__

    ### INITIALIZER ###

    def __init__(self, pattern, seed=0):
        assert isinstance(pattern, Pattern)
        self._pattern = pattern
        self._seed = int(seed)

    ### PRIVATE METHODS ###

    def _iterate(self, state=None):
        try:
            identifier = id(inspect.currentframe())
            rng = RandomNumberGenerator(seed=self.seed)
            Pattern._rngs[identifier] = iter(rng)
            yield from self._pattern
        finally:
            del Pattern._rngs[identifier]
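
    # Added note (Pwhite is a hypothetical wrapped pattern): e.g.
    # Pseed(Pwhite(0.0, 1.0), seed=5) pins the RNG used while iterating the
    # wrapped pattern, so every fresh iterator replays the same
    # pseudo-random stream.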

    ### PUBLIC PROPERTIES ###

    @property
    def arity(self):
        return self._pattern.arity

    @property
    def is_infinite(self):
        return self._pattern.is_infinite

    @property
    def pattern(self):
        return self._pattern

    @property
    def seed(self):
        return self._seed
| mit | -3,964,006,970,529,549,000 | 20.897959 | 72 | 0.599254 | false |
bmtgoncalves/TorinoCourse | Lecture VI/matplotlib_imshow.py | 1 | 1857 | import numpy as np
import matplotlib.pyplot as plt

def load_asc(filename):
    fp = open(filename)
    ncols, count = fp.readline().split()
    ncols = int(count)
    nrows, count = fp.readline().split()
    nrows = int(count)
    xllcorner, value = fp.readline().split()
    xllcorner = float(value)
    yllcorner, value = fp.readline().split()
    yllcorner = float(value)
    cellsize, value = fp.readline().split()
    cellsize = float(value)
    NODATA_value, value = fp.readline().split()
    NODATA_value = float(value)
    data = []
    for line in fp:
        fields = line.strip().split()
        data.append([float(field) for field in fields])
    data = np.array(data)
    data[data==NODATA_value] = 0
    return data, xllcorner, yllcorner, cellsize


def map_points(xllcorner, yllcorner, cellsize, nrows, x, y):
    x = int((x-xllcorner)/cellsize)
    y = (nrows-1)-int((y-yllcorner)/cellsize)
    return x, y
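
# Added note: map_points converts a lon/lat pair into (column, row) array
# indices -- the column counts up from xllcorner, while the row index is
# flipped ((nrows - 1) - ...) because row 0 of the loaded grid is the top
# edge of the map.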

def save_asc(data, xllcorner, yllcorner, cellsize, filename):
    fp = open(filename, "w")
    nrows, ncols = data.shape
    print("ncols", ncols, file=fp)
    print("nrows", nrows, file=fp)
    print("xllcorner", xllcorner, file=fp)
    print("yllcorner", yllcorner, file=fp)
    print("cellsize", cellsize, file=fp)
    print("NODATA_value", "-9999", file=fp)
    for i in range(nrows):
        for j in range(ncols):
            print(("%u " % data[i, j]), end="", file=fp)
        print("\n", end="", file=fp)
    fp.close()
fig, ax = plt.subplots(1,1)
data, xllcorner, yllcorner, cellsize = load_asc('../Lecture V/geofiles/US_pop.asc')
ax.imshow(np.log(data+1))
x1, y1 = map_points(xllcorner, yllcorner, cellsize, data.shape[0], -86.523435, 39.163355, )
x2, y2 = map_points(xllcorner, yllcorner, cellsize, data.shape[0], -84.404820, 33.761926, )
ax.plot([x1, x2], [y1, y2], 'r-')
fig.savefig('US_ASC.png')
| mit | -2,406,466,435,693,642,000 | 24.791667 | 91 | 0.624125 | false |
Rolight/So-Dashboard | so/core.py | 1 | 6436 | import json
from django.conf import settings
from elasticsearch import Elasticsearch
from so import cache_keys
from so.cache import redis_cache
from so.constant import constant
from so.models import (
    Website,
    WebsiteAllowedDomain,
    WebsiteUrlPattern,
    WebsiteSelector,
    SpiderTask,
)

test_data = {
    "website_id": 406,
    "task_id": 1,
    "index": "rolight-sample-1",
    "es_host": settings.ES_HOST,
    "allowed_domains": ["spidertest-app.smartgslb.com"],
    "start_urls": ["http://spidertest-app.smartgslb.com"],
    "sleep": 1,
    "parse_url_rules": [
        r"http://spidertest-app.smartgslb.com/\d{4}/\d{2}/\d{2}/.*",
    ],
}


def get_spiders():
    cluster_key = cache_keys.key_of_spider_cluster()
    all_spiders = [s.decode() for s in redis_cache.smembers(cluster_key)]
    spiders = []
    for spider in all_spiders:
        spider_key = cache_keys.key_of_spider(spider)
        spider_data = redis_cache.get(spider_key)
        if spider_data is not None:
            task_data = json.loads(spider_data.decode())
            website_id = task_data['website_id']
            try:
                website = Website.objects.get(pk=website_id)
                website = website.title
            except Website.DoesNotExist:
                website = ''
            task_data['website'] = website
            spiders.append(task_data)
    return {'all_spiders': all_spiders, 'spiders': spiders}


def stop_spider_task(spider):
    spider_key = cache_keys.key_of_spider(spider)
    spider_data = redis_cache.get(spider_key)
    if spider_data is None:
        return
    spider_data = json.loads(spider_data.decode())
    if spider_data['status'] == 'running':
        command_key = cache_keys.key_of_task_command(
            spider_data['task_id'],
            'stop'
        )
        redis_cache.incr(command_key)


def fetch_log(task_id, spider):
    try:
        spider_task = SpiderTask.objects.get(pk=task_id)
    except SpiderTask.DoesNotExist:
        return
    if spider_task.spider != spider:
        spider_task.spider = spider
        spider_task.save()
    log_key = cache_keys.key_of_task_log(task_id)
    log_len = redis_cache.llen(log_key)
    if log_len == 0:
        return
    log_data = []
    for _ in range(log_len):
        data = redis_cache.lpop(log_key)
        if data:
            log_data.append(data.decode())
    spider_task.logs += ''.join(log_data)
    spider_task.save()


def run_task(task):
    website_id = task['website_id']
    spider_task = SpiderTask.objects.create(website_id=website_id,)
    task['task_id'] = spider_task.pk
    task_data = json.dumps(task)
    task_queue_key = cache_keys.key_of_task_queue()
    redis_cache.rpush(task_queue_key, task_data)


def create_website_spider_task(website_id):
    website = Website.objects.get(pk=website_id)
    task = {
        'website_id': website.pk,
        'index': 'so-index-%d' % website_id,
        'es_host': settings.ES_HOST,
        'sleep': website.sleep_seconds,
        'expire_seconds': website.expire_seconds,
    }
    allow_domains = WebsiteAllowedDomain.objects.filter(website=website)
    task['allow_domains'] = [a.domain for a in allow_domains]
    start_urls = WebsiteUrlPattern.objects.filter(
        website=website, pattern_type=constant.URL_START)
    task['start_urls'] = [u.pattern for u in start_urls]
    walk_urls = WebsiteUrlPattern.objects.filter(
        website=website, pattern_type=constant.URL_WALK)
    task['walk_url_rules'] = [u.pattern for u in walk_urls]
    parse_urls = WebsiteUrlPattern.objects.filter(
        website=website, pattern_type=constant.URL_PARSE)
    task['parse_url_rules'] = [u.pattern for u in parse_urls]
    title_selector = WebsiteSelector.objects.get(
        website=website, key_name='title')
    task['title_selector'] = title_selector.xpath
    content_selector = WebsiteSelector.objects.get(
        website=website, key_name='body')
    task['content_selector'] = content_selector.xpath
    other_selectors = WebsiteSelector.objects.filter(
        website=website).exclude(
        key_name__in=['title', 'body'])
    task['custom_selectors'] = [
        {'field_name': s.key_name,
         'xpath': s.xpath}
        for s in other_selectors
    ]
    return task


def raw_es_query(index, query_body, ipp=10, page=1):
    es_host = settings.ES_HOST
    es = Elasticsearch(hosts=es_host)
    # query like raw_str
    if isinstance(query_body, str):
        res = es.search(
            index=index,
            doc_type='fulltext',
            body={
                'query': {
                    'multi_match': {
                        'query': query_body,
                        'fields': ['title', 'content']
                    }
                },
                'highlight': {
                    'fields': {
                        '*': {}
                    }
                },
            },
            from_=ipp * (page - 1),
            size=ipp,
        )
    else:
        res = es.search(
            index=index,
            doc_type='fulltext',
            body={
                'query': {
                    'match': query_body
                },
                'highlight': {
                    'fields': {
                        '*': {}
                    }
                }
            },
            from_=ipp * (page - 1),
            size=ipp,
        )
    return res
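
# A usage sketch (added; the index name and query text are illustrative):
#     res = raw_es_query('so-index-1', 'python tutorial', ipp=10, page=2)
#     hits = res['hits']['hits']
# A plain string searches title/content with highlighting; a dict is run
# as a raw `match` query instead.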

def es_query(data):
    query_data = data['query']
    ipp = data.get('ipp', 15)
    page = data.get('page', 1)
    index = data.get('index')
    print('ipp: %s, page: %s' % (ipp, page))
    res = raw_es_query(index, query_data, ipp, page)
    total = res['hits']['total']
    hits_data = []
    for hit in res['hits']['hits']:
        data = {
            'score': hit['_score'],
            'data': hit['highlight']
        }
        for field in ('url', 'title'):
            if field not in data['data']:
                data['data'][field] = hit['_source'][field]
        if isinstance(data['data']['title'], list):
            data['data']['title'] = data['data']['title'][0]
        if isinstance(data['data']['content'], list):
            data['data']['content'] = '...'.join(data['data']['content'])
        hits_data.append(data)
    print('len of hitsdata = %s' % len(hits_data))
    return {
        'page': page,
        'ipp': ipp,
        'total': total,
        'hits': hits_data
    }
| gpl-3.0 | -4,467,453,939,454,056,400 | 28.254545 | 73 | 0.547545 | false |
ActiveState/code | recipes/Python/496742_ShelfProxy/recipe-496742.py | 1 | 3065 | import shelve

class InvalidationError(Exception):
    pass


class ShelfProxy(Proxy):
    __slots__ = ["_key", "_shelf", "_invalidated"]

    def __init__(self, obj, shelf, key):
        Proxy.__init__(self, obj)
        object.__setattr__(self, "_shelf", shelf)
        object.__setattr__(self, "_key", key)
        object.__setattr__(self, "_invalidated", False)

    def __del__(self):
        try:
            sync_proxy(self)
        except InvalidationError:
            pass


class ShelfWrapper(object):
    def __init__(self, shelf):
        self.__shelf = shelf
        self.__cache = {}

    def __del__(self):
        self.close()

    def __getattr__(self, name):
        return getattr(self.__shelf, name)

    def __contains__(self, key):
        return key in self.__shelf

    def __len__(self):
        return len(self.__shelf)

    def __delitem__(self, key):
        if key in self.__cache:
            object.__setattr__(self.__cache[key], "_invalidated", True)
            del self.__cache[key]
        del self.__shelf[key]

    def __getitem__(self, key):
        try:
            obj = self.__cache[key]
        except KeyError:
            self.__cache[key] = obj = ShelfProxy(self.__shelf[key], self.__shelf, key)
        return obj

    def __setitem__(self, key, value):
        if key in self.__cache:
            object.__setattr__(self.__cache[key], "_invalidated", True)
        self.__cache[key] = ShelfProxy(value, self.__shelf, key)
        self.__shelf[key] = value

    def sync(self):
        for obj in self.__cache.itervalues():
            try:
                sync_proxy(obj)
            except InvalidationError:
                pass

    def close(self):
        self.sync()
        self.__cache.clear()
        self.__shelf.close()


def sync_proxy(proxy):
    if object.__getattribute__(proxy, "_invalidated"):
        raise InvalidationError("the proxy has been invalidated (the key was reassigned)")
    shelf = object.__getattribute__(proxy, "_shelf")
    key = object.__getattribute__(proxy, "_key")
    obj = object.__getattribute__(proxy, "_obj")
    shelf[key] = obj
    shelf.sync()


def open(*args):
    return ShelfWrapper( shelve.open(*args) )
------ example ------
>>> db = open("blah.db")
>>> db["mylist"]=[1,2,3]
>>> db["mylist"].append(4)
>>> db["mylist"]
[1, 2, 3, 4]
>>> p = db["mylist"]
>>> type(p)
<class '__main__.ShelfProxy(list)'>
>>> p.append(5)
>>> p2 = db["mylist"]
>>> p2
[1, 2, 3, 4, 5]
>>> p2 is p
True
----- invalidation -----
When we reassign a key that has been proxied earlier, the proxy
instance becomes invalidated, so it will not override the new value.
>>> db["mylist"] = 19
>>> sync_proxy(p)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "Proxy.py", line 152, in sync_proxy
raise InvalidationError("the proxy has been invalidated (the key was reassigned)")
__main__.InvalidationError: the proxy has been invalidated (the key was reassigned)
>>>
>>> db["mylist"] += 1
>>> db["mylist"]
20
| mit | 630,592,922,411,288,300 | 25.885965 | 90 | 0.55367 | false |
dnaextrim/django_adminlte_x | adminlte/static/plugins/datatables/extensions/Responsive/examples/child-rows/column-control.html.py | 1 | 21862 | XXXXXXXXX XXXXX
XXXXXXX | mit | -3,839,956,034,201,854,500 | 24.600703 | 166 | 0.73072 | false |
PlushBeaver/FanFicFare | fanficfare/adapters/adapter_thehookupzonenet.py | 1 | 1121 | # -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team, 2015 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Software: eFiction
from base_efiction_adapter import BaseEfictionAdapter
class TheHookupZoneNetAdapter(BaseEfictionAdapter):
@staticmethod
def getSiteDomain():
return 'thehookupzone.net'
@classmethod
def getPathToArchive(self):
return '/CriminalMinds'
@classmethod
    def getSiteAbbrev(self):
return 'thupz'
@classmethod
def getDateFormat(self):
return "%d/%m/%y"
def getClass():
return TheHookupZoneNetAdapter
| gpl-3.0 | 1,168,718,173,804,861,000 | 27.025 | 74 | 0.727029 | false |
lrntc/touch_hdr | touch_hdr.py | 1 | 6454 | import sys
import subprocess
import detect_camera
import touch_hdr_variables
from PySide.QtCore import *
from PySide.QtGui import *
class TouchHDRMenu(QWidget):
def __init__(self):
super(TouchHDRMenu, self).__init__()
self.check = QCheckBox("HDR", self)
self.btn_take_fast_hdr = QPushButton("Quick HDR", self)
self.btn_take_picture = QPushButton("Capture", self)
self.controlsLayout = QGridLayout()
self.controlsLayout.addWidget(self.check, 0,0)
self.controlsLayout.addWidget(self.btn_take_fast_hdr, 1,0)
self.controlsLayout.addWidget(self.btn_take_picture, 0,1)
self.setLayout(self.controlsLayout)
class AdvancedHDRLayout(QWidget):
    def select_all(self):
        # Check every EV checkbox created in __init__.
        for exp in range(len(touch_hdr_variables.EV)):
            getattr(self, "check" + str(exp)).setCheckState(Qt.Checked)
    def select_half(self):
        # Check every other EV checkbox (even indices only).
        for exp in range(0, len(touch_hdr_variables.EV), 2):
            getattr(self, "check" + str(exp)).setCheckState(Qt.Checked)
def close_me(self):
exit()
def capture_photos(self):
        #self.btn_take_picture.setText("Capturing..")
#self.total = len(touch_hdr_variables.EV)
#each step: XX Pictures to go
        #Loop through the checkboxes
self.command_list = ["gphoto2"]
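        # Build one long gphoto2 invocation: each checked EV step appends a
        # --set-config exposure-compensation flag followed by
        # --capture-image, so the whole bracket is shot in a single call.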
for exp in range(0, len(touch_hdr_variables.EV)):
self.temp_var = "check" + str(exp)
self.temp_check = getattr(self, str(self.temp_var))
#Get checked ckeckboxes
self.progress.setText("Getting data")
if self.temp_check.isChecked():
print(exp, " Checked, and has value: ", touch_hdr_variables.EV_dict[exp])
self.command_list.append("--set-config")
self.command_list.append("/main/capturesettings/exposurecompensation=" + touch_hdr_variables.EV_dict[exp])
self.command_list.append("--capture-image")
else:
print("nope")
self.progress.setText("Starting sequence")
subprocess.call(self.command_list)
print("HDR Sequence is done.")
self.progress.setText("Done")
def __init__(self):
super(AdvancedHDRLayout, self).__init__()
self.controlsLayout = QGridLayout()
x = 0
y = 0
for exp in range(0, len(touch_hdr_variables.EV)):
print("value: ", exp, "has value ", touch_hdr_variables.EV[exp])
#Declare variables
self.temp_var = "check" + str(exp)
setattr(self, str(self.temp_var), QCheckBox(touch_hdr_variables.EV[exp] , self))
#add widgets to layout
#self.temp_widget = "self." + str(self.temp_var)
self.controlsLayout.addWidget(getattr(self, str(self.temp_var)), y, x)
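            # wrap the checkbox grid to a new row after six columns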
            if x < 5:
                x += 1
            else:
                x = 0
                y += 1
#Add menu buttons
self.progress = QLabel("...", self)
self.quit = QPushButton("Quit", self)
self.semi = QPushButton("Half", self)
self.all = QPushButton("All", self)
self.capture = QPushButton("Capture", self)
self.controlsLayout.addWidget(self.progress, y, (x+1))
self.controlsLayout.addWidget(self.quit, y, (x))
self.controlsLayout.addWidget(self.semi, y, (x+2))
self.controlsLayout.addWidget(self.all, y, (x+3))
self.controlsLayout.addWidget(self.capture, y, (x+4))
#Link action to menu buttons
self.quit.clicked.connect(self.close_me)
self.semi.clicked.connect(self.select_half)
self.all.clicked.connect(self.select_all)
self.capture.clicked.connect(self.capture_photos)
self.setLayout(self.controlsLayout)
class TouchHDRWindow(QMainWindow):
def __init__(self):
super(TouchHDRWindow, self).__init__()
self.widget = AdvancedHDRLayout()
self.setCentralWidget(self.widget)
class TouchHDRWindowMenu(QMainWindow):
def __init__(self):
super(TouchHDRWindowMenu, self).__init__()
self.widget = TouchHDRMenu()
self.setCentralWidget(self.widget)
detect_camera.set_up_dslr()
app = QApplication(sys.argv)
window = TouchHDRWindow()
window.show()
app.exec_()
sys.exit()
| gpl-2.0 | -797,496,485,911,961,500 | 30.482927 | 122 | 0.617911 | false |
markastern/hbcal | hbcal/hebrew_calendar/date.py | 1 | 15258 | """This module defines calendar classes (mostly abstract) for hbcal"""
# Copyright 2015, 2016, 2019 Mark Stern
#
# This file is part of Hbcal.
#
# Hbcal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# Hbcal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hbcal. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from abc import ABCMeta, abstractmethod
from enum import IntEnum
import logging
from future.builtins import range
from future.utils import PY2, with_metaclass
from .abs_time import RelTime, AbsTime, DAY
from .abstract_attribute import AbstractAttribute
# Exception Classes
class BadDate(ValueError):
"""An exception class for an invalid Date"""
pass
class DateBeforeCreation(BadDate):
"""An exception class for a date before the creation of the world"""
pass
class MonthNotInRange(BadDate):
"""An exception class for a date whose month is out of range"""
pass
class DateNotInRange(BadDate):
"""An exception class for a date whose date (in month) is out of range"""
pass
class Month(IntEnum):
"""A base class for month classes of different calendars"""
def name(self):
"""Return the name of the month"""
return self._name_.replace('_', ' ').title()
@staticmethod
@abstractmethod
def start_year_month():
"""Return the month at the start of which the year changes.
For Hebrew years, this is not 1."""
if PY2:
raise NotImplementedError
@staticmethod
@abstractmethod
def end_year_month():
"""Return the month after which the year changes."""
if PY2:
raise NotImplementedError
def __format__(self, fmt):
return str(self)
def __str__(self):
return self.name()
class Date(object):
"""A date class for different calendar types.
The year attribute is an instance of class Year and determines the
calendar type."""
def __init__(self, year, month, date=None):
if isinstance(month, AbsTime):
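            # Constructed from a point in time: find the containing year,
            # then walk forward day by day from the year's first day.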
if month < AbsTime(0, 0, 6):
raise BadDate
else:
year, remainder = year.current_year(month)
self.year = year
self.month = year.month_class().start_year_month()
self.date = year.first_day()
days = remainder.days_chalakim[0]
self.__iadd__(days)
else:
self.year = year
(month, self.date) = year.adjust_date(month, date)
self.month = year.month_class()(month)
def __eq__(self, other):
if not isinstance(other, Date):
return NotImplemented
return (self.year, self.month, self.date) == (other.year,
other.month,
other.date)
def __ne__(self, other):
return not self == other
def __iadd__(self, other):
if isinstance(other, int):
self.year, self.month, self.date = self.year.add_days(self.month,
self.date,
other)
return self
else:
raise TypeError("unsupported operand type(s) for += : " +
"'{0}' and '{1}'".format(self.__class__.__name__,
other.__class__.__name__))
def __isub__(self, other):
if isinstance(other, int):
self.year, self.month, self.date = self.year.add_days(self.month,
self.date,
-other)
return self
else:
raise TypeError("unsupported operand type(s) for -= : " +
"'{0}' and '{1}'".format(self.__class__.__name__,
other.__class__.__name__))
def __add__(self, other):
total = self
total += other
return total
def __sub__(self, other):
difference = self
difference -= other
return difference
def day_start(self):
"""Return the absolute time of the start of the current date."""
return self.year.day_start(self.month, self.date)
def __repr__(self):
return "Date({0}, {1}, {2})".format(self.year, self.month, self.date)
def __str__(self):
return self.__format__("")
def __format__(self, fmt):
return self.year.format_date(self.month, self.date, fmt)
LOG = logging.getLogger(__name__)
class Year(with_metaclass(ABCMeta, object)):
"""Abstract base class for defining the year of different calendar types"""
MIN_DATE = None
FIRST_YEAR = AbstractAttribute("The value of the first year")
START_FIRST_YEAR = AbstractAttribute("The start of the first year")
def __init__(self, year):
if isinstance(year, int):
self._value = self.FIRST_YEAR
self._start = self.START_FIRST_YEAR
self.value = year
elif isinstance(year, self.__class__):
# pylint: disable=protected-access
self._value = year.value
self._start = year._start
else:
raise TypeError("unsupported operand type for " +
"{0}(): '{1}'".format(self.__class__.__name__,
year.__class__.__name__))
@property
def value(self):
"""Return the year value (integer)."""
return self._value
@value.setter
@abstractmethod
def value(self, value):
"""Set year value.
:param value: The year value (int)
:return: None
"""
raise NotImplementedError
@property
def start(self):
"""Return the start of the year (AbsTime).
For Hebrew Years, this returns Molad Tishri rather than the actual
start of the year.
"""
return self._start
def __eq__(self, other):
return (isinstance(other, self.__class__) and
isinstance(self, other.__class__) and
self._value == other.value)
def __ne__(self, other):
return not self == other
@abstractmethod
def days_in_month(self, month):
"""Return the number of days in the specified month."""
pass
@staticmethod
def first_day():
"""Return the first day of the month."""
return 1
def last_day(self, month):
"""Return the last day of the month."""
return self.days_in_month(month)
@abstractmethod
def months_in_year(self):
"""Return the number of months in the year."""
raise NotImplementedError
def months(self):
"""A generator for the months of the current year."""
for month in range(self.month_class().start_year_month(),
self.months_in_year() + 1):
yield self.month_class()(month)
@abstractmethod
def days_in_year(self):
"""Return the number of days in the current year."""
raise NotImplementedError
def duration(self):
"""Return the duration (RelTime) of the current year."""
return self.days_in_year() * DAY
def adjust_date(self, month, date):
"""Check if the month and date supplied are valid for the current year.
Returns a tuple comprising the month and date, adjusted if necessary
to make them valid. If the month and date are still invalid,
an exception is thrown."""
min_date = self.min_date()
if (self._value, month, date) < (min_date.year.value, min_date.month,
min_date.date):
raise DateBeforeCreation()
# Allow negative months (count back from end of year
if -self.months_in_year() <= month <= -1:
month += self.months_in_year() + 1
# Check if the month is nonsense
if month not in self.months():
raise MonthNotInRange()
# Allow negative values of date (count back from end of month)
if -self.days_in_month(month) <= date <= -1:
date += self.last_day(month) + 1
# Check if date is valid
if not self.first_day() <= date <= self.last_day(month):
raise DateNotInRange()
return (month, date)
def __iadd__(self, other):
self.value += other
return self
def __isub__(self, other):
self.value -= other
return self
def __add__(self, other):
if not isinstance(other, int):
return NotImplemented
new_year = self.__class__(self)
return new_year.__iadd__(other)
def __sub__(self, other):
if not isinstance(other, int):
return NotImplemented
new_year = self.__class__(self)
return new_year.__isub__(other)
def add_days(self, month, date, days):
""" Adds the specified number of days to a date in the current year.
:param month: the current month
:param date: the current date (of the month)
:param days: number of days to add
:return: A 3-tuple comprising:
A (possibly new) year object
The month after addition
The date after addition
"""
date += days
cls = self.month_class()
if date > self.last_day(month):
while date > self.last_day(month):
date -= self.days_in_month(month)
month = cls(month + 1 if month < self.months_in_year() else 1)
if month == cls.start_year_month():
self += 1
else:
while date < self.first_day():
if month == cls.start_year_month():
self -= 1
month = cls(month - 1 if month > 1 else self.months_in_year())
date += self.days_in_month(month)
return (self, month, date)
@classmethod
def current_year(cls, atime):
"""Return a 2-tuple for the year containing the specified time atime.
The tuple comprises:
The year containing atime (an instance of cls, a subclass of Year)
A RelTime object containing the difference between atime and the
start of the year.
This should be an abstract class method, but abstract class methods
do not work in python 2 (fixed in python 3.4).
"""
raise NotImplementedError
def day_start(self, month, date):
"""Return the start (AbsTime) of the specified month and date."""
day_count = 0
for month_count in self.months():
if month == month_count:
day_count += date - self.first_day()
break
else:
day_count += self.days_in_month(month_count)
return self.start + RelTime(0, day_count)
def __repr__(self):
return "{0}({1})".format(type(self).__name__, self._value)
def __str__(self):
return "{0}".format(self._value)
@classmethod
def month_class(cls):
"""Return the associated class (subclass of Month) for months.
This should be an abstract class method, but abstract class methods
do not work in python 2 (fixed in python 3.4).
"""
raise NotImplementedError
def format_date(self, month, date, fmt):
"""Returns a formatted unicode string for output of date.
By default, fmt is ignored. Sub-classes may act differently."""
del fmt
return u"{0} {1} {2}".format(date, month.name(), self._value)
@classmethod
def min_date(cls):
"""Calculate the minimum date for this class.
We only need to do it once per class."""
if cls.MIN_DATE is None:
cls.MIN_DATE = Date(cls, AbsTime(0, 0, 6))
return cls.MIN_DATE
class RegularYear(Year):
"""Abstract subclass of Year with a regular cycle of years.
Not all years are the same length, but there is usually a cycle of
a fixed number of years, where the length of a cycle is fixed. For
Hebrew years this is only true of the Molad used to calculate
the start of the year (so cycles are not exactly the same length).
"""
@classmethod
def _years_per_cycle(cls):
"""Return number of years in a cycle."""
raise NotImplementedError
@classmethod
def _cycle_duration(cls):
"""Return length of a cycle (RelTime)."""
raise NotImplementedError
@classmethod
def estimate_current_year(cls, atime):
"""Return an estimate of the year containing the specified time.
The calling function must allow for the possibility that it is not
exactly correct."""
return ((atime - cls.START_FIRST_YEAR) * cls._years_per_cycle() //
cls._cycle_duration()) + cls.FIRST_YEAR
@Year.value.setter
def value(self, value):
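        # Jump by whole cycles first (rounded to the nearest cycle), then
        # step single years; the cached start time moves with each step.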
difference = value - self._value
cycles = (difference + self._years_per_cycle() // 2) // \
self._years_per_cycle()
self._start += self._cycle_duration() * cycles
self._value += self._years_per_cycle() * cycles
while self._value < value:
self._start += self.duration()
self._value += 1
while self._value > value:
self._value -= 1
self._start -= self.duration()
@classmethod
def current_year(cls, atime):
estimate = cls.estimate_current_year(atime)
year = cls(estimate)
# now adjust it until it is exactly right.
while year.start > atime:
year -= 1
while year.start + year.days_in_year() * DAY <= atime:
year += 1
if estimate != year.value:
LOG.debug("Calculating year for %s, estimated %s actual %s",
atime, estimate, year.value)
# We now have the right year.
return year, atime - year.start
class DateTime(object):
"""A class comprising a Date object and a RelTime object.
The object represents an instant in time. It comprises a Date object and
a RelTime object (the latter should comprise only hours and chalakim)."""
def __init__(self, cls, atime):
""" Construct a DatTime object
:param cls: A subclass of Year
:param atime: An AbsTime object (a point in time)
"""
year, remainder = cls.current_year(atime)
month = year.month_class().start_year_month()
self.date = Date(year, month, year.first_day())
days, remainder = remainder.days_chalakim
self.date += days
self.time = RelTime(0, 0, 0, remainder)
| gpl-2.0 | 123,123,470,890,932,380 | 31.812903 | 79 | 0.566391 | false |
nzinfo/ElasticSphinx | space/db_import.py | 1 | 7053 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
实际处理 表结构的导入
1 遍历所有表
2 检查表的字段定义、索引的定义、主键的定义、外键的定义。
"""
import json
from .db_conn import cs_create_engine
import sqlalchemy
from sqlalchemy import inspect
class DBTableMeta(object):
def __init__(self, tbl_name):
self._tbl_name = tbl_name
self._column = []
self._primary = []
self._foreign = []
self._index = []
self._unique = []
def type_convert(self, c, dialect=False):
"""
将用类表示的数据字段类型,转为 字符串表示,
目前不支持 方言。 如果支持,只支持 MySQL | PostgreSQL
"""
"""
sqlalchemy.types.BigInteger
sqlalchemy.types.Boolean
sqlalchemy.types.Date
sqlalchemy.types.DateTime
sqlalchemy.types.Enum
sqlalchemy.types.Float
sqlalchemy.types.Integer
sqlalchemy.types.Interval
sqlalchemy.types.LargeBinary
sqlalchemy.types.Numeric
sqlalchemy.types.PickleType
sqlalchemy.types.SchemaType
sqlalchemy.types.SmallInteger
sqlalchemy.types.String
sqlalchemy.types.Text
sqlalchemy.types.Time
sqlalchemy.types.Unicode
sqlalchemy.types.UnicodeText
"""
        # WON'T FIX: this could be written far more elegantly, but it is
        # kept as a chain of IFs for flexibility in handling types later.
# SQL Standard Types
if issubclass(c, sqlalchemy.types.BIGINT):
return "BIGINT"
if issubclass(c, sqlalchemy.types.BINARY):
return "BINARY"
if issubclass(c, sqlalchemy.types.BLOB):
return "BLOB"
if issubclass(c, sqlalchemy.types.BOOLEAN):
return "BOOLEAN"
if issubclass(c, sqlalchemy.types.CHAR):
return "CHAR"
if issubclass(c, sqlalchemy.types.CLOB):
return "CLOB"
if issubclass(c, sqlalchemy.types.DATE):
return "DATE"
if issubclass(c, sqlalchemy.types.DATETIME):
return "DATETIME"
if issubclass(c, sqlalchemy.types.DECIMAL):
return "DECIMAL"
if issubclass(c, sqlalchemy.types.FLOAT):
return "FLOAT"
if issubclass(c, sqlalchemy.types.INTEGER):
return "INTEGER"
if issubclass(c, sqlalchemy.types.NCHAR):
return "NCHAR"
if issubclass(c, sqlalchemy.types.NVARCHAR):
return "NVARCHAR"
if issubclass(c, sqlalchemy.types.NUMERIC):
return "NUMERIC"
if issubclass(c, sqlalchemy.types.REAL):
return "REAL"
if issubclass(c, sqlalchemy.types.SMALLINT):
return "SMALLINT"
if issubclass(c, sqlalchemy.types.TEXT):
return "TEXT"
if issubclass(c, sqlalchemy.types.TIME):
return "TIME"
if issubclass(c, sqlalchemy.types.TIMESTAMP):
return "TIMESTAMP"
if issubclass(c, sqlalchemy.types.VARBINARY):
return "VARBINARY"
if issubclass(c, sqlalchemy.types.VARCHAR):
return "VARCHAR"
if dialect:
if issubclass(c, sqlalchemy.dialects.mysql.TINYINT):
return "TINYINT"
if issubclass(c, sqlalchemy.dialects.mysql.TINYTEXT):
return "TEXT"
return None
def to_jsonable(self):
meta = {
"name": self._tbl_name,
"columns": [],
"primary": self._primary,
"foreign": self._foreign,
"index": self._index,
#"unique": self._unique # 已经包括在 index 中
}
#with meta["columns"] as c:
if True:
for c in self._column:
#print c.keys(), dir(c)
column_meta = {
"name": c['name'],
"nullable": c['nullable'],
#"default": c['default'],
#"autoinc": c['autoincrement'],
}
if 'default' in c and c['default']:
column_meta['default'] = c['default']
if 'autoincrement' in c:
column_meta['autoinc'] = c['autoincrement']
c_expr = c['type'].column_expression
c_type = self.type_convert(c_expr.im_class, True)
column_meta['type'] = c_type
if c_type in ['CHAR', 'NCHAR', 'NVARCHAR', 'VARBINARY', 'VARCHAR']:
column_meta['length'] = c['type'].length
meta['columns'].append(column_meta)
                #FIXME: INTEGER display-width limits are not handled for now.
#print dir(c['type'])
#print dir(c_expr), c_expr
#print issubclass(c_expr.im_class, sqlalchemy.types.CHAR)
#print c_expr.im_class, c_expr.im_func, c_expr.im_self
#print c['type'].length
#print c['type'].python_type, c['type']._sqla_type, c['type'].charset, c['type'].collation
#print col
        # handle primary keys
        # handle indexes
        # handle unique indexes
        # handle foreign keys
return meta
class DBInspector(object):
"""
"""
def __init__(self, engine):
self._engine = engine
self._table_names = []
def tables(self):
if len(self._table_names):
return self._table_names
inspector = inspect(self._engine)
self._table_names = inspector.get_table_names()
return self._table_names
def table_define(self, tbl_name):
inspector = inspect(self._engine)
if tbl_name not in self.tables():
return [] # no such table
columns = inspector.get_columns(tbl_name)
comments = {}
if self._engine.uri.find('mysql') == 0:
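            # MySQL only: the SQLAlchemy inspector does not expose column
            # comments, so fetch them straight from information_schema.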
#select COLUMN_NAME, COLUMN_COMMENT from information_schema.columns where table_name="test_tbl"
try:
rs = self._engine.execute("select COLUMN_NAME, COLUMN_COMMENT from information_schema.columns "
"where table_name='%s'" % tbl_name)
for row in rs:
#print row
if row[1]:
comments[row[0]] = row[1]
finally:
#conn.close()
pass
# postfix stick comment on column
for col in columns:
if col['name'] in comments:
col['comment'] = comments[col['name']]
# do table define
tbl_meta = DBTableMeta(tbl_name)
tbl_meta._column = columns
tbl_meta._primary = inspector.get_pk_constraint(tbl_name)
tbl_meta._foreign = inspector.get_foreign_keys(tbl_name)
tbl_meta._index = inspector.get_indexes(tbl_name)
tbl_meta._unique = inspector.get_unique_constraints(tbl_name)
return tbl_meta
# end class
# end of file
| apache-2.0 | -5,775,867,692,580,977,000 | 29.436937 | 111 | 0.534853 | false |
mikeshardmind/SinbadCogs | rolemanagement/massmanager.py | 1 | 10201 | import csv
import io
import logging
from typing import Optional, Set
import discord
from redbot.core import checks, commands
from .abc import MixinMeta
from .converters import (
ComplexActionConverter,
ComplexSearchConverter,
RoleSyntaxConverter,
)
from .exceptions import RoleManagementException
log = logging.getLogger("red.sinbadcogs.rolemanagement.massmanager")
class MassManagementMixin(MixinMeta):
"""
Mass role operations
"""
@commands.guild_only()
@checks.admin_or_permissions(manage_roles=True)
@commands.group(name="massrole", autohelp=True, aliases=["mrole"])
async def mrole(self, ctx: commands.GuildContext):
"""
Commands for mass role management
"""
pass
def search_filter(self, members: set, query: dict) -> set:
"""
Reusable
"""
if query["everyone"]:
return members
all_set: Set[discord.Member] = set()
if query["all"]:
first, *rest = query["all"]
all_set = set(first.members)
for other_role in rest:
all_set &= set(other_role.members)
none_set: Set[discord.Member] = set()
if query["none"]:
for role in query["none"]:
none_set.update(role.members)
any_set: Set[discord.Member] = set()
if query["any"]:
for role in query["any"]:
any_set.update(role.members)
minimum_perms: Optional[discord.Permissions] = None
if query["hasperm"]:
minimum_perms = discord.Permissions()
minimum_perms.update(**{x: True for x in query["hasperm"]})
def mfilter(m: discord.Member) -> bool:
if query["bots"] and not m.bot:
return False
if query["humans"] and m.bot:
return False
if query["any"] and m not in any_set:
return False
if query["all"] and m not in all_set:
return False
if query["none"] and m in none_set:
return False
if query["hasperm"] and not m.guild_permissions.is_superset(minimum_perms):
return False
if query["anyperm"] and not any(
bool(value and perm in query["anyperm"])
for perm, value in iter(m.guild_permissions)
):
return False
if query["notperm"] and any(
bool(value and perm in query["notperm"])
for perm, value in iter(m.guild_permissions)
):
return False
if query["noroles"] and len(m.roles) != 1:
return False
            # 0 is a valid option for these, everyone role not counted, ._roles doesn't include everyone
if query["quantity"] is not None and len(m._roles) != query["quantity"]:
return False
if query["lt"] is not None and len(m._roles) >= query["lt"]:
return False
if query["gt"] is not None and len(m._roles) <= query["gt"]:
return False
top_role = self.get_top_role(m)
if query["above"] and top_role <= query["above"]:
return False
if query["below"] and top_role >= query["below"]:
return False
return True
members = {m for m in members if mfilter(m)}
return members
@mrole.command(name="user")
async def mrole_user(
self,
ctx: commands.GuildContext,
users: commands.Greedy[discord.Member],
*,
_query: RoleSyntaxConverter,
) -> None:
"""
adds/removes roles to one or more users
You cannot add and remove the same role
Example Usage:
[p]massrole user Sinbad --add RoleToGive "Role with spaces to give"
--remove RoleToRemove "some other role to remove" Somethirdrole
[p]massrole user LoudMouthedUser ProfaneUser --add muted
For role operations based on role membership, permissions had, or whether someone is a bot
(or even just add to/remove from all) see `[p]massrole search` and `[p]massrole modify`
"""
query = _query.parsed
apply = query["add"] + query["remove"]
if not await self.all_are_valid_roles(ctx, *apply):
await ctx.send(
"Either you or I don't have the required permissions "
"or position in the hierarchy."
)
return
for user in users:
await self.update_roles_atomically(
who=user, give=query["add"], remove=query["remove"]
)
await ctx.tick()
@mrole.command(name="search")
async def mrole_search(
self, ctx: commands.GuildContext, *, _query: ComplexSearchConverter
):
"""
Searches for users with the specified role criteria
--has-all roles
--has-none roles
--has-any roles
--has-no-roles
--has-exactly-nroles number
--has-more-than-nroles number
--has-less-than-nroles number
--has-perm permissions
--any-perm permissions
--not-perm permissions
--above role
--below role
--only-humans
--only-bots
--everyone
--csv
csv output will be used if output would exceed embed limits, or if flag is provided
"""
members = set(ctx.guild.members)
query = _query.parsed
members = self.search_filter(members, query)
if len(members) < 50 and not query["csv"]:
def chunker(memberset, size=3):
ret_str = ""
for i, m in enumerate(memberset, 1):
ret_str += m.mention
if i % size == 0:
ret_str += "\n"
else:
ret_str += " "
return ret_str
description = chunker(members)
embed = discord.Embed(description=description)
if ctx.guild:
embed.color = ctx.guild.me.color
await ctx.send(
embed=embed, content=f"Search results for {ctx.author.mention}"
)
else:
await self.send_maybe_chunked_csv(ctx, list(members))
@staticmethod
async def send_maybe_chunked_csv(ctx: commands.GuildContext, members):
chunk_size = 75000
chunks = [
members[i : (i + chunk_size)] for i in range(0, len(members), chunk_size)
]
for part, chunk in enumerate(chunks, 1):
csvf = io.StringIO()
fieldnames = [
"User ID",
"Display Name",
"Username#Discrim",
"Joined Server",
"Joined Discord",
]
fmt = "%Y-%m-%d"
writer = csv.DictWriter(csvf, fieldnames=fieldnames)
writer.writeheader()
for member in chunk:
writer.writerow(
{
"User ID": member.id,
"Display Name": member.display_name,
"Username#Discrim": str(member),
"Joined Server": member.joined_at.strftime(fmt)
if member.joined_at
else None,
"Joined Discord": member.created_at.strftime(fmt),
}
)
csvf.seek(0)
b_data = csvf.read().encode()
data = io.BytesIO(b_data)
data.seek(0)
filename = f"{ctx.message.id}"
if len(chunks) > 1:
filename += f"-part{part}"
filename += ".csv"
await ctx.send(
content=f"Data for {ctx.author.mention}",
files=[discord.File(data, filename=filename)],
)
csvf.close()
data.close()
del csvf
del data
@mrole.command(name="modify")
async def mrole_complex(
self, ctx: commands.GuildContext, *, _query: ComplexActionConverter
):
"""
Similar syntax to search, while applying/removing roles
--has-all roles
--has-none roles
--has-any roles
--has-no-roles
--has-exactly-nroles number
--has-more-than-nroles number
--has-less-than-nroles number
--has-perm permissions
--any-perm permissions
--not-perm permissions
--above role
--below role
--only-humans
--only-bots
--everyone
--add roles
--remove roles
"""
query = _query.parsed
apply = query["add"] + query["remove"]
if not await self.all_are_valid_roles(ctx, *apply):
return await ctx.send(
"Either you or I don't have the required permissions "
"or position in the hierarchy."
)
members = set(ctx.guild.members)
members = self.search_filter(members, query)
if len(members) > 100:
await ctx.send(
"This may take a while given the number of members to update."
)
async with ctx.typing():
for member in members:
try:
await self.update_roles_atomically(
who=member, give=query["add"], remove=query["remove"]
)
except RoleManagementException:
log.debug(
"Internal filter failure on member id %d guild id %d query %s",
member.id,
ctx.guild.id,
query,
)
except discord.HTTPException:
log.debug(
"Unpredicted failure for member id %d in guild id %d query %s",
member.id,
ctx.guild.id,
query,
)
await ctx.tick()
| mit | 3,458,244,613,031,202,000 | 29.27003 | 103 | 0.51142 | false |
tjcsl/ion | intranet/apps/eighth/migrations/0039_auto_20160322_1013.py | 1 | 1169 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-22 14:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('eighth', '0038_merge')]
operations = [
migrations.AddField(
model_name='eighthscheduledactivity',
name='restricted',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='eighthscheduledactivity',
name='sticky',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='historicaleighthscheduledactivity',
name='administrative',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='historicaleighthscheduledactivity',
name='restricted',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='historicaleighthscheduledactivity',
name='sticky',
field=models.BooleanField(default=False),
),
]
| gpl-2.0 | -507,226,886,883,369,500 | 29.763158 | 59 | 0.598802 | false |
janko33bd/bitcoin | contrib/devtools/security-check.py | 1 | 6510 | #!/usr/bin/python2
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
from __future__ import division,print_function,unicode_literals
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
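    # A PIE binary is linked like a shared object, so the ELF header
    # type reads DYN instead of EXEC.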
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>=2 and line[0] == b'Type:' and line[1] == b'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.split(b'\n'):
if line.startswith(b'Program Headers:'):
in_headers = True
if line == b'':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find(b'Type')
ofs_offset = line.find(b'Offset')
ofs_flags = line.find(b'Flg')
ofs_align = line.find(b'Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
raise ValueError('Cannot parse elfread -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == b'GNU_STACK':
have_gnu_stack = True
if b'W' in flags and b'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker need to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
if typ == b'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>1 and tokens[1] == b'(BIND_NOW)' or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]):
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
if b'__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
'''
Get PE DllCharacteristics bits
'''
p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split('\n'):
tokens = line.split()
if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
return int(tokens[1],16)
return 0
def check_PE_PIE(executable):
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
return bool(get_PE_dll_characteristics(executable) & 0x40)
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
return bool(get_PE_dll_characteristics(executable) & 0x100)
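# Map binary format -> list of (check name, check function) pairs.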
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('PIE', check_PE_PIE),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
with open(filename, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
for (name, func) in CHECKS[etype]:
if not func(filename):
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
except IOError:
print('%s: cannot open' % filename)
retval = 1
exit(retval)
| mit | 5,513,877,472,251,832,000 | 34.966851 | 163 | 0.607988 | false |
icyflame/batman | tests/family_tests.py | 1 | 10716 | # -*- coding: utf-8 -*-
"""Tests for the family module."""
#
# (C) Pywikibot team, 2014-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot.family import Family, SingleSiteFamily
from pywikibot.exceptions import UnknownFamily
from pywikibot.tools import PY2
import pywikibot.site
from tests.aspects import (
unittest,
TestCase,
DeprecationTestCase,
PatchingTestCase,
)
from tests.utils import DrySite
if not PY2:
basestring = (str, )
class TestFamily(TestCase):
"""Test cases for Family methods."""
net = False
def test_family_load_valid(self):
"""Test that a family can be loaded via Family.load."""
for name in pywikibot.config.family_files:
f = Family.load(name)
self.assertIsInstance(f.langs, dict)
self.assertTrue(f.langs)
self.assertTrue(f.codes)
self.assertTrue(iter(f.codes))
self.assertIsInstance(next(iter(f.codes)), basestring)
self.assertTrue(f.domains)
self.assertTrue(iter(f.domains))
for domain in f.domains:
self.assertIsInstance(domain, basestring)
if domain != 'localhost':
self.assertIn('.', domain)
self.assertEqual(f.name, name)
self.assertIsInstance(f.languages_by_size, list)
self.assertGreaterEqual(set(f.langs), set(f.languages_by_size))
if len(f.langs) > 6 and f.name != 'wikimediachapter':
self.assertNotEqual(f.languages_by_size, [])
if isinstance(f, SingleSiteFamily):
self.assertIsNotNone(f.code)
self.assertIsNotNone(f.domain)
self.assertEqual(set(f.langs), set([f.code]))
self.assertEqual(set(f.codes), set([f.code]))
def test_family_load_invalid(self):
"""Test that an invalid family raised UnknownFamily exception."""
self.assertRaises(UnknownFamily, Family.load, 'unknown')
def test_eq_different_families_by_name(self):
"""Test that two Family with same name are equal."""
family_1 = Family()
family_2 = Family()
family_1.name = 'a'
family_2.name = 'a'
self.assertNotEqual(id(family_1), id(family_2))
self.assertEqual(family_1, family_2)
def test_eq_different_families_by_id(self):
"""Test that two Family with no name attribute are not equal."""
family_1 = Family()
family_2 = Family()
family_1.name = 'a'
del family_2.name
self.assertNotEqual(id(family_1), id(family_2))
self.assertNotEqual(family_1, family_2)
def test_eq_family_with_string_repr_same_family(self):
"""Test that Family and string with same name are equal."""
family = Family.load('wikipedia')
other = 'wikipedia'
self.assertEqual(family, other)
self.assertFalse(family != other)
def test_ne_family_with_string_repr_different_family(self):
"""Test that Family and string with different name are not equal."""
family = Family.load('wikipedia')
other = 'wikisource'
self.assertNotEqual(family, other)
self.assertFalse(family == other)
def test_eq_family_with_string_repr_not_existing_family(self):
"""Test that Family and string with different name are not equal."""
family = Family.load('wikipedia')
other = 'unknown'
self.assertRaises(UnknownFamily, family.__eq__, other)
def test_get_obsolete_wp(self):
"""Test three types of obsolete codes."""
family = Family.load('wikipedia')
self.assertIsInstance(family.obsolete, dict)
# redirected code (see site tests test_alias_code_site)
self.assertEqual(family.obsolete['dk'], 'da')
# closed/locked site (see site tests test_locked_site)
self.assertEqual(family.obsolete['mh'], None)
# offline site (see site tests test_removed_site)
self.assertEqual(family.obsolete['ru-sib'], None)
def test_get_obsolete_test(self):
"""Test WikimediaFamily default obsolete."""
family = Family.load('test')
self.assertIn('dk', family.obsolete)
self.assertIn('dk', family.interwiki_replacements)
self.assertEqual(family.obsolete, family.interwiki_replacements)
self.assertEqual(family.interwiki_removals, set())
def test_set_obsolete(self):
"""Test obsolete can be set."""
family = Family()
self.assertEqual(family.obsolete, {})
self.assertEqual(family.interwiki_replacements, {})
self.assertEqual(family.interwiki_removals, [])
family.obsolete = {'a': 'b', 'c': None}
self.assertEqual(family.obsolete, {'a': 'b', 'c': None})
self.assertEqual(family.interwiki_replacements, {'a': 'b'})
self.assertEqual(family.interwiki_removals, ['c'])
def test_obsolete_readonly(self):
"""Test obsolete result not updatable."""
family = Family.load('test')
self.assertRaises(TypeError, family.obsolete.update, {})
self.assertRaises(TypeError, family.obsolete.__setitem__, 'a', 'b')
def test_WikimediaFamily_obsolete_readonly(self):
"""Test WikimediaFamily obsolete is readonly."""
family = Family.load('test')
self.assertRaises(TypeError, family.__setattr__, 'obsolete',
{'a': 'b', 'c': None})
class TestFamilyUrlRegex(PatchingTestCase):
"""Test family URL regex."""
net = False
@PatchingTestCase.patched(pywikibot, 'Site')
def Site(self, code, fam, *args, **kwargs):
"""Own DrySite creator."""
self.assertEqual(args, tuple())
self.assertEqual(kwargs, {})
self.assertEqual(code, self.current_code)
self.assertEqual(fam, self.current_family)
site = DrySite(code, fam, None, None)
site._siteinfo._cache['general'] = ({'articlepath': self.article_path},
True)
return site
def setUp(self):
"""Setup default article path."""
super(TestFamilyUrlRegex, self).setUp()
self.article_path = '/wiki/$1'
def test_from_url_wikipedia_extra(self):
"""Test various URLs against wikipedia regex."""
self.current_code = 'vo'
self.current_family = 'wikipedia'
f = Family.load('wikipedia')
prefix = 'https://vo.wikipedia.org'
self.assertEqual(f.from_url(prefix + '/wiki/'), 'vo')
self.assertEqual(f.from_url(prefix + '/w/index.php'), 'vo')
self.assertEqual(f.from_url(prefix + '/w/index.php/'), 'vo')
self.assertEqual(f.from_url(prefix + '/w/index.php?title=$1'), 'vo')
self.assertEqual(f.from_url(prefix + '/wiki/$1'), 'vo')
self.assertEqual(f.from_url('//vo.wikipedia.org/wiki/$1'), 'vo')
self.assertEqual(f.from_url(prefix + '/w/index.php/$1'), 'vo')
self.assertEqual(f.from_url('//vo.wikipedia.org/wiki/$1'), 'vo')
# Text after $1 is not allowed
self.assertRaises(ValueError, f.from_url,
'//vo.wikipedia.org/wiki/$1/foo')
# the IWM may contain the wrong protocol, but it's only used to
# determine a site so using HTTP or HTTPS is not an issue
self.assertEqual(f.from_url('http://vo.wikipedia.org/wiki/$1'), 'vo')
# wrong protocol
self.assertIsNone(f.from_url('ftp://vo.wikipedia.org/wiki/$1'))
# wrong code
self.assertIsNone(f.from_url('https://foobar.wikipedia.org/wiki/$1'))
# wrong family
self.assertIsNone(f.from_url('https://vo.wikibooks.org/wiki/$1'))
self.assertIsNone(f.from_url('http://vo.wikibooks.org/wiki/$1'))
# invalid path
self.assertIsNone(f.from_url('https://vo.wikipedia.org/wik/$1'))
self.assertIsNone(f.from_url('https://vo.wikipedia.org/index.php/$1'))
def test_each_family(self):
"""Test each family builds a working regex."""
for family in pywikibot.config.family_files:
self.current_family = family
family = Family.load(family)
for code in family.codes:
self.current_code = code
url = ('%s://%s%s/$1' % (family.protocol(code),
family.hostname(code),
family.path(code)))
                # Families can switch off detection by URL; this applies
                # to test:test (there is also test:wikipedia)
if family._ignore_from_url or code in family._ignore_from_url:
self.assertIsNone(family.from_url(url))
else:
self.assertEqual(family.from_url(url), code)
class TestOldFamilyMethod(DeprecationTestCase):
"""Test cases for old site.Family method."""
net = False
def test_old_site_family_function(self):
"""Test deprecated Family function with valid families."""
f = pywikibot.site.Family('species')
self.assertEqual(f.name, 'species')
f = pywikibot.site.Family('osm')
self.assertEqual(f.name, 'osm')
self.assertOneDeprecationParts('pywikibot.site.Family',
'pywikibot.family.Family.load', 2)
# @deprecated warning occurs within redirect_func's call
# invoking the method instead of this test module.
self._do_test_warning_filename = False
f = pywikibot.site.Family('i18n', fatal=False)
self.assertEqual(f.name, 'i18n')
self.assertDeprecationParts('pywikibot.site.Family',
'pywikibot.family.Family.load')
self.assertDeprecationParts('fatal argument of pywikibot.family.Family.load')
def test_old_site_family_function_invalid(self):
"""Test that an invalid family raised UnknownFamily exception."""
# As assertRaises calls the method, unittest is the module
# invoking the method instead of this test module.
self._do_test_warning_filename = False
self.assertRaises(UnknownFamily, pywikibot.site.Family, 'unknown',
fatal=False)
self.assertRaises(UnknownFamily, pywikibot.site.Family, 'unknown')
self.assertDeprecationParts('pywikibot.site.Family',
'pywikibot.family.Family.load')
self.assertDeprecationParts('fatal argument of pywikibot.family.Family.load')
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| mit | 4,559,287,985,541,460,500 | 38.836431 | 85 | 0.608996 | false |
glaucomunsberg/decision_tree | Attributs.py | 1 | 10212 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from Configuration import Configuration
from Exemple import *
from math import log
from operator import itemgetter
class Attributs:
_file_url = None
_config = None
list_attributs = None
list_exemples = None
def __init__(self, file_url=""):
self._file_url = file_url
self._config = Configuration()
# check if the file is possible to open
if not isinstance(file_url, str):
raise TypeError("file_url can't be open")
if file_url != "":
with open(file_url, 'r') as fichier:
self.list_attributs = fichier.readline().lower().strip().split(self._config.splitter)
#print 'attributs:'
#print(self.list_attributs)
#print 'attributs split:'
self.list_attributs = self.list_attributs[self._config.num_split_columns:(len(self.list_attributs)-1)]
#print(self.list_attributs)
self.list_exemples = self.list_of_examples(fichier.read().lower().split('\n'),self.list_attributs)
#print 'examples:'
#for i in self.list_exemples :
# print i.print_example()
else:
self.list_attributs = list()
self.list_exemples = list()
#
    # Return the list of distinct labels
    # found among the examples
#
def labels_possible(self):
list_to_return = list()
for exemple in self.list_exemples:
if not exemple.label in list_to_return:
list_to_return.append(exemple.label)
return list_to_return
#
    # Entropy:
    #   return_val = 0
    #   S = list of examples
    #   for each label in the set:
    #       sub_set = the elements with that label
    #       return_val += |c| * log_2(|c|)
    #   entropy = log_2(|S|) - return_val/|S|
#
#
def entropy(self):
return_val = 0
for label in self.labels_possible():
sub_set = self.labeled_as(label)
log_sub_set = len(sub_set)
return_val += log_sub_set * log(log_sub_set, 2)
return log(len(self), 2) - return_val/len(self)
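        # Worked derivation: this is the usual Shannon entropy
        #   -sum_c (|c|/|S|) * log2(|c|/|S|)
        # rearranged so log2(|S|) is factored out:
        #   log2(|S|) - (1/|S|) * sum_c |c| * log2(|c|)
        # which is exactly what the loop above accumulates.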
#
    # The best attribute:
    # return the name of the attribute that needs to be tested
#
def biggest_attr(self, ID3=True):
max, return_attr = float("-inf"), ""
for attribut in self.list_attributs:
if not ID3:
gain = self.ratio_gain(attribut)
else:
gain = self.gain_entropy(attribut)
if gain >= max:
max, return_attr = gain, attribut
return return_attr
#
# List values of attribut
#
def list_values_of_attr(self, nom_attribut):
list_return = list()
for exemple in self.list_exemples:
# is_in = False
# for element in list_return:
# if element == exemple:
# is_in = True
# if not is_in:
# list_return.append(exemple.dict_attributs[nom_attribut])
if not exemple.dict_attributs[nom_attribut] in list_return:
list_return.append(exemple.dict_attributs[nom_attribut])
return list_return
#
    # Return the information gain of an attribute:
    #   for each value v, take the examples where attribute == v
    #   value_entropy = sum over v of |Sv| * Entropy(Sv)
    #   Gain(S, A) = Entropy(S) - 1/|S| * value_entropy
#
def gain_entropy(self, nom_attribut):
value_entropy = 0
for value in self.list_values_of_attr(nom_attribut):
sub_set_attr = self.attribut_as(nom_attribut, value)
value_entropy += len(sub_set_attr) * sub_set_attr.entropy()
return self.entropy() - value_entropy/len(self)
#
    # Gain ratio: information gain divided by the split entropy
#
def ratio_gain(self, nom_attribut):
split = self.entropy_set(nom_attribut)
gain = self.gain_entropy(nom_attribut)
return gain/split if split != 0 else float("inf")
#
#
# Most common value of attr
#
def most_common_value_of(self, nom_attribut):
dict_frequences = dict()
for exemple in self.list_exemples:
if exemple.dict_attributs[nom_attribut] not in dict_frequences:
dict_frequences[exemple.dict_attributs[nom_attribut]] = 0
dict_frequences[exemple.dict_attributs[nom_attribut]] += 1
return max(dict_frequences, key=dict_frequences.get)
#
# Set of examples of this attribut
#
def attribut_as(self, nom_attribut, value):
attr_to_return = Attributs()
attr_to_return.list_attributs = self.list_attributs[:]
attr_to_return.list_attributs.remove(nom_attribut)
for exemple in self.list_exemples:
if exemple.dict_attributs[nom_attribut] == value:
attr_to_return.list_exemples.append(exemple)
return attr_to_return
#
    # Return a set of examples that have this label
#
def labeled_as(self, label):
attr_to_return = Attributs()
attr_to_return.list_attributs = self.list_attributs[:]
for exemple in self.list_exemples:
if exemple.label == label:
attr_to_return.list_exemples.append(exemple)
return attr_to_return
#
# entropy of set
#
def entropy_set(self, nom_attribut):
value = 0
for valeur in self.list_values_of_attr(nom_attribut):
sub_set = self.attribut_as(nom_attribut, valeur)
value += len(sub_set) * log(len(sub_set), 2)
return log(len(self), 2) - value/len(self)
#
# Check if the attr is discretizable
#
def is_discretizable(self, nom_attribut):
for exemple in self.list_exemples:
try:
float(exemple.dict_attributs[nom_attribut])
except ValueError:
return False
return True
#
# Return tuples of attributs
# ['attr','value']
#
def attr_tuples(self):
list_tuples = list()
for attribut in self.list_attributs:
            # if it is discretizable
if self.is_discretizable(attribut):
                # save the values of each example
value_attr = [exemple.dict_attributs[attribut] for exemple in self.list_exemples]
list_tuples.append((attribut, value_attr))
return list_tuples
#
    # C4.5-style discretization of an attribute into intervals
#
def c45_discretizable(self, nom_attribut):
negative_value_infinity = float("-inf")
positive_value_infinity = float("+inf")
list_intervals = list()
indice_borne_inf = 0
        # values_sorted holds (index, value) pairs for the attribute, then is sorted
values_sorted = [(i, self.list_exemples[i].dict_attributs[nom_attribut]) for i in range(len(self))]
        # sort numerically; lexicographic string order would misplace e.g. '10' before '2'
        values_sorted.sort(key=lambda pair: float(pair[1]))
        # walk the sorted values and cut an interval wherever the label changes
for i in range(1, len(values_sorted)):
if self.list_exemples[values_sorted[i][0]].label != self.list_exemples[values_sorted[indice_borne_inf][0]].label:
borne_inf = negative_value_infinity
if len(list_intervals) == 0:
#borne_inf = math.inf
borne_inf = negative_value_infinity
else:
borne_inf = (float(values_sorted[indice_borne_inf][1]) + float(values_sorted[indice_borne_inf-1][1]))/2
borne_sup = (float(values_sorted[i][1]) + float(values_sorted[i-1][1])) / 2
list_intervals.append((borne_inf, borne_sup))
indice_borne_inf = i
        # the last interval extends to positive infinity
try:
list_intervals.append((list_intervals[-1][1], positive_value_infinity))
except Exception, e:
print e.message,list_intervals,nom_attribut
for exemple in self.list_exemples:
for intervalle in list_intervals:
if float(exemple.dict_attributs[nom_attribut]) < intervalle[1]:
exemple.dict_attributs[nom_attribut] = intervalle
break
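        # Worked sketch (hypothetical numeric attribute): values 1, 2, 10, 11
        # with labels a, a, b, b produce one cut at (2+10)/2 = 6.0, giving the
        # intervals (-inf, 6.0) and (6.0, +inf); every example's raw value is
        # then replaced by the interval that contains it.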
#
    # Set empty or unknown values to
    # the most common value of this attribute
#
def set_attr_without_value(self):
for example in range(len(self.list_exemples)):
# if attribut is in dict attribut list
for nom_attribut in self.list_exemples[example].dict_attributs:
# and the value is for example '' or '?' or 'unknown'
if self.list_exemples[example].dict_attributs[nom_attribut] in self._config.unknown_values:
                    # isolate the elements having the same label
sous_ensemble = self.labeled_as(self.list_exemples[example].label)
                    # and retrieve the most frequent value of this same
                    # attribute to assign it in place of the '?'
self.list_exemples[example].dict_attributs[nom_attribut] = sous_ensemble.most_common_value_of(nom_attribut)
def __len__(self):
return len(self.list_exemples)
#
    # Return a list of Exemple objects built from the raw rows
    # and the list of attribute names
#
def list_of_examples(self,exemples, noms_attributs):
list_to_return = list()
for row in exemples:
attributs = row.lower().strip().split(self._config.splitter)
if self._config.num_split_columns > 0:
attributs = attributs[self._config.num_split_columns:len(attributs)]
#print 'attr->', attributs
label = attributs[-1] if len(attributs) != len(noms_attributs) else ""
list_to_return.append(Exemple(noms_attributs,attributs[:len(noms_attributs)],label))
return list_to_return
| mit | 2,976,176,886,541,299,700 | 34.547038 | 127 | 0.554989 | false |
auready/django | django/db/models/sql/query.py | 2 | 95034 | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
from collections import Counter, Iterator, Mapping, OrderedDict
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Ref
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q, check_rel_lookup_compatibility, refs_expression,
)
from django.db.models.sql.constants import (
INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, QUERY_TERMS, SINGLE,
)
from django.db.models.sql.datastructures import (
BaseTable, Empty, EmptyResultSet, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils.encoding import force_text
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
class RawQuery:
"""
    A single raw SQL query.
"""
def __init__(self, sql, using, params=None, context=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
self.context = context or {}
def clone(self, using):
return RawQuery(self.sql, using, params=self.params, context=self.context.copy())
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.column_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
return self.sql % self.params_type(self.params)
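        # Illustrative sketch (hypothetical table; no quoting or adaptation
        # happens here, see _execute_query()):
        #   RawQuery("SELECT * FROM foo WHERE id = %s", 'default', params=(1,))
        #   -> str(...) == "SELECT * FROM foo WHERE id = 1"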
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
class Query:
"""
A single SQL query.
"""
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
query_terms = QUERY_TERMS
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
self.external_aliases = set()
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.subquery = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
# Note that annotations go to annotations dictionary.
self.select = []
self.tables = [] # Aliases in the order they are created.
self.where = where()
self.where_class = where
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A list of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = []
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = []
self.select_for_update = False
self.select_for_update_nowait = False
self.select_for_update_skip_locked = False
self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = []
# SQL annotation-related attributes
# The _annotations will be an OrderedDict when used. Due to the cost
# of creating OrderedDict this attribute is created lazily (in
# self.annotations property).
self._annotations = None # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# Set combination attributes
self.combinator = None
self.combinator_all = False
self.combined_queries = ()
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
# The _extra attribute is an OrderedDict, lazily created similarly to
# .annotations
self._extra = None # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (set(), True)
self.context = {}
@property
def extra(self):
if self._extra is None:
self._extra = OrderedDict()
return self._extra
@property
def annotations(self):
if self._annotations is None:
self._annotations = OrderedDict()
return self._annotations
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Returns the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
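        # Sketch (hypothetical model): this returns a pair like
        #   ('SELECT ... WHERE "author"."id" = %s', (1,))
        # which __str__ above then interpolates into a single string.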
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def _prepare(self, field):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.model = self.model
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.default_cols = self.default_cols
obj.default_ordering = self.default_ordering
obj.standard_ordering = self.standard_ordering
obj.select = self.select[:]
obj.tables = self.tables[:]
obj.where = self.where.clone()
obj.where_class = self.where_class
if self.group_by is None:
obj.group_by = None
elif self.group_by is True:
obj.group_by = True
else:
obj.group_by = self.group_by[:]
obj.order_by = self.order_by[:]
obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
obj.distinct = self.distinct
obj.distinct_fields = self.distinct_fields[:]
obj.select_for_update = self.select_for_update
obj.select_for_update_nowait = self.select_for_update_nowait
obj.select_for_update_skip_locked = self.select_for_update_skip_locked
obj.select_related = self.select_related
obj.values_select = self.values_select[:]
obj._annotations = self._annotations.copy() if self._annotations is not None else None
if self.annotation_select_mask is None:
obj.annotation_select_mask = None
else:
obj.annotation_select_mask = self.annotation_select_mask.copy()
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.max_depth = self.max_depth
obj.combinator = self.combinator
obj.combinator_all = self.combinator_all
obj.combined_queries = self.combined_queries
obj._extra = self._extra.copy() if self._extra is not None else None
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
obj.extra_tables = self.extra_tables
obj.extra_order_by = self.extra_order_by
obj.deferred_loading = copy.copy(self.deferred_loading[0]), self.deferred_loading[1]
if self.filter_is_sticky and self.used_aliases:
obj.used_aliases = self.used_aliases.copy()
else:
obj.used_aliases = set()
obj.filter_is_sticky = False
obj.subquery = self.subquery
if 'alias_prefix' in self.__dict__:
obj.alias_prefix = self.alias_prefix
if 'subq_aliases' in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.__dict__.update(kwargs)
if hasattr(obj, '_setup_query'):
obj._setup_query()
obj.context = self.context.copy()
return obj
def add_context(self, key, value):
self.context[key] = value
def get_context(self, key, default=None):
return self.context.get(key, default)
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
# of a related model, then it might be that column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
                # It's already a Ref to a subquery (see resolve_ref() for
                # details)
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
elif isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
# Reference to column. Make sure the referenced column
# is selected.
col_cnt += 1
col_alias = '__col%d' % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_exprs.append(Ref(col_alias, expr))
else:
# Some other expression not referencing database values
                # directly. Its subexpressions might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
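        # Sketch for the .aggregate(Sum('author__awards')) case noted above:
        # the inner query gains an annotation like '__col1' -> Col(author.awards)
        # and the outer Sum's source expression becomes Ref('__col1', ...),
        # so the subquery is guaranteed to select the referenced column.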
def get_aggregation(self, using, added_aggregate_names):
"""
Returns the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
has_limit = self.low_mark != 0 or self.high_mark is not None
has_existing_annotations = any(
annotation for alias, annotation
in self.annotations.items()
if alias not in added_aggregate_names
)
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, then those operations must be
# done in a subquery so that we are aggregating on the limit and/or
# distinct results instead of applying the distinct and limit after the
# aggregation.
if (isinstance(self.group_by, list) or has_limit or has_existing_annotations or
self.distinct):
from django.db.models.sql.subqueries import AggregateQuery
outer_query = AggregateQuery(self.model)
inner_query = self.clone()
inner_query.select_for_update = False
inner_query.select_related = False
if not has_limit and not self.distinct_fields:
# Queries with distinct_fields need ordering and when a limit
# is applied we must take the slice from the ordered query.
# Otherwise no need for ordering.
inner_query.clear_ordering(True)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
if inner_query.default_cols and has_existing_annotations:
inner_query.group_by = [self.model._meta.pk.get_col(inner_query.get_initial_alias())]
inner_query.default_cols = False
relabels = {t: 'subquery' for t in inner_query.tables}
relabels[None] = 'subquery'
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(relabels)
del inner_query.annotations[alias]
            # Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if inner_query.select == [] and not inner_query.default_cols and not inner_query.annotation_select_mask:
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = [self.model._meta.pk.get_col(inner_query.get_initial_alias())]
try:
outer_query.add_subquery(inner_query, using)
except EmptyResultSet:
return {
alias: None
for alias in outer_query.annotation_select
}
else:
outer_query = self
self.select = []
self.default_cols = False
self._extra = {}
outer_query.clear_ordering(True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using)
result = compiler.execute_sql(SINGLE)
if result is None:
result = [None for q in outer_query.annotation_select.items()]
converters = compiler.get_converters(outer_query.annotation_select.values())
result = compiler.apply_converters(result, converters)
return {
alias: val
for (alias, annotation), val
in zip(outer_query.annotation_select.items(), result)
}
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('*'), alias='__count', is_summary=True)
number = obj.get_aggregation(using, ['__count'])['__count']
if number is None:
number = 0
return number
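        # Resulting query shape (roughly; details vary by backend and by
        # whether get_aggregation() decides a subquery is needed):
        #   SELECT COUNT(*) AS __count FROM <table> [WHERE ...]
        #   SELECT COUNT(*) FROM (SELECT ...) subquery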
def has_filters(self):
return self.where
def has_results(self, using):
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
q.set_group_by()
q.clear_select_clause()
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
        # condition. In the OR case a single True is enough, so a single row
        # is enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.tables)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
for alias in rhs.tables[1:]:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
self.select = []
for col in rhs.select:
self.add_select(col.relabeled_clone(change_map))
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self._extra and rhs._extra:
raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
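        # Usage sketch: QuerySet combination drives this method, e.g.
        # (qs1 | qs2) merges qs2.query into a clone of qs1.query with
        # connector=OR, relabelling qs2's aliases through change_map so the
        # two alias namespaces don't collide.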
def deferred_to_data(self, target, callback):
"""
        Converts the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.items():
for field in model._meta.fields:
if field in values:
continue
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in must_include.items():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.items():
callback(target, model, values)
else:
for model, values in must_include.items():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in seen.items():
callback(target, model, values)
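        # Worked example (hypothetical models): with
        #   self.deferred_loading == ({'author__name'}, True)
        # the traversal above records Author.name in 'seen' and forces
        # Author's pk plus the traversed relation into must_include, so the
        # callback ends up receiving every concrete Author field except 'name'.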
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
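        # Sketch (hypothetical table "book"): the first occurrence is aliased
        # as plain 'book'; later create=True calls yield 'T<n>' where <n> is
        # len(self.alias_map) + 1 at call time (alias_map is filled in by
        # join()), which is why a simple self-join shows up as "book" T2.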
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
""" Decreases the reference count for this alias. """
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
        Recursively promotes the join type of the given aliases and their
        children to an outer join. A join is only promoted if it is nullable
        or the parent join is an outer join.
        The children promotion is done to avoid join chains that contain a LOUTER
        b INNER c. So, if we currently have a INNER b INNER c and a->b is promoted,
        then we must also promote b->c automatically, or otherwise the promotion
of a->b doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
already_louter = self.alias_map[alias].join_type == LOUTER
if ((self.alias_map[alias].nullable or parent_louter) and
not already_louter):
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map.keys()
if self.alias_map[join].parent_alias == alias and join not in aliases
)
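        # Sketch: given a chain a INNER b INNER c (b's parent is a, c's
        # parent is b), promote_joins({'b'}) promotes a->b to LOUTER and then
        # re-examines c; since c's parent is now LOUTER, b->c is promoted as
        # well, keeping the no-LOUTER-then-INNER invariant.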
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
        Resets reference counts for aliases so that they match the values
        passed in the 'to_counts' mapping.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Changes the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map.keys()).intersection(set(change_map.values())) == set()
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, list):
self.group_by = [col.relabeled_clone(change_map) for col in self.group_by]
self.select = [col.relabeled_clone(change_map) for col in self.select]
if self._annotations:
self._annotations = OrderedDict(
(key, col.relabeled_clone(change_map)) for key, col in self._annotations.items())
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {change_map.get(alias, alias)
for alias in self.external_aliases}
def bump_prefix(self, outer_query):
"""
Changes the alias prefix to the next letter in the alphabet in a way
that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
"""
def prefix_gen():
"""
Generates a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
for s in product(seq, repeat=n):
yield ''.join(s)
prefix = None
if self.alias_prefix != outer_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
local_recursion_limit = 127 # explicitly avoid infinite loop
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RuntimeError(
'Maximum recursion depth exceeded: too many subqueries.'
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
change_map = OrderedDict()
for pos, alias in enumerate(self.tables):
new_alias = '%s%d' % (self.alias_prefix, pos)
change_map[alias] = new_alias
self.tables[pos] = new_alias
self.change_aliases(change_map)
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
count.
"""
if self.tables:
alias = self.tables[0]
self.ref_alias(alias)
else:
alias = self.join(BaseTable(self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count. Note that after execution, the reference counts are zeroed, so
tables added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None):
"""
        Returns an alias for the given 'join', either reusing an existing
        alias for that join or creating a new one. 'join' is either a
        sql.datastructures.BaseTable or Join object; its join columns
        correspond to the SQL equivalent of::
            lhs.l_id1 = table.r_id1 AND lhs.l_id2 = table.r_id2
        The 'reuse' parameter can be either None, which means all joins
        (matching the connection) are reusable, or it can be a set containing
        the aliases that can be reused.
        A join is always created as LOUTER if the lhs alias is LOUTER to make
        sure we do not generate chains like t1 LOUTER t2 INNER t3. All new
        joins are created as LOUTER if join.nullable is True, since a nullable
        join can potentially involve NULL values and is a candidate for
        promotion (to "left outer") when combining querysets.
"""
reuse = [a for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join]
if reuse:
self.ref_alias(reuse[0])
return reuse[0]
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(join.table_name, create=True)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
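        # Reuse sketch: calling join() twice with an equal Join and reuse=None
        # returns the same alias with its refcount bumped; passing an empty
        # reuse set instead forces a second alias for the same connection.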
def join_parent_model(self, opts, model, alias, seen):
"""
Makes sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
            # Proxy models have elements in their base chain
            # with no parents; assign the new options
            # object and skip to the next base in that
            # case
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
_, _, _, joins, _ = self.setup_joins(
[link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = joins[-1]
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False):
"""
Adds a single annotation expression to the Query
"""
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
summarize=is_summary)
self.append_annotation_mask([alias])
self.annotations[alias] = annotation
def _prepare_as_filter_value(self):
return self.clone()
def prepare_lookup_value(self, value, lookups, can_reuse, allow_joins=True):
# Default lookup if none given is exact.
used_joins = []
if len(lookups) == 0:
lookups = ['exact']
        # Interpret '__exact=None' as the SQL 'IS NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookups[-1] not in ('exact', 'iexact'):
raise ValueError("Cannot use None as a query value")
return True, ['isnull'], used_joins
elif hasattr(value, 'resolve_expression'):
pre_joins = self.alias_refcount.copy()
value = value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins)
used_joins = [k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)]
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
processed_values = []
used_joins = set()
for sub_value in value:
if hasattr(sub_value, 'resolve_expression'):
pre_joins = self.alias_refcount.copy()
processed_values.append(
sub_value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins)
)
# The used_joins for a tuple of expressions is the union of
# the used_joins for the individual expressions.
used_joins |= set(k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0))
# Subqueries need to use a different set of aliases than the
# outer query. Call bump_prefix to change aliases of the inner
# query (the value).
if hasattr(value, '_prepare_as_filter_value'):
value = value._prepare_as_filter_value()
value.bump_prefix(self)
# For Oracle '' is equivalent to null. The check needs to be done
# at this stage because join promotion can't be done at compiler
# stage. Using DEFAULT_DB_ALIAS isn't nice, but it is the best we
# can do here. Similar thing is done in is_nullable(), too.
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
lookups[-1] == 'exact' and value == ''):
value = True
lookups[-1] = 'isnull'
return value, lookups, used_joins
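        # Behaviour sketch:
        #   prepare_lookup_value(None, ['exact'], None)
        #     -> (True, ['isnull'], [])      # __exact=None becomes IS NULL
        #   prepare_lookup_value(None, ['gt'], None)
        #     -> raises ValueError           # None only allowed for (i)exact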
def solve_lookup_type(self, lookup):
"""
        Solve the lookup type from the lookup (e.g. 'foobar__id__icontains')
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self._annotations:
expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) == 0:
lookup_parts = ['exact']
elif len(lookup_parts) > 1:
if not field_parts:
raise FieldError(
'Invalid lookup "%s" for model %s".' %
(lookup, self.get_meta().model.__name__))
return lookup_parts, field_parts, False
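        # Examples (hypothetical fields/annotations):
        #   solve_lookup_type('author__name__icontains')
        #     -> (['icontains'], ['author', 'name'], False)
        #   solve_lookup_type('num_books__gt')  # 'num_books' is an annotation
        #     -> (['gt'], (), <the annotation expression>)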
def check_query_object_type(self, value, opts, field):
"""
Checks whether the object passed while querying is of the correct type.
If not, it raises a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.' %
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""
Checks the type of object passed to query relations.
"""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
            # If _forced_pk isn't set, either this isn't a queryset query, or
            # values()/values_list() was specified by the developer, in which
            # case that choice is trusted.
if (getattr(value, '_forced_pk', False) and
not check_rel_lookup_compatibility(value.model, opts, field)):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
(value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, '_meta'):
self.check_query_object_type(value, opts, field)
elif hasattr(value, '__iter__'):
for v in value:
self.check_query_object_type(v, opts, field)
def build_lookup(self, lookups, lhs, rhs):
"""
        Tries to extract transforms and a lookup from the given lhs.
        The lhs value is something that works like an SQLExpression.
        The rhs value is what the lookup is going to compare against.
        The 'lookups' argument is a list of names to extract using get_lookup()
        and get_transform().
"""
lookups = lookups[:]
while lookups:
name = lookups[0]
# If there is just one part left, try first get_lookup() so
# that if the lhs supports both transform and lookup for the
# name, then lookup will be picked.
if len(lookups) == 1:
final_lookup = lhs.get_lookup(name)
if not final_lookup:
# We didn't find a lookup. We are going to interpret
# the name as transform, and do an Exact lookup against
# it.
lhs = self.try_transform(lhs, name, lookups)
final_lookup = lhs.get_lookup('exact')
return final_lookup(lhs, rhs)
lhs = self.try_transform(lhs, name, lookups)
lookups = lookups[1:]
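        # Resolution sketch (assuming the Lower transform is registered on
        # the field):
        #   build_lookup(['icontains'], col, 'x') -> IContains(col, 'x')
        #   build_lookup(['lower', 'exact'], col, 'x') -> Exact(Lower(col), 'x')
        #   build_lookup(['lower'], col, 'x') also yields
        #   Exact(Lower(col), 'x'), via the implied 'exact' fallback above.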
def try_transform(self, lhs, name, rest_of_lookups):
"""
Helper method for build_lookup. Tries to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted." %
(name, lhs.output_field.__class__.__name__))
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, connector=AND, allow_joins=True, split_subq=True):
"""
Builds a WhereNode for a single filter clause, but doesn't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
        The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
        Note that add_filter will not do any negating itself, that is done
        higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
# Work out the lookup type and remove it from the end of 'parts',
# if necessary.
value, lookups, used_joins = self.prepare_lookup_value(value, lookups, can_reuse, allow_joins)
clause = self.where_class()
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
clause.add(condition, AND)
return clause, []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
field, sources, opts, join_list, path = self.setup_joins(
parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(field, value, opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_list
except MultiJoin as e:
return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse, e.names_with_path)
if can_reuse is not None:
can_reuse.update(join_list)
used_joins = set(used_joins).union(set(join_list))
targets, alias, join_list = self.trim_joins(sources, join_list, path)
if field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))
assert num_lookups > 0 # Likely a bug in Django if this fails.
lookup_class = field.get_lookup(lookups[0])
if lookup_class is None:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))
if len(targets) == 1:
lhs = targets[0].get_col(alias, field)
else:
lhs = MultiColSource(alias, targets, sources, field)
condition = lookup_class(lhs, value)
lookup_type = lookup_class.lookup_name
else:
col = targets[0].get_col(alias, field)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause.add(condition, AND)
require_outer = lookup_type == 'isnull' and value is True and not current_negated
if current_negated and (lookup_type != 'isnull' or value is False):
require_outer = True
if (lookup_type != 'isnull' and (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == LOUTER)):
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
lookup_class = targets[0].get_lookup('isnull')
clause.add(lookup_class(targets[0].get_col(alias, sources[0]), False), AND)
return clause, used_joins if not require_outer else ()
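# Illustrative example (hypothetical model fields): build_filter(
# ('author__name__iexact', 'dave')) sets up the join to the author
# table, then builds an iexact lookup against the joined name column
# and returns the resulting where clause plus the joins it used.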
def add_filter(self, filter_clause):
self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider the case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
# So, demotion is OK.)
existing_inner = set(
(a for a in self.alias_map if self.alias_map[a].join_type == INNER))
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
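# Illustrative example (hypothetical models): for
# filter(Q(entry__headline='a') | Q(entry__rating=5)) both OR branches
# vote for the shared "entry" join, so update_join_types() keeps it
# INNER; a join referenced by only one branch would be promoted to
# LEFT OUTER to avoid dropping rows the other branch could match.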
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False, allow_joins=True, split_subq=True):
"""
Adds a Q-object to the current filter.
"""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector,
negated=q_object.negated)
joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause, needed_inner = self._add_q(
child, used_aliases, branch_negated,
current_negated, allow_joins, split_subq)
joinpromoter.add_votes(needed_inner)
else:
child_clause, needed_inner = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated, connector=connector,
allow_joins=allow_joins, split_subq=split_subq,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
Walks the list of names and turns them into PathInfo tuples. Note that
a single name in 'names' can generate multiple PathInfos (m2m for
example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Returns a list of PathInfo tuples. In addition returns the final field
(the last used join field), and target (which is a field guaranteed to
contain the same value as the final field). Finally, the method returns
those names that weren't found (which are likely transforms and the
final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
field = None
try:
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
field_names = list(get_field_names_from_opts(opts))
available = sorted(field_names + list(self.annotation_select))
raise FieldError("Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info()
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Returns the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins and the
field path travelled to generate the joins.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# First, generate the path for the names
path, final_field, targets, rest = self.names_to_path(
names, opts, allow_many, fail_on_missing=True)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = Join(opts.db_table, alias, None, INNER, join.join_field, nullable)
reuse = can_reuse if join.m2m else None
alias = self.join(connection, reuse=reuse)
joins.append(alias)
return final_field, targets, opts, joins, path
def trim_joins(self, targets, joins, path):
"""
The 'target' parameter is the final field being joined to, 'joins'
is the full list of join aliases. The 'path' contain the PathInfos
used to create the joins.
Returns the final target field and table alias and the new active
joins.
We will always trim any direct join if we have the target column
available already in the previous table. Reverse joins can't be
trimmed as we don't know if there is anything on the other side of
the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
join_targets = set(t.column for t in info.join_field.foreign_related_fields)
cur_targets = set(t.column for t in targets)
if not cur_targets.issubset(join_targets):
break
targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
if not allow_joins and LOOKUP_SEP in name:
raise FieldError("Joined field references are not permitted in this query")
if name in self.annotations:
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
return Ref(name, self.annotation_select[name])
else:
return self.annotation_select[name]
else:
field_list = name.split(LOOKUP_SEP)
field, sources, opts, join_list, path = self.setup_joins(
field_list, self.get_meta(),
self.get_initial_alias(), reuse)
targets, _, join_list = self.trim_joins(sources, join_list, path)
if len(targets) > 1:
raise FieldError("Referencing multicolumn fields with F() objects "
"isn't supported")
if reuse is not None:
reuse.update(join_list)
col = targets[0].get_col(join_list[-1], sources[0])
return col
def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
As an example we could have original filter ~Q(child__name='foo').
We would get here with filter_expr = child__name, prefix = child and
can_reuse is a set of joins usable for filters in the original query.
We will turn this into equivalent of:
WHERE NOT (pk IN (SELECT parent_id FROM thetable
WHERE name = 'foo' AND parent_id IS NOT NULL))
It might be worth it to consider using WHERE NOT EXISTS as that has
saner null handling, and is easier for the backend's optimizer to
handle.
"""
# Generate the inner query.
query = Query(self.model)
query.add_filter(filter_expr)
query.clear_ordering(True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
# Add extra check to make sure the selected field will not be null
# since we are adding an IN <subquery> clause. This prevents the
# database from tripping over IN (...,NULL,...) selects and returning
# nothing
col = query.select[0]
select_field = col.target
alias = col.alias
if self.is_nullable(select_field):
lookup_class = select_field.get_lookup('isnull')
lookup = lookup_class(select_field.get_col(alias), False)
query.where.add(lookup, AND)
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup('exact')
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias),
pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases.add(alias)
condition, needed_inner = self.build_filter(
('%s__in' % trimmed_prefix, query),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
if contains_louter:
or_null_condition, _ = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
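# Illustrative example (hypothetical queryset): slicing qs[5:10] calls
# set_limits(low=5, high=10), leaving low_mark=5 and high_mark=10 --
# compiled later as "LIMIT 5 OFFSET 5".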
def clear_limits(self):
"""
Clears any existing limits.
"""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_clause(self):
"""
Removes all fields from SELECT clause.
"""
self.select = []
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clears the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = []
self.values_select = []
def add_select(self, col):
self.default_cols = False
self.select.append(col)
def set_select(self, cols):
self.default_cols = False
self.select = cols
def add_distinct_fields(self, *field_names):
"""
Adds and resolves the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Adds the given (model) fields to the select set. The field names are
added in the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
for name in field_names:
# Join promotion note - we must not remove any rows here, so
# if there is no existing joins, use outer join.
_, targets, _, joins, path = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(targets, joins, path)
for target in targets:
self.add_select(target.get_col(final_alias))
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
else:
names = sorted(list(get_field_names_from_opts(opts)) + list(self.extra) + list(self.annotation_select))
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
def add_ordering(self, *ordering):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, all ordering is cleared from the query.
"""
errors = []
for item in ordering:
if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item):
errors.append(item)
if getattr(item, 'contains_aggregate', False):
raise FieldError(
'Using an aggregate in order_by() without also including '
'it in annotate() is not allowed: %s' % item
)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by.extend(ordering)
else:
self.default_ordering = False
def clear_ordering(self, force_empty):
"""
Removes any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = []
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expands the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
self.group_by = []
for col in self.select:
self.group_by.append(col)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
for col in annotation.get_group_by_cols():
self.group_by.append(col)
def add_select_related(self, fields):
"""
Sets up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Adds data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = OrderedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_text(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != '%':
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is an OrderedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""
Remove any fields from the deferred loading set.
"""
self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = field_names, False
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
Callback used by get_loaded_field_names().
"""
target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"Set the mask of annotations that will actually be returned by the SELECT"
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(set(names).union(self.annotation_select_mask))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
We don't actually remove them from the Query since they might be used
later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
if self.group_by is True:
self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
self.set_group_by()
self.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self._extra and not self._annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
self.values_select = field_names
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""The OrderedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self._annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = OrderedDict(
(k, v) for k, v in self.annotations.items()
if k in self.annotation_select_mask
)
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self._extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = OrderedDict(
(k, v) for k, v in self.extra.items()
if k in self.extra_select_mask
)
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trims joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also sets the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Returns a lookup usable for doing outerq.filter(lookup=self). Returns
also if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [t for t in self.tables if t in self._lookup_joins or t == self.tables[0]]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
# The path.join_field is a Rel, let's get the other side's field
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
# Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for LEFT JOINs because we would
# miss those rows that have nothing on the outer side.
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != LOUTER:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
self.where_class, None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
# values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a Join instead of a BaseTable reference.
# But the first entry in the query's FROM clause must not be a JOIN.
for table in self.tables:
if self.alias_refcount[table] > 0:
self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
A helper to check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
if connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed:
return True
else:
return field.null
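# Illustrative example: on Oracle, which interprets empty strings as
# NULLs, a CharField(null=False) still counts as nullable here, so
# joins and exclude() subqueries get the extra IS NULL handling.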
def as_subquery_filter(self, db):
self._db = db
self.subquery = True
# It's safe to drop ordering if the queryset isn't using slicing,
# distinct(*fields) or select_for_update().
if (self.low_mark == 0 and self.high_mark is None and
not self.distinct_fields and
not self.select_for_update):
self.clear_ordering(True)
return self
def get_order_dir(field, default='ASC'):
"""
Returns the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
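# Illustrative examples (hypothetical field names):
#   get_order_dir('-foo')                 -> ('foo', 'DESC')
#   get_order_dir('foo')                  -> ('foo', 'ASC')
#   get_order_dir('foo', default='DESC')  -> ('foo', 'DESC')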
def add_to_dict(data, key, value):
"""
A helper function to add "value" to the set of values for "key", whether or
not "key" already exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
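# Illustrative example (hypothetical data): with data == {'a': {1}},
# add_to_dict(data, 'a', 2) grows the existing set to {1, 2}, while
# add_to_dict(data, 'b', 3) creates a fresh entry {'b': {3}}.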
def is_reverse_o2o(field):
"""
A little helper to check if the given field is reverse-o2o. The field is
expected to be some sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
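# Illustrative example: JoinPromoter(AND, 2, negated=True) ends up
# with effective_connector == OR, mirroring De Morgan's law --
# NOT (a AND b) must be treated like (NOT a) OR (NOT b) for promotion.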
def add_votes(self, votes):
"""
Add single vote per item to self.votes. Parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
# if the rel_a join doesn't produce any results (for example, a
# reverse foreign key with no rows or a null value in a direct
# foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == 'OR' and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == 'AND' or (
self.effective_connector == 'OR' and votes == self.num_children):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
| bsd-3-clause | -1,911,751,377,344,912,100 | 43.429173 | 119 | 0.592409 | false |
robmcmullen/peppy | editra.in/make-major-modes-from-editra.py | 1 | 13072 | #!/usr/bin/env python
import os, shutil, sys, glob, imp
import __builtin__
import ConfigParser
from cStringIO import StringIO
from optparse import OptionParser
import wx.stc
__builtin__._ = str
sys.path.append("..")
from peppy.debug import *
from facade import *
facade = EditraFacade()
class_attr_template = ''' keyword = '%(keyword)s'
editra_synonym = '%(lang)s'
stc_lexer_id = %(lexer)s
start_line_comment = %(start_comment)s
end_line_comment = %(end_comment)s'''
classprefs_template = ''' StrParam('extensions', '%(extensions)s', fullwidth=True),'''
keyword_set_template = ''' StrParam('keyword_set_%d', unique_keywords[%d], hidden=False, fullwidth=True),'''
template = '''# peppy Copyright (c) 2006-2009 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""%(lang)s programming language editing support.
Major mode for editing %(lang)s files.
Supporting actions and minor modes should go here only if they are uniquely
applicable to this major mode and can't be used in other major modes. If
actions can be used with multiple major modes, they should be put in a
separate plugin in the peppy/plugins directory.
"""
import os
import wx
import wx.stc
from peppy.lib.foldexplorer import *
from peppy.lib.autoindent import *
from peppy.yapsy.plugins import *
from peppy.major import *
from peppy.fundamental import FundamentalMode
from peppy.editra.style_specs import unique_keywords
class %(class_name)sMode(FundamentalMode):
"""Stub major mode for editing %(keyword)s files.
This major mode has been automatically generated and is a boilerplate/
placeholder major mode. Enhancements to this mode are appreciated!
"""
%(class_attrs)s
icon = 'icons/page_white.png'
default_classprefs = (
%(classprefs)s
)
class %(class_name)sModePlugin(IPeppyPlugin):
"""Plugin to register modes and user interface for %(keyword)s
"""
def getMajorModes(self):
yield %(class_name)sMode
'''
# Global lists and dicts used by the processing routines below
langs = facade.getAllEditraLanguages()
extra_properties = {}
syntax_style_specs = {}
keywords = {}
stc_lexer_id = {}
for lang in langs:
keyword = facade.getPeppyModeKeyword(lang)
#dprint(keyword)
extra_properties[keyword] = facade.getEditraExtraProperties(lang)
syntax_style_specs[keyword] = facade.getEditraSyntaxSpecs(lang)
keywords[keyword] = facade.getEditraLanguageKeywords(lang)
stc_lexer_id[keyword] = facade.getEditraSTCLexer(lang)
def findCommonKeywords(keywords):
unique_keywords = []
unique_id = 0
keywords_text = {}
keywords_mapping = {}
for lang, keyword_dict in keywords.iteritems():
if lang not in keywords_mapping:
dprint("adding %s" % lang)
keywords_mapping[lang] = {}
try:
for id, text in keyword_dict.iteritems():
# keyword_spec is a tuple of int and string
if text in keywords_text:
dprint("found common for %s, %d: %s" % (lang, id, keywords_text[text]))
keywords_mapping[lang][id] = keywords_text[text]
else:
keywords_text[text] = unique_id
unique_keywords.append(text)
keywords_mapping[lang][id] = unique_id
unique_id += 1
except (ValueError, TypeError):
dprint(lang)
dprint(keyword_dict)
raise
except KeyError:
dprint(keywords_mapping.keys())
raise
dprint(keywords_mapping)
return unique_keywords, keywords_mapping
unique_keywords, keywords_mapping = findCommonKeywords(keywords)
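# Illustrative shape of the results (hypothetical values):
#   unique_keywords  -> ['and or not', 'auto break case', ...]
#   keywords_mapping -> {'Python': {0: 0, 1: 1}, 'C': {0: 2}, ...}
# Each language maps its Editra keyword-set id to an index into the
# shared unique_keywords list, so duplicate keyword strings are stored
# only once across all languages.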
# Processing routines
def process(destdir):
missing, existing = getDefinedModes(destdir)
for mode in missing:
convertEditraMode(destdir, mode)
for mode in existing:
updateEditraMode(destdir, mode)
def getDefinedModes(destdir):
langs = facade.getAllEditraLanguages()
missing = []
existing = []
for lang in langs:
module_name = facade.getPeppyFileName(lang)
module_path = os.path.join(destdir, module_name + ".py")
if os.path.exists(module_path):
#dprint("found %s -> %s -> %s" % (lang, module_name, module_path))
existing.append(lang)
else:
#dprint("CREATING %s -> %s -> %s" % (lang, module_name, module_path))
missing.append(lang)
return missing, existing
def getEditraInfo(lang):
module_name = facade.getPeppyFileName(lang)
syn = facade.getEditraSyntaxData(lang)
keyword = facade.getPeppyModeKeyword(lang)
vals = {
'lang': lang,
'keyword': keyword,
'class_name': facade.getPeppyClassName(lang),
'module_name': module_name,
'extensions': " ".join(facade.getExtensionsForLanguage(lang)),
'lexer': facade.getEditraSTCLexerName(lang),
'start_comment': repr(facade.getEditraCommentChars(lang)[0]),
'end_comment': repr(facade.getEditraCommentChars(lang)[1]),
}
vals['class_attrs'] = class_attr_template % vals
classprefs = classprefs_template % vals
order = sorted(keywords_mapping[keyword].iteritems())
for keyword_set_id, unique_id in order:
classprefs += "\n" + keyword_set_template % (keyword_set_id, unique_id)
vals['classprefs'] = classprefs
return module_name, vals
def convertEditraMode(destdir, lang):
module_name, vals = getEditraInfo(lang)
module_path = os.path.join(destdir, module_name + ".py")
text = template % vals
#print(text)
fh = open(module_path, 'w')
fh.write(text)
fh.close()
generatePluginFile(destdir, lang)
def updateEditraMode(destdir, lang):
module_name, vals = getEditraInfo(lang)
module_path = os.path.join(destdir, module_name + ".py")
fh = open(module_path, 'r')
text = fh.read()
fh.close()
classtext = ClassText(text, lang)
classtext.replace(vals)
fh = open(module_path, 'w')
fh.write(str(classtext))
fh.close()
class ClassText(object):
"""Gets the class attribute section of the major mode class
"""
def __init__(self, text, lang):
self.header = ""
self.class_attrs = ""
self.classprefs = ""
self.footer = ""
self.lang = lang
self.parse(text)
def __str__(self):
#return "Class Attribute Section: %s\nClass Preference Section: %s"% (self.class_attrs, self.classprefs)
return self.header + self.class_attrs + self.classprefs + self.footer
def parse(self, text):
classmatch = "class %sMode(" % facade.getPeppyClassName(self.lang)
#dprint(classmatch)
state = "header"
for line in text.splitlines(True):
#dprint(line)
if state == "header":
if line.startswith(classmatch):
state = "in_class"
else:
self.header += line
if state == "in_class":
if line.strip().startswith("keyword =") or line.strip().startswith("keyword="):
state = "class_attrs"
else:
self.header += line
if state == "class_attrs":
if line.strip().startswith("default_classprefs"):
state = "classprefs"
else:
self.class_attrs += line
if state == "classprefs":
if line.strip() == ")":
state = "footer"
else:
self.classprefs += line
if state == "footer":
self.footer += line
def replace(self, vals):
"""Replace any class attributes or classprefs with the new values
"""
self.replaceImports()
self.replaceClassAttrs(vals)
self.replaceClassprefs(vals)
def replaceImports(self):
"""Special case to add the unique_keywords dict to the list of imports
In versions prior to r2412, the import statement for unique_keywords
from peppy.editra.style_specs didn't exist. Now that keywords can be
modified by the user, the import statement must be included because
the StrParams reference the unique_keywords dict to supply defaults
for the preferences.
"""
extra = "from peppy.editra.style_specs import unique_keywords"
try:
self.header.index(extra)
except ValueError:
dprint("Replacing imports for %s" % self.lang)
index = self.header.index("from peppy.fundamental import")
self.header = self.header[0:index] + extra + "\n" + self.header[index:]
def replaceClassAttrs(self, vals):
newattrs = vals['class_attrs']
keywords = {}
for attrline in newattrs.splitlines():
keyword, value = attrline.split("=")
keyword = keyword.strip()
keywords[keyword] = attrline
lines = self.class_attrs.splitlines(True)
newlines = ""
for line in lines:
splitted = line.split("=")
if len(splitted) > 1 and splitted[0].strip() in keywords:
# Replace the keyword with the new value
#newlines += keywords[splitted[0]]
#del keywords[splitted[0]]
pass
else:
newlines += line
self.class_attrs = newattrs + "\n" + newlines
def replaceClassprefs(self, vals):
newprefs = vals['classprefs']
keywords = set()
for attrline in newprefs.splitlines():
keyword, value = attrline.split(",", 1)
keywords.add(keyword)
lines = self.classprefs.splitlines(True)
newlines = ""
# the default_classprefs should start it out
newprefs = lines[0] + newprefs
for line in lines[1:]:
splitted = line.split(",")
if len(splitted) > 1 and splitted[0] in keywords:
pass
else:
newlines += line
self.classprefs = newprefs + "\n" + newlines
def generatePluginFile(destdir, lang):
module_name = facade.getPeppyFileName(lang)
plugin_path = os.path.join(destdir, module_name + ".peppy-plugin")
conf = ConfigParser.ConfigParser()
conf.add_section("Core")
conf.set("Core", "Name", "%s Mode" % facade.getPeppyModeKeyword(lang))
conf.set("Core", "Module", module_name)
conf.add_section("Documentation")
conf.set("Documentation", "Author", "Rob McMullen")
conf.set("Documentation", "Version", "0.1")
conf.set("Documentation", "Website", "http://www.flipturn.org/peppy")
conf.set("Documentation", "Description", "Major mode for editing %s files" % facade.getPeppyModeKeyword(lang))
fh = open(plugin_path, "w")
conf.write(fh)
def processSampleText(filename):
#dprint("Processing sample text")
langs = facade.getAllEditraLanguages()
sample_text = {}
for lang in langs:
keyword = facade.getPeppyModeKeyword(lang)
sample_text[keyword] = facade.getEditraLanguageSampleText(lang)
import pprint
pp = pprint.PrettyPrinter()
fh = open(filename, "w")
fh.write("# Generated file containing the sample text for Editra modes\n")
fh.write("sample_text=")
fh.write(pp.pformat(sample_text))
fh.close()
def processStyleSpecs(filename):
#dprint("Processing style specs")
import pprint
pp = pprint.PrettyPrinter()
fh = open(filename, "w")
fh.write("# Generated file containing Editra style defaults for those major modes based\n# on Editra style files\n")
fh.write("\nstc_lexer_id=")
fh.write(pp.pformat(stc_lexer_id))
fh.write("\nsyntax_style_specs=")
fh.write(pp.pformat(syntax_style_specs))
fh.write("\nextra_properties=")
fh.write(pp.pformat(extra_properties))
if keywords_mapping:
fh.write("\nkeywords_mapping=")
fh.write(pp.pformat(keywords_mapping))
fh.write("\nunique_keywords=")
fh.write(pp.pformat(unique_keywords))
fh.write("\n")
fh.close()
if __name__ == "__main__":
usage="usage: %prog [-s dir] [-o file]"
parser=OptionParser(usage=usage)
parser.add_option("-o", action="store", dest="outputdir",
default="../peppy/major_modes", help="output directory")
parser.add_option("--sample-text", action="store", dest="sample_text_file",
default="../peppy/editra/sample_text.py", help="dict containing sample text for each editra language")
parser.add_option("--style-spec", action="store", dest="style_spec_file",
default="../peppy/editra/style_specs.py", help="dict containing sample text for each editra language")
(options, args) = parser.parse_args()
process(options.outputdir)
processSampleText(options.sample_text_file)
processStyleSpecs(options.style_spec_file)
| gpl-2.0 | -3,615,765,190,283,932,000 | 34.045576 | 124 | 0.617427 | false |
shearichard/dir-list-analysis | dla/whats_present.py | 1 | 5279 | '''
whats_present.py
Given a list of file names provides information about which files are
present and which are absent from within a file listing. There's a good
number of tools out there that will do this as well but in my case I didn't
have access to those.
Usage:
whats_present.py [-h] -i FILEDIR -o FILEOUT -t FILETEST
Options:
-h --help show this
-i FILEDIR specify directory listing file
-o FILEOUT specify output file
-t FILETEST specify test file
'''
from dla_temp_utils import getTempPath
from docopt import docopt
from sets import Set
import string
class DirectoryListing(object):
def __init__(self, fileinpath, testpath):
self.fileinpath = fileinpath
self.testpath = testpath
self.dicfiles = {}
self.set_testfiles = None
self.set_inputfiles = None
self.__make_dic_from_filein()
self.__make_test_set()
self.__make_input_set()
def files_in_test_but_not_listing(self):
'''
Returns a set of file names which are
in the test set but not in the listing
'''
workset = self.set_testfiles.difference((self.set_inputfiles))
return workset
def files_in_listing_but_not_test(self):
'''
Returns a set of file names which are
in the listing set but not in the test set
'''
workset = self.set_inputfiles.difference((self.set_testfiles))
return workset
def __clean_non_printable(self, the_string):
'''
Removes control chars from string
Assumes strings are ASCII
'''
return filter(lambda x: x in string.printable, the_string)
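# Illustrative example: self.__clean_non_printable('abc\x00\x07')
# returns 'abc' -- only characters in string.printable survive.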
def __make_input_set(self):
'''
Make a Set object of all the input filenames
'''
lstwork = []
for k in self.dicfiles:
fname_clean = self.__clean_non_printable(self.dicfiles[k]['filename'])
lstwork.append(fname_clean)
self.set_inputfiles = Set(lstwork)
def __make_test_set(self):
'''
Make a Set object of all the filenames
to be tested against
'''
lstwork = []
with open(self.testpath, "rb") as f:
lines = []
for line in f:
#Unix
if line[-1] == "\n":
line = line[:-1]
#Windows has both
if line[-1] == "\r":
line = line[:-1]
lstwork.append(line)
# for line in lines:
# fname_clean = self.__clean_non_printable(line)
# lstwork.append(fname_clean)
self.set_testfiles = Set(lstwork)
def __make_dic_from_fileinline(self, line):
'''
Given an input line from the file
add an element to the instance var dict
which corresponds to one file in the listing
'''
lstelems = line.split()
try:
mm, dd, yyyy = lstelems[0].split("/")
except ValueError:
#Some lines aren't the format we want
pass
except IndexError:
#Some lines aren't the format we want
pass
else:
#At this stage the line is a goody
#so make the date usable
str_dt_iso = "%s-%s-%s" % (yyyy, mm, dd)
#file names can have spaces (unfortunaly)
filename = " ".join(lstelems[4:])
#deal with commas in size
if lstelems[3] == """<DIR>""":
#directories carry no size; avoid reusing a stale value from a prior line
size = None
else:
size = int(lstelems[3].replace(',',''))
#not sure what to use as key, use file name for now
key = filename
self.dicfiles[key] = {'filename': filename, 'size': size, 'dateiso' : str_dt_iso}
def __make_dic_from_filein(self):
'''
Based on a file which has at least one line which looks like this:
12/15/2003 01:15 PM 3,383 000BE98E-7076-4CF8-8BFA-266E6EE4C9DF.GIF
a dict of dicts is returned, keyed on file name.
'''
lstOut = []
with open(self.fileinpath, 'r') as f:
for line in f:
self.__make_dic_from_fileinline(line)
def dump_set_to_file(fname, elems):
with open(fname, 'w') as f:
for elem in sorted(elems):
f.write(elem + '\n')
def main(filein, filetest, fileout):
import pprint
d = DirectoryListing(filein, filetest)
print "test"
print(len(d.set_testfiles))
print "input"
print(len(d.set_inputfiles))
s1 = d.files_in_test_but_not_listing()
s2 = d.files_in_listing_but_not_test()
print "files_in_test_but_not_listing"
print(len(s1))
#pprint.pprint(s1)
p_s1 = getTempPath("s1.txt")
dump_set_to_file(p_s1, s1)
print("*" * 50)
print "files_in_listing_but_not_test"
print(len(s2))
#pprint.pprint(s2)
p_s2 = getTempPath("s2.txt")
dump_set_to_file(p_s2, s2)
print("*" * 50)
print getTempPath("test1")
print p_s1
print p_s2
if __name__ == '__main__':
arguments = docopt(__doc__, version='Whats Present 0.1')
main(arguments['-i'], arguments['-t'], arguments['-o'])
| mit | 8,817,128,369,539,018,000 | 29.514451 | 98 | 0.552188 | false |
tomevans/utils | read_marcs.py | 1 | 1426 | from __future__ import print_function
import numpy as np
import scipy.interpolate
import matplotlib.pyplot as plt
import os
import pdb
import sys
import glob
def read_marcs(teff=5000, logg=4.5, turbulence=2.0, metallicity=0.0):
"""
Reads in MARCS spectrum of specified properties
and returns two arrays, one containing the wavelengths
and the other containing the relative fluxes at
each wavelength.
Fluxes are returned in units of erg/cm2/s/angstrom.
Wavelengths are returned in units of microns.
"""
print( '\nReading in MARCS spectrum:' )
specdir = str('~/data/MARCS/').replace('~',os.path.expanduser('~'))
# Read in the relative fluxes (units: erg/cm2/s/angstrom):
specfile = specdir+'p%04d_g%+4.1f_m0.0_t%02d_st_z%+5.2f' % (teff, logg, turbulence, metallicity)
specfile = glob.glob(specfile.replace('//','/')+'*.flx')[0]
flux = np.loadtxt(specfile)
print( ' Relative fluxes from: %s' % specfile )
# Read in the corresponding wavelengths (units: angstroms):
wavfile = specdir+'wavelengths.vac'
wav = np.loadtxt(wavfile)
print( ' Wavelengths from: %s' % wavfile )
# Convert the wavelengths from angstroms to microns:
wav = wav/(1e4)
# NOTE: The relative fluxes can be left in units of
# 'per angstrom', because converting them to 'per micron'
# or something similar is simply just multiplying
# everything by a constant value.
return wav, flux
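# Illustrative usage (assumes the MARCS grid files exist under
# ~/data/MARCS/):
#   wav, flux = read_marcs(teff=5000, logg=4.5, turbulence=2.0)
#   print( wav.shape, flux.shape )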
| gpl-2.0 | -5,502,322,731,232,479,000 | 37.540541 | 100 | 0.685835 | false |
Migelo/mpa_garching | 1/temperature.py | 1 | 2567 | import pygad as pg
import matplotlib.pyplot as plt
import numpy as np
import utils
import glob
from multiprocessing import Pool
filename = __file__
def plot(args):
halo = args[0]
type = args[1]
path = '/ptmp/mpa/naab/REFINED/%s/SF_X/4x-2phase/out/snap_%s_4x_???' % (halo, halo)
max = int(sorted(glob.glob(path))[-1][-3:])
s, h, g = pg.prepare_zoom('/ptmp/mpa/naab/REFINED/%s/SF_X/4x-2phase/out/snap_%s_4x_%s' % (halo, halo, max), gas_trace='/u/mihac/data/%s/4x-2phase/gastrace_%s' % (halo, type), star_form=None)
mask = s.gas['ejection_time'][:,0] > '8 Gyr'
ej_hot = s.gas['mass_at_ejection'][mask][ s.gas['T_at_ejection'][:,0][mask]>1e5 , 0 ].sum()
ej_cold = s.gas['mass_at_ejection'][mask][ s.gas['T_at_ejection'][:,0][mask]<=1e5 , 0 ].sum()
print ej_hot / (ej_cold+ej_hot)
ej_hot = s.gas['mass_at_ejection'][ s.gas['T_at_ejection']>1e5 ].sum()
ej_cold = s.gas['mass_at_ejection'][ s.gas['T_at_ejection']<=1e5 ].sum()
print ej_hot / (ej_cold+ej_hot)
T_ejection = s.gas['T_at_ejection'][s.gas['num_recycled'] > -1]
T_infall = s.gas['T_at_infall'][s.gas['num_recycled'] > -1]
for i, temp in enumerate(T_ejection):
if len(temp) < len(T_infall[i]):
T_infall[i] = T_infall[i][:-1]
elif len(temp) > len(T_infall[i]):
print 'hmm'
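# Pairing note for the loop above: a particle that has fallen in once
# more than it was ejected has no matching ejection temperature yet,
# so its final infall record is dropped to keep the arrays aligned.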
fig, ax = plt.subplots(3)
plt.tight_layout()
ax[0].set_xlabel("$T_{infall}$")
ax[0].set_ylabel("$T_{ejection}$")
ax[0].set_xlim((1e2, 1e9))
ax[0].set_ylim((1e2, 1e9))
ax[0].set_xscale('log')
ax[0].set_yscale('log')
ax[0].scatter(T_infall, T_ejection, alpha=.1, edgecolor='none')
ax[0].plot([0, 1e80], [0, 1e80], color='r')
ax[1].set_ylabel("count")
ax[1].set_xlabel("$T$ [K]")
ax[1].set_xscale('log')
ax[1].set_yscale('log')
ax[1].hist(T_infall.flatten(), bins=np.logspace(2, 9, 100/2), alpha=.5, label='infall')
ax[1].hist(T_ejection.flatten(), bins=np.logspace(2, 9, 100/2), alpha=.5, label='ejection')
lgd1 = ax[1].legend(loc='best')
ax[2].set_title('Initial')
ax[2].set_ylabel("count")
ax[2].set_xlabel("$T$ [K]")
ax[2].set_xscale('log')
ax[2].set_yscale('log')
ax[2].hist(T_infall[:,0], bins=np.logspace(2, 9, 100/2), alpha=.5, label='initial infall')
ax[2].hist(T_ejection[:,0], bins=np.logspace(2, 9, 100/2), alpha=.5, label='initial ejection')
lgd2 = ax[2].legend(loc='best')
plt.savefig(filename.split("/")[-1][:-3] + '_' + halo + '_' + type + ".png", bbox_inches='tight')
p = Pool(4)
p.map(plot, utils.combinations)
| mit | -4,927,423,738,475,809,000 | 37.313433 | 194 | 0.580834 | false |
bpowers/opensim | opensim/visuals/widgets/stock.py | 1 | 8685 | #===--- stock.py - OpenSim Stock widget -----------------===#
#
# Copyright 2008 Bobby Powers
#
# This file is part of OpenSim.
#
# OpenSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenSim. If not, see <http://www.gnu.org/licenses/>.
#
#===-----------------------------------------------------------------------===#
#
# This file contains the implementation of the stock widget
#
#===-----------------------------------------------------------------------===#
import gobject
import gtk
import goocanvas
import math
import cairo
import logging
from opensim.visuals.tools import edit_equation
from text import TextInfo
from item import SimItem
class StockItem(SimItem):
def __init__(self, x, y, width=140, height=80, name=None,
focus=True, line_width=3.5, **kwargs):
super(StockItem, self).__init__(**kwargs)
self._new = True
# this will be the variable created in the simulator
self.var = None
self.x = int(x - width/2)
self.y = int(y - height/2)
self.width = width
self.height = height
self.dragging = False
self.active_color = [0, 0, 0]
self.line_width = line_width
self.__old_name = ""
self.named = True
# keep track of inflows and outflows, for use in engine
self.inflows = []
self.outflows = []
text_width = self.width - self.padding*2
if name is not None:
self._display_name = TextInfo(name, wrap_width=text_width,
placeholder_text=False)
else:
self._display_name = TextInfo("(enter name)", wrap_width=text_width,
placeholder_text=True)
self.__needs_resize_calc = True
if focus:
self.get_canvas().grab_focus(self)
self.get_canvas().grab_highlight(self)
def do_simple_create_path(self, cr):
self.ensure_size(cr)
# define the bounding path here.
cr.rectangle(self.x - self.line_width/2.0,
self.y - self.line_width/2.0,
self.width + self.line_width/2.0,
self.height + self.line_width/2.0)
def center(self):
return (int(self.x + self.width/2), int(self.y + self.height/2))
def abs_center(self):
center = self.center()
transform = self.get_transform()
x0, y0 = 0, 0
if transform is not None:
xx, yx, xy, yy, x0, y0 = transform
return (x0 + center[0], y0 + center[1])
def edge_point(self, end_point):
center_x, center_y = self.abs_center()
line_angle = math.atan2((end_point[1] - center_y),
(end_point[0] - center_x))
if line_angle < 0: line_angle = 2*math.pi + line_angle
# should always be between 0 and .5*pi
ref_angle = math.atan2(float(self.height),float(self.width))
width = self.width/2
height = self.height/2
if line_angle < ref_angle or line_angle > 2*math.pi - ref_angle:
center_x = center_x + width
center_y = center_y + width * math.tan(line_angle)
elif line_angle > math.pi - ref_angle and line_angle < math.pi + ref_angle:
center_x = center_x - width
center_y = center_y - width * math.tan(line_angle)
if line_angle >= ref_angle and line_angle <= math.pi - ref_angle:
center_x = center_x - height * math.tan(line_angle - math.pi/2)
center_y = center_y + height
elif line_angle >= math.pi + ref_angle and \
line_angle <= 2*math.pi - ref_angle:
center_x = center_x + height * math.tan(line_angle - math.pi/2)
center_y = center_y - height
#logging.debug("line: %5.1f, ref %5.1f" % (math.degrees(line_angle),
# math.degrees(ref_angle)))
return (center_x, center_y)
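# Illustrative example (hypothetical coordinates): for a 140x80 stock
# centered at (0, 0), edge_point((200, 0)) returns (70, 0) -- the
# point where a horizontal connector meets the right-hand edge.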
def ensure_size(self, cr):
if self.__needs_resize_calc:
self._display_name.update_extents(cr)
old_center_x = self.x + self.width/2.0
old_center_y = self.y + self.height/2.0
self.height = max(self.height, \
self._display_name.height + 2*self.padding)
self.x = old_center_x - self.width/2.0
self.y = old_center_y - self.height/2.0
self.bounds_x1 = self.x - self.line_width/2.0
self.bounds_y1 = self.y - self.line_width/2.0
self.bounds_x2 = self.x + self.width + self.line_width/2.0
self.bounds_y2 = self.y + self.height + self.line_width/2.0
self.__needs_resize_calc = False
self.force_redraw()
def do_simple_paint(self, cr, bounds):
cr.save()
self.ensure_size(cr)
cr.rectangle(self.x, self.y, self.width, self.height)
cr.set_source_rgb (1, 1, 1)
cr.fill_preserve()
cr.set_line_width(self.line_width)
cr.set_source_rgb(self.active_color[0], \
self.active_color[1], \
self.active_color[2])
cr.stroke()
# translate so that our coordinate system is in the widget
center = self.center()
cr.translate(center[0], center[1])
self._display_name.show_text(cr)
cr.restore()
def xml_representation(self):
# get the center of the widget, so that we get the correct
# behavior when it loads. also, add the cairo transformation
# matrix offset.
x_center = self.bounds_x1 + self.width/2.0
y_center = self.bounds_y1 + self.height/2.0
xml_string = '\
<stock>\n\
<name>%s</name>\n\
<x>%d</x>\n\
<y>%d</y>\n\
<width>%f</width>\n\
<height>%f</height>\n\
</stock>\n' % (self._display_name.string, x_center, y_center,
self.width, self.height)
return xml_string
def name(self):
return self._display_name.string
def on_key_press(self, item, target, event):
key_name = gtk.gdk.keyval_name(event.keyval)
if key_name in self.enter_key:
self.emit("highlight_out_event", self)
elif key_name in self.delete_key:
self._display_name.backspace()
elif key_name in self.escape_key:
print("escape key!")
else:
# add key to name buffer
self._display_name.add(event.string)
self.__needs_resize_calc = True
self.force_redraw()
        # return True to stop propagation
return True
def on_button_press(self, item, target, event):
canvas = self.get_canvas()
if canvas.override:
            # if we're in the process of drawing a line, just
            # propagate the signal. first fix the coordinates
canvas = self.get_canvas()
event.x, event.y = canvas.convert_from_item_space(self,
event.x, event.y)
return False
canvas.grab_focus(item)
logging.debug("**before grab")
canvas.grab_highlight(self)
logging.debug("**after grab")
        if event.button == 1:
self.drag_x = event.x
self.drag_y = event.y
fleur = gtk.gdk.Cursor(gtk.gdk.FLEUR)
canvas = item.get_canvas()
canvas.pointer_grab(item,
gtk.gdk.POINTER_MOTION_MASK
| gtk.gdk.BUTTON_RELEASE_MASK,
fleur, event.time)
self.dragging = True
        elif event.button == 3:
            edit_equation(self.var)
            canvas.drop_highlight()
        else:
            print("unsupported button: %d" % event.button)
return True
def on_button_release(self, item, target, event):
        if event.button == 1:
canvas = item.get_canvas()
canvas.pointer_ungrab(item, event.time)
self.dragging = False
def on_motion_notify (self, item, target, event):
        if self.dragging and (event.state & gtk.gdk.BUTTON1_MASK):
new_x = event.x
new_y = event.y
item.translate(new_x - self.drag_x, new_y - self.drag_y)
self.emit("item_moved_event", self)
return True
canvas = self.get_canvas()
event.x, event.y = canvas.convert_from_item_space(self, event.x, event.y)
return False
def on_focus_in(self, item, target, event):
return False
def on_focus_out(self, item, target, event):
return False
def on_highlight_in(self, item, target):
self.active_color = [1, .6, .2]
self.force_redraw()
self.__old_name = self.name()
return False
gobject.type_register(StockItem)
| gpl-3.0 | 7,247,521,511,962,889,000 | 28.242424 | 79 | 0.594819 | false |
torenado/plugin.video.animestream | resources/lib/streamSites/myanimelinks.py | 1 | 6227 | import urllib,urllib2,re,sys,httplib
#import xbmcplugin,xbmcgui,xbmcaddon,urlresolver
import cookielib,os,string,cookielib,StringIO
import os,time,base64,logging
from datetime import datetime
from utils import *
try:
import json
except ImportError:
import simplejson as json
#testing in shell
#TEST 1
# python -c "execfile('default.py'); Episode_Listing_Pages('http://www.myanimelinks.com/category/fairy-tail/')"
#TEST2
# python -c "execfile('default.py'); Episode_Media_Link('http://www.myanimelinks.com/fairy-tail-episode-90/')"
#animestream
# modded from --> <addon id="plugin.video.animecrazy" name="Anime Crazy" version="1.0.9" provider-name="AJ">
BASE_URL = 'http://myanimelinks.com'
base_url_name = BASE_URL.split('//')[1]
base_txt = base_url_name + ': '
# aniUrls = ['http://www.myanimelinks.com/full-anime-list/']
aniUrls = []
aniUrls.append(['http://www.myanimelinks.com/full-anime-list/','anime'])
def Episode_Listing_Pages(url):
# Identifies the number of pages attached to the original content page
print base_txt + url
link = grabUrlSource(url)
match=re.compile("class='pages'>Page 1 of (.+?)</span>").findall(link)
epList = []
episodeListPage = url
if(len(match) >= 1):
for ii in range(1,int(match[0])+1):
episodeListPage = url + '/page/' + str(ii)
            # str.replace returns a new string, so the result must be assigned
            episodeListPage = episodeListPage.replace('//page','/page').replace('//page','/page')
            epList = epList + Episode_Listing(episodeListPage)
else:
epList = epList + Episode_Listing(episodeListPage)
return epList
def Episode_Listing(url):
# Extracts the URL and Page name of the various content pages
link = grabUrlSource(url)
match=re.compile('<h5><CENTER><a href="(.+?)">(.+?)</a>(.+?)background-image:url\((.+?)&').findall(link)
epList = []
if(len(match) >= 1):
for episodePageLink, episodePageName, garbage, episodeMediaThumb in match:
season = '1'
            episodePageName = episodePageName.replace('# ','#')
epNum = 0
if epNum == 0:
epNumPart = episodePageName.strip().split('#')
for epNumTest in reversed(epNumPart):
if epNumTest.isdigit():
epNum = int(epNumTest)
break
if epNum == 0:
epNumPart = episodePageName.strip().split()
for epNumTest in reversed(epNumPart):
if epNumTest.isdigit():
epNum = int(epNumTest)
break
if 'season' in episodePageLink:
season=re.compile('season-(.+?)-').findall(episodePageLink)[0]
elif 'Season' in episodePageName.title():
season=re.compile('Season (.+?) ').findall(episodePageName.title())[0]
if 'Special' in episodePageName.title():
season = '0'
season = int(season)
episodePageName = episodePageName.title().replace(' - ',' ').replace(':',' ').replace('-',' ').strip()
epList.append([episodePageLink, episodePageName, episodeMediaThumb.replace("'",""), epNum, season])
else:
print base_txt + 'Nothing was parsed from Episode_Listing: ' + url
return epList
def Episode_Page(url):
# Identifies the number of mirrors for the content
print base_txt + url
link = grabUrlSource(url)
episodeMediaMirrors = url
epMedia = Episode_Media_Link(episodeMediaMirrors,0)
return epMedia
def Episode_Media_Link(url, mirror=1, part=1):
# Extracts the URL for the content media file
link = grabUrlSource(url)
match=re.compile('<br /><(iframe|embed)(.+?)src="(.+?)" ').findall(link)
epMedia = []
if(len(match) >= 1):
for garbage1, garbage2, episodeMediaLink in match:
if (not any(skip_ads in episodeMediaLink for skip_ads in remove_ads)):
if (base_url_name in episodeMediaLink):
episodeMediaLink = Media_Link_Finder(episodeMediaLink)
mirror = mirror + 1
epMedia.append([base_url_name,episodeMediaLink, mirror, part])
if(len(epMedia) < 1):
print base_txt + 'Nothing was parsed from Episode_Media_Link: ' + url
return epMedia
def Media_Link_Finder(url):
# Extracts the URL for the content media file
link = grabUrlSource(url)
link = link.replace(' ','')
match = re.compile('(iframe|embed)src="(.+?)"').findall(link)
match1 = re.compile('(iframe|embed)src=\'(.+?)\'').findall(link)
epMediaFound = []
if(len(match) >= 1):
epMediaFound = match[0][1]
if(len(match1) >= 1):
epMediaFound = match1[0][1]
if (len(epMediaFound) < 1):
epMediaFound = url
print base_txt + 'Nothing was parsed from Media_Link_Finder: ' + url
return epMediaFound
def Video_List_Searched(searchText, link):
# Generate list of shows/movies based on the provide keyword(s)
# url = 'http://www.myanimelinks.com/full-anime-list/'
searchRes = []
match=re.compile('<a(.+?)>'+searchText).findall(link)
videoName = searchText
if(len(match) >= 1):
for linkFound in match:
videoInfo = re.compile('href="(.+?)"').findall(linkFound)
videoLink = videoInfo[-1]
videoNameSplit = videoLink.split('/')
videoName = videoNameSplit[-2].replace('-',' ').replace('_',' ').title().strip()
if (not (any(skip_ads in videoLink for skip_ads in remove_ads) and 'episode' in videoLink)):
searchRes.append([videoLink, videoName])
# else:
# print base_txt + 'Nothing was parsed from Video_List_Searched'
return searchRes
def Total_Video_List(link):
# Generate list of shows/movies
searchRes = []
match=re.compile('<a(.+?)>(.+?)</a>').findall(link)
if(len(match) >= 1):
for linkFound, videoName in match:
videoInfo = re.compile('href="(.+?)"').findall(linkFound)
if(len(videoInfo) >= 1):
videoLink = videoInfo[-1]
videoNameSplit = videoLink.split('/')
videoName = videoName.replace('-',' ').replace('_',' ').title().strip()
if (not (any(skip_ads in videoLink for skip_ads in remove_ads) and 'episode' in videoLink) and len(videoNameSplit) >= 2):
# searchRes.append([videoLink, videoName])
videoName = videoNameSplit[-2].replace('-',' ').replace('_',' ').title().strip()
searchRes.append([videoLink, videoName])
else:
print base_txt + 'Nothing was parsed from Total_Video_List'
# searchRes.sort(key=lambda name: name[1])
# searchRes = U2A_List(searchRes)
# searchRes = f2(searchRes)
return searchRes | gpl-3.0 | -8,915,532,413,233,321,000 | 30.938462 | 125 | 0.667256 | false |
Caranarq/01_Dmine | 01_Agua/P0112/P0112.py | 1 | 9487 | # -*- coding: utf-8 -*-
"""
Started on Wed, Oct 25th, 2017
@author: carlos.arana
Description:
"""
import pandas as pd
import numpy as np
import sys
# Local libraries used
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
sys.path.append(module_path)
from SUN.asignar_sun import asignar_sun
from VarInt.VarInt import VarInt
from SUN_integridad.SUN_integridad import SUN_integridad
from PCCS_variables.PCCS_variables import variables
from ParametroEstandar.ParametroEstandar import ParametroEstandar
from AsignarDimension.AsignarDimension import AsignarDimension
from DocumentarParametro.DocumentarParametro import DocumentarParametro
"""
The local libraries used above are available at the following locations:
SCRIPT: | AVAILABLE AT:
------ | ------------------------------------------------------------------------------------
asignar_sun | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/SUN
SUN_integridad | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/SUN_integridad
variables | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/PCCS_variables
ParametroEstandar | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/ParametroEstandar
AsignarDimension | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/AsignarDimension
DocumentarParametro | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/DocumentarParametro
VarInt | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/VarInt
"""
# Parameter documentation --------------------------------------------------------------------------------------------
# Parameter descriptions
ClaveParametro = 'P0112'
DescParam = 'Consumo (Litros/ Hora/ día) - Estimación del consumo de agua sin tomar en cuenta las pérdidas por fugas ' \
            'en la red y tomas domiciliarias. Estimado como Volumen de agua consumido (m3/año). Estimado por el PIGOO ' \
            'como el cociente del Volumen de agua consumido al año multiplicado por 1000, dividido entre 365 ' \
            'multiplicado por el numero de habitantes'
UnidadesParam = 'l/h/d'
NombreParametro = 'Consumo real de agua'
TituloParametro = 'CONSUMO' # Para nombrar la columna del parametro
PeriodoParam = '2015'
# Descriptions of the mining process
ContenidoHojaDatos = 'Datos de estimacion de consumo disponibles de 2002 a 2015'
ClaveDataset = 'Pigoo'
NomDataset = r'Programa de Indicadores de Gestión de Organismos Operadores'
DescDataset = r'Indicadores municipales generados por los Organismos Operadores de agua, recolectados por el ' \
r'Instituto Mexicano De Tecnologia del Agua y la Secretaría de Medio Ambiente y Recursos Naturales'
Notas = 'S/N'
DescVarIntegridad = 'La variable de integridad municipal para esta Dataset es binaria: \n' \
'1 = El municipio cuenta con informacion \n0 = El municipio no cuenta con información'
NomFuente = 'Programa de Indicadores de Gestión de Organismos Operadores'
UrlFuente = 'http://www.pigoo.gob.mx/index.php?option=com_content&view=article&id=674&Itemid=1677'
ActDatos = '2015'
DispTemp = '2002 a 2015'
PeriodoAct = 'Anual'
DesagrMax = 'Municipal'
# Descriptions generated from the parameter key
DirFuente = r'D:\PCCS\01_Dmine\Datasets\{}'.format(ClaveDataset)
DSBase = '"{}.xlsx", disponible en ' \
'https://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/{}'.format(ClaveDataset, ClaveDataset)
ClaveDimension = ClaveParametro[1:3]
NomDimension = AsignarDimension(ClaveDimension)['nombre']
DirDimension = ClaveDimension + "_" + AsignarDimension(ClaveDimension)['directorio']
RepoMina = 'https://github.com/INECC-PCCS/01_Dmine/tree/master/{}/{}'.format(DirDimension, ClaveParametro)
DirDestino = r'D:\PCCS\01_Dmine\{}'.format(ClaveDimension+"_"+AsignarDimension(ClaveDimension)['directorio'])
# Parameter construction ----------------------------------------------------------------------------------------------
# Initial dataset
dataset = pd.read_excel(DirFuente + r'\{}.xlsx'.format(ClaveDataset),
sheetname="Consumo", dtype={'CVE_MUN': str})
dataset.set_index('CVE_MUN', inplace=True)
# Build the dataset for the parameter and the integrity variable
del dataset['indicador'] # Drop columns that are no longer used
del dataset['ciudad'] # Drop columns that are no longer used
par_dataset = dataset['2015'].rename('Total_Parametro').to_frame()
par_dataset, variables_dataset = VarInt(par_dataset, dataset, tipo = 1)
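# VarInt (see Scripts/VarInt in this repo) appends the integrity variable to the
# parameter dataset and also returns the list of dataset variable names.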
# Consolidate data by city for the parameter
# par_dataset['CVE_MUN'] = par_dataset.index
variables_SUN = ['CVE_MUN', 'NOM_MUN', 'CVE_SUN', 'NOM_SUN', 'NOM_ENT']
DatosLimpios = asignar_sun(par_dataset, vars=variables_SUN)
OrdenColumnas = (variables_SUN + variables_dataset)
DatosLimpios = DatosLimpios[OrdenColumnas] # Reorder the columns
# Consolidate data by city for the data sheet (hoja_datos)
dataset.columns = [ClaveParametro+"_"+i for i in list(dataset)]
var_disponibles = list(dataset)
dataset['CVE_MUN'] = dataset.index
hoja_datos = asignar_sun(dataset)
hoja_datos = hoja_datos[(['CVE_MUN', 'CVE_SUN', 'NOM_SUN', 'TIPO_SUN'] + var_disponibles)].set_index('CVE_MUN')
# Integrity check
integridad_parametro = SUN_integridad(DatosLimpios)
info_completa = sum(integridad_parametro['INTEGRIDAD']['INTEGRIDAD'] == 1) # Used to build the integrity chart
info_sin_info = sum(integridad_parametro['INTEGRIDAD']['INTEGRIDAD'] == 0) # Used to build the integrity chart
info_incomple = 135 - info_completa - info_sin_info # Used to build the integrity chart
# Parameter construction
param_dataset = DatosLimpios.set_index('CVE_SUN')
param_dataset['CVE_SUN'] = param_dataset.index
param = param_dataset.groupby(level=0).agg('sum')['Total_Parametro'] # Aggregation by city
intparam = param_dataset.groupby(level=0).agg('mean')['VAR_INTEGRIDAD'] # Integrity by city
Tipo_Sun = integridad_parametro['EXISTENCIA']['TIPO_SUN']
Tipo_Sun = Tipo_Sun.groupby(Tipo_Sun.index).first()
std_nomsun = param_dataset['CVE_SUN'].map(str)+' - '+param_dataset['NOM_SUN'] # Standard names: CVE_SUN + NOM_SUN
std_nomsun.drop_duplicates(keep='first', inplace=True)
Parametro = pd.DataFrame()
Parametro['CIUDAD'] = std_nomsun
Parametro['TIPO_SUN'] = Tipo_Sun
Parametro[ClaveParametro] = param
Parametro['INTEGRIDAD'] = intparam
Parametro = Parametro.sort_index()
# Variable list
variables_locales = sorted(list(set(list(DatosLimpios) +
list(dataset) +
list(integridad_parametro['INTEGRIDAD']) +
list(integridad_parametro['EXISTENCIA']) +
list(Parametro))))
metavariables = variables(variables_locales)
# Metadata
d_parametro = {
'DESCRIPCION DEL PARAMETRO': np.nan,
'Clave': ClaveParametro,
'Nombre del Parametro': NombreParametro,
'Descripcion del Parametro': DescParam,
'Periodo' : PeriodoParam,
'Unidades': UnidadesParam
}
d_hojas = {
'METADATOS': 'Descripciones y notas relativas al Dataset',
'PARAMETRO': 'Dataset resultado de la minería, agregado por clave del Sistema Urbano Nacional, '
'para utilizarse en la construcción de Indicadores',
'DATOS': ContenidoHojaDatos,
'INTEGRIDAD': 'Revision de integridad de la información POR CLAVE DEL SUN. '
'Promedio de VAR_INTEGRIDAD de los municipios que componen una ciudad. '
'Si no se tiene información para el municipio, VAR_INTEGRIDAD es igual a cero',
'EXISTENCIA': 'Revision de integridad de la información POR MUNICIPIO.',
' ': np.nan,
'DESCRIPCION DE VARIABLES': np.nan
}
d_mineria = {
' ': np.nan,
'DESCRIPCION DEL PROCESO DE MINERIA:': np.nan,
'Nombre del Dataset': NomDataset,
'Descripcion del dataset': DescDataset,
'Disponibilidad Temporal': DispTemp,
'Periodo de actualizacion': PeriodoAct,
'Nivel de Desagregacion': DesagrMax,
'Notas': Notas,
'Fuente': NomFuente,
'URL_Fuente': UrlFuente,
'Dataset base': DSBase,
'Repositorio de mineria': RepoMina,
'VAR_INTEGRIDAD': DescVarIntegridad,
' ': np.nan,
'HOJAS INCLUIDAS EN EL LIBRO': np.nan
}
descripcion_parametro = pd.DataFrame.from_dict(d_parametro, orient='index').rename(columns={0: 'DESCRIPCION'})
descripcion_mineria = pd.DataFrame.from_dict(d_mineria, orient='index').rename(columns={0: 'DESCRIPCION'})
descripcion_hojas = pd.DataFrame.from_dict(d_hojas, orient='index').rename(columns={0: 'DESCRIPCION'})
MetaParametro = descripcion_parametro.append(descripcion_mineria).append(descripcion_hojas).append(metavariables)
# Description dictionary
DescParametro = {
'ClaveParametro': ClaveParametro,
'NombreParametro': NombreParametro,
'info_completa': info_completa,
'info_sin_info': info_sin_info,
'info_incomple': info_incomple,
'RutaSalida': DirDestino,
'Clave de Dimension': ClaveDimension,
'Nombre de Dimension': NomDimension,
'Titulo de Columna': TituloParametro,
'Actualizacion de datos': ActDatos
}
# Create the Excel file and document the parameter
ParametroEstandar(DescParametro, MetaParametro, Parametro, DatosLimpios, integridad_parametro, hoja_datos)
DocumentarParametro(DescParametro, MetaParametro, Parametro) | gpl-3.0 | -6,876,247,542,139,268,000 | 46.808081 | 120 | 0.698891 | false |
UManPychron/pychron | pychron/processing/argon_calculations.py | 1 | 31756 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
# ============= standard library imports ========================
import math
from numpy import asarray, average, array
from uncertainties import ufloat, umath, nominal_value, std_dev
from pychron.core.stats.core import calculate_weighted_mean
from pychron.core.utils import alpha_to_int
from pychron.processing.age_converter import converter
from pychron.processing.arar_constants import ArArConstants
from pychron.pychron_constants import FLECK
def extract_isochron_xy(analyses):
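    # Build inverse-isochron coordinates (x = 39Ar/40Ar, y = 36Ar/40Ar) from
    # the interference-corrected intensities of each analysis.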
ans = [(ai.get_interference_corrected_value('Ar39'),
ai.get_interference_corrected_value('Ar36'),
ai.get_interference_corrected_value('Ar40'))
for ai in analyses]
a39, a36, a40 = array(ans).T
try:
xx = a39 / a40
yy = a36 / a40
except ZeroDivisionError:
return
return xx, yy, a39, a36, a40
def unpack_value_error(xx):
return list(zip(*[(nominal_value(xi), std_dev(xi)) for xi in xx]))
def calculate_isochron(analyses, error_calc_kind, exclude=None, reg='NewYork', include_j_err=True):
if exclude is None:
exclude = []
ref = analyses[0]
args = extract_isochron_xy(analyses)
if args is None:
return
xx, yy, a39, a36, a40 = args
xs, xerrs = unpack_value_error(xx)
ys, yerrs = unpack_value_error(yy)
xds, xdes = unpack_value_error(a40)
yns, ynes = unpack_value_error(a36)
xns, xnes = unpack_value_error(a39)
regx = isochron_regressor(ys, yerrs, xs, xerrs,
xds, xdes, yns, ynes, xns, xnes,
reg)
regx.user_excluded = exclude
reg = isochron_regressor(xs, xerrs, ys, yerrs,
xds, xdes, xns, xnes, yns, ynes,
reg)
reg.user_excluded = exclude
regx.error_calc_type = error_calc_kind
reg.error_calc_type = error_calc_kind
yint = ufloat(reg.get_intercept(), reg.get_intercept_error())
try:
r = 1 / ufloat(regx.get_intercept(), regx.get_intercept_error())
except ZeroDivisionError:
r = 0
age = ufloat(0, 0)
if r > 0:
if include_j_err:
j = ref.j
else:
j = (nominal_value(ref.j), 0)
age = age_equation(j, r, arar_constants=ref.arar_constants)
return age, yint, reg
def isochron_regressor(xs, xes, ys, yes, xds, xdes, xns, xnes, yns, ynes, reg='NewYork'):
reg = reg.lower()
if reg in ('newyork', 'new_york'):
from pychron.core.regression.new_york_regressor import NewYorkRegressor as klass
elif reg == 'york':
from pychron.core.regression.new_york_regressor import YorkRegressor as klass
else:
from pychron.core.regression.new_york_regressor import ReedYorkRegressor as klass
reg = klass(xs=xs, ys=ys,
xserr=xes, yserr=yes,
xds=xds, xdes=xdes,
xns=xns, xnes=xnes,
yns=yns, ynes=ynes)
reg.calculate()
return reg
def calculate_plateau_age(ages, errors, k39, kind='inverse_variance', method=FLECK, options=None, excludes=None):
"""
ages: list of ages
errors: list of corresponding 1sigma errors
k39: list of 39ArK signals
return age, error
"""
if options is None:
options = {}
ages = asarray(ages)
errors = asarray(errors)
k39 = asarray(k39)
fixed_steps = options.get('fixed_steps', False)
if fixed_steps and (fixed_steps[0] or fixed_steps[1]):
sstep, estep = fixed_steps
sstep, estep = sstep.upper(), estep.upper()
if not sstep:
sidx = 0
else:
sidx = alpha_to_int(sstep)
n = ages.shape[0] - 1
if not estep:
eidx = n
else:
eidx = alpha_to_int(estep)
sidx, eidx = min(sidx, eidx), min(max(sidx, eidx), n)
pidx = (sidx, eidx) if sidx < n else None
else:
from pychron.processing.plateau import Plateau
p = Plateau(ages=ages,
errors=errors,
signals=k39,
excludes=excludes,
overlap_sigma=options.get('overlap_sigma', 2),
nsteps=options.get('nsteps', 3),
gas_fraction=options.get('gas_fraction', 50))
pidx = p.find_plateaus(method)
if pidx:
sx = slice(pidx[0], pidx[1] + 1)
plateau_ages = ages[sx]
        if kind == 'vol_fraction':
            weights = k39[sx]
            # numpy.average returns a scalar; propagate the weighted-mean error
            # explicitly (assumes the volume-fraction weights are exact)
            wm = average(plateau_ages, weights=weights)
            we = (sum((weights * errors[sx]) ** 2)) ** 0.5 / sum(weights)
else:
plateau_errors = errors[sx]
wm, we = calculate_weighted_mean(plateau_ages, plateau_errors)
return wm, we, pidx
def calculate_flux(f, age, arar_constants=None, lambda_k=None):
"""
#rad40: radiogenic 40Ar
#k39: 39Ar from potassium
f: F value rad40Ar/39Ar
age: age of monitor in years
solve age equation for J
"""
if isinstance(f, (list, tuple)):
f = ufloat(*f)
if isinstance(age, (list, tuple)):
age = ufloat(*age)
try:
if not lambda_k:
if arar_constants is None:
arar_constants = ArArConstants()
lambda_k = nominal_value(arar_constants.lambda_k)
j = (umath.exp(age * lambda_k) - 1) / f
return j
except ZeroDivisionError:
return ufloat(1, 0)
def calculate_decay_time(dc, f):
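    # invert f = exp(dc * t): the time equivalent to decay factor f at rate dc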
return math.log(f) / dc
def calculate_arar_decay_factors_dalrymple(dc37, dc39, segments):
df37 = 0
df39 = 0
try:
tpower = sum([pi * ti for pi, ti, _, _, _ in segments])
for pi, ti, ti_p, _, _ in segments:
pti = (pi * ti) / tpower
df37 += pti * (ti * dc37 * math.exp(dc37 * ti_p)) / (1 - math.exp(-dc37 * ti))
df39 += pti * (ti * dc39 * math.exp(dc39 * ti_p)) / (1 - math.exp(-dc39 * ti))
except ZeroDivisionError:
df37, df39 = 1.0, 1.0
return df37, df39
def calculate_arar_decay_factors(dc37, dc39, segments, use_mh=True):
"""
McDougall and Harrison
p.75 equation 3.22
the book suggests using ti==analysis_time-end of irradiation segment_i (Wijbrans 1985)
mass spec uses ti==analysis_time-start of irradiation segment_i
using start seems more appropriate.
Note: 7/22/19
looking more carefully at Dalrymple etal 81 its clear that
using the end of the irradiation segment is actually the correct approach.
The decay that occurs during irradiation is handled by equation 39.
See page 34 Dalrymple etal 81
"""
if segments is None:
df37, df39 = 1.0, 1.0
else:
tpower = sum([pi * ti for pi, ti, _, _, _ in segments])
if use_mh:
b = sum([pi * ((1 - math.exp(-dc37 * ti)) / (dc37 * math.exp(dc37 * dti)))
for pi, ti, dti, _, _ in segments])
c = sum([pi * ((1 - math.exp(-dc39 * ti)) / (dc39 * math.exp(dc39 * dti)))
for pi, ti, dti, _, _ in segments])
try:
df37 = tpower / b
except ZeroDivisionError:
df37 = 1.0
try:
df39 = tpower / c
except ZeroDivisionError:
df39 = 1.0
else:
df37, df39 = calculate_arar_decay_factors_dalrymple(dc37, dc39, segments)
return df37, df39
def abundance_sensitivity_correction(isos, abundance_sensitivity):
s40, s39, s38, s37, s36 = isos
# correct for abundance sensitivity
# assumes symmetric and equal abundant sens for all peaks
n40 = s40 - abundance_sensitivity * (s39 + s39)
n39 = s39 - abundance_sensitivity * (s40 + s38)
n38 = s38 - abundance_sensitivity * (s39 + s37)
n37 = s37 - abundance_sensitivity * (s38 + s36)
n36 = s36 - abundance_sensitivity * (s37 + s37)
return [n40, n39, n38, n37, n36]
def apply_fixed_k3739(a39, pr, fixed_k3739):
"""
x=ca37/k39
y=ca37/ca39
T=s39dec_cor
T=ca39+k39
T=ca37/y+ca37/x
ca37=(T*x*y)/(x+y)
"""
x = fixed_k3739
try:
y = 1 / pr.get('Ca3937', 1)
except ZeroDivisionError:
y = 1
ca37 = (a39 * x * y) / (x + y)
ca39 = pr.get('Ca3937', 0) * ca37
k39 = a39 - ca39
k37 = x * k39
return ca37, ca39, k37, k39
def interference_corrections(a39, a37, production_ratios, arar_constants=None, fixed_k3739=False):
if production_ratios is None:
production_ratios = {}
if arar_constants is None:
arar_constants = ArArConstants()
pr = production_ratios
if arar_constants.k3739_mode.lower() == 'normal' and not fixed_k3739:
ca3937 = pr.get('Ca3937', 0)
k3739 = pr.get('K3739', 0)
k39 = (a39 - ca3937 * a37) / (1 - k3739 * ca3937)
k37 = pr.get('K3739', 0) * k39
ca37 = a37 - k37
ca39 = pr.get('Ca3937', 0) * ca37
else:
if not fixed_k3739:
fixed_k3739 = arar_constants.fixed_k3739
ca37, ca39, k37, k39 = apply_fixed_k3739(a39, pr, fixed_k3739)
k38 = pr.get('K3839', 0) * k39
if not arar_constants.allow_negative_ca_correction:
ca37 = max(ufloat(0, 0), ca37)
ca36 = pr.get('Ca3637', 0) * ca37
ca38 = pr.get('Ca3837', 0) * ca37
return k37, k38, k39, ca36, ca37, ca38, ca39
def calculate_atmospheric(a38, a36, k38, ca38, ca36, decay_time, production_ratios=None, arar_constants=None):
"""
McDougall and Harrison
Roddick 1983
Foland 1993
calculate atm36, cl36, cl38
# starting with the following equations
atm36 = a36 - ca36 - cl36
m = cl3638*lambda_cl36*decay_time
cl36 = cl38 * m
cl38 = a38 - k38 - ca38 - ar38atm
ar38atm = atm3836 * atm36
# rearranging to solve for atm36
cl38 = a38 - k38 - c38 - atm3836 * atm36
cl36 = m * (a38 - k38 - ca38 - atm3836 * atm36)
= m (a38 - k38 - ca38) - m * atm3836 * atm36
atm36 = a36 - ca36 - m (a38 - k38 - ca38) + m * atm3836 * atm36
atm36 - m * atm3836 * atm36 = a36 - ca36 - m (a38 - k38 - ca38)
atm36 * (1 - m*atm3836) = a36 - ca36 - m (a38 - k38 - ca38)
atm36 = (a36 - ca36 - m (a38 - k38 - c38))/(1 - m*atm3836)
"""
if production_ratios is None:
production_ratios = {}
if arar_constants is None:
arar_constants = ArArConstants()
pr = production_ratios
m = pr.get('Cl3638', 0) * nominal_value(arar_constants.lambda_Cl36) * decay_time
atm3836 = nominal_value(arar_constants.atm3836)
atm36 = (a36 - ca36 - m * (a38 - k38 - ca38)) / (1 - m * atm3836)
ar38atm = atm3836 * atm36
cl38 = a38 - ar38atm - k38 - ca38
cl36 = cl38 * m
return atm36, cl36, cl38
def calculate_f(isotopes, decay_time, interferences=None, arar_constants=None, fixed_k3739=False):
"""
isotope values corrected for blank, baseline, (background)
ic_factor, (discrimination), ar37 and ar39 decay
"""
a40, a39, a38, a37, a36 = isotopes
def calc_f(pr):
k37, k38, k39, ca36, ca37, ca38, ca39 = interference_corrections(a39, a37, pr, arar_constants, fixed_k3739)
atm36, cl36, cl38 = calculate_atmospheric(a38, a36, k38, ca38, ca36,
decay_time,
pr,
arar_constants)
# calculate radiogenic
trapped_4036 = ufloat(nominal_value(arar_constants.atm4036), std_dev(arar_constants.atm4036))
trapped_4036.tag = 'trapped_4036'
atm40 = atm36 * trapped_4036
k4039 = pr.get('K4039', 0)
k40 = k39 * k4039
rad40 = a40 - atm40 - k40
try:
ff = rad40 / k39
except ZeroDivisionError:
ff = ufloat(1.0, 0)
nar = {'k40': k40, 'ca39': ca39, 'k38': k38, 'ca38': ca38,
'cl38': cl38, 'k37': k37, 'ca37': ca37, 'ca36': ca36,
'cl36': cl36}
try:
rp = rad40 / a40 * 100
except ZeroDivisionError:
rp = ufloat(0, 0)
comp = {'rad40': rad40, 'a40': a40, 'radiogenic_yield': rp,
'ca37': ca37, 'ca39': ca39, 'ca36': ca36, 'k39': k39,
'atm40': atm40}
ifc = {'Ar40': a40 - k40, 'Ar39': k39, 'Ar38': a38, 'Ar37': a37, 'Ar36': atm36}
return ff, nar, comp, ifc
if interferences is None:
interferences = {}
if arar_constants is None:
arar_constants = ArArConstants()
# make local copy of interferences
pr = {k: ufloat(nominal_value(v), std_dev=0, tag=v.tag) for k, v in interferences.items()}
f_wo_irrad, _, _, _ = calc_f(pr)
f, non_ar_isotopes, computed, interference_corrected = calc_f(interferences)
return f, f_wo_irrad, non_ar_isotopes, computed, interference_corrected
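# Illustrative use (hypothetical values; `isotopes` are fully corrected ufloats
# and `decay_time` is in years):
#   f, f_wo_irrad, nar, comp, ifc = calculate_f((a40, a39, a38, a37, a36), decay_time)
#   age = age_equation(j, f, arar_constants=ArArConstants())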
def convert_age(uage, original_monitor_age, original_lambda_k, new_monitor_age, new_lambda_k):
converter.setup(original_monitor_age, original_lambda_k)
if new_monitor_age is None:
age, err = converter.convert(nominal_value(uage), std_dev(uage))
uage = ufloat(age, err, tag=uage.tag)
return uage
def age_equation(j, f, include_decay_error=False, lambda_k=None, arar_constants=None):
if isinstance(j, tuple):
j = ufloat(*j)
elif isinstance(j, str):
j = ufloat(j)
if isinstance(f, tuple):
f = ufloat(*f)
elif isinstance(f, str):
f = ufloat(f)
if not lambda_k:
if arar_constants is None:
arar_constants = ArArConstants()
lambda_k = arar_constants.lambda_k
if arar_constants is None:
arar_constants = ArArConstants()
if not include_decay_error:
lambda_k = nominal_value(lambda_k)
try:
# lambda is defined in years, so age is in years
age = lambda_k ** -1 * umath.log(1 + j * f)
return arar_constants.scale_age(age, current='a')
except (ValueError, TypeError):
return ufloat(0, 0)
# ===============================================================================
# non-recursive
# ===============================================================================
def calculate_error_F(signals, F, k4039, ca3937, ca3637):
"""
McDougall and Harrison
p92 eq 3.43
"""
m40, m39, m38, m37, m36 = signals
G = m40 / m39
B = m36 / m39
D = m37 / m39
C1 = 295.5
C2 = ca3637.nominal_value
C3 = k4039.nominal_value
C4 = ca3937.nominal_value
ssD = D.std_dev ** 2
ssB = B.std_dev ** 2
ssG = G.std_dev ** 2
G = G.nominal_value
B = B.nominal_value
D = D.nominal_value
ssF = ssG + C1 ** 2 * ssB + ssD * (C4 * G - C1 * C4 * B + C1 * C2) ** 2
return ssF ** 0.5
def calculate_error_t(F, ssF, j, ssJ):
"""
McDougall and Harrison
p92 eq. 3.43
"""
JJ = j * j
FF = F * F
constants = ArArConstants()
    ll = nominal_value(constants.lambda_k) ** 2
sst = (JJ * ssF + FF * ssJ) / (ll * (1 + F * j) ** 2)
return sst ** 0.5
def calculate_fractional_loss(t, temp, a, model='plane', material='kfeldspar'):
"""
:param t: years
:param a: mm
:return:
"""
r = 1.9872036E-3 # kcal/(K*mol)
# convert a (mm) to cm
a /= 10
# convert t (years) to seconds
t *= 365.25 * 24 * 3600
# convert temp (C) to Kelvin
temp += 273.15
if material == 'kfeldspar':
        d_0 = 0.0098  # cm**2/s
ea = 43.8 # kcal/mol
d = d_0 * math.exp(-ea / (r * temp))
if model == 'plane':
f = 2 / math.pi ** 0.5 * (d * t / a ** 2) ** 0.5
if 1 >= f >= 0.45:
f = 1 - (8 / math.pi ** 2) * math.exp(-math.pi ** 2 * d * t / (4 * a ** 2))
return f
if __name__ == '__main__':
import matplotlib.pyplot as plt
import numpy as np
constant_a = True
if constant_a:
# constant a
a = 0.1
for temp in (300, 400, 500, 600, 700):
ts = np.linspace(1, 1000)
fs = [calculate_fractional_loss(ti, temp, a) for ti in ts]
# for t in ts:
# print(t, calculate_fractional_loss(t, temp, a))
plt.plot(ts, fs, label='{}C'.format(temp))
plt.title('Constant Diffusion Length Scale ({}mm)'.format(a))
else:
# constant temp
temp = 475
for a in (0.01, 0.1, 1):
ts = (1, 5, 10, 50, 100, 500, 1000)
fs = [calculate_fractional_loss(ti, temp, a) for ti in ts]
# for t in ts:
# print(t, calculate_fractional_loss(t, temp, a))
plt.plot(ts, fs, label='{}mm'.format(a))
plt.title('Constant Temp ({}C)'.format(temp))
plt.legend()
plt.xlabel('Time (a)')
plt.ylabel('fractional loss')
plt.ylim(0, 1)
plt.show()
# ============= EOF =====================================
# # plateau definition
# plateau_criteria = {'number_steps': 3}
#
#
# def overlap(a1, a2, e1, e2, overlap_sigma):
# e1 *= overlap_sigma
# e2 *= overlap_sigma
# if a1 - e1 < a2 + e2 and a1 + e1 > a2 - e2:
# return True
#
# def find_plateaus(ages, errors, signals, overlap_sigma=1, exclude=None):
# """
# return list of plateau indices
# """
#
# if exclude is None:
# exclude = []
# plats = []
# platids = []
# for i in range(len(ages)):
# if i in exclude:
# continue
# ids = _find_plateau(ages, errors, signals, i, overlap_sigma, exclude)
# if ids is not None and ids.any():
# start, end = ids
# plats.append(end - start)
# platids.append((start, end))
#
# # print plats, platids
# if plats:
# plats = asarray(plats)
# #platids = asarray(platids)
#
# ps = platids[argmax(plats)]
# if ps[0] != ps[1]:
# return ps
#
#
# def _find_plateau(ages, errors, signals, start, overlap_sigma, exclude):
# plats = []
# platids = []
# for i in range(1, len(ages)):
# if i in exclude:
# continue
# if check_plateau(ages, errors, signals, start, i, overlap_sigma, exclude):
# plats.append(i - start)
# platids.append((start, i))
# if plats:
# plats = asarray(plats)
# platids = asarray(platids)
# return platids[argmax(plats)]
#
#
# def check_plateau(ages, errors, signals, start, end, overlap_sigma, exclude):
# for i in range(start, min(len(ages), end + 1)):
# if i in exclude:
# continue
# for j in range(start, min(len(ages), end + 1)):
# if j in exclude:
# continue
# if i != j:
# obit = not overlap(ages[i], ages[j], errors[i], errors[j], overlap_sigma)
# mswdbit = not check_mswd(ages, errors, start, end)
# percent_releasedbit = not check_percent_released(signals, start, end)
# n_steps_bit = (end - start) + 1 < 3
# if (obit or
# mswdbit or
# percent_releasedbit or
# n_steps_bit):
# return False
#
# return True
#
#
# def check_percent_released(signals, start, end):
# tot = sum(signals)
# s = sum(signals[start:end + 1])
# return s / tot >= 0.5
#
#
# def check_mswd(ages, errors, start, end):
# # a_s = ages[start:end + 1]
# # e_s = errors[start:end + 1]
# # print calculate_mswd(a_s, e_s)
# return True
#
#
# # ===============================================================================
# # recursive
# # from timeit testing recursive method is not any faster
# # use non recursive method instead purely for readablity
# # ===============================================================================
#
# def find_plateaus_r(ages, errors, start=0, end=1, plats=None, platids=None):
# if plats is None:
# plats = []
# platids = []
#
# if start == len(ages) or end == len(ages):
# plats = asarray(plats)
# platids = asarray(platids)
# return platids[argmax(plats)]
# else:
# a = check_plateau_r(ages, errors, start, end)
# if a:
# plats.append((end - start))
# platids.append((start, end))
#
# return find_plateaus_r(ages, errors, start, end + 1, plats, platids)
# else:
# return find_plateaus_r(ages, errors, start + 1, end + 1, plats, platids)
#
#
# def check_plateau_r(ages, errors, start, end, isplat=True):
# if end < len(ages):
# return isplat and check_plateau_r(ages, errors, start, end + 1, isplat)
# else:
# for i in range(start, min(len(ages), end + 1)):
# for j in range(start, min(len(ages), end + 1)):
# if i != j:
# if not overlap(ages[i], ages[j], errors[i], errors[j]):
# return False
# return True
#
#
#
# #def plateau_age(data):
# # '''
# # data = rowtuple of corrected data
# # '''
# # #calculate the ages and store ref to 39
# # ages = []
# # ar_39_signals = []
# #
# # integrated = new_unknown()
# # keys = ['ar40', 'ar39', 'ar38', 'ar37', 'ar36']
# # integrated.j_value = data[0].j_value
# # for d in data:
# # for k in keys:
# # integrated.isotopes[k] += d.isotopes[k]
# # ar_39_signals.append(d.isotopes['ar39'])
# #
# # ages.append(calc_corrected_age(d))
# # print 'integrated age :', calc_corrected_age(integrated)
# #
# # indices = find_plateaus(ages)
# # if indices is None:
# # print 'no plateau'
# # for d in data[indices[0]:indices[1]+1]:
# # print 'plateau step',d.AnalysisID,d.DataReductionSessionID
# #
# #def calc_corrected_age(corrected_unknown):
# # '''
# # return age in Ma
# #
# # '''
# #
# # #correct unknown for blank value
# # corrected_unknown.correct_for_blank()
# # corrected_unknown.correct_for_decay()
# #
# # days_since_irradiation = corrected_unknown.days_since_irradiation()
# #
# # #set up some shorthand names
# # corrected_40 = corrected_unknown.isotopes['ar40']
# # corrected_39 = corrected_unknown.isotopes['ar39']
# # corrected_38 = corrected_unknown.isotopes['ar38']
# # corrected_37 = corrected_unknown.isotopes['ar37']
# # corrected_36 = corrected_unknown.isotopes['ar36']
# #
# #
# # j_value = corrected_unknown.j_value
# # production_ratio = corrected_unknown.production_ratio
# #
# # return __corrected_age_calc__(corrected_40, corrected_39, corrected_38, corrected_37, corrected_36,
# # j_value, production_ratio, days_since_irradiation) / 1e6
#
#
#
# def calculate_arar_age(signals, ratios, ratio_errs,
# a37decayfactor, a39decayfactor, j, jer, d, der):
# s40, s40er, s39, s39er, s38, s38er, s37, s37er, s36, s36er = signals
# p36cl38cl, k4039, k3839, ca3637, ca3937, ca3837 = ratios
# k4039er, ca3637er, ca3937er = ratio_errs
# # a37decayfactor = 1
# # a39decayfactor = 1
# #convert to ufloat
# from uncertainties import ufloat
# from uncertainties.umath import log
#
# s40 = ufloat((s40, s40er))
# s39 = ufloat((s39, s39er))
# s38 = ufloat((s38, s38er))
# s37 = ufloat((s37, s37er))
# s36 = ufloat((s36, s36er))
# k4039 = ufloat((k4039, k4039er))
# ca3637 = ufloat((ca3637, ca3637er))
# ca3937 = ufloat((ca3937, ca3937er))
# j = ufloat((j, jer))
# d = ufloat((d, der))
#
# # #calculate the age
# ca37 = s37 * a37decayfactor
# s39 = s39 * a39decayfactor
# ca36 = ca3637 * ca37
# ca38 = ca3837 * ca37
# ca39 = ca3937 * ca37
# k39 = s39 - ca39
# k38 = k3839 * k39
#
# time_since_irradiation = (log(1 / a37decayfactor) /
# (-1 * constants.lambda_37 * 365.25))
#
# if constants.lambda_cl36 < 0.1:
# m = p36cl38cl * constants.lambda_cl36 * time_since_irradiation
# else:
# m = p36cl38cl
# mcl = m / (m * constants.atm3836 - 1)
# cl36 = mcl * (constants.atm3836 * (s36 - ca36) - s38 + k38 + ca38)
# atm36 = s36 - ca36 - cl36
#
# atm40 = atm36 * constants.atm4036
# k40 = k39 * k4039
# ar40rad = s40 - atm40 - k40
# JR = j * ar40rad / k39
# # age = (1 / constants.lambdak) * math.log(1 + JR)
# age = (1 / constants.lambdak) * log(1 + JR)
#
# # ==========================================================================
# # errors mass spec copy
# # ==========================================================================
#
# square = lambda x: x * x
#
# Tot40Er = s40er
# Tot39Er = s39er
# Tot38Er = s38er
# Tot37Er = s37er
# Tot36Er = s36er
#
# D = d
# D2 = d * d
# D3 = d * D2
# D4 = d * D3
#
# T40 = s40 / D4
# T39 = s39 / D3
# T38 = s39 / D2
# T37 = s39 / D
# T36 = s36
#
# A4036 = constants.atm4036
# A3836 = constants.atm3836
#
# s = ca3937 * D * T37
# T = ca3637 * D * T37
# G = D3 * T39 - s
# # P = mcl * (ca3837 * D * T37 + A3836 * (T36 - T) - D2 * T38 + k3839 * G)
# R = (-k4039 * G - A4036 * (T36 - T - mcl * (ca3837 * D * T37 + A3836 *
# (T36 - T) - D2 * T38 + k3839 * G)) + D4 * T40)
# G2 = G * G
#
# er40 = square(D4 * j / G) * square(Tot40Er)
#
# er39 = square((j * (-D3 * k4039 + A4036 * D3 * k3839 * mcl)) / G - (D3 * j * R) / G2) * square(Tot39Er)
#
# er38 = square(A4036 * D2 * j * mcl / G) * square(Tot38Er)
#
# er37 = square((j * (ca3937 * D * k4039 - A4036 *
# (-ca3637 * D - (-A3836 * ca3637 * D + ca3837 * D - ca3937 * D * k3839) * mcl)))
# / G + (ca3937 * D * j * R) / G2) * square(Tot37Er)
#
# er36 = square(A4036 * j * (1 - A3836 * mcl) / G) * square(Tot36Er)
# '''
# square((j * (4 * T40 * D3 - K4039 * (3 * D2 * T39 - Ca3937 * T37)
# - A4036 * (-(Ca3637 * T37) - MCl * (-(A3836 * Ca3637 * T37)
# + Ca3837 * T37 + K3839 * (3 * D2 * T39 - Ca3937 * T37)
# - 2 * D * T38))))
# / (D3 * T39 - s) - (1 * j * (3 * D2 * T39 - Ca3937 * T37)
# * (T40 * D4 - K4039 * (D3 * T39 - s)
# - A4036 * (T36 - T - MCl * (-(T38 * D2) + Ca3837 * T37 * D + A3836 * (T36 - T) + K3839 * (D3 * T39 - s)))))
# / square(D3 * T39 - s)) * square(DiscEr)
# '''
# erD = square((j * (4 * T40 * D3 - k4039 * (3 * D2 * T39 - ca3937 * T37)
# - A4036 * (-(ca3637 * T37) - mcl * (-(A3836 * ca3637 * T37)
# + ca3837 * T37 + k3839 * (3 * D2 * T39 - ca3937 * T37)
# - 2 * D * T38))))
# / (D3 * T39 - s) - (1 * j * (3 * D2 * T39 - ca3937 * T37)
# * (T40 * D4 - k4039 * (D3 * T39 - s)
# - A4036 * (T36 - T - mcl * (-(T38 * D2) + ca3837 * T37 * D + A3836 * (T36 - T) + k3839 * (D3 * T39 - s)))))
# / square(D3 * T39 - s)) * square(der)
#
# er4039 = square(j * (s - D3 * T39) / G) * square(k4039er)
#
# er3937 = square((j * (D * k4039 * T37 - A4036 * D * k3839 * mcl * T37)) / G +
# (D * j * T37 * R) / G2) * square(ca3937er)
#
# er3637 = square(-((A4036 * j * (-D * T37 + A3836 * D * mcl * T37)) / G)) * square(ca3637er)
#
# erJ = square(R / G) * square(jer)
# JRer = (er40 + er39 + er38 + er37 + er36 + erD + er4039 + er3937 + er3637 + erJ) ** 0.5
# age_err = (1e-6 / constants.lambdak) * JRer / (1 + ar40rad / k39 * j)
# ===============================================================================
# # error pychron port
# ===============================================================================
# # s = ca3937 * s37
# # T = ca3637 * s37
# # G = s39 - s
# # R = (-k4039 * G - constants.atm4036 * (s36 - T - mcl * (ca3837 * s37 +
# constants.atm3836 * (s36 - T) - s38 + k3839 * G)) + s40)
# # #ErComp(1) = square(D4 * j / G) * square(Tot40Er)
# # er40 = (d ** 4 * j / G) ** 2 * s40er ** 2
# #
# # #square((j * (-D3 * K4039 + A4036 * D3 * K3839 * MCl)) / G - (D3 * j * R) / G2) * square(Tot39Er)
# # d3 = d ** 3
# # er39 = ((j * (-d3 * k4039 + constants.atm4036 * d3 * k3839 * mcl)) / G - (d3 * j * R) / G ** 2) ** 2 * s39er ** 2
# #
# # #square(A4036 * D2 * j * MCl / G) * square(Tot38Er)
# # er38 = (constants.atm4036 * d * d * j * mcl / G) ** 2 * s38er ** 2
# #
# # #square((j * (Ca3937 * D * K4039 - A4036 *
# # # (-Ca3637 * D - (-A3836 * Ca3637 * D + Ca3837 * D - Ca3937 * D * K3839) * MCl)))
# # # / G + (Ca3937 * D * j * R) / G2) * square(Tot37Er)
# # er37 = ((j * (ca3937 * d * k4039 - constants.atm4036
# # * (-ca3637 * d - (-constants.atm3836 * ca3637 * d + ca3837 * d - ca3937 * d * k3839) * mcl)))
# # / G + (ca3937 * d * j * R) / G ** 2) ** 2 * s37er ** 2
# #
# # #square(A4036 * j * (1 - A3836 * MCl) / G) * square(Tot36Er)
# # er36 = (constants.atm4036 * j * (1 - constants.atm3836 * mcl) / G) ** 2 * s36er ** 2
# #
# # #square((j * (4 * T40 * D3 - K4039 * (3 * D2 * T39 - Ca3937 * T37)
# # # -A4036 * (-(Ca3637 * T37) - MCl * (-(A3836 * Ca3637 * T37)
# # # + Ca3837 * T37 + K3839 * (3 * D2 * T39 - Ca3937 * T37)
# # # - 2 * D * T38))))
# # # / (D3 * T39 - s) - (1 * j * (3 * D2 * T39 - Ca3937 * T37)
# # # * (T40 * D4 - K4039 * (D3 * T39 - s)
# # # - A4036 * (T36 - T - MCl * (-(T38 * D2) + Ca3837 * T37 * D + A3836 * (T36 - T) + K3839 * (D3 * T39 - s)))))
# # # / square(D3 * T39 - s)) * square(DiscEr)
# #
# # erD = ((j * (4 * s40 / d - k4039 * (3 * s39 / d - ca3937 * s37 / d)
# # - constants.atm4036 * (-(ca3637 * s37 / d) - mcl * (-(constants.atm3836 * ca3637 * s37 / d)
# # + ca3837 * s37 / d + k3839 * (3 * s39 / d - ca3937 * s37 / d)
# # - 2 * s38 / d))))
# # / (s39 / d - s) - (1 * j * (3 * s39 / d - ca3937 * s37 / d)
# # * (s40 / d - k4039 * (s40 / d - s)
# # - constants.atm4036 * (s36 - T - mcl * (-(s38 / d) + ca3837 * s37 +
# constants.atm3836 * (s36 - T) + k3839 * (s39 / d - s)))))
# # / (s39 / d - s) ** 2) ** 2 * der ** 2
# # #square(j * (s - D3 * T39) / G) * square(K4039Er)
# # er4039 = (j * (s - s39 / d) / G) ** 2 * k4039er ** 2
# #
# # #square((j * (D * K4039 * T37 - A4036 * D * K3839 * MCl * T37)) / G + (D * j * T37 * R) / G2) * square(Ca3937Er)
# # er3937 = ((j * (k4039 * s37 - constants.atm4036 * k3839 * mcl * s37)) / G +
# (j * s37 * R) / G ** 2) ** 2 * ca3937er ** 2
# #
# # #square(-((A4036 * j * (-D * T37 + A3836 * D * MCl * T37)) / G)) * square(Ca3637Er)
# # er3637 = (-((constants.atm4036 * j * (-s37 + constants.atm3836 * mcl * s37)) / G)) ** 2 * ca3637er ** 2
# #
# # #square(R / G) * square(JErLocal)
# # erJ = (R / G) ** 2 * jer ** 2
# # JRer = (er40 + er39 + er38 + er37 + er36 + erD + er4039 + er3937 + er3637 + erJ) ** 0.5
# # age_err = (1e-6 / constants.lambdak) * JRer / (1 + ar40rad / k39 * j)
#
# return age / 1e6, age_err
| apache-2.0 | 2,994,392,841,669,814,300 | 32.182863 | 120 | 0.519052 | false |
elkingtonmcb/bcbio-nextgen | bcbio/structural/metasv.py | 1 | 2666 | """Perform ensemble calling of structural variants using MetaSV.
https://github.com/chapmanb/metasv
http://dx.doi.org/10.1093/bioinformatics/btv204
"""
import os
import sys
from bcbio import utils
from bcbio.provenance import do
from bcbio.pipeline import datadict as dd
from bcbio.structural import shared
from bcbio.variation import vfilter
MIN_CALLERS = 2
SUPPORTED = set(["manta", "lumpy", "cnvkit", "wham"])
def run(calls, data):
"""Run MetaSV if we have enough supported callers, adding output to the set of calls.
"""
work_dir = _sv_workdir(data)
out_file = os.path.join(work_dir, "variants.vcf.gz")
cmd = _get_cmd() + ["--sample", dd.get_sample_name(data), "--reference", dd.get_ref_file(data),
"--bam", dd.get_align_bam(data), "--outdir", work_dir]
available_callers = 0
for call in calls:
if call["variantcaller"] in SUPPORTED:
available_callers += 1
cmd += ["--%s_vcf" % call["variantcaller"], call.get("vcf_file", call["vrn_file"])]
if available_callers >= MIN_CALLERS:
if not utils.file_exists(out_file):
tx_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
ins_stats = shared.calc_paired_insert_stats_save(dd.get_align_bam(data),
os.path.join(tx_work_dir, "insert-stats.yaml"))
cmd += ["--workdir", tx_work_dir, "--num_threads", str(dd.get_num_cores(data))]
cmd += ["--spades", utils.which("spades.py"), "--age", utils.which("age_align")]
cmd += ["--assembly_max_tools=1", "--assembly_pad=500"]
cmd += ["--boost_ins", "--isize_mean", ins_stats["mean"], "--isize_sd", ins_stats["std"]]
do.run(cmd, "Combine variant calls with MetaSV")
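        # Hard-filter heuristics combining caller support (NUM_SVTOOLS) with
        # MetaSV's assembly annotations (BA_FLANK_PERCENT, BA_NUM_GOOD_REC)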
filters = ("(NUM_SVTOOLS = 1 && ABS(SVLEN)>10000) || "
"(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_FLANK_PERCENT>20) || "
"(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_NUM_GOOD_REC=0) || "
"(ABS(SVLEN)<4000 && BA_NUM_GOOD_REC>1)")
filter_file = vfilter.hard_w_expression(out_file, filters,
data, name="ReassemblyStats", limit_regions=None)
calls.append({"variantcaller": "metasv",
"vrn_file": filter_file})
return calls
def _sv_workdir(data):
return utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "metasv"))
def _get_cmd():
return [sys.executable, os.path.join(os.path.dirname(sys.executable), "run_metasv.py")]
| mit | -8,279,006,433,332,740,000 | 47.472727 | 107 | 0.574644 | false |
ai-se/parGALE | epoal_src/Z3ModelWebPortalUpdateAllMin.py | 1 | 13336 | from z3 import *
from consts import METRICS_MAXIMIZE, METRICS_MINIMIZE
FeatureIndexMap = {}
FeatureVariable = []
FeatureIndexMap['web_portal'] = 0
web_portal = Bool('web_portal')
FeatureVariable.append(web_portal)
FeatureIndexMap['web_portal'] = 1
web_portal = Bool('web_portal')
FeatureVariable.append(web_portal)
FeatureIndexMap['add_services'] = 2
add_services = Bool('add_services')
FeatureVariable.append(add_services)
FeatureIndexMap['site_stats'] = 3
site_stats = Bool('site_stats')
FeatureVariable.append(site_stats)
FeatureIndexMap['basic'] = 4
basic = Bool('basic')
FeatureVariable.append(basic)
FeatureIndexMap['advanced'] = 5
advanced = Bool('advanced')
FeatureVariable.append(advanced)
FeatureIndexMap['site_search'] = 6
site_search = Bool('site_search')
FeatureVariable.append(site_search)
FeatureIndexMap['images'] = 7
images = Bool('images')
FeatureVariable.append(images)
FeatureIndexMap['text'] = 8
text = Bool('text')
FeatureVariable.append(text)
FeatureIndexMap['html'] = 9
html = Bool('html')
FeatureVariable.append(html)
FeatureIndexMap['dynamic'] = 10
dynamic = Bool('dynamic')
FeatureVariable.append(dynamic)
FeatureIndexMap['ad_server'] = 11
ad_server = Bool('ad_server')
FeatureVariable.append(ad_server)
FeatureIndexMap['reports'] = 12
reports = Bool('reports')
FeatureVariable.append(reports)
FeatureIndexMap['popups'] = 13
popups = Bool('popups')
FeatureVariable.append(popups)
FeatureIndexMap['banners'] = 14
banners = Bool('banners')
FeatureVariable.append(banners)
FeatureIndexMap['ban_img'] = 15
ban_img = Bool('ban_img')
FeatureVariable.append(ban_img)
FeatureIndexMap['ban_flash'] = 16
ban_flash = Bool('ban_flash')
FeatureVariable.append(ban_flash)
FeatureIndexMap['keyword'] = 17
keyword = Bool('keyword')
FeatureVariable.append(keyword)
FeatureIndexMap['web_server'] = 18
web_server = Bool('web_server')
FeatureVariable.append(web_server)
FeatureIndexMap['logging'] = 19
logging = Bool('logging')
FeatureVariable.append(logging)
FeatureIndexMap['db'] = 20
db = Bool('db')
FeatureVariable.append(db)
FeatureIndexMap['file'] = 21
file = Bool('file')
FeatureVariable.append(file)
FeatureIndexMap['protocol'] = 22
protocol = Bool('protocol')
FeatureVariable.append(protocol)
FeatureIndexMap['nttp'] = 23
nttp = Bool('nttp')
FeatureVariable.append(nttp)
FeatureIndexMap['ftp'] = 24
ftp = Bool('ftp')
FeatureVariable.append(ftp)
FeatureIndexMap['https'] = 25
https = Bool('https')
FeatureVariable.append(https)
FeatureIndexMap['cont'] = 26
cont = Bool('cont')
FeatureVariable.append(cont)
FeatureIndexMap['static'] = 27
static = Bool('static')
FeatureVariable.append(static)
FeatureIndexMap['active'] = 28
active = Bool('active')
FeatureVariable.append(active)
FeatureIndexMap['asp'] = 29
asp = Bool('asp')
FeatureVariable.append(asp)
FeatureIndexMap['php'] = 30
php = Bool('php')
FeatureVariable.append(php)
FeatureIndexMap['jsp'] = 31
jsp = Bool('jsp')
FeatureVariable.append(jsp)
FeatureIndexMap['cgi'] = 32
cgi = Bool('cgi')
FeatureVariable.append(cgi)
FeatureIndexMap['persistence'] = 33
persistence = Bool('persistence')
FeatureVariable.append(persistence)
FeatureIndexMap['xml'] = 34
xml = Bool('xml')
FeatureVariable.append(xml)
FeatureIndexMap['database'] = 35
database = Bool('database')
FeatureVariable.append(database)
FeatureIndexMap['ri'] = 36
ri = Bool('ri')
FeatureVariable.append(ri)
FeatureIndexMap['data_storage'] = 37
data_storage = Bool('data_storage')
FeatureVariable.append(data_storage)
FeatureIndexMap['data_transfer'] = 38
data_transfer = Bool('data_transfer')
FeatureVariable.append(data_transfer)
FeatureIndexMap['user_auth'] = 39
user_auth = Bool('user_auth')
FeatureVariable.append(user_auth)
FeatureIndexMap['performance'] = 40
performance = Bool('performance')
FeatureVariable.append(performance)
FeatureIndexMap['ms'] = 41
ms = Bool('ms')
FeatureVariable.append(ms)
FeatureIndexMap['sec'] = 42
sec = Bool('sec')
FeatureVariable.append(sec)
FeatureIndexMap['min'] = 43
min = Bool('min')
FeatureVariable.append(min)
s = Solver()
# Parent-Children
s.add(Implies(add_services, web_portal))
s.add(Implies(web_server, web_portal))
s.add(Implies(persistence, web_portal))
s.add(Implies(ri, web_portal))
s.add(Implies(performance, web_portal))
s.add(Implies(site_stats, add_services))
s.add(Implies(site_search, add_services))
s.add(Implies(ad_server, add_services))
s.add(Implies(basic, site_stats))
s.add(Implies(advanced, site_stats))
s.add(Implies(images, site_search))
s.add(Implies(text, site_search))
s.add(Implies(html, text))
s.add(Implies(dynamic, text))
s.add(Implies(reports, ad_server))
s.add(Implies(popups, ad_server))
s.add(Implies(banners, ad_server))
s.add(Implies(keyword, ad_server))
s.add(Implies(ban_img, banners))
s.add(Implies(ban_flash, banners))
s.add(Implies(logging, web_server))
s.add(Implies(protocol, web_server))
s.add(Implies(cont, web_server))
s.add(Implies(db, logging))
s.add(Implies(file, logging))
s.add(Implies(nttp, protocol))
s.add(Implies(ftp, protocol))
s.add(Implies(https, protocol))
s.add(Implies(static, cont))
s.add(Implies(active, cont))
s.add(Implies(asp, active))
s.add(Implies(php, active))
s.add(Implies(jsp, active))
s.add(Implies(cgi, active))
s.add(Implies(xml, persistence))
s.add(Implies(database, persistence))
s.add(Implies(data_storage, ri))
s.add(Implies(data_transfer, ri))
s.add(Implies(user_auth, ri))
s.add(Implies(ms, performance))
s.add(Implies(sec, performance))
s.add(Implies(min, performance))
# Mandatory-Children
s.add(web_server == web_portal)
s.add(basic == site_stats)
s.add(html == text)
s.add(reports == ad_server)
s.add(banners == ad_server)
s.add(ban_img == banners)
s.add(cont == web_server)
s.add(static == cont)
# Exclusive-Or Constraints
s.add(db == And(Not(file), logging))
s.add(file == And(Not(db), logging))
s.add(xml == And(Not(database), persistence))
s.add(database == And(Not(xml), persistence))
s.add(ms == And(Not(sec), Not(min), performance))
s.add(sec == And(Not(ms), Not(min), performance))
s.add(min == And(Not(ms), Not(sec), performance))
# Or Constraints
s.add(protocol == Or(nttp, ftp, https))
s.add(active == Or(asp, php, jsp, cgi))
s.add(ri == Or(data_storage, data_transfer, user_auth))
# Requires Constraints
s.add(Implies(dynamic, active))
s.add(Implies(keyword, text))
s.add(Implies(db, database))
s.add(Implies(file, ftp))
s.add(Implies(data_transfer, https))
# Excludes Constraints
s.add(Not(And(https, ms)))
# Attributes
total_Cost = Real('total_Cost')
total_UsedBefore = Int('total_UsedBefore')
total_FeatureCount = Int('total_FeatureCount')
total_Defects = Int('total_Defects')
# Sums for Attributes
s.add(total_Cost == 7.6 * If(web_portal, 1.0, 0.0) \
+ 7.5 * If(add_services, 1.0, 0.0) \
+ 14.7 * If(site_stats, 1.0, 0.0) \
+ 6.2 * If(basic, 1.0, 0.0) \
+ 10.4 * If(advanced, 1.0, 0.0) \
+ 9.8 * If(site_search, 1.0, 0.0) \
+ 8.1 * If(images, 1.0, 0.0) \
+ 13.5 * If(text, 1.0, 0.0) \
+ 11.0 * If(html, 1.0, 0.0) \
+ 8.9 * If(dynamic, 1.0, 0.0) \
+ 7.7 * If(ad_server, 1.0, 0.0) \
+ 6.5 * If(reports, 1.0, 0.0) \
+ 11.4 * If(popups, 1.0, 0.0) \
+ 7.4 * If(banners, 1.0, 0.0) \
+ 13.4 * If(ban_img, 1.0, 0.0) \
+ 5.5 * If(ban_flash, 1.0, 0.0) \
+ 6.1 * If(keyword, 1.0, 0.0) \
+ 12.3 * If(web_server, 1.0, 0.0) \
+ 5.7 * If(logging, 1.0, 0.0) \
+ 10.3 * If(db, 1.0, 0.0) \
+ 5.9 * If(file, 1.0, 0.0) \
+ 11.5 * If(protocol, 1.0, 0.0) \
+ 13.7 * If(nttp, 1.0, 0.0) \
+ 13.0 * If(ftp, 1.0, 0.0) \
+ 10.0 * If(https, 1.0, 0.0) \
+ 9.7 * If(cont, 1.0, 0.0) \
+ 11.1 * If(static, 1.0, 0.0) \
+ 12.2 * If(active, 1.0, 0.0) \
+ 8.6 * If(asp, 1.0, 0.0) \
+ 10.6 * If(php, 1.0, 0.0) \
+ 13.0 * If(jsp, 1.0, 0.0) \
+ 12.1 * If(cgi, 1.0, 0.0) \
+ 10.5 * If(persistence, 1.0, 0.0) \
+ 14.1 * If(xml, 1.0, 0.0) \
+ 6.7 * If(database, 1.0, 0.0) \
+ 5.0 * If(ri, 1.0, 0.0) \
+ 9.6 * If(data_storage, 1.0, 0.0) \
+ 5.2 * If(data_transfer, 1.0, 0.0) \
+ 12.2 * If(user_auth, 1.0, 0.0) \
+ 13.7 * If(performance, 1.0, 0.0) \
+ 11.7 * If(ms, 1.0, 0.0) \
+ 9.1 * If(sec, 1.0, 0.0) \
+ 8.3 * If(min, 1.0, 0.0) \
)
s.add(total_UsedBefore == 1 * If(web_portal, 0, 1) \
+ 1 * If(add_services, 0, 1) \
+ 0 * If(site_stats, 0, 1) \
+ 1 * If(basic, 0, 1) \
+ 1 * If(advanced, 0, 1) \
+ 1 * If(site_search, 0, 1) \
+ 1 * If(images, 0, 1) \
+ 1 * If(text, 0, 1) \
+ 1 * If(html, 0, 1) \
+ 1 * If(dynamic, 0, 1) \
+ 1 * If(ad_server, 0, 1) \
+ 1 * If(reports, 0, 1) \
+ 0 * If(popups, 0, 1) \
+ 1 * If(banners, 0, 1) \
+ 1 * If(ban_img, 0, 1) \
+ 1 * If(ban_flash, 0, 1) \
+ 1 * If(keyword, 0, 1) \
+ 1 * If(web_server, 0, 1) \
+ 0 * If(logging, 0, 1) \
+ 1 * If(db, 0, 1) \
+ 0 * If(file, 0, 1) \
+ 1 * If(protocol, 0, 1) \
+ 1 * If(nttp, 0, 1) \
+ 1 * If(ftp, 0, 1) \
+ 0 * If(https, 0, 1) \
+ 0 * If(cont, 0, 1) \
+ 1 * If(static, 0, 1) \
+ 0 * If(active, 0, 1) \
+ 0 * If(asp, 0, 1) \
+ 1 * If(php, 0, 1) \
+ 1 * If(jsp, 0, 1) \
+ 1 * If(cgi, 0, 1) \
+ 0 * If(persistence, 0, 1) \
+ 0 * If(xml, 0, 1) \
+ 1 * If(database, 0, 1) \
+ 0 * If(ri, 0, 1) \
+ 1 * If(data_storage, 0, 1) \
+ 1 * If(data_transfer, 0, 1) \
+ 1 * If(user_auth, 0, 1) \
+ 0 * If(performance, 0, 1) \
+ 0 * If(ms, 0, 1) \
+ 1 * If(sec, 0, 1) \
+ 1 * If(min, 0, 1) \
)
s.add(total_FeatureCount == 1 * If(web_portal, 0, 1) \
+ 1 * If(add_services, 0, 1) \
+ 1 * If(site_stats, 0, 1) \
+ 1 * If(basic, 0, 1) \
+ 1 * If(advanced, 0, 1) \
+ 1 * If(site_search, 0, 1) \
+ 1 * If(images, 0, 1) \
+ 1 * If(text, 0, 1) \
+ 1 * If(html, 0, 1) \
+ 1 * If(dynamic, 0, 1) \
+ 1 * If(ad_server, 0, 1) \
+ 1 * If(reports, 0, 1) \
+ 1 * If(popups, 0, 1) \
+ 1 * If(banners, 0, 1) \
+ 1 * If(ban_img, 0, 1) \
+ 1 * If(ban_flash, 0, 1) \
+ 1 * If(keyword, 0, 1) \
+ 1 * If(web_server, 0, 1) \
+ 1 * If(logging, 0, 1) \
+ 1 * If(db, 0, 1) \
+ 1 * If(file, 0, 1) \
+ 1 * If(protocol, 0, 1) \
+ 1 * If(nttp, 0, 1) \
+ 1 * If(ftp, 0, 1) \
+ 1 * If(https, 0, 1) \
+ 1 * If(cont, 0, 1) \
+ 1 * If(static, 0, 1) \
+ 1 * If(active, 0, 1) \
+ 1 * If(asp, 0, 1) \
+ 1 * If(php, 0, 1) \
+ 1 * If(jsp, 0, 1) \
+ 1 * If(cgi, 0, 1) \
+ 1 * If(persistence, 0, 1) \
+ 1 * If(xml, 0, 1) \
+ 1 * If(database, 0, 1) \
+ 1 * If(ri, 0, 1) \
+ 1 * If(data_storage, 0, 1) \
+ 1 * If(data_transfer, 0, 1) \
+ 1 * If(user_auth, 0, 1) \
+ 1 * If(performance, 0, 1) \
+ 1 * If(ms, 0, 1) \
+ 1 * If(sec, 0, 1) \
+ 1 * If(min, 0, 1) \
)
s.add(total_Defects == 5 * If(web_portal, 1, 0) \
+ 6 * If(add_services, 1, 0) \
+ 0 * If(site_stats, 1, 0) \
+ 8 * If(basic, 1, 0) \
+ 5 * If(advanced, 1, 0) \
+ 6 * If(site_search, 1, 0) \
+ 5 * If(images, 1, 0) \
+ 6 * If(text, 1, 0) \
+ 4 * If(html, 1, 0) \
+ 4 * If(dynamic, 1, 0) \
+ 6 * If(ad_server, 1, 0) \
+ 4 * If(reports, 1, 0) \
+ 0 * If(popups, 1, 0) \
+ 2 * If(banners, 1, 0) \
+ 4 * If(ban_img, 1, 0) \
+ 5 * If(ban_flash, 1, 0) \
+ 5 * If(keyword, 1, 0) \
+ 1 * If(web_server, 1, 0) \
+ 0 * If(logging, 1, 0) \
+ 5 * If(db, 1, 0) \
+ 0 * If(file, 1, 0) \
+ 7 * If(protocol, 1, 0) \
+ 6 * If(nttp, 1, 0) \
+ 6 * If(ftp, 1, 0) \
+ 0 * If(https, 1, 0) \
+ 0 * If(cont, 1, 0) \
+ 3 * If(static, 1, 0) \
+ 0 * If(active, 1, 0) \
+ 0 * If(asp, 1, 0) \
+ 3 * If(php, 1, 0) \
+ 6 * If(jsp, 1, 0) \
+ 5 * If(cgi, 1, 0) \
+ 0 * If(persistence, 1, 0) \
+ 0 * If(xml, 1, 0) \
+ 5 * If(database, 1, 0) \
+ 0 * If(ri, 1, 0) \
+ 3 * If(data_storage, 1, 0) \
+ 4 * If(data_transfer, 1, 0) \
+ 4 * If(user_auth, 1, 0) \
+ 0 * If(performance, 1, 0) \
+ 0 * If(ms, 1, 0) \
+ 6 * If(sec, 1, 0) \
+ 6 * If(min, 1, 0) \
)
s.add(web_portal == True)
metrics_variables = [total_Cost, total_Defects, total_FeatureCount, total_UsedBefore]
metrics_objective_direction = [METRICS_MINIMIZE, METRICS_MINIMIZE, METRICS_MINIMIZE, METRICS_MINIMIZE]
# GuidedImprovementAlgorithm(s, metrics_variables, metrics_objective_direction, args=args, verbose=True)
# parser = argparse.ArgumentParser(description="Computes Pareto Front")
# parser.add_argument('--logfile', dest='logfile', metavar='logfile',\
# default="WebPortalICSE2013.json", type=str,
# help='File where to store detailed call logs')
# parser.add_argument('--timefile', dest='timefile', metavar='timefile',\
# default="timefile.csv", type=str,
# help='File where to store a total time count')
# parser.add_argument('--randomseedfile', dest='randomseedfile', metavar='randomseedfile',\
# default="randomseed.csv", type=str,
# help='File where to store random seed used')
#
# args = parser.parse_args()
#
#
# GIAOptions = GuidedImprovementAlgorithmOptions(verbosity=0, \
# incrementallyWriteLog=True, writeLogFilename=args.logfile, \
# writeTotalTimeFilename=args.timefile, writeRandomSeedsFilename=args.randomseedfile)
# GIAAlgorithm = GuidedImprovementAlgorithm(s, metrics_variables, metrics_objective_direction, FeatureVariable, options=GIAOptions)
# GIAAlgorithm.ExecuteGuidedImprovementAlgorithm()
#
#
# if __name__ == '__main__':
# execute_main()
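# A minimal illustrative sketch (not part of the original, commented-out
# driver above) of inspecting one satisfying assignment directly with Z3.
# It assumes the usual `from z3 import *` earlier in this script, which
# provides `sat`:
if __name__ == '__main__':
    if s.check() == sat:
        model = s.model()
        for metric in metrics_variables:
            print("%s = %s" % (metric, model[metric]))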
| unlicense | 5,921,559,828,932,283,000 | 28.587156 | 136 | 0.606929 | false |
gangadharkadam/frappecontribution | frappe/model/base_document.py | 1 | 18678 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, sys
from frappe import _
from frappe.utils import cint, flt, now, cstr, strip_html, getdate, get_datetime, to_timedelta
from frappe.model import default_fields
from frappe.model.naming import set_new_name
from frappe.modules import load_doctype_module
from frappe.model import display_fieldtypes
_classes = {}
def get_controller(doctype):
"""Returns the **class** object of the given DocType.
For `custom` type, returns `frappe.model.document.Document`.
:param doctype: DocType name as string."""
from frappe.model.document import Document
if not doctype in _classes:
module_name, custom = frappe.db.get_value("DocType", doctype, ["module", "custom"]) \
or ["Core", False]
if custom:
_class = Document
else:
module = load_doctype_module(doctype, module_name)
classname = doctype.replace(" ", "").replace("-", "")
			if hasattr(module, classname):
				_class = getattr(module, classname)
				if not issubclass(_class, BaseDocument):
					raise ImportError, doctype
			else:
				raise ImportError, doctype
_classes[doctype] = _class
return _classes[doctype]
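# Illustrative use of get_controller (the doctype name here is hypothetical):
#     controller_cls = get_controller("ToDo")
#     doc = controller_cls({"doctype": "ToDo", "description": "write docs"})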
class BaseDocument(object):
ignore_in_getter = ("doctype", "_meta", "meta", "_table_fields", "_valid_columns")
def __init__(self, d):
self.update(d)
self.dont_update_if_missing = []
if hasattr(self, "__setup__"):
self.__setup__()
@property
def meta(self):
if not hasattr(self, "_meta"):
self._meta = frappe.get_meta(self.doctype)
return self._meta
def update(self, d):
if "doctype" in d:
self.set("doctype", d.get("doctype"))
# first set default field values of base document
for key in default_fields:
if key in d:
self.set(key, d.get(key))
for key, value in d.iteritems():
self.set(key, value)
return self
def update_if_missing(self, d):
if isinstance(d, BaseDocument):
d = d.get_valid_dict()
if "doctype" in d:
self.set("doctype", d.get("doctype"))
for key, value in d.iteritems():
# dont_update_if_missing is a list of fieldnames, for which, you don't want to set default value
if (self.get(key) is None) and (value is not None) and (key not in self.dont_update_if_missing):
self.set(key, value)
def get_db_value(self, key):
return frappe.db.get_value(self.doctype, self.name, key)
def get(self, key=None, filters=None, limit=None, default=None):
if key:
if isinstance(key, dict):
return _filter(self.get_all_children(), key, limit=limit)
if filters:
if isinstance(filters, dict):
value = _filter(self.__dict__.get(key, []), filters, limit=limit)
else:
default = filters
filters = None
value = self.__dict__.get(key, default)
else:
value = self.__dict__.get(key, default)
if value is None and key not in self.ignore_in_getter \
and key in (d.fieldname for d in self.meta.get_table_fields()):
self.set(key, [])
value = self.__dict__.get(key)
return value
else:
return self.__dict__
def getone(self, key, filters=None):
return self.get(key, filters=filters, limit=1)[0]
def set(self, key, value, as_value=False):
if isinstance(value, list) and not as_value:
self.__dict__[key] = []
self.extend(key, value)
else:
self.__dict__[key] = value
def delete_key(self, key):
if key in self.__dict__:
del self.__dict__[key]
def append(self, key, value=None):
if value==None:
value={}
if isinstance(value, (dict, BaseDocument)):
if not self.__dict__.get(key):
self.__dict__[key] = []
value = self._init_child(value, key)
self.__dict__[key].append(value)
# reference parent document
value.parent_doc = self
return value
else:
raise ValueError, "Document attached to child table must be a dict or BaseDocument, not " + str(type(value))[1:-1]
def extend(self, key, value):
if isinstance(value, list):
for v in value:
self.append(key, v)
else:
raise ValueError
def remove(self, doc):
self.get(doc.parentfield).remove(doc)
def _init_child(self, value, key):
if not self.doctype:
return value
if not isinstance(value, BaseDocument):
if "doctype" not in value:
value["doctype"] = self.get_table_field_doctype(key)
if not value["doctype"]:
raise AttributeError, key
value = get_controller(value["doctype"])(value)
value.init_valid_columns()
value.parent = self.name
value.parenttype = self.doctype
value.parentfield = key
if value.docstatus is None:
value.docstatus = 0
if not getattr(value, "idx", None):
value.idx = len(self.get(key) or []) + 1
if not getattr(value, "name", None):
value.__dict__['__islocal'] = 1
return value
def get_valid_dict(self):
d = {}
for fieldname in self.meta.get_valid_columns():
d[fieldname] = self.get(fieldname)
df = self.meta.get_field(fieldname)
if df:
if df.fieldtype=="Check" and not isinstance(d[fieldname], int):
d[fieldname] = cint(d[fieldname])
elif df.fieldtype in ("Datetime", "Date") and d[fieldname]=="":
d[fieldname] = None
elif df.get("unique") and cstr(d[fieldname]).strip()=="":
# unique empty field should be set to None
d[fieldname] = None
return d
def init_valid_columns(self):
for key in default_fields:
if key not in self.__dict__:
self.__dict__[key] = None
for key in self.get_valid_columns():
if key not in self.__dict__:
self.__dict__[key] = None
def get_valid_columns(self):
if self.doctype not in frappe.local.valid_columns:
if self.doctype in ("DocField", "DocPerm") and self.parent in ("DocType", "DocField", "DocPerm"):
from frappe.model.meta import get_table_columns
valid = get_table_columns(self.doctype)
else:
valid = self.meta.get_valid_columns()
frappe.local.valid_columns[self.doctype] = valid
return frappe.local.valid_columns[self.doctype]
def is_new(self):
return self.get("__islocal")
def as_dict(self, no_nulls=False, no_default_fields=False):
doc = self.get_valid_dict()
doc["doctype"] = self.doctype
for df in self.meta.get_table_fields():
children = self.get(df.fieldname) or []
doc[df.fieldname] = [d.as_dict(no_nulls=no_nulls) for d in children]
if no_nulls:
for k in doc.keys():
if doc[k] is None:
del doc[k]
if no_default_fields:
for k in doc.keys():
if k in default_fields:
del doc[k]
for key in ("_user_tags", "__islocal", "__onload", "_starred_by"):
if self.get(key):
doc[key] = self.get(key)
return frappe._dict(doc)
def as_json(self):
return frappe.as_json(self.as_dict())
def get_table_field_doctype(self, fieldname):
return self.meta.get_field(fieldname).options
def get_parentfield_of_doctype(self, doctype):
fieldname = [df.fieldname for df in self.meta.get_table_fields() if df.options==doctype]
return fieldname[0] if fieldname else None
def db_insert(self):
"""INSERT the document (with valid columns) in the database."""
if not self.name:
# name will be set by document class in most cases
set_new_name(self)
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""insert into `tab{doctype}`
({columns}) values ({values})""".format(
doctype = self.doctype,
columns = ", ".join(["`"+c+"`" for c in columns]),
values = ", ".join(["%s"] * len(columns))
), d.values())
except Exception, e:
if e.args[0]==1062:
if "PRIMARY" in cstr(e.args[1]):
if self.meta.autoname=="hash":
# hash collision? try again
self.name = None
self.db_insert()
return
type, value, traceback = sys.exc_info()
frappe.msgprint(_("Duplicate name {0} {1}").format(self.doctype, self.name))
raise frappe.DuplicateEntryError, (self.doctype, self.name, e), traceback
elif "Duplicate" in cstr(e.args[1]):
# unique constraint
self.show_unique_validation_message(e)
else:
raise
else:
raise
self.set("__islocal", False)
def db_update(self):
if self.get("__islocal") or not self.name:
self.db_insert()
return
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""update `tab{doctype}`
set {values} where name=%s""".format(
doctype = self.doctype,
values = ", ".join(["`"+c+"`=%s" for c in columns])
), d.values() + [d.get("name")])
except Exception, e:
if e.args[0]==1062 and "Duplicate" in cstr(e.args[1]):
self.show_unique_validation_message(e)
else:
raise
def show_unique_validation_message(self, e):
type, value, traceback = sys.exc_info()
fieldname, label = str(e).split("'")[-2], None
# unique_first_fieldname_second_fieldname is the constraint name
# created using frappe.db.add_unique
if "unique_" in fieldname:
fieldname = fieldname.split("_", 1)[1]
df = self.meta.get_field(fieldname)
if df:
label = df.label
frappe.msgprint(_("{0} must be unique".format(label or fieldname)))
# this is used to preserve traceback
raise frappe.UniqueValidationError, (self.doctype, self.name, e), traceback
def db_set(self, fieldname, value, update_modified=True):
self.set(fieldname, value)
self.set("modified", now())
self.set("modified_by", frappe.session.user)
frappe.db.set_value(self.doctype, self.name, fieldname, value,
self.modified, self.modified_by, update_modified=update_modified)
def _fix_numeric_types(self):
for df in self.meta.get("fields"):
if df.fieldtype == "Check":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif self.get(df.fieldname) is not None:
if df.fieldtype == "Int":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif df.fieldtype in ("Float", "Currency", "Percent"):
self.set(df.fieldname, flt(self.get(df.fieldname)))
if self.docstatus is not None:
self.docstatus = cint(self.docstatus)
def _get_missing_mandatory_fields(self):
"""Get mandatory fields that do not have any values"""
def get_msg(df):
if df.fieldtype == "Table":
return "{}: {}: {}".format(_("Error"), _("Data missing in table"), _(df.label))
elif self.parentfield:
return "{}: {} #{}: {}: {}".format(_("Error"), _("Row"), self.idx,
_("Value missing for"), _(df.label))
else:
return "{}: {}: {}".format(_("Error"), _("Value missing for"), _(df.label))
missing = []
for df in self.meta.get("fields", {"reqd": 1}):
if self.get(df.fieldname) in (None, []) or not strip_html(cstr(self.get(df.fieldname))).strip():
missing.append((df.fieldname, get_msg(df)))
return missing
def get_invalid_links(self, is_submittable=False):
def get_msg(df, docname):
if self.parentfield:
return "{} #{}: {}: {}".format(_("Row"), self.idx, _(df.label), docname)
else:
return "{}: {}".format(_(df.label), docname)
invalid_links = []
cancelled_links = []
for df in self.meta.get_link_fields() + self.meta.get("fields",
{"fieldtype":"Dynamic Link"}):
docname = self.get(df.fieldname)
if docname:
if df.fieldtype=="Link":
doctype = df.options
if not doctype:
frappe.throw(_("Options not set for link field {0}").format(df.fieldname))
else:
doctype = self.get(df.options)
if not doctype:
frappe.throw(_("{0} must be set first").format(self.meta.get_label(df.options)))
# MySQL is case insensitive. Preserve case of the original docname in the Link Field.
value = frappe.db.get_value(doctype, docname, "name", cache=True)
setattr(self, df.fieldname, value)
if not value:
invalid_links.append((df.fieldname, docname, get_msg(df, docname)))
elif (df.fieldname != "amended_from"
and (is_submittable or self.meta.is_submittable) and frappe.get_meta(doctype).is_submittable
and cint(frappe.db.get_value(doctype, docname, "docstatus"))==2):
cancelled_links.append((df.fieldname, docname, get_msg(df, docname)))
return invalid_links, cancelled_links
def _validate_selects(self):
if frappe.flags.in_import:
return
for df in self.meta.get_select_fields():
if df.fieldname=="naming_series" or not (self.get(df.fieldname) and df.options):
continue
options = (df.options or "").split("\n")
# if only empty options
if not filter(None, options):
continue
# strip and set
self.set(df.fieldname, cstr(self.get(df.fieldname)).strip())
value = self.get(df.fieldname)
if value not in options and not (frappe.flags.in_test and value.startswith("_T-")):
# show an elaborate message
prefix = _("Row #{0}:").format(self.idx) if self.get("parentfield") else ""
label = _(self.meta.get_label(df.fieldname))
comma_options = '", "'.join(_(each) for each in options)
frappe.throw(_('{0} {1} cannot be "{2}". It should be one of "{3}"').format(prefix, label,
value, comma_options))
def _validate_constants(self):
if frappe.flags.in_import or self.is_new():
return
constants = [d.fieldname for d in self.meta.get("fields", {"set_only_once": 1})]
if constants:
values = frappe.db.get_value(self.doctype, self.name, constants, as_dict=True)
for fieldname in constants:
if self.get(fieldname) != values.get(fieldname):
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(fieldname)),
frappe.CannotChangeConstantError)
def _validate_update_after_submit(self):
db_values = frappe.db.get_value(self.doctype, self.name, "*", as_dict=True)
for key, db_value in db_values.iteritems():
df = self.meta.get_field(key)
if df and not df.allow_on_submit and (self.get(key) or db_value):
self_value = self.get_value(key)
if self_value != db_value:
frappe.throw(_("Not allowed to change {0} after submission").format(df.label),
frappe.UpdateAfterSubmitError)
def precision(self, fieldname, parentfield=None):
"""Returns float precision for a particular field (or get global default).
:param fieldname: Fieldname for which precision is required.
:param parentfield: If fieldname is in child table."""
from frappe.model.meta import get_field_precision
if parentfield and not isinstance(parentfield, basestring):
parentfield = parentfield.parentfield
cache_key = parentfield or "main"
if not hasattr(self, "_precision"):
self._precision = frappe._dict()
if cache_key not in self._precision:
self._precision[cache_key] = frappe._dict()
if fieldname not in self._precision[cache_key]:
self._precision[cache_key][fieldname] = None
doctype = self.meta.get_field(parentfield).options if parentfield else self.doctype
df = frappe.get_meta(doctype).get_field(fieldname)
if df.fieldtype in ("Currency", "Float", "Percent"):
self._precision[cache_key][fieldname] = get_field_precision(df, self)
return self._precision[cache_key][fieldname]
def get_formatted(self, fieldname, doc=None, currency=None, absolute_value=False):
from frappe.utils.formatters import format_value
df = self.meta.get_field(fieldname)
if not df and fieldname in default_fields:
from frappe.model.meta import get_default_df
df = get_default_df(fieldname)
val = self.get(fieldname)
if absolute_value and isinstance(val, (int, float)):
val = abs(self.get(fieldname))
if not doc:
doc = getattr(self, "parent_doc", None) or self
return format_value(val, df=df, doc=doc, currency=currency)
def is_print_hide(self, fieldname, df=None, for_print=True):
"""Returns true if fieldname is to be hidden for print.
Print Hide can be set via the Print Format Builder or in the controller as a list
of hidden fields. Example
class MyDoc(Document):
def __setup__(self):
self.print_hide = ["field1", "field2"]
:param fieldname: Fieldname to be checked if hidden.
"""
meta_df = self.meta.get_field(fieldname)
if meta_df and meta_df.get("__print_hide"):
return True
if df:
return df.print_hide
if meta_df:
return meta_df.print_hide
def in_format_data(self, fieldname):
"""Returns True if shown via Print Format::`format_data` property.
Called from within standard print format."""
doc = getattr(self, "parent_doc", self)
if hasattr(doc, "format_data_map"):
return fieldname in doc.format_data_map
else:
return True
def reset_values_if_no_permlevel_access(self, has_access_to, high_permlevel_fields):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
to_reset = []
for df in high_permlevel_fields:
if df.permlevel not in has_access_to and df.fieldtype not in display_fieldtypes:
to_reset.append(df)
if to_reset:
if self.is_new():
# if new, set default value
ref_doc = frappe.new_doc(self.doctype)
else:
# get values from old doc
if self.parent:
self.parent_doc.get_latest()
ref_doc = [d for d in self.parent_doc.get(self.parentfield) if d.name == self.name][0]
else:
ref_doc = self.get_latest()
for df in to_reset:
self.set(df.fieldname, ref_doc.get(df.fieldname))
def get_value(self, fieldname):
df = self.meta.get_field(fieldname)
val = self.get(fieldname)
return self.cast(val, df)
def cast(self, val, df):
if df.fieldtype in ("Currency", "Float", "Percent"):
val = flt(val)
elif df.fieldtype in ("Int", "Check"):
val = cint(val)
elif df.fieldtype in ("Data", "Text", "Small Text", "Long Text",
"Text Editor", "Select", "Link", "Dynamic Link"):
val = cstr(val)
elif df.fieldtype == "Date":
val = getdate(val)
elif df.fieldtype == "Datetime":
val = get_datetime(val)
elif df.fieldtype == "Time":
val = to_timedelta(val)
return val
def _extract_images_from_text_editor(self):
from frappe.utils.file_manager import extract_images_from_doc
if self.doctype != "DocType":
for df in self.meta.get("fields", {"fieldtype":"Text Editor"}):
extract_images_from_doc(self, df.fieldname)
def _filter(data, filters, limit=None):
"""pass filters as:
{"key": "val", "key": ["!=", "val"],
"key": ["in", "val"], "key": ["not in", "val"], "key": "^val",
"key" : True (exists), "key": False (does not exist) }"""
out = []
for d in data:
add = True
for f in filters:
fval = filters[f]
if fval is True:
fval = ("not None", fval)
elif fval is False:
fval = ("None", fval)
elif not isinstance(fval, (tuple, list)):
if isinstance(fval, basestring) and fval.startswith("^"):
fval = ("^", fval[1:])
else:
fval = ("=", fval)
if not frappe.compare(getattr(d, f, None), fval[0], fval[1]):
add = False
break
if add:
out.append(d)
			if limit and len(out) == limit:
				break
return out
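# Illustrative _filter call matching the docstring above (fieldnames are
# hypothetical):
#     submitted = _filter(doc.get("items"), {"docstatus": 1, "item_code": "^AB"})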
| mit | 9,206,606,112,648,290,000 | 28.8848 | 117 | 0.658582 | false |
napjon/krisk | krisk/plot/make_bar_line.py | 1 | 10559 |
from copy import deepcopy
import numpy as np
import pandas as pd
from krisk.plot.make_chart import insert_series_data, round_list
from krisk.util import future_warning
d_annotate = {'normal': {'show': True, 'position': 'top'}}
def set_full_style_condition(chart, data, c, **kwargs):
if kwargs['full']:
if kwargs['stacked']:
if c:
data = data.div(data.sum(1), axis=0)
chart.option['yAxis']['max'] = 1
else:
raise ValueError("For full to worked, stacked must be set to True")
return data
def set_bar_line_chart(chart, df, x, c, **kwargs):
"""Construct Bar, Line, and Histogram"""
data = None
chart_type = kwargs['type']
if chart_type in ['bar', 'line']:
data = get_bar_or_line_data(df, x, c, **kwargs)
chart.option['xAxis']['data'] = data.index.values.tolist()
elif chart_type == 'hist':
chart_type = 'bar'
data, bins = get_hist_data(df, x, c, **kwargs)
chart.option['xAxis']['data'] = bins
elif chart_type == 'bar_line':
        data = set_barline(chart, df, x, **kwargs)
chart.option['xAxis']['data'] = data.index.values.tolist()
return
elif chart_type in ['bar_tidy', 'line_tidy']:
chart_type = chart_type.replace('_tidy', '')
data = df
chart.option['xAxis']['data'] = data.index.astype(str).tolist()
if chart_type in ['bar', 'line'] and kwargs['type'] != 'hist':
data = set_full_style_condition(chart, data, c, **kwargs)
if c:
# append data for every category
for cat in data.columns:
insert_series_data(data[cat], x, chart_type, chart, cat)
else:
insert_series_data(data, x, chart_type, chart)
series = chart.option['series']
########Provide stacked,annotate, area for bar line hist#################
if c and kwargs['stacked']:
for s in series:
s['stack'] = c
if chart_type == 'line' and kwargs['area']:
s['areaStyle'] = {'normal': {}}
if kwargs['annotate'] == 'all':
s['label'] = deepcopy(d_annotate)
if chart_type == 'bar':
s['label']['normal']['position'] = 'inside'
if kwargs['annotate'] == 'top':
series[-1]['label'] = d_annotate
# TODO: make annotate receive all kinds supported in echarts.
# Add Custom Styling
if kwargs['type'] == 'hist':
histogram_custom_style(chart, data, c, series, **kwargs)
elif chart_type == 'bar':
bar_custom_style(c, series, **kwargs)
elif chart_type == 'line':
line_custom_style(series, **kwargs)
def bar_custom_style(c, series, **kwargs):
# Special Bar Condition: Trendline
if kwargs['trendline']:
trendline = {'name': 'trendline', 'type': 'line',
'lineStyle': {'normal': {'color': '#000'}}}
if c and kwargs['stacked']:
trendline['data'] = [0] * len(series[-1]['data'])
trendline['stack'] = c
elif c is None:
trendline['data'] = series[0]['data']
else:
raise ValueError('Trendline must either stacked category,'
' or not category')
series.append(trendline)
def line_custom_style(series, **kwargs):
# Special Line Condition: Smooth
if kwargs['smooth']:
for s in series:
s['smooth'] = True
def histogram_custom_style(chart, data, c, series, **kwargs):
# Special Histogram Condition: Density
#TODO NEED IMPROVEMENT!
if kwargs['type'] == 'hist' and kwargs['density']:
density = {'name':'density', 'type': 'line', 'smooth': True,
'lineStyle': {'normal': {'color': '#000'}}}
chart.option['xAxis']['boundaryGap'] = False
# The density have to be closed at zero. So all of xAxis and series
# must be updated to incorporate the changes
chart.option['xAxis']['data'] = [0] + chart.option['xAxis']['data'] + [0]
for s in series:
s['data'] = [0] + s['data']
if c and kwargs['stacked']:
density['data'] = [0] + round_list(data.sum(axis=1)) + [0]
elif c is None:
density['data'] = [0] + round_list(data) + [0]
else:
raise ValueError('Density must either stacked category, '
'or not category')
series.append(density)
def get_bar_or_line_data(df, x, c, y, **kwargs):
"""Get Bar and Line manipulated data"""
if c and y:
data = df.pivot_table(
index=x,
values=y,
columns=c,
aggfunc=kwargs['how'],
fill_value=None)
elif c and y is None:
data = pd.crosstab(df[x], df[c])
elif c is None and y:
data = df.groupby(x)[y].agg(kwargs['how'])
else:
data = df[x].value_counts()
# Specify sort_on and order method
sort_on = kwargs['sort_on']
descr_keys = pd.Series([0]).describe().keys().tolist()
if isinstance(sort_on, str):
assert sort_on in ['index','values'] + descr_keys
if sort_on == 'index':
data.sort_index(inplace=True, ascending=kwargs['ascending'])
else:
if sort_on != 'values':
val_deviation = sort_on(data) if callable(sort_on) else sort_on
data = data - val_deviation
if c:
assert kwargs['sort_c_on'] is not None
(data.sort_values(kwargs['sort_c_on'],
inplace=True,
ascending=kwargs['ascending']))
else:
data.sort_values(inplace=True, ascending=kwargs['ascending'])
return data
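# Illustrative call (column names are hypothetical): pivot `y` by category
# `c` against `x`, aggregated with `how`:
#     data = get_bar_or_line_data(df, 'year', 'region', 'sales',
#                                 how='mean', sort_on='index', ascending=True)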
def get_hist_data(df, x, c, **kwargs):
"""Get Histogram manipulated data"""
y_val, x_val = np.histogram(
df[x], bins=kwargs['bins'], normed=kwargs['normed'])
bins = x_val.astype(int).tolist()
if c:
data = pd.DataFrame()
for cat, sub in df.groupby(c):
data[cat] = (pd.cut(sub[x], x_val).value_counts(
sort=False, normalize=kwargs['normed']))
else:
data = pd.Series(y_val)
return data, bins
def set_barline(chart, df, x, **kwargs):
"""Set Bar-Line charts"""
ybar = kwargs['ybar']
yline = kwargs['yline']
if kwargs['is_distinct'] is True:
data = df[[x, ybar, yline]].drop_duplicates(subset=[x]).copy()
data.index = data.pop(x)
else:
data = (df
.groupby(x)
.agg({ybar: kwargs['bar_aggfunc'],
yline: kwargs['line_aggfunc']}))
if kwargs['sort_on'] == 'index':
data.sort_index(ascending=kwargs['ascending'], inplace=True)
else:
data.sort_values(kwargs[kwargs['sort_on']],
ascending=kwargs['ascending'], inplace=True)
def get_series(col, type): return dict(name=col, type=type,
data=round_list(data[col]))
chart.option['series'] = [
get_series(ybar, 'bar'),
dict(yAxisIndex=1, **get_series(yline, 'line'))
]
if kwargs['hide_split_line'] is True:
def get_yaxis(col): return {'name': col, 'splitLine': {'show': False}}
chart.option['yAxis'] = [get_yaxis(ybar), get_yaxis(yline)]
if kwargs['style_tooltip'] is True:
chart.set_tooltip_style(axis_pointer='shadow', trigger='axis')
chart.option['xAxis']['data'] = data.index.values.tolist()
return data
def set_waterfall(chart, s, **kwargs):
# TODO
# * Find a way to naming index and value
invisible_bar = {'name': '',
'type': 'bar',
'stack': 'stack',
"itemStyle": {
"normal": {
"barBorderColor": 'rgba(0,0,0,0)',
"color": 'rgba(0,0,0,0)'
},
"emphasis": {
"barBorderColor": 'rgba(0,0,0,0)',
"color": 'rgba(0,0,0,0)'
}
}}
visible_bar = {'type': 'bar', 'stack': 'stack'}
invisible_series = s.cumsum().shift(1).fillna(0)
    if not (invisible_series >= 0).all():
raise NotImplementedError("cumulative sum should be positive")
invisible_series = np.where(s < 0,
invisible_series - s.abs(),
invisible_series)
invisible_bar['data'] = invisible_series.round(3).tolist()
chart.option['series'].append(invisible_bar)
def add_bar(series, name):
"""Append bar to chart series"""
bar = deepcopy(visible_bar)
bar['name'] = name
bar['data'] = series.values.tolist()
chart.option['series'].append(bar)
def add_annotate(bar_series, position):
bar_series['label'] = deepcopy(d_annotate)
bar_series['label']['normal']['position'] = position
if kwargs['color_coded']:
boolean_pivot = (pd.DataFrame(s)
.pivot_table(values=s.name,
index=s.index,
columns=s > 0)
.abs()
.round(3)
.fillna('-'))
add_bar(boolean_pivot[True], kwargs['up_name'])
add_bar(boolean_pivot[False], kwargs['down_name'])
chart.option['legend']['data'] = [kwargs['up_name'],
kwargs['down_name']]
else:
add_bar(s.abs().round(3), s.name)
if kwargs['annotate']:
if kwargs['annotate'] == 'inside':
for bar_series in chart.option['series']:
add_annotate(bar_series, kwargs['annotate'])
else:
add_annotate(chart.option['series'][1], "top")
if kwargs['color_coded']:
add_annotate(chart.option['series'][2], "bottom")
chart.option['xAxis']['data'] = s.index.values.tolist()
chart.set_tooltip_style(trigger='axis', axis_pointer='shadow')
chart.option['tooltip']['formatter'] = """function (params) {
var tar;
if (params[1].value != '-') {
tar = params[1];
}
else {
tar = params[2];
}
return tar.name + '<br/>' + tar.seriesName + ' : ' + tar.value;
}"""
return s
| bsd-3-clause | 4,542,803,885,063,292,000 | 32.100313 | 81 | 0.5152 | false |
geopm/geopm | integration/experiment/monitor/run_monitor_hpl_mkl.py | 1 | 2369 |
#!/usr/bin/env python3
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
'''
Run Intel (R) MKL HPL with the monitor agent.
'''
import argparse
from experiment.monitor import monitor
from experiment import machine
from apps.hpl_mkl import hpl_mkl
from apps.hpl_netlib import hpl_netlib
if __name__ == '__main__':
parser = argparse.ArgumentParser()
monitor.setup_run_args(parser)
hpl_netlib.setup_run_args(parser)
args, extra_args = parser.parse_known_args()
mach = machine.init_output_dir(args.output_dir)
app_conf = hpl_mkl.HplMklAppConf(num_nodes=args.node_count,
mach=mach,
frac_dram_per_node=args.frac_dram_per_node)
monitor.launch(app_conf=app_conf, args=args,
experiment_cli_args=extra_args)
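# Typical invocation sketch (flag spellings assumed from the setup_run_args
# helpers imported above; values are placeholders):
#     ./run_monitor_hpl_mkl.py --node-count=2 --output-dir=hpl_monitor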
| bsd-3-clause | 6,653,605,277,760,801,000 | 41.303571 | 80 | 0.713803 | false |
reddit-diabetes/musicbot-cloud | musicbot/permissions.py | 1 | 6508 |
import shutil
import logging
import traceback
import configparser
import discord
log = logging.getLogger(__name__)
class PermissionsDefaults:
perms_file = 'config/permissions.ini'
CommandWhiteList = set()
CommandBlackList = set()
IgnoreNonVoice = set()
GrantToRoles = set()
UserList = set()
MaxSongs = 0
MaxSongLength = 0
MaxPlaylistLength = 0
AllowPlaylists = True
InstaSkip = False
class Permissions:
def __init__(self, config_file, grant_all=None):
self.config_file = config_file
self.config = configparser.ConfigParser(interpolation=None)
if not self.config.read(config_file, encoding='utf-8'):
log.info("Permissions file not found, copying example_permissions.ini")
try:
shutil.copy('config/example_permissions.ini', config_file)
self.config.read(config_file, encoding='utf-8')
except Exception as e:
traceback.print_exc()
raise RuntimeError("Unable to copy config/example_permissions.ini to {}: {}".format(config_file, e))
self.default_group = PermissionGroup('Default', self.config['Default'])
self.groups = set()
for section in self.config.sections():
self.groups.add(PermissionGroup(section, self.config[section]))
# Create a fake section to fallback onto the permissive default values to grant to the owner
# noinspection PyTypeChecker
owner_group = PermissionGroup("Owner (auto)", configparser.SectionProxy(self.config, None))
if hasattr(grant_all, '__iter__'):
owner_group.user_list = set(grant_all)
self.groups.add(owner_group)
async def async_validate(self, bot):
log.debug("Validating permissions...")
og = discord.utils.get(self.groups, name="Owner (auto)")
if 'auto' in og.user_list:
log.debug("Fixing automatic owner group")
og.user_list = {bot.config.owner_id}
def save(self):
with open(self.config_file, 'w') as f:
self.config.write(f)
def for_user(self, user):
"""
Returns the first PermissionGroup a user belongs to
:param user: A discord User or Member object
"""
for group in self.groups:
if user.id in group.user_list:
return group
# The only way I could search for roles is if I add a `server=None` param and pass that too
if type(user) == discord.User:
return self.default_group
# We loop again so that we don't return a role based group before we find an assigned one
for group in self.groups:
for role in user.roles:
if role.id in group.granted_to_roles:
return group
return self.default_group
def create_group(self, name, **kwargs):
self.config.read_dict({name:kwargs})
self.groups.add(PermissionGroup(name, self.config[name]))
# TODO: Test this
class PermissionGroup:
def __init__(self, name, section_data):
self.name = name
self.command_whitelist = section_data.get('CommandWhiteList', fallback=PermissionsDefaults.CommandWhiteList)
self.command_blacklist = section_data.get('CommandBlackList', fallback=PermissionsDefaults.CommandBlackList)
self.ignore_non_voice = section_data.get('IgnoreNonVoice', fallback=PermissionsDefaults.IgnoreNonVoice)
self.granted_to_roles = section_data.get('GrantToRoles', fallback=PermissionsDefaults.GrantToRoles)
self.user_list = section_data.get('UserList', fallback=PermissionsDefaults.UserList)
self.max_songs = section_data.get('MaxSongs', fallback=PermissionsDefaults.MaxSongs)
self.max_song_length = section_data.get('MaxSongLength', fallback=PermissionsDefaults.MaxSongLength)
self.max_playlist_length = section_data.get('MaxPlaylistLength', fallback=PermissionsDefaults.MaxPlaylistLength)
self.allow_playlists = section_data.get('AllowPlaylists', fallback=PermissionsDefaults.AllowPlaylists)
self.instaskip = section_data.get('InstaSkip', fallback=PermissionsDefaults.InstaSkip)
self.validate()
def validate(self):
if self.command_whitelist:
self.command_whitelist = set(self.command_whitelist.lower().split())
if self.command_blacklist:
self.command_blacklist = set(self.command_blacklist.lower().split())
if self.ignore_non_voice:
self.ignore_non_voice = set(self.ignore_non_voice.lower().split())
if self.granted_to_roles:
self.granted_to_roles = set(self.granted_to_roles.split())
if self.user_list:
self.user_list = set(self.user_list.split())
try:
self.max_songs = max(0, int(self.max_songs))
except:
self.max_songs = PermissionsDefaults.MaxSongs
try:
self.max_song_length = max(0, int(self.max_song_length))
except:
self.max_song_length = PermissionsDefaults.MaxSongLength
try:
self.max_playlist_length = max(0, int(self.max_playlist_length))
except:
self.max_playlist_length = PermissionsDefaults.MaxPlaylistLength
self.allow_playlists = configparser.RawConfigParser.BOOLEAN_STATES.get(
self.allow_playlists, PermissionsDefaults.AllowPlaylists
)
self.instaskip = configparser.RawConfigParser.BOOLEAN_STATES.get(
self.instaskip, PermissionsDefaults.InstaSkip
)
@staticmethod
def _process_list(seq, *, split=' ', lower=True, strip=', ', coerce=str, rcoerce=list):
lower = str.lower if lower else None
_strip = (lambda x: x.strip(strip)) if strip else None
coerce = coerce if callable(coerce) else None
rcoerce = rcoerce if callable(rcoerce) else None
for ch in strip:
seq = seq.replace(ch, split)
values = [i for i in seq.split(split) if i]
for fn in (_strip, lower, coerce):
if fn: values = map(fn, values)
return rcoerce(values)
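    # Illustrative normalization performed by _process_list:
    #     PermissionGroup._process_list("Play, Skip  QUEUE")
    #     -> ['play', 'skip', 'queue']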
def add_user(self, uid):
self.user_list.add(uid)
def remove_user(self, uid):
if uid in self.user_list:
self.user_list.remove(uid)
def __repr__(self):
return "<PermissionGroup: %s>" % self.name
def __str__(self):
return "<PermissionGroup: %s: %s>" % (self.name, self.__dict__)
| mit | 7,524,979,346,034,419,000 | 34.369565 | 120 | 0.638291 | false |
yougov/pmxbot | pmxbot/system.py | 1 | 1701 |
"""
System commands
"""
import sys
import operator
import io
import time
import importlib_resources
import pmxbot.core
from pmxbot.core import command, Handler
@command(aliases='h')
def help(rest):
"""Help (this command)"""
rs = rest.strip()
if rs:
# give help for matching commands
for handler in Handler._registry:
if handler.name == rs.lower():
yield '!%s: %s' % (handler.name, handler.doc)
break
else:
yield "command not found"
return
# give help for all commands
def mk_entries():
handlers = (
handler
for handler in Handler._registry
if type(handler) is pmxbot.core.CommandHandler
)
handlers = sorted(handlers, key=operator.attrgetter('name'))
for handler in handlers:
res = "!" + handler.name
if handler.aliases:
alias_names = (alias.name for alias in handler.aliases)
res += " (%s)" % ', '.join(alias_names)
yield res
o = io.StringIO(" ".join(mk_entries()))
more = o.read(160)
while more:
yield more
time.sleep(0.3)
more = o.read(160)
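# In-channel usage, given the '!' prefix shown when listing commands above:
#     !help         -> paged list of all registered commands
#     !help logo    -> doc line for the logo command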
@command(aliases=('controlaltdelete', 'cad', 'restart', 'quit'))
def ctlaltdel(rest):
"""Quits pmxbot. A supervisor should automatically restart it."""
if 'real' in rest.lower():
sys.exit()
return "Really?"
@command()
def logo():
"""The pmxbot logo in ascii art. Fixed-width font recommended!"""
logo_txt = importlib_resources.read_text('pmxbot', 'asciilogo.txt')
	for line in logo_txt.splitlines():
		yield line.rstrip()
| mit | 8,581,655,922,734,470,000 | 24.772727 | 84 | 0.579659 | false |
mashaka/Inspiry | algo/trip.py | 1 | 2105 |
#####
# Author: Maria Sandrikova
# Copyright 2017, Inspiry
#####
from typing import List
from .compute_tools import TripHyposesis
NOT_FOUND = -1
class Trip:
def __init__(self, trip_hyposesis: TripHyposesis):
self.posts = trip_hyposesis.posts
self.country = trip_hyposesis.country
city_names = []
self.cities = []
for post in self.posts:
# Store city in post.City class format
if post.city.name not in city_names:
self.cities.append(post.city)
city_names.append(post.city.name)
self.start = self.posts[0].created
self.end = self.posts[-1].created
self.name = self.generate_name()
self.avatar = self.select_avatar()
self.hashtags = self.generate_hashtags()
self.owner = [self.posts[0].owner]
def generate_name(self) -> str:
return self.country + ', ' + str(self.posts[0].get_year())
def select_avatar(self) -> List[dict]:
max_likes = NOT_FOUND
avatar = None
for post in self.posts:
if post.likes > max_likes and post.photo is not None:
max_likes = post.likes
avatar = post.photo
return avatar
def generate_hashtags(self) -> List[str]:
hashtags = set()
for post in self.posts:
hashtags |= set(post.hashtags)
return list(hashtags)
def serialize(self) -> dict:
trip_dict = dict()
trip_dict['name'] = self.name
trip_dict['visible'] = True
trip_dict['hashtags'] = self.hashtags
trip_dict['owners'] = self.owner
trip_dict['comments'] = []
trip_dict['cities'] = [city.serialize() for city in self.cities]
trip_dict['avatar'] = self.avatar
trip_dict['start'] = self.start
trip_dict['end'] = self.end
trip_dict['countries'] = [self.country]
trip_dict['posts'] = self.get_posts_ids()
return trip_dict
def get_posts_ids(self) -> List[str]:
post_ids = [post.id for post in self.posts]
return post_ids
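# Sketch of the intended downstream flow (TripHyposesis is defined in
# compute_tools; variable names here are illustrative):
#     trip = Trip(trip_hypothesis)
#     record = trip.serialize()  # dict ready for JSON/database storage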
| apache-2.0 | -8,931,164,978,473,874,000 | 30.893939 | 72 | 0.575772 | false |
smilusingjavascript/blink | Source/bindings/scripts/idl_definitions.py | 2 | 35716 |
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Blink IDL Intermediate Representation (IR) classes.
Classes are primarily constructors, which build an IdlDefinitions object
(and various contained objects) from an AST (produced by blink_idl_parser).
This is in two steps:
* Constructors walk the AST, creating objects.
* Typedef resolution.
Typedefs are all resolved here, and not stored in IR.
Typedef resolution uses some auxiliary classes and OOP techniques to make this
a generic call, via the resolve_typedefs() method.
Class hierarchy (mostly containment, '<' for inheritance):
IdlDefinitions
IdlCallbackFunction < TypedObject
IdlEnum :: FIXME: remove, just use a dict for enums
IdlInterface
IdlAttribute < TypedObject
IdlConstant < TypedObject
IdlLiteral
IdlOperation < TypedObject
IdlArgument < TypedObject
IdlStringifier
IdlException < IdlInterface
(same contents as IdlInterface)
TypedObject :: mixin for typedef resolution
IdlArgument is 'picklable', as it is stored in interfaces_info.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import abc
from idl_types import IdlType, IdlUnionType, IdlArrayType, IdlSequenceType, IdlNullableType
SPECIAL_KEYWORD_LIST = ['GETTER', 'SETTER', 'DELETER']
STANDARD_TYPEDEFS = {
# http://www.w3.org/TR/WebIDL/#common-DOMTimeStamp
'DOMTimeStamp': 'unsigned long long',
}
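# e.g. an attribute declared as `DOMTimeStamp t` resolves to `unsigned long long t`.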
################################################################################
# TypedObject (mixin for typedef resolution)
################################################################################
class TypedObject(object):
"""Object with a type, such as an Attribute or Operation (return value).
The type can be an actual type, or can be a typedef, which must be resolved
before passing data to the code generator.
"""
__metaclass__ = abc.ABCMeta
idl_type = None
def resolve_typedefs(self, typedefs):
"""Resolve typedefs to actual types in the object."""
# Constructors don't have their own return type, because it's the
# interface itself.
if not self.idl_type:
return
# Need to re-assign self.idl_type, not just mutate idl_type,
# since type(idl_type) may change.
self.idl_type = self.idl_type.resolve_typedefs(typedefs)
################################################################################
# Definitions (main container class)
################################################################################
class IdlDefinitions(object):
def __init__(self, idl_name, node):
"""Args: node: AST root node, class == 'File'"""
self.callback_functions = {}
self.dictionaries = {}
self.enumerations = {}
self.implements = []
self.interfaces = {}
self.idl_name = idl_name
node_class = node.GetClass()
if node_class != 'File':
raise ValueError('Unrecognized node class: %s' % node_class)
typedefs = dict((typedef_name, IdlType(type_name))
for typedef_name, type_name in
STANDARD_TYPEDEFS.iteritems())
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Interface':
interface = IdlInterface(idl_name, child)
self.interfaces[interface.name] = interface
elif child_class == 'Exception':
exception = IdlException(idl_name, child)
# For simplicity, treat exceptions as interfaces
self.interfaces[exception.name] = exception
elif child_class == 'Typedef':
type_name = child.GetName()
typedefs[type_name] = typedef_node_to_type(child)
elif child_class == 'Enum':
enumeration = IdlEnum(idl_name, child)
self.enumerations[enumeration.name] = enumeration
elif child_class == 'Callback':
callback_function = IdlCallbackFunction(idl_name, child)
self.callback_functions[callback_function.name] = callback_function
elif child_class == 'Implements':
self.implements.append(IdlImplement(child))
elif child_class == 'Dictionary':
dictionary = IdlDictionary(idl_name, child)
self.dictionaries[dictionary.name] = dictionary
else:
raise ValueError('Unrecognized node class: %s' % child_class)
# Typedefs are not stored in IR:
# Resolve typedefs with the actual types and then discard the Typedefs.
# http://www.w3.org/TR/WebIDL/#idl-typedefs
self.resolve_typedefs(typedefs)
def resolve_typedefs(self, typedefs):
for callback_function in self.callback_functions.itervalues():
callback_function.resolve_typedefs(typedefs)
for interface in self.interfaces.itervalues():
interface.resolve_typedefs(typedefs)
def update(self, other):
"""Update with additional IdlDefinitions."""
for interface_name, new_interface in other.interfaces.iteritems():
if not new_interface.is_partial:
# Add as new interface
self.interfaces[interface_name] = new_interface
continue
# Merge partial to existing interface
try:
self.interfaces[interface_name].merge(new_interface)
except KeyError:
raise Exception('Tried to merge partial interface for {0}, '
'but no existing interface by that name'
.format(interface_name))
# Merge callbacks and enumerations
self.enumerations.update(other.enumerations)
self.callback_functions.update(other.callback_functions)
################################################################################
# Callback Functions
################################################################################
class IdlCallbackFunction(TypedObject):
def __init__(self, idl_name, node):
children = node.GetChildren()
num_children = len(children)
if num_children != 2:
raise ValueError('Expected 2 children, got %s' % num_children)
type_node, arguments_node = children
arguments_node_class = arguments_node.GetClass()
if arguments_node_class != 'Arguments':
raise ValueError('Expected Arguments node, got %s' % arguments_node_class)
self.idl_name = idl_name
self.name = node.GetName()
self.idl_type = type_node_to_type(type_node)
self.arguments = arguments_node_to_arguments(idl_name, arguments_node)
def resolve_typedefs(self, typedefs):
TypedObject.resolve_typedefs(self, typedefs)
for argument in self.arguments:
argument.resolve_typedefs(typedefs)
################################################################################
# Dictionary
################################################################################
class IdlDictionary(object):
def __init__(self, idl_name, node):
self.extended_attributes = {}
self.is_partial = node.GetProperty('Partial') or False
self.idl_name = idl_name
self.name = node.GetName()
self.members = []
self.parent = None
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Inherit':
self.parent = child.GetName()
elif child_class == 'Key':
self.members.append(IdlDictionaryMember(idl_name, child))
elif child_class == 'ExtAttributes':
self.extended_attributes = (
ext_attributes_node_to_extended_attributes(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
class IdlDictionaryMember(object):
def __init__(self, idl_name, node):
self.default_value = None
self.extended_attributes = {}
self.idl_type = None
self.idl_name = idl_name
self.name = node.GetName()
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'Default':
self.default_value = default_node_to_idl_literal(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = (
ext_attributes_node_to_extended_attributes(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Enumerations
################################################################################
class IdlEnum(object):
# FIXME: remove, just treat enums as a dictionary
def __init__(self, idl_name, node):
self.idl_name = idl_name
self.name = node.GetName()
self.values = []
for child in node.GetChildren():
self.values.append(child.GetName())
################################################################################
# Interfaces and Exceptions
################################################################################
class IdlInterface(object):
def __init__(self, idl_name, node=None):
self.attributes = []
self.constants = []
self.constructors = []
self.custom_constructors = []
self.extended_attributes = {}
self.operations = []
self.parent = None
self.stringifier = None
self.original_interface = None
self.partial_interfaces = []
if not node: # Early exit for IdlException.__init__
return
self.is_callback = node.GetProperty('CALLBACK') or False
self.is_exception = False
# FIXME: uppercase 'Partial' => 'PARTIAL' in base IDL parser
self.is_partial = node.GetProperty('Partial') or False
self.idl_name = idl_name
self.name = node.GetName()
self.idl_type = IdlType(self.name)
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Attribute':
self.attributes.append(IdlAttribute(idl_name, child))
elif child_class == 'Const':
self.constants.append(IdlConstant(idl_name, child))
elif child_class == 'ExtAttributes':
extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
self.constructors, self.custom_constructors = (
extended_attributes_to_constructors(idl_name, extended_attributes))
clear_constructor_attributes(extended_attributes)
self.extended_attributes = extended_attributes
elif child_class == 'Operation':
self.operations.append(IdlOperation(idl_name, child))
elif child_class == 'Inherit':
self.parent = child.GetName()
elif child_class == 'Stringifier':
self.stringifier = IdlStringifier(idl_name, child)
self.process_stringifier()
else:
raise ValueError('Unrecognized node class: %s' % child_class)
def resolve_typedefs(self, typedefs):
for attribute in self.attributes:
attribute.resolve_typedefs(typedefs)
for constant in self.constants:
constant.resolve_typedefs(typedefs)
for constructor in self.constructors:
constructor.resolve_typedefs(typedefs)
for custom_constructor in self.custom_constructors:
custom_constructor.resolve_typedefs(typedefs)
for operation in self.operations:
operation.resolve_typedefs(typedefs)
def process_stringifier(self):
"""Add the stringifier's attribute or named operation child, if it has
one, as a regular attribute/operation of this interface."""
if self.stringifier.attribute:
self.attributes.append(self.stringifier.attribute)
elif self.stringifier.operation:
self.operations.append(self.stringifier.operation)
def merge(self, other):
"""Merge in another interface's members (e.g., partial interface)"""
self.attributes.extend(other.attributes)
self.constants.extend(other.constants)
self.operations.extend(other.operations)
class IdlException(IdlInterface):
# Properly exceptions and interfaces are distinct, and thus should inherit a
# common base class (say, "IdlExceptionOrInterface").
# However, there is only one exception (DOMException), and new exceptions
# are not expected. Thus it is easier to implement exceptions as a
# restricted subclass of interfaces.
# http://www.w3.org/TR/WebIDL/#idl-exceptions
def __init__(self, idl_name, node):
# Exceptions are similar to Interfaces, but simpler
IdlInterface.__init__(self, idl_name)
self.is_callback = False
self.is_exception = True
self.is_partial = False
self.idl_name = idl_name
self.name = node.GetName()
self.idl_type = IdlType(self.name)
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Attribute':
attribute = IdlAttribute(idl_name, child)
self.attributes.append(attribute)
elif child_class == 'Const':
self.constants.append(IdlConstant(idl_name, child))
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
elif child_class == 'ExceptionOperation':
self.operations.append(IdlOperation.from_exception_operation_node(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Attributes
################################################################################
class IdlAttribute(TypedObject):
def __init__(self, idl_name, node):
self.is_read_only = node.GetProperty('READONLY') or False
self.is_static = node.GetProperty('STATIC') or False
self.idl_name = idl_name
self.name = node.GetName()
# Defaults, overridden below
self.idl_type = None
self.extended_attributes = {}
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Constants
################################################################################
class IdlConstant(TypedObject):
def __init__(self, idl_name, node):
children = node.GetChildren()
num_children = len(children)
if num_children < 2 or num_children > 3:
raise ValueError('Expected 2 or 3 children, got %s' % num_children)
type_node = children[0]
value_node = children[1]
value_node_class = value_node.GetClass()
if value_node_class != 'Value':
raise ValueError('Expected Value node, got %s' % value_node_class)
self.idl_name = idl_name
self.name = node.GetName()
# ConstType is more limited than Type, so subtree is smaller and
# we don't use the full type_node_to_type function.
self.idl_type = type_node_inner_to_type(type_node)
# FIXME: This code is unnecessarily complicated due to the rather
# inconsistent way the upstream IDL parser outputs default values.
# http://crbug.com/374178
if value_node.GetProperty('TYPE') == 'float':
self.value = value_node.GetProperty('VALUE')
else:
self.value = value_node.GetName()
if num_children == 3:
ext_attributes_node = children[2]
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, ext_attributes_node)
else:
self.extended_attributes = {}
################################################################################
# Literals
################################################################################
class IdlLiteral(object):
def __init__(self, idl_type, value):
self.idl_type = idl_type
self.value = value
self.is_null = False
def __str__(self):
if self.idl_type == 'DOMString':
return 'String("%s")' % self.value
if self.idl_type == 'integer':
return '%d' % self.value
if self.idl_type == 'float':
return '%g' % self.value
if self.idl_type == 'boolean':
return 'true' if self.value else 'false'
raise ValueError('Unsupported literal type: %s' % self.idl_type)
class IdlLiteralNull(IdlLiteral):
def __init__(self):
self.idl_type = 'NULL'
self.value = None
self.is_null = True
def __str__(self):
return 'nullptr'
def default_node_to_idl_literal(node):
# FIXME: This code is unnecessarily complicated due to the rather
# inconsistent way the upstream IDL parser outputs default values.
# http://crbug.com/374178
idl_type = node.GetProperty('TYPE')
if idl_type == 'DOMString':
value = node.GetProperty('NAME')
if '"' in value or '\\' in value:
raise ValueError('Unsupported string value: %r' % value)
return IdlLiteral(idl_type, value)
if idl_type == 'integer':
return IdlLiteral(idl_type, int(node.GetProperty('NAME'), base=0))
if idl_type == 'float':
return IdlLiteral(idl_type, float(node.GetProperty('VALUE')))
if idl_type == 'boolean':
return IdlLiteral(idl_type, node.GetProperty('VALUE'))
if idl_type == 'NULL':
return IdlLiteralNull()
raise ValueError('Unrecognized default value type: %s' % idl_type)
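# Illustration: an IDL default such as `optional DOMString mode = "open"`
# arrives here as a DOMString node and becomes IdlLiteral('DOMString', 'open'),
# which __str__ renders as String("open").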
################################################################################
# Operations
################################################################################
class IdlOperation(TypedObject):
def __init__(self, idl_name, node=None):
self.arguments = []
self.extended_attributes = {}
self.specials = []
self.is_constructor = False
if not node:
self.is_static = False
return
self.idl_name = idl_name
self.name = node.GetName() # FIXME: should just be: or ''
# FIXME: AST should use None internally
if self.name == '_unnamed_':
self.name = ''
self.is_static = node.GetProperty('STATIC') or False
property_dictionary = node.GetProperties()
for special_keyword in SPECIAL_KEYWORD_LIST:
if special_keyword in property_dictionary:
self.specials.append(special_keyword.lower())
self.idl_type = None
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Arguments':
self.arguments = arguments_node_to_arguments(idl_name, child)
elif child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
@classmethod
def from_exception_operation_node(cls, idl_name, node):
# Needed to handle one case in DOMException.idl:
# // Override in a Mozilla compatible format
# [NotEnumerable] DOMString toString();
# FIXME: can we remove this? replace with a stringifier?
operation = cls(idl_name)
operation.name = node.GetName()
children = node.GetChildren()
if len(children) < 1 or len(children) > 2:
raise ValueError('ExceptionOperation node with %s children, expected 1 or 2' % len(children))
type_node = children[0]
operation.idl_type = type_node_to_type(type_node)
if len(children) > 1:
ext_attributes_node = children[1]
operation.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, ext_attributes_node)
return operation
@classmethod
def constructor_from_arguments_node(cls, name, idl_name, arguments_node):
constructor = cls(idl_name)
constructor.name = name
constructor.arguments = arguments_node_to_arguments(idl_name, arguments_node)
constructor.is_constructor = True
return constructor
def resolve_typedefs(self, typedefs):
TypedObject.resolve_typedefs(self, typedefs)
for argument in self.arguments:
argument.resolve_typedefs(typedefs)
################################################################################
# Arguments
################################################################################
class IdlArgument(TypedObject):
def __init__(self, idl_name, node):
self.extended_attributes = {}
self.idl_type = None
self.is_optional = node.GetProperty('OPTIONAL') # syntax: (optional T)
self.is_variadic = False # syntax: (T...)
self.idl_name = idl_name
self.name = node.GetName()
self.default_value = None
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
elif child_class == 'Argument':
child_name = child.GetName()
if child_name != '...':
raise ValueError('Unrecognized Argument node; expected "...", got "%s"' % child_name)
self.is_variadic = child.GetProperty('ELLIPSIS') or False
elif child_class == 'Default':
self.default_value = default_node_to_idl_literal(child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
def __getstate__(self):
# FIXME: Return a picklable object which has enough information to
# unpickle.
return {}
def __setstate__(self, state):
pass
def arguments_node_to_arguments(idl_name, node):
# [Constructor] and [CustomConstructor] without arguments (the bare form)
# have None instead of an arguments node, but have the same meaning as using
# an empty argument list, [Constructor()], so special-case this.
# http://www.w3.org/TR/WebIDL/#Constructor
if node is None:
return []
return [IdlArgument(idl_name, argument_node)
for argument_node in node.GetChildren()]
################################################################################
# Stringifiers
################################################################################
class IdlStringifier(object):
def __init__(self, idl_name, node):
self.attribute = None
self.operation = None
self.extended_attributes = {}
self.idl_name = idl_name
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Attribute':
self.attribute = IdlAttribute(idl_name, child)
elif child_class == 'Operation':
operation = IdlOperation(idl_name, child)
if operation.name:
self.operation = operation
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
# Copy the stringifier's extended attributes (such as [Unforgable]) onto
# the underlying attribute or operation, if there is one.
if self.attribute or self.operation:
(self.attribute or self.operation).extended_attributes.update(
self.extended_attributes)
################################################################################
# Implement statements
################################################################################
class IdlImplement(object):
def __init__(self, node):
self.left_interface = node.GetName()
self.right_interface = node.GetProperty('REFERENCE')
################################################################################
# Extended attributes
################################################################################
def ext_attributes_node_to_extended_attributes(idl_name, node):
"""
Returns:
Dictionary of {ExtAttributeName: ExtAttributeValue}.
Value is usually a string, with these exceptions:
Constructors: value is a list of Arguments nodes, corresponding to
possible signatures of the constructor.
CustomConstructors: value is a list of Arguments nodes, corresponding to
possible signatures of the custom constructor.
NamedConstructor: value is a Call node, corresponding to the single
signature of the named constructor.
SetWrapperReferenceTo: value is an Arguments node.
"""
# Primarily just make a dictionary from the children.
# The only complexity is handling various types of constructors:
# Constructors and Custom Constructors can have duplicate entries due to
# overloading, and thus are stored in temporary lists.
# However, Named Constructors cannot be overloaded, and thus do not have
# a list.
# FIXME: move Constructor logic into separate function, instead of modifying
# extended attributes in-place.
constructors = []
custom_constructors = []
extended_attributes = {}
def child_node(extended_attribute_node):
children = extended_attribute_node.GetChildren()
if not children:
return None
if len(children) > 1:
raise ValueError('ExtAttributes node with %s children, expected at most 1' % len(children))
return children[0]
extended_attribute_node_list = node.GetChildren()
for extended_attribute_node in extended_attribute_node_list:
name = extended_attribute_node.GetName()
child = child_node(extended_attribute_node)
child_class = child and child.GetClass()
if name == 'Constructor':
if child_class and child_class != 'Arguments':
raise ValueError('Constructor only supports Arguments as child, but has child of class: %s' % child_class)
constructors.append(child)
elif name == 'CustomConstructor':
if child_class and child_class != 'Arguments':
raise ValueError('[CustomConstructor] only supports Arguments as child, but has child of class: %s' % child_class)
custom_constructors.append(child)
elif name == 'NamedConstructor':
if child_class and child_class != 'Call':
raise ValueError('[NamedConstructor] only supports Call as child, but has child of class: %s' % child_class)
extended_attributes[name] = child
elif name == 'SetWrapperReferenceTo':
if not child:
raise ValueError('[SetWrapperReferenceTo] requires a child, but has none.')
if child_class != 'Arguments':
raise ValueError('[SetWrapperReferenceTo] only supports Arguments as child, but has child of class: %s' % child_class)
extended_attributes[name] = arguments_node_to_arguments(idl_name, child)
elif child:
raise ValueError('ExtAttributes node with unexpected children: %s' % name)
else:
value = extended_attribute_node.GetProperty('VALUE')
extended_attributes[name] = value
# Store constructors and custom constructors in special list attributes,
# which are deleted later. Note plural in key.
if constructors:
extended_attributes['Constructors'] = constructors
if custom_constructors:
extended_attributes['CustomConstructors'] = custom_constructors
return extended_attributes
def extended_attributes_to_constructors(idl_name, extended_attributes):
"""Returns constructors and custom_constructors (lists of IdlOperations).
Auxiliary function for IdlInterface.__init__.
"""
constructor_list = extended_attributes.get('Constructors', [])
constructors = [
IdlOperation.constructor_from_arguments_node('Constructor', idl_name, arguments_node)
for arguments_node in constructor_list]
custom_constructor_list = extended_attributes.get('CustomConstructors', [])
custom_constructors = [
IdlOperation.constructor_from_arguments_node('CustomConstructor', idl_name, arguments_node)
for arguments_node in custom_constructor_list]
if 'NamedConstructor' in extended_attributes:
# FIXME: support overloaded named constructors, and make homogeneous
name = 'NamedConstructor'
call_node = extended_attributes['NamedConstructor']
extended_attributes['NamedConstructor'] = call_node.GetName()
children = call_node.GetChildren()
if len(children) != 1:
raise ValueError('NamedConstructor node expects 1 child, got %s.' % len(children))
arguments_node = children[0]
named_constructor = IdlOperation.constructor_from_arguments_node('NamedConstructor', idl_name, arguments_node)
# FIXME: should return named_constructor separately; appended for Perl
constructors.append(named_constructor)
return constructors, custom_constructors
def clear_constructor_attributes(extended_attributes):
# Deletes Constructor*s* (plural), sets Constructor (singular)
if 'Constructors' in extended_attributes:
del extended_attributes['Constructors']
extended_attributes['Constructor'] = None
if 'CustomConstructors' in extended_attributes:
del extended_attributes['CustomConstructors']
extended_attributes['CustomConstructor'] = None
################################################################################
# Types
################################################################################
def type_node_to_type(node):
children = node.GetChildren()
if len(children) < 1 or len(children) > 2:
raise ValueError('Type node expects 1 or 2 children (type + optional array []), got %s (multi-dimensional arrays are not supported).' % len(children))
base_type = type_node_inner_to_type(children[0])
if node.GetProperty('NULLABLE'):
base_type = IdlNullableType(base_type)
if len(children) == 2:
array_node = children[1]
array_node_class = array_node.GetClass()
if array_node_class != 'Array':
raise ValueError('Expected Array node as TypeSuffix, got %s node.' % array_node_class)
array_type = IdlArrayType(base_type)
if array_node.GetProperty('NULLABLE'):
return IdlNullableType(array_type)
return array_type
return base_type
def type_node_inner_to_type(node):
node_class = node.GetClass()
# Note Type*r*ef, not Typedef, meaning the type is an identifier, thus
# either a typedef shorthand (but not a Typedef declaration itself) or an
# interface type. We do not distinguish these, and just use the type name.
if node_class in ['PrimitiveType', 'Typeref']:
# unrestricted syntax: unrestricted double | unrestricted float
is_unrestricted = node.GetProperty('UNRESTRICTED') or False
return IdlType(node.GetName(), is_unrestricted=is_unrestricted)
elif node_class == 'Any':
return IdlType('any')
elif node_class == 'Sequence':
return sequence_node_to_type(node)
elif node_class == 'UnionType':
return union_type_node_to_idl_union_type(node)
elif node_class == 'Promise':
return IdlType('Promise')
raise ValueError('Unrecognized node class: %s' % node_class)
def sequence_node_to_type(node):
children = node.GetChildren()
if len(children) != 1:
raise ValueError('Sequence node expects exactly 1 child, got %s' % len(children))
sequence_child = children[0]
sequence_child_class = sequence_child.GetClass()
if sequence_child_class != 'Type':
raise ValueError('Unrecognized node class: %s' % sequence_child_class)
element_type = type_node_to_type(sequence_child)
sequence_type = IdlSequenceType(element_type)
if node.GetProperty('NULLABLE'):
return IdlNullableType(sequence_type)
return sequence_type
def typedef_node_to_type(node):
children = node.GetChildren()
if len(children) != 1:
raise ValueError('Typedef node with %s children, expected 1' % len(children))
child = children[0]
child_class = child.GetClass()
if child_class != 'Type':
raise ValueError('Unrecognized node class: %s' % child_class)
return type_node_to_type(child)
def union_type_node_to_idl_union_type(node):
member_types = [type_node_to_type(member_type_node)
for member_type_node in node.GetChildren()]
return IdlUnionType(member_types)
| bsd-3-clause | 6,397,822,024,024,746,000 | 41.018824 | 158 | 0.595027 | false |
leandrotoledo/python-correios | correios/test/test_encomenda.py | 1 | 1848 | #-*- encoding: utf-8 -*-
"""
API Python Correios - Rastreamento de Encomendas
Copyright (C) 2012 Leandro T. de Souza <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import unittest
from datetime import datetime
root = os.path.dirname(os.path.abspath(__file__))
os.chdir(root)
sys.path.insert(0, os.path.dirname(root))
sys.path.insert(0, root)
from correios import *
class CorreiosTests(unittest.TestCase):
def test_get_encomenda(self):
encomenda = Correios.get_encomenda('RA222491899CN')
assert len(encomenda.status) == 4
primeiro_status = encomenda.status[0]
assert primeiro_status.atualizacao == datetime(2012, 2, 19, 3, 28)
assert primeiro_status.pais == 'CHINA'
assert primeiro_status.situacao == 'Postado'
ultimo_status = encomenda.status[-1]
assert ultimo_status.atualizacao == datetime(2012, 5, 03, 19, 36)
assert ultimo_status.pais == 'BRASIL'
assert ultimo_status.estado == 'SP'
assert ultimo_status.cidade == 'SAO BERNARDO'
assert ultimo_status.agencia == 'CDD RUDGE RAMOS'
assert ultimo_status.situacao == 'Entrega Efetuada'
def main():
unittest.main()
if __name__ == '__main__':
main()
| gpl-3.0 | -7,990,466,861,356,577,000 | 30.862069 | 74 | 0.70671 | false |
pdbartlett/misc-stuff | unfuddle/AppEngine/paulb/testapp.py | 1 | 2935 | import cgi
import datetime
import os
import wsgiref.handlers
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
# Model classes
class Message(db.Model):
user = db.UserProperty()
email = db.StringProperty()
subject = db.StringProperty()
content = db.StringProperty(multiline=True)
date = db.DateTimeProperty(auto_now_add=True)
def getSummary(self):
return '%s: %s (%s)' % (self.date, self.subject, self.email)
# Pages
class TemplatePage(webapp.RequestHandler):
def __init__(self):
self.user = users.get_current_user()
def render(self, file, args):
path = os.path.join(os.path.dirname(__file__), file)
self.response.out.write(template.render(path, args))
class MainPage(TemplatePage):
def get(self):
TemplatePage.render(self, 'main.html', {})
class HeaderPage(TemplatePage):
def get(self):
if self.user:
user_desc = self.user.nickname()
user_link = users.create_logout_url('/')
link_text = 'Logout'
else:
user_desc = None
user_link = users.create_login_url('/')
link_text = 'Login'
TemplatePage.render(self, 'header.html', {
'user_desc': user_desc,
'user_link': user_link,
'link_text': link_text,
'is_admin': users.is_current_user_admin()
})
class WelcomePage(TemplatePage):
def get(self):
TemplatePage.render(self, 'welcome.html', {})
class ContactPage(TemplatePage):
def get(self):
PAGESIZE = 10
if self.user:
email = self.user.email()
query = Message.all()
if not users.is_current_user_admin():
query.filter('user =', self.user)
messages = query.order('-date').fetch(PAGESIZE)
more = (query.count(PAGESIZE + 1) > PAGESIZE)
else:
email = ''
messages = None
more = False
TemplatePage.render(self, 'contact.html', {
'email': email,
'admin': users.is_current_user_admin(),
'messages': messages,
'more': more,
'posted': self.request.get('posted')
})
def post(self):
message = Message()
if users.get_current_user():
message.user = users.get_current_user()
message.email = self.request.get('email')
message.subject = self.request.get('subject')
message.content = self.request.get('content')
message.put()
self.redirect('/contact?posted=1')
class AdminPage(TemplatePage):
def get(self):
TemplatePage.render(self, 'admin.html', {})
# main(), etc.
def main():
application = webapp.WSGIApplication([
('/', MainPage),
('/admin', AdminPage),
('/contact', ContactPage),
('/header', HeaderPage),
('/welcome', WelcomePage),
], debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
| apache-2.0 | -6,952,409,323,259,496,000 | 25.95283 | 64 | 0.621465 | false |
ttm/mass | src/aux/filters/filter1.py | 1 | 1163 | #-*- coding: utf8 -*-
# fazendo umi
# perfil quadrado do espectro
# e retornando o as amostras
import numpy as n, pylab as p, scikits.audiolab as a
duracao=2. # segundo
N=int(duracao*44100) #numero de amostras
# sequência de 2205 amostras (50 milisegundos em 44100 kHz)
# coeficientes espectrais de fourier em mesmo numero
# som é real, espectro par:
#perfil_espectral=[0.]*(N/4)+[0.]*(N/8)+[1.]*(N/4)+[0.]*(N/8)+[0.]*(N/4)# eh par
#perfil_espectral=[0.]*(N/4)*3 +[1.]*(N/4)*2 +[0.]*(N/4)*3# eh par
#fase_impar=n.zeros((N*8)/4)
#espectro=perfil_espectral+fase_impar*
perfil_espectral=([0]+[0]*30+[1]*30+[0]*30)
#perfil_espectral=[0.]*(N/4)+[0.]*(N/4)+[1.]*(N/4)+[0.]*(N/4)+[0.]*(N/4)# eh par
#perfil_espectral=[0.]*(N/3)+[1.]*(N/3)+[0.]*(N/3)# eh par
#perfil_espectral=[0.]*(N/5)+[0.]*(N/5)+[1.]*(N/5)+[0.]*(N/5)+[0.]*(N/5)# eh par
#perfil_espectral=[1.]*1000+[0.]*1000+[0.]*1000 # nao eh par
som=n.fft.ifft(perfil_espectral)
p.subplot(411)
p.plot(som.real)
p.plot(som.real,"ro")
p.subplot(412)
p.plot(som.imag)
p.plot(som.imag,"ro")
p.subplot(413)
p.plot(n.abs(som))
p.plot(n.abs(som),"ro")
p.subplot(414)
p.plot(perfil_espectral)
p.show()
| gpl-3.0 | -6,308,406,848,316,789,000 | 22.693878 | 80 | 0.615848 | false |
eaplatanios/tensorflow | tensorflow/python/ops/distributions/categorical.py | 1 | 12151 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Categorical distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util.tf_export import tf_export
def _broadcast_cat_event_and_params(event, params, base_dtype=dtypes.int32):
"""Broadcasts the event or distribution parameters."""
if event.shape.ndims is None:
raise NotImplementedError(
"Cannot broadcast with an event tensor of unknown rank.")
if event.dtype.is_integer:
pass
elif event.dtype.is_floating:
# When `validate_args=True` we've already ensured int/float casting
# is closed.
event = math_ops.cast(event, dtype=dtypes.int32)
else:
raise TypeError("`value` should have integer `dtype` or "
"`self.dtype` ({})".format(base_dtype))
if params.get_shape()[:-1] == event.get_shape():
params = params
else:
params *= array_ops.ones_like(
array_ops.expand_dims(event, -1), dtype=params.dtype)
params_shape = array_ops.shape(params)[:-1]
event *= array_ops.ones(params_shape, dtype=event.dtype)
event.set_shape(tensor_shape.TensorShape(params.get_shape()[:-1]))
return event, params
@tf_export("distributions.Categorical")
class Categorical(distribution.Distribution):
"""Categorical distribution.
The Categorical distribution is parameterized by either probabilities or
log-probabilities of a set of `K` classes. It is defined over the integers
`{0, 1, ..., K}`.
The Categorical distribution is closely related to the `OneHotCategorical` and
`Multinomial` distributions. The Categorical distribution can be intuited as
generating samples according to `argmax{ OneHotCategorical(probs) }` itself
being identical to `argmax{ Multinomial(probs, total_count=1) }.
#### Mathematical Details
The probability mass function (pmf) is,
```none
pmf(k; pi) = prod_j pi_j**[k == j]
```
#### Pitfalls
The number of classes, `K`, must not exceed:
- the largest integer representable by `self.dtype`, i.e.,
`2**(mantissa_bits+1)` (IEE754),
- the maximum `Tensor` index, i.e., `2**31-1`.
In other words,
```python
K <= min(2**31-1, {
tf.float16: 2**11,
tf.float32: 2**24,
tf.float64: 2**53 }[param.dtype])
```
Note: This condition is validated only when `self.validate_args = True`.
#### Examples
Creates a 3-class distribution with the 2nd class being most likely.
```python
dist = Categorical(probs=[0.1, 0.5, 0.4])
n = 1e4
empirical_prob = tf.cast(
tf.histogram_fixed_width(
dist.sample(int(n)),
[0., 2],
nbins=3),
dtype=tf.float32) / n
# ==> array([ 0.1005, 0.5037, 0.3958], dtype=float32)
```
Creates a 3-class distribution with the 2nd class being most likely.
Parameterized by [logits](https://en.wikipedia.org/wiki/Logit) rather than
probabilities.
```python
dist = Categorical(logits=np.log([0.1, 0.5, 0.4])
n = 1e4
empirical_prob = tf.cast(
tf.histogram_fixed_width(
dist.sample(int(n)),
[0., 2],
nbins=3),
dtype=tf.float32) / n
# ==> array([0.1045, 0.5047, 0.3908], dtype=float32)
```
Creates a 3-class distribution with the 3rd class being most likely.
The distribution functions can be evaluated on counts.
```python
# counts is a scalar.
p = [0.1, 0.4, 0.5]
dist = Categorical(probs=p)
dist.prob(0) # Shape []
# p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match counts.
counts = [1, 0]
dist.prob(counts) # Shape [2]
# p will be broadcast to shape [3, 5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7, 3]
```
"""
def __init__(
self,
logits=None,
probs=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="Categorical"):
"""Initialize Categorical distributions using class log-probabilities.
Args:
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of logits for each class. Only one of `logits` or
`probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of probabilities for each class. Only one of
`logits` or `probs` should be passed in.
dtype: The type of the event samples (default: int32).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[logits, probs]):
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
validate_args=validate_args,
multidimensional=True,
name=name)
if validate_args:
self._logits = distribution_util.embed_check_categorical_event_shape(
self._logits)
logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
if logits_shape_static.ndims is not None:
self._batch_rank = ops.convert_to_tensor(
logits_shape_static.ndims - 1,
dtype=dtypes.int32,
name="batch_rank")
else:
with ops.name_scope(name="batch_rank"):
self._batch_rank = array_ops.rank(self._logits) - 1
logits_shape = array_ops.shape(self._logits, name="logits_shape")
if logits_shape_static[-1].value is not None:
self._event_size = ops.convert_to_tensor(
logits_shape_static[-1].value,
dtype=dtypes.int32,
name="event_size")
else:
with ops.name_scope(name="event_size"):
self._event_size = logits_shape[self._batch_rank]
if logits_shape_static[:-1].is_fully_defined():
self._batch_shape_val = constant_op.constant(
logits_shape_static[:-1].as_list(),
dtype=dtypes.int32,
name="batch_shape")
else:
with ops.name_scope(name="batch_shape"):
self._batch_shape_val = logits_shape[:-1]
super(Categorical, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits,
self._probs],
name=name)
@property
def event_size(self):
"""Scalar `int32` tensor: the number of classes."""
return self._event_size
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Vector of coordinatewise probabilities."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.identity(self._batch_shape_val)
def _batch_shape(self):
return self.logits.get_shape()[:-1]
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
if self.logits.get_shape().ndims == 2:
logits_2d = self.logits
else:
logits_2d = array_ops.reshape(self.logits, [-1, self.event_size])
sample_dtype = dtypes.int64 if self.dtype.size > 4 else dtypes.int32
draws = random_ops.multinomial(
logits_2d, n, seed=seed, output_dtype=sample_dtype)
draws = array_ops.reshape(
array_ops.transpose(draws),
array_ops.concat([[n], self.batch_shape_tensor()], 0))
return math_ops.cast(draws, self.dtype)
def _cdf(self, k):
k = ops.convert_to_tensor(k, name="k")
if self.validate_args:
k = distribution_util.embed_check_integer_casting_closed(
k, target_dtype=dtypes.int32)
k, probs = _broadcast_cat_event_and_params(
k, self.probs, base_dtype=self.dtype.base_dtype)
# batch-flatten everything in order to use `sequence_mask()`.
batch_flattened_probs = array_ops.reshape(probs,
(-1, self._event_size))
batch_flattened_k = array_ops.reshape(k, [-1])
to_sum_over = array_ops.where(
array_ops.sequence_mask(batch_flattened_k, self._event_size),
batch_flattened_probs,
array_ops.zeros_like(batch_flattened_probs))
batch_flattened_cdf = math_ops.reduce_sum(to_sum_over, axis=-1)
# Reshape back to the shape of the argument.
return array_ops.reshape(batch_flattened_cdf, array_ops.shape(k))
def _log_prob(self, k):
k = ops.convert_to_tensor(k, name="k")
if self.validate_args:
k = distribution_util.embed_check_integer_casting_closed(
k, target_dtype=dtypes.int32)
k, logits = _broadcast_cat_event_and_params(
k, self.logits, base_dtype=self.dtype.base_dtype)
return -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k,
logits=logits)
def _entropy(self):
return -math_ops.reduce_sum(
nn_ops.log_softmax(self.logits) * self.probs, axis=-1)
def _mode(self):
ret = math_ops.argmax(self.logits, axis=self._batch_rank)
ret = math_ops.cast(ret, self.dtype)
ret.set_shape(self.batch_shape)
return ret
@kullback_leibler.RegisterKL(Categorical, Categorical)
def _kl_categorical_categorical(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Categorical.
Args:
a: instance of a Categorical distribution object.
b: instance of a Categorical distribution object.
name: (optional) Name to use for created operations.
default is "kl_categorical_categorical".
Returns:
Batchwise KL(a || b)
"""
with ops.name_scope(name, "kl_categorical_categorical",
values=[a.logits, b.logits]):
# sum(probs log(probs / (1 - probs)))
delta_log_probs1 = (nn_ops.log_softmax(a.logits) -
nn_ops.log_softmax(b.logits))
return math_ops.reduce_sum(nn_ops.softmax(a.logits) * delta_log_probs1,
axis=-1)
| apache-2.0 | -1,370,741,532,496,404,000 | 34.843658 | 80 | 0.650152 | false |
weka-io/easypy | easypy/tokens.py | 1 | 3339 | """
This module is about Tokens. Tokens are string-based objects for when
``None`` is not enough, and ``Enum`` is too much.
Tokens are used to indicate a desired behavior, instead of some specific value.
Tokens can be created by simple instantiation::
from easypy.tokens import Token
AUTO = Token('AUTO')
If you don't like repeating yourself, however, you can use this 'dark' magic::
from easypy.tokens import AUTO
"""
import sys
from types import ModuleType
__all__ = ["Token", "if_auto"]
class Token(str):
"""
When ``None`` is not enough, and ``Enum`` is too much.
Use to indicate a desired behavior, instead of some specific value:
from easypy.tokens import AUTO, MAX
def create_file(fname=AUTO, size=AUTO):
if size is AUTO:
size = get_capacity() / 2
elif size is MAX:
size = get_capacity()
if fname is AUTO:
from .random import random_nice_name
fname = random_nice_name()
os.truncate(fname, size)
Also, to support use as cli options, a token can be compared with an str:
AUTO == '<AUTO>'
AUTO == 'AUTO'
AUTO == '<auto>'
AUTO == 'auto'
"""
_all = {}
def __new__(cls, name):
name = name.strip("<>")
try:
return cls._all[name]
except KeyError:
pass
cls._all[name] = self = super().__new__(cls, "<%s>" % name)
return self
def __repr__(self):
return self
def __eq__(self, other):
if isinstance(other, self.__class__):
return self is other
elif isinstance(other, str):
# we allows this so that cli flags can be easily transformed into tokens (AUTO == 'auto')
return self.strip("<>").lower() == other.strip("<>").lower()
return False
# we're already case insensitive when comparing
def lower(self):
return self
def upper(self):
return self
def __hash__(self):
return super().__hash__()
def if_auto(val, auto):
"""
Convenience for the popular ``auto if val is AUTO else val``
Example:
config.foo_level = 100
def foo(level=AUTO):
level = if_auto(level, config.foo_level)
return level
assert foo() == 100
assert foo(AUTO) == 100
assert foo(1) == 1
"""
AUTO = Token("AUTO")
return auto if val is AUTO else val
class _TokensModule(ModuleType):
"""
The module-hack that allows us to use ``from easypy.tokens import AUTO``
"""
__all__ = () # to make help() happy
__package__ = __name__
_orig_module = sys.modules[__name__]
def __getattr__(self, attr):
try:
return getattr(self._orig_module, attr)
except AttributeError:
pass
if attr.startswith("_") or attr == 'trait_names':
raise AttributeError(attr)
token = Token("<%s>" % attr)
setattr(self._orig_module, attr, token)
return token
def __dir__(self):
return sorted(dir(self._orig_module) + list(Token._all))
__path__ = []
__file__ = __file__
mod = _TokensModule(__name__, __doc__)
sys.modules[__name__] = mod
del ModuleType
del _TokensModule
del mod, sys
| bsd-3-clause | 9,029,391,845,331,402,000 | 23.021583 | 101 | 0.557951 | false |
linfanangel/Trality | cart/cart/settings.py | 1 | 3738 | """
Django settings for cart project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from django.core.mail import send_mail
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*r4w4nt(x@zsro*@hf2#ossx*@=n+*4_sqv9-cr$w!h2t3(j(('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'cartapp',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cart.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cart.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cart',
'USER': 'root',
'PASSWORD': '##',
}
}
ACCOUNT_ACTIVATION_DAYS = 7
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = ''
SERVER_EMAIL = '[email protected]'
DEFAULT_FROM_EMAIL = '' | gpl-3.0 | 5,015,623,358,729,952,000 | 24.786207 | 91 | 0.684323 | false |
danielterhorst/open_exchange_rates | open_exchange_rates/client.py | 1 | 2646 | from __future__ import unicode_literals
import logging
import httplib
import operator
from requests.adapters import HTTPAdapter
import requests
from .exceptions import OpenExchangeRatesAccessRestrictedError, OpenExchangeRatesAccessRestrictedOverUseError, \
OpenExchangeRatesInvalidAppIdError, OpenExchangeRatesInvalidBaseError, OpenExchangeRatesMissingAppIdError, \
OpenExchangeRatesNotAllowedError, OpenExchangeRatesNotFoundError
logger = logging.getLogger(__name__)
class Client(object):
API_URL_PREFIX = 'https://openexchangerates.org/api'
API_URL_LATEST = '{}/latest.json?app_id={{}}&base={{}}'.format(API_URL_PREFIX)
HTTP_CODE_ERROR_MAP = {
(httplib.NOT_FOUND, 'not_found'): OpenExchangeRatesNotFoundError,
(httplib.UNAUTHORIZED, 'missing_app_id'): OpenExchangeRatesMissingAppIdError,
(httplib.UNAUTHORIZED, 'invalid_app_id'): OpenExchangeRatesInvalidAppIdError,
(httplib.UNAUTHORIZED, 'not_allowed'): OpenExchangeRatesNotAllowedError,
(httplib.FORBIDDEN, 'access_restricted'): OpenExchangeRatesAccessRestrictedError,
(429, 'access_restricted'): OpenExchangeRatesAccessRestrictedOverUseError,
(httplib.BAD_REQUEST, 'invalid_base'): OpenExchangeRatesInvalidBaseError
}
def __init__(self, app_id):
self.app_id = app_id
# Provides cookie persistence, connection-pooling, and configuration.
self.session = requests.Session()
# Create an requests HTTP adapter and set number of retries to attempt
adapter = HTTPAdapter()
adapter.max_retries = 5
# Register transport adapter for given URL prefix and enable connection retrying.
self.session.mount(self.API_URL_PREFIX, adapter=adapter)
def get_latest_for_currency(self, iso_name):
""" Get latest exchange rate for given base currency """
return self.execute(self.API_URL_LATEST.format(self.app_id, iso_name))['rates']
def execute(self, url):
""" Generic execute method to perform API call and handle errors. """
response = self.session.get(url, timeout=5)
content_json = response.json()
# If we get an HTTP error code, raise an exception
if response.status_code in map(operator.itemgetter(0), self.HTTP_CODE_ERROR_MAP.keys()):
# Get the exception based on the HTTP status and error message
exception_class = self.HTTP_CODE_ERROR_MAP[(response.status_code, content_json.get('message'))]
# Raise it with the description (if available)
raise exception_class(content_json.get('description'))
return content_json
| gpl-2.0 | 6,761,920,065,992,111,000 | 38.492537 | 112 | 0.710884 | false |
tmhorne/simplewiki | zim/plugins/tags.py | 1 | 32247 | # -*- coding: utf-8 -*-
# Copyright 2010 Fabian Moser
# Copyright 2011-2014 Jaap Karssenberg
import gobject
import gtk
import pango
import logging
from zim.plugins import PluginClass, extends, WindowExtension
from zim.gui.pageindex import PageTreeStore, PageTreeIter, PageTreeView, \
NAME_COL, PATH_COL, EMPTY_COL, STYLE_COL, FGCOLOR_COL, WEIGHT_COL, N_CHILD_COL, TIP_COL
from zim.notebook import Path
from zim.index import IndexPath, IndexTag
from zim.gui.widgets import LEFT_PANE, PANE_POSITIONS, populate_popup_add_separator, ScrolledWindow, encode_markup_text
from zim.gui.clipboard import pack_urilist, INTERNAL_PAGELIST_TARGET_NAME
from zim.signals import ConnectorMixin
logger = logging.getLogger('zim.plugins.tags')
class TagsPlugin(PluginClass):
plugin_info = {
'name': _('Tags'), # T: plugin name
'description': _('''\
This plugin provides a page index filtered by means of selecting tags in a cloud.
'''), # T: plugin description
'author': 'Fabian Moser',
'help': 'Plugins:Tags',
}
plugin_preferences = (
# key, type, label, default
('pane', 'choice', _('Position in the window'), LEFT_PANE, PANE_POSITIONS),
# T: option for plugin preferences
)
@extends('MainWindow')
class MainWindowExtension(WindowExtension):
def __init__(self, plugin, window):
WindowExtension.__init__(self, plugin, window)
self.widget = TagsPluginWidget(self.window.ui.notebook.index, self.uistate, self.window.ui) # XXX
self.on_preferences_changed(plugin.preferences)
self.connectto(plugin.preferences, 'changed', self.on_preferences_changed)
self.uistate.setdefault('vpane_pos', 150)
self.widget.set_position(self.uistate['vpane_pos'])
def update_uistate(*a):
self.uistate['vpane_pos'] = self.widget.get_position()
self.widget.connect('notify::position', update_uistate)
def on_preferences_changed(self, preferences):
if self.widget is None:
return
try:
self.window.remove(self.widget)
except ValueError:
pass
self.window.add_tab(_('Tags'), self.widget, preferences['pane'])
self.widget.show_all()
def teardown(self):
self.window.remove(self.widget)
self.widget.disconnect_all()
self.widget = None
class TagsPluginWidget(ConnectorMixin, gtk.VPaned):
	'''Widget combining a tag cloud and a tag-based page treeview'''
def __init__(self, index, uistate, ui): # XXX
gtk.VPaned.__init__(self)
self.index = index
self.uistate = uistate
self.uistate.setdefault('treeview', 'tagged', set(['tagged', 'tags']))
self.uistate.setdefault('tagcloud_sorting', 'score', set(['alpha', 'score']))
self.uistate.setdefault('show_full_page_name', True)
self.tagcloud = TagCloudWidget(self.index, sorting=self.uistate['tagcloud_sorting'])
self.pack1(ScrolledWindow(self.tagcloud), shrink=False)
self.treeview = TagsPageTreeView(ui) # XXX
self._treeview_mode = (None, None)
self.pack2(ScrolledWindow(self.treeview), shrink=False)
self.treeview.connect('populate-popup', self.on_populate_popup)
self.tagcloud.connect('selection-changed', self.on_cloud_selection_changed)
		self.tagcloud.connect('sorting-changed', self.on_cloud_sorting_changed)
self.connectto_all(ui, ( # XXX
'open-page',
('start-index-update', lambda o: self.disconnect_model()),
('end-index-update', lambda o: self.reload_model()),
))
self.reload_model()
def on_open_page(self, ui, page, path):
self.treeview.select_page(path)
def toggle_treeview(self):
'''Toggle the treeview type in the widget'''
if self.uistate['treeview'] == 'tagged':
self.uistate['treeview'] = 'tags'
else:
self.uistate['treeview'] = 'tagged'
self.reload_model()
def toggle_show_full_page_name(self):
self.uistate['show_full_page_name'] = not self.uistate['show_full_page_name']
self.reload_model()
def on_populate_popup(self, treeview, menu):
# Add a popup menu item to switch the treeview mode
populate_popup_add_separator(menu, prepend=True)
item = gtk.CheckMenuItem(_('Show full page name')) # T: menu option
item.set_active(self.uistate['show_full_page_name'])
item.connect_object('toggled', self.__class__.toggle_show_full_page_name, self)
menu.prepend(item)
item = gtk.CheckMenuItem(_('Sort pages by tags')) # T: menu option
item.set_active(self.uistate['treeview'] == 'tags')
item.connect_object('toggled', self.__class__.toggle_treeview, self)
menu.prepend(item)
menu.show_all()
def on_cloud_selection_changed(self, cloud):
filter = cloud.get_tag_filter()
type, was_filtered = self._treeview_mode
is_filtered = (filter is not None)
if type == 'tagged' and was_filtered != is_filtered:
# Switch between tag view and normal index or vice versa
self._reload_model(type, filter)
else:
self.treeview.set_tag_filter(filter)
	def on_cloud_sorting_changed(self, cloud, sorting):
self.uistate['tagcloud_sorting'] = sorting
def disconnect_model(self):
'''Stop the model from listening to the index. Used to
unhook the model before reloading the index. Typically
should be followed by reload_model().
'''
self.treeview.disconnect_index()
self.tagcloud.disconnect_index()
def reload_model(self):
'''Re-initialize the treeview model. This is called when
reloading the index to get rid of out-of-sync model errors
		without needing to close the app first.
'''
assert self.uistate['treeview'] in ('tagged', 'tags')
if self.tagcloud.index is None:
self.tagcloud.set_index(self.index)
type = self.uistate['treeview']
filter = self.tagcloud.get_tag_filter()
self._reload_model(type, filter)
def _reload_model(self, type, filter):
if type == 'tagged':
if filter is None:
model = DuplicatePageTreeStore(self.index)
# show the normal index in this case
else:
model = TaggedPageTreeStore(self.index, self.uistate['show_full_page_name'])
elif type == 'tags':
model = TagsPageTreeStore(self.index, self.uistate['show_full_page_name'])
else:
assert False
is_filtered = (filter is not None)
self._treeview_mode = (type, is_filtered)
self.treeview.set_model(model, filter)
class PageTreeTagIter(object):
'''Simple wrapper for IndexTag objects used as tree iters
	This class is used alongside PageTreeIter but does not derive
	from it. Any method that assumes PageTreeIter will fail if it
	suddenly encounters a PageTreeTagIter; this is intentional.
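
	Illustrative dispatch sketch (not part of the API, just how model
	code below tells the two iter types apart):

		iter = model.get_user_data(treeiter)
		if isinstance(iter, PageTreeTagIter):
			tag = iter.indextag		# a tag row
		else:
			path = iter.indexpath	# a regular page row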
'''
__slots__ = ('indextag', 'treepath', 'n_children')
def __init__(self, treepath, indextag):
self.treepath = treepath
self.indextag = indextag
self.n_children = None # None means unknown
def __repr__(self):
return '<PageTreeTagIter, %s, %s>' % (self.treepath, self.indextag.name)
class DuplicatePageTreeStore(PageTreeStore):
'''Sub-class of PageTreeStore that allows for the same page appearing
multiple times in the tree.
'''
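	# For example (illustrative): in the tags view a page carrying two
	# tags appears once under each tag branch, so select_page() and
	# get_treepaths() below deal with lists of treepaths instead of a
	# single treepath.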
def select_page(self, path):
'''Since there may be duplicates of each page, highlight all of them'''
oldpath = self.selected_page
self.selected_page = path
for mypath in (oldpath, path):
if mypath:
for treepath in self.get_treepaths(mypath):
if treepath:
treeiter = self.get_iter(treepath)
self.emit('row-changed', treepath, treeiter)
def get_treepath(self, path):
# Just returns the first treepath matching notebook path
treepaths = self.get_treepaths(path)
if treepaths:
return treepaths[0]
else:
return None
def get_treepaths(self, path):
'''Return all treepaths matching notebook path 'path'
Default implementation assumes we are a non-duplicate treeview
after all and uses L{PageTreeStore.get_treepath()}.
@implementation: must be overloaded by subclasses that are real
duplicate stores
'''
return [PageTreeStore.get_treepath(self, path)]
class TagsPageTreeStore(DuplicatePageTreeStore):
'''Subclass of the PageTreeStore that shows tags as the top level
for sub-sets of the page tree.
A special top level item 'untagged' is created with all the
untagged (top level) pages.
'''
filter_depth = 2 # tag filter applies to top two levels
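	# i.e. the tag rows themselves and the pages directly below a tag;
	# deeper page levels are never filtered out (see the visible func
	# in TagsPageTreeView.set_model())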
def __init__(self, index, show_full_page_name=True):
self._reverse_cache = {}
self.show_full_page_name = show_full_page_name
self.untagged = IndexTag(_('untagged'), -1)
# T: label for untagged pages in side pane
PageTreeStore.__init__(self, index)
def _connect(self):
def on_page_changed(o, path, signal):
#~ print '!!', signal, path
self._flush()
treepaths = self.get_treepaths(path)
for treepath in treepaths:
#~ print '!!', signal, path, treepath
try:
treeiter = self.get_iter(treepath)
except:
logger.exception('BUG: Invalid treepath: %s %s %s', signal, path, treepath)
else:
self.emit(signal, treepath, treeiter)
def on_page_deleted(o, path):
#~ print '!! page delete', path
treepaths = self.get_treepaths(path)
for treepath in treepaths:
self.emit('row-deleted', treepath)
self._flush()
def on_tag_created(o, tag):
self._flush()
treepath = (self.index.get_tag_index(tag) + 1,)
treeiter = self.get_iter(treepath)
#~ print '!! tag created', tag, treepath
self.row_inserted(treepath, treeiter)
def on_tag_to_be_inserted(o, tag, path, first):
if first and not path.namespace:
# Remove from untagged branch
treepath = (0, self.index.get_untagged_root_page_index(path))
#~ print '!! removed from untagged', treepath
self.row_deleted(treepath)
self._flush()
def on_tag_inserted(o, tag, path, first):
if first and not path.namespace:
# Finish update of untagged branch
if not self.index.n_list_untagged_root_pages():
treeiter = self.get_iter((0,))
self.row_has_child_toggled((0,), treeiter)
# Add to tag branch
self._flush()
tagindex = self.index.get_tag_index(tag)
pageindex = self.index.get_tagged_page_index(tag, path)
treepath = (tagindex + 1, pageindex)
treeiter = self.get_iter(treepath)
#~ print '!! tag inserted', tag, treepath
self.row_inserted(treepath, treeiter)
if not path.hasdata:
path = self.index.lookup_data(path)
if path.haschildren:
self.row_has_child_toggled(treepath, treeiter)
def on_tag_to_be_removed(o, tag, path, last):
# Remove from tag branch
tagindex = self.index.get_tag_index(tag)
pageindex = self.index.get_tagged_page_index(tag, path)
treepath = (tagindex + 1, pageindex)
#~ print '!! tag removed', tag, treepath
self.row_deleted(treepath)
self._flush()
def on_tag_removed(o, tag, path, last):
if last and not path.namespace:
# Add to untagged
pageindex = self.index.get_untagged_root_page_index(path)
treepath = (0, pageindex)
treeiter = self.get_iter(treepath)
#~ print '!! new untagged', treepath
if self.index.n_list_untagged_root_pages() == 1:
treeiter = self.get_iter((0,))
self.row_has_child_toggled((0,), treeiter)
self.row_inserted(treepath, treeiter)
def on_tag_to_be_deleted(o, tag):
treepath = (self.index.get_tag_index(tag) + 1,)
#~ print '!! tag deleted', tag, treepath
self.row_deleted(treepath)
self._flush()
self.connectto_all(self.index, (
('page-inserted', on_page_changed, 'row-inserted'),
('page-updated', on_page_changed, 'row-changed'),
('page-haschildren-toggled', on_page_changed, 'row-has-child-toggled'),
('page-to-be-deleted', on_page_deleted),
# TODO: Treat tag-inserted and new tag differently
('tag-created', on_tag_created),
('tag-to-be-inserted', on_tag_to_be_inserted),
('tag-inserted', on_tag_inserted),
('tag-to-be-removed', on_tag_to_be_removed),
('tag-removed', on_tag_removed),
('tag-to-be-deleted', on_tag_to_be_deleted),
))
		# The page-to-be-deleted signal is a hack so we have time to ensure we know the
		# treepath of this indexpath - once we get page-deleted it is too late to get this
def _get_iter(self, treepath):
'''Convert the tree hierarchy to a PageTreeIter'''
		# Take care of caching and make sure we keep a ref to paths long
		# enough while they are used in an iter. Also schedule a flush
		# to be executed as soon as the loop is idle again.
		# The cache is a dict which takes treepath tuples as keys and
		# has pagetreeiter objects as values; it is filled on demand.
		# No gtk TreePath objects are used; treepaths are just tuples of ints.
		# Path (0,) is the first item in the root namespace
		# Path (2, 4) is the 5th child of the 3rd item
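		# In this particular model that layout means (illustrative):
		#   (0,)       -> the special 'untagged' branch
		#   (1,)       -> the first real tag
		#   (1, 2)     -> the 3rd page tagged with that tag
		#   (1, 2, 0)  -> the first child page of that tagged page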
#~ print '>>> Lookup path', treepath
if not treepath in self._cache:
parent = None
for i in range(1, len(treepath)+1):
mytreepath = treepath[:i]
if not mytreepath in self._cache:
parenttreepath = treepath[:i-1]
offset = mytreepath[-1]
if parent is None:
# The first tree level are tags
#~ print '>>>> Load taglist'
if offset == 0:
iter = PageTreeTagIter((0,), self.untagged)
self._cache.setdefault((0,), iter)
else:
tags = self.index.list_all_tags(offset-1, limit=20)
# offset -1 because we use 0 for untagged
for j, path in enumerate(tags):
childtreepath = parenttreepath + (offset + j,)
iter = PageTreeTagIter(childtreepath, path)
self._cache.setdefault(childtreepath, iter)
else:
#~ print '>>>> Load pagelist for', parent, 'offset', offset
if isinstance(parent, IndexTag):
if parent == self.untagged:
pages = self.index.list_untagged_root_pages(offset, limit=20)
else:
pages = self.index.list_tagged_pages(parent, offset, limit=20)
else:
pages = self.index.list_pages(parent, offset, limit=20)
for j, path in enumerate(pages):
childtreepath = parenttreepath + (offset + j,)
iter = PageTreeIter(childtreepath, path)
self._cache.setdefault(childtreepath, iter)
try:
iter = self._cache[mytreepath]
except KeyError:
return None
else:
if isinstance(iter, PageTreeTagIter):
parent = iter.indextag
else:
parent = iter.indexpath
#~ print '>>> Return', self._cache.get(treepath, None)
self._schedule_flush()
return self._cache.get(treepath, None)
def _flush(self):
self._reverse_cache = {}
return PageTreeStore._flush(self)
def get_treepath(self, path):
if isinstance(path, IndexTag):
if path == self.untagged:
return (0,)
else:
return (self.index.get_tag_index(path) + 1,)
else:
return DuplicatePageTreeStore.get_treepath(self, path)
def get_treepaths(self, path):
		'''Convert a Zim path to a tree hierarchy; in general this results
		in multiple matches
'''
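		# Example (hypothetical indices, for illustration only): a root
		# page carrying two tags appears once under each tag branch, so
		# two treepaths are returned, e.g. [(1, 0), (3, 2)]; an untagged
		# root page shows up under the special 'untagged' branch as (0, i).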
if isinstance(path, IndexTag):
treepath = self.get_treepath(path)
#~ print ">>> Found", path, '->', treepath
if treepath:
return (treepath,)
else:
return ()
else:
assert isinstance(path, Path)
if path.isroot:
raise ValueError
path = self.index.lookup_path(path)
if path is None or not path.hasdata:
return ()
# See if it is in cache already
if path in self._reverse_cache:
#~ print '>>> Return from cache', path, "->", self._reverse_cache[path]
return self._reverse_cache[path]
# Try getting it while populating cache
paths = list(path.parents())
paths.pop() # get rid of root namespace as parent
paths.insert(0, path)
child = None
childpath = () # partial treepath for child part
treepaths = []
for p in paths: # iter from child to parents
if child:
pageindex = self.index.get_page_index(child)
childpath = (pageindex,) + childpath
# Get tags of this path
tags = list(self.index.list_tags(p))
for t in tags:
tagindex = self.index.get_tag_index(t) + 1 # +1 due to untagged
pageindex = self.index.get_tagged_page_index(t, p)
treepaths.append((tagindex, pageindex) + childpath)
child = p
root_page = paths[-1]
try:
pageindex = self.index.get_untagged_root_page_index(root_page)
except ValueError:
pass
else:
treepaths.append((0, pageindex) + childpath)
treepaths.sort()
#~ print ">>> Found", path, "->", treepaths
self._reverse_cache[path] = treepaths
self._schedule_flush()
return treepaths
def get_indexpath(self, treeiter):
'''Returns an IndexPath for a TreeIter or None'''
# Note that iter is TreeIter here, not PageTreeIter
iter = self.get_user_data(treeiter)
if isinstance(iter, PageTreeIter):
return iter.indexpath
else:
return None
def get_indextag(self, treeiter):
'''Returns an IndexTag for a TreeIter or None'''
# Note that iter is TreeIter here, not PageTreeIter
iter = self.get_user_data(treeiter)
if isinstance(iter, PageTreeTagIter):
return iter.indextag
else:
return None
def on_iter_has_child(self, iter):
'''Returns True if the iter has children'''
if isinstance(iter, PageTreeTagIter):
if iter.indextag == self.untagged:
return self.index.n_list_untagged_root_pages() > 0
else:
return self.index.n_list_tagged_pages(iter.indextag) > 0
else:
return PageTreeStore.on_iter_has_child(self, iter)
def on_iter_n_children(self, iter):
'''Returns the number of children in a namespace. As a special case,
when iter is None the number of tags is given.
'''
if iter is None:
return self.index.n_list_all_tags() + 1 # +1 to include untagged
elif isinstance(iter, PageTreeTagIter):
if iter.indextag == self.untagged:
return self.index.n_list_untagged_root_pages()
else:
return self.index.n_list_tagged_pages(iter.indextag)
else:
return PageTreeStore.on_iter_n_children(self, iter)
def on_get_value(self, iter, column):
'''Returns the data for a specific column'''
if isinstance(iter, PageTreeTagIter):
tag = iter.indextag
if column == NAME_COL:
return tag.name
elif column == TIP_COL:
return encode_markup_text(tag.name)
elif column == PATH_COL:
return tag
elif column == EMPTY_COL:
return tag == self.untagged
elif column == STYLE_COL:
if tag == self.untagged:
return pango.STYLE_ITALIC
else:
return pango.STYLE_NORMAL
elif column == FGCOLOR_COL:
if tag == self.untagged:
return self.EMPTY_COLOR
else:
return self.NORMAL_COLOR
elif column == WEIGHT_COL:
return pango.WEIGHT_NORMAL
# TODO: use this property to show tags in current page?
elif column == N_CHILD_COL:
return ''
## Due to multiple tag filtering this result is no good..
#~ if tag == self.untagged:
#~ return str(self.index.n_list_untagged_root_pages())
#~ else:
#~ return str(self.index.n_list_tagged_pages(tag))
else:
if column == NAME_COL and self.show_full_page_name:
				# Show top level pages with full context
				# the top level of the tree holds tags, so for top level pages len(treepath) is 2
if len(iter.treepath) <= 2:
return iter.indexpath.name
else:
return iter.indexpath.basename
else:
return PageTreeStore.on_get_value(self, iter, column)
class TaggedPageTreeStore(DuplicatePageTreeStore):
    '''
    A TreeModel that lists all Zim pages in a flat list filtered by tags.
    Pages with associated sub-pages still show them as sub-nodes.
    '''

    filter_depth = 1  # tag filter only applies to top level

    def __init__(self, index, show_full_page_name=True):
        PageTreeStore.__init__(self, index)
        self._reverse_cache = {}
        self.show_full_page_name = show_full_page_name

    def _connect(self):
        def on_page_changed(o, path, signal):
            self._flush()
            treepaths = self.get_treepaths(path)
            for treepath in treepaths:
                treeiter = self.get_iter(treepath)
                self.emit(signal, treepath, treeiter)

        def on_page_deleted(o, path):
            treepaths = self.get_treepaths(path)
            for treepath in treepaths:
                self.emit('row-deleted', treepath)
            self._flush()

        self.connectto_all(self.index, (
            ('page-inserted', on_page_changed, 'row-inserted'),
            ('page-updated', on_page_changed, 'row-changed'),
            ('page-haschildren-toggled', on_page_changed, 'row-has-child-toggled'),
            ('page-to-be-deleted', on_page_deleted),
        ))

    def _get_iter(self, treepath):
        '''
        Cached conversion of the tree hierarchy to a PageTreeIter.

        @param treepath: A tuple of int e.g. (0,) is the first item in the root namespace.
        @returns: A PageTreeIter instance corresponding to the given path
        '''
        if not treepath in self._cache:
            parent = None

            for i in xrange(1, len(treepath) + 1):
                leveltreepath = treepath[:i]

                if not leveltreepath in self._cache:
                    parenttreepath = leveltreepath[:-1]
                    offset = leveltreepath[-1]

                    if parent is None:
                        pages = self.index.list_all_pages(offset, limit=20)
                    else:
                        pages = self.index.list_pages(parent, offset, limit=20)

                    for j, path in enumerate(pages):
                        childtreepath = parenttreepath + (offset + j,)
                        iter = PageTreeIter(childtreepath, path)
                        self._cache.setdefault(childtreepath, iter)

                if leveltreepath in self._cache:
                    parent = self._cache[leveltreepath].indexpath
                else:
                    return None

        self._schedule_flush()  # Clear the cache when idle
        return self._cache.get(treepath, None)

    def _flush(self):
        self._reverse_cache = {}
        return PageTreeStore._flush(self)

    def get_treepaths(self, path):
        '''
        Cached conversion of a Zim path to a node in the tree hierarchy, i.e.
        the inverse operation of _get_iter.

        @param path: Usually an IndexPath instance
        @returns: A list of tuples of ints (one page can be represented many times)
        '''
        assert isinstance(path, Path)
        if path.isroot:
            raise ValueError  # There can be no tree node for the tree root

        path = self.index.lookup_path(path)
        if path is None or not path.hasdata:
            return ()

        # See if it is in cache already
        if path in self._reverse_cache:
            return self._reverse_cache[path]

        paths = [path] + list(path.parents())[:-1]  # Zim paths for the path and all parents (except root)
        child = None
        childpath = ()
        treepaths = []

        for p in paths:
            if child:
                pageindex = self.index.get_page_index(child)
                childpath = (pageindex,) + childpath
            pageindex = self.index.get_all_pages_index(p)
            treepaths.append((pageindex,) + childpath)
            child = p

        treepaths.sort()
        self._reverse_cache[path] = treepaths
        self._schedule_flush()
        return treepaths

    def on_iter_n_children(self, iter):
        '''Returns the number of children in a namespace. As a special case,
        when iter is None the number of pages in the root namespace is given.
        '''
        if iter is None:
            return self.index.n_list_all_pages()
        else:
            return PageTreeStore.on_iter_n_children(self, iter)

    def on_get_value(self, iter, column):
        '''Returns the data for a specific column'''
        if column == NAME_COL and self.show_full_page_name:
            # Show top level pages with full context
            if len(iter.treepath) == 1:
                return iter.indexpath.name
            else:
                return iter.indexpath.basename
        else:
            return PageTreeStore.on_get_value(self, iter, column)
class TagsPageTreeView(PageTreeView):

    def __init__(self, ui, model=None):
        PageTreeView.__init__(self, ui)
        self.set_name('zim-tags-pagelist')
        self._tag_filter = None

        if model:
            self.set_model(model)

    def set_model(self, model, filter=None):
        '''Set the model to be used'''
        # disconnect previous model
        oldmodel = self.get_model()
        if oldmodel:
            childmodel = oldmodel.get_model()
            childmodel.disconnect_index()

        # Filter is also provided here, just to make it more efficient to
        # set model and filter in one go without need for refilter
        self._set_tag_filter(filter)

        # set new model
        def func(model, iter):
            index = self.ui.notebook.index
            if self._tag_filter is None:
                return True  # no filtering
            else:
                iter = model.get_user_data(iter)
                if len(iter.treepath) > model.filter_depth:
                    return True  # deeper levels are not filtered at all
                else:
                    if isinstance(iter, PageTreeTagIter):  # -> tag
                        return iter.indextag in self._tag_filter[1]  # show filtered tags
                    else:  # PageTreeIter -> page
                        tags = frozenset(index.list_tags(iter.indexpath))
                        return tags >= self._tag_filter[0]  # match all selected tags

        filtermodel = model.filter_new(root=None)
        filtermodel.set_visible_func(func)

        # HACK add some methods and attributes
        # (can not subclass gtk.TreeModelFilter because it lacks a constructor)
        def get_indexpath(treeiter):
            childiter = filtermodel.convert_iter_to_child_iter(treeiter)
            if childiter:
                return model.get_indexpath(childiter)
            else:
                return None

        def get_treepath(path):
            for treepath in model.get_treepaths(path):
                filtered = filtermodel.convert_child_path_to_path(treepath)
                if not filtered is None:
                    return filtered
            else:
                return None

        def get_treepaths(path):
            treepaths = model.get_treepaths(path)
            if treepaths:
                treepaths = map(filtermodel.convert_child_path_to_path, treepaths)
                return tuple(t for t in treepaths if not t is None)
            else:
                return ()

        filtermodel.get_indexpath = get_indexpath
        filtermodel.get_treepath = get_treepath
        filtermodel.get_treepaths = get_treepaths
        filtermodel.index = model.index
        filtermodel.select_page = model.select_page

        PageTreeView.set_model(self, filtermodel)

    def set_tag_filter(self, filter):
        '''Sets the tags to filter on. The filter should be a tuple of
        two lists of tags, or None to not do any filtering.
        The first list of tags are the tags that we filter on, so only pages
        matching all these tags should be selected.
        The second set is a superset of the first set and includes all tags
        that appear in one of the selected pages. So selecting one of these
        tags on top of the current selection should result in a subset
        of the current page selection.
        '''
        self._set_tag_filter(filter)
        model = self.get_model()
        if model:
            model.refilter()

    def _set_tag_filter(self, filter):
        if not filter:
            self._tag_filter = None
        else:
            self._tag_filter = (frozenset(filter[0]), frozenset(filter[1]))

    def do_drag_data_get(self, dragcontext, selectiondata, info, time):
        assert selectiondata.target == INTERNAL_PAGELIST_TARGET_NAME
        model, iter = self.get_selection().get_selected()
        path = model.get_indexpath(iter)
        if isinstance(path, IndexTag):
            link = '@' + path.name
        else:
            link = path.name
        logger.debug('Drag data requested, we have internal tag/path "%s"', link)
        data = pack_urilist((link,))
        selectiondata.set(INTERNAL_PAGELIST_TARGET_NAME, 8, data)

    def select_page(self, path, vivificate=False):
        '''Select a page in the treeview if the page is not already selected

        @param path: a notebook L{Path} object for the page
        @keyword vivificate: when C{True} the path is created
        temporarily when it did not yet exist

        @returns: a gtk TreePath (tuple of integers) or C{None}
        '''
        #~ print '!! SELECT', path
        model = self.get_model()
        if model is None:
            return None  # index not yet initialized ...

        # change selection only if necessary
        selected_path = self.get_selected_path()
        if path == selected_path:
            _, iter = self.get_selection().get_selected()
            treepath = model.get_path(iter)
            logger.debug('Already selected: "%s"', treepath)
        else:
            treepath = model.get_treepath(path)
            if treepath:
                # path existed, now select it
                self.select_treepath(treepath)
            elif vivificate:
                # path does not exist, but we can create it
                path = model.index.touch(path)
                treepath = model.get_treepath(path)
                assert treepath, 'BUG: failed to touch placeholder'
                self.select_treepath(treepath)
            else:
                # path does not exist and we are not going to create it
                return None

        rowreference = gtk.TreeRowReference(model, treepath)
        # make reference before cleanup - path may change

        if self._cleanup and self._cleanup.valid():
            mytreepath = self._cleanup.get_path()
            if mytreepath != treepath:
                indexpath = model.get_indexpath(model.get_iter(mytreepath))
                #~ print '!! CLEANUP', indexpath
                model.index.cleanup(indexpath)

        self._cleanup = rowreference

        model.select_page(path)  # highlight in model
        return treepath


# Need to register classes defining gobject signals
gobject.type_register(TagsPageTreeView)
class TagCloudItem(gtk.ToggleButton):
    '''Button item used on the tag cloud widget'''

    def __init__(self, indextag):
        gtk.ToggleButton.__init__(self, indextag.name, use_underline=False)
        self.set_relief(gtk.RELIEF_NONE)
        self.indextag = indextag

        def update_label(self):
            # Make button text bold when active
            label = self.get_child()
            if self.get_active():
                label.set_markup('<b>' + label.get_text() + '</b>')
            else:
                label.set_text(label.get_text())
                # get_text() gives string without markup

        self.connect_after('toggled', update_label)
class TagCloudWidget(ConnectorMixin, gtk.TextView):
    '''Text-view based list of tags, where each tag is represented by a
    button inserted as a child in the textview.

    @signal: C{selection-changed ()}: emitted when tag selection changes
    @signal: C{sorting-changed ()}: emitted when tag sorting changes
    '''

    # define signals we want to use - (closure type, return type and arg types)
    __gsignals__ = {
        'selection-changed': (gobject.SIGNAL_RUN_LAST, None, ()),
        'sorting-changed': (gobject.SIGNAL_RUN_LAST, None, (object,)),
    }

    def __init__(self, index, sorting='score'):
        gtk.TextView.__init__(self, None)  # Create TextBuffer implicitly
        self.set_name('zim-tags-tagcloud')
        self.index = None

        self.set_editable(False)
        self.set_cursor_visible(False)
        self.set_wrap_mode(gtk.WRAP_CHAR)

        self.set_sorting(sorting)
        self.set_index(index)

    def set_index(self, index):
        '''Connect to an Index object'''
        self.disconnect_index()  # just to be sure
        self.index = index
        self.connectto_all(self.index, (
            ('tag-created', self._update),
            ('tag-deleted', self._update),
        ))
        self._update()

    def set_sorting(self, sorting):
        self._alphabetically = (sorting == 'alpha')

    def disconnect_index(self):
        '''Stop the model from listening to the index. Used to unhook
        the model before reloading the index.
        '''
        self.disconnect_from(self.index)
        self._clear()
        self.index = None

    def get_tag_filter(self):
        '''Returns a tuple with two lists of tags; the first gives all
        tags that are selected, the second gives all tags shown in the
        cloud. By definition the first list is a subset of the second.
        If no tags are selected returns None instead.
        '''
        selected = []
        filtered = []
        for button in self.get_children():
            filtered.append(button.indextag)
            if button.get_active():
                selected.append(button.indextag)
        if selected:
            return (selected, filtered)
        else:
            return None

    def _clear(self):
        '''Clears the cloud'''
        self.foreach(lambda b: self.remove(b))
        buffer = self.get_buffer()
        buffer.delete(*buffer.get_bounds())

    def _update(self, *a):
        '''Update the cloud to show only tags that share a set of pages
        with the selected tags.'''
        selected = [b.indextag for b in self.get_children()
                    if b.get_active()
                    and self.index.lookup_tag(b.indextag.name)]
        # Need the lookup here in case the tag went missing in the
        # mean time e.g. due to editing of the page
        self._clear()

        buffer = self.get_buffer()
        if selected:
            tags = self.index.list_intersecting_tags(selected)
        else:
            tags = []

        if not tags:
            tags = self.index.list_all_tags_by_score()
            # It can be that we have a "selected", but the selected tags have
            # disappeared and thus list_intersecting returns empty

        if self._alphabetically:
            tags = sorted(tags, key=lambda t: t.name)
        # else leave sorted by score

        for tag in tags:
            iter = buffer.get_end_iter()
            anchor = buffer.create_child_anchor(iter)
            button = TagCloudItem(tag)
            button.set_active(tag in selected)
            button.connect("toggled", lambda b: self._update())
            self.add_child_at_anchor(button, anchor)

        self.show_all()
        self.emit('selection-changed')

    def do_populate_popup(self, menu):
        populate_popup_add_separator(menu, prepend=True)

        item = gtk.CheckMenuItem(_('Sort alphabetically'))  # T: Context menu item for tag cloud
        item.set_active(self._alphabetically)
        item.connect('toggled', self._switch_sorting)
        item.show_all()
        menu.prepend(item)

    def _switch_sorting(self, widget, *a):
        self._alphabetically = widget.get_active()
        self._update()
        if self._alphabetically:
            self.emit('sorting-changed', 'alpha')
        else:
            self.emit('sorting-changed', 'score')


# Need to register classes defining gobject signals
gobject.type_register(TagCloudWidget)
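

# --- Added illustration (not part of zim): the core matching rule used by the
# tag filter above is plain set containment -- a page passes the filter when
# its tags form a superset of all selected tags. Tag names are hypothetical.
def _demo_tag_filter_match():
    selected = frozenset(['home', 'work'])
    page_tags = frozenset(['home', 'work', 'urgent'])
    assert page_tags >= selected  # all selected tags present -> page shown
    assert not frozenset(['home']) >= selected  # 'work' missing -> filtered out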
| gpl-2.0 | 1,718,784,003,087,782,100 | 30.399221 | 119 | 0.691165 | false |
heyandie/django-qsstats-magic | qsstats/__init__.py | 1 | 7034 | __author__ = 'Matt Croydon, Mikhail Korobov, Pawel Tomasiewicz'
__version__ = (0, 7, 0)

from functools import partial
import datetime

from dateutil.relativedelta import relativedelta
from dateutil.parser import parse
from django.db.models import Count
from django.db import DatabaseError, transaction
from django.conf import settings

from qsstats.utils import get_bounds, _to_datetime, _parse_interval, get_interval_sql, _remove_time
from qsstats import compat
from qsstats.exceptions import *


class QuerySetStats(object):
    """
    Generates statistics about a queryset using Django aggregates.  QuerySetStats
    is able to handle snapshots of data (for example this day, week, month, or
    year) or generate time series data suitable for graphing.
    """
    def __init__(self, qs=None, date_field=None, aggregate=None, today=None):
        self.qs = qs
        self.date_field = date_field
        self.aggregate = aggregate or Count('id')
        self.today = today or self.update_today()

    def _guess_engine(self):
        if hasattr(self.qs, 'db'):  # django 1.2+
            engine_name = settings.DATABASES[self.qs.db]['ENGINE']
        else:
            engine_name = settings.DATABASE_ENGINE
        if 'mysql' in engine_name:
            return 'mysql'
        if 'postg' in engine_name:  # postgres, postgis
            return 'postgresql'
        if 'sqlite' in engine_name:
            return 'sqlite'

    # Aggregates for a specific period of time

    def for_interval(self, interval, dt, date_field=None, aggregate=None):
        start, end = get_bounds(dt, interval)
        date_field = date_field or self.date_field
        kwargs = {'%s__range' % date_field: (start, end)}
        return self._aggregate(date_field, aggregate, kwargs)

    def this_interval(self, interval, date_field=None, aggregate=None):
        method = getattr(self, 'for_%s' % interval)
        return method(self.today, date_field, aggregate)

    # support for this_* and for_* methods
    def __getattr__(self, name):
        if name.startswith('for_'):
            return partial(self.for_interval, name[4:])
        if name.startswith('this_'):
            return partial(self.this_interval, name[5:])
        raise AttributeError

    def time_series(self, start, end=None, interval='days',
                    date_field=None, aggregate=None, engine=None):
        ''' Aggregate over time intervals '''
        end = end or self.today
        args = [start, end, interval, date_field, aggregate]
        engine = engine or self._guess_engine()
        sid = transaction.savepoint()
        try:
            return self._fast_time_series(*(args + [engine]))
        except (QuerySetStatsError, DatabaseError,):
            transaction.savepoint_rollback(sid)
            return self._slow_time_series(*args)

    def _slow_time_series(self, start, end, interval='days',
                          date_field=None, aggregate=None):
        ''' Aggregate over time intervals using 1 sql query for one interval '''
        num, interval = _parse_interval(interval)

        if interval not in ['minutes', 'hours',
                            'days', 'weeks',
                            'months', 'years'] or num != 1:
            raise InvalidInterval('Interval is currently not supported.')

        method = getattr(self, 'for_%s' % interval[:-1])

        stat_list = []
        dt, end = _to_datetime(start), _to_datetime(end)
        while dt <= end:
            value = method(dt, date_field, aggregate)
            stat_list.append((dt, value,))
            dt = dt + relativedelta(**{interval: 1})
        return stat_list

    def _fast_time_series(self, start, end, interval='days',
                          date_field=None, aggregate=None, engine=None):
        ''' Aggregate over time intervals using just 1 sql query '''
        date_field = date_field or self.date_field
        aggregate = aggregate or self.aggregate
        engine = engine or self._guess_engine()

        num, interval = _parse_interval(interval)

        start, _ = get_bounds(start, interval.rstrip('s'))
        _, end = get_bounds(end, interval.rstrip('s'))
        interval_sql = get_interval_sql(date_field, interval, engine)

        kwargs = {'%s__range' % date_field: (start, end)}
        aggregate_data = self.qs.extra(select={'d': interval_sql}).\
                         filter(**kwargs).order_by().values('d').\
                         annotate(agg=aggregate)

        today = _remove_time(compat.now())

        def to_dt(d):
            # Fix from https://bitbucket.org/aztrock/django-qsstats-magic
            try:
                if isinstance(d, basestring):
                    return parse(d, yearfirst=True, default=today)
                return d
            except:
                if isinstance(d, str):
                    return parse(d, yearfirst=True, default=today)
                return d

        data = dict((to_dt(item['d']), item['agg']) for item in aggregate_data)

        stat_list = []
        dt = start
        while dt < end:
            idx = 0
            value = 0
            for i in range(num):
                value = value + data.get(dt, 0)
                if i == 0:
                    stat_list.append((dt, value,))
                    idx = len(stat_list) - 1
                elif i == num - 1:
                    stat_list[idx] = (dt, value,)
                dt = dt + relativedelta(**{interval: 1})
        return stat_list

    # Aggregate totals using a date or datetime as a pivot

    def until(self, dt, date_field=None, aggregate=None):
        return self.pivot(dt, 'lte', date_field, aggregate)

    def until_now(self, date_field=None, aggregate=None):
        return self.pivot(compat.now(), 'lte', date_field, aggregate)

    def after(self, dt, date_field=None, aggregate=None):
        return self.pivot(dt, 'gte', date_field, aggregate)

    def after_now(self, date_field=None, aggregate=None):
        return self.pivot(compat.now(), 'gte', date_field, aggregate)

    def pivot(self, dt, operator=None, date_field=None, aggregate=None):
        operator = operator or self.operator
        if operator not in ['lt', 'lte', 'gt', 'gte']:
            raise InvalidOperator("Please provide a valid operator.")

        kwargs = {'%s__%s' % (date_field or self.date_field, operator): dt}
        return self._aggregate(date_field, aggregate, kwargs)

    # Utility functions
    def update_today(self):
        _now = compat.now()
        self.today = _remove_time(_now)
        return self.today

    def _aggregate(self, date_field=None, aggregate=None, filter=None):
        date_field = date_field or self.date_field
        aggregate = aggregate or self.aggregate
        if not date_field:
            raise DateFieldMissing("Please provide a date_field.")
        if self.qs is None:
            raise QuerySetMissing("Please provide a queryset.")
        agg = self.qs.filter(**filter).aggregate(agg=aggregate)
        return agg['agg']
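

# --- Usage sketch (illustrative; ``Ticket`` and ``created_at`` are
# hypothetical model/field names, not part of this module):
#
#     import datetime
#     from qsstats import QuerySetStats
#
#     qss = QuerySetStats(Ticket.objects.all(), 'created_at')
#     qss.this_month()                        # resolved via __getattr__ above
#     qss.time_series(datetime.date(2012, 1, 1), interval='weeks')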
| bsd-3-clause | 4,031,242,892,359,311,400 | 37.021622 | 99 | 0.589565 | false |
yarnaid/psycho_test | psycho_test/settings.py | 1 | 3757 | """
Django settings for psycho_test project.

Generated by 'django-admin startproject' using Django 1.9.6.

For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""

import os

import dj_database_url

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '18v==7hkrsn2z#)w)jmzbxz%5j)^(29!)qw%0#%-yo@#ch_922'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

ALLOWED_HOSTS = ['localhost', '127.0.0.1', '*']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
]

ROOT_URLCONF = 'psycho_test.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'psycho_test.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases

# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }

# heroku database
DATABASES = {'default': dj_database_url.config()}
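# Illustrative example (values are hypothetical): with the environment set to
#   DATABASE_URL=postgres://user:secret@localhost:5432/psycho_test
# dj_database_url.config() returns roughly
#   {'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'psycho_test',
#    'USER': 'user', 'PASSWORD': 'secret', 'HOST': 'localhost', 'PORT': 5432}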
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'ru-ru'

TIME_ZONE = 'Europe/Moscow'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')

# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'

try:
    from psycho_test.local_settings import *
except ImportError:
    pass
| gpl-3.0 | 4,785,241,607,220,612,000 | 25.64539 | 91 | 0.693372 | false |
jmartinm/inspire-next | inspire/modules/harvester/tasks/__init__.py | 1 | 1539 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from invenio.celery import celery
@celery.task()
def run_harvest(workflow, **kwargs):
    """Run given harvesting workflow in Celery."""
    from invenio_base.globals import cfg
    from invenio_workflows.models import BibWorkflowObject

    args = {
        "workflow": workflow
    }
    args.update(**kwargs)

    data = BibWorkflowObject.create_object()
    extra_data = data.get_extra_data()
    extra_data["args"] = args
    extra_data["config"] = cfg["HARVESTER_WORKFLOWS_CONFIG"].get(workflow, {})
    data.set_extra_data(extra_data)
    data.set_data({})
    data.save()
    data.start_workflow(workflow, delayed=True)
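

# --- Usage sketch (illustrative; the workflow name and kwargs are
# hypothetical). Being a Celery task, run_harvest is normally enqueued
# asynchronously on a worker rather than called directly:
#
#     run_harvest.delay("arxiv_harvest", from_date="2015-01-01")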
| gpl-2.0 | -1,044,023,978,882,769,200 | 33.977273 | 78 | 0.721248 | false |
adrn/streams | scripts/submit.py | 1 | 4681 | # coding: utf-8
""" Create and submit a job to the cluster given a streams config file. """
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os, sys
import logging
from subprocess import Popen, PIPE
import cStringIO as StringIO
import yaml
# Create logger
logger = logging.getLogger(__name__)
job_sh = """#!/bin/sh
# Directives
#PBS -N {name}
#PBS -W group_list={group:s}astro
#PBS -l nodes={nodes:d}:ppn={ppn:d}{ib},walltime={time},mem={memory}
#PBS -M [email protected]
#PBS -m abe
#PBS -V
# Set output and error directories
#PBS -o localhost:{astro:s}/pbs_output
#PBS -e localhost:{astro:s}/pbs_output
cd $STREAMSPATH
pwd
# print date and time to file
date
#Command to execute Python program
mpiexec -n {mpi_threads:d} {astro:s}/{py:s}/bin/python {astro:s}/projects/streams/scripts/{script} -f {astro:s}/projects/streams/config/{config_file} -v --mpi {overwrite} {contin}
{astro:s}/{py:s}/bin/python {astro:s}/projects/streams/scr\
ipts/check_acor.py -f {astro:s}/projects/streams/config/{config_file}
date
#End of script
"""
def main(config_file, mpi_threads=None, walltime=None, memory=None,
         job_name=None, astro=None, overwrite=False, infiniband=False,
         continue_sampler=False):

    # Read in simulation parameters from config file
    with open(config_file) as f:
        config = yaml.load(f.read())

    if job_name is None:
        if config.has_key("name"):
            name = config["name"]
        else:
            name = "adrn"
    else:
        name = job_name

    if mpi_threads is None:
        mpi_threads = 999999
    mpi_threads = min(config.get("walkers"), 256, mpi_threads)

    group = astro.split("/")[1]
    if group == "vega":
        group = 'yeti'
        nproc_per_node = 16
        py = "anaconda"
    else:
        nproc_per_node = 8
        py = "yt-x86_64"

    if overwrite:
        ovr = "-o"
    else:
        ovr = ""

    if continue_sampler:
        cnt = "--continue"
    else:
        cnt = ""

    if infiniband:
        ib = ":ib"
    else:
        ib = ""

    sh = job_sh.format(mpi_threads=mpi_threads,
                       nodes=mpi_threads // nproc_per_node,
                       time=walltime,
                       memory=memory,
                       config_file=os.path.basename(config_file),
                       name=name,
                       script=config["script"],
                       astro=astro,
                       group=group,
                       overwrite=ovr,
                       ppn=nproc_per_node,
                       ib=ib,
                       contin=cnt,
                       py=py)

    yn = raw_input("About to submit the following job: \n\n{0}\n\n Is "
                   "this right? [y]/n: ".format(sh))

    if yn.strip().lower() == "y" or yn.strip() == "":
        p = Popen(['qsub -'], stdout=PIPE, stdin=PIPE, stderr=PIPE, shell=True)
        stdout_data = p.communicate(input=sh)[0]
        print("\n\n")
        print("Job started: {0}".format(stdout_data.split(".")[0]))
    else:
        sys.exit(1)


if __name__ == "__main__":
    from argparse import ArgumentParser

    parser = ArgumentParser(description="")
    parser.add_argument("-f", "--file", dest="file", required=True,
                        help="Path to the configuration file to run with.")
    parser.add_argument("--walltime", dest="time", default="12:00:00",
                        help="Amount of time to request.")
    parser.add_argument("--memory", dest="memory", default="32gb",
                        help="Amount of memory to request.")
    parser.add_argument("--name", dest="job_name", default=None,
                        help="The name of the job.")
    parser.add_argument("--threads", dest="mpi_threads", default=None,
                        type=int, help="The number of MPI threads.")
    parser.add_argument("-o", "--overwrite", dest="overwrite", default=False,
                        action="store_true", help="")
    parser.add_argument("--ib", dest="infiniband", default=False,
                        action="store_true", help="")
    parser.add_argument("--continue", dest="continue_sampler", default=False,
                        action="store_true", help="")

    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG)

    filename = os.path.join(os.environ['STREAMSPATH'], args.file)
    main(filename, mpi_threads=args.mpi_threads, walltime=args.time,
         memory=args.memory, job_name=args.job_name, astro=os.environ['ASTRO'],
         overwrite=args.overwrite, infiniband=args.infiniband,
         continue_sampler=args.continue_sampler)

    sys.exit(0)
| mit | -8,872,010,152,076,321,000 | 30 | 179 | 0.573595 | false |
mxmaslin/Test-tasks | tests_python/Wargaming Forge Task/Username_task_1_src.py | 1 | 1692 | # -*- coding: utf-8 -*-
import os
from operator import itemgetter


def closest(p, c, n):
    # pick whichever of p and n is closer to c
    try:
        ptup = p, teams[c] - teams[p]
    except KeyError:
        ptup = c, teams[c]
    try:
        ntup = n, teams[n] - teams[c]
    except KeyError:
        ntup = c, teams[c]
    return min([ptup, ntup], key=itemgetter(1))[0]
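

# Worked example (hypothetical ratings): with teams = {'a': 10, 'b': 13, 'c': 19},
# closest('a', 'b', 'c') compares 13 - 10 = 3 against 19 - 13 = 6 and returns
# 'a', i.e. the neighbour on the cheaper side of 'b' in rating-sorted order.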
for root, dirs, files in os.walk(os.path.join('Wargaming Forge Task', 'task_1_data')):
    if not files:
        continue

    users = dict()
    # build a dict of user_id:user_rating pairs
    with open(os.path.join(root, files[0])) as f_players:
        for line in f_players:
            print(line)
            user_id, user_rating = line.split()
            users[user_id] = user_rating

    teams = dict()
    # build a dict of team_id:team_rating pairs
    with open(os.path.join(root, files[1])) as f_teams:
        for line in f_teams:
            print(line)
            team_id, *user_ids = line.split()
            teams[team_id] = sum([int(users[x]) for x in user_ids])

    # find a suitable pair for each team
    teams_sorted = sorted(teams, key=teams.__getitem__)
    for p, c, n in zip([None] + teams_sorted[:-1],
                       teams_sorted,
                       teams_sorted[1:] + [None]):
        t2 = closest(p, c, n)
        test_name = root.split('/')[-1]
        print(c, t2)
        with open(os.path.join('Username_task_1_team_pairs', f'{test_name}_pairs.txt'), 'a') as f_result:
            if c != t2:
                f_result.write(f'{c} {t2}\n')
            else:
                f_result.write(c)
| gpl-3.0 | -4,939,634,655,291,112,000 | 26.288136 | 106 | 0.53354 | false |
libyal/libcreg | tests/pycreg_test_support.py | 1 | 3621 | #!/usr/bin/env python
#
# Python-bindings support functions test script
#
# Copyright (C) 2013-2021, Joachim Metz <[email protected]>
#
# Refer to AUTHORS for acknowledgements.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import os
import sys
import unittest
import pycreg
class SupportFunctionsTests(unittest.TestCase):
    """Tests the support functions."""

    def test_get_version(self):
        """Tests the get_version function."""
        version = pycreg.get_version()
        self.assertIsNotNone(version)

    def test_check_file_signature(self):
        """Tests the check_file_signature function."""
        test_source = unittest.source
        if not test_source:
            raise unittest.SkipTest("missing source")

        result = pycreg.check_file_signature(test_source)
        self.assertTrue(result)

    def test_check_file_signature_file_object(self):
        """Tests the check_file_signature_file_object function."""
        test_source = unittest.source
        if not test_source:
            raise unittest.SkipTest("missing source")

        with open(test_source, "rb") as file_object:
            result = pycreg.check_file_signature_file_object(file_object)
        self.assertTrue(result)
    def test_open(self):
        """Tests the open function."""
        test_source = unittest.source
        if not test_source:
            raise unittest.SkipTest("missing source")

        creg_file = pycreg.open(test_source)
        self.assertIsNotNone(creg_file)

        creg_file.close()

        with self.assertRaises(TypeError):
            pycreg.open(None)

        with self.assertRaises(ValueError):
            pycreg.open(test_source, mode="w")

    def test_open_file_object(self):
        """Tests the open_file_object function."""
        test_source = unittest.source
        if not test_source:
            raise unittest.SkipTest("missing source")

        if not os.path.isfile(test_source):
            raise unittest.SkipTest("source not a regular file")

        with open(test_source, "rb") as file_object:
            creg_file = pycreg.open_file_object(file_object)
            self.assertIsNotNone(creg_file)

            creg_file.close()

            with self.assertRaises(TypeError):
                pycreg.open_file_object(None)

            with self.assertRaises(ValueError):
                pycreg.open_file_object(file_object, mode="w")


if __name__ == "__main__":
    argument_parser = argparse.ArgumentParser()

    argument_parser.add_argument(
        "source", nargs="?", action="store", metavar="PATH",
        default=None, help="path of the source file.")

    options, unknown_options = argument_parser.parse_known_args()
    unknown_options.insert(0, sys.argv[0])

    setattr(unittest, "source", options.source)

    unittest.main(argv=unknown_options, verbosity=2)
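
# Usage sketch (illustrative path): the source file is passed as the
# positional argument; unknown options are forwarded to unittest.main():
#
#     python pycreg_test_support.py /path/to/USER.DAT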
| lgpl-3.0 | -3,767,197,005,364,257,300 | 29.686441 | 77 | 0.705606 | false |
blublud/networkx | networkx/algorithms/__init__.py | 1 | 3175 | from networkx.algorithms.assortativity import *
from networkx.algorithms.block import *
from networkx.algorithms.boundary import *
from networkx.algorithms.centrality import *
from networkx.algorithms.cluster import *
from networkx.algorithms.clique import *
from networkx.algorithms.community import *
from networkx.algorithms.components import *
from networkx.algorithms.coloring import *
from networkx.algorithms.core import *
from networkx.algorithms.cycles import *
from networkx.algorithms.dag import *
from networkx.algorithms.distance_measures import *
from networkx.algorithms.dominance import *
from networkx.algorithms.dominating import *
from networkx.algorithms.hierarchy import *
from networkx.algorithms.hybrid import *
from networkx.algorithms.matching import *
from networkx.algorithms.minors import *
from networkx.algorithms.mis import *
from networkx.algorithms.link_analysis import *
from networkx.algorithms.link_prediction import *
from networkx.algorithms.operators import *
from networkx.algorithms.shortest_paths import *
from networkx.algorithms.smetric import *
from networkx.algorithms.triads import *
from networkx.algorithms.traversal import *
from networkx.algorithms.isolate import *
from networkx.algorithms.euler import *
from networkx.algorithms.vitality import *
from networkx.algorithms.chordal import *
from networkx.algorithms.richclub import *
from networkx.algorithms.distance_regular import *
from networkx.algorithms.swap import *
from networkx.algorithms.graphical import *
from networkx.algorithms.simple_paths import *
import networkx.algorithms.assortativity
import networkx.algorithms.bipartite
import networkx.algorithms.centrality
import networkx.algorithms.cluster
import networkx.algorithms.clique
import networkx.algorithms.components
import networkx.algorithms.connectivity
import networkx.algorithms.coloring
import networkx.algorithms.flow
import networkx.algorithms.isomorphism
import networkx.algorithms.link_analysis
import networkx.algorithms.shortest_paths
import networkx.algorithms.traversal
import networkx.algorithms.chordal
import networkx.algorithms.operators
import networkx.algorithms.tree
# bipartite
from networkx.algorithms.bipartite import (projected_graph, project, is_bipartite,
    complete_bipartite_graph)
# connectivity
from networkx.algorithms.connectivity import (minimum_edge_cut, minimum_node_cut,
    average_node_connectivity, edge_connectivity, node_connectivity,
    stoer_wagner, all_pairs_node_connectivity, all_node_cuts, k_components)
# isomorphism
from networkx.algorithms.isomorphism import (is_isomorphic, could_be_isomorphic,
    fast_could_be_isomorphic, faster_could_be_isomorphic)
# flow
from networkx.algorithms.flow import (maximum_flow, maximum_flow_value,
    minimum_cut, minimum_cut_value, capacity_scaling, network_simplex,
    min_cost_flow_cost, max_flow_min_cost, min_cost_flow, cost_of_flow)

from .tree.recognition import *
from .tree.mst import *
from .tree.branchings import (
    maximum_branching, minimum_branching,
    maximum_spanning_arborescence, minimum_spanning_arborescence
)
from networkx.algorithms.kernels import *
import networkx.algorithms.kernels | bsd-3-clause | 3,198,548,627,527,594,000 | 38.209877 | 82 | 0.830236 | false |
deepmind/jax_verify | jax_verify/src/nonconvex_duals.py | 1 | 25046 | # coding=utf-8
# Copyright 2020 The jax_verify Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement the dual computations for the NonConvex Reformulation."""
import collections
import functools
import jax
import jax.numpy as jnp
from jax_verify.src import ibp
from jax_verify.src import nonconvex
Tensor = jnp.ndarray
def _sum_fn(fn, *args, **kwargs):
    out = fn(*args, **kwargs)
    summand = out[0] if isinstance(out, tuple) else out
    return summand.sum(), out


def _sum_over_acts(var):
    return var.sum(axis=tuple(range(2, var.ndim)))
class WolfeNonConvexBound(nonconvex.NonConvexBound):
    """This subclass allows the computation of the WolfeDual.

    This is done through the `wolfe_dual_fn`, which propagates dual variables
    backwards and computes the contribution of this layer to the wolfe_dual.
    """

    def __init__(self, wolfe_dual_fn,
                 index, shape, previous_bounds,
                 eval_fn, variables, concretizer):
        """Create a NonConvexBound that can compute the WolfeDual.

        Args:
          wolfe_dual_fn: Function performing backward propagation of bounds for
            the wolfe dual and computing the contribution of this layer to the
            dual.
          index: Unique index representing the position of this activation in
            the computation graph.
          shape: Shape of the activations that this bound represents.
          previous_bounds: Dict mapping index of activation to the bound
            representing it. We need it to be able to obtain the contributions
            of previous layers to the Lagrangian.
          eval_fn: Function to evaluate the bound computation problem in the
            primal.
          variables: Dict mapping index of activation to the shape of variables
            required to optimize them.
          concretizer: Instance of Concretizer, used to obtain bounds over the
            activation this object represents.
        """
        super(WolfeNonConvexBound, self).__init__(index, shape, previous_bounds,
                                                  eval_fn, variables,
                                                  concretizer)
        self.wolfe_dual_fn = wolfe_dual_fn

    def dual(self, var_set, objectives):
        acts = {}
        pos_dvar = jnp.maximum(objectives, 0.)
        neg_dvar = jnp.maximum(-objectives, 0.)
        dual_vars = {self.index: (pos_dvar, neg_dvar)}
        primal, acts = self.primal_fn(var_set, objectives)

        # Compute the dual variables and, at the same time, collect the terms
        # of the wolfe dual.
        dual_gap = 0
        index_bound_list = list(self.previous_bounds.items())
        for index, intermediate_bound in reversed(index_bound_list):
            wolfe_dual_fn = intermediate_bound.wolfe_dual_fn
            pos_dvar, neg_dvar = dual_vars[index]
            wolfe_dual_contrib = wolfe_dual_fn(var_set, pos_dvar, neg_dvar,
                                               acts, dual_vars)
            dual_gap = dual_gap + wolfe_dual_contrib

        wolfe_dual = primal + dual_gap
        return primal, wolfe_dual

    @classmethod
    def get_initial_bound_constructor(cls, index, lb, ub):
        def wolfe_dual_fn(var_set, pos_dvar, neg_dvar, acts, dual_vars):
            del var_set
            del dual_vars
            x_0 = acts[index]
            dual_contrib = _sum_over_acts(pos_dvar * (lb - x_0)
                                          + neg_dvar * (x_0 - ub))
            return dual_contrib
        return functools.partial(cls, wolfe_dual_fn)

    @classmethod
    def get_linear_activation_constructor(cls, index, vlin_fun, in_vals):
        def wolfe_dual_fn(var_set, pos_dvar, neg_dvar, acts, dual_vars):
            all_inps = [nonconvex.eval_if_nonconvexbound(inp, var_set, None, acts)
                        for inp in in_vals]
            posdvarp_fun = lambda x: (pos_dvar * vlin_fun(x)).sum()
            negdvarq_fun = lambda x: (neg_dvar * vlin_fun(x)).sum()
            all_pp_dvars = jax.grad(posdvarp_fun)(all_inps)
            all_nq_dvars = jax.grad(negdvarq_fun)(all_inps)
            prev_dvars = [pp_dvar - nq_dvar
                          for pp_dvar, nq_dvar in zip(all_pp_dvars, all_nq_dvars)]
            for inp, prev_dvar in zip(in_vals, prev_dvars):
                if not isinstance(inp, nonconvex.NonConvexBound):
                    continue
                prev_posdvar = jnp.maximum(prev_dvar, 0.)
                prev_negdvar = jnp.maximum(-prev_dvar, 0.)
                if inp.index in dual_vars:
                    prev_posdvar = dual_vars[inp.index][0] + prev_posdvar
                    prev_negdvar = dual_vars[inp.index][1] + prev_negdvar
                dual_vars[inp.index] = (prev_posdvar, prev_negdvar)
            # There shouldn't be a contrib term here, everything cancels out.
            return 0
        return functools.partial(cls, wolfe_dual_fn)

    @classmethod
    def get_nonlinearity_activation_constructor(cls, index, inp, act_type,
                                                lb_fun, ub_fun):
        def wolfe_dual_fn(var_set, pos_dvar, neg_dvar, acts, dual_vars):
            inp_val = inp.evaluate(var_set, {}, acts)
            lb_val = lb_fun(inp_val)
            ub_val = ub_fun(inp_val)

            grad_lb = jax.grad(lambda x: lb_fun(x).sum())(inp_val)
            grad_ub = jax.grad(lambda x: ub_fun(x).sum())(inp_val)

            prev_dvar = (pos_dvar * grad_lb - neg_dvar * grad_ub)
            prev_posdvar = jnp.maximum(prev_dvar, 0.)
            prev_negdvar = jnp.maximum(-prev_dvar, 0.)
            if inp.index in dual_vars:
                prev_posdvar = dual_vars[inp.index][0] + prev_posdvar
                prev_negdvar = dual_vars[inp.index][1] + prev_negdvar
            dual_vars[inp.index] = (prev_posdvar, prev_negdvar)

            theta = var_set[index]
            out_val = lb_val + theta * (ub_val - lb_val)
            dual_contrib = _sum_over_acts(neg_dvar * (out_val - ub_val)
                                          + pos_dvar * (lb_val - out_val))
            return dual_contrib
        return functools.partial(cls, wolfe_dual_fn)
def _initial_lagrangian_term(dvar, lb, ub, x):
    pos_dvar = jnp.maximum(dvar, 0.)
    neg_dvar = jnp.maximum(-dvar, 0.)
    dual_contrib = (neg_dvar * (x - ub) + pos_dvar * (lb - x))
    return dual_contrib
class LinLagrangianNonConvexBound(nonconvex.NonConvexBound):
    """This subclass allows the computation of the Linearized Lagrangian dual.

    The lagrangian and its linearization are obtained through the
    `lagrangian_level_fn`, which computes the contribution of this layer to the
    lagrangian, based on precomputed activations.
    The minimization of a linear function (such as the linearized lagrangian)
    over the feasible domain is done through the `bounding_fn` function.
    """

    def __init__(self, lagrangian_level_fn, bounding_fn,
                 index, shape, previous_bounds,
                 eval_fn, variables, concretizer):
        """Create a NonConvexBound that can compute the Linearized Lagrangian dual.

        Args:
          lagrangian_level_fn: Function returning the contribution of this
            layer to the lagrangian, based on precomputed activations.
          bounding_fn: Function to perform linear minimization over the domain
            of an activation.
          index: Unique index representing the position of this activation in
            the computation graph.
          shape: Shape of the activations that this bound represents.
          previous_bounds: Dict mapping index of activation to the bound
            representing it. We need it to be able to obtain the contributions
            of previous layers to the Lagrangian.
          eval_fn: Function to evaluate the bound computation problem in the
            primal.
          variables: Dict mapping index of activation to the shape of variables
            required to optimize them.
          concretizer: Instance of Concretizer, used to obtain bounds over the
            activation this object represents.
        """
        super(LinLagrangianNonConvexBound, self).__init__(
            index, shape, previous_bounds, eval_fn, variables, concretizer)
        self.lagrangian_level_fn = lagrangian_level_fn
        self.bounding_fn = bounding_fn

        def lagrangian(acts, objectives, dual_vars):
            final_acts = acts[self.index]
            primals = _sum_over_acts(final_acts * objectives)
            lagrangian = primals
            for index, intermediate_bound in self.previous_bounds.items():
                lagrangian_level_fn = intermediate_bound.lagrangian_level_fn
                dvar = dual_vars[index]
                contrib = lagrangian_level_fn(dvar, acts)
                lagrangian += contrib
            return lagrangian, primals

        self._lagrangian_fn = lagrangian
        self._lagrangian_sumfn = functools.partial(_sum_fn, lagrangian)

    def dual(self, var_set, objectives):
        dual_vars, acts = self._compute_derivatives_dualvars(var_set, objectives)

        # Compute the gradients of all the lagrangians (done by taking their
        # sum), with regards to the activations.
        lag_grad_fun = jax.value_and_grad(self._lagrangian_sumfn, argnums=0,
                                          has_aux=True)
        ((_, (lagrangians, primals)),
         laggrad_wrt_acts) = lag_grad_fun(acts, objectives, dual_vars)

        lin_duals = lagrangians
        for index, intermediate_bound in self.previous_bounds.items():
            bounding_fn = intermediate_bound.bounding_fn
            lag_grad = laggrad_wrt_acts[index]
            contrib = bounding_fn(lag_grad, acts)
            lin_duals += contrib
        return primals, lin_duals

    @classmethod
    def get_initial_bound_constructor(cls, index, lb, ub):
        def lagrangian_level_fn(dvar, acts):
            x_0 = acts[index]
            dual_contrib = _sum_over_acts(_initial_lagrangian_term(dvar, lb, ub, x_0))
            return dual_contrib

        def bounding_fn(lag_grad, acts):
            x_0 = acts[index]
            bound_contrib = _sum_over_acts(jnp.maximum(lag_grad, 0.) * (lb - x_0) +
                                           jnp.minimum(lag_grad, 0.) * (ub - x_0))
            return bound_contrib

        return functools.partial(cls, lagrangian_level_fn, bounding_fn)

    @classmethod
    def get_linear_activation_constructor(cls, index, vlin_fun, in_vals):
        def lagrangian_level_fn(dvar, acts):
            act_inp_eval = [
                acts[inp.index] if isinstance(inp, nonconvex.NonConvexBound) else inp
                for inp in in_vals]
            # Because this is linear, the function is both the lower bound and
            # the upper bound.
            act_out_eval = acts[index]
            f_inp_eval = vlin_fun(act_inp_eval)
            dual_contrib = _sum_over_acts(dvar * (f_inp_eval - act_out_eval))
            return dual_contrib

        def bounding_fn(lag_grad, acts):
            act_out_eval = acts[index]
            # We need to minimize the dot product between the lagrangian and
            # the output of that linear layer. Let's take the gradient (because
            # everything is linear) and then we can simply assign bounds based
            # on the sign of the gradient coefficients.
            dot_lagrangian_output = lambda x: (lag_grad * vlin_fun(x)).sum()
            act_inp_eval = [
                acts[inp.index] if isinstance(inp, nonconvex.NonConvexBound) else inp
                for inp in in_vals]

            minimizing_inps = []
            grads = jax.grad(dot_lagrangian_output)(act_inp_eval)
            for inp, grad in zip(in_vals, grads):
                if isinstance(inp, nonconvex.NonConvexBound):
                    broad_lb = jnp.expand_dims(inp.lower, 1)
                    broad_ub = jnp.expand_dims(inp.upper, 1)
                    minimizing_inps.append(jnp.where(grad >= 0, broad_lb, broad_ub))
                else:
                    minimizing_inps.append(inp)

            bound_contrib = _sum_over_acts((vlin_fun(minimizing_inps) - act_out_eval)
                                           * lag_grad)
            return bound_contrib

        return functools.partial(cls, lagrangian_level_fn, bounding_fn)

    @classmethod
    def get_nonlinearity_activation_constructor(cls, index, inp, act_type,
                                                lb_fun, ub_fun):
        def lagrangian_level_fn(dvar, acts):
            pos_dvar = jnp.maximum(dvar, 0.)
            neg_dvar = jnp.maximum(-dvar, 0.)
            act_inp_eval = acts[inp.index]
            act_out_eval = acts[index]
            lb_val = lb_fun(act_inp_eval)
            ub_val = ub_fun(act_inp_eval)
            dual_contrib = _sum_over_acts(neg_dvar * (act_out_eval - ub_val)
                                          + pos_dvar * (lb_val - act_out_eval))
            return dual_contrib

        # We consider convex monotonous activation functions, so
        # - The lower bound is exact.
        # - The lower/upper bound on the output can be obtained by forwarding
        #   through the exact function the lower/upper bound on the input.
        out_lb = lb_fun(inp.lower)
        out_ub = lb_fun(inp.upper)

        def bounding_fn(lag_grad, acts):
            act_out_eval = acts[index]
            lb_val = jnp.expand_dims(out_lb, 1)
            ub_val = jnp.expand_dims(out_ub, 1)
            bound_contrib = _sum_over_acts(
                jnp.maximum(lag_grad, 0.) * (lb_val - act_out_eval)
                + jnp.minimum(lag_grad, 0.) * (ub_val - act_out_eval))
            return bound_contrib

        return functools.partial(cls, lagrangian_level_fn, bounding_fn)
class MinLagrangianNonConvexBound(nonconvex.NonConvexBound):
    """This subclass allows the computation of the primal minimized lagrangian.

    The contributions of each primal variable are collected by the
    `lagrangian_varterms_fn`. It does not directly compute the lagrangian but
    fills in a dictionary mapping variables to the terms that involve them.
    This is done so that we can reorganize the lagrangian per variable, and
    then minimize it one variable at a time.
    """

    def __init__(self, lagrangian_varterms_fn,
                 index, shape, previous_bounds,
                 eval_fn, variables, concretizer):
        """Create a NonConvexBound that can compute the primal minimized Lagrangian.

        Args:
          lagrangian_varterms_fn: Function filling in a dictionary mapping each
            variable to the terms involving it in the lagrangian.
          index: Unique index representing the position of this activation in
            the computation graph.
          shape: Shape of the activations that this bound represents.
          previous_bounds: Dict mapping index of activation to the bound
            representing it. We need it to be able to obtain the contributions
            of previous layers to the Lagrangian.
          eval_fn: Function to evaluate the bound computation problem in the
            primal.
          variables: Dict mapping index of activation to the shape of variables
            required to optimize them.
          concretizer: Instance of Concretizer, used to obtain bounds over the
            activation this object represents.
        """
        super(MinLagrangianNonConvexBound, self).__init__(
            index, shape, previous_bounds, eval_fn, variables, concretizer)
        self.lagrangian_varterms_fn = lagrangian_varterms_fn

    def collect_lagrangian_varterms(self, objectives, dual_vars):
        lagrangian_dict = collections.defaultdict(list)
        for index, intermediate_bound in self.previous_bounds.items():
            lagrangian_varterms_fn = intermediate_bound.lagrangian_varterms_fn
            dvar = dual_vars[index]
            lagrangian_varterms_fn(dvar, lagrangian_dict)
        return lagrangian_dict

    def dual(self, var_set, objectives):
        dual_vars, acts = self._compute_derivatives_dualvars(var_set, objectives)
        nb_targets = objectives.shape[1]

        # Compute the primals. This is not based on the activation minimizing
        # the lagrangian (because those are not necessarily primal feasible)
        final_acts = acts[self.index]
        primals = _sum_over_acts(final_acts * objectives)

        lagrangian_terms = self.collect_lagrangian_varterms(objectives, dual_vars)
        # For each item in the network, we have a list of all the terms it is
        # involved in. Let's use this to minimize the lagrangian.
        opt_acts = {}
        for index, lag_terms in lagrangian_terms.items():
            intermediate_bound = self.previous_bounds[index]
            broad_lb = jnp.repeat(jnp.expand_dims(intermediate_bound.lower, axis=1),
                                  nb_targets, axis=1)
            broad_ub = jnp.repeat(jnp.expand_dims(intermediate_bound.upper, axis=1),
                                  nb_targets, axis=1)
            opt_acts[index] = _optimize_lagrangian_terms(lag_terms,
                                                         broad_lb, broad_ub)

        final_opt_acts = opt_acts[self.index]
        opt_primals = _sum_over_acts(final_opt_acts * objectives)

        minimized_lagrangian = opt_primals
        for index, lag_terms in lagrangian_terms.items():
            for term in lag_terms:
                out_term = term[1](opt_acts[index])
                minimized_lagrangian = minimized_lagrangian + _sum_over_acts(out_term)

        return primals, minimized_lagrangian

    @classmethod
    def get_initial_bound_constructor(cls, index, lb, ub):
        def lagrangian_varterms_fn(dvar, lagrangian_dict):
            lagrangian_dict[index].append(
                ('Linear', functools.partial(_initial_lagrangian_term, dvar, lb, ub)))
        return functools.partial(cls, lagrangian_varterms_fn)

    @classmethod
    def get_linear_activation_constructor(cls, index, vlin_fun, in_vals):
        def lagrangian_varterms_fn(dvar, lagrangian_dict):
            # There is a linear term of dvar over the outputs.
            lagrangian_dict[index].append(('Linear', lambda x: (-dvar * x)))

            # If only one of the inputs is a variable, we can do things in a
            # simple way. Special casing this pattern avoids a bunch of
            # failures on TPUs.
            inp_is_bound = list(isinstance(inp, nonconvex.NonConvexBound)
                                for inp in in_vals)
            if sum(inp_is_bound) == 1:
                bound_arg_pos = inp_is_bound.index(True)
                # The linear function has only one input, so we can just use it
                # directly.
                def single_input_vlin_fun(x):
                    inps = [inp if not is_bound else x
                            for inp, is_bound in zip(in_vals, inp_is_bound)]
                    return dvar * vlin_fun(inps)
                lagrangian_dict[in_vals[bound_arg_pos].index].append(
                    ('Linear', single_input_vlin_fun))
            else:
                # There are multiple inputs, so we need to separate the
                # contribution of each one, and assign the bias to one of them.
                inps = []
                for inp in in_vals:
                    if isinstance(inp, nonconvex.NonConvexBound):
                        # Add the opt dimension, and put in all the examples to
                        # 0, so that we can identify the bias term.
                        shape = inp.shape
                        inp_shape = (shape[0], dvar.shape[1]) + shape[1:]
                        example_inp = jnp.zeros(inp_shape)
                        inps.append(example_inp)
                    else:
                        inps.append(inp)

                # Get the linear term over the inputs through auto-diff
                def lag_inp_contrib(x):
                    contrib = dvar * vlin_fun(x)
                    contrib = _sum_over_acts(contrib)
                    return contrib.sum(), contrib

                (_, bias), grads = jax.value_and_grad(lag_inp_contrib,
                                                      has_aux=True)(inps)

                grad_dot_prod = lambda grad, bias, x: _sum_over_acts(grad * x) + bias

                for inp, grad in zip(in_vals, grads):
                    if isinstance(inp, nonconvex.NonConvexBound):
                        lagrangian_dict[inp.index].append(
                            ('Linear', functools.partial(grad_dot_prod, grad, bias)))
                        # Zero out the bias now that it has been included in
                        # one term.
                        bias = 0. * bias

        return functools.partial(cls, lagrangian_varterms_fn)

    @classmethod
    def get_nonlinearity_activation_constructor(cls, index, inp, act_type,
                                                lb_fun, ub_fun):
        def lagrangian_varterms_fn(dvar, lagrangian_dict):
            # There is a linear term of dvar over the outputs.
            lagrangian_dict[index].append(('Linear', lambda x: (-dvar * x)))
            # For the inputs, there is a linear term through the upper bound:
            pos_dvar = jnp.maximum(dvar, 0.)
            neg_dvar = jnp.maximum(-dvar, 0.)
            negdvar_dot_ub = lambda x: (-neg_dvar * ub_fun(x))
            lagrangian_dict[inp.index].append(('Linear', negdvar_dot_ub))
            # For the inputs, there is a ReLU term through the lower bound
            lagrangian_dict[inp.index].append(
                (act_type, lambda x: (pos_dvar * lb_fun(x))))
        return functools.partial(cls, lagrangian_varterms_fn)
def _optimize_lagrangian_terms(lagrangian_terms, lower_bound, upper_bound):
    """Minimize the part of the lagrangian corresponding to a given variable.

    Args:
      lagrangian_terms: A list of the terms involving that variable.
      lower_bound: A tensor with the lower bound on the variable to optimize.
      upper_bound: A tensor with the upper bound on the variable to optimize.
    Returns:
      opt_act: A tensor with the inputs minimizing the lagrangian terms for
        each optimization target.
    """
    act_term = None

    # Get the total linear term
    def linear_term(x):
        out = 0
        for term in lagrangian_terms:
            if term[0] == 'Linear':
                out += term[1](x).sum()
        return out

    # Identify the NonLinear term if there is one
    for term in lagrangian_terms:
        if term[0] in _lagrangian_opt_fns:
            if act_term is not None:
                raise ValueError('Variable involved in several activations.')
            act_term = term
        elif term[0] == 'Linear':
            continue
        else:
            raise ValueError('Unexpected contribution.')

    # Perform the minimization
    lin_coeffs = jax.grad(linear_term)(lower_bound)
    if act_term is None:
        # This does not involve a non linearity, this is just a linear term
        return jnp.where(lin_coeffs >= 0, lower_bound, upper_bound)
    else:
        return _lagrangian_opt_fns[act_term[0]](lin_coeffs, act_term[1],
                                                lower_bound, upper_bound)
def _optimize_softplus_lagrangian(lin_coeffs, nonlin_term,
                                  lower_bound, upper_bound):
    """Compute the input minimizing a sum of a linear term and a softplus.

    To minimize a * softplus(x) + b * x,
    either cancelling the gradient is feasible:
          a * (1 / (1 + exp(-x))) + b = 0
      <=> a + b * (1 + exp(-x)) = 0
      <=> - (a + b) / b = exp(-x)
      <=> x = ln(- b / (a + b))

    If b = 0, this is just normal linear minimization.
    If b / (a + b) > 0, there is no point where the gradient cancels, which
    means that the minimum will be obtained at one of the extrema. We can
    simply do linear minimization with the gradient.
    Otherwise, the minimum is at x = ln(-b / (a + b)), clipped to valid bounds.

    Args:
      lin_coeffs: b in the previous equation.
      nonlin_term: x -> a * softplus(x)
      lower_bound: Lower bound on the input we're minimizing over.
      upper_bound: Upper bound on the input we're minimizing over.
    Returns:
      opt_act: A tensor with the inputs minimizing the function specified.
    """
    # Get the coefficients on the softplus
    dummy_inp = jnp.ones_like(lower_bound)
    softplus_coeffs = nonlin_term(dummy_inp) / jax.nn.softplus(dummy_inp)

    grad_at_lb = lin_coeffs + softplus_coeffs * jax.nn.sigmoid(lower_bound)

    # Check conditions where we can disregard the 0-gradient solution
    safe_denom = jnp.where(lin_coeffs + softplus_coeffs != 0,
                           lin_coeffs + softplus_coeffs, 1e-12)
    inner_log = -lin_coeffs / safe_denom
    safe_inner_log = jnp.where(inner_log > 0,
                               inner_log, jnp.ones_like(inner_log))
    zero_grad_infeasible = jnp.any(
        jnp.stack([(lin_coeffs + jnp.zeros_like(softplus_coeffs)) == 0,
                   lin_coeffs + softplus_coeffs == 0,
                   inner_log <= 0], axis=0), axis=0)
    return jnp.where(zero_grad_infeasible,
                     jnp.where(grad_at_lb >= 0, lower_bound, upper_bound),
                     jnp.clip(jnp.log(safe_inner_log),
                              a_min=lower_bound, a_max=upper_bound))
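

# --- Numeric sanity check (illustrative, not used by the library): verify the
# stationary point x* = ln(-b / (a + b)) derived in the docstring above, for
# f(x) = a * softplus(x) + b * x with hypothetical a=2., b=-1.:
#
#     a, b = 2., -1.
#     x_star = jnp.log(-b / (a + b))                 # log(1.) == 0.
#     df = jax.grad(lambda x: a * jax.nn.softplus(x) + b * x)
#     df(x_star)                                     # a * sigmoid(0.) + b == 0.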
def _optimize_relu_lagrangian(lin_coeffs, nonlin_term,
                              lower_bound, upper_bound):
    """Compute the input minimizing a sum of a linear term and a ReLU.

    To minimize a * relu(x) + b * x,
    we know that the function is piecewise linear. We will stack the three
    possible solutions along axis = 0 and then keep the minimum one.

    Args:
      lin_coeffs: b in the previous equation.
      nonlin_term: x -> a * relu(x)
      lower_bound: Lower bound on the input we're minimizing over.
      upper_bound: Upper bound on the input we're minimizing over.
    Returns:
      opt_act: A tensor with the inputs minimizing the function specified.
    """
    zero_inp = jnp.zeros_like(lower_bound)
    possible_inps = jnp.stack([
        lower_bound,
        jnp.clip(zero_inp, a_min=lower_bound, a_max=upper_bound),
        upper_bound], axis=0)
    out_val = lin_coeffs * possible_inps + nonlin_term(possible_inps)
    choice = out_val.argmin(axis=0)
    return jnp.choose(choice, possible_inps, mode='clip')
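

# --- Worked example (illustrative): minimizing a * relu(x) + b * x with a=1.,
# b=-2. over x in [-1., 1.]. The three stacked candidates above are
# x in {-1., 0., 1.} with objective values {2., 0., -1.}, so x = 1. is chosen.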
_lagrangian_opt_fns = {
    'ReLU': _optimize_relu_lagrangian,
    'Softplus': _optimize_softplus_lagrangian
}


nonconvex_ibp_bound_propagation = functools.partial(
    nonconvex.build_nonconvex_formulation,
    WolfeNonConvexBound,
    lambda: nonconvex.BaseBoundConcretizer(ibp.bound_transform)
)
| apache-2.0 | 3,330,927,620,808,200,000 | 40.466887 | 80 | 0.650244 | false |
arokem/pyAFQ | AFQ/utils/tests/test_streamlines.py | 2 | 4057 | import os.path as op
import numpy as np
import numpy.testing as npt
import nibabel as nib
import nibabel.tmpdirs as nbtmp
from AFQ.utils import streamlines as aus
import dipy.tracking.utils as dtu
import dipy.tracking.streamline as dts
from dipy.io.stateful_tractogram import StatefulTractogram, Space
def test_bundles_to_tgram():
affine = np.array([[2., 0., 0., -80.],
[0., 2., 0., -120.],
[0., 0., 2., -60.],
[0., 0., 0., 1.]])
img = nib.Nifti1Image(np.ones((10, 10, 10, 30)), affine)
bundles = {'b1': StatefulTractogram([np.array([[0, 0, 0],
[0, 0, 0.5],
[0, 0, 1],
[0, 0, 1.5]]),
np.array([[0, 0, 0],
[0, 0.5, 0.5],
[0, 1, 1]])],
img,
Space.VOX),
'b2': StatefulTractogram([np.array([[0, 0, 0],
[0, 0, 0.5],
[0, 0, 2],
[0, 0, 2.5]]),
np.array([[0, 0, 0],
[0, 0.5, 0.5],
[0, 2, 2]])],
img,
Space.VOX)}
bundle_dict = {'b1': {'uid': 1}, 'b2':{'uid': 2}}
tgram = aus.bundles_to_tgram(bundles, bundle_dict, img)
new_bundles = aus.tgram_to_bundles(tgram, bundle_dict, img)
    for k1 in bundles.keys():
        # Compare the round-tripped streamlines pairwise.
        for sl1, sl2 in zip(bundles[k1].streamlines, new_bundles[k1].streamlines):
            npt.assert_equal(sl1, sl2)
def test_split_streamline():
streamlines = dts.Streamlines([np.array([[1.,2.,3.],
[4.,5.,6.]]),
np.array([[7.,8.,9.],
[10.,11.,12.],
[13., 14., 15.]])])
assert streamlines == streamlines
sl_to_split = 1
split_idx = 1
new_streamlines = aus.split_streamline(streamlines, sl_to_split, split_idx)
test_streamlines = dts.Streamlines([np.array([[1., 2., 3.],
[4., 5., 6.]]),
np.array([[7., 8., 9.]]),
np.array([[10., 11., 12.],
[13., 14., 15.]])])
# Test equality of the underlying dict items:
for k in new_streamlines.__dict__.keys():
if isinstance(new_streamlines.__dict__[k], np.ndarray):
npt.assert_array_equal(
new_streamlines.__dict__[k],
test_streamlines.__dict__[k]
)
else:
assert new_streamlines.__dict__[k] == test_streamlines.__dict__[k]
def test_add_bundles():
t1 = nib.streamlines.Tractogram(
[np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]])])
t2 = nib.streamlines.Tractogram(
[np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]])])
added = aus.add_bundles(t1, t2)
    test_tgram = nib.streamlines.Tractogram(
[np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]]),
np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]])])
for sl1, sl2 in zip(added.streamlines, test_tgram.streamlines):
npt.assert_array_equal(sl1, sl2) | bsd-2-clause | -8,935,996,251,178,177,000 | 43.108696 | 86 | 0.38033 | false |
bbc/ebu-tt-live-toolkit | ebu_tt_live/config/adapters.py | 1 | 1858 |
from .common import ConfigurableComponent
from ebu_tt_live.adapters import document_data, node_carriage
data_adapters_by_directed_conversion = {
'xml->ebutt1': document_data.XMLtoEBUTT1Adapter,
'xml->ebutt3': document_data.XMLtoEBUTT3Adapter,
'xml->ebuttd': document_data.XMLtoEBUTTDAdapter,
'ebutt3->xml': document_data.EBUTT3toXMLAdapter,
'ebuttd->xml': document_data.EBUTTDtoXMLAdapter
}
def parse_adapter_list(value):
# This is working around a bug that configman leaves the lists intact
parsed_value = []
if value is not None:
for item in value:
conv_type = item['type']
kwargs = {ckey: carg for ckey, carg in list(item.items()) if ckey != 'type'}
parsed_value.append(data_adapters_by_directed_conversion.get(conv_type)(**kwargs))
return parsed_value or None
class ProducerNodeCarriageAdapter(ConfigurableComponent):
@classmethod
def configure_component(cls, config, local_config, producer=None, carriage=None, **kwargs):
instance = cls(config=config, local_config=local_config)
adapter_list = parse_adapter_list(local_config)
instance.component = node_carriage.ProducerNodeCarriageAdapter(
producer_carriage=carriage,
producer_node=producer,
            data_adapters=adapter_list
        )
        return instance
class ConsumerNodeCarriageAdapter(ConfigurableComponent):
@classmethod
def configure_component(cls, config, local_config, consumer=None, carriage=None, **kwargs):
instance = cls(config=config, local_config=local_config)
adapter_list = parse_adapter_list(local_config)
instance.component = node_carriage.ConsumerNodeCarriageAdapter(
consumer_carriage=carriage,
consumer_node=consumer,
data_adapters=adapter_list
)
return instance
| bsd-3-clause | -2,798,503,392,882,257,400 | 35.431373 | 95 | 0.69268 | false |
rs2/pandas | pandas/io/excel/_xlsxwriter.py | 1 | 7909 | from typing import Dict, List, Tuple
import pandas._libs.json as json
from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import validate_freeze_panes
class _XlsxStyler:
# Map from openpyxl-oriented styles to flatter xlsxwriter representation
# Ordering necessary for both determinism and because some are keyed by
# prefixes of others.
STYLE_MAPPING: Dict[str, List[Tuple[Tuple[str, ...], str]]] = {
"font": [
(("name",), "font_name"),
(("sz",), "font_size"),
(("size",), "font_size"),
(("color", "rgb"), "font_color"),
(("color",), "font_color"),
(("b",), "bold"),
(("bold",), "bold"),
(("i",), "italic"),
(("italic",), "italic"),
(("u",), "underline"),
(("underline",), "underline"),
(("strike",), "font_strikeout"),
(("vertAlign",), "font_script"),
(("vertalign",), "font_script"),
],
"number_format": [(("format_code",), "num_format"), ((), "num_format")],
"protection": [(("locked",), "locked"), (("hidden",), "hidden")],
"alignment": [
(("horizontal",), "align"),
(("vertical",), "valign"),
(("text_rotation",), "rotation"),
(("wrap_text",), "text_wrap"),
(("indent",), "indent"),
(("shrink_to_fit",), "shrink"),
],
"fill": [
(("patternType",), "pattern"),
(("patterntype",), "pattern"),
(("fill_type",), "pattern"),
(("start_color", "rgb"), "fg_color"),
(("fgColor", "rgb"), "fg_color"),
(("fgcolor", "rgb"), "fg_color"),
(("start_color",), "fg_color"),
(("fgColor",), "fg_color"),
(("fgcolor",), "fg_color"),
(("end_color", "rgb"), "bg_color"),
(("bgColor", "rgb"), "bg_color"),
(("bgcolor", "rgb"), "bg_color"),
(("end_color",), "bg_color"),
(("bgColor",), "bg_color"),
(("bgcolor",), "bg_color"),
],
"border": [
(("color", "rgb"), "border_color"),
(("color",), "border_color"),
(("style",), "border"),
(("top", "color", "rgb"), "top_color"),
(("top", "color"), "top_color"),
(("top", "style"), "top"),
(("top",), "top"),
(("right", "color", "rgb"), "right_color"),
(("right", "color"), "right_color"),
(("right", "style"), "right"),
(("right",), "right"),
(("bottom", "color", "rgb"), "bottom_color"),
(("bottom", "color"), "bottom_color"),
(("bottom", "style"), "bottom"),
(("bottom",), "bottom"),
(("left", "color", "rgb"), "left_color"),
(("left", "color"), "left_color"),
(("left", "style"), "left"),
(("left",), "left"),
],
}
@classmethod
def convert(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format dict
Parameters
----------
style_dict : style dictionary to convert
num_format_str : optional number format string
"""
# Create a XlsxWriter format object.
props = {}
if num_format_str is not None:
props["num_format"] = num_format_str
if style_dict is None:
return props
if "borders" in style_dict:
style_dict = style_dict.copy()
style_dict["border"] = style_dict.pop("borders")
for style_group_key, style_group in style_dict.items():
for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
# src is a sequence of keys into a nested dict
# dst is a flat key
if dst in props:
continue
v = style_group
for k in src:
try:
v = v[k]
except (KeyError, TypeError):
break
else:
props[dst] = v
if isinstance(props.get("pattern"), str):
# TODO: support other fill patterns
props["pattern"] = 0 if props["pattern"] == "none" else 1
for k in ["border", "top", "right", "bottom", "left"]:
if isinstance(props.get(k), str):
try:
props[k] = [
"none",
"thin",
"medium",
"dashed",
"dotted",
"thick",
"double",
"hair",
"mediumDashed",
"dashDot",
"mediumDashDot",
"dashDotDot",
"mediumDashDotDot",
"slantDashDot",
].index(props[k])
except ValueError:
props[k] = 2
if isinstance(props.get("font_script"), str):
props["font_script"] = ["baseline", "superscript", "subscript"].index(
props["font_script"]
)
if isinstance(props.get("underline"), str):
props["underline"] = {
"none": 0,
"single": 1,
"double": 2,
"singleAccounting": 33,
"doubleAccounting": 34,
}[props["underline"]]
return props
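def _xlsx_styler_example():
    # Illustrative only (not part of pandas): a nested openpyxl-style dict
    # flattens through STYLE_MAPPING above.
    converted = _XlsxStyler.convert({"font": {"bold": True, "color": "FF0000"}})
    assert converted == {"font_color": "FF0000", "bold": True}
    return converted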
class XlsxWriter(ExcelWriter):
engine = "xlsxwriter"
supported_extensions = (".xlsx",)
def __init__(
self,
path,
engine=None,
date_format=None,
datetime_format=None,
mode="w",
**engine_kwargs,
):
# Use the xlsxwriter module as the Excel writer.
from xlsxwriter import Workbook
if mode == "a":
raise ValueError("Append mode is not supported with xlsxwriter!")
super().__init__(
path,
engine=engine,
date_format=date_format,
datetime_format=datetime_format,
mode=mode,
**engine_kwargs,
)
self.book = Workbook(path, **engine_kwargs)
def save(self):
"""
Save workbook to disk.
"""
return self.book.close()
def write_cells(
self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None
):
# Write the frame cells using xlsxwriter.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_worksheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {"null": None}
if validate_freeze_panes(freeze_panes):
wks.freeze_panes(*(freeze_panes))
for cell in cells:
val, fmt = self._value_with_fmt(cell.val)
stylekey = json.dumps(cell.style)
if fmt:
stylekey += fmt
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_range(
startrow + cell.row,
startcol + cell.col,
startrow + cell.mergestart,
startcol + cell.mergeend,
val,
style,
)
else:
wks.write(startrow + cell.row, startcol + cell.col, val, style)
| bsd-3-clause | 1,335,923,263,812,707,600 | 32.371308 | 82 | 0.442407 | false |
hMatoba/Piexif | piexif/_common.py | 1 | 2590 | import struct
from ._exceptions import InvalidImageDataError
def split_into_segments(data):
"""Slices JPEG meta data into a list from JPEG binary data.
"""
if data[0:2] != b"\xff\xd8":
raise InvalidImageDataError("Given data isn't JPEG.")
head = 2
segments = [b"\xff\xd8"]
while 1:
if data[head: head + 2] == b"\xff\xda":
segments.append(data[head:])
break
else:
length = struct.unpack(">H", data[head + 2: head + 4])[0]
endPoint = head + length + 2
seg = data[head: endPoint]
segments.append(seg)
head = endPoint
if (head >= len(data)):
raise InvalidImageDataError("Wrong JPEG data.")
return segments
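def _split_into_segments_example():
    # Illustrative only (not part of piexif): a minimal synthetic JPEG --
    # SOI, one APP1 segment, then the SOS marker -- splits into three parts.
    app1 = b"\xff\xe1" + struct.pack(">H", 8) + b"Exif\x00\x00"
    data = b"\xff\xd8" + app1 + b"\xff\xda" + b"scan-data"
    return split_into_segments(data)  # [SOI, APP1, SOS + payload]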
def read_exif_from_file(filename):
"""Slices JPEG meta data into a list from JPEG binary data.
"""
f = open(filename, "rb")
data = f.read(6)
if data[0:2] != b"\xff\xd8":
raise InvalidImageDataError("Given data isn't JPEG.")
head = data[2:6]
HEAD_LENGTH = 4
exif = None
while len(head) == HEAD_LENGTH:
length = struct.unpack(">H", head[2: 4])[0]
if head[:2] == b"\xff\xe1":
segment_data = f.read(length - 2)
if segment_data[:4] != b'Exif':
head = f.read(HEAD_LENGTH)
continue
exif = head + segment_data
break
elif head[0:1] == b"\xff":
f.read(length - 2)
head = f.read(HEAD_LENGTH)
else:
break
f.close()
return exif
def get_exif_seg(segments):
"""Returns Exif from JPEG meta data list
"""
for seg in segments:
if seg[0:2] == b"\xff\xe1" and seg[4:10] == b"Exif\x00\x00":
return seg
return None
def merge_segments(segments, exif=b""):
"""Merges Exif with APP0 and APP1 manipulations.
"""
if segments[1][0:2] == b"\xff\xe0" and \
segments[2][0:2] == b"\xff\xe1" and \
segments[2][4:10] == b"Exif\x00\x00":
if exif:
segments[2] = exif
segments.pop(1)
elif exif is None:
segments.pop(2)
else:
segments.pop(1)
elif segments[1][0:2] == b"\xff\xe0":
if exif:
segments[1] = exif
elif segments[1][0:2] == b"\xff\xe1" and \
segments[1][4:10] == b"Exif\x00\x00":
if exif:
segments[1] = exif
elif exif is None:
segments.pop(1)
else:
if exif:
segments.insert(1, exif)
return b"".join(segments)
| mit | 2,539,157,328,622,996,500 | 26.553191 | 69 | 0.519305 | false |
brython-dev/brython | www/src/Lib/test/test_userdict.py | 4 | 7744 | # Check every path through every method of UserDict
from test import mapping_tests
import unittest
import collections
d0 = {}
d1 = {"one": 1}
d2 = {"one": 1, "two": 2}
d3 = {"one": 1, "two": 3, "three": 5}
d4 = {"one": None, "two": None}
d5 = {"one": 1, "two": 1}
class UserDictTest(mapping_tests.TestHashMappingProtocol):
type2test = collections.UserDict
def test_all(self):
# Test constructors
u = collections.UserDict()
u0 = collections.UserDict(d0)
u1 = collections.UserDict(d1)
u2 = collections.UserDict(d2)
uu = collections.UserDict(u)
uu0 = collections.UserDict(u0)
uu1 = collections.UserDict(u1)
uu2 = collections.UserDict(u2)
# keyword arg constructor
self.assertEqual(collections.UserDict(one=1, two=2), d2)
# item sequence constructor
self.assertEqual(collections.UserDict([('one',1), ('two',2)]), d2)
self.assertEqual(collections.UserDict(dict=[('one',1), ('two',2)]),
{'dict': [('one', 1), ('two', 2)]})
# both together
self.assertEqual(collections.UserDict([('one',1), ('two',2)], two=3, three=5), d3)
# alternate constructor
self.assertEqual(collections.UserDict.fromkeys('one two'.split()), d4)
self.assertEqual(collections.UserDict().fromkeys('one two'.split()), d4)
self.assertEqual(collections.UserDict.fromkeys('one two'.split(), 1), d5)
self.assertEqual(collections.UserDict().fromkeys('one two'.split(), 1), d5)
self.assertTrue(u1.fromkeys('one two'.split()) is not u1)
self.assertIsInstance(u1.fromkeys('one two'.split()), collections.UserDict)
self.assertIsInstance(u2.fromkeys('one two'.split()), collections.UserDict)
# Test __repr__
self.assertEqual(str(u0), str(d0))
self.assertEqual(repr(u1), repr(d1))
self.assertIn(repr(u2), ("{'one': 1, 'two': 2}",
"{'two': 2, 'one': 1}"))
# Test rich comparison and __len__
all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
for a in all:
for b in all:
self.assertEqual(a == b, len(a) == len(b))
# Test __getitem__
self.assertEqual(u2["one"], 1)
self.assertRaises(KeyError, u1.__getitem__, "two")
# Test __setitem__
u3 = collections.UserDict(u2)
u3["two"] = 2
u3["three"] = 3
# Test __delitem__
del u3["three"]
self.assertRaises(KeyError, u3.__delitem__, "three")
# Test clear
u3.clear()
self.assertEqual(u3, {})
# Test copy()
u2a = u2.copy()
self.assertEqual(u2a, u2)
u2b = collections.UserDict(x=42, y=23)
u2c = u2b.copy() # making a copy of a UserDict is special cased
self.assertEqual(u2b, u2c)
class MyUserDict(collections.UserDict):
def display(self): print(self)
m2 = MyUserDict(u2)
m2a = m2.copy()
self.assertEqual(m2a, m2)
# SF bug #476616 -- copy() of UserDict subclass shared data
m2['foo'] = 'bar'
self.assertNotEqual(m2a, m2)
# Test keys, items, values
self.assertEqual(sorted(u2.keys()), sorted(d2.keys()))
self.assertEqual(sorted(u2.items()), sorted(d2.items()))
self.assertEqual(sorted(u2.values()), sorted(d2.values()))
# Test "in".
for i in u2.keys():
self.assertIn(i, u2)
self.assertEqual(i in u1, i in d1)
self.assertEqual(i in u0, i in d0)
# Test update
t = collections.UserDict()
t.update(u2)
self.assertEqual(t, u2)
# Test get
for i in u2.keys():
self.assertEqual(u2.get(i), u2[i])
self.assertEqual(u1.get(i), d1.get(i))
self.assertEqual(u0.get(i), d0.get(i))
# Test "in" iteration.
for i in range(20):
u2[i] = str(i)
ikeys = []
for k in u2:
ikeys.append(k)
keys = u2.keys()
self.assertEqual(set(ikeys), set(keys))
# Test setdefault
t = collections.UserDict()
self.assertEqual(t.setdefault("x", 42), 42)
self.assertIn("x", t)
self.assertEqual(t.setdefault("x", 23), 42)
# Test pop
t = collections.UserDict(x=42)
self.assertEqual(t.pop("x"), 42)
self.assertRaises(KeyError, t.pop, "x")
self.assertEqual(t.pop("x", 1), 1)
t["x"] = 42
self.assertEqual(t.pop("x", 1), 42)
# Test popitem
t = collections.UserDict(x=42)
self.assertEqual(t.popitem(), ("x", 42))
self.assertRaises(KeyError, t.popitem)
def test_init(self):
for kw in 'self', 'other', 'iterable':
self.assertEqual(list(collections.UserDict(**{kw: 42}).items()),
[(kw, 42)])
self.assertEqual(list(collections.UserDict({}, dict=42).items()),
[('dict', 42)])
self.assertEqual(list(collections.UserDict({}, dict=None).items()),
[('dict', None)])
self.assertEqual(list(collections.UserDict(dict={'a': 42}).items()),
[('dict', {'a': 42})])
self.assertRaises(TypeError, collections.UserDict, 42)
self.assertRaises(TypeError, collections.UserDict, (), ())
self.assertRaises(TypeError, collections.UserDict.__init__)
def test_update(self):
for kw in 'self', 'dict', 'other', 'iterable':
d = collections.UserDict()
d.update(**{kw: 42})
self.assertEqual(list(d.items()), [(kw, 42)])
self.assertRaises(TypeError, collections.UserDict().update, 42)
self.assertRaises(TypeError, collections.UserDict().update, {}, {})
self.assertRaises(TypeError, collections.UserDict.update)
def test_missing(self):
# Make sure UserDict doesn't have a __missing__ method
self.assertEqual(hasattr(collections.UserDict, "__missing__"), False)
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
# (G) subclass doesn't define __missing__ at all
class D(collections.UserDict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assertNotIn(2, d)
self.assertNotIn(2, d.keys())
self.assertEqual(d[2], 42)
class E(collections.UserDict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
try:
e[42]
except RuntimeError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("e[42] didn't raise RuntimeError")
class F(collections.UserDict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
collections.UserDict.__init__(self)
f = F()
try:
f[42]
except KeyError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("f[42] didn't raise KeyError")
class G(collections.UserDict):
pass
g = G()
try:
g[42]
except KeyError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("g[42] didn't raise KeyError")
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 8,896,096,979,546,404,000 | 34.360731 | 90 | 0.5461 | false |
HeraldStudio/webservice-py | mod/emptyroom/new_new_handler.py | 1 | 2424 | # -*- coding: utf-8 -*-
import json, urllib
import tornado.web
import tornado.gen
from datetime import date, timedelta
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
import traceback
class NewNewHandler(tornado.web.RequestHandler):
def get(self):
self.write('Herald Web Service')
@tornado.web.asynchronous
@tornado.gen.engine
def post(self):
ret = { 'code': 200, 'content': '' }
try:
campus_id = self.get_argument('campusId', default='22')
date = self.get_argument('date')
building_id = self.get_argument('buildingId', default='')
start_sequence = self.get_argument('startSequence')
end_sequence = self.get_argument('endSequence')
page = self.get_argument('page', default='1')
page_size = self.get_argument('pageSize', default='10')
client = AsyncHTTPClient()
request = HTTPRequest('http://58.192.114.179/classroom/common/getdateofweek?date=' + date)
response = yield tornado.gen.Task(client.fetch, request)
date_info = json.loads(response.body)
request = HTTPRequest('http://58.192.114.179/classroom/show/getemptyclassroomlist',
method='POST',
body=urllib.urlencode({
'pageNo': page,
'pageSize': page_size,
'campusId': campus_id,
'buildingId': building_id,
'startWeek': date_info['week'],
'endWeek': date_info['week'],
'dayOfWeek': date_info['dayOfWeek'],
'startSequence': start_sequence,
'endSequence': end_sequence,
'termId': date_info['termId']
}))
response = yield tornado.gen.Task(client.fetch, request)
ret['content'] = json.loads(response.body)
except Exception,e:
print e
ret['code'] = 500
ret['content'] = 'error'
traceback.print_exc(e)
self.write(json.dumps(ret,ensure_ascii=False, indent=2))
self.finish()
| mit | 28,707,944,265,570,976 | 43.888889 | 102 | 0.49835 | false |
robwarm/gpaw-symm | doc/documentation/xc/s22_test.py | 1 | 1509 | import sys
from ase import *
from ase.parallel import paropen
from ase.data.s22 import data, s22
from ase.calculators.vdwcorrection import vdWTkatchenko09prl
from gpaw import *
from gpaw.cluster import Cluster
from gpaw.analyse.hirshfeld import HirshfeldDensity, HirshfeldPartitioning
from gpaw.analyse.vdwradii import vdWradii
h = 0.18
box = 4.
xc = 'TS09'
f = paropen('energies_' + xc +'.dat', 'w')
print >> f, '# h=', h
print >> f, '# box=', box
print >> f, '# molecule E[1] E[2] E[1+2] E[1]+E[2]-E[1+2]'
for molecule in data:
print >> f, molecule,
ss = Cluster(Atoms(data[molecule]['symbols'],
data[molecule]['positions']))
# split the structures
s1 = ss.find_connected(0)
s2 = ss.find_connected(-1)
assert(len(ss) == len(s1) + len(s2))
if xc == 'TS09' or xc == 'TPSS' or xc == 'M06L':
c = GPAW(xc='PBE', h=h, nbands=-6, occupations=FermiDirac(width=0.1))
else:
c = GPAW(xc=xc, h=h, nbands=-6, occupations=FermiDirac(width=0.1))
E = []
for s in [s1, s2, ss]:
s.set_calculator(c)
s.minimal_box(box, h=h)
if xc == 'TS09':
s.get_potential_energy()
cc = vdWTkatchenko09prl(HirshfeldPartitioning(c),
vdWradii(s.get_chemical_symbols(), 'PBE'))
s.set_calculator(cc)
if xc == 'TPSS' or xc == 'M06L':
ene = s.get_potential_energy()
ene += c.get_xc_difference(xc)
E.append(ene)
else:
E.append(s.get_potential_energy())
print >> f, E[0], E[1], E[2],
print >> f, E[0] + E[1] - E[2]
f.flush()
f.close()
| gpl-3.0 | 9,142,085,672,902,052,000 | 31.106383 | 74 | 0.620941 | false |
junneyang/studysummary | 008Python学习/Python NetWork/TCPServer.py | 1 | 1064 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from socket import *
import json
HOST="10.85.156.249"
PORT=9999
ADDR=(HOST,PORT)
s=socket(AF_INET,SOCK_STREAM)
s.bind(ADDR)
s.listen(99999*2)
print("Server Listen...")
while(True):
client,addr=s.accept()
print("Connect from "+str(addr))
RecvStr=client.recv(1024)
RecvStr=json.loads(RecvStr,encoding="utf-8")
print("RecvStr:"+RecvStr["Type"]+";"+RecvStr["Des"]+";"+RecvStr["Content"])
if(RecvStr["Type"] == "0"):
SendStr=RecvStr["Content"].upper()
SendStr=json.dumps({
"ErrorCode":u"0",
"Des":u"Transaction Finished[处理完毕]",
"Content":SendStr
})
else:
SendStr=RecvStr["Content"]
SendStr=json.dumps({
"ErrorCode":u"-1",
"Des":u"UnSupported Type[不支持的请求]",
"Content":SendStr
})
client.send(SendStr)
client.close()
print("Connect Release...\n")
print("Server Listen...")
| mit | -3,871,972,358,702,519,300 | 25.473684 | 79 | 0.536398 | false |
NexTVTeam/hbbtv-plugin | HbbTV/hbbtv.py | 1 | 3321 | from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from enigma import eTimer
import os, struct, vbcfg
from vbipc import VBController
class HbbTVWindow(Screen):
skin = """
<screen name="HbbTVWindow" position="0,0" size="1280,720" backgroundColor="transparent" flags="wfNoBorder" title="HbbTV Plugin">
</screen>
"""
def __init__(self, session, url=None, app_info=None):
from enigma import getDesktop
self.width = getDesktop(0).size().width()
self.height = getDesktop(0).size().height()
if (self.width > 1920):
self.width = 1920
elif (self.width < 720):
self.width = 720
if (self.height > 1080):
self.height = 1080
elif (self.height < 576):
self.height = 576
vbcfg.g_position = vbcfg.getPosition()
vbcfg.osd_lock()
Screen.__init__(self, session)
self._url = url
self._info = app_info
self.onLayoutFinish.append(self.start_hbbtv_application)
self._close_timer = eTimer()
self._close_timer.callback.append(self.stop_hbbtv_application)
try:
if self._cb_set_title not in vbcfg.g_main.vbhandler.onSetTitleCB:
vbcfg.g_main.vbhandler.onSetTitleCB.append(self._cb_set_title)
except Exception:
pass
try:
if self._cb_close_window not in vbcfg.g_main.vbhandler.onCloseCB:
vbcfg.g_main.vbhandler.onCloseCB.append(self._cb_close_window)
except Exception:
pass
def _cb_set_title(self, title=None):
vbcfg.DEBUG("pate title: %s" % title)
if title is None:
return
self.setTitle(title)
def _cb_close_window(self):
self._close_timer.start(1000)
def start_hbbtv_application(self):
#print "=========== start_hbbtv_application ================"
vbcfg.g_main.vbhandler.soft_volume = -1
self.setTitle(_('HbbTV Plugin'))
vbcfg.DEBUG("Starting HbbTV")
#vbcfg.DEBUG("url : %s" % self._url and self._url)
vbcfg.DEBUG("info: %s" % self._info and self._info)
if self._info and self._info["control"] == 1 and vbcfg.g_channel_info is not None:
os.system("run.sh restart %d %d %s" % (self.width, self.height, self._info["url"]))
else:
if self._url is not None:
os.system("run.sh restart %d %d %s" % (self.width, self.height, self._url))
else:
os.system("run.sh restart %d %d %s" % (self.width, self.height, self._info["url"]))
vbcfg.g_main._timer_update_video_size.start(100)
def stop_hbbtv_application(self):
#print "=========== stop_hbbtv_application ================"
self._close_timer.stop()
self._close_timer = None
vbcfg.g_main._timer_update_video_size.stop()
try:
if self._cb_set_title in vbcfg.g_main.vbhandler.onSetTitleCB:
vbcfg.g_main.vbhandler.onSetTitleCB.remove(self._cb_set_title)
except Exception:
pass
try:
if self._cb_close_window in vbcfg.g_main.vbhandler.onCloseCB:
vbcfg.g_main.vbhandler.onCloseCB.remove(self._cb_close_window)
except Exception:
pass
from enigma import getDesktop, gMainDC
dsk = getDesktop(0)
desktop_size = dsk.size()
gMainDC.getInstance().setResolution(desktop_size.width(), desktop_size.height())
#print "========= stop_hbbtv_application:: g_position: ", vbcfg.g_position, " ==================="
vbcfg.setPosition(vbcfg.g_position)
vbcfg.osd_unlock()
dsk.paint()
vbcfg.set_bgcolor("0")
vbcfg.DEBUG("Stop HbbTV")
os.system("run.sh stop")
self.close()
| lgpl-2.1 | -1,640,176,830,540,437,500 | 26.907563 | 130 | 0.668774 | false |
Igelinmist/ivi | ivi/settings.py | 1 | 1555 | # -*- coding: utf-8 -*-
"""Application configuration."""
import os
class Config(object):
"""Base configuration."""
SECRET_KEY = os.environ.get('IVI_SECRET', 'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
ASSETS_DEBUG = False
DEBUG_TB_ENABLED = False # Disable Debug toolbar
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/example' # TODO: Change me
DEBUG_TB_ENABLED = False # Disable Debug toolbar
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
DB_NAME = 'dev.db'
# Put the db file in project root
DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
DEBUG_TB_ENABLED = True
ASSETS_DEBUG = True # Don't bundle/minify static assets
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
BCRYPT_LOG_ROUNDS = 4 # For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds"
WTF_CSRF_ENABLED = False # Allows form testing
| bsd-3-clause | 7,413,580,529,132,813,000 | 30.1 | 101 | 0.655305 | false |
jlehtnie/junity | junity/titan.py | 1 | 1366 | import os.path
import re
import junity.base as base
class Titan(object):
VERDICT = re.compile(r"""
Test\ case\
(?P<testcase>[^\ ]+)\
finished.\
Verdict:\
(?P<verdict>[a-z]+)
""", re.VERBOSE)
VERDICTS = {
'fail': base.TestVerdict.FAILURE,
'none': base.TestVerdict.FAILURE,
'pass': base.TestVerdict.SUCCESS
}
class TitanFormatHandler(base.FormatHandler):
def accept(self, path, text):
return text.find("TESTCASE") != -1
def read(self, path, text):
test_suite = base.TestSuite(os.path.basename(path))
matches = Titan.VERDICT.findall(text)
for match in matches:
test_suite.append(self.read_test_case(path, match))
if len(test_suite.children) == 0:
raise TitanFormatHandlerError(path)
return base.TestSuites([ test_suite ])
def read_test_case(self, path, match):
name = match[0]
verdict = Titan.VERDICTS.get(match[1], base.TestVerdict.ERROR)
test_case = base.TestCase(name, verdict)
return test_case
class TitanFormatHandlerError(base.FormatHandlerError):
MESSAGE = "This TITAN log file appears to contain no test cases."
def __init__(self, path):
base.FormatHandlerError.__init__(self, path,
TitanFormatHandlerError.MESSAGE)
| mit | 4,929,085,738,887,875,000 | 26.32 | 70 | 0.612738 | false |
lnls-fac/collective_effects | pycolleff/pycolleff/sirius_hc.py | 1 | 3061 | #!/usr/bin/env python-sirius
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import sys
import mathphys as _mp
import simulate_landau as landau_cav
c = _mp.constants.light_speed
def main():
ring = landau_cav.Ring()
# Sirius
ring.E0 = 3e9
ring.nom_cur = 350e-3
ring.en_lost_rad = 872.7e3
ring.frf = 499.658e6
ring.peak_rf = 3e6
ring.harm_num = 864
ring.en_spread = 8.5e-4
ring.mom_cmpct = 1.639e-4
r = ring.en_lost_rad/ring.peak_rf
wrf = ring.wrf
krf = wrf/c
h = ring.harm_num
It = ring.nom_cur
sigz = ring.bunlen
Q = 2.6e8
hc = landau_cav.HarmCav(wrf, n=3, Q=Q, Rs=88*Q, r=r) # NSLS-2 SC-3HC
F = np.exp(-(sigz*hc.num*wrf/c)**2)
hc.calc_flat_potential(ring, F=F)
# hc.psi = 40*np.pi/180
zlim = 30*sigz
npoints = 1501
z = np.linspace(-zlim, zlim, npoints)
Ib = np.zeros(h, dtype=float)
s_fill = h
n_trains = 1
s_gap = (h - n_trains * s_fill) // n_trains
Ib[:] = It / s_fill / n_trains
for j in range(n_trains):
Ib[j * (s_fill + s_gap) + s_fill:(j + 1) * (s_fill+s_gap)] = 0
lamb = landau_cav.Lambda(z, Ib, ring)
_, _, dist_new = landau_cav.calc_equilibrium_potential(ring, lamb, hc, z,
epsilon=1e-7,
param_conv=20,
n_iters=1000)
lamb.dist = np.array(dist_new)
sigma_z_imp = lamb.get_bun_lens()
z_ave_i = lamb.get_synch_phases()
bl_imp = np.mean(sigma_z_imp)
z_ave_ave_i = np.mean(z_ave_i)
print('IMPEDANCE')
hc.print_param(ring)
# fwhm = 2*np.sqrt(2*np.log(2))
print('sync phase: {0:7.3f} mm'.format(z_ave_ave_i*1e3))
print('bun length: {0:7.3f} mm ({1:7.3f} ps)'.format(bl_imp*1e3,
bl_imp*1e12/c))
plt.figure(figsize=(10, 14))
gs = gridspec.GridSpec(4, 1)
gs.update(left=0.10, right=0.95, bottom=0.10,
top=0.97, wspace=0.35, hspace=0.25)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[2, 0], sharex=ax2)
ax4 = plt.subplot(gs[3, 0], sharex=ax2)
ph = z*krf
ax1.plot(ph, dist_new[0, :], label='Distribution 1st bunch')
ax2.plot(lamb.cur, label='Current - [mA]')
mask = lamb.cur < 1e-6
sigma_z_imp[mask] = np.nan
z_ave_i[mask] = np.nan
ax3.plot(sigma_z_imp/ring.bunlen, label='Bunch Lengthening factor')
ax4.plot(ring.synch_phase + z_ave_i*krf, label='Synch Phase')
ax1.legend(loc='best')
ax2.legend(loc='best')
ax3.legend(loc='best')
ax4.legend(loc='best')
ax1.grid(True)
ax2.grid(True)
ax3.grid(True)
ax4.grid(True)
plt.show()
if __name__ == "__main__":
landau_cav.memory_limit() # Limitates maximum memory usage to half
try:
main()
except MemoryError:
sys.stderr.write('\n\nERROR: Memory Exception\n')
sys.exit(1)
| mit | 6,399,122,000,002,394,000 | 25.850877 | 77 | 0.54786 | false |
cloudbase/coriolis | coriolis/endpoints/api.py | 1 | 2592 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
from coriolis import utils
from coriolis.conductor.rpc import client as rpc_conductor_client
from coriolis.minion_manager.rpc import client as rpc_minion_manager_client
class API(object):
def __init__(self):
self._rpc_conductor_client = rpc_conductor_client.ConductorClient()
self._rpc_minion_manager_client = (
rpc_minion_manager_client.MinionManagerClient())
def create(self, ctxt, name, endpoint_type, description,
connection_info, mapped_regions):
return self._rpc_conductor_client.create_endpoint(
ctxt, name, endpoint_type, description, connection_info,
mapped_regions)
def update(self, ctxt, endpoint_id, properties):
return self._rpc_conductor_client.update_endpoint(
ctxt, endpoint_id, properties)
def delete(self, ctxt, endpoint_id):
self._rpc_conductor_client.delete_endpoint(ctxt, endpoint_id)
def get_endpoints(self, ctxt):
return self._rpc_conductor_client.get_endpoints(ctxt)
def get_endpoint(self, ctxt, endpoint_id):
return self._rpc_conductor_client.get_endpoint(ctxt, endpoint_id)
def validate_connection(self, ctxt, endpoint_id):
return self._rpc_conductor_client.validate_endpoint_connection(
ctxt, endpoint_id)
@utils.bad_request_on_error("Invalid destination environment: %s")
def validate_target_environment(self, ctxt, endpoint_id, target_env):
return self._rpc_conductor_client.validate_endpoint_target_environment(
ctxt, endpoint_id, target_env)
@utils.bad_request_on_error("Invalid source environment: %s")
def validate_source_environment(self, ctxt, endpoint_id, source_env):
return self._rpc_conductor_client.validate_endpoint_source_environment(
ctxt, endpoint_id, source_env)
@utils.bad_request_on_error("Invalid source minion pool environment: %s")
def validate_endpoint_source_minion_pool_options(
self, ctxt, endpoint_id, pool_environment):
return self._rpc_minion_manager_client.validate_endpoint_source_minion_pool_options(
ctxt, endpoint_id, pool_environment)
@utils.bad_request_on_error(
"Invalid destination minion pool environment: %s")
def validate_endpoint_destination_minion_pool_options(
self, ctxt, endpoint_id, pool_environment):
return self._rpc_minion_manager_client.validate_endpoint_destination_minion_pool_options(
ctxt, endpoint_id, pool_environment)
| agpl-3.0 | -81,276,994,539,366,080 | 42.932203 | 97 | 0.697145 | false |
monk-ee/puppetdb-python | puppetdb/v2/nodes.py | 1 | 3314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Arcus, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from puppetdb import utils
API_VERSION = 'v2'
def get_nodes(api_url=None, verify=False, cert=list()):
"""
Returns info for all Nodes
:param api_url: Base PuppetDB API url
"""
return utils._make_api_request(api_url, '/nodes', verify, cert)
def get_node(api_url=None, node_name=None, verify=False, cert=list()):
"""
Returns info for a Node
:param api_url: Base PuppetDB API url
:param node_name: Name of node
"""
return utils._make_api_request(api_url, '/nodes/{0}'.format(node_name), verify, cert)
def get_node_facts(api_url=None, node_name=None, verify=False, cert=list()):
"""
Returns facts for a Node
:param api_url: Base PuppetDB API url
:param node_name: Name of node
"""
return utils._make_api_request(api_url, '/nodes/{0}/facts'.format(node_name), verify, cert)
def get_node_fact_by_name(api_url=None, node_name=None, fact_name=None, verify=False, cert=list()):
"""
Returns specified fact for a Node
:param api_url: Base PuppetDB API url
:param node_name: Name of node
:param fact_name: Name of fact
"""
return utils._make_api_request(api_url, '/nodes/{0}/facts/{1}'.format(node_name,
fact_name), verify, cert)
def get_node_resources(api_url=None, node_name=None, verify=False, cert=list()):
"""
Returns resources for a Node
:param api_url: Base PuppetDB API url
:param node_name: Name of node
"""
return utils._make_api_request(api_url, '/nodes/{0}/resources'.format(node_name), verify, cert)
def get_node_resource_by_type(api_url=None, node_name=None,
type_name=None, verify=False, cert=list()):
"""
Returns specified resource for a Node
:param api_url: Base PuppetDB API url
:param node_name: Name of node
:param type_name: Type of resource
"""
return utils._make_api_request(api_url, '/nodes/{0}/resources/{1}'.format(node_name,
type_name), verify, cert)
def get_facts(api_url=None, query={}, verify=False, cert=list()):
    """
    Returns all facts
    :param api_url: Base PuppetDB API url
    """
    return utils._make_api_request(api_url, '/facts', verify, cert)
| mit | -5,458,084,079,127,352,000 | 33.164948 | 99 | 0.690404 | false |
rishig/zulip | zerver/lib/feedback.py | 1 | 2665 |
from django.conf import settings
from django.core.mail import EmailMessage
from typing import Any, Mapping, Optional
from zerver.lib.actions import internal_send_message
from zerver.lib.send_email import FromAddress
from zerver.lib.redis_utils import get_redis_client
from zerver.models import get_system_bot
import time
client = get_redis_client()
def has_enough_time_expired_since_last_message(sender_email: str, min_delay: float) -> bool:
# This function returns a boolean, but it also has the side effect
# of noting that a new message was received.
key = 'zilencer:feedback:%s' % (sender_email,)
t = int(time.time())
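    # GETSET atomically stores the new timestamp and returns the previous
    # one, so two racing callers cannot both observe an expired delay.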
last_time = client.getset(key, t) # type: Optional[bytes]
if last_time is None:
return True
delay = t - int(last_time)
return delay > min_delay
def deliver_feedback_by_zulip(message: Mapping[str, Any]) -> None:
subject = "%s" % (message["sender_email"],)
if len(subject) > 60:
subject = subject[:57].rstrip() + "..."
content = ''
sender_email = message['sender_email']
# We generate ticket numbers if it's been more than a few minutes
# since their last message. This avoids some noise when people use
# enter-send.
need_ticket = has_enough_time_expired_since_last_message(sender_email, 180)
if need_ticket:
ticket_number = message['id']
content += '\n~~~'
content += '\nticket Z%03d (@support please ack)' % (ticket_number,)
content += '\nsender: %s' % (message['sender_full_name'],)
content += '\nemail: %s' % (sender_email,)
if 'sender_realm_str' in message:
content += '\nrealm: %s' % (message['sender_realm_str'],)
content += '\n~~~'
content += '\n\n'
content += message['content']
user_profile = get_system_bot(settings.FEEDBACK_BOT)
internal_send_message(user_profile.realm, settings.FEEDBACK_BOT,
"stream", settings.FEEDBACK_STREAM, subject, content)
def handle_feedback(event: Mapping[str, Any]) -> None:
if not settings.ENABLE_FEEDBACK:
return
if settings.FEEDBACK_EMAIL is not None:
to_email = settings.FEEDBACK_EMAIL
subject = "Zulip feedback from %s" % (event["sender_email"],)
content = event["content"]
from_email = '"%s" <%s>' % (event["sender_full_name"], FromAddress.SUPPORT)
headers = {'Reply-To': '"%s" <%s>' % (event["sender_full_name"], event["sender_email"])}
msg = EmailMessage(subject, content, from_email, [to_email], headers=headers)
msg.send()
if settings.FEEDBACK_STREAM is not None:
deliver_feedback_by_zulip(event)
| apache-2.0 | -3,259,584,417,288,743,400 | 37.623188 | 96 | 0.642402 | false |
strubell/Parser | lib/rnn_cells/base_cell.py | 1 | 2534 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from configurable import Configurable
#***************************************************************
class BaseCell(Configurable):
""""""
#=============================================================
def __init__(self, *args, **kwargs):
""""""
input_size = kwargs.pop('input_size', None)
output_size = kwargs.pop('output_size', None)
recur_diag_bilin = kwargs.pop('recur_diag_bilin', False)
self.moving_params = kwargs.pop('moving_params', None)
super(BaseCell, self).__init__(*args, **kwargs)
self._output_size = output_size if output_size is not None else self.recur_size
self._input_size = input_size if input_size is not None else self.output_size
self._recur_diag_bilin = recur_diag_bilin
#=============================================================
def __call__(self, inputs, state, scope=None):
""""""
raise NotImplementedError()
#=============================================================
def zero_state(self, batch_size, dtype):
""""""
zero_state = tf.get_variable('Zero_state',
shape=self.state_size,
dtype=dtype,
initializer=tf.zeros_initializer())
state = tf.reshape(tf.tile(zero_state, tf.stack([batch_size])), tf.stack([batch_size, self.state_size]))
state.set_shape([None, self.state_size])
return state
#=============================================================
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._output_size
@property
def recur_diag_bilin(self):
return self._recur_diag_bilin
@property
def state_size(self):
raise NotImplementedError()
| apache-2.0 | -6,182,647,582,745,570,000 | 33.712329 | 108 | 0.575375 | false |
sjdv1982/seamless | docs/archive/slash/parse_slash0_utils.py | 1 | 2425 | import re
quote_match = re.compile(r'(([\"\']).*?\2)')
double_quote = re.compile(r'(\A|[^\\])\"')
single_quote = re.compile(r"(\A|[^\\])\'")
doc_name = re.compile(r'^[A-Za-z_][A-Za-z0-9_/]*$')
token_separators=r'(?P<sep1>[\s]+)|[\s](?P<sep2>2>)[^&][^1]|[\s](?P<sep3>!>)[\s]|[\s](?P<sep4>2>&1)|(?P<sep5>(?<![2!])>)|(?P<sep6>[;|])'
token_separators = re.compile(token_separators)
literal = re.compile(r'.*') #rely on shlex.quote
def find_node(node_name, nodetypes, nodes):
if isinstance(nodetypes, str):
nodetypes = [nodetypes]
for nodetype in nodetypes:
for node_index, node in enumerate(nodes[nodetype]):
if node["name"] == node_name:
return nodetype, node_index
raise NameError(node_name, nodetypes)
def append_node(nodes, nodetype, node):
for curr_node_index, curr_node in enumerate(nodes[nodetype]):
if curr_node:
if curr_node["name"] == node["name"]:
for field in curr_node:
assert field in node, field #TODO: more informative message...
assert node[field] == curr_node[field], field #TODO: more informative message...
for field in node:
assert field in curr_node, field #TODO: more informative message...
return curr_node_index
for other_nodetype in nodes.keys():
if other_nodetype == nodetype:
continue
for curr_node in nodes[other_nodetype]:
assert curr_node["name"] != node["name"], (nodetype, other_nodetype, node["name"]) #TODO: nicer error message
nodes[nodetype].append(node)
return len(nodes[nodetype]) - 1
def syntax_error(lineno, line, message):
message = " " + "\n ".join(message.splitlines())
msg = """Line {0}:
{1}
Error message:
{2}""".format(lineno, line, message)
raise SyntaxError(msg)
def tokenize(text, masked_text):
tokens = []
pos = 0
for match in token_separators.finditer(masked_text):
seps = match.groupdict()
split_tokens = [v for v in seps.values() if v is not None]
assert len(split_tokens) == 1, seps
split_token = split_tokens[0].strip()
newpos = match.start()
if pos != newpos:
tokens.append(text[pos:newpos])
pos = match.end()
if len(split_token):
tokens.append(split_token)
tokens.append(text[pos:])
return tokens
| mit | 1,262,014,449,949,030,700 | 39.416667 | 136 | 0.581856 | false |
beeftornado/sentry | src/sentry/models/groupsubscription.py | 1 | 6863 | from __future__ import absolute_import
from django.conf import settings
from django.db import IntegrityError, models, transaction
from django.db.models import Q
from django.utils import timezone
from sentry.db.models import (
BaseManager,
BoundedPositiveIntegerField,
FlexibleForeignKey,
Model,
sane_repr,
)
class GroupSubscriptionReason(object):
implicit = -1 # not for use as a persisted field value
committed = -2 # not for use as a persisted field value
processing_issue = -3 # not for use as a persisted field value
unknown = 0
comment = 1
assigned = 2
bookmark = 3
status_change = 4
deploy_setting = 5
mentioned = 6
team_mentioned = 7
descriptions = {
implicit: u"have opted to receive updates for all issues within "
"projects that you are a member of",
committed: u"were involved in a commit that is part of this release",
processing_issue: u"are subscribed to alerts for this project",
comment: u"have commented on this issue",
assigned: u"have been assigned to this issue",
bookmark: u"have bookmarked this issue",
status_change: u"have changed the resolution status of this issue",
deploy_setting: u"opted to receive all deploy notifications for this organization",
mentioned: u"have been mentioned in this issue",
team_mentioned: u"are a member of a team mentioned in this issue",
}
def get_user_options(key, user_ids, project, default):
from sentry.models import UserOption
options = {
(option.user_id, option.project_id): option.value
for option in UserOption.objects.filter(
Q(project__isnull=True) | Q(project=project),
user_id__in=user_ids,
key="workflow:notifications",
)
}
results = {}
for user_id in user_ids:
results[user_id] = options.get((user_id, project.id), options.get((user_id, None), default))
return results
class GroupSubscriptionManager(BaseManager):
def subscribe(self, group, user, reason=GroupSubscriptionReason.unknown):
"""
Subscribe a user to an issue, but only if the user has not explicitly
unsubscribed.
"""
try:
with transaction.atomic():
self.create(
user=user, group=group, project=group.project, is_active=True, reason=reason
)
except IntegrityError:
pass
def subscribe_actor(self, group, actor, reason=GroupSubscriptionReason.unknown):
from sentry.models import User, Team
if isinstance(actor, User):
return self.subscribe(group, actor, reason)
if isinstance(actor, Team):
# subscribe the members of the team
team_users_ids = list(actor.member_set.values_list("user_id", flat=True))
return self.bulk_subscribe(group, team_users_ids, reason)
raise NotImplementedError("Unknown actor type: %r" % type(actor))
def bulk_subscribe(self, group, user_ids, reason=GroupSubscriptionReason.unknown):
"""
Subscribe a list of user ids to an issue, but only if the users are not explicitly
unsubscribed.
"""
user_ids = set(user_ids)
# 5 retries for race conditions where
# concurrent subscription attempts cause integrity errors
for i in range(4, -1, -1): # 4 3 2 1 0
existing_subscriptions = set(
GroupSubscription.objects.filter(
user_id__in=user_ids, group=group, project=group.project
).values_list("user_id", flat=True)
)
subscriptions = [
GroupSubscription(
user_id=user_id,
group=group,
project=group.project,
is_active=True,
reason=reason,
)
for user_id in user_ids
if user_id not in existing_subscriptions
]
try:
with transaction.atomic():
self.bulk_create(subscriptions)
return True
except IntegrityError as e:
if i == 0:
raise e
def get_participants(self, group):
"""
Identify all users who are participating with a given issue.
"""
from sentry.models import User, UserOptionValue
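        # Start from every active member of the project's teams, then drop
        # anyone who unsubscribed or whose notification settings exclude them.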
users = {
user.id: user
for user in User.objects.filter(
sentry_orgmember_set__teams__in=group.project.teams.all(), is_active=True
)
}
excluded_ids = set()
subscriptions = {
subscription.user_id: subscription
for subscription in GroupSubscription.objects.filter(
group=group, user_id__in=users.keys()
)
}
for user_id, subscription in subscriptions.items():
if not subscription.is_active:
excluded_ids.add(user_id)
options = get_user_options(
"workflow:notifications",
list(users.keys()),
group.project,
UserOptionValue.participating_only,
)
for user_id, option in options.items():
if option == UserOptionValue.no_conversations:
excluded_ids.add(user_id)
elif option == UserOptionValue.participating_only:
if user_id not in subscriptions:
excluded_ids.add(user_id)
results = {}
for user_id, user in users.items():
if user_id in excluded_ids:
continue
subscription = subscriptions.get(user_id)
if subscription is not None:
results[user] = subscription.reason
else:
results[user] = GroupSubscriptionReason.implicit
return results
class GroupSubscription(Model):
"""
Identifies a subscription relationship between a user and an issue.
"""
__core__ = False
project = FlexibleForeignKey("sentry.Project", related_name="subscription_set")
group = FlexibleForeignKey("sentry.Group", related_name="subscription_set")
# namespace related_name on User since we don't own the model
user = FlexibleForeignKey(settings.AUTH_USER_MODEL)
is_active = models.BooleanField(default=True)
reason = BoundedPositiveIntegerField(default=GroupSubscriptionReason.unknown)
date_added = models.DateTimeField(default=timezone.now, null=True)
objects = GroupSubscriptionManager()
class Meta:
app_label = "sentry"
db_table = "sentry_groupsubscription"
unique_together = (("group", "user"),)
__repr__ = sane_repr("project_id", "group_id", "user_id")
| bsd-3-clause | -1,947,804,583,507,867,600 | 32.315534 | 100 | 0.600612 | false |
ChuanleiGuo/AlgorithmsPlayground | LeetCodeSolutions/python/76_Minimum_Window_Substring.py | 1 | 1277 | class Solution(object):
def minWindow(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
begin, end = 0, 0
min_begin, min_end = 0, 0
min_size = float('inf')
need_find = {}
found = {}
for c in t:
need_find[c] = need_find.get(c, 0) + 1
found[s[0]] = found.get(s[0], 0) + 1
count = len(t)
if need_find.get(s[0], 0) >= found.get(s[0], 0):
count -= 1
while True:
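            # count == 0 means s[begin:end + 1] currently covers all of t.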
if count == 0:
while found.get(s[begin], 0) > need_find.get(s[begin], 0):
found[s[begin]] -= 1
begin += 1
size = end - begin + 1
if size < min_size:
min_begin, min_end = begin, end
min_size = size
if end < len(s) - 1:
end += 1
found[s[end]] = found.get(s[end], 0) + 1
if need_find.get(s[end], 0) >= found.get(s[end], 0):
count -= 1
else:
break
if min_size < float('inf'):
return s[min_begin: min_end + 1]
else:
return ""
print Solution().minWindow("ADOBECODEBANC", "ABC")
| mit | -7,023,012,057,661,462,000 | 29.404762 | 74 | 0.397807 | false |
annndrey/npui-unik | netprofile_ipaddresses/netprofile_ipaddresses/__init__.py | 1 | 2901 | #!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-
#
# NetProfile: IP addresses module
# © Copyright 2013-2014 Alex 'Unik' Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division
)
from netprofile.common.modules import ModuleBase
from sqlalchemy.orm.exc import NoResultFound
from pyramid.i18n import TranslationStringFactory
_ = TranslationStringFactory('netprofile_ipaddresses')
class Module(ModuleBase):
def __init__(self, mmgr):
self.mmgr = mmgr
mmgr.cfg.add_translation_dirs('netprofile_ipaddresses:locale/')
mmgr.cfg.scan()
@classmethod
def get_deps(cls):
return ('networks', 'dialup')
@classmethod
def get_models(cls):
from netprofile_ipaddresses import models
return (
models.IPv4Address,
models.IPv6Address
)
@classmethod
def get_sql_functions(cls):
from netprofile_ipaddresses import models
return (
models.IPAddrGetDotStrFunction,
models.IPAddrGetOffsetGenFunction,
models.IPAddrGetOffsetHGFunction,
models.IP6AddrGetOffsetGenFunction,
models.IP6AddrGetOffsetHGFunction
)
@classmethod
def get_sql_data(cls, modobj, sess):
from netprofile_core.models import (
Group,
GroupCapability,
LogType,
Privilege
)
sess.add(LogType(
id=8,
name='IPv4 Addresses'
))
sess.add(LogType(
id=19,
name='IPv6 Addresses'
))
privs = (
Privilege(
code='BASE_IPADDR',
name='Access: IP Addresses'
),
Privilege(
code='IPADDR_LIST',
name='IP Addresses: List'
),
Privilege(
code='IPADDR_CREATE',
name='IP Addresses: Create'
),
Privilege(
code='IPADDR_EDIT',
name='IP Addresses: Edit'
),
Privilege(
code='IPADDR_DELETE',
name='IP Addresses: Delete'
)
)
for priv in privs:
priv.module = modobj
sess.add(priv)
try:
grp_admins = sess.query(Group).filter(Group.name == 'Administrators').one()
for priv in privs:
cap = GroupCapability()
cap.group = grp_admins
cap.privilege = priv
except NoResultFound:
pass
def get_css(self, request):
return (
'netprofile_ipaddresses:static/css/main.css',
)
@property
def name(self):
return _('IP Addresses')
| agpl-3.0 | 1,859,566,080,437,848,000 | 22.015873 | 78 | 0.706207 | false |
CryptoManiac/electrum-nvc | gui/android.py | 1 | 31276 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import android
from electrum_nvc import SimpleConfig, Wallet, WalletStorage, format_satoshis, mnemonic_encode, mnemonic_decode
from electrum_nvc.bitcoin import is_valid
from electrum_nvc import util
from decimal import Decimal
import datetime, re
def modal_dialog(title, msg = None):
droid.dialogCreateAlert(title,msg)
droid.dialogSetPositiveButtonText('OK')
droid.dialogShow()
droid.dialogGetResponse()
droid.dialogDismiss()
def modal_input(title, msg, value = None, etype=None):
droid.dialogCreateInput(title, msg, value, etype)
droid.dialogSetPositiveButtonText('OK')
droid.dialogSetNegativeButtonText('Cancel')
droid.dialogShow()
response = droid.dialogGetResponse()
result = response.result
droid.dialogDismiss()
if result is None:
print "modal input: result is none"
return modal_input(title, msg, value, etype)
if result.get('which') == 'positive':
return result.get('value')
def modal_question(q, msg, pos_text = 'OK', neg_text = 'Cancel'):
droid.dialogCreateAlert(q, msg)
droid.dialogSetPositiveButtonText(pos_text)
droid.dialogSetNegativeButtonText(neg_text)
droid.dialogShow()
response = droid.dialogGetResponse()
result = response.result
droid.dialogDismiss()
if result is None:
print "modal question: result is none"
return modal_question(q,msg, pos_text, neg_text)
return result.get('which') == 'positive'
def edit_label(addr):
v = modal_input('Edit label',None,wallet.labels.get(addr))
if v is not None:
if v:
wallet.labels[addr] = v
else:
if addr in wallet.labels.keys():
wallet.labels.pop(addr)
wallet.update_tx_history()
wallet.save()
droid.fullSetProperty("labelTextView", "text", v)
def select_from_contacts():
title = 'Contacts:'
droid.dialogCreateAlert(title)
l = []
for i in range(len(wallet.addressbook)):
addr = wallet.addressbook[i]
label = wallet.labels.get(addr,addr)
l.append( label )
droid.dialogSetItems(l)
droid.dialogSetPositiveButtonText('New contact')
droid.dialogShow()
response = droid.dialogGetResponse().result
droid.dialogDismiss()
if response.get('which') == 'positive':
return 'newcontact'
result = response.get('item')
print result
if result is not None:
addr = wallet.addressbook[result]
return addr
def select_from_addresses():
droid.dialogCreateAlert("Addresses:")
l = []
addresses = wallet.addresses()
for i in range(len(addresses)):
addr = addresses[i]
label = wallet.labels.get(addr,addr)
l.append( label )
droid.dialogSetItems(l)
droid.dialogShow()
response = droid.dialogGetResponse()
result = response.result.get('item')
droid.dialogDismiss()
if result is not None:
addr = addresses[result]
return addr
def protocol_name(p):
if p == 't': return 'TCP'
if p == 'h': return 'HTTP'
if p == 's': return 'SSL'
if p == 'g': return 'HTTPS'
def protocol_dialog(host, protocol, z):
droid.dialogCreateAlert('Protocol',host)
if z:
protocols = z.keys()
else:
protocols = 'thsg'
l = []
current = protocols.index(protocol)
for p in protocols:
l.append(protocol_name(p))
droid.dialogSetSingleChoiceItems(l, current)
droid.dialogSetPositiveButtonText('OK')
droid.dialogSetNegativeButtonText('Cancel')
droid.dialogShow()
response = droid.dialogGetResponse().result
selected_item = droid.dialogGetSelectedItems().result
droid.dialogDismiss()
if not response: return
if not selected_item: return
if response.get('which') == 'positive':
return protocols[selected_item[0]]
def make_layout(s, scrollable = False):
content = """
<LinearLayout
android:id="@+id/zz"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:background="#ff222222">
<TextView
android:id="@+id/textElectrum"
android:text="Electrum-NVC"
android:textSize="7pt"
android:textColor="#ff4444ff"
android:gravity="left"
android:layout_height="wrap_content"
android:layout_width="match_parent"
/>
</LinearLayout>
%s """%s
if scrollable:
content = """
<ScrollView
android:id="@+id/scrollview"
android:layout_width="match_parent"
android:layout_height="match_parent" >
<LinearLayout
android:orientation="vertical"
android:layout_width="match_parent"
android:layout_height="wrap_content" >
%s
</LinearLayout>
</ScrollView>
"""%content
return """<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:id="@+id/background"
android:orientation="vertical"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:background="#ff000022">
%s
</LinearLayout>"""%content
def main_layout():
return make_layout("""
<TextView android:id="@+id/balanceTextView"
android:layout_width="match_parent"
android:text=""
android:textColor="#ffffffff"
android:textAppearance="?android:attr/textAppearanceLarge"
android:padding="7dip"
android:textSize="8pt"
android:gravity="center_vertical|center_horizontal|left">
</TextView>
<TextView android:id="@+id/historyTextView"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Recent transactions"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="center_vertical|center_horizontal|center">
</TextView>
%s """%get_history_layout(15),True)
def qr_layout(addr):
return make_layout("""
<TextView android:id="@+id/addrTextView"
android:layout_width="match_parent"
android:layout_height="50"
android:text="%s"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="center_vertical|center_horizontal|center">
</TextView>
<ImageView
android:id="@+id/qrView"
android:gravity="center"
android:layout_width="match_parent"
android:layout_height="350"
android:antialias="false"
android:src="file:///sdcard/sl4a/qrcode.bmp" />
<TextView android:id="@+id/labelTextView"
android:layout_width="match_parent"
android:layout_height="50"
android:text="%s"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="center_vertical|center_horizontal|center">
</TextView>
"""%(addr,wallet.labels.get(addr,'')), True)
payto_layout = make_layout("""
<TextView android:id="@+id/recipientTextView"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Pay to:"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="left">
</TextView>
<EditText android:id="@+id/recipient"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:tag="Tag Me" android:inputType="text">
</EditText>
<LinearLayout android:id="@+id/linearLayout1"
android:layout_width="match_parent"
android:layout_height="wrap_content">
<Button android:id="@+id/buttonQR" android:layout_width="wrap_content"
android:layout_height="wrap_content" android:text="From QR code"></Button>
<Button android:id="@+id/buttonContacts" android:layout_width="wrap_content"
android:layout_height="wrap_content" android:text="From Contacts"></Button>
</LinearLayout>
<TextView android:id="@+id/labelTextView"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Description:"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="left">
</TextView>
<EditText android:id="@+id/label"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:tag="Tag Me" android:inputType="text">
</EditText>
<TextView android:id="@+id/amountLabelTextView"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Amount:"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="left">
</TextView>
<EditText android:id="@+id/amount"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:tag="Tag Me" android:inputType="numberDecimal">
</EditText>
<LinearLayout android:layout_width="match_parent"
android:layout_height="wrap_content" android:id="@+id/linearLayout1">
<Button android:id="@+id/buttonPay" android:layout_width="wrap_content"
android:layout_height="wrap_content" android:text="Send"></Button>
</LinearLayout>""",False)
settings_layout = make_layout(""" <ListView
android:id="@+id/myListView"
android:layout_width="match_parent"
android:layout_height="wrap_content" />""")
def get_history_values(n):
values = []
h = wallet.get_tx_history()
length = min(n, len(h))
for i in range(length):
tx_hash, conf, is_mine, value, fee, balance, timestamp = h[-i-1]
try:
dt = datetime.datetime.fromtimestamp( timestamp )
if dt.date() == dt.today().date():
time_str = str( dt.time() )
else:
time_str = str( dt.date() )
except Exception:
time_str = 'pending'
conf_str = 'v' if conf else 'o'
label, is_default_label = wallet.get_label(tx_hash)
values.append((conf_str, ' ' + time_str, ' ' + format_satoshis(value,True), ' ' + label ))
return values
def get_history_layout(n):
rows = ""
i = 0
values = get_history_values(n)
for v in values:
a,b,c,d = v
color = "#ff00ff00" if a == 'v' else "#ffff0000"
rows += """
<TableRow>
<TextView
android:id="@+id/hl_%d_col1"
android:layout_column="0"
android:text="%s"
android:textColor="%s"
android:padding="3" />
<TextView
android:id="@+id/hl_%d_col2"
android:layout_column="1"
android:text="%s"
android:padding="3" />
<TextView
android:id="@+id/hl_%d_col3"
android:layout_column="2"
android:text="%s"
android:padding="3" />
<TextView
android:id="@+id/hl_%d_col4"
android:layout_column="3"
android:text="%s"
android:padding="4" />
</TableRow>"""%(i,a,color,i,b,i,c,i,d)
i += 1
output = """
<TableLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:stretchColumns="0,1,2,3">
%s
</TableLayout>"""% rows
return output
def set_history_layout(n):
values = get_history_values(n)
i = 0
for v in values:
a,b,c,d = v
droid.fullSetProperty("hl_%d_col1"%i,"text", a)
if a == 'v':
droid.fullSetProperty("hl_%d_col1"%i, "textColor","#ff00ff00")
else:
droid.fullSetProperty("hl_%d_col1"%i, "textColor","#ffff0000")
droid.fullSetProperty("hl_%d_col2"%i,"text", b)
droid.fullSetProperty("hl_%d_col3"%i,"text", c)
droid.fullSetProperty("hl_%d_col4"%i,"text", d)
i += 1
status_text = ''
def update_layout():
global status_text
if not network.is_connected():
text = "Not connected..."
elif not wallet.up_to_date:
text = "Synchronizing..."
else:
c, u = wallet.get_balance()
text = "Balance:"+format_satoshis(c)
if u : text += ' [' + format_satoshis(u,True).strip() + ']'
# vibrate if status changed
if text != status_text:
if status_text and network.is_connected() and wallet.up_to_date:
droid.vibrate()
status_text = text
droid.fullSetProperty("balanceTextView", "text", status_text)
if wallet.up_to_date:
set_history_layout(15)
def pay_to(recipient, amount, fee, label):
if wallet.use_encryption:
password = droid.dialogGetPassword('Password').result
if not password: return
else:
password = None
droid.dialogCreateSpinnerProgress("Electrum", "signing transaction...")
droid.dialogShow()
try:
tx = wallet.mktx( [(recipient, amount)], password, fee)
except Exception as e:
modal_dialog('error', e.message)
droid.dialogDismiss()
return
if label:
wallet.labels[tx.hash()] = label
droid.dialogDismiss()
r, h = wallet.sendtx( tx )
if r:
modal_dialog('Payment sent', h)
return True
else:
modal_dialog('Error', h)
def make_new_contact():
code = droid.scanBarcode()
r = code.result
if r:
data = r['extras']['SCAN_RESULT']
if data:
if re.match('^novacoin:', data):
address, _, _, _, _ = util.parse_URI(data)
elif is_valid(data):
address = data
else:
address = None
if address:
if modal_question('Add to contacts?', address):
wallet.add_contact(address)
else:
modal_dialog('Invalid address', data)
do_refresh = False
def update_callback():
global do_refresh
print "gui callback", network.is_connected()
do_refresh = True
droid.eventPost("refresh",'z')
def main_loop():
global do_refresh
update_layout()
out = None
quitting = False
while out is None:
event = droid.eventWait(1000).result
if event is None:
if do_refresh:
update_layout()
do_refresh = False
continue
print "got event in main loop", repr(event)
if event == 'OK': continue
if event is None: continue
if not event.get("name"): continue
# request 2 taps before we exit
if event["name"]=="key":
if event["data"]["key"] == '4':
if quitting:
out = 'quit'
else:
quitting = True
else: quitting = False
if event["name"]=="click":
id=event["data"]["id"]
elif event["name"]=="settings":
out = 'settings'
elif event["name"] in menu_commands:
out = event["name"]
if out == 'contacts':
global contact_addr
contact_addr = select_from_contacts()
if contact_addr == 'newcontact':
make_new_contact()
contact_addr = None
if not contact_addr:
out = None
elif out == "receive":
global receive_addr
receive_addr = select_from_addresses()
if receive_addr:
            amount = modal_input('Amount', 'Amount you want to receive.', '', "numberDecimal")
if amount:
                receive_addr = 'novacoin:%s?amount=%s'%(receive_addr, amount)
if not receive_addr:
out = None
return out
def payto_loop():
global recipient
if recipient:
droid.fullSetProperty("recipient","text",recipient)
recipient = None
out = None
while out is None:
event = droid.eventWait().result
if not event: continue
print "got event in payto loop", event
if event == 'OK': continue
if not event.get("name"): continue
if event["name"] == "click":
id = event["data"]["id"]
if id=="buttonPay":
droid.fullQuery()
recipient = droid.fullQueryDetail("recipient").result.get('text')
label = droid.fullQueryDetail("label").result.get('text')
amount = droid.fullQueryDetail('amount').result.get('text')
if not is_valid(recipient):
modal_dialog('Error','Invalid Novacoin address')
continue
try:
amount = int( 1000000 * Decimal(amount) )
except Exception:
modal_dialog('Error','Invalid amount')
continue
result = pay_to(recipient, amount, wallet.fee, label)
if result:
out = 'main'
elif id=="buttonContacts":
addr = select_from_contacts()
droid.fullSetProperty("recipient","text",addr)
elif id=="buttonQR":
code = droid.scanBarcode()
r = code.result
if r:
data = r['extras']['SCAN_RESULT']
if data:
if re.match('^novacoin:', data):
payto, amount, label, _, _ = util.parse_URI(data)
droid.fullSetProperty("recipient", "text",payto)
droid.fullSetProperty("amount", "text", amount)
droid.fullSetProperty("label", "text", label)
else:
droid.fullSetProperty("recipient", "text", data)
elif event["name"] in menu_commands:
out = event["name"]
elif event["name"]=="key":
if event["data"]["key"] == '4':
out = 'main'
#elif event["name"]=="screen":
# if event["data"]=="destroy":
# out = 'main'
return out
receive_addr = ''
contact_addr = ''
recipient = ''
def receive_loop():
out = None
while out is None:
event = droid.eventWait().result
print "got event", event
if event["name"]=="key":
if event["data"]["key"] == '4':
out = 'main'
elif event["name"]=="clipboard":
droid.setClipboard(receive_addr)
modal_dialog('Address copied to clipboard',receive_addr)
elif event["name"]=="edit":
edit_label(receive_addr)
return out
def contacts_loop():
global recipient
out = None
while out is None:
event = droid.eventWait().result
print "got event", event
if event["name"]=="key":
if event["data"]["key"] == '4':
out = 'main'
elif event["name"]=="clipboard":
droid.setClipboard(contact_addr)
modal_dialog('Address copied to clipboard',contact_addr)
elif event["name"]=="edit":
edit_label(contact_addr)
elif event["name"]=="paytocontact":
recipient = contact_addr
out = 'send'
elif event["name"]=="deletecontact":
if modal_question('delete contact', contact_addr):
out = 'main'
return out
def server_dialog(servers):
droid.dialogCreateAlert("Public servers")
droid.dialogSetItems( servers.keys() )
droid.dialogSetPositiveButtonText('Private server')
droid.dialogShow()
response = droid.dialogGetResponse().result
droid.dialogDismiss()
if not response: return
if response.get('which') == 'positive':
return modal_input('Private server', None)
i = response.get('item')
if i is not None:
response = servers.keys()[i]
return response
def show_seed():
if wallet.use_encryption:
password = droid.dialogGetPassword('Seed').result
if not password: return
else:
password = None
try:
seed = wallet.get_seed(password)
except Exception:
modal_dialog('error','incorrect password')
return
modal_dialog('Your seed is',seed)
modal_dialog('Mnemonic code:', ' '.join(mnemonic_encode(seed)) )
def change_password_dialog():
if wallet.use_encryption:
password = droid.dialogGetPassword('Your wallet is encrypted').result
if password is None: return
else:
password = None
try:
wallet.get_seed(password)
except Exception:
modal_dialog('error','incorrect password')
return
new_password = droid.dialogGetPassword('Choose a password').result
    if new_password is None:
return
if new_password != '':
password2 = droid.dialogGetPassword('Confirm new password').result
if new_password != password2:
modal_dialog('error','passwords do not match')
return
wallet.update_password(password, new_password)
if new_password:
modal_dialog('Password updated','your wallet is encrypted')
else:
modal_dialog('No password','your wallet is not encrypted')
return True
def settings_loop():
def set_listview():
host, port, p = network.default_server.split(':')
fee = str( Decimal( wallet.fee)/1000000 )
is_encrypted = 'yes' if wallet.use_encryption else 'no'
protocol = protocol_name(p)
droid.fullShow(settings_layout)
droid.fullSetList("myListView",['Server: ' + host, 'Protocol: '+ protocol, 'Port: '+port, 'Transaction fee: '+fee, 'Password: '+is_encrypted, 'Seed'])
set_listview()
out = None
while out is None:
event = droid.eventWait()
event = event.result
print "got event", event
if event == 'OK': continue
if not event: continue
servers = network.get_servers()
name = event.get("name")
if not name: continue
if name == "itemclick":
pos = event["data"]["position"]
host, port, protocol = network.default_server.split(':')
network_changed = False
if pos == "0": #server
host = server_dialog(servers)
if host:
p = servers[host]
port = p[protocol]
network_changed = True
elif pos == "1": #protocol
if host in servers:
protocol = protocol_dialog(host, protocol, servers[host])
z = servers[host]
port = z[protocol]
network_changed = True
elif pos == "2": #port
a_port = modal_input('Port number', 'If you use a public server, this field is set automatically when you set the protocol', port, "number")
if a_port != port:
port = a_port
network_changed = True
elif pos == "3": #fee
fee = modal_input('Transaction fee', 'The fee will be this amount multiplied by the number of inputs in your transaction. ', str( Decimal( wallet.fee)/1000000 ), "numberDecimal")
if fee:
try:
fee = int( 1000000 * Decimal(fee) )
except Exception:
modal_dialog('error','invalid fee value')
wallet.set_fee(fee)
set_listview()
elif pos == "4":
if change_password_dialog():
set_listview()
elif pos == "5":
show_seed()
if network_changed:
proxy = None
auto_connect = False
try:
network.set_parameters(host, port, protocol, proxy, auto_connect)
except Exception:
modal_dialog('error','invalid server')
set_listview()
elif name in menu_commands:
out = event["name"]
elif name == 'cancel':
out = 'main'
elif name == "key":
if event["data"]["key"] == '4':
out = 'main'
return out
def add_menu(s):
droid.clearOptionsMenu()
if s == 'main':
droid.addOptionsMenuItem("Send","send",None,"")
droid.addOptionsMenuItem("Receive","receive",None,"")
droid.addOptionsMenuItem("Contacts","contacts",None,"")
droid.addOptionsMenuItem("Settings","settings",None,"")
elif s == 'receive':
droid.addOptionsMenuItem("Copy","clipboard",None,"")
droid.addOptionsMenuItem("Label","edit",None,"")
elif s == 'contacts':
droid.addOptionsMenuItem("Copy","clipboard",None,"")
droid.addOptionsMenuItem("Label","edit",None,"")
droid.addOptionsMenuItem("Pay to","paytocontact",None,"")
#droid.addOptionsMenuItem("Delete","deletecontact",None,"")
def make_bitmap(addr):
# fixme: this is highly inefficient
droid.dialogCreateSpinnerProgress("please wait")
droid.dialogShow()
try:
import pyqrnative, bmp
qr = pyqrnative.QRCode(4, pyqrnative.QRErrorCorrectLevel.L)
qr.addData(addr)
qr.make()
k = qr.getModuleCount()
assert k == 33
bmp.save_qrcode(qr,"/sdcard/sl4a/qrcode.bmp")
finally:
droid.dialogDismiss()
droid = android.Android()
menu_commands = ["send", "receive", "settings", "contacts", "main"]
wallet = None
network = None
class ElectrumGui:
def __init__(self, config, _network):
global wallet, network
network = _network
network.register_callback('updated', update_callback)
network.register_callback('connected', update_callback)
network.register_callback('disconnected', update_callback)
network.register_callback('disconnecting', update_callback)
storage = WalletStorage(config)
if not storage.file_exists:
action = self.restore_or_create()
if not action: exit()
wallet = Wallet(storage)
if action == 'create':
wallet.init_seed(None)
self.show_seed()
wallet.save_seed(None)
wallet.synchronize() # generate first addresses offline
elif action == 'restore':
seed = self.seed_dialog()
if not seed:
exit()
wallet.init_seed(str(seed))
wallet.save_seed(None)
else:
exit()
wallet.start_threads(network)
if action == 'restore':
if not self.restore_wallet():
exit()
self.password_dialog()
else:
wallet = Wallet(storage)
wallet.start_threads(network)
def main(self, url):
s = 'main'
while True:
add_menu(s)
if s == 'main':
droid.fullShow(main_layout())
s = main_loop()
elif s == 'send':
droid.fullShow(payto_layout)
s = payto_loop()
elif s == 'receive':
make_bitmap(receive_addr)
droid.fullShow(qr_layout(receive_addr))
s = receive_loop()
elif s == 'contacts':
make_bitmap(contact_addr)
droid.fullShow(qr_layout(contact_addr))
s = contacts_loop()
elif s == 'settings':
s = settings_loop()
else:
break
droid.makeToast("Bye!")
def restore_or_create(self):
droid.dialogCreateAlert("Wallet not found","Do you want to create a new wallet, or restore an existing one?")
droid.dialogSetPositiveButtonText('Create')
droid.dialogSetNeutralButtonText('Restore')
droid.dialogSetNegativeButtonText('Cancel')
droid.dialogShow()
response = droid.dialogGetResponse().result
droid.dialogDismiss()
if not response: return
if response.get('which') == 'negative':
return
return 'restore' if response.get('which') == 'neutral' else 'create'
def seed_dialog(self):
if modal_question("Enter your seed","Input method",'QR Code', 'mnemonic'):
code = droid.scanBarcode()
r = code.result
if r:
seed = r['extras']['SCAN_RESULT']
else:
return
else:
m = modal_input('Mnemonic','please enter your code')
try:
seed = mnemonic_decode(m.split(' '))
except Exception:
modal_dialog('error: could not decode this seed')
return
return str(seed)
def network_dialog(self):
return True
def show_seed(self):
modal_dialog('Your seed is:', wallet.seed)
modal_dialog('Mnemonic code:', ' '.join(mnemonic_encode(wallet.seed)) )
def password_dialog(self):
change_password_dialog()
def restore_wallet(self):
msg = "recovering wallet..."
droid.dialogCreateSpinnerProgress("Electrum", msg)
droid.dialogShow()
wallet.restore(lambda x: None)
droid.dialogDismiss()
droid.vibrate()
if wallet.is_found():
wallet.fill_addressbook()
modal_dialog("recovery successful")
else:
if not modal_question("no transactions found for this seed","do you want to keep this wallet?"):
return False
return True
| gpl-3.0 | 1,227,546,001,268,356,900 | 29.424125 | 194 | 0.557904 | false |
dnstanciu/dnstanciu.github.io-src | pelicanconf.py | 1 | 3246 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
from datetime import date
# disable caching when changing settings
LOAD_CONTENT_CACHE = False
AUTHOR = u'Dragos Stanciu'
SITENAME = u'Dragos Stanciu'
SITEURL = ''
##########################
### Flex theme options ###
##########################
SITETITLE = AUTHOR
SITESUBTITLE = u'Software Developer'
SITEDESCRIPTION = u'Dragos Stanciu\'s website'
SITELOGO = u'https://secure.gravatar.com/avatar/9520bffa0515e54859f849fc8b6b99e1?size=400'
#MAIN_MENU = True
MAIN_MENU = False
# Times and dates
DEFAULT_DATE_FORMAT = '%d %b, %Y'
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'en'
PATH = 'content'
#######################
### Static homepage ###
#######################
# found in content/pages/home.md
# Uncomment the 2 lines below to use a static page as home page
INDEX_SAVE_AS = 'blog/index.html'
LINKS = (('Dragon Programmer blog', 'https://dragonprogrammer.com/'),) # add link to blog
# Set article URL
ARTICLE_URL = 'blog/{date:%Y}/{date:%m}/{slug}/'
ARTICLE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/{slug}/index.html'
# Set page URL
PAGE_URL = '{slug}/'
PAGE_SAVE_AS = '{slug}/index.html'
CATEGORY_URL = 'blog/category/{slug}/'
CATEGORY_SAVE_AS = 'blog/category/{slug}/index.html'
TAG_URL = 'blog/tag/{slug}/'
TAG_SAVE_AS = 'blog/tag/{slug}/index.html'
# don't need author pages, as I'm the only author
AUTHOR_URL = ''
AUTHOR_SAVE_AS = ''
# create per year and per month archives
YEAR_ARCHIVE_SAVE_AS = 'blog/{date:%Y}/index.html'
MONTH_ARCHIVE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/index.html'
ARCHIVES_SAVE_AS = 'blog/archives.html'
CATEGORIES_SAVE_AS = 'blog/categories.html'
TAGS_SAVE_AS = 'blog/tags.html'
DEFAULT_PAGINATION = 10
STATIC_PATHS = ['images', 'figures', 'downloads', 'extra/CNAME', 'extra/robots.txt', 'extra/favicon.ico']
EXTRA_PATH_METADATA = {
'extra/CNAME': {'path': 'CNAME'},
'extra/robots.txt': {'path': 'robots.txt'},
'extra/favicon.ico': {'path': 'favicon.ico'}
}
THEME = '/home/dragos/src/pelican-themes/Flex'
PLUGIN_PATHS = ['/home/dragos/src/pelican-plugins']
PLUGINS = ['sitemap']
# Sitemap
SITEMAP = {
'format': 'xml',
'priorities': {
'articles': 0.6,
'indexes': 0.5,
'pages': 0.5
},
'changefreqs': {
'articles': 'monthly',
'indexes': 'daily',
'pages': 'monthly'
},
'exclude': ['tag/', 'category/'],
}
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Social widget
SOCIAL = (('linkedin', 'https://www.linkedin.com/in/dragosstanciu'),
('github', 'https://github.com/dnstanciu'),
('twitter', 'https://twitter.com/dnstanciu'),
('facebook', 'https://www.facebook.com/DragonProgrammer/'),
('youtube', 'https://www.youtube.com/channel/UCvxBX7213FF2JCQC68Dx50A'))
COPYRIGHT_YEAR = date.today().year
# MENUITEMS = (('Archives', '/blog/archives.html'),
# ('Categories', '/blog/categories.html'))#,
#('Tags', '/blog/tags.html'),)
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
| mit | -7,458,738,056,854,363,000 | 25.390244 | 105 | 0.634627 | false |
zhlinh/leetcode | 0003.Longest Substring Without Repeating Characters/solution.py | 1 | 1047 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: [email protected]
Version: 0.0.1
Created Time: 2016-01-05
Last_modify: 2016-01-05
******************************************
'''
'''
Given a string, find the length of the longest substring
without repeating characters.
For example, the longest substring without repeating letters
for "abcabcbb" is "abc", which the length is 3.
For "bbbbb" the longest substring is "b", with the length of 1.
'''
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
chList = [-1] * 256
maxLen = 0
start = 0
for i in range(len(s)):
asc = ord(s[i])
            if chList[asc] != -1 and chList[asc] >= start:
if (i - start) > maxLen:
maxLen = i - start
start = chList[asc] + 1
chList[asc] = i
return max(maxLen, len(s)-start)
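# Quick sanity checks (illustrative additions, not part of the original file):
if __name__ == "__main__":
    assert Solution().lengthOfLongestSubstring("abcabcbb") == 3  # "abc"
    assert Solution().lengthOfLongestSubstring("bbbbb") == 1     # "b"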
| apache-2.0 | 3,627,866,095,556,254,000 | 27.297297 | 63 | 0.501433 | false |
dangu/nes_game | src/generateSoundData.py | 1 | 11474 | def writeBinary(filename):
"""Writes binary data to file"""
f1=open(filename, 'w')
for x in range(8192):
f1.write(chr((x*3)&0xFF))
f1.close()
def writeTestPalette(filename):
"""Write a test palette (20 bytes)
lda #$01
lda #$02
lda #$03
lda #$04
lda #$05
lda #$06
lda #$07
lda #$08
lda #$01 ;stop here
lda #$08
lda #$09
lda #$0A
lda #$01
lda #$0B
lda #$0C
lda #$0D
lda #$01 ;Start sprite colors
lda #$0D
lda #$08
lda #$2B
lda #$01
lda #$05
lda #$06
lda #$07
lda #$01
lda #$08
lda #$09
lda #$0A
lda #$01
lda #$0B
lda #$0C
lda #$0D
"""
f1=open(filename, 'w')
# Palette memory according to https://wiki.nesdev.com/w/index.php/PPU_palettes
data = [0x0E, # 0x3F00 Universal background color
0x0E, 0x20, 0x20, 0x20, # 0x3F01-0x3F03 Background pal 0
            0x20, 0x20, 0x00, 0x00, # 0x3F05-0x3F07 Background pal 1
0x20, 0x00, 0x00, 0x00, # 0x3F09-0x3F0B Background pal 2
0x20, 0x00, 0x00, 0x00, # 0x3F0D-0x3F0F Background pal 3
0x30, 0x31, 0x34, 0x2A, # 0x3F11-0x3F13 Sprite palette 0
0x20, 0x00, 0x00, 0x00, # 0x3F15-0x3F17 Sprite palette 1
0x20, 0x00, 0x00, 0x00, # 0x3F19-0x3F1B Sprite palette 2
0x20, 0x00, 0x00, 0x00] # 0x3F1D-0x3F1F Sprite palette 3
for x in data:
f1.write(chr(x))
f1.close()
class NESSound:
"""A class for handling NES sounds
The conversion formula (according to
http://nintendoage.com/forum/messageview.cfm?catid=22&threadid=22776):
P = C/(F*16) - 1
P = Period
C = CPU speed (in Hz)
F = Frequency of the note (also in Hz).
The inverse will be:
C/(F*16)= P + 1
F*16/C = 1/(P + 1)
F = C/(16*(P + 1))
"""
def __init__(self, cpuFrequency):
"""Init"""
self.cpuFrequency = cpuFrequency
self.f1 = None
def open(self, filename):
"""Open file for writing"""
self.f1 = open(filename, 'w')
def close(self):
"""Close output file"""
self.f1.close()
def freq2period(self, f):
"""Convert a given frequency to period"""
p = self.cpuFrequency/(f*16.0) - 1
return p
def period2freq(self, p):
"""Convert a given period to frequency"""
f = self.cpuFrequency/(16*(p + 1))
return f
def halftoneFactor(self, halftone):
"""The equally tempered scale is defined as
2^(n/12) where n is the halftone number:
n note
0 C
1 C#
2 D
3 D#
4 E
5 F
6 F#
7 G
8 G#
9 A
10 A#
11 B
12 C2"""
factor = 2**(halftone/12.0)
return factor
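    # Sanity check (illustrative): halftoneFactor(12) == 2.0 (one octave up
    # doubles the frequency) and halftoneFactor(-12) == 0.5 (one octave down).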
def generateNoteTable(self):
"""Generate a note table that can be imported to
NESASM-programs
The definition is A4 = 440Hz
C5 is 3 halftones above A4
C4 is 9 halftones below A4
C3 is 12 halftones below C4, 21 halftones below A4
C1 is 21+24=45 halftones below A4
"""
names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
f1 = self.f1
freqA4 = 440 # [Hz]
# Loop from C1 to C5
notes = []
for octave in range(1, 9+1):
baseHalftoneNumber = 12*(octave-1) - 45 # C1 is 45 halftones below A4
notesInOctave = []
for halftoneNumberInOctave in range(0, 12):
halftoneNumber = baseHalftoneNumber + halftoneNumberInOctave
note = {}
note['octave'] = octave
note['name'] = names[halftoneNumberInOctave]
note['freq'] = freqA4*self.halftoneFactor(halftoneNumber)
note['period'] = self.freq2period(note['freq'])
notesInOctave.append(note)
notes.append(notesInOctave)
# Write header
f1.write("; Frequency values [Hz]\n; ")
for notename in names:
f1.write("%8s" %(notename))
f1.write("\n")
for octave in notes:
f1.write("; ")
for note in octave:
f1.write("%8s" %("%.1f " %(note['freq'])))
f1.write("; Octave %d\n" %(note['octave']))
f1.write("\n")
f1.write("; Period values, floating point\n; ")
for notename in names:
f1.write("%8s" %(notename))
f1.write("\n")
for octave in notes:
f1.write("; ")
for note in octave:
f1.write("%8s" %("%.1f " %(note['period'])))
f1.write("; Octave %d\n" %(note['octave']))
f1.write("\n")
f1.write("; Frequency error (actual - wanted)[Hz]\n; ")
for notename in names:
f1.write("%8s" %(notename))
f1.write("\n")
for octave in notes:
f1.write("; ")
for note in octave:
f1.write("%8s" %("%.1f " %(self.period2freq(round(note['period']))-note['freq'])))
f1.write("; Octave %d\n" %(note['octave']))
f1.write("\n")
f1.write("; Period values, rounded nearest\n; ")
for notename in names:
f1.write("%8s" %(notename))
f1.write("\n")
for octave in notes:
f1.write("; ")
for note in octave:
f1.write("%8s" %("%d " %(round(note['period']))))
f1.write("; Octave %d\n" %(note['octave']))
f1.write("\n")
f1.write("; Period values, rounded nearest, hex\n; ")
for notename in names:
f1.write("%8s" %(notename))
f1.write("\n")
for octave in notes:
row = " .word "
for note in octave:
row +="%8s" %("$%04X, " %(round(note['period'])))
row = row[:-2] # Remove trailing ','
f1.write("%s ; Octave %d\n" %(row, note['octave']))
f1.write("\n")
# Define a dummy period for a rest
f1.write("; Rest (dummy value)\n")
f1.write(" .word $0000\n\n")
# Write the corresponding note defines, for example "Cs1" for C# first octave
f1.write("; Note defines\n")
offset = 0
for octave in notes:
for note in octave:
# C# => Cs
assemplerNoteName = note['name'].replace('#','s')
completeNoteName = "%s%d" %(assemplerNoteName, note['octave'])
f1.write("%-3s = $%02X\n" %(completeNoteName, offset))
# Ugly hack to produce Db out of C#
if(note['name'].count('#')):
# G# => Ab and not Hb
if(note['name'][0]=='G'):
completeNoteName2 = "Ab%d" %(note['octave'])
else:
completeNoteName2 = "%sb%d" %(chr(ord(note['name'][0])+1), note['octave'])
f1.write("%-3s = $%02X\n" %(completeNoteName2, offset))
offset += 1
f1.write("\n; The rest will have the index $%02X in the table:\n" %offset)
f1.write("rest = $%02X\n" %offset)
f1.write("\nnote_length_table:\n")
f1.write(" .byte $01 ; 32nd note\n")
f1.write(" .byte $02 ; 16th note\n")
f1.write(" .byte $04 ; 8th note\n")
f1.write(" .byte $08 ; quarter note\n")
f1.write(" .byte $10 ; half note\n")
f1.write(" .byte $20 ; whole note\n\n")
f1.write(" .byte $03 ; Dotted sixteenth\n")
f1.write(" .byte $06 ; Dotted eighth\n")
f1.write(" .byte $0C ; Dotted quarter\n")
f1.write(" .byte $18 ; Dotted half\n")
f1.write(" .byte $30 ; Dotted whole\n\n")
f1.write(" .byte $07 ; Modified quarter to fit after sixteenth triplets\n")
f1.write(" .byte $14 ; \n")
f1.write(" .byte $0A ; \n")
f1.write("; Note length defines (for using defining a song)\n")
f1.write("thirtysecond = $80\n")
f1.write("sixteenth = $81\n")
f1.write("eighth = $82\n")
f1.write("quarter = $83\n")
f1.write("half = $84\n")
f1.write("whole = $85\n")
f1.write("d_sixteenth = $86 ; Dotted notes\n")
f1.write("d_eighth = $87\n")
f1.write("d_quarter = $88\n")
f1.write("d_half = $89\n")
f1.write("d_whole = $8A\n")
f1.write("t_quarter = $8B ; Triplets\n")
def generateVolumeEnvelopes(self):
"""Create the volume envelopes"""
self.f1.write("\n; Volume envelopes\n")
# Define some envelopes
envelopeDataList = [[15, 14, 13, 6, 3, 0],
[15, 5, 15, 5, 15, 5],
[14,9,8,6,4,3,2,1,0], # Drum decay
]
envelopeNumber = 1
envelopeNameList = []
# Loop through all envelopes
for envelopeData in envelopeDataList:
envelopeName = "se_ve_%d" %envelopeNumber
self.f1.write(envelopeName + ":\n")
envelopeNameList.append(envelopeName)
self.f1.write(" .byte ")
envelopeDataString = ""
for data in envelopeData:
if not (0<=data<=0x0F):
raise ValueError("Encountered an invalid volume value (%d=0x%02X)" %(data, data))
envelopeDataString += "$%02X, " %data
envelopeDataString = envelopeDataString[:-2] # Strip off the last ","
self.f1.write(envelopeDataString)
self.f1.write("\n .byte $FF\n")
envelopeNumber += 1
# Now create the table of volume envelopes
self.f1.write("\n; Table of volume envelopes:\n")
self.f1.write("volume_envelopes:\n")
envelopeTableString = ""
for envelopeName in envelopeNameList:
envelopeTableString += "%s, " %envelopeName
envelopeTableString = envelopeTableString[:-2] # Strip off the last ", "
self.f1.write(" .word %s\n" %envelopeTableString)
def testSound():
"""Test the sound class"""
cpuFreqNTSC = 1790000
cpuFreqPAL = 1662607
    s = NESSound(cpuFrequency = cpuFreqNTSC)
    s.open("sound_data.asm")
    s.generateNoteTable()
    s.close()
for note in range(24):
s = NESSound(cpuFrequency = cpuFreqPAL)
f = 220*s.halftoneFactor(note)
p = s.freq2period(f)
pRound = round(p)
fRound = s.period2freq(pRound)
fError = fRound-f
print "Freq: %g\tPeriod: %g\tPeriod rounded: %g\tFreq rounded: %g\tFreq error: %g" %(f, p, pRound, fRound, fError)
if __name__=="__main__":
# writeBinary("test.chr")
#writeTestPalette("test.pal")
#testSound()
cpuFreqNTSC = 1790000
cpuFreqPAL = 1662607
s = NESSound(cpuFrequency = cpuFreqNTSC)
filename = "sound_data.asm"
s.open(filename)
s.generateNoteTable()
s.generateVolumeEnvelopes()
s.close()
print "Finished generating data file " + filename | gpl-3.0 | -3,254,924,269,458,319,000 | 32.75 | 122 | 0.494509 | false |
jleclanche/bugzilla-to-github | xmlrpc_download.py | 1 | 2598 | #!/usr/bin/env python
"""
Connect to a bugzilla xml-rpc.cgi and download all the things.
This exports products, bugs, comments and bug history to a "bugzilla.json"
output file which can in turn be used to quickly import things to a different
format.
"""
import json
import sys
import xmlrpc.client
# Edit these to your liking or in local_settings.py
# Highest bug id in Bugzilla. Any bug with a higher id will not be imported.
MAX_BUG_ID = 10000
# Export output file
XMLRPC_EXPORT_FILE = "bugzilla.json"
# List of bugs that will not be exported
XMLRPC_BLACKLIST = []
try:
from local_settings import *
except ImportError:
pass
class RPCEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, xmlrpc.client.DateTime):
return o.value
raise NotImplementedError
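# RPCEncoder is handed to json.dumps() in main() below so that
# xmlrpc.client.DateTime values serialize as their string form instead of
# raising a TypeError.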
def main():
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s [URL TO XML-RPC]\n" % (sys.argv[0]))
exit(1)
emails = set()
print("Connecting to %r" % (sys.argv[1]))
bugzilla = xmlrpc.client.ServerProxy(sys.argv[1])
print("Exporting products")
_products = bugzilla.Product.get(bugzilla.Product.get_selectable_products())["products"]
products = {product["name"]: product for product in _products}
print("Exporting bugs")
valid_ids = filter(lambda i: i not in XMLRPC_BLACKLIST, range(1, MAX_BUG_ID))
bugs = bugzilla.Bug.get({"ids": list(valid_ids), "permissive": True})["bugs"]
valid_ids = [k["id"] for k in bugs]
print("Exporting bug history")
history = bugzilla.Bug.history({"ids": valid_ids})["bugs"]
print("Exporting comments")
_comments = bugzilla.Bug.comments({"ids": valid_ids})["bugs"]
# god damn it bugzilla
comments = {int(id): _comments[id] for id in _comments}
for histitem, bug in zip(history, bugs):
assert histitem["id"] == bug["id"]
bug["history"] = histitem["history"]
# turn bugs into a dict
bugs = {int(bug["id"]): bug for bug in bugs}
	for id, bug_comments in comments.items():
		bug_comments = bug_comments["comments"]
		for comment in bug_comments:
			# Add to the list of users we want to export
			emails.add(comment["author"])
		bugs[id]["comments"] = bug_comments
# now move the bugs dict to the products
for product in products.values():
product["bugs"] = {}
for id, bug in bugs.items():
products[bug["product"]]["bugs"][id] = bug
json_out = {"products": products}
print("Exporting all users")
users = bugzilla.User.get({"names": list(emails)})["users"]
json_out["users"] = {user["name"]: user["real_name"] for user in users}
with open(XMLRPC_EXPORT_FILE, "w") as f:
f.write(json.dumps(json_out, cls=RPCEncoder))
if __name__ == "__main__":
main()
| mit | -8,742,910,421,061,512,000 | 25.510204 | 89 | 0.688607 | false |
BoomButton/DF-BAMM | src/bamm/common/parsing.py | 1 | 5257 | '''
Created on May 21, 2015
@author: Button
'''
from src.bamm.common import config
import re
import traceback
ascii_codes = None
userlog = config.userlog
modderslog = config.modderslog
def _load_ascii_conversions(ascii_file):
"""Load ASCII conversions from file.
The file is structured as a .property file, with a new conversion on each
line.
Each line of the file should be in the format " '%'=# ", where % is a
single character, and # is the character's position on code page 437 (the
basis of the Dwarf Fortress tileset).
Comments are not allowed in this file.
These conversions will later be used by the function
escape_problematic_literals.
"""
global ascii_codes
userlog.info("Loading ASCII conversions...")
if ascii_codes is None:
ascii_codes = {}
try:
for line in open(ascii_file):
real_line = line.strip()
if len(real_line) == 0:
continue
elif '=' not in real_line:
                userlog.warning('ASCII conversion file contains the '
                                'improperly-formatted line %s.', real_line)
else:
point = real_line.rindex('=')
if real_line[:point] in ascii_codes.keys():
userlog.warning('Duplicate entry for ascii replacement %s',
real_line[:point])
else:
ascii_codes[real_line[:point]] = real_line[point+1:]
userlog.info("ASCII conversions loaded.")
except TypeError:
userlog.error("Undefined ascii conversion file. Please add an 'ascii'",
"property in", config.runconfig, ".")
userlog.error(traceback.format_exc())
raise
except:
userlog.error("Problem loading ASCII conversions. " +
"If you have made changes to " + ascii_file +
", please restore it. " +
"Otherwise, please contact a BAMM! developer.")
userlog.error(traceback.format_exc())
raise
def tags(line):
"""Return an ordered list of all the tags in this line, without brackets,
with literals escaped if necessary."""
processed_line = escape_problematic_literals(line)
to_return = [] # list of strings, a la split()
while ('[' in processed_line and
']' in processed_line and
processed_line.index('[') < processed_line.rindex(']')):
if processed_line.index(']') < processed_line.index('['):
processed_line = processed_line[processed_line.index('['):]
to_return.append(processed_line[processed_line.index('[')+1:
processed_line.index(']')])
processed_line = processed_line[processed_line.index(']')+1:]
return to_return
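# Example (illustrative): tags("[OBJECT:CREATURE][CREATURE:TOAD]") returns
# ['OBJECT:CREATURE', 'CREATURE:TOAD'].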
def escape_problematic_literals(line):
""" Returns line with its char literals replaced with their cp437 codes.
Char literals are usually used for defining tiles, and are two single
quotes around a character, so: '*'. Since this is the only case in which
the DF raw characters ']', '[' and ':' are allowed within a tag outside
their uses, and since cp437 codes are equally valid, replacing these with
their cp437 codes is harmless and streamlines lexing considerably.
"""
global ascii_codes
# Replace literal key characters with number codes
# Literal colons are going to require some special processing, because of
# the following case: GROWTH:'r':'x': etc. That's why we can't just use
# a blind replaceAll.
# If odd, we are inside a tag. If even, we are outside a tag.
bracketscount = 0
count = 0 # Where we are in the string
quotescount = 0
while count < len(line)-2:
# Going from inside a tag to outside or vice versa
if (((bracketscount % 2 == 0 and line[count] == "[") or
(bracketscount % 2 == 1 and line[count] == "]"))):
bracketscount += 1
# We are inside a tag and we have discovered a ' character beginning a
# literal value, with another 2 characters down on the other side.
elif (quotescount % 2 == 0 and bracketscount % 2 == 1 and
line[count:count+3] in ascii_codes.keys()):
# If the character could be a problem for later processing, replace
# it with its ascii code.
line = line[:count] + ascii_codes[line[count:count+3]] + \
line[count+3:]
elif line[count] == "'":
quotescount += 1
elif bracketscount % 2 == 1 and line[count] == ':':
quotescount = 0
count += 1
# line has now had its literal "use this tile" versions of its special
# characters replaced with their numbers.
return line
def path_compatible(full_path, allowed_paths):
"""Return True if full_path regex matches anything in allowed_paths, or
False otherwise."""
full_path = full_path.replace('\\', '/')
for allowed_path in allowed_paths:
allowed_path = allowed_path.replace('\\', '/')
match = re.match(allowed_path, full_path)
if match is not None:
return True
return False
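# Example (illustrative): path_compatible("raw\\objects\\creature_x.txt",
# ["raw/objects/creature_.*"]) returns True; backslashes are normalized to
# forward slashes before the regex match.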
| agpl-3.0 | -8,795,052,315,464,268,000 | 37.372263 | 79 | 0.600723 | false |
nmc-probe/emulab-nome | protogeni/test/stitch.py | 1 | 10003 | #! /usr/bin/env python
#
# Copyright (c) 2008-2012 University of Utah and the Flux Group.
#
# {{{GENIPUBLIC-LICENSE
#
# GENI Public License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#
# }}}
#
#
#
import sys
import pwd
import getopt
import os
import re
import xmlrpclib
import urllib
from xml.sax.handler import ContentHandler
import xml.sax
import xml.dom.minidom
import string
from M2Crypto import X509
ACCEPTSLICENAME=1
def Usage():
print "usage: " + sys.argv[ 0 ] + " [option...] rspec-file \
[component-manager-1 component-manager-2]"
print """Options:
-c file, --credentials=file read self-credentials from file
[default: query from SA]
-d, --debug be verbose about XML methods invoked
-f file, --certificate=file read SSL certificate from file
[default: ~/.ssl/encrypted.pem]
-h, --help show options and usage
-n name, --slicename=name specify human-readable name of slice
[default: mytestslice]
-p file, --passphrase=file read passphrase from file
[default: ~/.ssl/password]
-r file, --read-commands=file specify additional configuration file
-s file, --slicecredentials=file read slice credentials from file
[default: query from SA]
component-manager-1 and component-manager-2 are hrns
rspec-file is the rspec to be sent to the two component managers."""
execfile( "test-common.py" )
if len(args) == 1 or len(args) == 3:
try:
rspecfile = open(args[ 0 ])
rspec = rspecfile.read()
rspecfile.close()
except IOError, e:
print >> sys.stderr, args[ 0 ] + ": " + e.strerror
sys.exit( 1 )
pass
if len(args) == 3:
managers = (args[1], args[2])
else:
managers = None;
pass
else:
Usage()
sys.exit( 1 )
pass
#
# Get a credential for myself, that allows me to do things at the SA.
#
mycredential = get_self_credential()
print "Got my SA credential"
#
# Lookup slice.
#
params = {}
params["credential"] = mycredential
params["type"] = "Slice"
params["hrn"] = SLICENAME
rval,response = do_method("sa", "Resolve", params)
if rval:
#
# Create a slice.
#
print "Creating new slice called " + SLICENAME
params = {}
params["credential"] = mycredential
params["type"] = "Slice"
params["hrn"] = SLICENAME
rval,response = do_method("sa", "Register", params)
if rval:
Fatal("Could not create new slice")
pass
myslice = response["value"]
print "New slice created"
pass
else:
#
# Get the slice credential.
#
print "Asking for slice credential for " + SLICENAME
myslice = response["value"]
myslice = get_slice_credential( myslice, mycredential )
print "Got the slice credential"
pass
#
# Ask the clearinghouse for a list of component managers.
#
params = {}
params["credential"] = mycredential
rval,response = do_method("ch", "ListComponents", params)
if rval:
Fatal("Could not get a list of components from the ClearingHouse")
pass
components = response["value"];
if managers:
    def FindCM( name, cmlist ):
        for cm in cmlist:
            hrn = cm[ "hrn" ]
            if hrn == name or hrn == name + ".cm":
                return cm[ "url" ]
        Fatal( "Could not find component manager " + name )
url1 = FindCM( managers[ 0 ], components )
url2 = FindCM( managers[ 1 ], components )
else:
url1 = "https://www.emulab.net:12369/protogeni/xmlrpc/cm"
url2 = "https://boss.utah.geniracks.net:12369/protogeni/xmlrpc/cm"
pass
def DeleteSlivers():
#
# Delete the slivers.
#
print "Deleting sliver1 now"
params = {}
params["credentials"] = (myslice,)
params["slice_urn"] = SLICEURN
rval,response = do_method(None, "DeleteSlice",
params, URI=url1, version="2.0")
if rval:
Fatal("Could not delete sliver on CM1")
pass
print "Sliver1 has been deleted"
print "Deleting sliver2 now"
params = {}
params["credentials"] = (myslice,)
params["slice_urn"] = SLICEURN
rval,response = do_method(None, "DeleteSlice",
params, URI=url2, version="2.0")
if rval:
Fatal("Could not delete sliver on CM2")
pass
print "Sliver2 has been deleted"
sys.exit(0);
pass
if DELETE:
DeleteSlivers()
sys.exit(1)
pass
#
# Lookup my ssh keys.
#
params = {}
params["credential"] = mycredential
rval,response = do_method("sa", "GetKeys", params)
if rval:
Fatal("Could not get my keys")
pass
mykeys = response["value"]
if debug: print str(mykeys)
print "Asking for a ticket from CM1 ..."
params = {}
params["slice_urn"] = SLICEURN
params["credentials"] = (myslice,)
params["rspec"] = rspec
rval,response = do_method(None, "GetTicket", params, URI=url1, version="2.0")
if rval:
if response and response["value"]:
print >> sys.stderr, ""
print >> sys.stderr, str(response["value"])
print >> sys.stderr, ""
pass
Fatal("Could not get ticket")
pass
ticket1 = response["value"]
print "Got a ticket from CM1, asking for a ticket from CM2 ..."
#
# Get a ticket for a node on another CM.
#
params = {}
params["slice_urn"] = SLICEURN
params["credentials"] = (myslice,)
params["rspec"] = rspec
rval,response = do_method(None, "GetTicket", params, URI=url2, version="2.0")
if rval:
if response and response["value"]:
print >> sys.stderr, ""
print >> sys.stderr, str(response["value"])
print >> sys.stderr, ""
pass
Fatal("Could not get ticket")
pass
ticket2 = response["value"]
print "Got a ticket from CM2, redeeming ticket on CM1 ..."
#
# Create the slivers.
#
params = {}
params["credentials"] = (myslice,)
params["ticket"] = ticket1
params["slice_urn"] = SLICEURN
params["keys"] = mykeys
rval,response = do_method(None, "RedeemTicket", params,
URI=url1, version="2.0")
if rval:
Fatal("Could not redeem ticket on CM1")
pass
sliver1,manifest1 = response["value"]
print "Created a sliver on CM1, redeeming ticket on CM2 ..."
print str(manifest1);
params = {}
params["credentials"] = (myslice,)
params["ticket"] = ticket2
params["slice_urn"] = SLICEURN
params["keys"] = mykeys
rval,response = do_method(None, "RedeemTicket", params,
URI=url2, version="2.0")
if rval:
Fatal("Could not redeem ticket on CM2")
pass
sliver2,manifest2 = response["value"]
print "Created a sliver on CM2"
print str(manifest2)
#
# Start the slivers.
#
params = {}
params["credentials"] = (sliver1,)
params["slice_urn"] = SLICEURN
rval,response = do_method(None, "StartSliver", params, URI=url1, version="2.0")
if rval:
Fatal("Could not start sliver on CM1")
pass
print "Started sliver on CM1. Starting sliver on CM2 ..."
params = {}
params["credentials"] = (sliver2,)
params["slice_urn"] = SLICEURN
rval,response = do_method(None, "StartSliver", params, URI=url2, version="2.0")
if rval:
Fatal("Could not start sliver on CM2")
pass
print "Slivers have been started"
print "You should be able to log into the sliver after a little bit."
print "Polling CM1 for a while, type ^C to stop."
params = {}
params["slice_urn"] = SLICEURN
params["credentials"] = (sliver1,)
# Python does not have do loops
while True:
rval,response = do_method("cm", "SliverStatus", params,
URI=url1, version="2.0")
if rval:
if rval != 14:
Fatal("Could not get sliver status")
pass
elif response[ "value" ][ "status" ] == "ready": # no #@(%ing switch, either
break
elif response[ "value" ][ "status" ] == "changing":
print "Not ready, waiting a bit before asking again";
time.sleep( 5 )
else:
Fatal( "Sliver state is " + response[ "value" ][ "status" ] )
pass
pass
print "Sliver on CM1 is ready. Polling CM2 now ..."
params = {}
params["slice_urn"] = SLICEURN
params["credentials"] = (sliver2,)
# Python does not have do loops
while True:
rval,response = do_method("cm", "SliverStatus", params,
URI=url2, version="2.0")
if rval:
if rval != 14:
Fatal("Could not get sliver status")
pass
elif response[ "value" ][ "status" ] == "ready": # no #@(%ing switch, either
break
elif response[ "value" ][ "status" ] == "changing":
print "Not ready, waiting a bit before asking again";
time.sleep( 5 )
else:
Fatal( "Sliver state is " + response[ "value" ][ "status" ] )
pass
pass
print "Sliver on CM2 is ready."
| agpl-3.0 | -6,926,078,632,628,024,000 | 28.859701 | 80 | 0.613416 | false |