repo_name | path | copies | size | content | license
---|---|---|---|---|---|
danguria/linux-kernel-study | tools/perf/scripts/python/sctop.py | 895 | 1936 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import thread
import time
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf trace -s syscall-counts.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
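# trace_begin() is invoked by perf when the script starts; it spawns the
# background thread that periodically prints the running totals.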
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
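# autodict() auto-creates nested entries, so the first increment of an
# unseen syscall id raises TypeError; fall back to initialising the count.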
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40d %10d\n" % (id, val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
okuta/chainer | chainer/types.py | 4 | 2270 | import numbers
import typing as tp # NOQA
import typing_extensions as tpe # NOQA
try:
from typing import TYPE_CHECKING # NOQA
except ImportError:
# typing.TYPE_CHECKING doesn't exist before Python 3.5.2
TYPE_CHECKING = False
# import chainer modules only for type checkers to avoid circular import
if TYPE_CHECKING:
from types import ModuleType # NOQA
import numpy # NOQA
from chainer import backend # NOQA
from chainer.backends import cuda, intel64 # NOQA
from chainer import initializer # NOQA
import chainerx # NOQA
Shape = tp.Tuple[int, ...]
ShapeSpec = tp.Union[int, tp.Sequence[int]] # Sequence includes Tuple[int, ...] # NOQA
DTypeSpec = tp.Union[tp.Any] # TODO(okapies): encode numpy.dtype
NdArray = tp.Union[
'numpy.ndarray',
'cuda.ndarray',
# 'intel64.mdarray',
# TODO(okapies): mdarray is partially incompatible with other ndarrays
'chainerx.ndarray',
]
"""The ndarray types supported in :func:`chainer.get_array_types`
"""
Xp = tp.Union[tp.Any] # TODO(okapies): encode numpy/cupy/ideep/chainerx
class AbstractInitializer(tpe.Protocol):
"""Protocol class for Initializer.
It can be either an :class:`chainer.Initializer` or a callable object
that takes an ndarray.
This is only for PEP 544 compliant static type checkers.
"""
dtype = None # type: tp.Optional[DTypeSpec]
def __call__(self, array):
# type: (NdArray) -> None
pass
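# Illustrative example (not part of the original module): any callable that
# takes an ndarray satisfies this protocol, e.g. ``def fill_zero(arr): arr[...] = 0``.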
ScalarValue = tp.Union[
'numpy.generic',
bytes,
str,
memoryview,
numbers.Number,
]
"""The scalar types supported in :func:`numpy.isscalar`.
"""
InitializerSpec = tp.Union[AbstractInitializer, ScalarValue, 'numpy.ndarray']
DeviceSpec = tp.Union[
'backend.Device',
'chainerx.Device',
'cuda.Device',
str,
tp.Tuple[str, int],
'ModuleType', # numpy and intel64 module
tp.Tuple['ModuleType', int], # cupy module and device ID
]
"""The device specifier types supported in :func:`chainer.get_device`
"""
# TODO(okapies): Use Xp instead of ModuleType
CudaDeviceSpec = tp.Union['cuda.Device', int, 'numpy.integer'] # NOQA
"""
This type is only for the deprecated :func:`chainer.cuda.get_device` API.
Use :class:`~chainer.types.DeviceSpec` instead.
"""
| mit |
bhansa/fireball | pyvenv/Lib/site-packages/pip/_vendor/requests/packages/chardet/charsetprober.py | 3127 | 1902 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
def __init__(self):
pass
def reset(self):
self._mState = constants.eDetecting
def get_charset_name(self):
return None
def feed(self, aBuf):
pass
def get_state(self):
return self._mState
def get_confidence(self):
return 0.0
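# Collapse runs of ASCII (0x00-0x7F) bytes to a single space so that only
# bytes with the high bit set remain in the buffer.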
def filter_high_bit_only(self, aBuf):
aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
return aBuf
def filter_without_english_letters(self, aBuf):
aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
return aBuf
def filter_with_english_letters(self, aBuf):
# TODO
return aBuf
| gpl-3.0 |
jlmadurga/django-oscar | src/oscar/apps/dashboard/users/views.py | 19 | 8320 | from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import (
DeleteView, DetailView, FormView, ListView, TemplateView, UpdateView)
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.edit import FormMixin
from django_tables2 import SingleTableMixin
from oscar.apps.customer.utils import normalise_email
from oscar.core.compat import get_user_model
from oscar.core.loading import get_class, get_classes, get_model
from oscar.views.generic import BulkEditMixin
UserSearchForm, ProductAlertSearchForm, ProductAlertUpdateForm = get_classes(
'dashboard.users.forms', ('UserSearchForm', 'ProductAlertSearchForm',
'ProductAlertUpdateForm'))
PasswordResetForm = get_class('customer.forms', 'PasswordResetForm')
UserTable = get_class('dashboard.users.tables', 'UserTable')
ProductAlert = get_model('customer', 'ProductAlert')
User = get_user_model()
class IndexView(BulkEditMixin, SingleTableMixin, FormMixin, TemplateView):
template_name = 'dashboard/users/index.html'
table_pagination = True
model = User
actions = ('make_active', 'make_inactive', )
form_class = UserSearchForm
table_class = UserTable
context_table_name = 'users'
desc_template = _('%(main_filter)s %(email_filter)s %(name_filter)s')
description = ''
def dispatch(self, request, *args, **kwargs):
form_class = self.get_form_class()
self.form = self.get_form(form_class)
return super(IndexView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
"""
Only bind search form if it was submitted.
"""
kwargs = super(IndexView, self).get_form_kwargs()
if 'search' in self.request.GET:
kwargs.update({
'data': self.request.GET,
})
return kwargs
def get_queryset(self):
queryset = self.model.objects.all().order_by('-date_joined')
return self.apply_search(queryset)
def apply_search(self, queryset):
# Set initial queryset description, used for template context
self.desc_ctx = {
'main_filter': _('All users'),
'email_filter': '',
'name_filter': '',
}
if self.form.is_valid():
return self.apply_search_filters(queryset, self.form.cleaned_data)
else:
return queryset
def apply_search_filters(self, queryset, data):
"""
Function is split out to allow customisation with little boilerplate.
"""
if data['email']:
email = normalise_email(data['email'])
queryset = queryset.filter(email__istartswith=email)
self.desc_ctx['email_filter'] \
= _(" with email matching '%s'") % email
if data['name']:
# If the value is two words, then assume they are first name and
# last name
parts = data['name'].split()
if len(parts) == 2:
condition = Q(first_name__istartswith=parts[0]) \
| Q(last_name__istartswith=parts[1])
else:
condition = Q(first_name__istartswith=data['name']) \
| Q(last_name__istartswith=data['name'])
queryset = queryset.filter(condition).distinct()
self.desc_ctx['name_filter'] \
= _(" with name matching '%s'") % data['name']
return queryset
def get_table(self, **kwargs):
table = super(IndexView, self).get_table(**kwargs)
table.caption = self.desc_template % self.desc_ctx
return table
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
context['form'] = self.form
return context
def make_inactive(self, request, users):
return self._change_users_active_status(users, False)
def make_active(self, request, users):
return self._change_users_active_status(users, True)
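# Flip the is_active flag for each selected user; superusers are skipped so
# they cannot be bulk-deactivated from the dashboard.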
def _change_users_active_status(self, users, value):
for user in users:
if not user.is_superuser:
user.is_active = value
user.save()
messages.info(self.request, _("Users' status successfully changed"))
return redirect('dashboard:users-index')
class UserDetailView(DetailView):
template_name = 'dashboard/users/detail.html'
model = User
context_object_name = 'customer'
class PasswordResetView(SingleObjectMixin, FormView):
form_class = PasswordResetForm
http_method_names = ['post']
model = User
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(PasswordResetView, self).post(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(PasswordResetView, self).get_form_kwargs()
kwargs['data'] = {'email': self.object.email}
return kwargs
def form_valid(self, form):
# The PasswordResetForm's save method sends the reset email
form.save(request=self.request)
return super(PasswordResetView, self).form_valid(form)
def get_success_url(self):
messages.success(
self.request, _("A password reset email has been sent"))
return reverse(
'dashboard:user-detail', kwargs={'pk': self.object.id}
)
class ProductAlertListView(ListView):
model = ProductAlert
form_class = ProductAlertSearchForm
context_object_name = 'alerts'
template_name = 'dashboard/users/alerts/list.html'
paginate_by = settings.OSCAR_DASHBOARD_ITEMS_PER_PAGE
base_description = _('All Alerts')
description = ''
def get_queryset(self):
queryset = self.model.objects.all()
self.description = self.base_description
self.form = self.form_class(self.request.GET)
if not self.form.is_valid():
return queryset
data = self.form.cleaned_data
if data['status']:
queryset = queryset.filter(status=data['status']).distinct()
self.description \
+= _(" with status matching '%s'") % data['status']
if data['name']:
# If the value is two words, then assume they are first name and
# last name
parts = data['name'].split()
if len(parts) >= 2:
queryset = queryset.filter(
user__first_name__istartswith=parts[0],
user__last_name__istartswith=parts[1]
).distinct()
else:
queryset = queryset.filter(
Q(user__first_name__istartswith=parts[0]) |
Q(user__last_name__istartswith=parts[-1])
).distinct()
self.description \
+= _(" with customer name matching '%s'") % data['name']
if data['email']:
queryset = queryset.filter(
Q(user__email__icontains=data['email']) |
Q(email__icontains=data['email'])
)
self.description \
+= _(" with customer email matching '%s'") % data['email']
return queryset
def get_context_data(self, **kwargs):
context = super(ProductAlertListView, self).get_context_data(**kwargs)
context['form'] = self.form
context['queryset_description'] = self.description
return context
class ProductAlertUpdateView(UpdateView):
template_name = 'dashboard/users/alerts/update.html'
model = ProductAlert
form_class = ProductAlertUpdateForm
context_object_name = 'alert'
def get_success_url(self):
messages.success(self.request, _("Product alert saved"))
return reverse('dashboard:user-alert-list')
class ProductAlertDeleteView(DeleteView):
model = ProductAlert
template_name = 'dashboard/users/alerts/delete.html'
context_object_name = 'alert'
def get_success_url(self):
messages.warning(self.request, _("Product alert deleted"))
return reverse('dashboard:user-alert-list')
| bsd-3-clause |
sfstpala/Victory-Chat | cherrypy/test/test_auth_digest.py | 42 | 4553 | # This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
import cherrypy
from cherrypy.lib import auth_digest
from cherrypy.test import helper
class DigestAuthTest(helper.CPWebCase):
def setup_server():
class Root:
def index(self):
return "This is public."
index.exposed = True
class DigestProtected:
def index(self):
return "Hello %s, you've been authorized." % cherrypy.request.login
index.exposed = True
def fetch_users():
return {'test': 'test'}
get_ha1 = cherrypy.lib.auth_digest.get_ha1_dict_plain(fetch_users())
conf = {'/digest': {'tools.auth_digest.on': True,
'tools.auth_digest.realm': 'localhost',
'tools.auth_digest.get_ha1': get_ha1,
'tools.auth_digest.key': 'a565c27146791cfb',
'tools.auth_digest.debug': 'True'}}
root = Root()
root.digest = DigestProtected()
cherrypy.tree.mount(root, config=conf)
setup_server = staticmethod(setup_server)
def testPublic(self):
self.getPage("/")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('This is public.')
def testDigest(self):
self.getPage("/digest/")
self.assertStatus(401)
value = None
for k, v in self.headers:
if k.lower() == "www-authenticate":
if v.startswith("Digest"):
value = v
break
if value is None:
self._handlewebError("Digest authentification scheme was not found")
value = value[7:]
items = value.split(', ')
tokens = {}
for item in items:
key, value = item.split('=')
tokens[key.lower()] = value
missing_msg = "%s is missing"
bad_value_msg = "'%s' was expecting '%s' but found '%s'"
nonce = None
if 'realm' not in tokens:
self._handlewebError(missing_msg % 'realm')
elif tokens['realm'] != '"localhost"':
self._handlewebError(bad_value_msg % ('realm', '"localhost"', tokens['realm']))
if 'nonce' not in tokens:
self._handlewebError(missing_msg % 'nonce')
else:
nonce = tokens['nonce'].strip('"')
if 'algorithm' not in tokens:
self._handlewebError(missing_msg % 'algorithm')
elif tokens['algorithm'] != '"MD5"':
self._handlewebError(bad_value_msg % ('algorithm', '"MD5"', tokens['algorithm']))
if 'qop' not in tokens:
self._handlewebError(missing_msg % 'qop')
elif tokens['qop'] != '"auth"':
self._handlewebError(bad_value_msg % ('qop', '"auth"', tokens['qop']))
get_ha1 = auth_digest.get_ha1_dict_plain({'test' : 'test'})
# Test user agent response with a wrong value for 'realm'
base_auth = 'Digest username="test", realm="wrong realm", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"'
auth_header = base_auth % (nonce, '11111111111111111111111111111111', '00000001')
auth = auth_digest.HttpDigestAuthorization(auth_header, 'GET')
# calculate the response digest
ha1 = get_ha1(auth.realm, 'test')
response = auth.request_digest(ha1)
# send response with correct response digest, but wrong realm
auth_header = base_auth % (nonce, response, '00000001')
self.getPage('/digest/', [('Authorization', auth_header)])
self.assertStatus(401)
# Test that must pass
base_auth = 'Digest username="test", realm="localhost", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"'
auth_header = base_auth % (nonce, '11111111111111111111111111111111', '00000001')
auth = auth_digest.HttpDigestAuthorization(auth_header, 'GET')
# calculate the response digest
ha1 = get_ha1('localhost', 'test')
response = auth.request_digest(ha1)
# send response with correct response digest
auth_header = base_auth % (nonce, response, '00000001')
self.getPage('/digest/', [('Authorization', auth_header)])
self.assertStatus('200 OK')
self.assertBody("Hello test, you've been authorized.")
| isc |
yafeunteun/wikipedia-spam-classifier | revscoring/revscoring/features/wikibase/util.py | 3 | 1550 | class DictDiff:
"""
Represents the difference between two dictionaries
"""
__slots__ = ('added', 'removed', 'intersection', 'changed', 'unchanged')
def __init__(self, added, removed, intersection, changed, unchanged):
self.added = added
"""
`set` ( `mixed` ) : Keys that were added in the new dictionary
"""
self.removed = removed
"""
`set` ( `mixed` ) : Keys that were removed in the new dictionary
"""
self.intersection = intersection
"""
`set` ( `mixed` ) : Keys that appear in both dictionaries
"""
self.changed = changed
"""
`set` ( `mixed` ) : Keys that appear in both dictionaries, but the
values differ
"""
self.unchanged = unchanged
"""
`set` ( `mixed` ) : Keys that appear in both dictionaries and have
equivalent values
"""
def diff_dicts(a, b):
"""
Generates a diff between two dictionaries.
:Parameters:
a : `dict`
A dict to diff or `None`
b : `dict`
B dict to diff
"""
a = a or {}
added = b.keys() - a.keys()
removed = a.keys() - b.keys()
intersection = a.keys() & b.keys()
changed = set()
unchanged = set()
for key in intersection:
if a[key] == b[key]:
unchanged.add(key)
else:
changed.add(key)
return DictDiff(added, removed, intersection, changed, unchanged)
| mit |
dibenede/NFD-ICN2014 | ICN2014-apps/consumer.py | 2 | 4500 | # -*- Mode:python; c-file-style:"gnu"; indent-tabs-mode:nil -*- */
#
# Copyright (c) 2013-2014 Regents of the University of California.
# Copyright (c) 2014 Susmit Shannigrahi, Steve DiBenedetto
#
# This file is part of ndn-cxx library (NDN C++ library with eXperimental eXtensions).
#
# ndn-cxx library is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# ndn-cxx library is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received copies of the GNU General Public License and GNU Lesser
# General Public License along with ndn-cxx, e.g., in COPYING.md file. If not, see
# <http://www.gnu.org/licenses/>.
#
# See AUTHORS.md for complete list of ndn-cxx authors and contributors.
#
# @author Wentao Shang <http://irl.cs.ucla.edu/~wentao/>
# @author Steve DiBenedetto <http://www.cs.colostate.edu/~dibenede>
# @author Susmit Shannigrahi <http://www.cs.colostate.edu/~susmit>
# pylint: disable=line-too-long
import sys
import time
import argparse
import traceback
from pyndn import Interest
from pyndn import Name
from pyndn import Face
class Consumer(object):
'''Sends Interest, listens for data'''
def __init__(self, prefix, pipeline, count):
self.prefix = prefix
self.pipeline = pipeline
self.count = count
self.nextSegment = 0
self.outstanding = dict()
self.isDone = False
self.face = Face("127.0.0.1")
def run(self):
try:
while self.nextSegment < self.pipeline:
self._sendNextInterest(self.prefix)
self.nextSegment += 1
while not self.isDone:
self.face.processEvents()
time.sleep(0.01)
except RuntimeError as e:
print "ERROR: %s" % e
def _onData(self, interest, data):
payload = data.getContent()
name = data.getName()
print "Received data: %s\n" % payload.toRawStr()
del self.outstanding[name.toUri()]
if self.count == self.nextSegment or data.getMetaInfo().getFinalBlockID() == data.getName()[-1]:
self.isDone = True
else:
self._sendNextInterest(self.prefix)
self.nextSegment += 1
def _sendNextInterest(self, name):
self._sendNextInterestWithSegment(Name(name).appendSegment(self.nextSegment))
def _sendNextInterestWithSegment(self, name):
interest = Interest(name)
uri = name.toUri()
interest.setInterestLifetimeMilliseconds(4000)
interest.setMustBeFresh(True)
if name.toUri() not in self.outstanding:
self.outstanding[name.toUri()] = 1
self.face.expressInterest(interest, self._onData, self._onTimeout)
print "Sent Interest for %s" % uri
def _onTimeout(self, interest):
name = interest.getName()
uri = name.toUri()
print "TIMEOUT #%d: segment #%s" % (self.outstanding[uri], name[-1].toNumber())
self.outstanding[uri] += 1
if self.outstanding[uri] <= 3:
self._sendNextInterestWithSegment(name)
else:
self.isDone = True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Parse command line args for ndn consumer')
parser.add_argument("-u", "--uri", required=True, help='ndn URI to retrieve')
parser.add_argument("-p", "--pipe",required=False, help='number of Interests to pipeline, default = 1', nargs= '?', const=1, type=int, default=1)
parser.add_argument("-c", "--count", required=False, help='number of (unique) Interests to send before exiting, default = repeat until final block', nargs='?', const=1, type=int, default=None)
arguments = parser.parse_args()
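# Illustrative invocation (hypothetical name prefix):
#   python consumer.py -u /ndn/example/data -p 4 -c 20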
try:
uri = arguments.uri
pipeline = arguments.pipe
count = arguments.count
if count is not None and count < pipeline:
print "Number of Interests to send must be >= pipeline size"
sys.exit(1)
Consumer(Name(uri), pipeline, count).run()
except:
traceback.print_exc(file=sys.stdout)
print "Error parsing command line arguments"
sys.exit(1)
| gpl-3.0 |
Eric89GXL/scipy | scipy/optimize/tests/test_lsq_common.py | 11 | 8749 | from __future__ import division, absolute_import, print_function
from numpy.testing import assert_, assert_allclose, assert_equal
from pytest import raises as assert_raises
import numpy as np
from scipy.sparse.linalg import LinearOperator
from scipy.optimize._lsq.common import (
step_size_to_bound, find_active_constraints, make_strictly_feasible,
CL_scaling_vector, intersect_trust_region, build_quadratic_1d,
minimize_quadratic_1d, evaluate_quadratic, reflective_transformation,
left_multiplied_operator, right_multiplied_operator)
class TestBounds(object):
def test_step_size_to_bounds(self):
lb = np.array([-1.0, 2.5, 10.0])
ub = np.array([1.0, 5.0, 100.0])
x = np.array([0.0, 2.5, 12.0])
s = np.array([0.1, 0.0, 0.0])
step, hits = step_size_to_bound(x, s, lb, ub)
assert_equal(step, 10)
assert_equal(hits, [1, 0, 0])
s = np.array([0.01, 0.05, -1.0])
step, hits = step_size_to_bound(x, s, lb, ub)
assert_equal(step, 2)
assert_equal(hits, [0, 0, -1])
s = np.array([10.0, -0.0001, 100.0])
step, hits = step_size_to_bound(x, s, lb, ub)
assert_equal(step, np.array(-0))
assert_equal(hits, [0, -1, 0])
s = np.array([1.0, 0.5, -2.0])
step, hits = step_size_to_bound(x, s, lb, ub)
assert_equal(step, 1.0)
assert_equal(hits, [1, 0, -1])
s = np.zeros(3)
step, hits = step_size_to_bound(x, s, lb, ub)
assert_equal(step, np.inf)
assert_equal(hits, [0, 0, 0])
def test_find_active_constraints(self):
lb = np.array([0.0, -10.0, 1.0])
ub = np.array([1.0, 0.0, 100.0])
x = np.array([0.5, -5.0, 2.0])
active = find_active_constraints(x, lb, ub)
assert_equal(active, [0, 0, 0])
x = np.array([0.0, 0.0, 10.0])
active = find_active_constraints(x, lb, ub)
assert_equal(active, [-1, 1, 0])
active = find_active_constraints(x, lb, ub, rtol=0)
assert_equal(active, [-1, 1, 0])
x = np.array([1e-9, -1e-8, 100 - 1e-9])
active = find_active_constraints(x, lb, ub)
assert_equal(active, [0, 0, 1])
active = find_active_constraints(x, lb, ub, rtol=1.5e-9)
assert_equal(active, [-1, 0, 1])
lb = np.array([1.0, -np.inf, -np.inf])
ub = np.array([np.inf, 10.0, np.inf])
x = np.ones(3)
active = find_active_constraints(x, lb, ub)
assert_equal(active, [-1, 0, 0])
# Handles out-of-bound cases.
x = np.array([0.0, 11.0, 0.0])
active = find_active_constraints(x, lb, ub)
assert_equal(active, [-1, 1, 0])
active = find_active_constraints(x, lb, ub, rtol=0)
assert_equal(active, [-1, 1, 0])
def test_make_strictly_feasible(self):
lb = np.array([-0.5, -0.8, 2.0])
ub = np.array([0.8, 1.0, 3.0])
x = np.array([-0.5, 0.0, 2 + 1e-10])
x_new = make_strictly_feasible(x, lb, ub, rstep=0)
assert_(x_new[0] > -0.5)
assert_equal(x_new[1:], x[1:])
x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4)
assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)])
x = np.array([-0.5, -1, 3.1])
x_new = make_strictly_feasible(x, lb, ub)
assert_(np.all((x_new >= lb) & (x_new <= ub)))
x_new = make_strictly_feasible(x, lb, ub, rstep=0)
assert_(np.all((x_new >= lb) & (x_new <= ub)))
lb = np.array([-1, 100.0])
ub = np.array([1, 100.0 + 1e-10])
x = np.array([0, 100.0])
x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8)
assert_equal(x_new, [0, 100.0 + 0.5e-10])
def test_scaling_vector(self):
lb = np.array([-np.inf, -5.0, 1.0, -np.inf])
ub = np.array([1.0, np.inf, 10.0, np.inf])
x = np.array([0.5, 2.0, 5.0, 0.0])
g = np.array([1.0, 0.1, -10.0, 0.0])
v, dv = CL_scaling_vector(x, g, lb, ub)
assert_equal(v, [1.0, 7.0, 5.0, 1.0])
assert_equal(dv, [0.0, 1.0, -1.0, 0.0])
class TestQuadraticFunction(object):
def setup_method(self):
self.J = np.array([
[0.1, 0.2],
[-1.0, 1.0],
[0.5, 0.2]])
self.g = np.array([0.8, -2.0])
self.diag = np.array([1.0, 2.0])
def test_build_quadratic_1d(self):
s = np.zeros(2)
a, b = build_quadratic_1d(self.J, self.g, s)
assert_equal(a, 0)
assert_equal(b, 0)
a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
assert_equal(a, 0)
assert_equal(b, 0)
s = np.array([1.0, -1.0])
a, b = build_quadratic_1d(self.J, self.g, s)
assert_equal(a, 2.05)
assert_equal(b, 2.8)
a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
assert_equal(a, 3.55)
assert_equal(b, 2.8)
s0 = np.array([0.5, 0.5])
a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0)
assert_equal(a, 3.55)
assert_allclose(b, 2.39)
assert_allclose(c, -0.1525)
def test_minimize_quadratic_1d(self):
a = 5
b = -1
t, y = minimize_quadratic_1d(a, b, 1, 2)
assert_equal(t, 1)
assert_equal(y, a * t**2 + b * t)
t, y = minimize_quadratic_1d(a, b, -2, -1)
assert_equal(t, -1)
assert_equal(y, a * t**2 + b * t)
t, y = minimize_quadratic_1d(a, b, -1, 1)
assert_equal(t, 0.1)
assert_equal(y, a * t**2 + b * t)
c = 10
t, y = minimize_quadratic_1d(a, b, -1, 1, c=c)
assert_equal(t, 0.1)
assert_equal(y, a * t**2 + b * t + c)
def test_evaluate_quadratic(self):
s = np.array([1.0, -1.0])
value = evaluate_quadratic(self.J, self.g, s)
assert_equal(value, 4.85)
value = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
assert_equal(value, 6.35)
s = np.array([[1.0, -1.0],
[1.0, 1.0],
[0.0, 0.0]])
values = evaluate_quadratic(self.J, self.g, s)
assert_allclose(values, [4.85, -0.91, 0.0])
values = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
assert_allclose(values, [6.35, 0.59, 0.0])
class TestTrustRegion(object):
def test_intersect(self):
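# The trust region is a ball of radius Delta centred at the origin of the
# step; each case checks the two roots t of ||x + t*s|| = Delta.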
Delta = 1.0
x = np.zeros(3)
s = np.array([1.0, 0.0, 0.0])
t_neg, t_pos = intersect_trust_region(x, s, Delta)
assert_equal(t_neg, -1)
assert_equal(t_pos, 1)
s = np.array([-1.0, 1.0, -1.0])
t_neg, t_pos = intersect_trust_region(x, s, Delta)
assert_allclose(t_neg, -3**-0.5)
assert_allclose(t_pos, 3**-0.5)
x = np.array([0.5, -0.5, 0])
s = np.array([0, 0, 1.0])
t_neg, t_pos = intersect_trust_region(x, s, Delta)
assert_allclose(t_neg, -2**-0.5)
assert_allclose(t_pos, 2**-0.5)
x = np.ones(3)
assert_raises(ValueError, intersect_trust_region, x, s, Delta)
x = np.zeros(3)
s = np.zeros(3)
assert_raises(ValueError, intersect_trust_region, x, s, Delta)
def test_reflective_transformation():
lb = np.array([-1, -2], dtype=float)
ub = np.array([5, 3], dtype=float)
y = np.array([0, 0])
x, g = reflective_transformation(y, lb, ub)
assert_equal(x, y)
assert_equal(g, np.ones(2))
y = np.array([-4, 4], dtype=float)
x, g = reflective_transformation(y, lb, np.array([np.inf, np.inf]))
assert_equal(x, [2, 4])
assert_equal(g, [-1, 1])
x, g = reflective_transformation(y, np.array([-np.inf, -np.inf]), ub)
assert_equal(x, [-4, 2])
assert_equal(g, [1, -1])
x, g = reflective_transformation(y, lb, ub)
assert_equal(x, [2, 2])
assert_equal(g, [-1, -1])
lb = np.array([-np.inf, -2])
ub = np.array([5, np.inf])
y = np.array([10, 10], dtype=float)
x, g = reflective_transformation(y, lb, ub)
assert_equal(x, [0, 10])
assert_equal(g, [-1, 1])
def test_linear_operators():
A = np.arange(6).reshape((3, 2))
d_left = np.array([-1, 2, 5])
DA = np.diag(d_left).dot(A)
J_left = left_multiplied_operator(A, d_left)
d_right = np.array([5, 10])
AD = A.dot(np.diag(d_right))
J_right = right_multiplied_operator(A, d_right)
x = np.array([-2, 3])
X = -2 * np.arange(2, 8).reshape((2, 3))
xt = np.array([0, -2, 15])
assert_allclose(DA.dot(x), J_left.matvec(x))
assert_allclose(DA.dot(X), J_left.matmat(X))
assert_allclose(DA.T.dot(xt), J_left.rmatvec(xt))
assert_allclose(AD.dot(x), J_right.matvec(x))
assert_allclose(AD.dot(X), J_right.matmat(X))
assert_allclose(AD.T.dot(xt), J_right.rmatvec(xt))
| bsd-3-clause |
sumedh123/debatify | venv/lib/python2.7/site-packages/pip/utils/build.py | 899 | 1312 | from __future__ import absolute_import
import os.path
import tempfile
from pip.utils import rmtree
class BuildDirectory(object):
def __init__(self, name=None, delete=None):
# If we were not given an explicit directory, and we were not given an
# explicit delete option, then we'll default to deleting.
if name is None and delete is None:
delete = True
if name is None:
# We realpath here because some systems have their default tmpdir
# symlinked to another directory. This tends to confuse build
# scripts, so we canonicalize the path by traversing potential
# symlinks here.
name = os.path.realpath(tempfile.mkdtemp(prefix="pip-build-"))
# If we were not given an explicit directory, and we were not given
# an explicit delete option, then we'll default to deleting.
if delete is None:
delete = True
self.name = name
self.delete = delete
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self.delete:
rmtree(self.name)
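# Illustrative use: ``with BuildDirectory() as build_dir: ...`` yields the
# directory path and removes the directory on exit when deletion is enabled.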
| mit |
joshuaspence/ThesisCode | MATLAB/Lib/matlab_bgl-4.0.1/libmbgl/boost1.36/libs/python/pyste/src/Pyste/exporterutils.py | 54 | 3331 | # Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
'''
Various helpers for interface files.
'''
from settings import *
from policies import *
from declarations import *
#==============================================================================
# FunctionWrapper
#==============================================================================
class FunctionWrapper(object):
'''Holds information about a wrapper for a function or a method. It is
divided into two parts: the name of the Wrapper, and its code. The code is
placed in the declaration section of the module, while the name is used to
def' the function or method (with the pyste namespace prepended to it). If
code is None, the name is left unchanged.
'''
def __init__(self, name, code=None):
self.name = name
self.code = code
def FullName(self):
if self.code:
return namespaces.pyste + self.name
else:
return self.name
_printed_warnings = {} # used to avoid double-prints of warnings
#==============================================================================
# HandlePolicy
#==============================================================================
def HandlePolicy(function, policy):
'''Show a warning to the user if the function needs a policy and doesn't
have one. Return a policy to the function, which is the given policy itself
if it is not None, or a default policy for this method.
'''
def IsString(type):
'Return True if the Type instance can be considered a string'
return type.FullName() == 'const char*'
def IsPyObject(type):
return type.FullName() == '_object *' # internal name of PyObject
result = function.result
# if the function returns const char*, a policy is not needed
if IsString(result) or IsPyObject(result):
return policy
# if returns a const T&, set the default policy
if policy is None and result.const and isinstance(result, ReferenceType):
policy = return_value_policy(copy_const_reference)
# basic test if the result type demands a policy
needs_policy = isinstance(result, (ReferenceType, PointerType))
# show a warning to the user, if needed
if needs_policy and policy is None:
global _printed_warnings
warning = '---> Error: %s returns a pointer or a reference, ' \
'but no policy was specified.' % function.FullName()
if warning not in _printed_warnings:
print warning
print
# avoid double prints of the same warning
_printed_warnings[warning] = 1
return policy
#==============================================================================
# EspecializeTypeID
#==============================================================================
_exported_type_ids = {}
def EspecializeTypeID(typename):
global _exported_type_ids
macro = 'BOOST_PYTHON_OPAQUE_SPECIALIZED_TYPE_ID(%s)\n' % typename
if macro not in _exported_type_ids:
_exported_type_ids[macro] = 1
return macro
else:
return None
| gpl-3.0 |
houlixin/BBB-TISDK | linux-devkit/sysroots/i686-arago-linux/usr/lib/python2.7/encodings/hex_codec.py | 528 | 2309 | """ Python 'hex_codec' Codec - 2-digit hex content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs, binascii
### Codec APIs
def hex_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.b2a_hex(input)
return (output, len(input))
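# Illustrative example (Python 2 byte strings): hex_encode('abc') returns
# ('616263', 3).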
def hex_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.a2b_hex(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return hex_encode(input,errors)
def decode(self, input,errors='strict'):
return hex_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
assert self.errors == 'strict'
return binascii.b2a_hex(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
assert self.errors == 'strict'
return binascii.a2b_hex(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='hex',
encode=hex_encode,
decode=hex_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| gpl-2.0 |
V11/volcano | server/sqlmap/thirdparty/chardet/euckrprober.py | 236 | 1672 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import EUCKRDistributionAnalysis
from mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
| mit |
coffenbacher/askbot-devel | askbot/importers/stackexchange/models.py | 21 | 14019 | from django.db import models
class Badge(models.Model):
id = models.IntegerField(primary_key=True)
class_type = models.IntegerField(null=True)
name = models.CharField(max_length=50, null=True)
description = models.TextField(null=True)
single = models.NullBooleanField(null=True)
secret = models.NullBooleanField(null=True)
tag_based = models.NullBooleanField(null=True)
command = models.TextField(null=True)
award_frequency = models.IntegerField(null=True)
class CloseReason(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=200, null=True)
description = models.CharField(max_length=256, null=True)
display_order = models.IntegerField(null=True)
class Comment2Vote(models.Model):
id = models.IntegerField(primary_key=True)
post_comment = models.ForeignKey('PostComment', related_name='Comment2Vote_by_post_comment_set', null=True)
vote_type = models.ForeignKey('VoteType', related_name='Comment2Vote_by_vote_type_set', null=True)
creation_date = models.DateTimeField(null=True)
user = models.ForeignKey('User', related_name='Comment2Vote_by_user_set', null=True)
ip_address = models.CharField(max_length=40, null=True)
user_display_name = models.CharField(max_length=40, null=True)
deletion_date = models.DateTimeField(null=True)
class FlatPage(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
url = models.CharField(max_length=128, null=True)
value = models.TextField(null=True)
content_type = models.CharField(max_length=50, null=True)
active = models.NullBooleanField(null=True)
use_master = models.NullBooleanField(null=True)
class Message(models.Model):
id = models.IntegerField(primary_key=True)
user = models.ForeignKey('User', related_name='Message_by_user_set', null=True)
message_type = models.ForeignKey('MessageType', related_name='Message_by_message_type_set', null=True)
is_read = models.NullBooleanField(null=True)
creation_date = models.DateTimeField(null=True)
text = models.TextField(null=True)
post = models.ForeignKey('Post', related_name='Message_by_post_set', null=True)
class MessageType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class ModeratorMessage(models.Model):
id = models.IntegerField(primary_key=True)
message_type = models.ForeignKey('MessageType', related_name='ModeratorMessage_by_message_type_set', null=True)
creation_date = models.DateTimeField(null=True)
creation_ip_address = models.CharField(max_length=40, null=True)
text = models.TextField(null=True)
user = models.ForeignKey('User', related_name='ModeratorMessage_by_user_set', null=True)
post = models.ForeignKey('Post', related_name='ModeratorMessage_by_post_set', null=True)
deletion_date = models.DateTimeField(null=True)
deletion_user = models.ForeignKey('User', related_name='ModeratorMessage_by_deletion_user_set', null=True)
deletion_ip_address = models.CharField(max_length=40, null=True)
user_display_name = models.CharField(max_length=40, null=True)
class PostComment(models.Model):
id = models.IntegerField(primary_key=True)
post = models.ForeignKey('Post', related_name='PostComment_by_post_set', null=True)
text = models.TextField(null=True)
creation_date = models.DateTimeField(null=True)
ip_address = models.CharField(max_length=15, null=True)
user = models.ForeignKey('User', related_name='PostComment_by_user_set', null=True)
user_display_name = models.CharField(max_length=30, null=True)
deletion_date = models.DateTimeField(null=True)
deletion_user = models.ForeignKey('User', related_name='PostComment_by_deletion_user_set', null=True)
score = models.IntegerField(null=True)
class PostHistoryType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class PostHistory(models.Model):
id = models.IntegerField(primary_key=True)
post_history_type = models.ForeignKey('PostHistoryType', related_name='PostHistory_by_post_history_type_set', null=True)
post = models.ForeignKey('Post', related_name='PostHistory_by_post_set', null=True)
revision_guid = models.CharField(max_length=64, null=True)
creation_date = models.DateTimeField(null=True)
ip_address = models.CharField(max_length=40, null=True)
user = models.ForeignKey('User', related_name='PostHistory_by_user_set', null=True)
comment = models.CharField(max_length=400, null=True)
text = models.TextField(null=True)
user_display_name = models.CharField(max_length=40, null=True)
user_email = models.CharField(max_length=100, null=True)
user_website_url = models.CharField(max_length=200, null=True)
class Post2Vote(models.Model):
id = models.IntegerField(primary_key=True)
post = models.ForeignKey('Post', related_name='Post2Vote_by_post_set', null=True)
user = models.ForeignKey('User', related_name='Post2Vote_by_user_set', null=True)
vote_type = models.ForeignKey('VoteType', related_name='Post2Vote_by_vote_type_set', null=True)
creation_date = models.DateTimeField(null=True)
deletion_date = models.DateTimeField(null=True)
target_user = models.ForeignKey('User', related_name='Post2Vote_by_target_user_set', null=True)
target_rep_change = models.IntegerField(null=True)
voter_rep_change = models.IntegerField(null=True)
comment = models.CharField(max_length=150, null=True)
ip_address = models.CharField(max_length=40, null=True)
linked_post = models.ForeignKey('Post', related_name='Post2Vote_by_linked_post_set', null=True)
class Post(models.Model):
id = models.IntegerField(primary_key=True)
post_type = models.ForeignKey('PostType', related_name='Post_by_post_type_set', null=True)
creation_date = models.DateTimeField(null=True)
score = models.IntegerField(null=True)
view_count = models.IntegerField(null=True)
body = models.TextField(null=True)
owner_user = models.ForeignKey('User', related_name='Post_by_owner_user_set', null=True)
last_editor_user = models.ForeignKey('User', related_name='Post_by_last_editor_user_set', null=True)
last_edit_date = models.DateTimeField(null=True)
last_activity_date = models.DateTimeField(null=True)
last_activity_user = models.ForeignKey('User', related_name='Post_by_last_activity_user_set', null=True)
parent = models.ForeignKey('self', related_name='Post_by_parent_set', null=True)
accepted_answer = models.ForeignKey('self', related_name='Post_by_accepted_answer_set', null=True)
title = models.CharField(max_length=250, null=True)
tags = models.CharField(max_length=150, null=True)
community_owned_date = models.DateTimeField(null=True)
history_summary = models.CharField(max_length=150, null=True)
answer_score = models.IntegerField(null=True)
answer_count = models.IntegerField(null=True)
comment_count = models.IntegerField(null=True)
favorite_count = models.IntegerField(null=True)
deletion_date = models.DateTimeField(null=True)
closed_date = models.DateTimeField(null=True)
locked_date = models.DateTimeField(null=True)
locked_duration = models.IntegerField(null=True)
owner_display_name = models.CharField(max_length=40, null=True)
last_editor_display_name = models.CharField(max_length=40, null=True)
bounty_amount = models.IntegerField(null=True)
bounty_closes = models.DateTimeField(null=True)
bounty_closed = models.DateTimeField(null=True)
last_owner_email_date = models.DateTimeField(null=True)
class PostType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class SchemaVersion(models.Model):
version = models.IntegerField(null=True)
class Setting(models.Model):
id = models.IntegerField(primary_key=True)
key = models.CharField(max_length=256, null=True)
value = models.TextField(null=True)
class SystemMessage(models.Model):
id = models.IntegerField(primary_key=True)
user = models.ForeignKey('User', related_name='SystemMessage_by_user_set', null=True)
creation_date = models.DateTimeField(null=True)
text = models.TextField(null=True)
deletion_date = models.DateTimeField(null=True)
deletion_user = models.ForeignKey('User', related_name='SystemMessage_by_deletion_user_set', null=True)
class Tag(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
count = models.IntegerField(null=True)
user = models.ForeignKey('User', related_name='Tag_by_user_set', null=True)
creation_date = models.DateTimeField(null=True)
is_moderator_only = models.NullBooleanField(null=True)
is_required = models.NullBooleanField(null=True)
aliases = models.CharField(max_length=200, null=True)
class ThemeResource(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
value = models.TextField(null=True)
content_type = models.CharField(max_length=50, null=True)
version = models.CharField(max_length=6, null=True)
class ThemeTextResource(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
value = models.TextField(null=True)
content_type = models.CharField(max_length=50, null=True)
class ThrottleBucket(models.Model):
id = models.IntegerField(primary_key=True)
type = models.CharField(max_length=256, null=True)
ip_address = models.CharField(max_length=64, null=True)
tokens = models.IntegerField(null=True)
last_update = models.DateTimeField(null=True)
class UserHistoryType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class UserHistory(models.Model):
id = models.IntegerField(primary_key=True)
user_history_type = models.ForeignKey('UserHistoryType', related_name='UserHistory_by_user_history_type_set', null=True)
creation_date = models.DateTimeField(null=True)
ip_address = models.CharField(max_length=40, null=True)
user = models.ForeignKey('User', related_name='UserHistory_by_user_set', null=True)
comment = models.CharField(max_length=400, null=True)
user_display_name = models.CharField(max_length=40, null=True)
moderator_user = models.ForeignKey('User', related_name='UserHistory_by_moderator_user_set', null=True)
reputation = models.IntegerField(null=True)
class User2Badge(models.Model):
id = models.IntegerField(primary_key=True)
user = models.ForeignKey('User', related_name='User2Badge_by_user_set', null=True)
badge = models.ForeignKey('Badge', related_name='User2Badge_by_badge_set', null=True)
date = models.DateTimeField(null=True)
comment = models.CharField(max_length=50, null=True)
class User2Vote(models.Model):
id = models.IntegerField(primary_key=True)
user = models.ForeignKey('User', related_name='User2Vote_by_user_set', null=True)
vote_type = models.ForeignKey('VoteType', related_name='User2Vote_by_vote_type_set', null=True)
target_user = models.ForeignKey('User', related_name='User2Vote_by_target_user_set', null=True)
creation_date = models.DateTimeField(null=True)
deletion_date = models.DateTimeField(null=True)
ip_address = models.CharField(max_length=40, null=True)
class User(models.Model):
id = models.IntegerField(primary_key=True)
user_type = models.ForeignKey('UserType', related_name='User_by_user_type_set', null=True)
open_id = models.CharField(max_length=200, null=True)
reputation = models.IntegerField(null=True)
views = models.IntegerField(null=True)
creation_date = models.DateTimeField(null=True)
last_access_date = models.DateTimeField(null=True)
has_replies = models.NullBooleanField(null=True)
has_message = models.NullBooleanField(null=True)
opt_in_email = models.NullBooleanField(null=True)
opt_in_recruit = models.NullBooleanField(null=True)
last_login_date = models.DateTimeField(null=True)
last_email_date = models.DateTimeField(null=True)
last_login_ip = models.CharField(max_length=15, null=True)
open_id_alt = models.CharField(max_length=200, null=True)
email = models.CharField(max_length=100, null=True)
display_name = models.CharField(max_length=40, null=True)
display_name_cleaned = models.CharField(max_length=40, null=True)
website_url = models.CharField(max_length=200, null=True)
real_name = models.CharField(max_length=100, null=True)
location = models.CharField(max_length=100, null=True)
birthday = models.DateTimeField(null=True)
badge_summary = models.CharField(max_length=50, null=True)
about_me = models.TextField(null=True)
preferences_raw = models.TextField(null=True)
timed_penalty_date = models.DateTimeField(null=True)
guid = models.CharField(max_length=64, null=True)
phone = models.CharField(max_length=20, null=True)
password_id = models.IntegerField(null=True)
class UserType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class VoteType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=300, null=True)
class Password(models.Model):
id = models.IntegerField(primary_key = True)
password = models.CharField(max_length = 128)
salt = models.CharField(max_length = 32)
| gpl-3.0 |
davidwaroquiers/abiflows | abiflows/fireworks/utils/custodian_utils.py | 2 | 3730 | import abc
from custodian.custodian import ErrorHandler, Validator
#TODO: do we stick to custodian's ErrorHandler/Validator inheritance ??
class SRCErrorHandler(ErrorHandler):
HANDLER_PRIORITIES = {'PRIORITY_FIRST': 0,
'PRIORITY_VERY_HIGH': 1,
'PRIORITY_HIGH': 2,
'PRIORITY_MEDIUM': 3,
'PRIORITY_LOW': 4,
'PRIORITY_VERY_LOW': 5,
'PRIORITY_LAST': 6}
PRIORITY_FIRST = HANDLER_PRIORITIES['PRIORITY_FIRST']
PRIORITY_VERY_HIGH = HANDLER_PRIORITIES['PRIORITY_VERY_HIGH']
PRIORITY_HIGH = HANDLER_PRIORITIES['PRIORITY_HIGH']
PRIORITY_MEDIUM = HANDLER_PRIORITIES['PRIORITY_MEDIUM']
PRIORITY_LOW = HANDLER_PRIORITIES['PRIORITY_LOW']
PRIORITY_VERY_LOW = HANDLER_PRIORITIES['PRIORITY_VERY_LOW']
PRIORITY_LAST = HANDLER_PRIORITIES['PRIORITY_LAST']
def __init__(self):
self.fw_spec = None
self.fw_to_check = None
@abc.abstractmethod
def as_dict(self):
pass
@abc.abstractmethod
def from_dict(cls, d):
pass
@abc.abstractmethod
def setup(self):
pass
def set_fw_spec(self, fw_spec):
self.fw_spec = fw_spec
def set_fw_to_check(self, fw_to_check):
self.fw_to_check = fw_to_check
def src_setup(self, fw_spec, fw_to_check):
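# Record the firework context on the handler, then run the subclass setup().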
self.set_fw_spec(fw_spec=fw_spec)
self.set_fw_to_check(fw_to_check=fw_to_check)
self.setup()
@abc.abstractproperty
def handler_priority(self):
pass
@property
def skip_remaining_handlers(self):
return False
@abc.abstractproperty
def allow_fizzled(self):
pass
@abc.abstractproperty
def allow_completed(self):
pass
@abc.abstractmethod
def has_corrections(self):
pass
class MonitoringSRCErrorHandler(ErrorHandler):
HANDLER_PRIORITIES = {'PRIORITY_FIRST': 0,
'PRIORITY_VERY_HIGH': 1,
'PRIORITY_HIGH': 2,
'PRIORITY_MEDIUM': 3,
'PRIORITY_LOW': 4,
'PRIORITY_VERY_LOW': 5,
'PRIORITY_LAST': 6}
PRIORITY_FIRST = HANDLER_PRIORITIES['PRIORITY_FIRST']
PRIORITY_VERY_HIGH = HANDLER_PRIORITIES['PRIORITY_VERY_HIGH']
PRIORITY_HIGH = HANDLER_PRIORITIES['PRIORITY_HIGH']
PRIORITY_MEDIUM = HANDLER_PRIORITIES['PRIORITY_MEDIUM']
PRIORITY_LOW = HANDLER_PRIORITIES['PRIORITY_LOW']
PRIORITY_VERY_LOW = HANDLER_PRIORITIES['PRIORITY_VERY_LOW']
PRIORITY_LAST = HANDLER_PRIORITIES['PRIORITY_LAST']
@abc.abstractmethod
def as_dict(self):
pass
@abc.abstractmethod
def from_dict(cls, d):
pass
@abc.abstractproperty
def handler_priority(self):
pass
@property
def skip_remaining_handlers(self):
return False
class SRCValidator(Validator):
HANDLER_PRIORITIES = {'PRIORITY_FIRST': 0,
'PRIORITY_VERY_HIGH': 1,
'PRIORITY_HIGH': 2,
'PRIORITY_MEDIUM': 3,
'PRIORITY_LOW': 4,
'PRIORITY_VERY_LOW': 5,
'PRIORITY_LAST': 6}
PRIORITY_FIRST = HANDLER_PRIORITIES['PRIORITY_FIRST']
PRIORITY_VERY_HIGH = HANDLER_PRIORITIES['PRIORITY_VERY_HIGH']
PRIORITY_HIGH = HANDLER_PRIORITIES['PRIORITY_HIGH']
PRIORITY_MEDIUM = HANDLER_PRIORITIES['PRIORITY_MEDIUM']
PRIORITY_LOW = HANDLER_PRIORITIES['PRIORITY_LOW']
PRIORITY_VERY_LOW = HANDLER_PRIORITIES['PRIORITY_VERY_LOW']
PRIORITY_LAST = HANDLER_PRIORITIES['PRIORITY_LAST']
pass
| gpl-2.0 |
oinopion/django | tests/utils_tests/test_module_loading.py | 281 | 9516 | import imp
import os
import sys
import unittest
from importlib import import_module
from zipimport import zipimporter
from django.test import SimpleTestCase, modify_settings
from django.test.utils import extend_sys_path
from django.utils import six
from django.utils._os import upath
from django.utils.module_loading import (
autodiscover_modules, import_string, module_has_submodule,
)
class DefaultLoader(unittest.TestCase):
def setUp(self):
sys.meta_path.insert(0, ProxyFinder())
def tearDown(self):
sys.meta_path.pop(0)
def test_loader(self):
"Normal module existence can be tested"
test_module = import_module('utils_tests.test_module')
test_no_submodule = import_module(
'utils_tests.test_no_submodule')
# An importable child
self.assertTrue(module_has_submodule(test_module, 'good_module'))
mod = import_module('utils_tests.test_module.good_module')
self.assertEqual(mod.content, 'Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(test_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'utils_tests.test_module.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(test_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'utils_tests.test_module.no_such_module')
# A child that doesn't exist, but is the name of a package on the path
self.assertFalse(module_has_submodule(test_module, 'django'))
self.assertRaises(ImportError, import_module, 'utils_tests.test_module.django')
# Don't be confused by caching of import misses
import types # NOQA: causes attempted import of utils_tests.types
self.assertFalse(module_has_submodule(sys.modules['utils_tests'], 'types'))
# A module which doesn't have a __path__ (so no submodules)
self.assertFalse(module_has_submodule(test_no_submodule, 'anything'))
self.assertRaises(ImportError, import_module,
'utils_tests.test_no_submodule.anything')
class EggLoader(unittest.TestCase):
def setUp(self):
self.egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))
def tearDown(self):
sys.path_importer_cache.clear()
sys.modules.pop('egg_module.sub1.sub2.bad_module', None)
sys.modules.pop('egg_module.sub1.sub2.good_module', None)
sys.modules.pop('egg_module.sub1.sub2', None)
sys.modules.pop('egg_module.sub1', None)
sys.modules.pop('egg_module.bad_module', None)
sys.modules.pop('egg_module.good_module', None)
sys.modules.pop('egg_module', None)
def test_shallow_loader(self):
"Module existence can be tested inside eggs"
egg_name = '%s/test_egg.egg' % self.egg_dir
with extend_sys_path(egg_name):
egg_module = import_module('egg_module')
# An importable child
self.assertTrue(module_has_submodule(egg_module, 'good_module'))
mod = import_module('egg_module.good_module')
self.assertEqual(mod.content, 'Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'egg_module.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'egg_module.no_such_module')
def test_deep_loader(self):
"Modules deep inside an egg can still be tested for existence"
egg_name = '%s/test_egg.egg' % self.egg_dir
with extend_sys_path(egg_name):
egg_module = import_module('egg_module.sub1.sub2')
# An importable child
self.assertTrue(module_has_submodule(egg_module, 'good_module'))
mod = import_module('egg_module.sub1.sub2.good_module')
self.assertEqual(mod.content, 'Deep Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.no_such_module')
class ModuleImportTestCase(unittest.TestCase):
def test_import_string(self):
cls = import_string('django.utils.module_loading.import_string')
self.assertEqual(cls, import_string)
# Test exceptions raised
self.assertRaises(ImportError, import_string, 'no_dots_in_path')
msg = 'Module "utils_tests" does not define a "unexistent" attribute'
with six.assertRaisesRegex(self, ImportError, msg):
import_string('utils_tests.unexistent')
@modify_settings(INSTALLED_APPS={'append': 'utils_tests.test_module'})
class AutodiscoverModulesTestCase(SimpleTestCase):
def tearDown(self):
sys.path_importer_cache.clear()
sys.modules.pop('utils_tests.test_module.another_bad_module', None)
sys.modules.pop('utils_tests.test_module.another_good_module', None)
sys.modules.pop('utils_tests.test_module.bad_module', None)
sys.modules.pop('utils_tests.test_module.good_module', None)
sys.modules.pop('utils_tests.test_module', None)
def test_autodiscover_modules_found(self):
autodiscover_modules('good_module')
def test_autodiscover_modules_not_found(self):
autodiscover_modules('missing_module')
def test_autodiscover_modules_found_but_bad_module(self):
with six.assertRaisesRegex(self, ImportError, "No module named '?a_package_name_that_does_not_exist'?"):
autodiscover_modules('bad_module')
def test_autodiscover_modules_several_one_bad_module(self):
with six.assertRaisesRegex(self, ImportError, "No module named '?a_package_name_that_does_not_exist'?"):
autodiscover_modules('good_module', 'bad_module')
def test_autodiscover_modules_several_found(self):
autodiscover_modules('good_module', 'another_good_module')
def test_autodiscover_modules_several_found_with_registry(self):
from .test_module import site
autodiscover_modules('good_module', 'another_good_module', register_to=site)
self.assertEqual(site._registry, {'lorem': 'ipsum'})
def test_validate_registry_keeps_intact(self):
from .test_module import site
with six.assertRaisesRegex(self, Exception, "Some random exception."):
autodiscover_modules('another_bad_module', register_to=site)
self.assertEqual(site._registry, {})
def test_validate_registry_resets_after_erroneous_module(self):
from .test_module import site
with six.assertRaisesRegex(self, Exception, "Some random exception."):
autodiscover_modules('another_good_module', 'another_bad_module', register_to=site)
self.assertEqual(site._registry, {'lorem': 'ipsum'})
def test_validate_registry_resets_after_missing_module(self):
from .test_module import site
autodiscover_modules('does_not_exist', 'another_good_module', 'does_not_exist2', register_to=site)
self.assertEqual(site._registry, {'lorem': 'ipsum'})
class ProxyFinder(object):
def __init__(self):
self._cache = {}
def find_module(self, fullname, path=None):
tail = fullname.rsplit('.', 1)[-1]
try:
fd, fn, info = imp.find_module(tail, path)
if fullname in self._cache:
old_fd = self._cache[fullname][0]
if old_fd:
old_fd.close()
self._cache[fullname] = (fd, fn, info)
except ImportError:
return None
else:
return self # this is a loader as well
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
fd, fn, info = self._cache[fullname]
try:
return imp.load_module(fullname, fd, fn, info)
finally:
if fd:
fd.close()
class TestFinder(object):
def __init__(self, *args, **kwargs):
self.importer = zipimporter(*args, **kwargs)
def find_module(self, path):
importer = self.importer.find_module(path)
if importer is None:
return
return TestLoader(importer)
class TestLoader(object):
def __init__(self, importer):
self.importer = importer
def load_module(self, name):
mod = self.importer.load_module(name)
mod.__loader__ = self
return mod
class CustomLoader(EggLoader):
"""The Custom Loader test is exactly the same as the EggLoader, but
it uses a custom defined Loader and Finder that is intentionally
split into two classes. Although the EggLoader combines both functions
into one class, this isn't required.
"""
def setUp(self):
super(CustomLoader, self).setUp()
sys.path_hooks.insert(0, TestFinder)
sys.path_importer_cache.clear()
def tearDown(self):
super(CustomLoader, self).tearDown()
sys.path_hooks.pop(0)
| bsd-3-clause |
brian-l/django-1.4.10 | tests/regressiontests/admin_views/admin.py | 18 | 19542 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import tempfile
import os
from django import forms
from django.contrib import admin
from django.contrib.admin.views.main import ChangeList
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.conf.urls import patterns, url
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse
from django.contrib.admin import BooleanFieldListFilter
from .models import (Article, Chapter, Account, Media, Child, Parent, Picture,
Widget, DooHickey, Grommet, Whatsit, FancyDoodad, Category, Link,
PrePopulatedPost, PrePopulatedSubPost, CustomArticle, Section,
ModelWithStringPrimaryKey, Color, Thing, Actor, Inquisition, Sketch, Person,
Persona, Subscriber, ExternalSubscriber, OldSubscriber, Vodcast, EmptyModel,
Fabric, Gallery, Language, Recommendation, Recommender, Collector, Post,
Gadget, Villain, SuperVillain, Plot, PlotDetails, CyclicOne, CyclicTwo,
WorkHour, Reservation, FoodDelivery, RowLevelChangePermissionModel, Paper,
CoverLetter, Story, OtherStory, Book, Promo, ChapterXtra1, Pizza, Topping,
Album, Question, Answer, ComplexSortedPerson, PrePopulatedPostLargeSlug,
AdminOrderedField, AdminOrderedModelMethod, AdminOrderedAdminMethod,
AdminOrderedCallable, Report, Color2, UnorderedObject, MainPrepopulated,
RelatedPrepopulated)
def callable_year(dt_value):
return dt_value.year
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
model = Article
prepopulated_fields = {
'title' : ('content',)
}
fieldsets=(
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section')
})
)
class ChapterInline(admin.TabularInline):
model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
list_filter = ('chap',
'chap__title',
'chap__book',
'chap__book__name',
'chap__book__promo',
'chap__book__promo__name',)
class ArticleAdmin(admin.ModelAdmin):
list_display = ('content', 'date', callable_year, 'model_year', 'modeladmin_year')
list_filter = ('date', 'section')
def changelist_view(self, request):
"Test that extra_context works"
return super(ArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
modeladmin_year.short_description = None
def delete_model(self, request, obj):
EmailMessage(
'Greetings from a deleted object',
'I hereby inform you that some user deleted me',
'[email protected]',
['[email protected]']
).send()
return super(ArticleAdmin, self).delete_model(request, obj)
def save_model(self, request, obj, form, change=True):
EmailMessage(
'Greetings from a created object',
'I hereby inform you that some user created me',
'[email protected]',
['[email protected]']
).send()
return super(ArticleAdmin, self).save_model(request, obj, form, change)
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
""" Only allow changing objects with even id number """
return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)
class CustomArticleAdmin(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = 'custom_admin/change_list.html'
change_form_template = 'custom_admin/change_form.html'
add_form_template = 'custom_admin/add_form.html'
object_history_template = 'custom_admin/object_history.html'
delete_confirmation_template = 'custom_admin/delete_confirmation.html'
delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
def changelist_view(self, request):
"Test that extra_context works"
return super(CustomArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color__warm', 'color__value', 'pub_date',)
class InquisitionAdmin(admin.ModelAdmin):
list_display = ('leader', 'country', 'expected')
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition',)
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
def clean(self):
for person_dict in self.cleaned_data:
person = person_dict.get('id')
alive = person_dict.get('alive')
if person and alive and person.name == "Grace Hopper":
raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'gender', 'alive')
list_editable = ('gender', 'alive')
list_filter = ('gender',)
search_fields = ('^name',)
save_as = True
def get_changelist_formset(self, request, **kwargs):
return super(PersonAdmin, self).get_changelist_formset(request,
formset=BasePersonModelFormSet, **kwargs)
def queryset(self, request):
# Order by a field that isn't in list display, to be able to test
# whether ordering is preserved.
return super(PersonAdmin, self).queryset(request).order_by('age')
class FooAccount(Account):
"""A service-specific account of type Foo."""
servicename = u'foo'
class BarAccount(Account):
"""A service-specific account of type Bar."""
servicename = u'bar'
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class SubscriberAdmin(admin.ModelAdmin):
actions = ['mail_admin']
def mail_admin(self, request, selected):
EmailMessage(
'Greetings from a ModelAdmin action',
            'This is the test email from an admin action',
'[email protected]',
['[email protected]']
).send()
def external_mail(modeladmin, request, selected):
EmailMessage(
'Greetings from a function action',
'This is the test email from a function action',
'[email protected]',
['[email protected]']
).send()
external_mail.short_description = 'External mail (Another awesome action)'
def redirect_to(modeladmin, request, selected):
from django.http import HttpResponseRedirect
return HttpResponseRedirect('/some-where-else/')
redirect_to.short_description = 'Redirect to (Awesome action)'
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [redirect_to, external_mail]
class Podcast(Media):
release_date = models.DateField()
class Meta:
ordering = ('release_date',) # overridden in PodcastAdmin
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
date_hierarchy = 'release_date'
ordering = ('name',)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
model = Parent
inlines = [ChildInline]
list_editable = ('name',)
def save_related(self, request, form, formsets, change):
super(ParentAdmin, self).save_related(request, form, formsets, change)
first_name, last_name = form.instance.name.split()
for child in form.instance.child_set.all():
if len(child.name.split()) < 2:
child.name = child.name + ' ' + last_name
child.save()
class EmptyModelAdmin(admin.ModelAdmin):
def queryset(self, request):
return super(EmptyModelAdmin, self).queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
temp_storage = FileSystemStorage(tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR']))
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
class RecommendationAdmin(admin.ModelAdmin):
search_fields = ('=titletranslation__text', '=recommender__titletranslation__text',)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [
WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
FancyDoodadInline, CategoryInline
]
class LinkInline(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted",)
class SubPostInline(admin.TabularInline):
model = PrePopulatedSubPost
prepopulated_fields = {
'subslug' : ('subtitle',)
}
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('subslug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PrePopulatedPostAdmin(admin.ModelAdmin):
list_display = ['title', 'slug']
prepopulated_fields = {
'slug' : ('title',)
}
inlines = [SubPostInline]
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('slug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'public']
readonly_fields = ('posted', 'awesomeness_level', 'coolness', 'value', lambda obj: "foo")
inlines = [
LinkInline
]
def coolness(self, instance):
if instance.pk:
return "%d amount of cool." % instance.pk
else:
return "Unkown coolness."
def value(self, instance):
return 1000
value.short_description = 'Value in $US'
class CustomChangeList(ChangeList):
def get_query_set(self, request):
return self.root_query_set.filter(pk=9999) # Does not exist
class GadgetAdmin(admin.ModelAdmin):
def get_changelist(self, request, **kwargs):
return CustomChangeList
class PizzaAdmin(admin.ModelAdmin):
readonly_fields = ('toppings',)
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class FoodDeliveryAdmin(admin.ModelAdmin):
list_display=('reference', 'driver', 'restaurant')
list_editable = ('driver', 'restaurant')
class PaperAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom queryset() method that uses only(), to test
verbose_name display in messages shown after adding Paper instances.
"""
def queryset(self, request):
return super(PaperAdmin, self).queryset(request).only('title')
class CoverLetterAdmin(admin.ModelAdmin):
"""
    A ModelAdmin with a custom queryset() method that uses defer(), to test
verbose_name display in messages shown after adding CoverLetter instances.
Note that the CoverLetter model defines a __unicode__ method.
"""
def queryset(self, request):
return super(CoverLetterAdmin, self).queryset(request).defer('date_written')
class StoryForm(forms.ModelForm):
class Meta:
widgets = {'title': forms.HiddenInput}
class StoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title',) # 'id' not in list_display_links
list_editable = ('content', )
form = StoryForm
ordering = ["-pk"]
class OtherStoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title', 'id') # 'id' in list_display_links
list_editable = ('content', )
ordering = ["-pk"]
class ComplexSortedPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'is_employee', 'colored_name')
ordering = ('name',)
def colored_name(self, obj):
return '<span style="color: #%s;">%s</span>' % ('ff00ff', obj.name)
colored_name.allow_tags = True
colored_name.admin_order_field = 'name'
class AlbumAdmin(admin.ModelAdmin):
list_filter = ['title']
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug' : ('title',)
}
class AdminOrderedFieldAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'order')
class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'some_order')
class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
def some_admin_order(self, obj):
return obj.order
some_admin_order.admin_order_field = 'order'
ordering = ('order',)
list_display = ('stuff', 'some_admin_order')
def admin_ordered_callable(obj):
return obj.order
admin_ordered_callable.admin_order_field = 'order'
class AdminOrderedCallableAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', admin_ordered_callable)
class ReportAdmin(admin.ModelAdmin):
def extra(self, request):
return HttpResponse()
def get_urls(self):
# Corner case: Don't call parent implementation
return patterns('',
url(r'^extra/$',
self.extra,
name='cable_extra'),
)
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
template = 'custom_filter_template.html'
class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
}),
)
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class RelatedPrepopulatedInline2(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class MainPrepopulatedAdmin(admin.ModelAdmin):
inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2]
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
}),
)
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class UnorderedObjectAdmin(admin.ModelAdmin):
list_display = ['name']
list_editable = ['name']
list_per_page = 2
site = admin.AdminSite(name="admin")
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
site.register(Section, save_as=True, inlines=[ArticleInline])
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
# related ForeignKey object registered in admin
# related ForeignKey object not registered in admin
# related OneToOne object registered in admin
# related OneToOne object not registered in admin
# when deleting Book so as to exercise all four troublesome (w.r.t. escaping
# and calling force_unicode to avoid problems on Python 2.3) paths through
# contrib.admin.util's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(Topping)
site.register(Album, AlbumAdmin)
site.register(Question)
site.register(Answer)
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
# Register core models we need in our tests
from django.contrib.auth.models import User, Group
from django.contrib.auth.admin import UserAdmin, GroupAdmin
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)
| bsd-3-clause |
yannrouillard/weboob | modules/arte/video.py | 2 | 1250 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Christophe Benz
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.video import BaseVideo
__all__ = ['ArteVideo', 'ArteLiveVideo']
class ArteVideo(BaseVideo):
@classmethod
def id2url(cls, _id):
lang = _id[-1:]
return 'http://arte.tv/papi/tvguide/videos/stream/%s/%s/HBBTV' % (lang, _id)
class ArteLiveVideo(BaseVideo):
def __init__(self, _id, *args, **kwargs):
BaseVideo.__init__(self, 'live.%s' % _id, *args, **kwargs)
@classmethod
def id2url(cls, _id):
return 'http://concert.arte.tv%s' % _id
| agpl-3.0 |
bakkou-badri/dataminingproject | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
| gpl-2.0 |
ncoghlan/prototype | integration-tests/features/steps/cockpit_demo.py | 2 | 14754 | """Steps to test the demonstration Cockpit plugin"""
from behave import given, when, then
from hamcrest import (
assert_that, equal_to, greater_than, greater_than_or_equal_to
)
@given("Cockpit is installed on the testing host")
def check_cockpit_is_installed(context):
"""Checks for the `cockpit-bridge` command"""
if shutil.which("cockpit-bridge") is None:
context.scenario.skip("Unable to locate `cockpit-bridge` command")
@given("the demonstration user exists")
def create_demonstration_user(context):
"""Creates a demonstration user for the scenario"""
context.demo_user = demo_user = DemoCockpitUser(context)
demo_user.create()
@given("the demonstration plugin is installed")
def ensure_demo_plugin_is_installed(context):
"""Ensures expected symlinks into the testing repo exist"""
# Copy the demo plugin to the demo user's home directory
demo_user = context.demo_user
demo_user.install_plugin()
user_plugin_dir = str(demo_user.USER_DIR / ".local/share/cockpit/leapp")
assert_that(os.path.exists(user_plugin_dir), "User plugin not installed")
# Check the rest of the repo is linked where the plugin will find it
leapp_dir_link = pathlib.Path("/opt/leapp")
desired_leapp_target = context.BASE_REPO_DIR
_ensure_expected_link(leapp_dir_link, desired_leapp_target)
@when("the demonstration user visits the {menu_item} page")
def visit_demo_page(context, menu_item):
"""Clicks on the named menu item in the top level Cockpit menu"""
context.demo_session = session = DemoCockpitSession(context)
session.login()
session.open_plugin(menu_item)
@when("enters {source_vm}'s IP address as the import source")
def step_impl(context, source_vm):
session = context.demo_session
source_ip = context.vm_helper.get_ip_address(source_vm)
context.demo_user.register_host_key(source_ip)
session.enter_source_ip(source_ip)
session.wait_for_active_import_button(10)
@when('clicks the "Import" button')
def import_app_via_plugin(context):
session = context.demo_session
session.start_app_import()
session.wait_for_app_import_to_start(10)
@then("the app import should be reported as complete within {time_limit:g} seconds")
def check_app_import_result(context, time_limit):
session = context.demo_session
session.wait_for_successful_app_import(time_limit)
# Helper functions and classes
# Note: these are all candidates for moving into a `leapp_testing` submodule
import subprocess
def _run_command(*cmd):
print(" Running {}".format(cmd))
output = None
try:
output = subprocess.check_output(cmd, stderr=subprocess.PIPE).decode()
except subprocess.CalledProcessError as exc:
output = exc.output.decode()
print("=== stdout for failed command ===")
print(output)
print("=== stderr for failed command ===")
print(exc.stderr.decode())
raise
return output
import binascii
import os
def _token_hex(nbytes=32):
return binascii.hexlify(os.urandom(nbytes)).decode('ascii')
import tempfile
from crypt import crypt
import shutil
import pathlib
class DemoCockpitUser(object):
"""Cockpit user that's set up to run the demo"""
def __init__(self, context):
self._app_plugin_source_dir = str(context.BASE_REPO_DIR / "cockpit")
self.username = username = "leapp-" + _token_hex(8)
self.password = _token_hex()
temp = pathlib.Path(tempfile.gettempdir())
self.BASE_DIR = base_dir = temp / _token_hex(8)
self.USER_DIR = base_dir / username
self.BASE_REPO_DIR = context.BASE_REPO_DIR
context.scenario_cleanup.callback(self.destroy)
self._ssh_dir = self.USER_DIR / '.ssh'
self._user_key = self._ssh_dir / 'id_rsa'
self._user_known_hosts = self._ssh_dir / 'known_hosts'
def create(self):
"""Create a local user with required permissions to run the demo"""
base_dir = self.BASE_DIR
base_dir.mkdir(exist_ok=True)
_run_command("sudo", "useradd",
"--groups", "libvirt,wheel",
"--password", crypt(self.password),
"-M", "--base-dir", str(base_dir),
self.username)
        # Sanity check that also records the user's info in the test logs
print(_run_command("id", self.username))
# We create the home directory manually, as asking useradd to do it
# triggers an SELinux error (presumably due to the use of tmpfs)
self.USER_DIR.mkdir()
self._ssh_dir.mkdir()
self._fix_dir_permissions()
self._setup_ssh_key()
def _setup_ssh_key(self):
key_path = str(self.BASE_REPO_DIR / 'integration-tests' / 'config' / 'leappto_testing_key')
user_key = str(self._user_key)
# Copy the testing key in as the user's default key
_run_command("sudo", "cp", key_path, user_key)
_run_command("sudo", "cp", key_path + ".pub", user_key + ".pub")
# Make sure the user's SSH directory has the correct permissions
ssh_dir = str(self._ssh_dir)
_run_command("sudo", "chown", "-R", self.username, ssh_dir)
_run_command("sudo", "chmod", "-R", "u=Xrw,g=u,o=", ssh_dir)
_run_command("sudo", "chmod", "600", user_key)
_run_command("sudo", "chmod", "644", user_key + ".pub")
# Make sure the user's SSH directory has the correct SELinux labels
_run_command("sudo", "chcon", "-R", "unconfined_u:object_r:ssh_home_t:s0", ssh_dir)
def _fix_dir_permissions(self, dir_to_fix=None):
# Ensure all the user's files are owned by the demo user,
# but can still be accessed via the gid running the test suite
if dir_to_fix is None:
dir_to_fix = str(self.USER_DIR)
_run_command("chmod", "-R", "u=Xrw,g=u,o=", dir_to_fix)
_run_command("sudo", "chown", "-R", self.username, dir_to_fix)
# Make sure the user's home directory has the correct SELinux labels
_run_command("sudo", "chcon", "-R", "unconfined_u:object_r:user_home_t:s0", dir_to_fix)
def register_host_key(self, source_ip):
host_key_info = _run_command("ssh-keyscan", "-t", "rsa", source_ip)
with self._user_known_hosts.open("a") as f:
f.write(host_key_info)
def install_plugin(self):
"""Install the Cockpit plugin into the demo user's home directory"""
cockpit_plugin_dir = self.USER_DIR / ".local" / "share" / "cockpit"
cockpit_plugin_dir.mkdir(parents=True)
user_plugin_dir = str(cockpit_plugin_dir / "leapp")
# We make a full copy of the plugin source, so its covered by
# the permissions changes below and Cockpit will load it
shutil.copytree(self._app_plugin_source_dir, user_plugin_dir)
self._fix_dir_permissions(str(cockpit_plugin_dir))
def destroy(self):
"""Destroy the created test user"""
# Allow some time for the browser session to fully close down
deadline = time.monotonic() + 2
while time.monotonic() < deadline:
time.sleep(0.1)
try:
_run_command("sudo", "userdel", "-r", self.username)
except subprocess.CalledProcessError as exc:
if b"currently used" in exc.stderr:
print("User still in use, waiting 100 ms to try again")
continue
break
# Ensure the entire temporary tree gets deleted,
# even the parts now owned by the temporary user
_run_command("sudo", "rm", "-r", str(self.BASE_DIR))
def _ensure_expected_link(symlink, expected_target):
"""Ensure a symlink resolves to the expected target"""
assert_that(symlink.resolve(), equal_to(expected_target))
import time
import splinter
# Map from plugin menu entries to expected iframe names
KNOWN_PLUGINS = {
"Import Apps": "cockpit1:localhost/leapp/leapp"
}
class DemoCockpitSession(object):
"""Splinter browser session to work with the Cockpit plugin"""
def __init__(self, context):
self._user = context.demo_user
self._browser = browser = splinter.Browser()
self._cockpit_url = "http://localhost:9090"
self._plugin_frame = None
self._scenario_cleanup = cleanup = context.scenario_cleanup
cleanup.enter_context(browser)
def login(self):
"""Logs into Cockpit using the test user's credentials
        Ensures password-based privilege escalation is enabled when logging in.
"""
browser = self._browser
user = self._user
browser.visit(self._cockpit_url)
assert_that(browser.status_code.is_success(), "Failed to load login page")
# browser.fill_form looks form elements up by name rather than id, so we
# find and populate the form elements individually
browser.find_by_id("login-user-input").fill(user.username)
browser.find_by_id("login-password-input").fill(user.password)
browser.find_by_id("authorized-input").check()
browser.find_by_id("login-button").click()
self._scenario_cleanup.callback(self.logout)
self._logged_in = True
def logout(self):
"""Logs out of Cockpit, allowing the test user to be deleted"""
self._plugin_frame = None
self._browser.find_by_id("navbar-dropdown").click()
self._browser.find_by_id("go-logout").click()
def open_plugin(self, menu_item):
"""Opens the named plugin tab from the Cockpit navigation menu"""
if not self._logged_in:
raise RuntimeError("Must log in before accessing app plugin")
if menu_item not in KNOWN_PLUGINS:
raise RuntimeError("Unknown Cockpit plugin: {}".format(menu_item))
browser = self._browser
# Allow some time for Cockpit to render after initial login
found_plugin_name = browser.is_text_present(menu_item, wait_time=2)
err_msg = "{!r} menu item not found on page".format(menu_item)
assert_that(found_plugin_name, err_msg)
browser.click_link_by_partial_text(menu_item)
frame_name = KNOWN_PLUGINS[menu_item]
found_app = browser.is_element_present_by_name(frame_name, wait_time=2)
err_msg = "Plugin iframe {!r} not found on page".format(frame_name)
assert_that(found_app, err_msg)
_enter_context = self._scenario_cleanup.enter_context
self._plugin_frame = _enter_context(browser.get_iframe(frame_name))
@property
def plugin_frame(self):
result = self._plugin_frame
if result is None:
raise RuntimeError("Must open app plugin before querying content")
return result
def enter_source_ip(self, source_ip):
"""Specifies source IP for application to be imported"""
frame = self.plugin_frame
source_address = frame.find_by_id("source-address").first
source_address.fill(source_ip)
find_apps = frame.find_by_id("scan-source-btn").first
find_apps.click()
def wait_for_active_import_button(self, time_limit):
"""Waits for the import button to become active (if it isn't already)"""
frame = self.plugin_frame
import_app = frame.find_by_id("import-button")
deadline = time.monotonic() + 60
while time.monotonic() < deadline:
time.sleep(0.1)
# TODO: Switch to a supported public API for this check
# RFE: https://github.com/cobrateam/splinter/issues/544
if import_app and import_app.first._element.is_enabled():
break
else:
assert_that(False, "Specifying source failed to allow migration")
self.import_app = import_app
def start_app_import(self):
"""Selects source & target machine, then starts a migration"""
self.import_app.click()
def _dump_failed_command(self):
"""Dump the progress report from a failed web UI command to stdout
If there is no failed command, dumps all executed commands
"""
frame = self.plugin_frame
failure_log = frame.find_by_css("li.failed")
if not failure_log:
print("No failed commands reported in UI")
commands = frame.find_by_css("li.success")
for command in commands:
line = command.find_by_css("h4")
print("UI Command> {}".format(line.text))
else:
failure = failure_log.first
failure.click()
print("Last executed command failed")
line = failure.find_by_css("h4")
print("UI Command> {}".format(line.text))
progress_lines = frame.find_by_css("span.progress-line")
for line in progress_lines:
if line:
print("UI Log> {}".format(line.text))
# Uncomment one of these two lines for live debugging of failures
# Note: don't check either of these in, as they will hang in CI
# input("Press enter to resume execution") # Just browser exploration
# import pdb; pdb.set_trace() # Interactive Python debugging
def wait_for_app_import_to_start(self, time_limit):
frame = self.plugin_frame
started = frame.is_text_present("migrate-machine ", wait_time=time_limit)
if not started:
self._dump_failed_command()
assert_that(started, "Failed to start migration")
time.sleep(0.1) # Immediate check for argument parsing failure
already_failed = frame.find_by_css("li.failed")
if already_failed:
self._dump_failed_command()
assert_that(not already_failed,
"Migration operation immediately reported failure")
def wait_for_successful_app_import(self, time_limit):
frame = self.plugin_frame
# Wait for the app import operation to complete
deadline = time.monotonic() + time_limit
running = frame.find_by_css("li.running")
while running and time.monotonic() < deadline:
time.sleep(0.1)
already_failed = frame.find_by_css("li.failed")
if already_failed:
self._dump_failed_command()
assert_that(not already_failed, "Migration operation failed")
running = frame.find_by_css("li.running")
# Confirm that the import operation succeeded
last_command = frame.find_by_css("li.success")[-1]
last_command.click()
success_message = "Imported service is now starting"
succeeded = frame.is_text_present(success_message)
if not succeeded:
self._dump_failed_command()
assert_that(succeeded, "Migration failed to complete within time limit")
| lgpl-2.1 |
ulikoehler/UliEngineering | UliEngineering/Electronics/MOSFET.py | 1 | 1960 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Utility to calculate MOSFETs
"""
import numpy as np
from UliEngineering.EngineerIO import normalize_numeric
from UliEngineering.Units import Unit
__all__ = ["mosfet_gate_charge_losses", "mosfet_gate_charge_loss_per_cycle"]
def mosfet_gate_charge_losses(total_gate_charge, vsupply, frequency="100 kHz") -> Unit("W"):
"""
Compute the gate charge loss of a MOSFET in a switch-mode
power-supply application as a total power (integrated per second).
Ref:
http://rohmfs.rohm.com/en/products/databook/applinote/ic/power/switching_regulator/power_loss_appli-e.pdf
Parameters
----------
total_gate_charge: number or Engineer string
The total gate charge in Coulomb.
For multiple MOSFETs such as in synchronous applications,
add their gate charges together.
vsupply: number or Engineer string
The gate driver supply voltage in Volts
frequency: number or Engineer string
The switching frequency in Hz
"""
frequency = normalize_numeric(frequency)
return mosfet_gate_charge_loss_per_cycle(total_gate_charge, vsupply) * frequency
def mosfet_gate_charge_loss_per_cycle(total_gate_charge, vsupply) -> Unit("J"):
"""
Compute the gate charge loss of a MOSFET in a switch-mode
power-supply application per switching cycle.
Ref:
http://rohmfs.rohm.com/en/products/databook/applinote/ic/power/switching_regulator/power_loss_appli-e.pdf
Parameters
----------
total_gate_charge: number or Engineer string
The total gate charge in Coulomb.
For multiple MOSFETs such as in synchronous applications,
add their gate charges together.
vsupply: number or Engineer string
The gate driver supply voltage in Volts
"""
total_gate_charge = normalize_numeric(total_gate_charge)
vsupply = normalize_numeric(vsupply)
return total_gate_charge * vsupply
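# Illustrative usage sketch (the gate charge, supply voltage and switching
# frequency below are assumed example values, not taken from any datasheet):
# a 50 nC total gate charge driven from 12 V dissipates Q*V = 600 nJ per
# switching cycle, i.e. about 0.3 W at 500 kHz.
#
#   loss_per_cycle = mosfet_gate_charge_loss_per_cycle("50 nC", "12 V")   # ~6e-7 J
#   loss_total = mosfet_gate_charge_losses("50 nC", "12 V", "500 kHz")    # ~0.3 W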
| apache-2.0 |
yuchangfu/pythonfun | flaskenv/Lib/site-packages/pip/commands/show.py | 344 | 2767 | import os
from pip.basecommand import Command
from pip.log import logger
from pip._vendor import pkg_resources
class ShowCommand(Command):
"""Show information about one or more installed packages."""
name = 'show'
usage = """
%prog [options] <package> ..."""
summary = 'Show information about installed packages.'
def __init__(self, *args, **kw):
super(ShowCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-f', '--files',
dest='files',
action='store_true',
default=False,
help='Show the full list of installed files for each package.')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
logger.warn('ERROR: Please provide a package name or names.')
return
query = args
results = search_packages_info(query)
print_results(results, options.files)
def search_packages_info(query):
"""
Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Listing installed files requires a
    pip-generated 'installed-files.txt' in the distribution's '.egg-info'
directory.
"""
installed_packages = dict(
[(p.project_name.lower(), p) for p in pkg_resources.working_set])
for name in query:
normalized_name = name.lower()
if normalized_name in installed_packages:
dist = installed_packages[normalized_name]
package = {
'name': dist.project_name,
'version': dist.version,
'location': dist.location,
'requires': [dep.project_name for dep in dist.requires()],
}
filelist = os.path.join(
dist.location,
dist.egg_name() + '.egg-info',
'installed-files.txt')
if os.path.isfile(filelist):
package['files'] = filelist
yield package
def print_results(distributions, list_all_files):
"""
    Print the information from the installed distributions found.
"""
for dist in distributions:
logger.notify("---")
logger.notify("Name: %s" % dist['name'])
logger.notify("Version: %s" % dist['version'])
logger.notify("Location: %s" % dist['location'])
logger.notify("Requires: %s" % ', '.join(dist['requires']))
if list_all_files:
logger.notify("Files:")
if 'files' in dist:
for line in open(dist['files']):
logger.notify(" %s" % line.strip())
else:
logger.notify("Cannot locate installed-files.txt")
| gpl-3.0 |
zpincus/celltool | celltool/utility/image.py | 1 | 1424 | # Copyright 2007 Zachary Pincus
# This file is part of CellTool.
#
# CellTool is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""Tools to read and write numpy arrays from and to image files.
"""
import freeimage
import numpy
from . import warn_tools
def read_grayscale_array_from_image_file(filename, warn = True):
"""Read an image from disk into a 2-D grayscale array, converting from color if necessary.
If 'warn' is True, issue a warning when arrays are converted from color to grayscale.
"""
image_array = freeimage.read(filename)
if len(image_array.shape) == 3:
image_array = make_grayscale_array(image_array)
if warn:
warn_tools.warn('Image %s converted from RGB to grayscale: intensity values have been scaled and combined.'%filename)
return image_array
write_array_as_image_file = freeimage.write
def make_grayscale_array(array):
"""Giiven an array of shape (x,y,3) where the last dimension indexes the
(r,g,b) pixel value, return a (x,y) grayscale array, where intensity is
calculated with the ITU-R BT 709 luma transform:
intensity = 0.2126r + 0.7152g + 0.0722b
"""
dtype = array.dtype
new_array = numpy.round((array * [0.2126, 0.7152, 0.0722]).sum(axis = 2))
return new_array.astype(dtype)
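# Illustrative usage sketch (the array below is an assumed example, not part
# of any dataset): converting a small RGB array to grayscale drops the colour
# axis and preserves the input dtype.
#
#   rgb = numpy.zeros((4, 4, 3), dtype=numpy.uint8)
#   gray = make_grayscale_array(rgb)   # shape (4, 4), dtype uint8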
| gpl-2.0 |
40223222/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/opcode.py | 714 | 5442 |
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = []
opmap = {}
opname = [''] * 256
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
hasnargs.append(131)
def_op('MAKE_FUNCTION', 132) # Number of args with default values
def_op('BUILD_SLICE', 133) # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)
def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
hasnargs.append(140)
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
hasnargs.append(141)
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
hasnargs.append(142)
jrel_op('SETUP_WITH', 143)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
del def_op, name_op, jrel_op, jabs_op
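# Illustrative usage sketch (assumed example): look up an opcode number and
# check whether it takes an argument.
#
#   opmap['LOAD_CONST']                   # -> 100
#   opmap['LOAD_CONST'] >= HAVE_ARGUMENT  # -> True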
| gpl-3.0 |
AlphaStaxLLC/tornado | tornado/test/testing_test.py | 144 | 7361 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
from tornado import gen, ioloop
from tornado.log import app_log
from tornado.testing import AsyncTestCase, gen_test, ExpectLog
from tornado.test.util import unittest
import contextlib
import os
import traceback
@contextlib.contextmanager
def set_environ(name, value):
old_value = os.environ.get(name)
os.environ[name] = value
try:
yield
finally:
if old_value is None:
del os.environ[name]
else:
os.environ[name] = old_value
class AsyncTestCaseTest(AsyncTestCase):
def test_exception_in_callback(self):
self.io_loop.add_callback(lambda: 1 / 0)
try:
self.wait()
self.fail("did not get expected exception")
except ZeroDivisionError:
pass
def test_wait_timeout(self):
time = self.io_loop.time
# Accept default 5-second timeout, no error
self.io_loop.add_timeout(time() + 0.01, self.stop)
self.wait()
# Timeout passed to wait()
self.io_loop.add_timeout(time() + 1, self.stop)
with self.assertRaises(self.failureException):
self.wait(timeout=0.01)
# Timeout set with environment variable
self.io_loop.add_timeout(time() + 1, self.stop)
with set_environ('ASYNC_TEST_TIMEOUT', '0.01'):
with self.assertRaises(self.failureException):
self.wait()
def test_subsequent_wait_calls(self):
"""
This test makes sure that a second call to wait()
clears the first timeout.
"""
self.io_loop.add_timeout(self.io_loop.time() + 0.01, self.stop)
self.wait(timeout=0.02)
self.io_loop.add_timeout(self.io_loop.time() + 0.03, self.stop)
self.wait(timeout=0.15)
def test_multiple_errors(self):
def fail(message):
raise Exception(message)
self.io_loop.add_callback(lambda: fail("error one"))
self.io_loop.add_callback(lambda: fail("error two"))
# The first error gets raised; the second gets logged.
with ExpectLog(app_log, "multiple unhandled exceptions"):
with self.assertRaises(Exception) as cm:
self.wait()
self.assertEqual(str(cm.exception), "error one")
class AsyncTestCaseWrapperTest(unittest.TestCase):
def test_undecorated_generator(self):
class Test(AsyncTestCase):
def test_gen(self):
yield
test = Test('test_gen')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("should be decorated", result.errors[0][1])
def test_undecorated_generator_with_skip(self):
class Test(AsyncTestCase):
@unittest.skip("don't run this")
def test_gen(self):
yield
test = Test('test_gen')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
def test_other_return(self):
class Test(AsyncTestCase):
def test_other_return(self):
return 42
test = Test('test_other_return')
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("Return value from test method ignored", result.errors[0][1])
class SetUpTearDownTest(unittest.TestCase):
def test_set_up_tear_down(self):
"""
This test makes sure that AsyncTestCase calls super methods for
setUp and tearDown.
InheritBoth is a subclass of both AsyncTestCase and
SetUpTearDown, with the ordering so that the super of
AsyncTestCase will be SetUpTearDown.
"""
events = []
result = unittest.TestResult()
class SetUpTearDown(unittest.TestCase):
def setUp(self):
events.append('setUp')
def tearDown(self):
events.append('tearDown')
class InheritBoth(AsyncTestCase, SetUpTearDown):
def test(self):
events.append('test')
InheritBoth('test').run(result)
expected = ['setUp', 'test', 'tearDown']
self.assertEqual(expected, events)
class GenTest(AsyncTestCase):
def setUp(self):
super(GenTest, self).setUp()
self.finished = False
def tearDown(self):
self.assertTrue(self.finished)
super(GenTest, self).tearDown()
@gen_test
def test_sync(self):
self.finished = True
@gen_test
def test_async(self):
yield gen.Task(self.io_loop.add_callback)
self.finished = True
def test_timeout(self):
# Set a short timeout and exceed it.
@gen_test(timeout=0.1)
def test(self):
yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)
# This can't use assertRaises because we need to inspect the
# exc_info triple (and not just the exception object)
try:
test(self)
self.fail("did not get expected exception")
except ioloop.TimeoutError:
# The stack trace should blame the add_timeout line, not just
# unrelated IOLoop/testing internals.
self.assertIn(
"gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)",
traceback.format_exc())
self.finished = True
def test_no_timeout(self):
# A test that does not exceed its timeout should succeed.
@gen_test(timeout=1)
def test(self):
time = self.io_loop.time
yield gen.Task(self.io_loop.add_timeout, time() + 0.1)
test(self)
self.finished = True
def test_timeout_environment_variable(self):
@gen_test(timeout=0.5)
def test_long_timeout(self):
time = self.io_loop.time
yield gen.Task(self.io_loop.add_timeout, time() + 0.25)
# Uses provided timeout of 0.5 seconds, doesn't time out.
with set_environ('ASYNC_TEST_TIMEOUT', '0.1'):
test_long_timeout(self)
self.finished = True
def test_no_timeout_environment_variable(self):
@gen_test(timeout=0.01)
def test_short_timeout(self):
time = self.io_loop.time
yield gen.Task(self.io_loop.add_timeout, time() + 1)
# Uses environment-variable timeout of 0.1, times out.
with set_environ('ASYNC_TEST_TIMEOUT', '0.1'):
with self.assertRaises(ioloop.TimeoutError):
test_short_timeout(self)
self.finished = True
def test_with_method_args(self):
@gen_test
def test_with_args(self, *args):
self.assertEqual(args, ('test',))
yield gen.Task(self.io_loop.add_callback)
test_with_args(self, 'test')
self.finished = True
def test_with_method_kwargs(self):
@gen_test
def test_with_kwargs(self, **kwargs):
self.assertDictEqual(kwargs, {'test': 'test'})
yield gen.Task(self.io_loop.add_callback)
test_with_kwargs(self, test='test')
self.finished = True
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
AlperSaltabas/OR_Tools_Google_API | examples/python/alldifferent_except_0.py | 32 | 3637 | # Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All different except 0 Google CP Solver.
Decomposition of global constraint alldifferent_except_0.
From Global constraint catalogue:
http://www.emn.fr/x-info/sdemasse/gccat/Calldifferent_except_0.html
'''
Enforce all variables of the collection VARIABLES to take distinct
values, except those variables that are assigned to 0.
Example
(<5, 0, 1, 9, 0, 3>)
The alldifferent_except_0 constraint holds since all the values
(that are different from 0) 5, 1, 9 and 3 are distinct.
'''
Compare with the following models:
* Comet: http://hakank.org/comet/alldifferent_except_0.co
* ECLiPSe: http://hakank.org/eclipse/alldifferent_except_0.ecl
* Tailor/Essence': http://hakank.org/tailor/alldifferent_except_0.eprime
* Gecode: http://hakank.org/gecode/alldifferent_except_0.cpp
* Gecode/R: http://hakank.org/gecode_r/all_different_except_0.rb
* MiniZinc: http://hakank.org/minizinc/alldifferent_except_0.mzn
* SICStus_ http://hakank.org/sicstus/alldifferent_except_0.pl
* Choco: http://hakank.org/choco/AllDifferentExcept0_test.java
* JaCoP: http://hakank.org/JaCoP/AllDifferentExcept0_test.java
* Zinc: http://hakank.org/minizinc/alldifferent_except_0.zinc
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
#
# Decomposition of alldifferent_except_0
# Thanks to Laurent Perron (Google) for
# suggestions of improvements.
#
def alldifferent_except_0(solver, a):
n = len(a)
for i in range(n):
for j in range(i):
solver.Add((a[i] != 0) * (a[j] != 0) <= (a[i] != a[j]))
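# Note on the decomposition above: (a[i] != 0), (a[j] != 0) and (a[i] != a[j])
# are 0/1 (reified) expressions, so the left-hand product is 1 only when both
# variables are non-zero, and in that case the inequality forces a[i] != a[j].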
# more compact version:
def alldifferent_except_0_b(solver, a):
n = len(a)
[solver.Add((a[i] != 0) * (a[j] != 0) <= (a[i] != a[j]))
for i in range(n) for j in range(i)]
def main(unused_argv):
# Create the solver.
solver = pywrapcp.Solver("Alldifferent except 0")
# data
n = 7
# declare variables
x = [solver.IntVar(0, n - 1, "x%i" % i) for i in range(n)]
# Number of zeros.
z = solver.Sum([x[i] == 0 for i in range(n)]).VarWithName("z")
#
# constraints
#
alldifferent_except_0(solver, x)
# we require 2 0's
solver.Add(z == 2)
#
# solution and search
#
solution = solver.Assignment()
solution.Add([x[i] for i in range(n)])
solution.Add(z)
collector = solver.AllSolutionCollector(solution)
solver.Solve(solver.Phase([x[i] for i in range(n)],
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE),
[collector])
num_solutions = collector.SolutionCount()
for s in range(num_solutions):
print "x:", [collector.Value(s, x[i]) for i in range(n)]
print "z:", collector.Value(s, z)
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
if __name__ == "__main__":
main("cp sample")
| apache-2.0 |
cschnei3/forseti-security | tests/inventory/pipelines/test_data/fake_buckets.py | 3 | 4791 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake buckets data."""
FAKE_BUCKETS_MAP = [{
'project_number': 11111,
'buckets': {
'items': [{
'kind': 'storage#bucket',
'name': 'fakebucket1',
'timeCreated': '2016-07-21T12:57:04.604Z',
'updated': '2016-07-21T12:57:04.604Z',
'projectNumber': '11111',
'metageneration': '2',
'location': 'EU',
'etag': 'CAE=',
'id': 'fakebucket1',
'selfLink': 'https://www.googleapis.com/storage/v1/b/fakebucket1',
'storageClass': 'STANDARD',
'lifecycle': {}
}
]
}
}]
EXPECTED_LOADABLE_BUCKETS = [{
'project_number': 11111,
'bucket_id': 'fakebucket1',
'bucket_name': 'fakebucket1',
'bucket_kind': 'storage#bucket',
'bucket_storage_class': 'STANDARD',
'bucket_location': 'EU',
'bucket_create_time': '2016-07-21 12:57:04',
'bucket_update_time': '2016-07-21 12:57:04',
'bucket_selflink': 'https://www.googleapis.com/storage/v1/b/fakebucket1',
'bucket_lifecycle_raw': '{}',
'raw_bucket': '{"updated": "2016-07-21T12:57:04.604Z", "timeCreated": "2016-07-21T12:57:04.604Z", "metageneration": "2", "id": "fakebucket1", "kind": "storage#bucket", "name": "fakebucket1", "projectNumber": "11111", "etag": "CAE=", "storageClass": "STANDARD", "lifecycle": {}, "selfLink": "https://www.googleapis.com/storage/v1/b/fakebucket1", "location": "EU"}'
}
]
FAKE_BUCKET_ACL_MAP = [{
'bucket_name': 'fakebucket1',
'acl': {
'items': [{
'kind': 'storage#bucketAccessControl',
'bucket': 'fakebucket1',
'entity': 'project-owners-11111',
'etag': 'CAE=',
'role': 'OWNER',
'projectTeam': {
'projectNumber': '11111',
'team': 'owners'
},
'id': 'fakebucket1/project-owners-11111',
'selfLink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-owners-11111'
},
{
'kind': 'storage#bucketAccessControl',
'bucket': 'fakebucket1',
'entity': 'project-readers-11111',
'etag': 'CAE=',
'role': 'READER',
'projectTeam': {
'projectNumber': '11111',
'team': 'readers'},
'id': 'fakebucket1/project-readers-11111',
'selfLink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-readers-11111'
}
]
}
}]
EXPECTED_LOADABLE_BUCKET_ACLS = [{
'acl_id': 'fakebucket1/project-owners-11111',
'bucket': 'fakebucket1',
'bucket_acl_selflink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-owners-11111',
'domain': None,
'email': None,
'entity': 'project-owners-11111',
'entity_id': None,
'kind': 'storage#bucketAccessControl',
'project_team': '{"projectNumber": "11111", "team": "owners"}',
'raw_bucket_acl': '{"kind": "storage#bucketAccessControl", "etag": "CAE=", "role": "OWNER", "projectTeam": {"projectNumber": "11111", "team": "owners"}, "bucket": "fakebucket1", "id": "fakebucket1/project-owners-11111", "selfLink": "https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-owners-11111", "entity": "project-owners-11111"}',
'role': 'OWNER'
},
{
'acl_id': 'fakebucket1/project-readers-11111',
'bucket': 'fakebucket1',
'bucket_acl_selflink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-readers-11111',
'domain': None,
'email': None,
'entity': 'project-readers-11111',
'entity_id': None,
'kind': 'storage#bucketAccessControl',
'project_team': '{"projectNumber": "11111", "team": "readers"}',
'raw_bucket_acl': '{"kind": "storage#bucketAccessControl", "etag": "CAE=", "role": "READER", "projectTeam": {"projectNumber": "11111", "team": "readers"}, "bucket": "fakebucket1", "id": "fakebucket1/project-readers-11111", "selfLink": "https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-readers-11111", "entity": "project-readers-11111"}',
'role': 'READER'
}]
| apache-2.0 |
junhuac/MQUIC | src/testing/gtest/test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
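# re.DOTALL lets each '.*' span newlines, so the flags above only need to occur
# in this order somewhere in the multi-line help output.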
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
    The right message must be printed and the tests must
    be skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
alexgibson/bedrock | lib/fluent_migrations/firefox/set-as-default/landing.py | 6 | 6217 | from __future__ import absolute_import
import fluent.syntax.ast as FTL
from fluent.migrate.helpers import transforms_from
from fluent.migrate.helpers import VARIABLE_REFERENCE, TERM_REFERENCE
from fluent.migrate import REPLACE, COPY
whatsnew_73 = "firefox/whatsnew_73.lang"
def migrate(ctx):
"""Migrate bedrock/firefox/templates/firefox/set-as-default/landing-page.html, part {index}."""
ctx.add_transforms(
"firefox/set-as-default/landing.ftl",
"firefox/set-as-default/landing.ftl",
[
FTL.Message(
id=FTL.Identifier("set-as-default-landing-make-firefox-your-default"),
value=REPLACE(
whatsnew_73,
"Make Firefox your default browser",
{
"Firefox": TERM_REFERENCE("brand-name-firefox"),
}
)
),
] + transforms_from("""
set-as-default-landing-make-sure-youre-protected = {COPY(whatsnew_73, "Make sure you’re protected, every time you get online",)}
""", whatsnew_73=whatsnew_73) + [
FTL.Message(
id=FTL.Identifier("set-as-default-landing-thanks-for-using-the"),
value=REPLACE(
whatsnew_73,
"Thanks for using the latest Firefox browser. When you choose Firefox, you support a better web for you and everyone else. Now take the next step to protect yourself.",
{
"Firefox": TERM_REFERENCE("brand-name-firefox"),
}
)
),
] + transforms_from("""
set-as-default-landing-choose-automatic-privacy = {COPY(whatsnew_73, "Choose automatic privacy",)}
""", whatsnew_73=whatsnew_73) + [
FTL.Message(
id=FTL.Identifier("set-as-default-landing-companies-keep-finding"),
value=REPLACE(
whatsnew_73,
"Companies keep finding new ways to poach your personal data. Firefox is the browser with a mission of finding new ways to protect you.",
{
"Firefox": TERM_REFERENCE("brand-name-firefox"),
}
)
),
] + transforms_from("""
set-as-default-landing-choose-freedom-on-every = {COPY(whatsnew_73, "Choose freedom on every device",)}
""", whatsnew_73=whatsnew_73) + [
FTL.Message(
id=FTL.Identifier("set-as-default-landing-firefox-is-fast-and"),
value=REPLACE(
whatsnew_73,
"Firefox is fast and safe on Windows, iOS, Android, Linux…and across them all. You deserve choices in browsers and devices, instead of decisions made for you.",
{
"Firefox": TERM_REFERENCE("brand-name-firefox"),
"Windows": TERM_REFERENCE("brand-name-windows"),
"iOS": TERM_REFERENCE("brand-name-ios"),
"Android": TERM_REFERENCE("brand-name-android"),
"Linux": TERM_REFERENCE("brand-name-linux"),
}
)
),
] + transforms_from("""
set-as-default-landing-choose-corporate-independence = {COPY(whatsnew_73, "Choose corporate independence",)}
""", whatsnew_73=whatsnew_73) + [
FTL.Message(
id=FTL.Identifier("set-as-default-landing-firefox-is-the-only"),
value=REPLACE(
whatsnew_73,
"Firefox is the only major independent browser. Chrome, Edge and Brave are all built on Google code, which means giving Google even more control of the internet.",
{
"Firefox": TERM_REFERENCE("brand-name-firefox"),
"Chrome": TERM_REFERENCE("brand-name-chrome"),
"Edge": TERM_REFERENCE("brand-name-edge"),
"Brave": TERM_REFERENCE("brand-name-brave"),
"Google": TERM_REFERENCE("brand-name-google"),
}
)
),
FTL.Message(
id=FTL.Identifier("set-as-default-landing-the-internet-keeps"),
value=REPLACE(
whatsnew_73,
"The internet keeps finding new ways to poach your personal data. Firefox is the only browser with a mission of finding new ways to protect you.",
{
"Firefox": TERM_REFERENCE("brand-name-firefox"),
}
)
),
FTL.Message(
id=FTL.Identifier("set-as-default-landing-firefox-is-fast-no-interest"),
value=REPLACE(
whatsnew_73,
"Firefox is fast and safe on Windows, iOS, Android, Linux...and across them all. We have no interest in locking you in or resetting your preferences.",
{
"Firefox": TERM_REFERENCE("brand-name-firefox"),
"Windows": TERM_REFERENCE("brand-name-windows"),
"iOS": TERM_REFERENCE("brand-name-ios"),
"Android": TERM_REFERENCE("brand-name-android"),
"Linux": TERM_REFERENCE("brand-name-linux"),
}
)
),
FTL.Message(
id=FTL.Identifier("set-as-default-landing-firefox-is-the-only-major"),
value=REPLACE(
whatsnew_73,
"Firefox is the only major independent browser. Chrome, Edge and Brave are all built with code from Google, the world’s largest ad network.",
{
"Firefox": TERM_REFERENCE("brand-name-firefox"),
"Chrome": TERM_REFERENCE("brand-name-chrome"),
"Edge": TERM_REFERENCE("brand-name-edge"),
"Brave": TERM_REFERENCE("brand-name-brave"),
"Google": TERM_REFERENCE("brand-name-google"),
}
)
),
]
)
| mpl-2.0 |
PriceChild/ansible | contrib/inventory/zone.py | 57 | 1489 | #!/usr/bin/env python
# (c) 2015, Dagobert Michelsen <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = []
for l in pipe.stdout.readlines():
# 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
s = l.split(':')
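    # s[1] is the zone name (second field in the sample line above); the
    # global zone itself is excluded below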
if s[1] != 'global':
result['all']['hosts'].append(s[1])
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'zone'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({'ansible_connection': 'zone'}))
else:
sys.stderr.write("Need an argument, either --list or --host <host>\n")
| gpl-3.0 |
FujitsuEnablingSoftwareTechnologyGmbH/tempest | tempest/api/orchestration/stacks/test_templates.py | 4 | 2155 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest.api.orchestration import base
from tempest import test
class TemplateYAMLTestJSON(base.BaseOrchestrationTest):
template = """
HeatTemplateFormatVersion: '2012-12-12'
Description: |
Template which creates only a new user
Resources:
CfnUser:
Type: AWS::IAM::User
"""
@classmethod
def resource_setup(cls):
super(TemplateYAMLTestJSON, cls).resource_setup()
cls.stack_name = data_utils.rand_name('heat')
cls.stack_identifier = cls.create_stack(cls.stack_name, cls.template)
cls.client.wait_for_stack_status(cls.stack_identifier,
'CREATE_COMPLETE')
cls.stack_id = cls.stack_identifier.split('/')[1]
cls.parameters = {}
@test.attr(type='gate')
@test.idempotent_id('47430699-c368-495e-a1db-64c26fd967d7')
def test_show_template(self):
"""Getting template used to create the stack."""
self.client.show_template(self.stack_identifier)
@test.attr(type='gate')
@test.idempotent_id('ed53debe-8727-46c5-ab58-eba6090ec4de')
def test_validate_template(self):
"""Validating template passing it content."""
self.client.validate_template(self.template,
self.parameters)
class TemplateAWSTestJSON(TemplateYAMLTestJSON):
template = """
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template which creates only a new user",
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User"
}
}
}
"""
| apache-2.0 |
Mirantis/tempest | tempest/openstack/common/fixture/lockutils.py | 15 | 1890 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from tempest.openstack.common import lockutils
class LockFixture(fixtures.Fixture):
"""External locking fixture.
This fixture is basically an alternative to the synchronized decorator with
the external flag so that tearDowns and addCleanups will be included in
the lock context for locking between tests. The fixture is recommended to
be the first line in a test method, like so::
def test_method(self):
self.useFixture(LockFixture)
...
or the first line in setUp if all the test methods in the class are
required to be serialized. Something like::
class TestCase(testtools.testcase):
def setUp(self):
self.useFixture(LockFixture)
super(TestCase, self).setUp()
...
This is because addCleanups are put on a LIFO queue that gets run after the
test method exits. (either by completing or raising an exception)
"""
def __init__(self, name, lock_file_prefix=None):
self.mgr = lockutils.lock(name, lock_file_prefix, True)
def setUp(self):
super(LockFixture, self).setUp()
self.addCleanup(self.mgr.__exit__, None, None, None)
self.mgr.__enter__()
| apache-2.0 |
SKA-ScienceDataProcessor/algorithm-reference-library | workflows/serial/imaging/imaging_serial.py | 1 | 21976 | """Manages the imaging context. This takes a string and returns a dictionary containing:
* Predict function
* Invert function
* image_iterator function
* vis_iterator function
"""
__all__ = ['predict_list_serial_workflow', 'invert_list_serial_workflow', 'residual_list_serial_workflow',
'restore_list_serial_workflow', 'deconvolve_list_serial_workflow',
'weight_list_serial_workflow',
'taper_list_serial_workflow', 'zero_list_serial_workflow', 'subtract_list_serial_workflow']
import collections
import logging
import numpy
from data_models.memory_data_models import Image, Visibility, BlockVisibility
from data_models.parameters import get_parameter
from processing_library.image.operations import copy_image, create_empty_image_like
from workflows.shared.imaging.imaging_shared import imaging_context
from workflows.shared.imaging.imaging_shared import sum_invert_results, remove_sumwt, sum_predict_results, \
threshold_list
from wrappers.serial.griddata.gridding import grid_weight_to_griddata, griddata_reweight, griddata_merge_weights
from wrappers.serial.griddata.kernels import create_pswf_convolutionfunction
from wrappers.serial.griddata.operations import create_griddata_from_image
from wrappers.serial.image.deconvolution import deconvolve_cube, restore_cube
from wrappers.serial.image.gather_scatter import image_scatter_facets, image_gather_facets, \
image_scatter_channels, image_gather_channels
from wrappers.serial.image.operations import calculate_image_frequency_moments
from wrappers.serial.imaging.base import normalize_sumwt
from wrappers.serial.imaging.weighting import taper_visibility_gaussian
from wrappers.serial.visibility.base import copy_visibility, create_visibility_from_rows
from wrappers.serial.visibility.gather_scatter import visibility_scatter, visibility_gather
log = logging.getLogger(__name__)
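# A minimal usage sketch (illustrative, not part of the original module) of the
# context dictionary described in the module docstring; the keys shown are the
# ones the workflows below actually read:
#     c = imaging_context('2d')
#     predict, invert, vis_iter = c['predict'], c['invert'], c['vis_iterator']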
def predict_list_serial_workflow(vis_list, model_imagelist, context, vis_slices=1, facets=1,
gcfcf=None, **kwargs):
"""Predict, iterating over both the scattered vis_list and image
The visibility and image are scattered, the visibility is predicted on each part, and then the
parts are assembled.
:param vis_list:
:param model_imagelist: Model used to determine image parameters
:param vis_slices: Number of vis slices (w stack or timeslice)
:param facets: Number of facets (per axis)
:param context: Type of processing e.g. 2d, wstack, timeslice or facets
    :param gcfcf: tuple containing grid correction and convolution function
:param kwargs: Parameters for functions in components
:return: List of vis_lists
"""
assert len(vis_list) == len(model_imagelist), "Model must be the same length as the vis_list"
# Predict_2d does not clear the vis so we have to do it here.
vis_list = zero_list_serial_workflow(vis_list)
c = imaging_context(context)
vis_iter = c['vis_iterator']
predict = c['predict']
if facets % 2 == 0 or facets == 1:
actual_number_facets = facets
else:
actual_number_facets = facets - 1
def predict_ignore_none(vis, model, g):
if vis is not None:
assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), vis
assert isinstance(model, Image), model
return predict(vis, model, context=context, gcfcf=g, **kwargs)
else:
return None
if gcfcf is None:
gcfcf = [create_pswf_convolutionfunction(m) for m in model_imagelist]
# Loop over all frequency windows
if facets == 1:
image_results_list = list()
for ivis, sub_vis_list in enumerate(vis_list):
if len(gcfcf) > 1:
g = gcfcf[ivis]
else:
g = gcfcf[0]
# Loop over sub visibility
vis_predicted = copy_visibility(sub_vis_list, zero=True)
for rows in vis_iter(sub_vis_list, vis_slices):
row_vis = create_visibility_from_rows(sub_vis_list, rows)
row_vis_predicted = predict_ignore_none(row_vis, model_imagelist[ivis], g)
if row_vis_predicted is not None:
vis_predicted.data['vis'][rows, ...] = row_vis_predicted.data['vis']
image_results_list.append(vis_predicted)
return image_results_list
else:
image_results_list = list()
for ivis, sub_vis_list in enumerate(vis_list):
# Create the graph to divide an image into facets. This is by reference.
facet_lists = image_scatter_facets(model_imagelist[ivis], facets=facets)
facet_vis_lists = list()
sub_vis_lists = visibility_scatter(sub_vis_list, vis_iter, vis_slices)
# Loop over sub visibility
for sub_vis_list in sub_vis_lists:
facet_vis_results = list()
# Loop over facets
for facet_list in facet_lists:
# Predict visibility for this subvisibility from this facet
facet_vis_list = predict_ignore_none(sub_vis_list, facet_list,
None)
facet_vis_results.append(facet_vis_list)
# Sum the current sub-visibility over all facets
facet_vis_lists.append(sum_predict_results(facet_vis_results))
# Sum all sub-visibilties
image_results_list.append(visibility_gather(facet_vis_lists, sub_vis_list, vis_iter))
return image_results_list
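# Illustrative call (hypothetical names, assuming vis_list and model_list were
# built elsewhere with one entry per frequency window):
#     predicted_vis = predict_list_serial_workflow(vis_list, model_list,
#                                                  context='timeslice', vis_slices=8)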
def invert_list_serial_workflow(vis_list, template_model_imagelist, dopsf=False, normalize=True,
facets=1, vis_slices=1, context='2d', gcfcf=None, **kwargs):
""" Sum results from invert, iterating over the scattered image and vis_list
:param vis_list:
:param template_model_imagelist: Model used to determine image parameters
:param dopsf: Make the PSF instead of the dirty image
:param facets: Number of facets
:param normalize: Normalize by sumwt
:param vis_slices: Number of slices
:param context: Imaging context
    :param gcfcf: tuple containing grid correction and convolution function
:param kwargs: Parameters for functions in components
:return: List of (image, sumwt) tuple
"""
if not isinstance(template_model_imagelist, collections.Iterable):
template_model_imagelist = [template_model_imagelist]
c = imaging_context(context)
vis_iter = c['vis_iterator']
invert = c['invert']
if facets % 2 == 0 or facets == 1:
actual_number_facets = facets
else:
actual_number_facets = max(1, (facets - 1))
def gather_image_iteration_results(results, template_model):
result = create_empty_image_like(template_model)
i = 0
sumwt = numpy.zeros([template_model.nchan, template_model.npol])
for dpatch in image_scatter_facets(result, facets=facets):
assert i < len(results), "Too few results in gather_image_iteration_results"
if results[i] is not None:
assert len(results[i]) == 2, results[i]
dpatch.data[...] = results[i][0].data[...]
sumwt += results[i][1]
i += 1
return result, sumwt
def invert_ignore_none(vis, model, gg):
if vis is not None:
return invert(vis, model, context=context, dopsf=dopsf, normalize=normalize,
gcfcf=gg, **kwargs)
else:
return create_empty_image_like(model), numpy.zeros([model.nchan, model.npol])
# If we are doing facets, we need to create the gcf for each image
if gcfcf is None and facets == 1:
gcfcf = [create_pswf_convolutionfunction(template_model_imagelist[0])]
# Loop over all vis_lists independently
results_vislist = list()
if facets == 1:
for ivis, sub_vis_list in enumerate(vis_list):
if len(gcfcf) > 1:
g = gcfcf[ivis]
else:
g = gcfcf[0]
# Iterate within each vis_list
result_image = create_empty_image_like(template_model_imagelist[ivis])
result_sumwt = numpy.zeros([template_model_imagelist[ivis].nchan,
template_model_imagelist[ivis].npol])
for rows in vis_iter(sub_vis_list, vis_slices):
row_vis = create_visibility_from_rows(sub_vis_list, rows)
result = invert_ignore_none(row_vis, template_model_imagelist[ivis], g)
if result is not None:
result_image.data += result[1][:, :, numpy.newaxis, numpy.newaxis] * result[0].data
result_sumwt += result[1]
result_image = normalize_sumwt(result_image, result_sumwt)
results_vislist.append((result_image, result_sumwt))
else:
for ivis, sub_vis_list in enumerate(vis_list):
# Create the graph to divide an image into facets. This is by reference.
facet_lists = image_scatter_facets(template_model_imagelist[ivis],
facets=facets)
# Create the graph to divide the visibility into slices. This is by copy.
sub_sub_vis_lists = visibility_scatter(sub_vis_list, vis_iter, vis_slices=vis_slices)
# Iterate within each vis_list
vis_results = list()
for sub_sub_vis_list in sub_sub_vis_lists:
facet_vis_results = list()
for facet_list in facet_lists:
facet_vis_results.append(invert_ignore_none(sub_sub_vis_list, facet_list, None))
vis_results.append(gather_image_iteration_results(facet_vis_results,
template_model_imagelist[ivis]))
results_vislist.append(sum_invert_results(vis_results))
return results_vislist
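# Illustrative call (hypothetical names); each element of the result is an
# (image, sumwt) tuple for the matching entry of vis_list, and dopsf=True
# makes the PSF instead of the dirty image:
#     dirty_list = invert_list_serial_workflow(vis_list, model_list, context='2d')
#     psf_list = invert_list_serial_workflow(vis_list, model_list, context='2d', dopsf=True)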
def residual_list_serial_workflow(vis, model_imagelist, context='2d', gcfcf=None, **kwargs):
""" Create a graph to calculate residual image
:param vis:
:param model_imagelist: Model used to determine image parameters
:param context:
    :param gcfcf: tuple containing grid correction and convolution function
:param kwargs: Parameters for functions in components
:return:
"""
model_vis = zero_list_serial_workflow(vis)
model_vis = predict_list_serial_workflow(model_vis, model_imagelist, context=context,
gcfcf=gcfcf, **kwargs)
residual_vis = subtract_list_serial_workflow(vis, model_vis)
result = invert_list_serial_workflow(residual_vis, model_imagelist, dopsf=False, normalize=True,
context=context,
gcfcf=gcfcf, **kwargs)
return result
def restore_list_serial_workflow(model_imagelist, psf_imagelist, residual_imagelist=None, **kwargs):
""" Create a graph to calculate the restored image
:param model_imagelist: Model list
:param psf_imagelist: PSF list
:param residual_imagelist: Residual list
:param kwargs: Parameters for functions in components
:return:
"""
if residual_imagelist is None:
residual_imagelist = []
if len(residual_imagelist) > 0:
return [restore_cube(model_imagelist[i], psf_imagelist[i][0],
residual_imagelist[i][0], **kwargs)
for i, _ in enumerate(model_imagelist)]
else:
return [restore_cube(model_imagelist[i], psf_imagelist[i][0], **kwargs)
for i, _ in enumerate(model_imagelist)]
def restore_list_serial_workflow_nosumwt(model_imagelist, psf_imagelist, residual_imagelist=None, **kwargs):
""" Create a graph to calculate the restored image
:param model_imagelist: Model list
:param psf_imagelist: PSF list (without the sumwt term)
:param residual_imagelist: Residual list (without the sumwt term)
:param kwargs: Parameters for functions in components
:return:
"""
if residual_imagelist is None:
residual_imagelist = []
if len(residual_imagelist) > 0:
return [restore_cube(model_imagelist[i], psf_imagelist[i],
residual_imagelist[i], **kwargs)
for i, _ in enumerate(model_imagelist)]
else:
return [restore_cube(model_imagelist[i], psf_imagelist[i], **kwargs)
for i, _ in enumerate(model_imagelist)]
def deconvolve_list_serial_workflow(dirty_list, psf_list, model_imagelist, prefix='', mask=None, **kwargs):
"""Create a graph for deconvolution, adding to the model
:param dirty_list:
:param psf_list:
:param model_imagelist:
:param prefix: Informative prefix to log messages
:param mask: Mask for deconvolution
:param kwargs: Parameters for functions in components
:return: (graph for the deconvolution, graph for the flat)
"""
nchan = len(dirty_list)
nmoment = get_parameter(kwargs, "nmoment", 0)
assert isinstance(dirty_list, list), dirty_list
assert isinstance(psf_list, list), psf_list
assert isinstance(model_imagelist, list), model_imagelist
def deconvolve(dirty, psf, model, facet, gthreshold, msk=None):
if prefix == '':
lprefix = "facet %d" % facet
else:
lprefix = "%s, facet %d" % (prefix, facet)
if nmoment > 0:
moment0 = calculate_image_frequency_moments(dirty)
this_peak = numpy.max(numpy.abs(moment0.data[0, ...])) / dirty.data.shape[0]
else:
ref_chan = dirty.data.shape[0] // 2
this_peak = numpy.max(numpy.abs(dirty.data[ref_chan, ...]))
if this_peak > 1.1 * gthreshold:
kwargs['threshold'] = gthreshold
result, _ = deconvolve_cube(dirty, psf, prefix=lprefix, mask=msk, **kwargs)
if result.data.shape[0] == model.data.shape[0]:
result.data += model.data
return result
else:
return copy_image(model)
deconvolve_facets = get_parameter(kwargs, 'deconvolve_facets', 1)
deconvolve_overlap = get_parameter(kwargs, 'deconvolve_overlap', 0)
deconvolve_taper = get_parameter(kwargs, 'deconvolve_taper', None)
if deconvolve_facets > 1 and deconvolve_overlap > 0:
deconvolve_number_facets = (deconvolve_facets - 2) ** 2
else:
deconvolve_number_facets = deconvolve_facets ** 2
model_imagelist = image_gather_channels(model_imagelist)
# Scatter the separate channel images into deconvolve facets and then gather channels for each facet.
# This avoids constructing the entire spectral cube.
dirty_list_trimmed = remove_sumwt(dirty_list)
scattered_channels_facets_dirty_list = \
[image_scatter_facets(d, facets=deconvolve_facets,
overlap=deconvolve_overlap,
taper=deconvolve_taper)
for d in dirty_list_trimmed]
# Now we do a transpose and gather
scattered_facets_list = [
image_gather_channels([scattered_channels_facets_dirty_list[chan][facet]
for chan in range(nchan)])
for facet in range(deconvolve_number_facets)]
psf_list_trimmed = remove_sumwt(psf_list)
psf_list_trimmed = image_gather_channels(psf_list_trimmed)
scattered_model_imagelist = \
image_scatter_facets(model_imagelist,
facets=deconvolve_facets,
overlap=deconvolve_overlap)
# Work out the threshold. Need to find global peak over all dirty_list images
threshold = get_parameter(kwargs, "threshold", 0.0)
fractional_threshold = get_parameter(kwargs, "fractional_threshold", 0.1)
nmoment = get_parameter(kwargs, "nmoment", 0)
use_moment0 = nmoment > 0
# Find the global threshold. This uses the peak in the average on the frequency axis since we
# want to use it in a stopping criterion in a moment clean
global_threshold = threshold_list(scattered_facets_list, threshold, fractional_threshold, use_moment0=use_moment0,
prefix=prefix)
facet_list = numpy.arange(deconvolve_number_facets).astype('int')
if mask is None:
scattered_results_list = [
deconvolve(d, psf_list_trimmed, m, facet, global_threshold)
for d, m, facet in zip(scattered_facets_list, scattered_model_imagelist, facet_list)]
else:
mask_list = \
image_scatter_facets(mask,
facets=deconvolve_facets,
overlap=deconvolve_overlap)
scattered_results_list = [
deconvolve(d, psf_list_trimmed, m, facet, global_threshold, msk)
for d, m, facet, msk in zip(scattered_facets_list, scattered_model_imagelist, facet_list, mask_list)]
    # Gather the results back into one image, correcting for overlaps as necessary. The taper function is used to
# feather the facets together
gathered_results_list = image_gather_facets(scattered_results_list, model_imagelist,
facets=deconvolve_facets,
overlap=deconvolve_overlap,
taper=deconvolve_taper)
return image_scatter_channels(gathered_results_list, subimages=nchan)
def deconvolve_channel_list_serial_workflow(dirty_list, psf_list, model_imagelist, subimages, **kwargs):
"""Create a graph for deconvolution by channels, adding to the model
Does deconvolution channel by channel.
:param subimages:
:param dirty_list:
:param psf_list: Must be the size of a facet
:param model_imagelist: Current model
:param kwargs: Parameters for functions in components
:return:
"""
def deconvolve_subimage(dirty, psf):
assert isinstance(dirty, Image)
assert isinstance(psf, Image)
comp = deconvolve_cube(dirty, psf, **kwargs)
return comp[0]
def add_model(sum_model, model):
        assert isinstance(sum_model, Image)
assert isinstance(model, Image)
sum_model.data += model.data
return sum_model
output = create_empty_image_like(model_imagelist)
dirty_lists = image_scatter_channels(dirty_list[0],
subimages=subimages)
results = [deconvolve_subimage(dirty_list, psf_list[0])
for dirty_list in dirty_lists]
result = image_gather_channels(results, output, subimages=subimages)
return add_model(result, model_imagelist)
def weight_list_serial_workflow(vis_list, model_imagelist, gcfcf=None, weighting='uniform', **kwargs):
""" Weight the visibility data
This is done collectively so the weights are summed over all vis_lists and then
corrected
:param vis_list:
:param model_imagelist: Model required to determine weighting parameters
:param weighting: Type of weighting
:param kwargs: Parameters for functions in graphs
:return: List of vis_graphs
"""
centre = len(model_imagelist) // 2
if gcfcf is None:
gcfcf = [create_pswf_convolutionfunction(model_imagelist[centre])]
def grid_wt(vis, model, g):
if vis is not None:
if model is not None:
griddata = create_griddata_from_image(model)
griddata = grid_weight_to_griddata(vis, griddata, g[0][1])
return griddata
else:
return None
else:
return None
weight_list = [grid_wt(vis_list[i], model_imagelist[i], gcfcf) for i in range(len(vis_list))]
merged_weight_grid = griddata_merge_weights(weight_list)
def re_weight(vis, model, gd, g):
if gd is not None:
if vis is not None:
# Ensure that the griddata has the right axes so that the convolution
# function mapping works
agd = create_griddata_from_image(model)
agd.data = gd[0].data
vis = griddata_reweight(vis, agd, g[0][1])
return vis
else:
return None
else:
return vis
return [re_weight(v, model_imagelist[i], merged_weight_grid, gcfcf)
for i, v in enumerate(vis_list)]
def taper_list_serial_workflow(vis_list, size_required):
"""Taper to desired size
:param vis_list:
:param size_required:
:return:
"""
return [taper_visibility_gaussian(v, beam=size_required) for v in vis_list]
def zero_list_serial_workflow(vis_list):
""" Initialise vis to zero: creates new data holders
:param vis_list:
:return: List of vis_lists
"""
def zero(vis):
if vis is not None:
zerovis = copy_visibility(vis)
zerovis.data['vis'][...] = 0.0
return zerovis
else:
return None
return [zero(v) for v in vis_list]
def subtract_list_serial_workflow(vis_list, model_vislist):
""" Initialise vis to zero
:param vis_list:
:param model_vislist: Model to be subtracted
:return: List of vis_lists
"""
def subtract_vis(vis, model_vis):
if vis is not None and model_vis is not None:
assert vis.vis.shape == model_vis.vis.shape
subvis = copy_visibility(vis)
subvis.data['vis'][...] -= model_vis.data['vis'][...]
return subvis
else:
return None
return [subtract_vis(vis=vis_list[i], model_vis=model_vislist[i]) for i in range(len(vis_list))]
| apache-2.0 |
anthill-platform/anthill-exec | anthill/exec/options.py | 1 | 1753 |
from anthill.common.options import define
# Main
define("host",
default="http://localhost:9507",
help="Public hostname of this service",
type=str)
define("listen",
default="port:9507",
help="Socket to listen. Could be a port number (port:N), or a unix domain socket (unix:PATH)",
type=str)
define("name",
default="exec",
help="Service short name. User to discover by discovery service.",
type=str)
# MySQL database
define("db_host",
default="localhost",
type=str,
help="MySQL database location")
define("db_username",
default="root",
type=str,
help="MySQL account username")
define("db_password",
default="",
type=str,
help="MySQL account password")
define("db_name",
default="dev_exec",
type=str,
help="MySQL database name")
# Regular cache
define("cache_host",
default="localhost",
help="Location of a regular cache (redis).",
group="cache",
type=str)
define("cache_port",
default=6379,
help="Port of regular cache (redis).",
group="cache",
type=int)
define("cache_db",
default=4,
help="Database of regular cache (redis).",
group="cache",
type=int)
define("cache_max_connections",
default=500,
help="Maximum connections to the regular cache (connection pool).",
group="cache",
type=int)
# JS
define("js_source_path",
default="/usr/local/anthill/exec-source",
help="Directory the source repositories will be pulled into",
type=str)
define("js_call_timeout",
default=10,
help="Maximum time limit for each script execution",
type=int)
| mit |
afloren/nipype | nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py | 9 | 1237 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.slicer.filtering.arithmetic import MultiplyScalarVolumes
def test_MultiplyScalarVolumes_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume1=dict(argstr='%s',
position=-3,
),
inputVolume2=dict(argstr='%s',
position=-2,
),
order=dict(argstr='--order %s',
),
outputVolume=dict(argstr='%s',
hash_files=False,
position=-1,
),
terminal_output=dict(nohash=True,
),
)
inputs = MultiplyScalarVolumes.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_MultiplyScalarVolumes_outputs():
output_map = dict(outputVolume=dict(position=-1,
),
)
outputs = MultiplyScalarVolumes.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
TeslaProject/external_chromium_org | build/android/pylib/instrumentation/test_result.py | 110 | 1183 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from pylib.base import base_test_result
class InstrumentationTestResult(base_test_result.BaseTestResult):
"""Result information for a single instrumentation test."""
def __init__(self, full_name, test_type, start_date, dur, log=''):
"""Construct an InstrumentationTestResult object.
Args:
full_name: Full name of the test.
test_type: Type of the test result as defined in ResultType.
start_date: Date in milliseconds when the test began running.
dur: Duration of the test run in milliseconds.
log: A string listing any errors.
"""
super(InstrumentationTestResult, self).__init__(full_name, test_type, log)
name_pieces = full_name.rsplit('#')
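    # e.g. 'org.chromium.FooTest#testBar' (illustrative) splits into class name
    # 'org.chromium.FooTest' and test name 'testBar'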
if len(name_pieces) > 1:
self._test_name = name_pieces[1]
self._class_name = name_pieces[0]
else:
self._class_name = full_name
self._test_name = full_name
self._start_date = start_date
self._dur = dur
def GetDur(self):
"""Get the test duration."""
return self._dur
| bsd-3-clause |
domijin/Pset | assignment1/q2_sigmoid.py | 1 | 1346 | import numpy as np
def sigmoid(x):
"""
Compute the sigmoid function for the input here.
"""
x = 1./(1 + np.exp(-x))
return x
def sigmoid_grad(f):
"""
Compute the gradient for the sigmoid function here. Note that
for this implementation, the input f should be the sigmoid
function value of your original input x.
"""
f = f*(1-f)
return f
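# Since d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)), passing f = sigmoid(x)
# lets the gradient be computed without evaluating exp() again.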
def test_sigmoid_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
# print "Running basic tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
# print f
assert np.amax(f - np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-6
# print g
assert np.amax(g - np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-6
# print "You should verify these results!\n"
def test_sigmoid():
"""
Use this space to test your sigmoid implementation by running:
python q2_sigmoid.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
# print "Running your tests..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
test_sigmoid_basic();
# test_sigmoid()
| mit |
google/llvm-propeller | lldb/test/API/python_api/watchpoint/condition/TestWatchpointConditionAPI.py | 4 | 3374 | """
Test watchpoint condition API.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class WatchpointConditionAPITestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Our simple source filename.
self.source = 'main.cpp'
# Find the line number to break inside main().
self.line = line_number(
self.source, '// Set break point at this line.')
# And the watchpoint variable declaration line number.
self.decl = line_number(self.source,
'// Watchpoint variable declaration.')
# Build dictionary to have unique executable names for each test
# method.
self.exe_name = self.testMethodName
self.d = {'CXX_SOURCES': self.source, 'EXE': self.exe_name}
def test_watchpoint_cond_api(self):
"""Test watchpoint condition API."""
self.build(dictionary=self.d)
self.setTearDownCleanup(dictionary=self.d)
exe = self.getBuildArtifact(self.exe_name)
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Now create a breakpoint on main.c.
breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
self.assertTrue(breakpoint and
breakpoint.GetNumLocations() == 1,
VALID_BREAKPOINT)
# Now launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
# We should be stopped due to the breakpoint. Get frame #0.
process = target.GetProcess()
self.assertTrue(process.GetState() == lldb.eStateStopped,
PROCESS_STOPPED)
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
frame0 = thread.GetFrameAtIndex(0)
# Watch 'global' for write.
value = frame0.FindValue('global', lldb.eValueTypeVariableGlobal)
error = lldb.SBError()
watchpoint = value.Watch(True, False, True, error)
self.assertTrue(value and watchpoint,
"Successfully found the variable and set a watchpoint")
self.DebugSBValue(value)
# Now set the condition as "global==5".
watchpoint.SetCondition('global==5')
self.expect(watchpoint.GetCondition(), exe=False,
startstr='global==5')
# Hide stdout if not running with '-t' option.
if not self.TraceOn():
self.HideStdout()
print(watchpoint)
# Continue. Expect the program to stop due to the variable being
# written to.
process.Continue()
if (self.TraceOn()):
lldbutil.print_stacktraces(process)
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonWatchpoint)
self.assertTrue(thread, "The thread stopped due to watchpoint")
self.DebugSBValue(value)
# Verify that the condition is met.
self.assertTrue(value.GetValueAsUnsigned() == 5)
| apache-2.0 |
jfantom/incubator-airflow | tests/contrib/hooks/test_spark_sql_hook.py | 16 | 3922 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import six
import sys
import unittest
from io import StringIO
from itertools import dropwhile
from mock import patch, call
from airflow import configuration, models
from airflow.utils import db
from airflow.contrib.hooks.spark_sql_hook import SparkSqlHook
def get_after(sentinel, iterable):
"Get the value after `sentinel` in an `iterable`"
truncated = dropwhile(lambda el: el != sentinel, iterable)
next(truncated)
return next(truncated)
class TestSparkSqlHook(unittest.TestCase):
_config = {
'conn_id': 'spark_default',
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'name': 'spark-job',
'num_executors': 10,
'verbose': True,
'sql': ' /path/to/sql/file.sql ',
'conf': 'key=value,PROP=VALUE'
}
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='spark_default', conn_type='spark',
host='yarn://yarn-master')
)
def test_build_command(self):
hook = SparkSqlHook(**self._config)
# The subprocess requires an array but we build the cmd by joining on a space
cmd = ' '.join(hook._prepare_command(""))
# Check all the parameters
assert "--executor-cores {}".format(self._config['executor_cores']) in cmd
assert "--executor-memory {}".format(self._config['executor_memory']) in cmd
assert "--keytab {}".format(self._config['keytab']) in cmd
assert "--name {}".format(self._config['name']) in cmd
assert "--num-executors {}".format(self._config['num_executors']) in cmd
sql_path = get_after('-f', hook._prepare_command(""))
assert self._config['sql'].strip() == sql_path
# Check if all config settings are there
for kv in self._config['conf'].split(","):
k, v = kv.split('=')
assert "--conf {0}={1}".format(k, v) in cmd
if self._config['verbose']:
assert "--verbose" in cmd
@patch('airflow.contrib.hooks.spark_sql_hook.subprocess.Popen')
def test_spark_process_runcmd(self, mock_popen):
# Given
mock_popen.return_value.stdout = six.StringIO('Spark-sql communicates using stdout')
mock_popen.return_value.stderr = six.StringIO('stderr')
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSqlHook(
conn_id='spark_default',
sql='SELECT 1'
)
with patch.object(hook.log, 'debug') as mock_debug:
with patch.object(hook.log, 'info') as mock_info:
hook.run_query()
mock_debug.assert_called_with(
'Spark-Sql cmd: %s',
['spark-sql', '-e', 'SELECT 1', '--master', 'yarn', '--name', 'default-name', '--verbose', '--queue', 'default']
)
mock_info.assert_called_with(
'Spark-sql communicates using stdout'
)
# Then
self.assertEqual(
mock_popen.mock_calls[0],
call(['spark-sql', '-e', 'SELECT 1', '--master', 'yarn', '--name', 'default-name', '--verbose', '--queue', 'default'], stderr=-2, stdout=-1)
)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
newemailjdm/scipy | scipy/integrate/tests/test_quadpack.py | 19 | 12363 | from __future__ import division, print_function, absolute_import
import sys
import math
import numpy as np
from numpy import sqrt, cos, sin, arctan, exp, log, pi, Inf
from numpy.testing import (assert_, TestCase, run_module_suite, dec,
assert_allclose, assert_array_less, assert_almost_equal)
from scipy.integrate import quad, dblquad, tplquad, nquad
from scipy._lib.six import xrange
try:
import ctypes
import ctypes.util
_ctypes_missing = False
except ImportError:
_ctypes_missing = True
try:
import scipy.integrate._test_multivariate as clib_test
_ctypes_multivariate_fail = False
except:
_ctypes_multivariate_fail = True
def assert_quad(value_and_err, tabled_value, errTol=1.5e-8):
value, err = value_and_err
assert_allclose(value, tabled_value, atol=err, rtol=0)
if errTol is not None:
assert_array_less(err, errTol)
class TestCtypesQuad(TestCase):
@dec.skipif(_ctypes_missing, msg="Ctypes library could not be found")
def setUp(self):
if sys.platform == 'win32':
if sys.version_info < (3, 5):
file = ctypes.util.find_msvcrt()
else:
file = 'api-ms-win-crt-math-l1-1-0.dll'
elif sys.platform == 'darwin':
file = 'libm.dylib'
else:
file = 'libm.so'
self.lib = ctypes.CDLL(file)
restype = ctypes.c_double
argtypes = (ctypes.c_double,)
for name in ['sin', 'cos', 'tan']:
func = getattr(self.lib, name)
func.restype = restype
func.argtypes = argtypes
@dec.skipif(_ctypes_missing, msg="Ctypes library could not be found")
def test_typical(self):
assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0])
assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0])
assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0])
#@dec.skipif(_ctypes_missing, msg="Ctypes library could not be found")
# This doesn't seem to always work. Need a better way to figure out
# whether the fast path is called.
@dec.knownfailureif(True, msg="Unreliable test, see ticket 1684.")
def test_improvement(self):
import time
start = time.time()
for i in xrange(100):
quad(self.lib.sin, 0, 100)
fast = time.time() - start
start = time.time()
for i in xrange(100):
quad(math.sin, 0, 100)
slow = time.time() - start
assert_(fast < 0.5*slow, (fast, slow))
class TestMultivariateCtypesQuad(TestCase):
@dec.skipif(_ctypes_missing or _ctypes_multivariate_fail,
msg="Compiled test functions not loaded")
def setUp(self):
self.lib = ctypes.CDLL(clib_test.__file__)
restype = ctypes.c_double
argtypes = (ctypes.c_int, ctypes.c_double)
for name in ['_multivariate_typical', '_multivariate_indefinite',
'_multivariate_sin']:
func = getattr(self.lib, name)
func.restype = restype
func.argtypes = argtypes
@dec.skipif(_ctypes_missing or _ctypes_multivariate_fail,
msg="Compiled test functions not loaded")
def test_typical(self):
# 1) Typical function with two extra arguments:
assert_quad(quad(self.lib._multivariate_typical, 0, pi, (2, 1.8)),
0.30614353532540296487)
@dec.skipif(_ctypes_missing or _ctypes_multivariate_fail,
msg="Compiled test functions not loaded")
def test_indefinite(self):
# 2) Infinite integration limits --- Euler's constant
assert_quad(quad(self.lib._multivariate_indefinite, 0, Inf),
0.577215664901532860606512)
@dec.skipif(_ctypes_missing or _ctypes_multivariate_fail,
msg="Compiled test functions not loaded")
def test_threadsafety(self):
# Ensure multivariate ctypes are threadsafe
def threadsafety(y):
return y + quad(self.lib._multivariate_sin, 0, 1)[0]
assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
@dec.skipif(_ctypes_missing or _ctypes_multivariate_fail,
msg="Compiled test functions not loaded")
def test_improvement(self):
def myfunc(x): # Euler's constant integrand
return -exp(-x)*log(x)
import time
start = time.time()
for i in xrange(20):
quad(self.lib._multivariate_indefinite, 0, 100)
fast = time.time() - start
start = time.time()
for i in xrange(20):
quad(myfunc, 0, 100)
slow = time.time() - start
# 2+ times faster speeds generated by nontrivial ctypes
# function (single variable)
assert_(fast < 0.5*slow, (fast, slow))
class TestQuad(TestCase):
def test_typical(self):
# 1) Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487)
def test_indefinite(self):
# 2) Infinite integration limits --- Euler's constant
def myfunc(x): # Euler's constant integrand
return -exp(-x)*log(x)
assert_quad(quad(myfunc, 0, Inf), 0.577215664901532860606512)
def test_singular(self):
# 3) Singular points in region of integration.
def myfunc(x):
if x > 0 and x < 2.5:
return sin(x)
elif x >= 2.5 and x <= 5.0:
return exp(-x)
else:
return 0.0
assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]),
1 - cos(2.5) + exp(-2.5) - exp(-5.0))
def test_sine_weighted_finite(self):
# 4) Sine weighted integral (finite limits)
def myfunc(x, a):
return exp(a*(x-1))
ome = 2.0**3.4
assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome),
(20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2))
def test_sine_weighted_infinite(self):
# 5) Sine weighted integral (infinite limits)
def myfunc(x, a):
return exp(-x*a)
a = 4.0
ome = 3.0
assert_quad(quad(myfunc, 0, Inf, args=a, weight='sin', wvar=ome),
ome/(a**2 + ome**2))
def test_cosine_weighted_infinite(self):
# 6) Cosine weighted integral (negative infinite limits)
def myfunc(x, a):
return exp(x*a)
a = 2.5
ome = 2.3
assert_quad(quad(myfunc, -Inf, 0, args=a, weight='cos', wvar=ome),
a/(a**2 + ome**2))
def test_algebraic_log_weight(self):
# 6) Algebraic-logarithmic weight.
def myfunc(x, a):
return 1/(1+x+2**(-a))
a = 1.5
assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
wvar=(-0.5, -0.5)),
pi/sqrt((1+2**(-a))**2 - 1))
def test_cauchypv_weight(self):
        # 7) Cauchy principal value weighting w(x) = 1/(x-c)
def myfunc(x, a):
return 2.0**(-a)/((x-1)**2+4.0**(-a))
a = 0.4
tabledValue = ((2.0**(-0.4)*log(1.5) -
2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) -
arctan(2.0**(a+2)) -
arctan(2.0**a)) /
(4.0**(-a) + 1))
assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
tabledValue, errTol=1.9e-8)
def test_double_integral(self):
# 8) Double Integral test
def simpfunc(y, x): # Note order of arguments.
return x+y
a, b = 1.0, 2.0
assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x),
5/6.0 * (b**3.0-a**3.0))
def test_triple_integral(self):
# 9) Triple Integral test
def simpfunc(z, y, x): # Note order of arguments.
return x+y+z
a, b = 1.0, 2.0
assert_quad(tplquad(simpfunc, a, b,
lambda x: x, lambda x: 2*x,
lambda x, y: x - y, lambda x, y: x + y),
8/3.0 * (b**4.0 - a**4.0))
class TestNQuad(TestCase):
def test_fixed_limits(self):
def func1(x0, x1, x2, x3):
val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) +
(1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0))
return val
def opts_basic(*args):
return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]}
res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]],
opts=[opts_basic, {}, {}, {}])
assert_quad(res, 1.5267454070738635)
def test_variable_limits(self):
scale = .1
def func2(x0, x1, x2, x3, t0, t1):
val = (x0*x1*x3**2 + np.sin(x2) + 1 +
(1 if x0 + t1*x1 - t0 > 0 else 0))
return val
def lim0(x1, x2, x3, t0, t1):
return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
def lim1(x2, x3, t0, t1):
return [scale * (t0*x2 + t1*x3) - 1,
scale * (t0*x2 + t1*x3) + 1]
def lim2(x3, t0, t1):
return [scale * (x3 + t0**2*t1**3) - 1,
scale * (x3 + t0**2*t1**3) + 1]
def lim3(t0, t1):
return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1]
def opts0(x1, x2, x3, t0, t1):
return {'points': [t0 - t1*x1]}
def opts1(x2, x3, t0, t1):
return {}
def opts2(x3, t0, t1):
return {}
def opts3(t0, t1):
return {}
res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0),
opts=[opts0, opts1, opts2, opts3])
assert_quad(res, 25.066666666666663)
def test_square_separate_ranges_and_opts(self):
def f(y, x):
return 1.0
assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0)
def test_square_aliased_ranges_and_opts(self):
def f(y, x):
return 1.0
r = [-1, 1]
opt = {}
assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0)
def test_square_separate_fn_ranges_and_opts(self):
def f(y, x):
return 1.0
def fn_range0(*args):
return (-1, 1)
def fn_range1(*args):
return (-1, 1)
def fn_opt0(*args):
return {}
def fn_opt1(*args):
return {}
ranges = [fn_range0, fn_range1]
opts = [fn_opt0, fn_opt1]
assert_quad(nquad(f, ranges, opts=opts), 4.0)
def test_square_aliased_fn_ranges_and_opts(self):
def f(y, x):
return 1.0
def fn_range(*args):
return (-1, 1)
def fn_opt(*args):
return {}
ranges = [fn_range, fn_range]
opts = [fn_opt, fn_opt]
assert_quad(nquad(f, ranges, opts=opts), 4.0)
def test_matching_quad(self):
def func(x):
return x**2 + 1
res, reserr = quad(func, 0, 4)
res2, reserr2 = nquad(func, ranges=[[0, 4]])
assert_almost_equal(res, res2)
assert_almost_equal(reserr, reserr2)
def test_matching_dblquad(self):
def func2d(x0, x1):
return x0**2 + x1**3 - x0 * x1 + 1
res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3)
res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)])
assert_almost_equal(res, res2)
assert_almost_equal(reserr, reserr2)
def test_matching_tplquad(self):
def func3d(x0, x1, x2, c0, c1):
return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2)
res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2,
lambda x, y: -np.pi, lambda x, y: np.pi,
args=(2, 3))
res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3))
assert_almost_equal(res, res2)
def test_dict_as_opts(self):
try:
out = nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001})
        except TypeError:
assert False
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
blueskycoco/rt-thread | bsp/stm32/stm32f103-onenet-nbiot/rtconfig.py | 14 | 4024 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the path where the compiler is installed, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
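# Default build profile; switch to 'release' to drop the debug flags added below.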
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M3 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
def dist_handle(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
| gpl-2.0 |
CodeRiderz/rojak | rojak-analyzer/show_data_stats.py | 4 | 2488 | import csv
from collections import Counter
import re
from bs4 import BeautifulSoup
csv_file = open('data_detikcom_labelled_740.csv')
csv_reader = csv.DictReader(csv_file)
words = []
docs = []
label_counter = {}
unique_label_counter = {}
for row in csv_reader:
title = row['title'].strip().lower()
raw_content = row['raw_content']
clean_content = BeautifulSoup(raw_content, 'lxml').text
# Compile regex to remove non-alphanum char
nonalpha = re.compile('[\W_]+')
for word in title.split(' '):
word = word.lower()
word = nonalpha.sub('', word)
if word != '':
words.append(word)
for word in clean_content.split(' '):
word = word.lower()
word = nonalpha.sub('', word)
if word != '':
words.append(word)
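    # Each row may carry up to three sentiment labels; count each label on its
    # own and also the sorted, comma-joined combination per article.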
labels = []
sentiment_1 = row['sentiment_1']
if sentiment_1 != '':
labels.append(sentiment_1)
if sentiment_1 in unique_label_counter:
unique_label_counter[sentiment_1] += 1
else:
unique_label_counter[sentiment_1] = 1
sentiment_2 = row['sentiment_2']
if sentiment_2 != '':
labels.append(sentiment_2)
if sentiment_2 in unique_label_counter:
unique_label_counter[sentiment_2] += 1
else:
unique_label_counter[sentiment_2] = 1
sentiment_3 = row['sentiment_3']
if sentiment_3 != '':
labels.append(sentiment_3)
if sentiment_3 in unique_label_counter:
unique_label_counter[sentiment_3] += 1
else:
unique_label_counter[sentiment_3] = 1
label_name = ','.join(sorted(labels))
if label_name != '':
if label_name in label_counter:
label_counter[label_name] += 1
else:
label_counter[label_name] = 1
else:
print 'WARNING: "{}" does not have label'.format(title)
print 'Unique label statistics:'
for key in sorted(unique_label_counter):
label_name = key
label_count = unique_label_counter[key]
print '{}: {}'.format(label_name, label_count)
print ''
print 'Label statistics:'
total_data = 0
for key in label_counter:
label_name = key
label_count = label_counter[key]
total_data += label_count
print '{}: {}'.format(label_name, label_count)
print 'Total data:', total_data
print ''
counter = Counter(words)
print '10 Most common words:'
for word in counter.most_common(10):
print '{},{}'.format(word[0], word[1])
csv_file.close()
| bsd-3-clause |
Arakmar/Sick-Beard | lib/imdb/linguistics.py | 50 | 9220 | """
linguistics module (imdb package).
This module provides functions and data to handle in a smart way
languages and articles (in various languages) at the beginning of movie titles.
Copyright 2009-2012 Davide Alberani <[email protected]>
2012 Alberto Malagoli <albemala AT gmail.com>
2009 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# List of generic articles used when the language of the title is unknown (or
# we don't have information about articles in that language).
# XXX: Managing titles in a lot of different languages, a function to recognize
# an initial article can't be perfect; sometimes we'll stumble upon a short
# word that is an article in some language, but it's not in another; in these
# situations we have to choose if we want to interpret this little word
# as an article or not (remember that we don't know what the original language
# of the title was).
# Example: 'en' is (I suppose) an article in Some Language. Unfortunately it
# seems also to be a preposition in other languages (French?).
# Running a script over the whole list of titles (and aliases), I've found
# that 'en' is used as an article only 376 times, and as another thing 594
# times, so I've decided to _always_ consider 'en' as a non-article.
#
# Here is a list of words that are _never_ considered as articles, complete
# with the count of times they are used in one way or another:
# 'en' (376 vs 594), 'to' (399 vs 727), 'as' (198 vs 276), 'et' (79 vs 99),
# 'des' (75 vs 150), 'al' (78 vs 304), 'ye' (14 vs 70),
# 'da' (23 vs 298), "'n" (8 vs 12)
#
# I've left in the list 'i' (1939 vs 2151) and 'uno' (52 vs 56)
# I'm not sure what '-al' is, and so I've left it out...
#
# Generic list of articles in utf-8 encoding:
GENERIC_ARTICLES = ('the', 'la', 'a', 'die', 'der', 'le', 'el',
"l'", 'il', 'das', 'les', 'i', 'o', 'ein', 'un', 'de', 'los',
'an', 'una', 'las', 'eine', 'den', 'het', 'gli', 'lo', 'os',
'ang', 'oi', 'az', 'een', 'ha-', 'det', 'ta', 'al-',
'mga', "un'", 'uno', 'ett', 'dem', 'egy', 'els', 'eines',
'\xc3\x8f', '\xc3\x87', '\xc3\x94\xc3\xaf', '\xc3\x8f\xc3\xa9')
# Lists of articles separated by language. If possible, the list should
# be sorted by frequency (not very important, but...)
# If you want to add a list of articles for another language, mail it
# to [email protected]; non-ascii articles must be utf-8
# encoded.
LANG_ARTICLES = {
'English': ('the', 'a', 'an'),
'Italian': ('la', 'le', "l'", 'il', 'i', 'un', 'una', 'gli', 'lo', "un'",
'uno'),
'Spanish': ('la', 'le', 'el', 'les', 'un', 'los', 'una', 'uno', 'unos',
'unas'),
'Portuguese': ('a', 'as', 'o', 'os', 'um', 'uns', 'uma', 'umas'),
'Turkish': (), # Some languages doesn't have articles.
}
LANG_ARTICLESget = LANG_ARTICLES.get
# Maps a language to countries where it is the main language.
# If you want to add an entry for another language or country, mail it to
# [email protected] .
LANG_COUNTRIES = {
'English': ('Canada', 'Swaziland', 'Ghana', 'St. Lucia', 'Liberia', 'Jamaica', 'Bahamas', 'New Zealand', 'Lesotho', 'Kenya', 'Solomon Islands', 'United States', 'South Africa', 'St. Vincent and the Grenadines', 'Fiji', 'UK', 'Nigeria', 'Australia', 'USA', 'St. Kitts and Nevis', 'Belize', 'Sierra Leone', 'Gambia', 'Namibia', 'Micronesia', 'Kiribati', 'Grenada', 'Antigua and Barbuda', 'Barbados', 'Malta', 'Zimbabwe', 'Ireland', 'Uganda', 'Trinidad and Tobago', 'South Sudan', 'Guyana', 'Botswana', 'United Kingdom', 'Zambia'),
'Italian': ('Italy', 'San Marino', 'Vatican City'),
'Spanish': ('Spain', 'Mexico', 'Argentina', 'Bolivia', 'Guatemala', 'Uruguay', 'Peru', 'Cuba', 'Dominican Republic', 'Panama', 'Costa Rica', 'Ecuador', 'El Salvador', 'Chile', 'Equatorial Guinea', 'Spain', 'Colombia', 'Nicaragua', 'Venezuela', 'Honduras', 'Paraguay'),
'French': ('Cameroon', 'Burkina Faso', 'Dominica', 'Gabon', 'Monaco', 'France', "Cote d'Ivoire", 'Benin', 'Togo', 'Central African Republic', 'Mali', 'Niger', 'Congo, Republic of', 'Guinea', 'Congo, Democratic Republic of the', 'Luxembourg', 'Haiti', 'Chad', 'Burundi', 'Madagascar', 'Comoros', 'Senegal'),
'Portuguese': ('Portugal', 'Brazil', 'Sao Tome and Principe', 'Cape Verde', 'Angola', 'Mozambique', 'Guinea-Bissau'),
'German': ('Liechtenstein', 'Austria', 'West Germany', 'Switzerland', 'East Germany', 'Germany'),
'Arabic': ('Saudi Arabia', 'Kuwait', 'Jordan', 'Oman', 'Yemen', 'United Arab Emirates', 'Mauritania', 'Lebanon', 'Bahrain', 'Libya', 'Palestinian State (proposed)', 'Qatar', 'Algeria', 'Morocco', 'Iraq', 'Egypt', 'Djibouti', 'Sudan', 'Syria', 'Tunisia'),
'Turkish': ('Turkey', 'Azerbaijan'),
'Swahili': ('Tanzania',),
'Swedish': ('Sweden',),
'Icelandic': ('Iceland',),
'Estonian': ('Estonia',),
'Romanian': ('Romania',),
'Samoan': ('Samoa',),
'Slovenian': ('Slovenia',),
'Tok Pisin': ('Papua New Guinea',),
'Palauan': ('Palau',),
'Macedonian': ('Macedonia',),
'Hindi': ('India',),
'Dutch': ('Netherlands', 'Belgium', 'Suriname'),
'Marshallese': ('Marshall Islands',),
'Korean': ('Korea, North', 'Korea, South', 'North Korea', 'South Korea'),
'Vietnamese': ('Vietnam',),
'Danish': ('Denmark',),
'Khmer': ('Cambodia',),
'Lao': ('Laos',),
'Somali': ('Somalia',),
'Filipino': ('Philippines',),
'Hungarian': ('Hungary',),
'Ukrainian': ('Ukraine',),
'Bosnian': ('Bosnia and Herzegovina',),
'Georgian': ('Georgia',),
'Lithuanian': ('Lithuania',),
'Malay': ('Brunei',),
'Tetum': ('East Timor',),
'Norwegian': ('Norway',),
'Armenian': ('Armenia',),
'Russian': ('Russia',),
'Slovak': ('Slovakia',),
'Thai': ('Thailand',),
'Croatian': ('Croatia',),
'Turkmen': ('Turkmenistan',),
'Nepali': ('Nepal',),
'Finnish': ('Finland',),
'Uzbek': ('Uzbekistan',),
'Albanian': ('Albania', 'Kosovo'),
'Hebrew': ('Israel',),
'Bulgarian': ('Bulgaria',),
'Greek': ('Cyprus', 'Greece'),
'Burmese': ('Myanmar',),
'Latvian': ('Latvia',),
'Serbian': ('Serbia',),
'Afar': ('Eritrea',),
'Catalan': ('Andorra',),
'Chinese': ('China', 'Taiwan'),
'Czech': ('Czech Republic', 'Czechoslovakia'),
'Bislama': ('Vanuatu',),
'Japanese': ('Japan',),
'Kinyarwanda': ('Rwanda',),
'Amharic': ('Ethiopia',),
'Persian': ('Afghanistan', 'Iran'),
'Tajik': ('Tajikistan',),
'Mongolian': ('Mongolia',),
'Dzongkha': ('Bhutan',),
'Urdu': ('Pakistan',),
'Polish': ('Poland',),
'Sinhala': ('Sri Lanka',),
}
# Maps countries to their main language.
COUNTRY_LANG = {}
for lang in LANG_COUNTRIES:
for country in LANG_COUNTRIES[lang]:
COUNTRY_LANG[country] = lang
def toUnicode(articles):
"""Convert a list of articles utf-8 encoded to unicode strings."""
return tuple([art.decode('utf_8') for art in articles])
def toDicts(articles):
"""Given a list of utf-8 encoded articles, build two dictionary (one
utf-8 encoded and another one with unicode keys) for faster matches."""
uArticles = toUnicode(articles)
return dict([(x, x) for x in articles]), dict([(x, x) for x in uArticles])
def addTrailingSpace(articles):
"""From the given list of utf-8 encoded articles, return two
lists (one utf-8 encoded and another one in unicode) where a space
is added at the end - if the last char is not ' or -."""
_spArticles = []
_spUnicodeArticles = []
for article in articles:
if article[-1] not in ("'", '-'):
article += ' '
_spArticles.append(article)
_spUnicodeArticles.append(article.decode('utf_8'))
return _spArticles, _spUnicodeArticles
# Caches.
_ART_CACHE = {}
_SP_ART_CACHE = {}
def articlesDictsForLang(lang):
"""Return dictionaries of articles specific for the given language, or the
default one if the language is not known."""
if lang in _ART_CACHE:
return _ART_CACHE[lang]
artDicts = toDicts(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
_ART_CACHE[lang] = artDicts
return artDicts
def spArticlesForLang(lang):
"""Return lists of articles (plus optional spaces) specific for the
given language, or the default one if the language is not known."""
if lang in _SP_ART_CACHE:
return _SP_ART_CACHE[lang]
spArticles = addTrailingSpace(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
_SP_ART_CACHE[lang] = spArticles
return spArticles
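# A minimal, hypothetical usage sketch (not part of this module's API; the real
# consumers live elsewhere in the package):
#
#   def starts_with_article(title, lang=None):
#       spArticles, spUnicodeArticles = spArticlesForLang(lang)
#       lowered = title.lower()
#       return any(lowered.startswith(art) for art in spUnicodeArticles)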
| gpl-3.0 |
rwl/PyCIM | CIM15/CDPSM/Geographical/IEC61970/Wires/PowerTransformer.py | 1 | 5736 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.CDPSM.Geographical.IEC61970.Core.ConductingEquipment import ConductingEquipment
class PowerTransformer(ConductingEquipment):
"""An electrical device consisting of two or more coupled windings, with or without a magnetic core, for introducing mutual coupling between electric circuits. Transformers can be used to control voltage and phase shift (active power flow). A power transformer may be composed of separate transformer tanks that need not be identical. The same power transformer can be modelled in two ways, namely with and without tanks: <ol> <li>The power transformer that uses power transformer ends directly (without tanks) is suitable for balanced three-phase models. This is typical for transmission and sub-transmission network modelling. Such a transformer will require one power transformer end for each physical winding. There must be a one-to-one association between PowerTransformerEnd and Core::Terminal.</li> <li>The power transformer that uses transformer tanks is suitable for an unbalanced transformer, a balanced transformer within a single tank, or a balanced transformer made up of three tanks. This is typical for distribution network modelling and the only choice when modelling an unbalanced transformer, or a transformer that has more than three windings. Power transformer modelled with tanks will require for each tank, one transformer tank end per physical winding in the tank. There may be one, two, or three phases in the transformer tank end. Examples: 3 phases for 3-phase delta or wye connected windings, 2 for one phase-to-phase winding, and 1 for a phase-to-neutral or phase-to-ground winding. With 1 or 2 phases, more than one transformer tank end may be associated to the same 3-phase Core::Terminal instance, while with 3 phases there should be a one-to-one association.</li> </ol> This power transformer model is flexible in order to support different kinds of data exchange requirements. There are 5 possible ways to combine available classes and their attributes: <ol> <li>Instance parameters - Use the r, x, r0, x0, b, b0, g, and g0 attributes on PowerTransformerEnd and ignore related TransformerStarImpedance, TransformerMeshImpedance, or TransformerCoreAdmittance. This option assumes a star connection of the series impedances. It is suitable for typical transmission, balanced three-phase transformer models, for transformers with 2 or three windings.</li> <li>Star instance parameters by association - Instead of the r, x, r0, x0, b, b0, g, and g0 attributes, use associations to TransformerStarImpedance and TransformerCoreAdmitance. This option is suitable in same scenarios as option 1, but when catalogue data is available for transformers.</li> <li>Mesh instance parameters by association: Instead of the r, x, r0, x0, b, b0, g, and g0 attributes, use associations to TransformerMeshImpedance and TransformerCoreAdmittance. This option supports transformers with more than three windings.</li> <li>Catalog mesh parameters by association - Instead of attributes r, x, r0, x0, b, b0, g, and g0 and associations to TransformerStarImpedance, TransformerMeshImpedance, or TransformerCoreAdmittance, use the association to TransformerEndInfo. The TransformerEnd.endNumber should match the corresponding TransformerEndInfo.endNumber, following the IEC standard convention of numbering from the highest voltage ends to the lowest, starting at 1. This matching supports higher-level use of a catalog, through just one association between TransformerTank and TransformerTankInfo, with simpler exchanges and incremental updates. 
The associated TransformerEndInfo will have associations to TransformerMeshImpedance and TransformerCoreAdmittance. This option supports unbalanced transformer, with more than three windings and is suitable whenever the transformer test data has been converted to an electrical model.</li> <li>Catalog test data by association - This is the same as option 4, except TransformerEndInfo will have associations to AssetModels::TransformerTest decendents, instead of to TransformerMeshImpedance and TransformerCoreAdmittance. This option is suitable when the test data is available, and the receiving application is able to interpret the test data.</li> </ol> Every profile should specify which one or more of these options are supported.
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'PowerTransformer' instance.
"""
super(PowerTransformer, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
| mit |
cpe/VAMDC-VALD | nodes/tipbase/node/queryfunc.py | 2 | 9280 | # -*- coding: utf-8 -*-
#
# This module (which must have the name queryfunc.py) is responsible
# for converting incoming queries to a database query understood by
# this particular node's database schema.
#
# This module must contain a function setupResults, taking a sql object
# as its only argument.
#
# library imports
import sys
from itertools import chain
from django.conf import settings
from vamdctap.sqlparse import sql2Q
from django.db.models import Q
from django.db import connection
import dictionaries
import models as django_models
import util_models as util_models
if hasattr(settings,'LAST_MODIFIED'):
LAST_MODIFIED = settings.LAST_MODIFIED
else: LAST_MODIFIED = None
def replacements(sql):
pfx = 'target.'
restrictables = ['IonCharge', 'AtomSymbol', 'AtomNuclearCharge']
for r in restrictables :
if pfx+r in sql:
sql=sql.replace(pfx+r, r)
return sql
def setupResults(sql):
"""
Return results for request
@type sql: string
@param sql: vss request
@type limit: int
@param limit: maximum number of results
@rtype: dict
    @return: dictionary containing data
"""
result = None
# return all species
if str(sql).strip().lower() == 'select species':
result = setupSpecies()
# all other requests
else:
result = setupVssRequest(sql)
if isinstance(result, util_models.Result) :
return result.getResult()
else:
raise Exception('error while generating result')
def setupVssRequest(sql, limit=10000):
"""
Execute a vss request
@type sql: string
@param sql: vss request
@rtype: util_models.Result
@return: Result object
"""
# convert the incoming sql to a correct django query syntax object
# based on the RESTRICTABLES dictionary in dictionaries.py
q = sql2Q(sql)
transs = django_models.Collisionaltransition.objects.filter(q)
# count the number of matches, make a simple trunkation if there are
# too many (record the coverage in the returned header)
ncoll=transs.count()
if limit < ncoll :
transs, percentage = truncateTransitions(transs, q, limit)
else:
percentage=None
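    # resolve the species, their atomic states and the bibliographic sources
    # referenced by the selected transitions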
species, nstates, sourceids = getSpeciesWithStates(transs)
# electron collider
particles = getParticles()
# cross sections
states = []
for specie in species:
states.extend(specie.States)
    nspecies = species.count()
sources = getSources(sourceids)
nsources = sources.count()
# Create the result object
result = util_models.Result()
result.addHeaderField('TRUNCATED', percentage)
result.addHeaderField('COUNT-STATES',nstates)
result.addHeaderField('COUNT-COLLISIONS',ncoll)
result.addHeaderField('COUNT-SPECIES',nspecies)
result.addHeaderField('COUNT-SOURCES',nsources)
if LAST_MODIFIED is not None :
result.addHeaderField('LAST-MODIFIED',LAST_MODIFIED)
if(nstates == 0 and nspecies == 0):
result.addHeaderField('APPROX-SIZE', 0)
if ncoll > 0 :
result.addDataField('CollTrans',transs)
result.addDataField('Particles',particles)
result.addDataField('Atoms',species)
result.addDataField('Sources',sources)
return result
def setupSpecies():
"""
Return all target species
@rtype: util_models.Result
@return: Result object
"""
# get recommended dataset
dataset = django_models.Dataset.objects.filter(isrecommended="1").get()
# get all species in this set
ids = django_models.DatasetVersion.objects.filter(datasetid=dataset).values_list('versionid', flat=True)
versions = django_models.Version.objects.filter(pk__in = ids)
result = util_models.Result()
result.addHeaderField('COUNT-SPECIES',versions.count())
result.addDataField('Atoms',versions)
return result
def truncateTransitions(transitions, request, maxTransitionNumber):
"""
limit the number of transitions
@type transitions: list
@param transitions: a list of Transition
@type request: Q()
@param request: sql query
@type maxTransitionNumber: int
@param maxTransitionNumber: max number of transitions
@rtype: list
@return: truncated list of transitions
"""
percentage='%.1f' % (float(maxTransitionNumber) / transitions.count() * 100)
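    # keep the energetically lowest transitions: order by initial state energy
    # and cut at the energy of the first transition beyond the limit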
transitions = transitions.order_by('initialatomicstate__stateenergy')
newmax = transitions[maxTransitionNumber].initialatomicstate.stateenergy
return django_models.Collisionaltransition.objects.filter(request,Q(initialatomicstate__stateenergy__lt=newmax)), percentage
def getSpeciesWithStates(transs):
"""
Use the Transition matches to obtain the related Species (only atoms in this example)
and the states related to each transition.
We also return some statistics of the result
"""
# get ions according to selected transitions
ionids = transs.values_list('version', flat=True).distinct()
species = django_models.Version.objects.filter(id__in=ionids)
# get all states.
nstates = 0
sourceids = []
for trans in transs :
setSpecies(trans)
# get tabulated data and their references
setDataset(trans, sourceids)
for specie in species:
# get all transitions in linked to this particular species
spec_transitions = transs.filter(version=specie.id)
        # extract reference ids for the states from the transition, combining both
# upper and lower unique states together
up = spec_transitions.values_list('initialatomicstate',flat=True)
lo = spec_transitions.values_list('finalatomicstate',flat=True)
sids = set(chain(up, lo))
# use the found reference ids to search the State database table
specie.States = django_models.Atomicstate.objects.filter( pk__in = sids )
for state in specie.States :
state.Components = []
state.Components.append(getCoupling(state))
state.Sources = getStateSources(state)
sourceids.extend(state.Sources)
nstates += specie.States.count()
return species, nstates, sourceids
def getStateSources(state):
"""
get ids of sources related to an atomic state
"""
sourceids = []
relatedsources = django_models.Atomicstatesource.objects.filter(atomicstate=state)
for relatedsource in relatedsources :
sourceids.append(relatedsource.source.pk)
return sourceids
def getSources(ids):
"""
get a list of source objects from their ids
"""
sources = django_models.Source.objects.filter(pk__in=ids)
for source in sources :
names=[]
adresses=[]
relatedauthors = django_models.Authorsource.objects.filter(source=source).order_by('rank')
#build a list of authors
for relatedauthor in relatedauthors:
names.append(relatedauthor.author.name)
source.Authors = names
return sources
def getTabdataSources(tabdata):
"""
get source ids of tabdata
"""
sourceids = []
relatedsources = django_models.Tabulateddatasource.objects.filter(pk=tabdata.pk)
for relatedsource in relatedsources :
sourceids.append(relatedsource.source.pk)
return sourceids
def setDataset(trans, sourceids):
"""
create Dataset with Tabulated data
trans : a given transition
sourceids : a list of all references for the current request
"""
tabulateddata = django_models.Tabulateddata.objects.filter(collisionaltransition = trans.id)
sources = []
trans.DataSets = []
# get tabulated data
for data in tabulateddata :
datasources = getTabdataSources(data)
# add reference to list global list of references for this query
for source in datasources :
if source not in sourceids :
sourceids.append(source)
dataset = util_models.XsamsDataset()
dataset.TabData = []
dataset.TabData.append(data)
data.Sources = datasources
dataset.dataDescription = data.datadescription.value
trans.DataSets.append(dataset)
def setSpecies(trans):
"""
add product and reactant states
"""
setReactants(trans)
setProducts(trans)
def setReactants(trans):
"""
add reactants
"""
trans.Reactants = []
trans.Reactants.append(trans.initialatomicstate)
particle = django_models.Particle.objects.filter(name='electron') # second reactant is always an electron for now
if(len(particle) == 1 ):
trans.Reactants.append(particle[0])
def setProducts(trans):
"""
add product
"""
trans.Products = []
trans.Products.append(trans.finalatomicstate)
def getCoupling(state):
"""
Get coupling for the given state
"""
components = django_models.Atomiccomponent.objects.filter(atomicstate=state)
for component in components:
component.Lscoupling = django_models.Lscoupling.objects.get(atomiccomponent=component)
return components[0]
def getParticles():
return django_models.Particle.objects.all()
| gpl-3.0 |
JamesClough/networkx | examples/drawing/giant_component.py | 15 | 2287 | #!/usr/bin/env python
"""
This example illustrates the sudden appearance of a
giant connected component in a binomial random graph.
Requires pygraphviz and matplotlib to draw.
"""
# Copyright (C) 2006-2016
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
try:
import matplotlib.pyplot as plt
except:
raise
import networkx as nx
import math
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
layout = graphviz_layout
except ImportError:
try:
import pydotplus
from networkx.drawing.nx_pydot import graphviz_layout
layout = graphviz_layout
except ImportError:
print("PyGraphviz and PyDotPlus not found;\n"
"drawing with spring layout;\n"
"will be slow.")
layout = nx.spring_layout
n=150 # 150 nodes
# p value at which giant component (of size log(n) nodes) is expected
p_giant=1.0/(n-1)
# p value at which graph is expected to become completely connected
p_conn=math.log(n)/float(n)
# the following range of p values should be close to the threshold
pvals=[0.003, 0.006, 0.008, 0.015]
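# for n=150 these values straddle p_giant (about 0.0067); p_conn (about 0.033)
# lies above this range, so the graphs are not expected to be fully connected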
region=220 # for pylab 2x2 subplot layout
plt.subplots_adjust(left=0,right=1,bottom=0,top=0.95,wspace=0.01,hspace=0.01)
for p in pvals:
G=nx.binomial_graph(n,p)
pos=layout(G)
region+=1
plt.subplot(region)
plt.title("p = %6.3f"%(p))
nx.draw(G,pos,
with_labels=False,
node_size=10
)
# identify largest connected component
Gcc=sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)
G0=Gcc[0]
nx.draw_networkx_edges(G0,pos,
with_labels=False,
edge_color='r',
width=6.0
)
# show other connected components
for Gi in Gcc[1:]:
if len(Gi)>1:
nx.draw_networkx_edges(Gi,pos,
with_labels=False,
edge_color='r',
alpha=0.3,
width=5.0
)
plt.savefig("giant_component.png")
plt.show() # display
| bsd-3-clause |
yamahata/neutron | neutron/plugins/vmware/api_client/request.py | 7 | 12120 | # Copyright 2012 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from abc import ABCMeta
from abc import abstractmethod
import copy
import eventlet
import httplib
import time
import six
import six.moves.urllib.parse as urlparse
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import ctrl_conn_to_str
LOG = logging.getLogger(__name__)
DEFAULT_REQUEST_TIMEOUT = 30
DEFAULT_HTTP_TIMEOUT = 10
DEFAULT_RETRIES = 2
DEFAULT_REDIRECTS = 2
DEFAULT_API_REQUEST_POOL_SIZE = 1000
DEFAULT_MAXIMUM_REQUEST_ID = 4294967295
DOWNLOAD_TIMEOUT = 180
@six.add_metaclass(ABCMeta)
class ApiRequest(object):
'''An abstract baseclass for all ApiRequest implementations.
This defines the interface and property structure for both eventlet and
gevent-based ApiRequest classes.
'''
# List of allowed status codes.
ALLOWED_STATUS_CODES = [
httplib.OK,
httplib.CREATED,
httplib.NO_CONTENT,
httplib.MOVED_PERMANENTLY,
httplib.TEMPORARY_REDIRECT,
httplib.BAD_REQUEST,
httplib.UNAUTHORIZED,
httplib.FORBIDDEN,
httplib.NOT_FOUND,
httplib.CONFLICT,
httplib.INTERNAL_SERVER_ERROR,
httplib.SERVICE_UNAVAILABLE
]
@abstractmethod
def start(self):
pass
@abstractmethod
def join(self):
pass
@abstractmethod
def copy(self):
pass
def _issue_request(self):
'''Issue a request to a provider.'''
conn = (self._client_conn or
self._api_client.acquire_connection(True,
copy.copy(self._headers),
rid=self._rid()))
if conn is None:
error = Exception(_("No API connections available"))
self._request_error = error
return error
url = self._url
LOG.debug(_("[%(rid)d] Issuing - request %(conn)s"),
{'rid': self._rid(), 'conn': self._request_str(conn, url)})
issued_time = time.time()
is_conn_error = False
is_conn_service_unavail = False
response = None
try:
redirects = 0
while (redirects <= self._redirects):
# Update connection with user specified request timeout,
# the connect timeout is usually smaller so we only set
# the request timeout after a connection is established
if conn.sock is None:
conn.connect()
conn.sock.settimeout(self._http_timeout)
elif conn.sock.gettimeout() != self._http_timeout:
conn.sock.settimeout(self._http_timeout)
headers = copy.copy(self._headers)
cookie = self._api_client.auth_cookie(conn)
if cookie:
headers["Cookie"] = cookie
gen = self._api_client.config_gen
if gen:
headers["X-Nvp-Wait-For-Config-Generation"] = gen
LOG.debug(_("Setting X-Nvp-Wait-For-Config-Generation "
"request header: '%s'"), gen)
try:
conn.request(self._method, url, self._body, headers)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.warn(_("[%(rid)d] Exception issuing request: "
"%(e)s"),
{'rid': self._rid(), 'e': e})
response = conn.getresponse()
response.body = response.read()
response.headers = response.getheaders()
elapsed_time = time.time() - issued_time
LOG.debug(_("[%(rid)d] Completed request '%(conn)s': "
"%(status)s (%(elapsed)s seconds)"),
{'rid': self._rid(),
'conn': self._request_str(conn, url),
'status': response.status,
'elapsed': elapsed_time})
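                # remember the newest configuration generation reported by the
                # controller so that later requests can wait for it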
new_gen = response.getheader('X-Nvp-Config-Generation', None)
if new_gen:
LOG.debug(_("Reading X-Nvp-config-Generation response "
"header: '%s'"), new_gen)
if (self._api_client.config_gen is None or
self._api_client.config_gen < int(new_gen)):
self._api_client.config_gen = int(new_gen)
if response.status == httplib.UNAUTHORIZED:
if cookie is None and self._url != "/ws.v1/login":
# The connection still has no valid cookie despite
                        # attempts to authenticate and the request has failed
                        # with unauthorized status code. If this isn't
# a request to authenticate, we should abort the
# request since there is no point in retrying.
self._abort = True
else:
# If request is unauthorized, clear the session cookie
# for the current provider so that subsequent requests
# to the same provider triggers re-authentication.
                        self._api_client.set_auth_cookie(conn, None)
elif response.status == httplib.SERVICE_UNAVAILABLE:
is_conn_service_unavail = True
if response.status not in [httplib.MOVED_PERMANENTLY,
httplib.TEMPORARY_REDIRECT]:
break
elif redirects >= self._redirects:
LOG.info(_("[%d] Maximum redirects exceeded, aborting "
"request"), self._rid())
break
redirects += 1
conn, url = self._redirect_params(conn, response.headers,
self._client_conn is None)
if url is None:
response.status = httplib.INTERNAL_SERVER_ERROR
break
LOG.info(_("[%(rid)d] Redirecting request to: %(conn)s"),
{'rid': self._rid(),
'conn': self._request_str(conn, url)})
# yield here, just in case we are not out of the loop yet
eventlet.greenthread.sleep(0)
# If we receive any of these responses, then
# our server did not process our request and may be in an
            # errored state. Raise an exception, which will cause
# the conn to be released with is_conn_error == True
# which puts the conn on the back of the client's priority
# queue.
if (response.status == httplib.INTERNAL_SERVER_ERROR and
response.status > httplib.NOT_IMPLEMENTED):
LOG.warn(_("[%(rid)d] Request '%(method)s %(url)s' "
"received: %(status)s"),
{'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': response.status})
raise Exception(_('Server error return: %s'), response.status)
return response
except Exception as e:
if isinstance(e, httplib.BadStatusLine):
msg = (_("Invalid server response"))
else:
msg = unicode(e)
if response is None:
elapsed_time = time.time() - issued_time
LOG.warn(_("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
"(%(elapsed)s seconds)"),
{'rid': self._rid(), 'conn': self._request_str(conn, url),
'msg': msg, 'elapsed': elapsed_time})
self._request_error = e
is_conn_error = True
return e
finally:
# Make sure we release the original connection provided by the
# acquire_connection() call above.
if self._client_conn is None:
self._api_client.release_connection(conn, is_conn_error,
is_conn_service_unavail,
rid=self._rid())
def _redirect_params(self, conn, headers, allow_release_conn=False):
"""Process redirect response, create new connection if necessary.
Args:
conn: connection that returned the redirect response
headers: response headers of the redirect response
allow_release_conn: if redirecting to a different server,
release existing connection back to connection pool.
Returns: Return tuple(conn, url) where conn is a connection object
to the redirect target and url is the path of the API request
"""
url = None
for name, value in headers:
if name.lower() == "location":
url = value
break
if not url:
LOG.warn(_("[%d] Received redirect status without location header"
" field"), self._rid())
return (conn, None)
# Accept location with the following format:
# 1. /path, redirect to same node
# 2. scheme://hostname:[port]/path where scheme is https or http
# Reject others
# 3. e.g. relative paths, unsupported scheme, unspecified host
result = urlparse.urlparse(url)
if not result.scheme and not result.hostname and result.path:
if result.path[0] == "/":
if result.query:
url = "%s?%s" % (result.path, result.query)
else:
url = result.path
return (conn, url) # case 1
else:
LOG.warn(_("[%(rid)d] Received invalid redirect location: "
"'%(url)s'"), {'rid': self._rid(), 'url': url})
return (conn, None) # case 3
elif result.scheme not in ["http", "https"] or not result.hostname:
LOG.warn(_("[%(rid)d] Received malformed redirect "
"location: %(url)s"), {'rid': self._rid(), 'url': url})
return (conn, None) # case 3
# case 2, redirect location includes a scheme
# so setup a new connection and authenticate
if allow_release_conn:
self._api_client.release_connection(conn)
conn_params = (result.hostname, result.port, result.scheme == "https")
conn = self._api_client.acquire_redirect_connection(conn_params, True,
self._headers)
if result.query:
url = "%s?%s" % (result.path, result.query)
else:
url = result.path
return (conn, url)
def _rid(self):
'''Return current request id.'''
return self._request_id
@property
def request_error(self):
'''Return any errors associated with this instance.'''
return self._request_error
def _request_str(self, conn, url):
'''Return string representation of connection.'''
return "%s %s/%s" % (self._method, ctrl_conn_to_str(conn), url)
| apache-2.0 |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/django/utils/translation/trans_null.py | 467 | 1408 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
from django.conf import settings
from django.utils.encoding import force_text
def ngettext(singular, plural, number):
if number == 1:
return singular
return plural
ngettext_lazy = ngettext
def ungettext(singular, plural, number):
return force_text(ngettext(singular, plural, number))
def pgettext(context, message):
return ugettext(message)
def npgettext(context, singular, plural, number):
return ungettext(singular, plural, number)
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
def gettext(message):
return message
def ugettext(message):
return force_text(gettext(message))
gettext_noop = gettext_lazy = _ = gettext
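# Convert a language code such as 'en-us' into a locale name such as 'en_US'.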
def to_locale(language):
p = language.find('-')
if p >= 0:
return language[:p].lower() + '_' + language[p + 1:].upper()
else:
return language.lower()
def get_language_from_request(request, check_path=False):
return settings.LANGUAGE_CODE
def get_language_from_path(request):
return None
| bsd-3-clause |
pombreda/syzygy | third_party/numpy/files/numpy/distutils/fcompiler/g95.py | 94 | 1313 | # http://g95.sourceforge.net/
from numpy.distutils.fcompiler import FCompiler
compilers = ['G95FCompiler']
class G95FCompiler(FCompiler):
compiler_type = 'g95'
description = 'G95 Fortran Compiler'
# version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*'
# $ g95 --version
# G95 (GCC 4.0.3 (g95!) May 22 2006)
version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*'
# $ g95 --version
# G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006)
executables = {
'version_cmd' : ["<F90>", "--version"],
'compiler_f77' : ["g95", "-ffixed-form"],
'compiler_fix' : ["g95", "-ffixed-form"],
'compiler_f90' : ["g95"],
'linker_so' : ["<F90>","-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-fpic']
module_dir_switch = '-fmod='
module_include_switch = '-I'
def get_flags(self):
return ['-fno-second-underscore']
def get_flags_opt(self):
return ['-O']
def get_flags_debug(self):
return ['-g']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
compiler = G95FCompiler()
compiler.customize()
print(compiler.get_version())
| apache-2.0 |
centricular/cerbero | cerbero/commands/build.py | 1 | 3560 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#from cerbero.oven import Oven
from cerbero.commands import Command, register_command
from cerbero.build.cookbook import CookBook
from cerbero.build.oven import Oven
from cerbero.utils import _, N_, ArgparseArgument
class Build(Command):
doc = N_('Build a recipe')
name = 'build'
def __init__(self, force=None, no_deps=None):
args = [
ArgparseArgument('recipe', nargs='*',
help=_('name of the recipe to build')),
ArgparseArgument('--missing-files', action='store_true',
default=False,
help=_('prints a list of files installed that are '
                    'not listed in the recipe')),
ArgparseArgument('--dry-run', action='store_true',
default=False,
help=_('only print commands instead of running them ')),
ArgparseArgument('--offline', action='store_true',
default=False, help=_('Use only the source cache, no network')),
]
if force is None:
args.append(
ArgparseArgument('--force', action='store_true',
default=False,
                    help=_('force the build of the recipe ignoring '
'its cached state')))
if no_deps is None:
args.append(
ArgparseArgument('--no-deps', action='store_true',
default=False,
help=_('do not build dependencies')))
self.force = force
self.no_deps = no_deps
Command.__init__(self, args)
def run(self, config, args):
if self.force is None:
self.force = args.force
if self.no_deps is None:
self.no_deps = args.no_deps
self.runargs(config, args.recipe, args.missing_files, self.force,
self.no_deps, dry_run=args.dry_run, offline=args.offline)
def runargs(self, config, recipes, missing_files=False, force=False,
no_deps=False, cookbook=None, dry_run=False, offline=False):
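        # reuse the caller's cookbook when one is given; otherwise build a fresh
        # one here, honouring the offline flag, before cooking the recipes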
if cookbook is None:
cookbook = CookBook(config, offline=offline)
oven = Oven(recipes, cookbook, force=self.force,
no_deps=self.no_deps, missing_files=missing_files,
dry_run=dry_run)
oven.start_cooking()
class BuildOne(Build):
doc = N_('Build or rebuild a single recipe without its dependencies')
name = 'buildone'
def __init__(self):
Build.__init__(self, True, True)
register_command(BuildOne)
register_command(Build)
| lgpl-2.1 |
alangwansui/mtl_ordercenter | openerp/addons/base_report_designer/plugin/openerp_report_designer/bin/script/ModifyExistingReport.py | 384 | 8450 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
import base64, tempfile
from com.sun.star.task import XJobExecutor
import os
import sys
if __name__<>'package':
from lib.gui import *
from lib.error import *
from LoginTest import *
from lib.logreport import *
from lib.rpc import *
database="test"
uid = 3
class ModifyExistingReport(unohelper.Base, XJobExecutor):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
self.win = DBModalDialog(60, 50, 180, 120, "Modify Existing Report")
self.win.addFixedText("lblReport", 2, 3, 60, 15, "Report Selection")
self.win.addComboListBox("lstReport", -1,15,178,80 , False )
self.lstReport = self.win.getControl( "lstReport" )
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.logobj=Logger()
self.hostname = docinfo.getUserFieldValue(0)
global passwd
self.password = passwd
global url
self.sock=RPCSession(url)
# Open a new connexion to the server
ids = self.sock.execute(database, uid, self.password, 'ir.module.module', 'search', [('name','=','base_report_designer'),('state', '=', 'installed')])
if not len(ids):
ErrorDialog("Please install base_report_designer module.", "", "Module Uninstalled Error!")
exit(1)
ids = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'search', [('report_xsl', '=', False),('report_xml', '=', False)])
fields=['id', 'name','report_name','model']
self.reports = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'read', ids, fields)
self.report_with_id = []
for report in self.reports:
if report['name']<>"":
model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=', report['model'])])
model_res_other =self.sock.execute(database, uid, self.password, 'ir.model', 'read', model_ids, [ 'name', 'model' ] )
if model_res_other <> []:
name = model_res_other[0]['name'] + " - " + report['name']
else:
name = report['name'] + " - " + report['model']
self.report_with_id.append( (report['id'], name, report['model'] ) )
self.report_with_id.sort( lambda x, y: cmp( x[1], y[1] ) )
for id, report_name, model_name in self.report_with_id:
self.lstReport.addItem( report_name, self.lstReport.getItemCount() )
self.win.addButton('btnSave',10,-5,50,15,'Open Report' ,actionListenerProc = self.btnOk_clicked )
self.win.addButton('btnCancel',-10 ,-5,50,15,'Cancel' ,actionListenerProc = self.btnCancel_clicked )
self.win.addButton('btnDelete',15 -80 ,-5,50,15,'Delete Report',actionListenerProc = self.btnDelete_clicked)
self.win.doModalDialog("lstReport",self.report_with_id[0][1] )
def btnOk_clicked(self, oActionEvent):
try:
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
selectedItemPos = self.win.getListBoxSelectedItemPos( "lstReport" )
id = self.report_with_id[ selectedItemPos ][0]
res = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'report_get', id)
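            # decode the SXW/ODT template returned by the server, write it to a
            # temporary file and open that copy in the running office instance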
if res['file_type'] in ['sxw','odt'] :
file_type = res['file_type']
else :
file_type = 'sxw'
fp_name = tempfile.mktemp('.'+file_type)
fp_name1="r"+fp_name
fp_path=os.path.join(fp_name1).replace("\\","/")
fp_win=fp_path[1:]
filename = ( os.name == 'nt' and fp_win or fp_name )
if res['report_sxw_content']:
write_data_to_file( filename, base64.decodestring(res['report_sxw_content']))
url = "file:///%s" % filename
arr=Array(makePropertyValue("MediaType","application/vnd.sun.xml.writer"),)
oDoc2 = desktop.loadComponentFromURL(url, "openerp", 55, arr)
docinfo2=oDoc2.getDocumentInfo()
docinfo2.setUserFieldValue(0, self.hostname)
docinfo2.setUserFieldValue(1,self.password)
docinfo2.setUserFieldValue(2,id)
docinfo2.setUserFieldValue(3,self.report_with_id[selectedItemPos][2])
oParEnum = oDoc2.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
oPar.SelectedItem = oPar.Items[0]
oPar.update()
if oDoc2.isModified():
if oDoc2.hasLocation() and not oDoc2.isReadonly():
oDoc2.store()
                ErrorDialog("Download is completed.","Your file has been placed here:\n"+ fp_name,"Download Message !")
obj=Logger()
obj.log_write('Modify Existing Report',LOG_INFO, ':successful download report %s using database %s' % (self.report_with_id[selectedItemPos][2], database))
except Exception, e:
ErrorDialog("The report could not be downloaded.", "Report: %s\nDetails: %s" % ( fp_name, str(e) ),"Download Message !")
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ModifyExistingReport', LOG_ERROR, info)
self.win.endExecute()
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
def btnDelete_clicked(self, oActionEvent):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
selectedItemPos = self.win.getListBoxSelectedItemPos( "lstReport" )
name=self.win.getListBoxSelectedItem ("lstReport")
id = self.report_with_id[ selectedItemPos ][0]
temp = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'unlink', id,)
str_value='ir.actions.report.xml,'+str(id)
ids = self.sock.execute(database, uid, self.password, 'ir.values' , 'search',[('value','=',str_value)])
if ids:
rec = self.sock.execute(database, uid, self.password, 'ir.values', 'unlink', ids,)
else :
pass
if temp:
            ErrorDialog("Report", "The report has been deleted:\n"+name+".", "Message !")
self.logobj.log_write('Delete Report', LOG_INFO, ': report %s successfully deleted using database %s.' % (name, database))
else:
ErrorDialog("Report", "The report could not be deleted:\n"+name+".", "Message !")
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
ModifyExistingReport(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( ModifyExistingReport, "org.openoffice.openerp.report.modifyreport", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
agile-geoscience/seisplot | notice.py | 1 | 1873 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Notices during runtime.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
class Notice(object):
"""
Helper class to make printout more readable.
"""
styles = {'HEADER': '\033[95m',
'INFO': '\033[94m', # blue
'OK': '\033[92m', # green
              'WARNING': '\033[93m', # yellow
'FAIL': '\033[91m',
'BOLD': '\033[1m'
}
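    # ANSI reset sequence appended after every styled string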
ENDC = '\033[0m'
def __init__(self, string, style, hold=False):
string = self.styles[style.upper()] + string + self.ENDC
end = '' if hold else '\n'
print(string, end=end)
@classmethod
def title(cls):
"""Makes a logo."""
logo = """
Welcome to
┌─┐┌─┐┬┌─┐┌─┐┬ ┌─┐┌┬┐
└─┐├┤ │└─┐├─┘│ │ │ │
└─┘└─┘┴└─┘┴ ┴─┘└─┘ ┴
Good luck"""
return cls(logo, 'FAIL')
@classmethod
def warning(cls, string, hold=False):
"""Yellow."""
return cls(string, 'WARNING', hold=hold)
@classmethod
def fail(cls, string, hold=False):
"""Red."""
return cls(string, 'FAIL', hold=hold)
@classmethod
def header(cls, string, hold=False):
"""Pink."""
return cls('\n'+string+'\n', 'HEADER', hold=hold)
@classmethod
def hr_header(cls, string, hold=False):
"""Pink."""
hr = "\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
return cls(hr+string.upper(), 'HEADER', hold=hold)
@classmethod
def info(cls, string, hold=False):
"""Blue."""
return cls(string, 'INFO', hold=hold)
@classmethod
def ok(cls, string, hold=False):
"""Green."""
return cls(string, 'OK', hold=hold)
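# Illustrative usage (a sketch; nothing in this module calls these directly):
#
#   Notice.title()
#   Notice.info("Loading SEG-Y data...")
#   Notice.warning("No trace headers found", hold=True)
#   Notice.ok(" continuing anyway")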
| apache-2.0 |
ptdtan/Ragout | lib/networkx/exception.py | 41 | 1660 | # -*- coding: utf-8 -*-
"""
**********
Exceptions
**********
Base exceptions and errors for NetworkX.
"""
__author__ = """Aric Hagberg ([email protected])\nPieter Swart ([email protected])\nDan Schult([email protected])\nLoïc Séguin-C. <[email protected]>"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
#
# Exception handling
# the root of all Exceptions
class NetworkXException(Exception):
"""Base class for exceptions in NetworkX."""
class NetworkXError(NetworkXException):
"""Exception for a serious error in NetworkX"""
class NetworkXPointlessConcept(NetworkXException):
"""Harary, F. and Read, R. "Is the Null Graph a Pointless Concept?"
In Graphs and Combinatorics Conference, George Washington University.
New York: Springer-Verlag, 1973.
"""
class NetworkXAlgorithmError(NetworkXException):
"""Exception for unexpected termination of algorithms."""
class NetworkXUnfeasible(NetworkXAlgorithmError):
"""Exception raised by algorithms trying to solve a problem
instance that has no feasible solution."""
class NetworkXNoPath(NetworkXUnfeasible):
"""Exception for algorithms that should return a path when running
on graphs where such a path does not exist."""
class NetworkXUnbounded(NetworkXAlgorithmError):
"""Exception raised by algorithms trying to solve a maximization
or a minimization problem instance that is unbounded."""
class NetworkXNotImplemented(NetworkXException):
"""Exception raised by algorithms not implemented for a type of graph."""
| gpl-3.0 |
IRI-Research/django | django/core/management/commands/inspectdb.py | 6 | 11091 | from __future__ import unicode_literals
from collections import OrderedDict
import keyword
import re
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(NoArgsCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
option_list = NoArgsCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.'),
)
requires_system_checks = False
db_module = 'django.db'
def handle_noargs(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options.get('database')]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
strip_prefix = lambda s: s[1:] if s.startswith("u'") else s
with connection.cursor() as cursor:
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# * Remove `managed = False` lines for those models you wish to give write DB access"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [app_label]'"
yield "# into your database."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
known_models = []
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
yield ''
yield ''
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
used_column_names = [] # Holds column names used in the table so far
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = OrderedDict() # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = i in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = "self" if relations[i][1] == table_name else table2model(relations[i][1])
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
                    # additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and extra_params == {'primary_key': True}:
if field_type == 'AutoField(':
continue
elif field_type == 'IntegerField(' and not connection.features.can_introspect_autofield:
comment_notes.append('AutoField?')
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
if field_type == 'BooleanField(':
field_type = 'NullBooleanField('
else:
extra_params['blank'] = True
if not field_type in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = %s%s' % (
att_name,
# Custom fields will have a dotted path
'' if '.' in field_type else 'models.',
field_type,
)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join([
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = OrderedDict()
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = int(row[3])
if field_type == 'DecimalField':
if row[4] is None or row[5] is None:
field_notes.append(
'max_digits and decimal_places have been guessed, as this '
'database handles decimal fields as float')
field_params['max_digits'] = row[4] if row[4] is not None else 10
field_params['decimal_places'] = row[5] if row[5] is not None else 5
else:
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
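    # For illustration (a typical case, not exhaustive): a nullable, non-indexed
    # varchar(75) column named "email" flows through the loop above as
    # field_type 'CharField(' with max_length=75 and blank=True (null is not
    # added for CharField/TextField), producing the model line:
    #     email = models.CharField(max_length=75, blank=True)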
def get_meta(self, table_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
return ["",
" class Meta:",
" managed = False",
" db_table = '%s'" % table_name]
| bsd-3-clause |
virtualelephant/openstack-heat-bde-plugin | scripts/createRestAPI.py | 1 | 2262 | #!/usr/bin/python
# Testing module for REST API code in BDE
#
# Chris Mutchler - [email protected]
# http://www.VirtualElephant.com
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging, json, requests, time
#requests.packages.urllib3.disable_warnings()
# Setup logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
# Big Data Extensions Endpoint
bde_endpoint = 'bde.localdomain'
username = '[email protected]'
password = 'password'
# Make initial session authenticated session
header = {'content-type': 'application/x-www-form-urlencoded'}
prefix = "https://"
port = ":8443"
auth_string = "/serengeti/j_spring_security_check"
creds = 'j_username=' + username + '&j_password=' + password
url = prefix + bde_endpoint + port + auth_string
s = requests.session()
r = s.post(url, creds, headers=header, verify=False)
#DEBUG
print url
print r.json
#/DEBUG
# Variables that will be passed through Heat
clusterType = "mesos"
clusterName = "mesos_api_01"
# Setup necessary bits for creating a new cluster
header = {'content-type': 'application/json'}
payload = {"name": clusterName, "distro": clusterType, "networkConfig": { "MGT_NETWORK": ["defaultNetwork"]}}
api_call = '/serengeti/api/clusters'
url = prefix + bde_endpoint + port + api_call
r = s.post(url, data=json.dumps(payload), headers=header, verify=False)
#DEBUG
print
print url
print r.json
print
print r.headers
print r.text
#/DEBUG
time.sleep(60)
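# The fixed sleep above just gives BDE time to start provisioning; a polling
# loop along these lines would be more robust (illustrative sketch only -- the
# exact status field and values depend on the BDE/Serengeti API version):
#
#     while True:
#         r = s.get(prefix + bde_endpoint + port +
#                   '/serengeti/api/cluster/' + clusterName,
#                   headers=header, verify=False)
#         if r.json().get('status') != 'PROVISIONING':
#             break
#         time.sleep(15)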
# Check and gather cluster information
api_call = '/serengeti/api/cluster/' + clusterName
url = prefix + bde_endpoint + port + api_call
r = s.post(url, headers=header, verify=False)
print r.json
print r.headers
print r.text
| apache-2.0 |
jcfr/girder | tests/cases/access_test.py | 1 | 5001 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2014 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from .. import base
from girder.api.rest import loadmodel, Resource
from girder.api import access
from girder.constants import AccessType
# We deliberately don't have an access decorator
def defaultFunctionHandler(**kwargs):
return
@access.admin
def adminFunctionHandler(**kwargs):
return
@access.user
def userFunctionHandler(**kwargs):
return
@access.public
def publicFunctionHandler(**kwargs):
return
@access.public
@loadmodel(map={'id': 'user'}, model='user', level=AccessType.READ)
def plainFn(user, params):
return user
class AccessTestResource(Resource):
def __init__(self):
self.resourceName = 'accesstest'
self.route('GET', ('default_access', ), self.defaultHandler)
self.route('GET', ('admin_access', ), self.adminHandler)
self.route('GET', ('user_access', ), self.userHandler)
self.route('GET', ('public_access', ), self.publicHandler)
# We deliberately don't have an access decorator
def defaultHandler(self, **kwargs):
return
@access.admin
def adminHandler(self, **kwargs):
return
@access.user
def userHandler(self, **kwargs):
return
@access.public
def publicHandler(self, **kwargs):
return
def setUpModule():
server = base.startServer()
server.root.api.v1.accesstest = AccessTestResource()
# Public access endpoints do not need to be a Resource subclass method,
# they can be a regular function
accesstest = server.root.api.v1.accesstest
accesstest.route('GET', ('default_function_access', ),
defaultFunctionHandler)
accesstest.route('GET', ('admin_function_access', ), adminFunctionHandler)
accesstest.route('GET', ('user_function_access', ), userFunctionHandler)
accesstest.route('GET', ('public_function_access', ),
publicFunctionHandler)
accesstest.route('GET', ('test_loadmodel_plain', ':id'), plainFn)
def tearDownModule():
base.stopServer()
class AccessTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
admin = {
'email': '[email protected]',
'login': 'admin',
'firstName': 'Admin',
'lastName': 'Admin',
'password': 'adminpassword',
'admin': True
}
self.admin = self.model('user').createUser(**admin)
user = {
'email': '[email protected]',
'login': 'goodlogin',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword',
'admin': False
}
self.user = self.model('user').createUser(**user)
def testAccessEndpoints(self):
endpoints = [
("/accesstest/default_access", "admin"),
("/accesstest/admin_access", "admin"),
("/accesstest/user_access", "user"),
("/accesstest/public_access", "public"),
("/accesstest/default_function_access", "admin"),
("/accesstest/admin_function_access", "admin"),
("/accesstest/user_function_access", "user"),
("/accesstest/public_function_access", "public"),
]
for endpoint in endpoints:
resp = self.request(path=endpoint[0], method='GET', user=None)
if endpoint[1] in ("public", ):
self.assertStatusOk(resp)
else:
self.assertStatus(resp, 401)
resp = self.request(path=endpoint[0], method='GET', user=self.user)
if endpoint[1] in ("public", "user"):
self.assertStatusOk(resp)
else:
self.assertStatus(resp, 403)
resp = self.request(path=endpoint[0], method='GET', user=self.admin)
if endpoint[1] in ("public", "user", "admin"):
self.assertStatusOk(resp)
else:
self.assertStatus(resp, 403)
def testLoadModelPlainFn(self):
resp = self.request(path='/accesstest/test_loadmodel_plain/{}'.format(
self.user['_id']), method='GET')
self.assertStatusOk(resp)
self.assertEqual(resp.json['_id'], str(self.user['_id']))
| apache-2.0 |
jimsimon/sky_engine | third_party/jinja2/tests.py | 638 | 3444 | # -*- coding: utf-8 -*-
"""
jinja2.tests
~~~~~~~~~~~~
Jinja test functions. Used with the "is" operator.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.runtime import Undefined
from jinja2._compat import text_type, string_types, mapping_types
number_re = re.compile(r'^-?\d+(\.\d+)?$')
regex_type = type(number_re)
test_callable = callable
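# Each function below implements one template-side test: for example,
# ``{{ 42 is even }}`` renders ``True`` because the name after ``is`` is looked
# up in the TESTS mapping at the end of this module (copied into each
# Environment's default tests) and called with the left-hand value.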
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
"""Return true if the variable is defined:
.. sourcecode:: jinja
{% if variable is defined %}
value of variable: {{ variable }}
{% else %}
variable is not defined
{% endif %}
See the :func:`default` filter for a simple way to set undefined
variables.
"""
return not isinstance(value, Undefined)
def test_undefined(value):
"""Like :func:`defined` but the other way round."""
return isinstance(value, Undefined)
def test_none(value):
"""Return true if the variable is none."""
return value is None
def test_lower(value):
"""Return true if the variable is lowercased."""
return text_type(value).islower()
def test_upper(value):
"""Return true if the variable is uppercased."""
return text_type(value).isupper()
def test_string(value):
"""Return true if the object is a string."""
return isinstance(value, string_types)
def test_mapping(value):
"""Return true if the object is a mapping (dict etc.).
.. versionadded:: 2.6
"""
return isinstance(value, mapping_types)
def test_number(value):
"""Return true if the variable is a number."""
return isinstance(value, (int, float, complex))
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except:
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, '__html__')
TESTS = {
'odd': test_odd,
'even': test_even,
'divisibleby': test_divisibleby,
'defined': test_defined,
'undefined': test_undefined,
'none': test_none,
'lower': test_lower,
'upper': test_upper,
'string': test_string,
'mapping': test_mapping,
'number': test_number,
'sequence': test_sequence,
'iterable': test_iterable,
'callable': test_callable,
'sameas': test_sameas,
'escaped': test_escaped
}
| bsd-3-clause |
chirilo/kitsune | kitsune/upload/tests/test_models.py | 17 | 1259 | from django.contrib.contenttypes.models import ContentType
from django.core.files import File
from nose.tools import eq_
from kitsune.questions.tests import question
from kitsune.sumo.tests import TestCase
from kitsune.upload.models import ImageAttachment
from kitsune.upload.tasks import generate_thumbnail
from kitsune.users.tests import user
class ImageAttachmentTestCase(TestCase):
def setUp(self):
super(ImageAttachmentTestCase, self).setUp()
self.user = user(save=True)
self.obj = question(save=True)
self.ct = ContentType.objects.get_for_model(self.obj)
def tearDown(self):
ImageAttachment.objects.all().delete()
super(ImageAttachmentTestCase, self).tearDown()
def test_thumbnail_if_set(self):
"""thumbnail_if_set() returns self.thumbnail if set, or else returns
self.file"""
image = ImageAttachment(content_object=self.obj, creator=self.user)
with open('kitsune/upload/tests/media/test.jpg') as f:
up_file = File(f)
image.file.save(up_file.name, up_file, save=True)
eq_(image.file, image.thumbnail_if_set())
generate_thumbnail(image, 'file', 'thumbnail')
eq_(image.thumbnail, image.thumbnail_if_set())
| bsd-3-clause |
darisandi/geonode | geonode/social/signals.py | 2 | 9849 | #########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
""" Django signals connections and associated receiver functions for geonode's
third-party 'social' apps which include announcements, notifications,
relationships, actstream user_messages and potentially others
"""
import logging
from collections import defaultdict
from dialogos.models import Comment
from django.conf import settings
from django.db.models import signals
from django.utils.translation import ugettext as _
from actstream.exceptions import ModelNotActionable
from geonode.layers.models import Layer
from geonode.maps.models import Map
from geonode.documents.models import Document
from geonode.people.models import Profile
from geonode.tasks.email import send_queued_notifications
logger = logging.getLogger(__name__)
activity = None
if "actstream" in settings.INSTALLED_APPS:
from actstream import action as activity
from actstream.actions import follow, unfollow
notification_app = None
if "notification" in settings.INSTALLED_APPS:
notification_app = True
from notification import models as notification
from notification.models import NoticeSetting
relationships = None
if "relationships" in settings.INSTALLED_APPS:
relationships = True
from relationships.models import Relationship
ratings = None
if "agon_ratings" in settings.INSTALLED_APPS:
ratings = True
from agon_ratings.models import Rating
def activity_post_modify_object(sender, instance, created=None, **kwargs):
"""
Creates new activities after a Map, Layer, or Comment is created/updated/deleted.
action_settings:
actor: the user who performed the activity
action_object: the object that received the action
created_verb: a translatable verb that is used when an object is created
deleted_verb: a translatable verb that is used when an object is deleted
object_name: the title of the object that is used to keep information about the object after it is deleted
    target: the target of an action (if a comment is added to a map, the comment is the object and the map is the target)
updated_verb: a translatable verb that is used when an object is updated
raw_action: a constant that describes the type of action performed (values should be: created, uploaded, deleted)
"""
verb = None
obj_type = instance.__class__._meta.object_name.lower()
action_settings = defaultdict(lambda: dict(actor=getattr(instance, "owner", None),
action_object=instance,
created_verb=_('created'),
deleted_verb=_('deleted'),
object_name=getattr(instance, 'name', None),
target=None,
updated_verb=_('updated'),
))
action_settings['map'].update(object_name=getattr(instance, 'title', None),)
action_settings['comment'].update(actor=getattr(instance, 'author', None),
created_verb=_("added a comment"),
target=getattr(instance, 'content_object', None),
updated_verb=_("updated a comment"),
)
action_settings['layer'].update(created_verb=_('uploaded'))
action_settings['document'].update(object_name=getattr(instance, 'title', None),) #^^
action_settings['document'].update(created_verb=_('uploaded')) #^^
action = action_settings[obj_type]
if created:
# object was created
verb = action.get('created_verb')
raw_action = 'created'
else:
if created is False:
# object was saved.
if not isinstance(instance, Layer) and not isinstance(instance, Map):
verb = action.get('updated_verb')
raw_action = 'updated'
if created is None:
# object was deleted.
verb = action.get('deleted_verb')
raw_action = 'deleted'
action.update(action_object=None,
target=None)
if verb:
try:
activity.send(action.get('actor'),
verb=u"{verb}".format(verb=verb),
action_object=action.get('action_object'),
target=action.get('target', None),
object_name=action.get('object_name'),
raw_action=raw_action,
)
except ModelNotActionable:
logger.debug('The activity received a non-actionable Model or None as the actor/action.')
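# Worked example of the mapping above (a sketch, not an exhaustive list): when
# a Comment on a Map is saved, the settings resolve to roughly
#     actor=comment.author, verb="added a comment",
#     action_object=comment, target=comment.content_object (the Map),
# so the resulting activity stream entry reads
# "<author> added a comment on <map title>".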
def relationship_post_save_actstream(instance, sender, created, **kwargs):
follow(instance.from_user, instance.to_user)
def relationship_pre_delete_actstream(instance, sender, **kwargs):
unfollow(instance.from_user, instance.to_user)
def relationship_post_save(instance, sender, created, **kwargs):
notification.queue([instance.to_user], "user_follow", {"from_user": instance.from_user})
if activity:
signals.post_save.connect(activity_post_modify_object, sender=Comment)
signals.post_save.connect(activity_post_modify_object, sender=Layer)
signals.post_delete.connect(activity_post_modify_object, sender=Layer)
signals.post_save.connect(activity_post_modify_object, sender=Map)
signals.post_delete.connect(activity_post_modify_object, sender=Map)
signals.post_save.connect(activity_post_modify_object, sender=Document) #^^
signals.post_delete.connect(activity_post_modify_object, sender=Document) #^^
if notification_app:
def notification_post_save_resource(instance, sender, created, **kwargs):
""" Send a notification when a layer, map or document is created or
updated
"""
notice_type_label = '%s_created' if created else '%s_updated'
notice_type_label = notice_type_label % instance.class_name.lower()
#^^ do not send notification for layer_created and document_created, do it manually on upload
if notice_type_label != 'layer_created' and notice_type_label != 'document_created': #^^
recipients = get_notification_recipients(notice_type_label)
notification.send(recipients, notice_type_label, {'resource': instance})
send_queued_notifications.delay()
def notification_post_delete_resource(instance, sender, **kwargs):
""" Send a notification when a layer, map or document is deleted
"""
notice_type_label = '%s_deleted' % instance.class_name.lower()
recipients = get_notification_recipients(notice_type_label)
notification.send(recipients, notice_type_label, {'resource': instance})
send_queued_notifications.delay()
def rating_post_save(instance, sender, created, **kwargs):
""" Send a notification when rating a layer, map or document
"""
notice_type_label = '%s_rated' % instance.content_object.class_name.lower()
recipients = get_notification_recipients(notice_type_label, instance.user)
notification.send(recipients, notice_type_label, {"instance": instance})
send_queued_notifications.delay()
def comment_post_save(instance, sender, created, **kwargs):
""" Send a notification when a comment to a layer, map or document has
been submitted
"""
notice_type_label = '%s_comment' % instance.content_object.class_name.lower()
recipients = get_notification_recipients(notice_type_label, instance.author)
notification.send(recipients, notice_type_label, {"instance": instance})
send_queued_notifications.delay()
def get_notification_recipients(notice_type_label, exclude_user=None):
""" Get notification recipients
"""
recipients_ids = NoticeSetting.objects \
.filter(notice_type__label=notice_type_label) \
.values('user')
profiles = Profile.objects.filter(id__in=recipients_ids)
if exclude_user:
profiles.exclude(username=exclude_user.username)
return profiles
# signals
# layer/map/document notifications
for resource in (Layer, Map, Document):
signals.post_save.connect(notification_post_save_resource, sender=resource)
signals.post_delete.connect(notification_post_delete_resource, sender=resource)
signals.post_save.connect(comment_post_save, sender=Comment)
# rating notifications
if ratings and notification_app:
signals.post_save.connect(rating_post_save, sender=Rating)
if relationships and activity:
signals.post_save.connect(relationship_post_save_actstream, sender=Relationship)
signals.pre_delete.connect(relationship_pre_delete_actstream, sender=Relationship)
if relationships and notification_app:
signals.post_save.connect(relationship_post_save, sender=Relationship)
| gpl-3.0 |
0x0all/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 10 | 9541 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
"""Test stopping conditions of gradient descent."""
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
"""Test if the binary search finds Gaussians with desired perplexity."""
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
"""Test gradient of Kullback-Leibler divergence."""
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
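# check_grad returns the 2-norm of the difference between the analytical
# gradient returned by _kl_divergence and a finite-difference approximation of
# fun, so asserting that it is ~0 (to 5 decimals here) verifies that the
# gradient implementation is consistent with the objective.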
def test_trustworthiness():
"""Test trustworthiness score."""
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
"""Nearest neighbors should be preserved approximately."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
"""X can be a sparse matrix."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
"""Nearest neighbors should be preserved approximately."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
"""Early exaggeration factor must be >= 1."""
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
"""Number of gradient descent iterations must be at least 200."""
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
"""Precomputed distance matrices must be square matrices."""
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
"""'init' must be 'pca' or 'random'."""
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
"""'metric' must be valid."""
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
"""Precomputed distance matrices must be square matrices."""
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
"""t-SNE should allow metrics that cannot be squared (issue #3526)."""
random_state = check_random_state(0)
tsne = TSNE(verbose=2, metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
| bsd-3-clause |
Johnetordoff/osf.io | osf_tests/test_search_views.py | 6 | 15204 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from nose.tools import * # noqa: F403
from osf_tests import factories
from tests.base import OsfTestCase
from website.util import api_url_for
from website.views import find_bookmark_collection
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchViews(OsfTestCase):
def setUp(self):
super(TestSearchViews, self).setUp()
import website.search.search as search
search.delete_all()
robbie = factories.UserFactory(fullname='Robbie Williams')
self.project = factories.ProjectFactory(creator=robbie)
self.contrib = factories.UserFactory(fullname='Brian May')
for i in range(0, 12):
factories.UserFactory(fullname='Freddie Mercury{}'.format(i))
self.user_one = factories.AuthUserFactory()
self.user_two = factories.AuthUserFactory()
self.project_private_user_one = factories.ProjectFactory(title='aaa', creator=self.user_one, is_public=False)
self.project_private_user_two = factories.ProjectFactory(title='aaa', creator=self.user_two, is_public=False)
self.project_public_user_one = factories.ProjectFactory(title='aaa', creator=self.user_one, is_public=True)
self.project_public_user_two = factories.ProjectFactory(title='aaa', creator=self.user_two, is_public=True)
def tearDown(self):
super(TestSearchViews, self).tearDown()
import website.search.search as search
search.delete_all()
def test_search_views(self):
#Test search contributor
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': self.contrib.fullname})
assert_equal(res.status_code, 200)
result = res.json['users']
assert_equal(len(result), 1)
brian = result[0]
assert_equal(brian['fullname'], self.contrib.fullname)
assert_in('profile_image_url', brian)
assert_equal(brian['registered'], self.contrib.is_registered)
assert_equal(brian['active'], self.contrib.is_active)
#Test search pagination
res = self.app.get(url, {'query': 'fr'})
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(pages, 3)
assert_equal(page, 0)
#Test default page 1
res = self.app.get(url, {'query': 'fr', 'page': 1})
assert_equal(res.status_code, 200)
result = res.json['users']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(page, 1)
#Test default page 2
res = self.app.get(url, {'query': 'fr', 'page': 2})
assert_equal(res.status_code, 200)
result = res.json['users']
page = res.json['page']
assert_equal(len(result), 4)
assert_equal(page, 2)
#Test smaller pages
res = self.app.get(url, {'query': 'fr', 'size': 5})
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(page, 0)
assert_equal(pages, 3)
#Test smaller pages page 2
res = self.app.get(url, {'query': 'fr', 'page': 2, 'size': 5, })
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 4)
assert_equal(page, 2)
assert_equal(pages, 3)
#Test search projects
url = '/search/'
res = self.app.get(url, {'q': self.project.title})
assert_equal(res.status_code, 200)
#Test search node
res = self.app.post_json(
api_url_for('search_node'),
{'query': self.project.title},
auth=factories.AuthUserFactory().auth
)
assert_equal(res.status_code, 200)
#Test search node includePublic true
res = self.app.post_json(
api_url_for('search_node'),
{'query': 'a', 'includePublic': True},
auth=self.user_one.auth
)
node_ids = [node['id'] for node in res.json['nodes']]
assert_in(self.project_private_user_one._id, node_ids)
assert_in(self.project_public_user_one._id, node_ids)
assert_in(self.project_public_user_two._id, node_ids)
assert_not_in(self.project_private_user_two._id, node_ids)
#Test search node includePublic false
res = self.app.post_json(
api_url_for('search_node'),
{'query': 'a', 'includePublic': False},
auth=self.user_one.auth
)
node_ids = [node['id'] for node in res.json['nodes']]
assert_in(self.project_private_user_one._id, node_ids)
assert_in(self.project_public_user_one._id, node_ids)
assert_not_in(self.project_public_user_two._id, node_ids)
assert_not_in(self.project_private_user_two._id, node_ids)
#Test search user
url = '/api/v1/search/user/'
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_false(res.json['results'])
user_one = factories.AuthUserFactory(fullname='Joe Umwali')
user_two = factories.AuthUserFactory(fullname='Joan Uwase')
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 1)
assert_false(res.json['results'][0]['social'])
user_one.social = {
'github': user_one.given_name,
'twitter': user_one.given_name,
'ssrn': user_one.given_name
}
user_one.save()
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 1)
assert_not_in('Joan', res.body.decode())
assert_true(res.json['results'][0]['social'])
assert_equal(res.json['results'][0]['names']['fullname'], user_one.fullname)
assert_equal(res.json['results'][0]['social']['github'], 'http://github.com/{}'.format(user_one.given_name))
assert_equal(res.json['results'][0]['social']['twitter'], 'http://twitter.com/{}'.format(user_one.given_name))
assert_equal(res.json['results'][0]['social']['ssrn'], 'http://papers.ssrn.com/sol3/cf_dev/AbsByAuth.cfm?per_id={}'.format(user_one.given_name))
user_two.social = {
'profileWebsites': ['http://me.com/{}'.format(user_two.given_name)],
'orcid': user_two.given_name,
'linkedIn': user_two.given_name,
'scholar': user_two.given_name,
'impactStory': user_two.given_name,
'baiduScholar': user_two.given_name
}
user_two.save()
user_three = factories.AuthUserFactory(fullname='Janet Umwali')
user_three.social = {
'github': user_three.given_name,
'ssrn': user_three.given_name
}
user_three.save()
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 2)
assert_true(res.json['results'][0]['social'])
assert_true(res.json['results'][1]['social'])
assert_not_equal(res.json['results'][0]['social']['ssrn'], res.json['results'][1]['social']['ssrn'])
assert_not_equal(res.json['results'][0]['social']['github'], res.json['results'][1]['social']['github'])
res = self.app.get(url, {'q': 'Uwase'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 1)
assert_true(res.json['results'][0]['social'])
assert_not_in('ssrn', res.json['results'][0]['social'])
assert_equal(res.json['results'][0]['social']['profileWebsites'][0], 'http://me.com/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['impactStory'], 'https://impactstory.org/u/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['orcid'], 'http://orcid.org/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['baiduScholar'], 'http://xueshu.baidu.com/scholarID/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['linkedIn'], 'https://www.linkedin.com/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['scholar'], 'http://scholar.google.com/citations?user={}'.format(user_two.given_name))
@pytest.mark.enable_bookmark_creation
class TestODMTitleSearch(OsfTestCase):
""" Docs from original method:
:arg term: The substring of the title.
:arg category: Category of the node.
:arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg includePublic: yes or no. Whether the projects listed should include public projects.
:arg includeContributed: yes or no. Whether the search should include projects the current user has
contributed to.
:arg ignoreNode: a list of nodes that should not be included in the search.
:return: a list of dictionaries of projects
"""
def setUp(self):
super(TestODMTitleSearch, self).setUp()
self.user = factories.AuthUserFactory()
self.user_two = factories.AuthUserFactory()
self.project = factories.ProjectFactory(creator=self.user, title='foo')
self.project_two = factories.ProjectFactory(creator=self.user_two, title='bar')
self.public_project = factories.ProjectFactory(creator=self.user_two, is_public=True, title='baz')
self.registration_project = factories.RegistrationFactory(creator=self.user, title='qux')
self.folder = factories.CollectionFactory(creator=self.user, title='quux')
self.dashboard = find_bookmark_collection(self.user)
self.url = api_url_for('search_projects_by_title')
def test_search_projects_by_title(self):
res = self.app.get(self.url, {'term': self.project.title}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.public_project.title,
'includePublic': 'yes',
'includeContributed': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'no',
'includeContributed': 'yes'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'no',
'includeContributed': 'yes',
'isRegistration': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.public_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.registration_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 2)
res = self.app.get(self.url,
{
'term': self.registration_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.folder.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'yes'
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert len(res.json) == 0
res = self.app.get(self.url,
{
'term': self.folder.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
res = self.app.get(self.url,
{
'term': self.dashboard.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
res = self.app.get(self.url,
{
'term': self.dashboard.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'yes'
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
| apache-2.0 |
colonelnugget/pychess | testing/eval.py | 21 | 2202 | import unittest
from pychess.Utils.const import *
from pychess.Utils.lutils.LBoard import LBoard
from pychess.Utils.lutils.leval import evaluateComplete
from pychess.Utils.lutils import leval
class EvalTestCase(unittest.TestCase):
def setUp (self):
self.board = LBoard(NORMALCHESS)
self.board.applyFen("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w - - 0 1")
def test1(self):
"""Testing eval symmetry with startboard (WHITE)"""
score = evaluateComplete(self.board, color=WHITE)
self.assertEqual(score, 0)
def test2(self):
"""Testing eval symmetry with startboard (BLACK)"""
score = evaluateComplete(self.board, color=BLACK)
self.assertEqual(score, 0)
def test3(self):
"""Testing eval symmetry of each function"""
funcs = (f for f in dir(leval) if f.startswith("eval"))
funcs = (getattr(leval,f) for f in funcs)
funcs = (f for f in funcs if callable(f) \
and f != leval.evaluateComplete\
and f != leval.evalMaterial\
and f != leval.evalPawnStructure\
and f != leval.evalTrappedBishops)
sw, phasew = leval.evalMaterial (self.board, WHITE)
sb, phaseb = leval.evalMaterial (self.board, BLACK)
self.assertEqual(phasew, phaseb)
pawnScore, passed, weaked = leval.cacheablePawnInfo (self.board, phasew)
sw = leval.evalPawnStructure (self.board, WHITE, phasew, passed, weaked)
pawnScore, passed, weaked = leval.cacheablePawnInfo (self.board, phaseb)
sb = leval.evalPawnStructure (self.board, BLACK, phaseb, passed, weaked)
self.assertEqual(sw, sb)
sw = leval.evalTrappedBishops (self.board, WHITE)
sb = leval.evalTrappedBishops (self.board, BLACK)
self.assertEqual(sw, sb)
for func in funcs:
sw = func(self.board, WHITE, phasew)
sb = func(self.board, BLACK, phaseb)
#print func, sw, sb
self.assertEqual(sw, sb)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
Farkal/kivy | kivy/core/image/img_tex.py | 54 | 1548 | '''
Tex: Compressed texture
'''
__all__ = ('ImageLoaderTex', )
import json
from struct import unpack
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
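# Layout of the ``.tex`` container as read by the loader below (derived from
# the parsing code, not from a formal spec):
#   bytes 0-3     magic string 'KTEX'
#   bytes 4-7     unsigned 32-bit header length (struct 'I', native byte order)
#   next N bytes  JSON header: image_size, texture_size, format, datalen
#   remainder     raw texture payload of exactly datalen bytes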
class ImageLoaderTex(ImageLoaderBase):
@staticmethod
def extensions():
return ('tex', )
def load(self, filename):
try:
fd = open(filename, 'rb')
if fd.read(4) != 'KTEX':
raise Exception('Invalid tex identifier')
headersize = unpack('I', fd.read(4))[0]
header = fd.read(headersize)
if len(header) != headersize:
raise Exception('Truncated tex header')
info = json.loads(header)
data = fd.read()
if len(data) != info['datalen']:
raise Exception('Truncated tex data')
except:
Logger.warning('Image: Image <%s> is corrupted' % filename)
raise
width, height = info['image_size']
tw, th = info['texture_size']
images = [data]
im = ImageData(width, height, str(info['format']), images[0],
source=filename)
'''
if len(dds.images) > 1:
images = dds.images
images_size = dds.images_size
for index in range(1, len(dds.images)):
w, h = images_size[index]
data = images[index]
im.add_mipmap(index, w, h, data)
'''
return [im]
# register
ImageLoader.register(ImageLoaderTex)
| mit |
axbaretto/beam | sdks/python/.tox/docs/lib/python2.7/site-packages/requests/packages/chardet/universaldetector.py | 1776 | 6840 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2122, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
sys.stderr.write('no probers hit minimum threshhold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
| apache-2.0 |
saisai/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/generator/xcode.py | 137 | 50429 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import errno
import os
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'mac_bundle',
'mac_bundle_resources',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
]
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is very heavily advised to keep each
# target in an entire project (even across multiple project files) using
# the same set of configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
import sys
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings. This is intended to be
# used very sparingly. Really, almost everything should go into
# target-specific build settings sections. The project-wide settings are
# only intended to be used in cases where Xcode attempts to resolve
# variable references in a project context as opposed to a target context,
# such as when resolving sourceTree references while building up the tree
# tree view for UI display.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
# targets is full list of targets in the project.
targets = []
    # does it define its own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
        has_custom_all = True
# If this target has a 'run_as' attribute, or is a test, add its
# target to the targets, and (if it's a test) add it the to the
# test targets.
is_test = int(target.get('test', 0))
if target.get('run_as') or is_test:
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
# The test runner target has a build phase that executes the
# test, if this has the 'test' attribute. If the 'run_as' tag
# doesn't exist (meaning that this must be a test), then we
# define a default test command line.
command = target.get('run_as', {
'action': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}']
})
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
        # Some tests end up using sockets, files on disk, etc. and can get
        # confused if more than one test runs at a time. The generator
# flag 'xcode_serialize_all_test_runs' controls the forcing of all
# tests serially. It defaults to True. To get serial runs this
# little bit of python does the same as the linux flock utility to
# make sure only one runs at a time.
command_prefix = ''
if is_test and serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
if is_test:
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
# and generate a second target that will run the tests runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
target_dict = xcode_target_to_target_dict[dependency_xct]
if target_dict and int(target_dict.get('test', 0)):
assert dependency_xct.test_runner
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
cached_xcode_version = None
def InstalledXcodeVersion():
"""Fetches the installed version of Xcode, returns empty string if it is
unable to figure it out."""
global cached_xcode_version
if not cached_xcode_version is None:
return cached_xcode_version
# Default to an empty string
cached_xcode_version = ''
# Collect the xcodebuild's version information.
try:
import subprocess
cmd = ['/usr/bin/xcodebuild', '-version']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
xcodebuild_version_info = proc.communicate()[0]
# Any error, return empty string
if proc.returncode:
xcodebuild_version_info = ''
except OSError:
# We failed to launch the tool
xcodebuild_version_info = ''
# Pull out the Xcode version itself.
match_line = re.search('^Xcode (.*)$', xcodebuild_version_info, re.MULTILINE)
if match_line:
cached_xcode_version = match_line.group(1)
# Done!
return cached_xcode_version
def AddSourceToTarget(source, pbxp, xct):
# TODO(mark): Perhaps this can be made a little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext != '':
ext = ext[1:].lower()
if ext in source_extensions:
xct.SourcesPhase().AddFile(source)
else:
# Files that aren't added to a sources build phase can still go into
# the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
_xcode_variable_re = re.compile('(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
if matches == None:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
if not variable in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
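# For illustration (hypothetical values, behavior as implemented above):
#   ExpandXcodeVariables('$(INPUT_FILE_BASE).cc', {'INPUT_FILE_BASE': 'two'})
#   returns 'two.cc', while a reference such as '$(UNKNOWN)' whose name is not
#   a key in the expansions dict is left in the string untouched.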
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. We use the type
# with "+bundle" appended if the target has "mac_bundle" set.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'shared_library+bundle': 'com.apple.product-type.framework',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_bundle = int(spec.get('mac_bundle', 0))
if type != 'none':
type_bundle_key = type
if is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
target_product_name = spec.get('product_name', None)
if target_product_name:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_extension=spec.get('product_extension', None))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
# Xcode does not have a distinct type for loadable_modules that are pure
    # BSD targets (i.e. unbundled). It uses the same setup as a shared_library
    # but the mach-o type is explicitly set in the settings. So before we do
# anything else, for this one case, we stuff in that one setting. This
# would allow the other data in the spec to change it if need be.
if type == 'loadable_module' and not is_bundle:
xccl.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
    # type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
if type != 'none' and (spec_actions or spec_rules):
      support_xccl = CreateXCConfigurationList(configuration_names)
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + ' Support',
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed, I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
    # variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_basename = posixpath.basename(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
          # Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = '@echo note: ' + ExpandXcodeVariables(message,
rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
        # TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s_%s.make' % (target_name, rule['rule_name'])
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
# TODO(mark): quote the list of concrete_output_dirs.
if len(concrete_output_dirs) > 0:
makefile.write('\tmkdir -p %s\n' % ' '.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
makefile.write('\t%s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec "${DEVELOPER_BIN_DIR}/make" -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
for group in ['inputs', 'inputs_excluded']:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" if it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
# Add "copies".
for copy_group in spec.get('copies', []):
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
for file in copy_group['files']:
pbxcp.AddFile(file)
# Excluded files can also go into the project file.
for key in ['sources', 'mac_bundle_resources']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
for action in spec.get('actions', []):
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
# TODO(mark): this logic isn't right. There are certain directories
# that are always searched, we should check to see if the library is
# in one of those directories, and if not, we should do the
# AppendBuildSetting thing.
if not posixpath.isabs(library) and not library.startswith('$'):
# TODO(mark): Need to check to see if library_dir is already in
# LIBRARY_SEARCH_PATHS.
library_dir = posixpath.dirname(library)
xct.AppendBuildSetting('LIBRARY_SEARCH_PATHS', library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
if 'defines' in configuration:
for define in configuration['defines']:
# If the define is of the form A="B", escape the quotes
# yielding A=\"\\\"B\\\"\". The extra set of quotes tell
# Xcode NOT to split on spaces, and still define a string
# literal (with quotes).
set_define = re.sub(r'^([^=]*=)"([^"]*)"$',
r'\1"\"\2\""', define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| bsd-3-clause |
tcoxon/association | google.py | 1 | 1780 |
import json, time
from math import log
from urllib import quote_plus
import urllib2
_COUNT_URL = 'https://ajax.googleapis.com/ajax/services/search/web?v=1.0&q='
_AUTOCOMPLETE_URL = 'http://suggestqueries.google.com/complete/search?client=chrome&q='
_GOOGLE_ENCODING = 'latin-1'
_last_query_time = 0.0
_QUERY_DELAY = 0.5
_FAIL_DELAY = 5.0
def urlopen(url):
'''
Let's avoid aggravating Google...
'''
global _last_query_time
now = time.time()
dt = now - _last_query_time
if dt < _QUERY_DELAY:
time.sleep(_QUERY_DELAY - dt)
_last_query_time = time.time()
return urllib2.urlopen(url)
def count_pages(q):
'''
Returns the number of search results Google returns for the given query.
'''
status = 0
response = None
while status != 200:
response = json.load(urlopen(_COUNT_URL + quote_plus(q)),
encoding=_GOOGLE_ENCODING)
status = response['responseStatus']
if status != 200:
time.sleep(_FAIL_DELAY)
print 'Failed! Retrying query...'
return int(response['responseData']['cursor']['resultCount'].replace(
',',''))
def ngd(x, y):
'''
Normalized Google Distance
http://arxiv.org/abs/cs.CL/0412098
'''
fx, fy, fxy = map(count_pages, (x, y, x+' '+y))
log_fx, log_fy, log_fxy = map(log, (fx, fy, fxy))
n = 1 # FIXME
return ((max(log_fx, log_fy) - log_fxy) /
(log(n) - min(log_fx, log_fy)))
def relevancy(x, y):
return (float(count_pages(x+' '+y)) /
min(count_pages(x), count_pages(y)))
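# Hedged notes and usage sketch: in the NGD paper the denominator uses log(N),
# where N is the total number of pages indexed by the search engine, so the
# placeholder n = 1 above (marked FIXME) collapses that term to log(1) = 0.
# The endpoints above belong to the old Google AJAX Search / suggest services,
# so live queries may no longer succeed. Illustrative calls:
#
#   ngd('horse', 'rider')        # smaller values suggest a stronger association
#   relevancy('horse', 'rider')  # co-occurrence count over the rarer term's count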
def autocomplete(q):
results = json.load(urlopen(_AUTOCOMPLETE_URL + quote_plus(q)),
encoding=_GOOGLE_ENCODING)
return [val for val in results[1] if val.startswith(q)]
| bsd-3-clause |
dellax/django-files-widget | topnotchdev/files_widget/settings.py | 1 | 1103 | from django.conf import settings
FILES_DIR = getattr(settings, 'FILES_WIDGET_FILES_DIR', 'uploads/files_widget/')
OLD_VALUE_STR = getattr(settings, 'FILES_WIDGET_OLD_VALUE_STR', 'old_%s_value')
DELETED_VALUE_STR = getattr(settings, 'FILES_WIDGET_DELETED_VALUE_STR', 'deleted_%s_value')
MOVED_VALUE_STR = getattr(settings, 'FILES_WIDGET_MOVED_VALUE_STR', 'moved_%s_value')
JQUERY_PATH = getattr(settings, 'FILES_WIDGET_JQUERY_PATH', '//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js')
JQUERY_UI_PATH = getattr(settings, 'FILES_WIDGET_JQUERY_UI_PATH', '//ajax.googleapis.com/ajax/libs/jqueryui/1.10.3/jquery-ui.min.js')
ADD_IMAGE_BY_URL = getattr(settings, 'FILES_WIDGET_ADD_IMAGE_BY_URL', True)
MAX_FILESIZE = getattr(settings, 'FILES_WIDGET_MAX_FILESIZE', 0)
FILE_TYPES = getattr(settings, 'FILES_WIDGET_FILE_TYPES', None)
USE_TRASH = getattr(settings, 'FILES_WIDGET_USE_TRASH', False)
TRASH_DIR = getattr(settings, 'FILES_WIDGET_TRASH_DIR', 'uploads/trash/files_widget/')
IMAGE_QUALITY = getattr(settings, 'FILES_WIDGET_IMAGE_QUALITY', 50)
PROJECT_DIR = getattr(settings, 'MEDIA_ROOT', '')
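# A hedged example of overriding these defaults from a project's Django
# settings.py (keys mirror the FILES_WIDGET_* names read above; the values are
# illustrative, not recommendations):
#
#   FILES_WIDGET_FILES_DIR = 'uploads/attachments/'
#   FILES_WIDGET_USE_TRASH = True
#   FILES_WIDGET_IMAGE_QUALITY = 75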
| mit |
ladybug-analysis-tools/honeybee | honeybee/radiance/command/gendaymtx.py | 1 | 4200 | # coding=utf-8
from _commandbase import RadianceCommand
from ..parameters.gendaymtx import GendaymtxParameters
import os
class Gendaymtx(RadianceCommand):
u"""
gendaymtx - Generate an annual Perez sky matrix from a weather tape.
Attributes:
output_name: An optional name for output file name. If None the name of
.epw file will be used.
wea_file: Full path to input wea file (Default: None).
gendaymtx_parameters: Radiance parameters for gendaymtx. If None Default
parameters will be set. You can use self.gendaymtx_parameters to view,
add or remove the parameters before executing the command.
Usage:
from honeybee.radiance.parameters.gendaymtx import GendaymtxParameters
from honeybee.radiance.command.gendaymtx import Gendaymtx
# create and modify gendaymtx_parameters
# generate sky matrix with default values
gmtx = GendaymtxParameters()
# ask only for direct sun
gmtx.only_direct = True
# create gendaymtx
dmtx = Gendaymtx(wea_file="C:/IZMIR_TUR.wea", gendaymtx_parameters=gmtx)
# run gendaymtx
dmtx.execute()
> c:/radiance/bin/gendaymtx: reading weather tape 'C:/ladybug/IZMIR_TUR.wea'
> c:/radiance/bin/gendaymtx: location 'IZMIR_TUR'
> c:/radiance/bin/gendaymtx: (lat,long)=(38.5,-27.0) degrees north, west
> c:/radiance/bin/gendaymtx: 146 sky patches per time step
> c:/radiance/bin/gendaymtx: stepping through month 1...
> c:/radiance/bin/gendaymtx: stepping through month 2...
> c:/radiance/bin/gendaymtx: stepping through month 3...
> c:/radiance/bin/gendaymtx: stepping through month 4...
> c:/radiance/bin/gendaymtx: stepping through month 5...
> c:/radiance/bin/gendaymtx: stepping through month 6...
> c:/radiance/bin/gendaymtx: stepping through month 7...
> c:/radiance/bin/gendaymtx: stepping through month 8...
> c:/radiance/bin/gendaymtx: stepping through month 9...
> c:/radiance/bin/gendaymtx: stepping through month 10...
> c:/radiance/bin/gendaymtx: stepping through month 11...
> c:/radiance/bin/gendaymtx: stepping through month 12...
> c:/radiance/bin/gendaymtx: writing matrix with 8760 time steps...
> c:/radiance/bin/gendaymtx: done.
# change it not to be verbose
dmtx.gendaymtx_parameters.verbose_report = False
# run it again
dmtx.execute()
>
"""
def __init__(self, output_name=None, wea_file=None, gendaymtx_parameters=None):
"""Init command."""
RadianceCommand.__init__(self)
self.output_name = output_name
self.wea_file = wea_file
self.gendaymtx_parameters = gendaymtx_parameters
@property
def gendaymtx_parameters(self):
"""Get and set gendaymtx_parameters."""
return self.__gendaymtx_parameters
@gendaymtx_parameters.setter
def gendaymtx_parameters(self, mtx):
self.__gendaymtx_parameters = mtx if mtx is not None \
else GendaymtxParameters()
assert hasattr(self.gendaymtx_parameters, "isRadianceParameters"), \
"input gendaymtx_parameters is not a valid parameters type."
@property
def output_file(self):
"""Output file address."""
return os.path.splitext(str(self.wea_file))[0] + ".mtx" \
if self.output_name is None and self.wea_file.normpath is not None \
else self.output_name
def to_rad_string(self, relative_path=False):
"""Return full command as a string."""
# generate the name from self.wea_file
rad_string = "%s %s %s > %s" % (
self.normspace(os.path.join(self.radbin_path, 'gendaymtx')),
self.gendaymtx_parameters.to_rad_string(),
self.normspace(self.wea_file),
self.normspace(self.output_file)
)
# make sure input files are set by user
self.check_input_files(rad_string)
return rad_string
@property
def input_files(self):
"""Input files for this command."""
return self.wea_file,
| gpl-3.0 |
postlund/home-assistant | homeassistant/components/cloudflare/__init__.py | 13 | 2256 | """Update the IP addresses of your Cloudflare DNS records."""
from datetime import timedelta
import logging
from pycfdns import CloudflareUpdater
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_EMAIL, CONF_ZONE
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_time_interval
_LOGGER = logging.getLogger(__name__)
CONF_RECORDS = "records"
DOMAIN = "cloudflare"
INTERVAL = timedelta(minutes=60)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_ZONE): cv.string,
vol.Required(CONF_RECORDS): vol.All(cv.ensure_list, [cv.string]),
}
)
},
extra=vol.ALLOW_EXTRA,
)
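# A sketch of the matching configuration.yaml entry (placeholder values, not
# real credentials; !secret is the usual Home Assistant secrets indirection):
#
#   cloudflare:
#     email: [email protected]
#     api_key: !secret cloudflare_api_key
#     zone: example.com
#     records:
#       - home
#       - www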
def setup(hass, config):
"""Set up the Cloudflare component."""
cfupdate = CloudflareUpdater()
email = config[DOMAIN][CONF_EMAIL]
key = config[DOMAIN][CONF_API_KEY]
zone = config[DOMAIN][CONF_ZONE]
records = config[DOMAIN][CONF_RECORDS]
def update_records_interval(now):
"""Set up recurring update."""
_update_cloudflare(cfupdate, email, key, zone, records)
def update_records_service(now):
"""Set up service for manual trigger."""
_update_cloudflare(cfupdate, email, key, zone, records)
track_time_interval(hass, update_records_interval, INTERVAL)
hass.services.register(DOMAIN, "update_records", update_records_service)
return True
def _update_cloudflare(cfupdate, email, key, zone, records):
"""Update DNS records for a given zone."""
_LOGGER.debug("Starting update for zone %s", zone)
headers = cfupdate.set_header(email, key)
_LOGGER.debug("Header data defined as: %s", headers)
zoneid = cfupdate.get_zoneID(headers, zone)
_LOGGER.debug("Zone ID is set to: %s", zoneid)
update_records = cfupdate.get_recordInfo(headers, zoneid, zone, records)
_LOGGER.debug("Records: %s", update_records)
result = cfupdate.update_records(headers, zoneid, update_records)
_LOGGER.debug("Update for zone %s is complete", zone)
if result is not True:
_LOGGER.warning(result)
| apache-2.0 |
PGower/Unsync | unsync_timetabler/unsync_timetabler/ptf9/student_timetable_import.py | 1 | 1359 | """Timetabler PTF9 import functions."""
import unsync
import petl
@unsync.command()
@unsync.option('--input-file', '-i', type=unsync.Path(exists=True, dir_okay=False, readable=True, resolve_path=True), help='Timetabler PTF9 file to extract data from.', required=True)
@unsync.option('--destination', '-d', required=True, help='The destination table that these courses will be stored in.')
def student_timetable_import(data, input_file, destination):
    """Import the student timetable information from a PTF9 file."""
student_timetables = petl.fromxml(input_file, '{http://www.timetabling.com.au/TDV9}StudentLessons/{http://www.timetabling.com.au/TDV9}StudentLesson', {
'StudentID': '{http://www.timetabling.com.au/TDV9}StudentID',
'CourseID': '{http://www.timetabling.com.au/TDV9}CourseID',
'LessonType': '{http://www.timetabling.com.au/TDV9}LessonType',
'ClassCode': '{http://www.timetabling.com.au/TDV9}ClassCode',
'RollClassCode': '{http://www.timetabling.com.au/TDV9}RollClassCode'
})
data.set(destination, student_timetables)
command = student_timetable_import
# default=[('Code', 'course_id'), ('Name', 'long_name'), ('Code', 'short_name'), ('ClassID', 'ptf9_id')]
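# Rough shape of the PTF9 fragment this import expects, inferred from the
# element paths above (the http://www.timetabling.com.au/TDV9 namespace is
# omitted for readability; this is an assumption, not a captured PTF9 file):
#
#   <StudentLessons>
#     <StudentLesson>
#       <StudentID>...</StudentID>
#       <CourseID>...</CourseID>
#       <LessonType>...</LessonType>
#       <ClassCode>...</ClassCode>
#       <RollClassCode>...</RollClassCode>
#     </StudentLesson>
#   </StudentLessons>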
| apache-2.0 |
shacker/django | tests/m2o_recursive/tests.py | 71 | 1678 | from django.test import TestCase
from .models import Category, Person
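# models.py is not shown here; from the assertions below, the two models look
# roughly like this (a sketch; field types, lengths, and options are
# assumptions):
#
#   class Category(models.Model):
#       name = models.CharField(max_length=20)
#       parent = models.ForeignKey('self', null=True, related_name='child_set')
#
#   class Person(models.Model):
#       full_name = models.CharField(max_length=20)
#       mother = models.ForeignKey('self', null=True,
#                                  related_name='mothers_child_set')
#       father = models.ForeignKey('self', null=True,
#                                  related_name='fathers_child_set')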
class ManyToOneRecursiveTests(TestCase):
def setUp(self):
self.r = Category(id=None, name='Root category', parent=None)
self.r.save()
self.c = Category(id=None, name='Child category', parent=self.r)
self.c.save()
def test_m2o_recursive(self):
self.assertQuerysetEqual(self.r.child_set.all(),
['<Category: Child category>'])
self.assertEqual(self.r.child_set.get(name__startswith='Child').id, self.c.id)
self.assertIsNone(self.r.parent)
self.assertQuerysetEqual(self.c.child_set.all(), [])
self.assertEqual(self.c.parent.id, self.r.id)
class MultipleManyToOneRecursiveTests(TestCase):
def setUp(self):
self.dad = Person(full_name='John Smith Senior', mother=None, father=None)
self.dad.save()
self.mom = Person(full_name='Jane Smith', mother=None, father=None)
self.mom.save()
self.kid = Person(full_name='John Smith Junior', mother=self.mom, father=self.dad)
self.kid.save()
def test_m2o_recursive2(self):
self.assertEqual(self.kid.mother.id, self.mom.id)
self.assertEqual(self.kid.father.id, self.dad.id)
self.assertQuerysetEqual(self.dad.fathers_child_set.all(),
['<Person: John Smith Junior>'])
self.assertQuerysetEqual(self.mom.mothers_child_set.all(),
['<Person: John Smith Junior>'])
self.assertQuerysetEqual(self.kid.mothers_child_set.all(), [])
self.assertQuerysetEqual(self.kid.fathers_child_set.all(), [])
| bsd-3-clause |
sadleader/odoo | addons/stock_account/wizard/__init__.py | 351 | 1105 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_change_standard_price
import stock_invoice_onshipping
import stock_valuation_history
import stock_return_picking
| agpl-3.0 |
Mistobaan/tensorflow | tensorflow/python/util/compat.py | 58 | 3499 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for Python 2 vs. 3 compatibility.
## Conversion routines
In addition to the functions below, `as_str` converts an object to a `str`.
@@as_bytes
@@as_text
@@as_str_any
## Types
The compatibility module also provides the following types:
* `bytes_or_text_types`
* `complex_types`
* `integral_types`
* `real_types`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers as _numbers
import numpy as _np
import six as _six
from tensorflow.python.util.all_util import remove_undocumented
def as_bytes(bytes_or_text, encoding='utf-8'):
"""Converts either bytes or unicode to `bytes`, using utf-8 encoding for text.
Args:
bytes_or_text: A `bytes`, `str`, or `unicode` object.
encoding: A string indicating the charset for encoding unicode.
Returns:
A `bytes` object.
Raises:
TypeError: If `bytes_or_text` is not a binary or unicode string.
"""
if isinstance(bytes_or_text, _six.text_type):
return bytes_or_text.encode(encoding)
elif isinstance(bytes_or_text, bytes):
return bytes_or_text
else:
raise TypeError('Expected binary or unicode string, got %r' %
(bytes_or_text,))
def as_text(bytes_or_text, encoding='utf-8'):
"""Returns the given argument as a unicode string.
Args:
bytes_or_text: A `bytes`, `str`, or `unicode` object.
encoding: A string indicating the charset for decoding unicode.
Returns:
A `unicode` (Python 2) or `str` (Python 3) object.
Raises:
TypeError: If `bytes_or_text` is not a binary or unicode string.
"""
if isinstance(bytes_or_text, _six.text_type):
return bytes_or_text
elif isinstance(bytes_or_text, bytes):
return bytes_or_text.decode(encoding)
else:
raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text)
# Convert an object to a `str` in both Python 2 and 3.
if _six.PY2:
as_str = as_bytes
else:
as_str = as_text
def as_str_any(value):
"""Converts to `str` as `str(value)`, but use `as_str` for `bytes`.
Args:
    value: An object that can be converted to `str`.
Returns:
A `str` object.
"""
if isinstance(value, bytes):
return as_str(value)
else:
return str(value)
# Numpy 1.8 scalars don't inherit from numbers.Integral in Python 3, so we
# need to check them specifically. The same goes for Real and Complex.
integral_types = (_numbers.Integral, _np.integer)
real_types = (_numbers.Real, _np.integer, _np.floating)
complex_types = (_numbers.Complex, _np.number)
# Either bytes or text.
bytes_or_text_types = (bytes, _six.text_type)
_allowed_symbols = [
'as_str',
'bytes_or_text_types',
'complex_types',
'integral_types',
'real_types',
]
remove_undocumented(__name__, _allowed_symbols)
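if __name__ == "__main__":
  # Illustrative usage sketch added for this document; it is not part of the
  # original TensorFlow module. It exercises the conversion helpers above.
  print(as_bytes(u"caf\xe9"))     # the utf-8 encoded bytes b'caf\xc3\xa9'
  print(as_text(b"caf\xc3\xa9"))  # the unicode string u'caf\xe9'
  print(as_str_any(3.5))          # '3.5'
  print(isinstance(u"abc", bytes_or_text_types))  # True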
| apache-2.0 |
leppa/home-assistant | homeassistant/components/sql/sensor.py | 5 | 4817 | """Sensor from an SQL Query."""
import datetime
import decimal
import logging
import sqlalchemy
from sqlalchemy.orm import scoped_session, sessionmaker
import voluptuous as vol
from homeassistant.components.recorder import CONF_DB_URL, DEFAULT_DB_FILE, DEFAULT_URL
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_COLUMN_NAME = "column"
CONF_QUERIES = "queries"
CONF_QUERY = "query"
def validate_sql_select(value):
"""Validate that value is a SQL SELECT query."""
if not value.lstrip().lower().startswith("select"):
raise vol.Invalid("Only SELECT queries allowed")
return value
_QUERY_SCHEME = vol.Schema(
{
vol.Required(CONF_COLUMN_NAME): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_QUERY): vol.All(cv.string, validate_sql_select),
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_QUERIES): [_QUERY_SCHEME], vol.Optional(CONF_DB_URL): cv.string}
)
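# Illustrative example configuration (added for this document, not part of the
# original component). The db_url, query, and column values below are made-up
# examples that merely satisfy the schema above:
#
#   sensor:
#     - platform: sql
#       db_url: "sqlite:////config/home-assistant_v2.db"
#       queries:
#         - name: "HA DB size"
#           query: "SELECT page_count * page_size / 1048576 AS size FROM pragma_page_count(), pragma_page_size();"
#           column: "size"
#           unit_of_measurement: "MiB"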
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the SQL sensor platform."""
db_url = config.get(CONF_DB_URL, None)
if not db_url:
db_url = DEFAULT_URL.format(hass_config_path=hass.config.path(DEFAULT_DB_FILE))
try:
engine = sqlalchemy.create_engine(db_url)
sessmaker = scoped_session(sessionmaker(bind=engine))
# Run a dummy query just to test the db_url
sess = sessmaker()
sess.execute("SELECT 1;")
except sqlalchemy.exc.SQLAlchemyError as err:
_LOGGER.error("Couldn't connect using %s DB_URL: %s", db_url, err)
return
finally:
sess.close()
queries = []
for query in config.get(CONF_QUERIES):
name = query.get(CONF_NAME)
query_str = query.get(CONF_QUERY)
unit = query.get(CONF_UNIT_OF_MEASUREMENT)
value_template = query.get(CONF_VALUE_TEMPLATE)
column_name = query.get(CONF_COLUMN_NAME)
if value_template is not None:
value_template.hass = hass
sensor = SQLSensor(
name, sessmaker, query_str, column_name, unit, value_template
)
queries.append(sensor)
add_entities(queries, True)
class SQLSensor(Entity):
"""Representation of an SQL sensor."""
def __init__(self, name, sessmaker, query, column, unit, value_template):
"""Initialize the SQL sensor."""
self._name = name
if "LIMIT" in query:
self._query = query
else:
self._query = query.replace(";", " LIMIT 1;")
self._unit_of_measurement = unit
self._template = value_template
self._column_name = column
self.sessionmaker = sessmaker
self._state = None
self._attributes = None
@property
def name(self):
"""Return the name of the query."""
return self._name
@property
def state(self):
"""Return the query's current state."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
def update(self):
"""Retrieve sensor data from the query."""
try:
sess = self.sessionmaker()
result = sess.execute(self._query)
self._attributes = {}
if not result.returns_rows or result.rowcount == 0:
_LOGGER.warning("%s returned no results", self._query)
self._state = None
return
for res in result:
_LOGGER.debug("result = %s", res.items())
data = res[self._column_name]
for key, value in res.items():
if isinstance(value, decimal.Decimal):
value = float(value)
if isinstance(value, datetime.date):
value = str(value)
self._attributes[key] = value
except sqlalchemy.exc.SQLAlchemyError as err:
_LOGGER.error("Error executing query %s: %s", self._query, err)
return
finally:
sess.close()
if self._template is not None:
self._state = self._template.async_render_with_possible_json_value(
data, None
)
else:
self._state = data
| apache-2.0 |
huggingface/transformers | src/transformers/models/vit/convert_vit_timm_to_pytorch.py | 2 | 9959 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ViT and non-distilled DeiT checkpoints from the timm library."""
import argparse
from pathlib import Path
import torch
from PIL import Image
import requests
import timm
from transformers import DeiTFeatureExtractor, ViTConfig, ViTFeatureExtractor, ViTForImageClassification, ViTModel
from transformers.utils import logging
from transformers.utils.imagenet_classes import id2label
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
]
)
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
]
)
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
]
)
return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
for i in range(config.num_hidden_layers):
if base_model:
prefix = ""
else:
prefix = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
: config.hidden_size, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
ignore_keys = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(k, None)
def rename_key(dct, old, new):
val = dct.pop(old)
dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
"""
Copy/paste/tweak model's weights to our ViT structure.
"""
# define default ViT configuration
config = ViTConfig()
base_model = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
base_model = True
config.patch_size = int(vit_name[-12:-10])
config.image_size = int(vit_name[-9:-6])
else:
config.num_labels = 1000
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
config.patch_size = int(vit_name[-6:-4])
config.image_size = int(vit_name[-3:])
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny"):
config.hidden_size = 192
config.intermediate_size = 768
config.num_hidden_layers = 12
config.num_attention_heads = 3
elif vit_name[9:].startswith("small"):
config.hidden_size = 384
config.intermediate_size = 1536
config.num_hidden_layers = 12
config.num_attention_heads = 6
else:
pass
else:
if vit_name[4:].startswith("small"):
config.hidden_size = 768
config.intermediate_size = 2304
config.num_hidden_layers = 8
config.num_attention_heads = 8
elif vit_name[4:].startswith("base"):
pass
elif vit_name[4:].startswith("large"):
config.hidden_size = 1024
config.intermediate_size = 4096
config.num_hidden_layers = 24
config.num_attention_heads = 16
elif vit_name[4:].startswith("huge"):
config.hidden_size = 1280
config.intermediate_size = 5120
config.num_hidden_layers = 32
config.num_attention_heads = 16
# load original model from timm
timm_model = timm.create_model(vit_name, pretrained=True)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
state_dict = timm_model.state_dict()
if base_model:
remove_classification_head_(state_dict)
rename_keys = create_rename_keys(config, base_model)
for src, dest in rename_keys:
rename_key(state_dict, src, dest)
read_in_q_k_v(state_dict, config, base_model)
# load HuggingFace model
if vit_name[-5:] == "in21k":
model = ViTModel(config).eval()
else:
model = ViTForImageClassification(config).eval()
model.load_state_dict(state_dict)
# Check outputs on an image, prepared by ViTFeatureExtractor/DeiTFeatureExtractor
if "deit" in vit_name:
feature_extractor = DeiTFeatureExtractor(size=config.image_size)
else:
feature_extractor = ViTFeatureExtractor(size=config.image_size)
encoding = feature_extractor(images=prepare_img(), return_tensors="pt")
pixel_values = encoding["pixel_values"]
outputs = model(pixel_values)
if base_model:
timm_pooled_output = timm_model.forward_features(pixel_values)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
else:
timm_logits = timm_model(pixel_values)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving feature extractor to {pytorch_dump_folder_path}")
feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
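    # Illustrative invocation (added for this document; the output path is a
    # made-up example):
    #   python convert_vit_timm_to_pytorch.py \
    #     --vit_name vit_base_patch16_224 \
    #     --pytorch_dump_folder_path ./vit-base-patch16-224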
| apache-2.0 |
loco-odoo/localizacion_co | openerp/addons-extra/odoo-pruebas/odoo-server/addons/lunch/tests/test_lunch.py | 345 | 5045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.tests import common
class Test_Lunch(common.TransactionCase):
def setUp(self):
"""*****setUp*****"""
super(Test_Lunch, self).setUp()
cr, uid = self.cr, self.uid
self.res_users = self.registry('res.users')
self.lunch_order = self.registry('lunch.order')
self.lunch_order_line = self.registry('lunch.order.line')
self.lunch_cashmove = self.registry('lunch.cashmove')
self.lunch_product = self.registry('lunch.product')
self.lunch_alert = self.registry('lunch.alert')
self.lunch_product_category = self.registry('lunch.product.category')
self.demo_id = self.res_users.search(cr, uid, [('name', '=', 'Demo User')])
self.product_bolognese_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'lunch', 'product_Bolognese')
self.product_Bolognese_id = self.product_bolognese_ref and self.product_bolognese_ref[1] or False
self.new_id_order = self.lunch_order.create(cr,uid,{
'user_id': self.demo_id[0],
'order_line_ids':'[]',
},context=None)
self.new_id_order_line = self.lunch_order_line.create(cr,uid,{
'order_id':self.new_id_order,
'product_id':self.product_Bolognese_id,
'note': '+Emmental',
'cashmove': [],
'price': self.lunch_product.browse(cr,uid,self.product_Bolognese_id,context=None).price,
})
def test_00_lunch_order(self):
"""Change the state of an order line from 'new' to 'ordered'. Check that there are no cashmove linked to that order line"""
cr, uid = self.cr, self.uid
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
        #we check that our order_line is a 'new' one and that there is no cashmove linked to that order_line:
self.assertEqual(self.order_one.state,'new')
self.assertEqual(list(self.order_one.cashmove), [])
#we order that orderline so it's state will be 'ordered'
self.order_one.order()
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
        #we check that our order_line is an 'ordered' one and that there is no cashmove linked to that order_line:
self.assertEqual(self.order_one.state,'ordered')
self.assertEqual(list(self.order_one.cashmove), [])
def test_01_lunch_order(self):
"""Change the state of an order line from 'new' to 'ordered' then to 'confirmed'. Check that there is a cashmove linked to the order line"""
cr, uid = self.cr, self.uid
self.test_00_lunch_order()
#We receive the order so we confirm the order line so it's state will be 'confirmed'
#A cashmove will be created and we will test that the cashmove amount equals the order line price
self.order_one.confirm()
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
        #we check that our order_line is a 'confirmed' one and that there is a cashmove linked to that order_line with an amount equal to the order line price:
self.assertEqual(self.order_one.state,'confirmed')
self.assertTrue(self.order_one.cashmove)
self.assertTrue(self.order_one.cashmove[0].amount==-self.order_one.price)
def test_02_lunch_order(self):
"""Change the state of an order line from 'confirmed' to 'cancelled' and check that the cashmove linked to that order line will be deleted"""
cr, uid = self.cr, self.uid
self.test_01_lunch_order()
#We have a confirmed order with its associate cashmove
#We execute the cancel function
self.order_one.cancel()
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
#We check that the state is cancelled and that the cashmove has been deleted
self.assertEqual(self.order_one.state,'cancelled')
self.assertFalse(self.order_one.cashmove) | agpl-3.0 |
rwl/muntjac | muntjac/data/validatable.py | 1 | 2850 | # @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
"""Interface for validatable objects."""
class IValidatable(object):
"""Interface for validatable objects. Defines methods to verify if the
object's value is valid or not, and to add, remove and list registered
validators of the object.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
@see: L{IValidator}
"""
def addValidator(self, validator):
"""Adds a new validator for this object. The validator's
L{Validator.validate} method is activated every time the
object's value needs to be verified, that is, when the L{isValid}
method is called. This usually happens when the object's value changes.
@param validator:
the new validator
"""
raise NotImplementedError
def removeValidator(self, validator):
"""Removes a previously registered validator from the object. The
specified validator is removed from the object and its C{validate}
method is no longer called in L{isValid}.
@param validator:
the validator to remove
"""
raise NotImplementedError
def getValidators(self):
"""Lists all validators currently registered for the object. If no
validators are registered, returns C{None}.
@return: collection of validators or C{None}
"""
raise NotImplementedError
def isValid(self):
"""Tests the current value of the object against all registered
validators. The registered validators are iterated and for each the
L{Validator.validate} method is called. If any validator
throws the L{InvalidValueException} this method returns
C{False}.
@return: C{True} if the registered validators concur that the
value is valid, C{False} otherwise
"""
raise NotImplementedError
def validate(self):
"""Checks the validity of the validatable. If the validatable is valid
this method should do nothing, and if it's not valid, it should throw
C{InvalidValueException}
@raise InvalidValueException:
if the value is not valid
"""
raise NotImplementedError
def isInvalidAllowed(self):
"""Checks the validabtable object accept invalid values.The default
value is C{True}.
"""
raise NotImplementedError
def setInvalidAllowed(self, invalidValueAllowed):
"""Should the validabtable object accept invalid values. Supporting
this configuration possibility is optional. By default invalid values
are allowed.
@raise NotImplementedError:
if the setInvalidAllowed is not supported.
"""
raise NotImplementedError
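class StringValidatable(IValidatable):
    """A minimal illustrative sketch added for this document; it is not part
    of Muntjac. It shows one possible way to satisfy the L{IValidatable}
    interface for a plain string value. The class name and the attributes
    used here are invented for the example.
    """

    def __init__(self, value=''):
        self._value = value
        self._validators = []
        self._invalidAllowed = True

    def addValidator(self, validator):
        self._validators.append(validator)

    def removeValidator(self, validator):
        if validator in self._validators:
            self._validators.remove(validator)

    def getValidators(self):
        return self._validators if self._validators else None

    def isValid(self):
        for v in self._validators:
            try:
                v.validate(self._value)
            except Exception:
                return False
        return True

    def validate(self):
        for v in self._validators:
            v.validate(self._value)

    def isInvalidAllowed(self):
        return self._invalidAllowed

    def setInvalidAllowed(self, invalidValueAllowed):
        self._invalidAllowed = invalidValueAllowed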
| apache-2.0 |
gkumar7/AlGDock | AlGDock/BindingPMF.py | 2 | 149901 | #!/usr/bin/env python
import os # Miscellaneous operating system interfaces
from os.path import join
import cPickle as pickle
import gzip
import copy
import time
import numpy as np
import MMTK
import MMTK.Units
from MMTK.ParticleProperties import Configuration
from MMTK.ForceFields import ForceField
import Scientific
try:
from Scientific._vector import Vector
except:
from Scientific.Geometry.VectorModule import Vector
import AlGDock as a
# Define allowed_phases list and arguments dictionary
from AlGDock.BindingPMF_arguments import *
import pymbar.timeseries
import multiprocessing
from multiprocessing import Process
import psutil
# For profiling. Unnecessary for normal execution.
# from memory_profiler import profile
#############
# Constants #
#############
R = 8.3144621 * MMTK.Units.J / MMTK.Units.mol / MMTK.Units.K
term_map = {
'cosine dihedral angle':'MM',
'electrostatic/pair sum':'MM',
'harmonic bond':'MM',
'harmonic bond angle':'MM',
'Lennard-Jones':'MM',
'site':'site',
'sLJr':'sLJr',
'sELE':'sELE',
'sLJa':'sLJa',
'LJr':'LJr',
'LJa':'LJa',
'ELE':'ELE',
'electrostatic':'misc'}
########################
# Auxilliary functions #
########################
def random_rotate():
"""
Return a random rotation matrix
"""
u = np.random.uniform(size=3)
# Random quaternion
q = np.array([np.sqrt(1-u[0])*np.sin(2*np.pi*u[1]),
np.sqrt(1-u[0])*np.cos(2*np.pi*u[1]),
np.sqrt(u[0])*np.sin(2*np.pi*u[2]),
np.sqrt(u[0])*np.cos(2*np.pi*u[2])])
# Convert the quaternion into a rotation matrix
rotMat = np.array([[q[0]*q[0] + q[1]*q[1] - q[2]*q[2] - q[3]*q[3],
2*q[1]*q[2] - 2*q[0]*q[3],
2*q[1]*q[3] + 2*q[0]*q[2]],
[2*q[1]*q[2] + 2*q[0]*q[3],
q[0]*q[0] - q[1]*q[1] + q[2]*q[2] - q[3]*q[3],
2*q[2]*q[3] - 2*q[0]*q[1]],
[2*q[1]*q[3] - 2*q[0]*q[2],
2*q[2]*q[3] + 2*q[0]*q[1],
q[0]*q[0] - q[1]*q[1] - q[2]*q[2] + q[3]*q[3]]])
return rotMat
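def _check_random_rotate():
  """
  Illustrative sanity check added for this document (not part of AlGDock):
  a proper rotation matrix satisfies R R^T = I and det(R) = 1.
  """
  rotMat = random_rotate()
  assert np.allclose(np.dot(rotMat, rotMat.T), np.eye(3))
  assert np.isclose(np.linalg.det(rotMat), 1.0)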
def merge_dictionaries(dicts, required_consistency=[]):
"""
Merges a list of dictionaries, giving priority to items in descending order.
Items in the required_consistency list must be consistent with one another.
"""
merged = {}
for a in range(len(dicts)): # Loop over all dictionaries,
# giving priority to the first
if not isinstance(dicts[a],dict):
continue
for key in dicts[a].keys():
if isinstance(dicts[a][key],dict): # item is a dictionary
merged[key] = merge_dictionaries(
[dicts[n][key] for n in range(len(dicts)) if key in dicts[n].keys()],
required_consistency=required_consistency)
elif (key not in merged.keys()):
# Merged dictionary will contain value from
# first dictionary where key appears
merged[key] = dicts[a][key]
# Check for consistency with other dictionaries
for b in (range(a) + range(a+1,len(dicts))):
if isinstance(dicts[b],dict) and (key in dicts[b].keys()) \
and (dicts[a][key] is not None) and (dicts[b][key] is not None):
if (isinstance(dicts[b][key],np.ndarray)):
inconsistent_items = (dicts[b][key]!=dicts[a][key]).any()
else:
inconsistent_items = (dicts[b][key]!=dicts[a][key])
if inconsistent_items:
if key in required_consistency:
print 'Dictionary items for %s are inconsistent:'%key
print dicts[a][key]
print dicts[b][key]
raise Exception('Items must be consistent!')
elif (merged[key] is None): # Replace None
merged[key] = dicts[a][key]
return merged
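# Illustrative example (added for this document): dictionaries earlier in the
# list take priority, and nested dictionaries are merged recursively, e.g.
#   merge_dictionaries([{'a':1, 'c':{'x':1}}, {'a':2, 'b':3, 'c':{'y':2}}])
#   returns {'a':1, 'b':3, 'c':{'x':1, 'y':2}}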
def convert_dictionary_relpath(d, relpath_o=None, relpath_n=None):
"""
Converts all file names in a dictionary from one relative path to another.
If relpath_o is None, nothing is joined to the original path.
If relpath_n is None, an absolute path is used.
"""
converted = {}
for key in d.keys():
if d[key] is None:
pass
elif isinstance(d[key],dict):
converted[key] = convert_dictionary_relpath(d[key],
relpath_o = relpath_o, relpath_n = relpath_n)
elif isinstance(d[key],str):
if d[key]=='default':
converted[key] = 'default'
else:
if relpath_o is not None:
p = os.path.abspath(join(relpath_o,d[key]))
else:
p = os.path.abspath(d[key])
# if os.path.exists(p): # Only save file names for existent paths
if relpath_n is not None:
converted[key] = os.path.relpath(p,relpath_n)
else:
converted[key] = p
return converted
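# Illustrative example (added for this document; the file name is made up):
# when run from a parent directory containing sibling folders dock/ and cool/,
#   convert_dictionary_relpath({'prmtop':'ligand.prmtop'},
#                              relpath_o='dock', relpath_n='cool')
# returns {'prmtop': '../dock/ligand.prmtop'}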
def HMStime(s):
"""
  Given a time in seconds, returns an appropriately formatted string.
"""
if s<60.:
return '%.3f s'%s
elif s<3600.:
return '%d:%.3f'%(int(s/60%60),s%60)
else:
return '%d:%d:%.3f'%(int(s/3600),int(s/60%60),s%60)
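# e.g. (illustrative) HMStime(75.5) returns '1:15.500' and
# HMStime(3675.5) returns '1:1:15.500'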
def dict_view(dict_c, indent=2, relpath=None, show_None=False):
view_string = ''
for key in dict_c.keys():
if dict_c[key] is None:
if show_None:
view_string += ' '*indent + key + ': None\n'
elif isinstance(dict_c[key],dict):
subdict_string = dict_view(dict_c[key], indent+2, relpath=relpath)
if subdict_string!='':
view_string += ' '*indent + key + ':\n' + subdict_string
elif isinstance(dict_c[key],str):
view_string += ' '*indent + key + ': '
if relpath is not None:
view_string += os.path.relpath(dict_c[key],relpath) + '\n'
if not os.path.exists(dict_c[key]):
view_string += ' '*(indent+len(key)) + 'DOES NOT EXIST\n'
else:
view_string += dict_c[key] + '\n'
else:
view_string += ' '*indent + key + ': ' + repr(dict_c[key]) + '\n'
return view_string
class NullDevice():
"""
A device to suppress output
"""
def write(self, s):
pass
##############
# Main Class #
##############
class BPMF:
def __init__(self, **kwargs): # Energy values
"""Parses the input arguments and runs the requested docking calculation"""
# Set undefined keywords to None
for key in arguments.keys():
if not key in kwargs.keys():
kwargs[key] = None
if kwargs['dir_grid'] is None:
kwargs['dir_grid'] = ''
mod_path = join(os.path.dirname(a.__file__),'BindingPMF.py')
print """
###########
# AlGDock #
###########
Molecular docking with adaptively scaled alchemical interaction grids
version {0}
in {1}
last modified {2}
""".format(a.__version__, mod_path, \
time.ctime(os.path.getmtime(mod_path)))
# Multiprocessing options.
# Default is to use 1 core.
# If cores is a number, then that number (or the maximum number)
# of cores will be used.
# Default
available_cores = multiprocessing.cpu_count()
if kwargs['cores'] is None:
self._cores = 1
elif (kwargs['cores']==-1):
self._cores = available_cores
else:
self._cores = min(kwargs['cores'], available_cores)
print "using %d/%d available cores"%(self._cores, available_cores)
if kwargs['rotate_matrix'] is not None:
self._view_args_rotate_matrix = kwargs['rotate_matrix']
self.confs = {'cool':{}, 'dock':{}}
self.dir = {}
self.dir['start'] = os.getcwd()
if kwargs['dir_dock'] is not None:
self.dir['dock'] = os.path.abspath(kwargs['dir_dock'])
else:
self.dir['dock'] = os.path.abspath('.')
if kwargs['dir_cool'] is not None:
self.dir['cool'] = os.path.abspath(kwargs['dir_cool'])
else:
self.dir['cool'] = self.dir['dock'] # Default that may be
# overwritten by stored directory
# Load previously stored file names and arguments
FNs = {}
args = {}
for p in ['dock','cool']:
params = self._load(p)
if params is not None:
(fn_dict, arg_dict) = params
FNs[p] = convert_dictionary_relpath(fn_dict,
relpath_o=self.dir[p], relpath_n=None)
args[p] = arg_dict
if (p=='dock') and (kwargs['dir_cool'] is None) and \
('dir_cool' in FNs[p].keys()) and \
(FNs[p]['dir_cool'] is not None):
self.dir['cool'] = FNs[p]['dir_cool']
else:
FNs[p] = {}
args[p] = {}
print '\n*** Directories ***'
print dict_view(self.dir)
# Identify tarballs
tarFNs = [kwargs[prefix + '_tarball'] \
for prefix in ['ligand','receptor','complex'] \
if (prefix + '_tarball') in kwargs.keys() and
kwargs[(prefix + '_tarball')] is not None]
for p in ['cool','dock']:
if (p in FNs.keys()) and ('tarball' in FNs[p].keys()):
tarFNs += [tarFN for tarFN in FNs[p]['tarball'].values() \
if tarFN is not None]
tarFNs = set([FN for FN in tarFNs if os.path.isfile(FN)])
# Identify files to look for in the tarballs
seekFNs = []
if len(tarFNs)>0:
for prefix in ['ligand','receptor','complex']:
for postfix in ('database','prmtop','inpcrd','fixed_atoms'):
key = '%s_%s'%(prefix,postfix)
if (key in kwargs.keys()) and (kwargs[key] is not None):
FN = os.path.abspath(kwargs[key])
if not os.path.isfile(FN):
seekFNs.append(os.path.basename(FN))
for p in ['cool','dock']:
if p in FNs.keys():
for level1 in ['ligand_database','prmtop','inpcrd','fixed_atoms']:
if level1 in FNs[p].keys():
if isinstance(FNs[p][level1],dict):
for level2 in ['L','R','RL']:
if level2 in FNs[p][level1].keys():
seekFNs.append(os.path.basename(FNs[p][level1][level2]))
else:
seekFNs.append(os.path.basename(FNs[p][level1]))
seekFNs = set(seekFNs)
seek_frcmod = (kwargs['frcmodList'] is None) or \
(not os.path.isfile(kwargs['frcmodList'][0]))
# Decompress tarballs into self.dir['dock']
self._toClear = []
if len(seekFNs)>0:
import tarfile
print '>>> Decompressing tarballs'
print 'looking for:\n ' + '\n '.join(seekFNs)
if seek_frcmod:
print ' and frcmod files'
for tarFN in tarFNs:
print 'reading '+tarFN
tarF = tarfile.open(tarFN,'r')
for member in tarF.getmembers():
for seekFN in seekFNs:
if member.name.endswith(seekFN):
tarF.extract(member, path = self.dir['dock'])
self._toClear.append(os.path.join(self.dir['dock'],seekFN))
print ' extracted '+seekFN
if seek_frcmod and member.name.endswith('frcmod'):
FN = os.path.abspath(os.path.join(self.dir['dock'],member.name))
if not os.path.isfile(FN):
tarF.extract(member, path = self.dir['dock'])
kwargs['frcmodList'] = [FN]
self._toClear.append(FN)
print ' extracted '+FN
# Set up file name dictionary
print '\n*** Files ***'
for p in ['cool','dock']:
if p in FNs.keys():
if FNs[p]!={}:
print 'previously stored in %s directory:'%p
print dict_view(FNs[p], relpath=self.dir['start'])
if not (FNs['cool']=={} and FNs['dock']=={}):
print 'from arguments and defaults:'
def cdir_or_dir_dock(FN):
if FN is not None:
return a.findPath([FN,join(self.dir['dock'],FN)])
else:
return None
if kwargs['frcmodList'] is not None:
if isinstance(kwargs['frcmodList'],str):
kwargs['frcmodList'] = [kwargs['frcmodList']]
kwargs['frcmodList'] = [cdir_or_dir_dock(FN) \
for FN in kwargs['frcmodList']]
FNs['new'] = {
'ligand_database':cdir_or_dir_dock(kwargs['ligand_database']),
'forcefield':a.findPath([kwargs['forcefield'],'../Data/gaff.dat'] + \
a.search_paths['gaff.dat']),
'frcmodList':kwargs['frcmodList'],
'tarball':{'L':a.findPath([kwargs['ligand_tarball']]),
'R':a.findPath([kwargs['receptor_tarball']]),
'RL':a.findPath([kwargs['complex_tarball']])},
'prmtop':{'L':cdir_or_dir_dock(kwargs['ligand_prmtop']),
'R':cdir_or_dir_dock(kwargs['receptor_prmtop']),
'RL':cdir_or_dir_dock(kwargs['complex_prmtop'])},
'inpcrd':{'L':cdir_or_dir_dock(kwargs['ligand_inpcrd']),
'R':cdir_or_dir_dock(kwargs['receptor_inpcrd']),
'RL':cdir_or_dir_dock(kwargs['complex_inpcrd'])},
'fixed_atoms':{'R':cdir_or_dir_dock(kwargs['receptor_fixed_atoms']),
'RL':cdir_or_dir_dock(kwargs['complex_fixed_atoms'])},
'grids':{'LJr':a.findPath([kwargs['grid_LJr'],
join(kwargs['dir_grid'],'LJr.nc'),
join(kwargs['dir_grid'],'LJr.dx'),
join(kwargs['dir_grid'],'LJr.dx.gz')]),
'LJa':a.findPath([kwargs['grid_LJa'],
join(kwargs['dir_grid'],'LJa.nc'),
join(kwargs['dir_grid'],'LJa.dx'),
join(kwargs['dir_grid'],'LJa.dx.gz')]),
'ELE':a.findPath([kwargs['grid_ELE'],
join(kwargs['dir_grid'],'electrostatic.nc'),
join(kwargs['dir_grid'],'electrostatic.dx'),
join(kwargs['dir_grid'],'electrostatic.dx.gz'),
join(kwargs['dir_grid'],'pbsa.nc'),
join(kwargs['dir_grid'],'pbsa.dx'),
join(kwargs['dir_grid'],'pbsa.dx.gz')])},
'score':'default' if kwargs['score']=='default' \
else a.findPath([kwargs['score']]),
'dir_cool':self.dir['cool'],
'namd':a.findPath([kwargs['namd']] + a.search_paths['namd']),
'vmd':a.findPath([kwargs['vmd']] + a.search_paths['vmd']),
'sander':a.findPath([kwargs['sander']] + a.search_paths['sander']),
'convert':a.findPath([kwargs['convert']] + a.search_paths['convert']),
'font':a.findPath([kwargs['font']] + a.search_paths['font'])}
if not (FNs['cool']=={} and FNs['dock']=={}):
print dict_view(FNs['new'], relpath=self.dir['start'])
print 'to be used:'
self._FNs = merge_dictionaries(
[FNs[src] for src in ['new','cool','dock']])
# Default: a force field modification is in the same directory as the ligand
if (self._FNs['frcmodList'] is None):
if self._FNs['prmtop']['L'] is not None:
dir_lig = os.path.dirname(self._FNs['prmtop']['L'])
frcmodpaths = [os.path.abspath(join(dir_lig, \
os.path.basename(self._FNs['prmtop']['L'])[:-7]+'.frcmod'))]
else:
dir_lig = '.'
frcmodpaths = []
if kwargs['frcmodList'] is None:
frcmodpaths.extend([os.path.abspath(join(dir_lig,'lig.frcmod')),\
os.path.abspath(join(dir_lig,'ligand.frcmod'))])
frcmod = a.findPath(frcmodpaths)
self._FNs['frcmodList'] = [frcmod]
elif not isinstance(self._FNs['frcmodList'],list):
self._FNs['frcmodList'] = [self._FNs['frcmodList']]
# Check for existence of required files
do_dock = (hasattr(args,'run_type') and \
(args.run_type not in ['store_params', 'cool']))
for key in ['ligand_database','forcefield']:
if (self._FNs[key] is None) or (not os.path.isfile(self._FNs[key])):
raise Exception('File for %s is missing!'%key)
for (key1,key2) in [('prmtop','L'),('inpcrd','L')]:
FN = self._FNs[key1][key2]
if (FN is None) or (not os.path.isfile(FN)):
raise Exception('File for %s %s is missing'%(key1,key2))
for (key1,key2) in [\
('prmtop','RL'), ('inpcrd','RL'), \
('grids','LJr'), ('grids','LJa'), ('grids','ELE')]:
FN = self._FNs[key1][key2]
errstring = 'Missing file %s %s required for docking!'%(key1,key2)
if (FN is None) or (not os.path.isfile(FN)):
if do_dock:
raise Exception(errstring)
else:
print errstring
if ((self._FNs['inpcrd']['RL'] is None) and \
(self._FNs['inpcrd']['R'] is None)):
if do_dock:
raise Exception('Receptor coordinates needed for docking!')
else:
print 'Receptor coordinates needed for docking!'
print dict_view(self._FNs, relpath=self.dir['start'], show_None=True)
args['default_cool'] = {
'protocol':'Adaptive',
'therm_speed':0.2,
'T_HIGH':600.,
'T_TARGET':300.,
'sampler':'NUTS',
'seeds_per_state':100,
'steps_per_seed':2500,
'repX_cycles':20,
'min_repX_acc':0.3,
'sweeps_per_cycle':200,
'attempts_per_sweep':100,
'steps_per_sweep':1000,
'snaps_per_independent':3.0,
'phases':['NAMD_Gas','NAMD_GBSA'],
'keep_intermediate':False,
'GMC_attempts': 0,
'GMC_tors_threshold': 0.0 }
args['default_dock'] = dict(args['default_cool'].items() + {
'site':None, 'site_center':None, 'site_direction':None,
'site_max_X':None, 'site_max_R':None,
'site_density':50., 'site_measured':None,
'MCMC_moves':1,
'rmsd':False}.items() + \
[('receptor_'+phase,None) for phase in allowed_phases])
args['default_dock']['snaps_per_independent'] = 25.0
# Store passed arguments in dictionary
for p in ['cool','dock']:
args['new_'+p] = {}
for key in args['default_'+p].keys():
specific_key = p + '_' + key
if (specific_key in kwargs.keys()) and \
(kwargs[specific_key] is not None):
# Use the specific key if it exists
args['new_'+p][key] = kwargs[specific_key]
elif (key in ['site_center', 'site_direction'] +
['receptor_'+phase for phase in allowed_phases]) and \
(kwargs[key] is not None):
# Convert these to arrays of floats
args['new_'+p][key] = np.array(kwargs[key], dtype=float)
elif key in kwargs.keys():
# Use the general key
args['new_'+p][key] = kwargs[key]
self.params = {}
for p in ['cool','dock']:
self.params[p] = merge_dictionaries(
[args[src] for src in ['new_'+p,p,'default_'+p]])
self._scalables = ['sLJr','sELE','LJr','LJa','ELE']
# Variables dependent on the parameters
self.original_Es = [[{}]]
for phase in allowed_phases:
if self.params['dock']['receptor_'+phase] is not None:
self.original_Es[0][0]['R'+phase] = \
self.params['dock']['receptor_'+phase]
else:
self.original_Es[0][0]['R'+phase] = None
self.T_HIGH = self.params['cool']['T_HIGH']
self.T_TARGET = self.params['cool']['T_TARGET']
self.RT_TARGET = R * self.params['cool']['T_TARGET']
print '>>> Setting up the simulation'
self._setup_universe(do_dock = do_dock)
print '\n*** Simulation parameters and constants ***'
for p in ['cool','dock']:
print 'for %s:'%p
print dict_view(self.params[p])
self.timing = {'start':time.time(), 'max':kwargs['max_time']}
self._run(kwargs['run_type'])
def _setup_universe(self, do_dock=True):
"""Creates an MMTK InfiniteUniverse and adds the ligand"""
# Set up the system
import sys
original_stderr = sys.stderr
sys.stderr = NullDevice()
MMTK.Database.molecule_types.directory = \
os.path.dirname(self._FNs['ligand_database'])
self.molecule = MMTK.Molecule(\
os.path.basename(self._FNs['ligand_database']))
sys.stderr = original_stderr
# Helpful variables for referencing and indexing atoms in the molecule
    self.molecule.heavy_atoms = [ind \
      for (atm,ind) in zip(self.molecule.atoms, range(self.molecule.numberOfAtoms())) \
      if atm.type.name!='hydrogen']
self.molecule.nhatoms = len(self.molecule.heavy_atoms)
self.molecule.prmtop_atom_order = np.array([atom.number \
for atom in self.molecule.prmtop_order], dtype=int)
self.molecule.inv_prmtop_atom_order = np.zeros(shape=len(self.molecule.prmtop_atom_order), dtype=int)
for i in range(len(self.molecule.prmtop_atom_order)):
self.molecule.inv_prmtop_atom_order[self.molecule.prmtop_atom_order[i]] = i
# Create universe and add molecule to universe
self.universe = MMTK.Universe.InfiniteUniverse()
self.universe.addObject(self.molecule)
self._evaluators = {} # Store evaluators
self._OpenMM_sims = {} # Store OpenMM simulations
self._ligand_natoms = self.universe.numberOfAtoms()
# Force fields
from MMTK.ForceFields import Amber12SBForceField
self._forceFields = {}
self._forceFields['gaff'] = Amber12SBForceField(
parameter_file=self._FNs['forcefield'],mod_files=self._FNs['frcmodList'])
# Samplers may accept the following options:
# steps - number of MD steps
# T - temperature in K
# delta_t - MD time step
# normalize - normalizes configurations
# adapt - uses an adaptive time step
self.sampler = {}
from NUTS import NUTSIntegrator # @UnresolvedImport
self.sampler['init'] = NUTSIntegrator(self.universe)
from AlGDock.Integrators.SmartDarting.SmartDarting \
import SmartDartingIntegrator # @UnresolvedImport
self.sampler['cool_SmartDarting'] = SmartDartingIntegrator(self.universe, \
self.molecule, extended=False)
self.sampler['dock_SmartDarting'] = SmartDartingIntegrator(self.universe, \
self.molecule, extended=True)
for p in ['cool', 'dock']:
if self.params[p]['sampler'] == 'NUTS':
self.sampler[p] = NUTSIntegrator(self.universe)
elif self.params[p]['sampler'] == 'HMC':
from AlGDock.Integrators.HamiltonianMonteCarlo.HamiltonianMonteCarlo \
import HamiltonianMonteCarloIntegrator
self.sampler[p] = HamiltonianMonteCarloIntegrator(self.universe)
elif self.params[p]['sampler'] == 'TDHMC':
from Integrators.TDHamiltonianMonteCarlo.TDHamiltonianMonteCarlo \
import TDHamiltonianMonteCarloIntegrator
self.sampler[p] = TDHamiltonianMonteCarloIntegrator(self.universe)
elif self.params[p]['sampler'] == 'VV':
from AlGDock.Integrators.VelocityVerlet.VelocityVerlet \
import VelocityVerletIntegrator
self.sampler[p] = VelocityVerletIntegrator(self.universe)
else:
raise Exception('Unrecognized sampler!')
# Determine ligand atomic index
if (self._FNs['prmtop']['R'] is not None) and \
(self._FNs['prmtop']['RL'] is not None):
import AlGDock.IO
IO_prmtop = AlGDock.IO.prmtop()
prmtop_R = IO_prmtop.read(self._FNs['prmtop']['R'])
prmtop_RL = IO_prmtop.read(self._FNs['prmtop']['RL'])
ligand_ind = [ind for ind in range(len(prmtop_RL['RESIDUE_LABEL']))
if prmtop_RL['RESIDUE_LABEL'][ind] not in prmtop_R['RESIDUE_LABEL']]
if len(ligand_ind)==0:
raise Exception('Ligand not found in complex prmtop')
elif len(ligand_ind) > 1:
print ' possible ligand residue labels: '+\
', '.join([prmtop_RL['RESIDUE_LABEL'][ind] for ind in ligand_ind])
print ' considering a residue named "%s" as the ligand'%\
prmtop_RL['RESIDUE_LABEL'][ligand_ind[-1]].strip()
self._ligand_first_atom = prmtop_RL['RESIDUE_POINTER'][ligand_ind[-1]] - 1
else:
self._ligand_first_atom = 0
if do_dock:
raise Exception('Missing AMBER prmtop files for receptor')
else:
print 'Missing AMBER prmtop files for receptor'
# Read the reference ligand and receptor coordinates
import AlGDock.IO
IO_crd = AlGDock.IO.crd()
if self._FNs['inpcrd']['R'] is not None:
if os.path.isfile(self._FNs['inpcrd']['L']):
lig_crd = IO_crd.read(self._FNs['inpcrd']['L'], multiplier=0.1)
self.confs['receptor'] = IO_crd.read(self._FNs['inpcrd']['R'], multiplier=0.1)
elif self._FNs['inpcrd']['RL'] is not None:
complex_crd = IO_crd.read(self._FNs['inpcrd']['RL'], multiplier=0.1)
lig_crd = complex_crd[self._ligand_first_atom:self._ligand_first_atom + self._ligand_natoms,:]
self.confs['receptor'] = np.vstack(\
(complex_crd[:self._ligand_first_atom,:],\
complex_crd[self._ligand_first_atom + self._ligand_natoms:,:]))
elif self._FNs['inpcrd']['L'] is not None:
self.confs['receptor'] = None
if os.path.isfile(self._FNs['inpcrd']['L']):
lig_crd = IO_crd.read(self._FNs['inpcrd']['L'], multiplier=0.1)
else:
lig_crd = None
if lig_crd is not None:
self.confs['ligand'] = lig_crd[self.molecule.inv_prmtop_atom_order,:]
if self.params['dock']['rmsd'] is not False:
if self.params['dock']['rmsd'] is True:
if lig_crd is not None:
rmsd_crd = lig_crd
else:
raise Exception('Reference structure for rmsd calculations unknown')
else:
rmsd_crd = IO_crd.read(self.params['dock']['rmsd'], \
natoms=self.universe.numberOfAtoms(), multiplier=0.1)
rmsd_crd = rmsd_crd[self.molecule.inv_prmtop_atom_order,:]
self.confs['rmsd'] = rmsd_crd[self.molecule.heavy_atoms,:]
# Determine APBS grid spacing
if 'APBS' in self.params['dock']['phases'] or \
'APBS' in self.params['cool']['phases']:
factor = 1.0/MMTK.Units.Ang
def roundUpDime(x):
return (np.ceil((x.astype(float)-1)/32)*32+1).astype(int)
self._set_universe_evaluator({'MM':True, 'T':self.T_HIGH, 'ELE':1})
gd = self._forceFields['ELE'].grid_data
focus_dims = roundUpDime(gd['counts'])
focus_center = factor*(gd['counts']*gd['spacing']/2. + gd['origin'])
focus_spacing = factor*gd['spacing'][0]
      # Per-dimension bounds of the receptor coordinates
      min_xyz = np.array([min(factor*self.confs['receptor'][:,a]) for a in range(3)])
      max_xyz = np.array([max(factor*self.confs['receptor'][:,a]) for a in range(3)])
mol_range = max_xyz - min_xyz
mol_center = (min_xyz + max_xyz)/2.
# The full grid spans 1.5 times the range of the receptor
# and the focus grid, whatever is larger
full_spacing = 1.0
full_min = np.minimum(mol_center - mol_range/2.*1.5, \
focus_center - focus_dims*focus_spacing/2.*1.5)
full_max = np.maximum(mol_center + mol_range/2.*1.5, \
focus_center + focus_dims*focus_spacing/2.*1.5)
full_dims = roundUpDime((full_max-full_min)/full_spacing)
full_center = (full_min + full_max)/2.
self._apbs_grid = { \
'dime':[full_dims, focus_dims], \
'gcent':[full_center, focus_center], \
'spacing':[full_spacing, focus_spacing]}
# If poses are being rescored, start with a docked structure
(confs,Es) = self._get_confs_to_rescore(site=False, minimize=False)
if len(confs)>0:
self.universe.setConfiguration(Configuration(self.universe,confs[-1]))
# Load progress
self._postprocess(readOnly=True)
self.calc_f_L(readOnly=True)
self.calc_f_RL(readOnly=True)
def _run(self, run_type):
self.run_type = run_type
if run_type=='pose_energies' or run_type=='minimized_pose_energies':
self.pose_energies(minimize=(run_type=='minimized_pose_energies'))
elif run_type=='store_params':
self._save('cool', keys=['progress'])
self._save('dock', keys=['progress'])
elif run_type=='cool': # Sample the cooling process
self.cool()
self.calc_f_L()
elif run_type=='dock': # Sample the docking process
self.dock()
self.calc_f_RL()
elif run_type=='timed': # Timed replica exchange sampling
cool_complete = self.cool()
if cool_complete:
pp_complete = self._postprocess([('cool',-1,-1,'L')])
if pp_complete:
self.calc_f_L()
dock_complete = self.dock()
if dock_complete:
pp_complete = self._postprocess()
if pp_complete:
self.calc_f_RL()
elif run_type=='postprocess': # Postprocessing
self._postprocess()
elif run_type=='redo_postprocess':
self._postprocess(redo_dock=True)
elif (run_type=='free_energies') or (run_type=='redo_free_energies'):
self.calc_f_L()
self.calc_f_RL(redo=(run_type=='redo_free_energies'))
elif run_type=='all':
self.cool()
self._postprocess([('cool',-1,-1,'L')])
self.calc_f_L()
self.dock()
self._postprocess()
self.calc_f_RL()
elif run_type=='render_docked':
view_args = {'axes_off':True, 'size':[1008,1008], 'scale_by':0.80, \
'render':'TachyonInternal'}
if hasattr(self, '_view_args_rotate_matrix'):
view_args['rotate_matrix'] = getattr(self, '_view_args_rotate_matrix')
self.show_samples(show_ref_ligand=True, show_starting_pose=True, \
show_receptor=True, save_image=True, execute=True, quit=True, \
view_args=view_args)
elif run_type=='render_intermediates':
view_args = {'axes_off':True, 'size':[1008,1008], 'scale_by':0.80, \
'render':'TachyonInternal'}
if hasattr(self, '_view_args_rotate_matrix'):
view_args['rotate_matrix'] = getattr(self, '_view_args_rotate_matrix')
self.render_intermediates(\
movie_name=os.path.join(self.dir['dock'],'dock-intermediates.gif'), \
view_args=view_args)
self.render_intermediates(nframes=8, view_args=view_args)
elif run_type=='clear_intermediates':
for process in ['cool','dock']:
print 'Clearing intermediates for '+process
for state_ind in range(1,len(self.confs[process]['samples'])-1):
for cycle_ind in range(len(self.confs[process]['samples'][state_ind])):
self.confs[process]['samples'][state_ind][cycle_ind] = []
self._save(process)
###########
# Cooling #
###########
def initial_cool(self, warm=True):
"""
Warms the ligand from self.T_TARGET to self.T_HIGH, or
cools the ligand from self.T_HIGH to self.T_TARGET
Intermediate thermodynamic states are chosen such that
thermodynamic length intervals are approximately constant.
Configurations from each state are subsampled to seed the next simulation.
"""
if (len(self.cool_protocol)>0) and (self.cool_protocol[-1]['crossed']):
return # Initial cooling is already complete
self._set_lock('cool')
cool_start_time = time.time()
if warm:
T_START, T_END = self.T_TARGET, self.T_HIGH
direction_name = 'warm'
else:
T_START, T_END = self.T_HIGH, self.T_TARGET
direction_name = 'cool'
if self.cool_protocol==[]:
self.tee("\n>>> Initial %sing of the ligand "%direction_name + \
"from %d K to %d K, "%(T_START,T_END) + "starting at " + \
time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
# Set up the force field
T = T_START
self.cool_protocol = [{'MM':True, 'T':T, \
'delta_t':1.5*MMTK.Units.fs,
'a':0.0, 'crossed':False}]
self._set_universe_evaluator(self.cool_protocol[-1])
# Get starting configurations
seeds = self._get_confs_to_rescore(site=False, minimize=True)[0]
seeds = [s for s in reversed(seeds)]
self.confs['cool']['starting_poses'] = seeds
self.sampler['cool_SmartDarting'].set_confs(seeds)
self.universe.setConfiguration(Configuration(self.universe,seeds[0]))
# Ramp the temperature from 0 to the desired starting temperature
T_LOW = 20.
T_SERIES = T_LOW*(T_START/T_LOW)**(np.arange(30)/29.)
for T in T_SERIES:
self.sampler['init'](steps = 500, T=T,\
delta_t=self.delta_t, steps_per_trial = 100, \
seed=int(time.time()+T))
self.universe.normalizePosition()
# Run at starting temperature
state_start_time = time.time()
conf = self.universe.configuration().array
(confs, Es_MM, self.cool_protocol[-1]['delta_t'], Ht) = \
self._initial_sim_state(\
[conf for n in range(self.params['cool']['seeds_per_state'])], \
'cool', self.cool_protocol[-1])
self.confs['cool']['replicas'] = [confs[np.random.randint(len(confs))]]
self.confs['cool']['samples'] = [[confs]]
self.cool_Es = [[{'MM':Es_MM}]]
tL_tensor = Es_MM.std()/(R*T_START*T_START)
self.tee(" generated %d configurations "%len(confs) + \
"at %d K "%self.cool_protocol[-1]['T'] + \
"in " + HMStime(time.time()-state_start_time))
self.tee(" dt=%f ps, Ht=%f, tL_tensor=%e"%(\
self.cool_protocol[-1]['delta_t'], Ht, tL_tensor))
else:
self.tee("\n>>> Initial %s of the ligand "%direction_name + \
"from %d K to %d K, "%(T_START,T_END) + "continuing at " + \
time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
confs = self.confs['cool']['samples'][-1][0]
Es_MM = self.cool_Es[-1][0]['MM']
T = self.cool_protocol[-1]['T']
tL_tensor = Es_MM.std()/(R*T*T)
# Main loop for initial cooling:
# choose new temperature, randomly select seeds, simulate
while (not self.cool_protocol[-1]['crossed']):
# Choose new temperature
To = self.cool_protocol[-1]['T']
crossed = self.cool_protocol[-1]['crossed']
if tL_tensor>1E-5:
dL = self.params['cool']['therm_speed']/tL_tensor
if warm:
T = To + dL
if T > self.T_HIGH:
T = self.T_HIGH
crossed = True
else:
T = To - dL
if T < self.T_TARGET:
T = self.T_TARGET
crossed = True
else:
raise Exception('No variance in configuration energies')
self.cool_protocol.append(\
{'T':T, 'a':(self.T_HIGH-T)/(self.T_HIGH-self.T_TARGET), 'MM':True, 'crossed':crossed})
# TODO: Add seeds to SmartDarting integrator
# Randomly select seeds for new trajectory
logweight = Es_MM/R*(1/T-1/To)
weights = np.exp(-logweight+min(logweight))
seedIndicies = np.random.choice(len(Es_MM),
size = self.params['cool']['seeds_per_state'], p = weights/sum(weights))
# Simulate and store data
confs_o = confs
Es_MM_o = Es_MM
self._set_universe_evaluator(self.cool_protocol[-1])
self.sampler['cool_SmartDarting'].set_confs(confs, append=True)
state_start_time = time.time()
(confs, Es_MM, self.cool_protocol[-1]['delta_t'], Ht) = \
self._initial_sim_state(\
[confs[ind] for ind in seedIndicies], 'cool', self.cool_protocol[-1])
self.tee(" generated %d configurations "%len(confs) + \
"at %d K "%self.cool_protocol[-1]['T'] + \
"in " + (HMStime(time.time()-state_start_time)))
self.tee(" dt=%f ps, Ht=%f, sigma=%f"%(\
self.cool_protocol[-1]['delta_t'], Ht, Es_MM.std()))
# Estimate the mean replica exchange acceptance rate
# between the previous and new state
(u_kln,N_k) = self._u_kln([[{'MM':Es_MM_o}],[{'MM':Es_MM}]], \
self.cool_protocol[-2:])
N = min(N_k)
acc = np.exp(-u_kln[0,1,:N]-u_kln[1,0,:N]+u_kln[0,0,:N]+u_kln[1,1,:N])
mean_acc = np.mean(np.minimum(acc,np.ones(acc.shape)))
if mean_acc<self.params['cool']['min_repX_acc']:
# If the acceptance probability is too low,
# reject the state and restart
self.cool_protocol.pop()
confs = confs_o
Es_MM = Es_MM_o
tL_tensor = tL_tensor*1.25 # Use a smaller step
self.tee(" rejected new state, as estimated replica exchange" + \
" acceptance rate of %f is too low"%mean_acc)
elif (mean_acc>0.99) and (not crossed):
# If the acceptance probability is too high,
# reject the previous state and restart
self.confs['cool']['replicas'][-1] = confs[np.random.randint(len(confs))]
self.cool_protocol.pop(-2)
tL_tensor = Es_MM.std()/(R*T*T) # Metric tensor for the thermodynamic length
self.tee(" rejected previous state, as estimated replica exchange" + \
" acceptance rate of %f is too high"%mean_acc)
else:
self.confs['cool']['replicas'].append(confs[np.random.randint(len(confs))])
self.confs['cool']['samples'].append([confs])
if len(self.confs['cool']['samples'])>2 and \
(not self.params['cool']['keep_intermediate']):
self.confs['cool']['samples'][-2] = []
self.cool_Es.append([{'MM':Es_MM}])
tL_tensor = Es_MM.std()/(R*T*T) # Metric tensor for the thermodynamic length
self.tee(" estimated replica exchange acceptance rate is %f"%mean_acc)
# Special tasks after the last stage
if self.cool_protocol[-1]['crossed']:
self._cool_cycle += 1
# For warming, reverse protocol and energies
if warm:
self.tee(" reversing replicas, samples, and protocol")
self.confs['cool']['replicas'].reverse()
self.confs['cool']['samples'].reverse()
self.cool_Es.reverse()
self.cool_protocol.reverse()
self.cool_protocol[0]['crossed'] = False
self.cool_protocol[-1]['crossed'] = True
self._save('cool')
self.tee("")
if self.run_type=='timed':
remaining_time = self.timing['max']*60 - \
(time.time()-self.timing['start'])
if remaining_time<0:
self.tee(" no time remaining for initial %s"%direction_name)
self._clear_lock('cool')
return False
# Save data
self.tee("Elapsed time for initial %sing of "%direction_name + \
"%d states: "%len(self.cool_protocol) + \
HMStime(time.time()-cool_start_time))
self._clear_lock('cool')
return True
def cool(self):
"""
Samples different ligand configurations
at thermodynamic states between self.T_HIGH and self.T_TARGET
"""
return self._sim_process('cool')
def calc_f_L(self, readOnly=False, redo=False):
"""
Calculates ligand-specific free energies:
1. solvation free energy of the ligand using single-step
free energy perturbation
2. reduced free energy of cooling the ligand from self.T_HIGH to self.T_TARGET
"""
# Initialize variables as empty lists or by loading data
f_L_FN = join(self.dir['cool'],'f_L.pkl.gz')
if redo:
if os.path.isfile(f_L_FN):
os.remove(f_L_FN)
dat = None
else:
dat = self._load_pkl_gz(f_L_FN)
if dat is not None:
(self.stats_L, self.f_L) = dat
else:
self.stats_L = dict(\
[(item,[]) for item in ['equilibrated_cycle','mean_acc']])
self.stats_L['protocol'] = self.cool_protocol
self.f_L = dict([(key,[]) for key in ['cool_BAR','cool_MBAR'] + \
[phase+'_solv' for phase in self.params['cool']['phases']]])
if readOnly or self.cool_protocol==[]:
return
K = len(self.cool_protocol)
# Make sure postprocessing is complete
pp_complete = self._postprocess([('cool',-1,-1,'L')])
if not pp_complete:
return False
# Make sure all the energies are available
for c in range(self._cool_cycle):
if len(self.cool_Es[-1][c].keys())==0:
self.tee(" skipping the cooling free energy calculation")
return
start_string = "\n>>> Ligand free energy calculations, starting at " + \
time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
free_energy_start_time = time.time()
# Store stats_L internal energies
self.stats_L['u_K_sampled'] = \
[self._u_kln([self.cool_Es[-1][c]],[self.cool_protocol[-1]]) \
for c in range(self._cool_cycle)]
self.stats_L['u_KK'] = \
[np.sum([self._u_kln([self.cool_Es[k][c]],[self.cool_protocol[k]]) \
for k in range(len(self.cool_protocol))],0) \
for c in range(self._cool_cycle)]
for phase in self.params['cool']['phases']:
self.stats_L['u_K_'+phase] = \
[self.cool_Es[-1][c]['L'+phase][:,-1]/self.RT_TARGET \
for c in range(self._cool_cycle)]
# Estimate cycle at which simulation has equilibrated and predict native pose
self.stats_L['equilibrated_cycle'] = self._get_equilibrated_cycle('cool')
(self.stats_L['predicted_pose_index'], \
self.stats_L['lowest_energy_pose_index']) = \
self._get_pose_prediction('cool', self.stats_L['equilibrated_cycle'][-1])
# Calculate solvation free energies that have not already been calculated,
# in units of RT
updated = False
for phase in self.params['cool']['phases']:
if not phase+'_solv' in self.f_L:
self.f_L[phase+'_solv'] = []
for c in range(len(self.f_L[phase+'_solv']), self._cool_cycle):
if not updated:
self._set_lock('cool')
self.tee(start_string)
updated = True
fromCycle = self.stats_L['equilibrated_cycle'][c]
toCycle = c + 1
if not ('L'+phase) in self.cool_Es[-1][c].keys():
raise Exception('L%s energies not found in cycle %d'%(phase, c))
# Arbitrarily, solvation is the
# 'forward' direction and desolvation the 'reverse'
u_phase = np.concatenate([\
self.cool_Es[-1][n]['L'+phase] for n in range(fromCycle,toCycle)])
u_MM = np.concatenate([\
self.cool_Es[-1][n]['MM'] for n in range(fromCycle,toCycle)])
du_F = (u_phase[:,-1] - u_MM)/self.RT_TARGET
min_du_F = min(du_F)
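          # Single-step FEP estimate: f_solv = -ln< exp(-du_F) >; subtracting
          # min(du_F) before exponentiating avoids numerical overflow
          # (log-sum-exp trick) and is added back afterward.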
f_L_solv = -np.log(np.exp(-du_F+min_du_F).mean()) + min_du_F
self.f_L[phase+'_solv'].append(f_L_solv)
self.tee(" calculated " + phase + " solvation free energy of " + \
"%f RT "%(f_L_solv) + \
"using cycles %d to %d"%(fromCycle, toCycle-1))
# Calculate cooling free energies that have not already been calculated,
# in units of RT
for c in range(len(self.f_L['cool_BAR']), self._cool_cycle):
if not updated:
self._set_lock('cool')
self.tee(start_string)
updated = True
fromCycle = self.stats_L['equilibrated_cycle'][c]
toCycle = c + 1
# Cooling free energy
cool_Es = []
for cool_Es_state in self.cool_Es:
cool_Es.append(cool_Es_state[fromCycle:toCycle])
(u_kln,N_k) = self._u_kln(cool_Es,self.cool_protocol)
(BAR,MBAR) = self._run_MBAR(u_kln,N_k)
self.f_L['cool_BAR'].append(BAR)
self.f_L['cool_MBAR'].append(MBAR)
# Average acceptance probabilities
cool_mean_acc = np.zeros(K-1)
for k in range(0, K-1):
(u_kln, N_k) = self._u_kln(cool_Es[k:k+2],self.cool_protocol[k:k+2])
N = min(N_k)
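# Estimated exchange acceptance between neighboring states k and l:
# min(1, exp(-[u_kl + u_lk - u_kk - u_ll])) in reduced energies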
acc = np.exp(-u_kln[0,1,:N]-u_kln[1,0,:N]+u_kln[0,0,:N]+u_kln[1,1,:N])
cool_mean_acc[k] = np.mean(np.minimum(acc,np.ones(acc.shape)))
self.stats_L['mean_acc'].append(cool_mean_acc)
self.tee(" calculated cooling free energy of %f RT "%(\
self.f_L['cool_MBAR'][-1][-1])+\
"using MBAR for cycles %d to %d"%(fromCycle, c))
if updated:
self._write_pkl_gz(f_L_FN, (self.stats_L,self.f_L))
self.tee("\nElapsed time for free energy calculation: " + \
HMStime(time.time()-free_energy_start_time))
self._clear_lock('cool')
###########
# Docking #
###########
def random_dock(self):
"""
Randomly places the ligand into the receptor and evaluates energies.
The first state of docking is sampled by randomly placing configurations
from the high-temperature ligand simulation into the binding site.
"""
# Select samples from the high T unbound state and ensure there are enough
E_MM = []
confs = []
for k in range(1,len(self.cool_Es[0])):
E_MM += list(self.cool_Es[0][k]['MM'])
confs += list(self.confs['cool']['samples'][0][k])
while len(E_MM)<self.params['dock']['seeds_per_state']:
self.tee("More samples from high temperature ligand simulation needed")
self._replica_exchange('cool')
E_MM = []
confs = []
for k in range(1,len(self.cool_Es[0])):
E_MM += list(self.cool_Es[0][k]['MM'])
confs += list(self.confs['cool']['samples'][0][k])
random_dock_inds = np.array(np.linspace(0,len(E_MM), \
self.params['dock']['seeds_per_state'],endpoint=False),dtype=int)
cool0_Es_MM = [E_MM[ind] for ind in random_dock_inds]
cool0_confs = [confs[ind] for ind in random_dock_inds]
# Do the random docking
lambda_o = self._lambda(0.0,'dock',MM=True,site=True,crossed=False)
self.dock_protocol = [lambda_o]
# Set up the force field with full interaction grids
lambda_scalables = dict(zip(\
self._scalables,np.ones(len(self._scalables),dtype=int)) + \
[('T',self.T_HIGH),('site',True)])
self._set_universe_evaluator(lambda_scalables)
# Either loads or generates the random translations and rotations for the first state of docking
if not (hasattr(self,'_random_trans') and hasattr(self,'_random_rotT')):
self._max_n_trans = 10000
# Default density of points is 50 per nm**3
self._n_trans = max(min(np.int(np.ceil(self._forceFields['site'].volume*self.params['dock']['site_density'])),self._max_n_trans),5)
self._random_trans = np.ndarray((self._max_n_trans), dtype=Vector)
for ind in range(self._max_n_trans):
self._random_trans[ind] = Vector(self._forceFields['site'].randomPoint())
self._max_n_rot = 100
self._n_rot = 100
self._random_rotT = np.ndarray((self._max_n_rot,3,3))
for ind in range(self._max_n_rot):
self._random_rotT[ind,:,:] = np.transpose(random_rotate())
else:
self._max_n_trans = self._random_trans.shape[0]
self._n_rot = self._random_rotT.shape[0]
# Get interaction energies.
# Loop over configurations, random rotations, and random translations
E = {}
for term in (['MM','site']+self._scalables):
# Large array creation may cause MemoryError
E[term] = np.zeros((self.params['dock']['seeds_per_state'], \
self._max_n_rot,self._n_trans))
self.tee(" allocated memory for interaction energies")
converged = False
n_trans_o = 0
n_trans_n = self._n_trans
while not converged:
for c in range(self.params['dock']['seeds_per_state']):
E['MM'][c,:,:] = cool0_Es_MM[c]
for i_rot in range(self._n_rot):
conf_rot = Configuration(self.universe,\
np.dot(cool0_confs[c], self._random_rotT[i_rot,:,:]))
for i_trans in range(n_trans_o, n_trans_n):
self.universe.setConfiguration(conf_rot)
self.universe.translateTo(self._random_trans[i_trans])
eT = self.universe.energyTerms()
for (key,value) in eT.iteritems():
E[term_map[key]][c,i_rot,i_trans] += value
E_c = {}
for term in E.keys():
# Large array creation may cause MemoryError
E_c[term] = np.ravel(E[term][:,:self._n_rot,:n_trans_n])
self.tee(" allocated memory for %d translations"%n_trans_n)
(u_kln,N_k) = self._u_kln([E_c],\
[lambda_o,self._next_dock_state(E=E_c, lambda_o=lambda_o)])
du = u_kln[0,1,:] - u_kln[0,0,:]
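# Bootstrap the exponential-average estimate of the free energy difference
# to the next docking state to gauge its statistical uncertainty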
bootstrap_reps = 50
f_grid0 = np.zeros(bootstrap_reps)
for b in range(bootstrap_reps):
du_b = du[np.random.randint(0, len(du), len(du))]
f_grid0[b] = -np.log(np.exp(-du_b+min(du_b)).mean()) + min(du_b)
f_grid0_std = f_grid0.std()
converged = f_grid0_std<0.1
if not converged:
self.tee(" with %s translations "%n_trans_n + \
"the predicted free energy difference is %f (%f)"%(\
f_grid0.mean(),f_grid0_std))
if n_trans_n == self._max_n_trans:
break
n_trans_o = n_trans_n
n_trans_n = min(n_trans_n + 25, self._max_n_trans)
for term in (['MM','site']+self._scalables):
# Large array creation may cause MemoryError
E[term] = np.dstack((E[term], \
np.zeros((self.params['dock']['seeds_per_state'],\
self._max_n_rot,25))))
if self._n_trans != n_trans_n:
self._n_trans = n_trans_n
self.tee(" %d ligand configurations "%len(cool0_Es_MM) + \
"were randomly docked into the binding site using "+ \
"%d translations and %d rotations "%(n_trans_n,self._n_rot))
self.tee(" the predicted free energy difference between the" + \
" first and second docking states is " + \
"%f (%f)"%(f_grid0.mean(),f_grid0_std))
ravel_start_time = time.time()
for term in E.keys():
E[term] = np.ravel(E[term][:,:self._n_rot,:self._n_trans])
self.tee(" raveled energy terms in " + \
HMStime(time.time()-ravel_start_time))
return (cool0_confs, E)
def initial_dock(self, randomOnly=False, undock=True):
"""
Docks the ligand into the receptor.
Intermediate thermodynamic states are chosen such that
thermodynamic length intervals are approximately constant.
Configurations from each state are subsampled to seed the next simulation.
"""
if (len(self.dock_protocol)>0) and (self.dock_protocol[-1]['crossed']):
return # Initial docking already complete
self._set_lock('dock')
dock_start_time = time.time()
if self.dock_protocol==[]:
self.tee("\n>>> Initial docking, starting at " + \
time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
if undock:
lambda_o = self._lambda(1.0, 'dock', MM=True, site=True, crossed=False)
self.dock_protocol = [lambda_o]
self._set_universe_evaluator(lambda_o)
seeds = [s for s in reversed(self._get_confs_to_rescore(site=True, minimize=True)[0])]
if seeds==[]:
undock = False
else:
self.confs['dock']['starting_poses'] = seeds
self.sampler['dock_SmartDarting'].set_confs(seeds)
self.universe.setConfiguration(Configuration(self.universe,seeds[0]))
# Ramp up the temperature
T_LOW = 20.
T_SERIES = T_LOW*(self.T_TARGET/T_LOW)**(np.arange(30)/29.)
for T in T_SERIES:
self.sampler['init'](steps = 500, T=T,\
delta_t=self.delta_t, steps_per_trial = 100, \
seed=int(time.time()+T))
seeds = [self.universe.configuration().array]
# Simulate
sim_start_time = time.time()
(confs, Es_tot, lambda_o['delta_t'], Ht) = \
self._initial_sim_state(\
seeds*self.params['dock']['seeds_per_state'], 'dock', lambda_o)
# Get state energies
E = self._calc_E(confs)
self.confs['dock']['replicas'] = [confs[np.random.randint(len(confs))]]
self.confs['dock']['samples'] = [[confs]]
self.dock_Es = [[E]]
self.tee(" generated %d configurations "%len(confs) + \
"with progress %e "%lambda_o['a'] + \
"in " + HMStime(time.time()-sim_start_time))
self.tee(" dt=%f ps, Ht=%f, tL_tensor=%e"%(\
lambda_o['delta_t'],Ht,self._tL_tensor(E,lambda_o)))
if not undock:
(cool0_confs, E) = self.random_dock()
self.tee(" random docking complete in " + \
HMStime(time.time()-dock_start_time))
if randomOnly:
self._clear_lock('dock')
return
else:
# Continuing from a previous docking instance
self.tee("\n>>> Initial docking, continuing at " + \
time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
confs = self.confs['dock']['samples'][-1][0]
E = self.dock_Es[-1][0]
lambda_o = self.dock_protocol[-1]
# Main loop for initial docking:
# choose new thermodynamic variables,
# randomly select seeds,
# simulate
rejectStage = 0
while (not self.dock_protocol[-1]['crossed']):
# Determine next value of the protocol
lambda_n = self._next_dock_state(E = E, lambda_o = lambda_o, \
pow = rejectStage, undock = undock)
self.dock_protocol.append(lambda_n)
if len(self.dock_protocol)>1000:
self._clear('dock')
self._save('dock')
self._store_infinite_f_RL()
raise Exception('Too many replicas!')
if abs(rejectStage)>20:
self._clear('dock')
self._save('dock')
self._store_infinite_f_RL()
raise Exception('Too many consecutive rejected stages!')
# TODO: Add seeds to SmartDarting integrator
# Randomly select seeds for new trajectory
u_o = self._u_kln([E],[lambda_o])
u_n = self._u_kln([E],[lambda_n])
du = u_n-u_o
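# Seeds are drawn with probability proportional to exp(-du),
# i.e. their relative Boltzmann weight in the new state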
weights = np.exp(-du+min(du))
seedIndicies = np.random.choice(len(u_o), \
size = self.params['dock']['seeds_per_state'], \
p=weights/sum(weights))
if (not undock) and (len(self.dock_protocol)==2):
# Cooling state 0 configurations, randomly oriented
# Use the lowest energy configuration in the first docking state for replica exchange
ind = np.argmin(u_n)
(c,i_rot,i_trans) = np.unravel_index(ind, (self.params['dock']['seeds_per_state'], self._n_rot, self._n_trans))
repX_conf = np.add(np.dot(cool0_confs[c], self._random_rotT[i_rot,:,:]),\
self._random_trans[i_trans].array)
self.confs['dock']['replicas'] = [repX_conf]
self.confs['dock']['samples'] = [[repX_conf]]
self.dock_Es = [[dict([(key,np.array([val[ind]])) for (key,val) in E.iteritems()])]]
seeds = []
for ind in seedIndicies:
(c,i_rot,i_trans) = np.unravel_index(ind, (self.params['dock']['seeds_per_state'], self._n_rot, self._n_trans))
seeds.append(np.add(np.dot(cool0_confs[c], self._random_rotT[i_rot,:,:]), self._random_trans[i_trans].array))
confs = None
E = {}
else: # Seeds from last state
seeds = [confs[ind] for ind in seedIndicies]
self.confs['dock']['seeds'] = seeds
# Store old data
confs_o = confs
E_o = E
# Simulate
sim_start_time = time.time()
self._set_universe_evaluator(lambda_n)
(confs, Es_tot, lambda_n['delta_t'], Ht) = \
self._initial_sim_state(seeds, 'dock', lambda_n)
# Get state energies
E = self._calc_E(confs)
self.tee(" generated %d configurations "%len(confs) + \
"with progress %f "%lambda_n['a'] + \
"in " + HMStime(time.time()-sim_start_time))
self.tee(" dt=%f ps, Ht=%f, tL_tensor=%e"%(\
lambda_n['delta_t'],Ht,self._tL_tensor(E,lambda_n)))
# Decide whether to keep the state
if len(self.dock_protocol)>(1+(not undock)):
# Estimate the mean replica exchange acceptance rate
# between the previous and new state
(u_kln,N_k) = self._u_kln([[E_o],[E]], self.dock_protocol[-2:])
N = min(N_k)
acc = np.exp(-u_kln[0,1,:N]-u_kln[1,0,:N]+u_kln[0,0,:N]+u_kln[1,1,:N])
mean_acc = np.mean(np.minimum(acc,np.ones(acc.shape)))
if (mean_acc<self.params['dock']['min_repX_acc']):
# If the acceptance probability is too low,
# reject the state and restart
self.dock_protocol.pop()
confs = confs_o
E = E_o
rejectStage += 1
self.tee(" rejected new state, as estimated replica exchange acceptance rate of %f is too low"%mean_acc)
elif (mean_acc>0.99) and (not lambda_n['crossed']):
# If the acceptance probability is too high,
# reject the previous state and restart
self.confs['dock']['replicas'][-1] = confs[np.random.randint(len(confs))]
self.dock_protocol.pop()
self.dock_protocol[-1] = copy.deepcopy(lambda_n)
rejectStage -= 1
lambda_o = lambda_n
self.tee(" rejected previous state, as estimated replica exchange acceptance rate of %f is too high"%mean_acc)
else:
# Store data and continue with initialization
self.confs['dock']['replicas'].append(confs[np.random.randint(len(confs))])
self.confs['dock']['samples'].append([confs])
self.dock_Es.append([E])
self.dock_protocol[-1] = copy.deepcopy(lambda_n)
rejectStage = 0
lambda_o = lambda_n
self.tee(" the estimated replica exchange acceptance rate is %f"%mean_acc)
if (not self.params['dock']['keep_intermediate']):
if len(self.dock_protocol)>(2+(not undock)):
self.confs['dock']['samples'][-2] = []
else:
# Store data and continue with initialization (first time)
self.confs['dock']['replicas'].append(confs[np.random.randint(len(confs))])
self.confs['dock']['samples'].append([confs])
self.dock_Es.append([E])
self.dock_protocol[-1] = copy.deepcopy(lambda_n)
rejectStage = 0
lambda_o = lambda_n
# Special tasks after the last stage
if (self.dock_protocol[-1]['crossed']):
# For undocking, reverse protocol and energies
if undock:
self.tee(" reversing replicas, samples, and protocol")
self.confs['dock']['replicas'].reverse()
self.confs['dock']['samples'].reverse()
self.confs['dock']['seeds'] = None
self.dock_Es.reverse()
self.dock_protocol.reverse()
self.dock_protocol[0]['crossed'] = False
self.dock_protocol[-1]['crossed'] = True
if (not self.params['dock']['keep_intermediate']):
for k in range(len(self.dock_protocol)-1):
self.confs['dock']['samples'][k] = []
self._dock_cycle += 1
# Save progress every 5 minutes
if ('last_dock_save' not in self.timing) or \
((time.time()-self.timing['last_dock_save'])>5*60):
self._save('dock')
self.tee("")
self.timing['last_dock_save'] = time.time()
saved = True
else:
saved = False
if self.run_type=='timed':
remaining_time = self.timing['max']*60 - \
(time.time()-self.timing['start'])
if remaining_time<0:
if not saved:
self._save('dock')
self.tee("")
self.tee(" no time remaining for initial dock")
self._clear_lock('dock')
return False
if not saved:
self._save('dock')
self.tee("")
self.tee("Elapsed time for initial docking of " + \
"%d states: "%len(self.dock_protocol) + \
HMStime(time.time()-dock_start_time))
self._clear_lock('dock')
return True
def dock(self):
"""
Docks the ligand into the binding site
by simulating at thermodynamic states
between decoupled and fully interacting and
between self.T_HIGH and self.T_TARGET
"""
return self._sim_process('dock')
def calc_f_RL(self, readOnly=False, redo=False):
"""
Calculates the binding potential of mean force
"""
if self.dock_protocol==[]:
return # Initial docking is incomplete
# Initialize variables as empty lists or by loading data
f_RL_FN = join(self.dir['dock'],'f_RL.pkl.gz')
# if redo:
# if os.path.isfile(f_RL_FN):
# os.remove(f_RL_FN)
# dat = None
# else:
dat = self._load_pkl_gz(f_RL_FN)
if (dat is not None):
(self.f_L, self.stats_RL, self.f_RL, self.B) = dat
else:
# stats_RL will include internal energies, interaction energies,
# the cycle by which the bound state is equilibrated,
# the mean acceptance probability between replica exchange neighbors,
# and the rmsd, if applicable
stats_RL = [('u_K_'+FF,[]) \
for FF in ['ligand','sampled']+self.params['dock']['phases']]
stats_RL += [('Psi_'+FF,[]) \
for FF in ['grid']+self.params['dock']['phases']]
stats_RL += [(item,[]) \
for item in ['equilibrated_cycle','cum_Nclusters','mean_acc','rmsd']]
self.stats_RL = dict(stats_RL)
self.stats_RL['protocol'] = self.dock_protocol
# Free energy components
self.f_RL = dict([(key,[]) for key in ['grid_BAR','grid_MBAR'] + \
[phase+'_solv' for phase in self.params['dock']['phases']]])
# Binding PMF estimates
self.B = {'MBAR':[]}
for phase in self.params['dock']['phases']:
for method in ['min_Psi','mean_Psi','inverse_FEP','BAR','MBAR']:
self.B[phase+'_'+method] = []
if readOnly:
return True
if redo:
self.B = {'MBAR':[]}
for phase in self.params['dock']['phases']:
for method in ['min_Psi','mean_Psi','inverse_FEP','BAR','MBAR']:
self.B[phase+'_'+method] = []
# Make sure postprocessing is complete
pp_complete = self._postprocess()
if not pp_complete:
return False
self.calc_f_L()
# Make sure all the energies are available
for c in range(self._dock_cycle):
if len(self.dock_Es[-1][c].keys())==0:
self.tee(" skipping the binding PMF calculation")
return
for phase in self.params['dock']['phases']:
for prefix in ['L','RL']:
if not prefix+phase in self.dock_Es[-1][c].keys():
self.tee(" postprocessed energies for %s unavailable"%phase)
return
if not hasattr(self,'f_L'):
self.tee(" skipping the binding PMF calculation")
return
self._set_lock('dock')
self.tee("\n>>> Binding PMF estimation, starting at " + \
time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
BPMF_start_time = time.time()
updated = False
K = len(self.dock_protocol)
# Store stats_RL
# Internal energies
self.stats_RL['u_K_ligand'] = \
[self.dock_Es[-1][c]['MM']/self.RT_TARGET for c in range(self._dock_cycle)]
self.stats_RL['u_K_sampled'] = \
[self._u_kln([self.dock_Es[-1][c]],[self.dock_protocol[-1]]) \
for c in range(self._dock_cycle)]
self.stats_RL['u_KK'] = \
[np.sum([self._u_kln([self.dock_Es[k][c]],[self.dock_protocol[k]]) \
for k in range(len(self.dock_protocol))],0) \
for c in range(self._dock_cycle)]
for phase in self.params['dock']['phases']:
self.stats_RL['u_K_'+phase] = \
[self.dock_Es[-1][c]['RL'+phase][:,-1]/self.RT_TARGET \
for c in range(self._dock_cycle)]
# Interaction energies
for c in range(len(self.stats_RL['Psi_grid']), self._dock_cycle):
self.stats_RL['Psi_grid'].append(
(self.dock_Es[-1][c]['LJr'] + \
self.dock_Es[-1][c]['LJa'] + \
self.dock_Es[-1][c]['ELE'])/self.RT_TARGET)
updated = True
for phase in self.params['dock']['phases']:
if (not 'Psi_'+phase in self.stats_RL) or redo:
self.stats_RL['Psi_'+phase] = []
for c in range(len(self.stats_RL['Psi_'+phase]), self._dock_cycle):
self.stats_RL['Psi_'+phase].append(
(self.dock_Es[-1][c]['RL'+phase][:,-1] - \
self.dock_Es[-1][c]['L'+phase][:,-1] - \
self.original_Es[0][0]['R'+phase][-1])/self.RT_TARGET)
# Estimate cycle at which simulation has equilibrated
eqc_o = self.stats_RL['equilibrated_cycle']
self.stats_RL['equilibrated_cycle'] = self._get_equilibrated_cycle('dock')
if self.stats_RL['equilibrated_cycle']!=eqc_o:
updated = True
# Predict native pose
(self.stats_RL['predicted_pose_index'], \
self.stats_RL['lowest_energy_pose_index']) = \
self._get_pose_prediction('dock', self.stats_RL['equilibrated_cycle'][-1])
# Autocorrelation time for all replicas
if updated:
paths = [np.array(self.dock_Es[0][c]['repXpath']) \
for c in range(len(self.dock_Es[0])) \
if 'repXpath' in self.dock_Es[0][c].keys()]
if len(paths)>0:
paths = np.transpose(np.hstack(paths))
self.stats_RL['tau_ac'] = \
pymbar.timeseries.integratedAutocorrelationTimeMultiple(paths)
# Store rmsd values
self.stats_RL['rmsd'] = [self.dock_Es[-1][c]['rmsd'] \
if 'rmsd' in self.dock_Es[-1][c].keys() else [] \
for c in range(self._dock_cycle)]
# Calculate docking free energies that have not already been calculated
for c in range(len(self.f_RL['grid_MBAR']), self._dock_cycle):
extractCycles = range(self.stats_RL['equilibrated_cycle'][c], c+1)
# Extract relevant energies
dock_Es = [Es[self.stats_RL['equilibrated_cycle'][c]:c+1] \
for Es in self.dock_Es]
# Use MBAR for the grid scaling free energy estimate
(u_kln,N_k) = self._u_kln(dock_Es,self.dock_protocol)
(BAR,MBAR) = self._run_MBAR(u_kln,N_k)
self.f_RL['grid_MBAR'].append(MBAR)
self.f_RL['grid_BAR'].append(BAR)
updated = True
# Average acceptance probabilities
mean_acc = np.zeros(K-1)
for k in range(0, K-1):
(u_kln,N_k) = self._u_kln(dock_Es[k:k+2],self.dock_protocol[k:k+2])
N = min(N_k)
acc = np.exp(-u_kln[0,1,:N]-u_kln[1,0,:N]+u_kln[0,0,:N]+u_kln[1,1,:N])
mean_acc[k] = np.mean(np.minimum(acc,np.ones(acc.shape)))
self.stats_RL['mean_acc'].append(mean_acc)
# BPMF assuming receptor and complex solvation cancel
self.B['MBAR'] = [-self.f_L['cool_MBAR'][-1][-1] + \
self.f_RL['grid_MBAR'][c][-1] for c in range(len(self.f_RL['grid_MBAR']))]
# BPMFs
for phase in self.params['dock']['phases']:
if not phase+'_solv' in self.f_RL:
self.f_RL[phase+'_solv'] = []
for method in ['min_Psi','mean_Psi','inverse_FEP','BAR','MBAR']:
if not phase+'_'+method in self.B:
self.B[phase+'_'+method] = []
# Receptor solvation
f_R_solv = self.original_Es[0][0]['R'+phase][-1]/self.RT_TARGET
for c in range(len(self.B[phase+'_MBAR']), self._dock_cycle):
extractCycles = range(self.stats_RL['equilibrated_cycle'][c], c+1)
du = np.concatenate([self.stats_RL['u_K_'+phase][c] - \
self.stats_RL['u_K_sampled'][c] for c in extractCycles])
# Complex solvation
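# (single-step FEP from the sampled grid state to the solvated complex)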
min_du = min(du)
f_RL_solv = -np.log(np.exp(-du+min_du).mean()) + min_du
weights = np.exp(-du+min_du)
weights = weights/sum(weights)
Psi = np.concatenate([self.stats_RL['Psi_'+phase][c] \
for c in extractCycles])
min_Psi = min(Psi)
# If the range is too large, filter Psi
if np.any((Psi-min_Psi)>500):
keep = (Psi-min_Psi)<500
weights = weights[keep]
Psi = Psi[keep]
self.f_RL[phase+'_solv'].append(f_RL_solv)
self.B[phase+'_min_Psi'].append(min_Psi)
self.B[phase+'_mean_Psi'].append(np.mean(Psi))
self.B[phase+'_inverse_FEP'].append(\
np.log(sum(weights*np.exp(Psi-min_Psi))) + min_Psi)
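# Thermodynamic cycle for the binding PMF:
# B = -f_R_solv - f_L_solv - f_L(cooling) + f_RL(grid scaling) + f_RL_solv,
# using either BAR or MBAR for the cooling and grid scaling legs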
self.B[phase+'_BAR'].append(-f_R_solv \
- self.f_L[phase+'_solv'][-1] - self.f_L['cool_BAR'][-1][-1] \
+ self.f_RL['grid_BAR'][-1][-1] + f_RL_solv)
self.B[phase+'_MBAR'].append(-f_R_solv \
- self.f_L[phase+'_solv'][-1] - self.f_L['cool_MBAR'][-1][-1] \
+ self.f_RL['grid_MBAR'][-1][-1] + f_RL_solv)
self.tee(" calculated %s binding PMF of %f RT with cycles %d to %d"%(\
phase, self.B[phase+'_MBAR'][-1], \
self.stats_RL['equilibrated_cycle'][c], c))
updated = True
if updated or redo:
self._write_pkl_gz(f_RL_FN, (self.f_L, self.stats_RL, self.f_RL, self.B))
self.tee("\nElapsed time for binding PMF estimation: " + \
HMStime(time.time()-BPMF_start_time))
self._clear_lock('dock')
def _store_infinite_f_RL(self):
f_RL_FN = join(self.dir['dock'],'f_RL.pkl.gz')
self._write_pkl_gz(f_RL_FN, (self.f_L, [], np.inf, np.inf))
def _get_equilibrated_cycle(self, process):
# Estimate cycle at which simulation has equilibrated
u_KKs = [np.sum([self._u_kln(\
[getattr(self,process+'_Es')[k][c]], [getattr(self,process+'_protocol')[k]]) \
for k in range(len(getattr(self,process+'_protocol')))],0) \
for c in range(getattr(self,'_%s_cycle'%process))]
mean_u_KKs = np.array([np.mean(u_KK) for u_KK in u_KKs])
std_u_KKs = np.array([np.std(u_KK) for u_KK in u_KKs])
equilibrated_cycle = []
for c in range(getattr(self,'_%s_cycle'%process)):
nearMean = abs(mean_u_KKs - mean_u_KKs[c])<std_u_KKs[c]
if nearMean.any():
nearMean = list(nearMean).index(True)
else:
nearMean = c
if c>0: # If possible, reject burn-in
nearMean = max(nearMean,1)
equilibrated_cycle.append(nearMean)
return equilibrated_cycle
# correlation_times = [pymbar.timeseries.integratedAutocorrelationTimeMultiple(\
# np.transpose(np.hstack([np.array(self.dock_Es[0][c]['repXpath']) \
# for c in range(start_c,len(self.dock_Es[0])) \
# if 'repXpath' in self.dock_Es[0][c].keys()]))) \
# for start_c in range(1,len(self.dock_Es[0]))]
def _get_pose_prediction(self, process, equilibrated_cycle):
# Gather snapshots
for k in range(equilibrated_cycle,getattr(self,'_%s_cycle'%process)):
if not isinstance(self.confs[process]['samples'][-1][k], list):
self.confs[process]['samples'][-1][k] = [self.confs[process]['samples'][-1][k]]
import itertools
confs = np.array([conf[self.molecule.heavy_atoms,:] \
for conf in itertools.chain.from_iterable(\
[self.confs[process]['samples'][-1][c] \
for c in range(equilibrated_cycle,getattr(self,'_%s_cycle'%process))])])
cum_Nk = np.cumsum([0] + [len(self.confs[process]['samples'][-1][c]) \
for c in range(equilibrated_cycle,getattr(self,'_%s_cycle'%process))])
# RMSD matrix
import sys
original_stdout = sys.stdout
original_stderr = sys.stderr
sys.stdout = NullDevice()
sys.stderr = NullDevice()
from pyRMSD.matrixHandler import MatrixHandler
rmsd_matrix = MatrixHandler().createMatrix(confs, \
{'cool':'QCP_SERIAL_CALCULATOR', \
'dock':'NOSUP_SERIAL_CALCULATOR'}[process])
sys.stdout = original_stdout
sys.stderr = original_stderr
# Clustering
import scipy.cluster
Z = scipy.cluster.hierarchy.complete(rmsd_matrix.get_data())
assignments = np.array(\
scipy.cluster.hierarchy.fcluster(Z, 0.1, criterion='distance'))
# Reindexes the assignments in order of appearance
new_index = 0
mapping_to_new_index = {}
for assignment in assignments:
if not assignment in mapping_to_new_index.keys():
mapping_to_new_index[assignment] = new_index
new_index += 1
assignments = [mapping_to_new_index[a] for a in assignments]
def linear_index_to_pair(ind):
cycle = list(ind<cum_Nk).index(True)-1
n = ind-cum_Nk[cycle]
return (cycle + equilibrated_cycle,n)
if process=='dock':
stats = self.stats_RL
else:
stats = self.stats_L
# Find lowest energy pose in most populated cluster (after equilibration)
pose_ind = {}
lowest_e_ind = {}
for phase in (['sampled']+self.params[process]['phases']):
un = np.concatenate([stats['u_K_'+phase][c] \
for c in range(equilibrated_cycle,getattr(self,'_%s_cycle'%process))])
uo = np.concatenate([stats['u_K_sampled'][c] \
for c in range(equilibrated_cycle,getattr(self,'_%s_cycle'%process))])
du = un-uo
min_du = min(du)
weights = np.exp(-du+min_du)
cluster_counts = np.histogram(assignments, \
bins=np.arange(len(set(assignments))+1)-0.5,
weights=weights)[0]
top_cluster = np.argmax(cluster_counts)
pose_ind[phase] = linear_index_to_pair(\
np.argmin(un+(assignments!=top_cluster)*np.max(un)))
lowest_e_ind[phase] = linear_index_to_pair(np.argmin(un))
return (pose_ind, lowest_e_ind)
def pose_energies(self, minimize=False):
"""
Calculates the energy for poses from self._FNs['score']
"""
lambda_o = self._lambda(1.0, 'dock', MM=True, site=True, crossed=False)
self._set_universe_evaluator(lambda_o)
# Load the poses
(confs, Es) = self._get_confs_to_rescore(site=False, minimize=minimize)
# Calculate MM energies
prefix = 'xtal' if self._FNs['score']=='default' else \
os.path.basename(self._FNs['score']).split('.')[0]
if minimize:
prefix = 'min_' + prefix
Es = self._calc_E(confs, Es, type='all', prefix=prefix)
# Calculate RMSD
if self.params['dock']['rmsd'] is not False:
Es['rmsd'] = np.array([np.sqrt(((confs[c][self.molecule.heavy_atoms,:] - \
self.confs['rmsd'])**2).sum()/self.molecule.nhatoms) \
for c in range(len(confs))])
# Grid interpolation energies
from AlGDock.ForceFields.Grid.Interpolation import InterpolationForceField
for grid_type in ['LJa','LJr']:
for interpolation_type in ['Trilinear','BSpline']:
key = '%s_%sTransform'%(grid_type,interpolation_type)
Es[key] = np.zeros((12,len(confs)),dtype=np.float)
for p in range(12):
print interpolation_type + ' interpolation of the ' + \
grid_type + ' grid with an inverse power of %d'%(p+1)
FF = InterpolationForceField(self._FNs['grids'][grid_type], \
name='%f'%(p+1),
interpolation_type=interpolation_type, strength=1.0,
scaling_property='scaling_factor_'+grid_type, \
inv_power=-float(p+1))
self.universe.setForceField(FF)
for c in range(len(confs)):
self.universe.setConfiguration(Configuration(self.universe,confs[c]))
Es[key][p,c] = self.universe.energy()
# Store the data
self._write_pkl_gz(join(self.dir['dock'],prefix+'.pkl.gz'),(confs,Es))
return (confs,Es)
######################
# Internal Functions #
######################
def _set_universe_evaluator(self, lambda_n):
"""
Sets the universe evaluator to values appropriate for the given lambda_n dictionary.
The elements in the dictionary lambda_n can be:
MM - True, to turn on the Generalized AMBER force field
site - True, to turn on the binding site
sLJr - scaling of the soft Lennard-Jones repulsive grid
sLJa - scaling of the soft Lennard-Jones attractive grid
sELE - scaling of the soft electrostatic grid
LJr - scaling of the Lennard-Jones repulsive grid
LJa - scaling of the Lennard-Jones attractive grid
ELE - scaling of the electrostatic grid
T - the temperature in K
"""
self.T = lambda_n['T']
self.RT = R*lambda_n['T']
if 'delta_t' in lambda_n.keys():
self.delta_t = lambda_n['delta_t']
else:
self.delta_t = 1.5*MMTK.Units.fs
# Reuse evaluators that have been stored
evaluator_key = '-'.join(repr(v) for v in lambda_n.values())
if evaluator_key in self._evaluators.keys():
self.universe._evaluator[(None,None,None)] = \
self._evaluators[evaluator_key]
return
# Otherwise create a new evaluator
fflist = []
if ('MM' in lambda_n.keys()) and lambda_n['MM']:
fflist.append(self._forceFields['gaff'])
if ('site' in lambda_n.keys()) and lambda_n['site']:
if not 'site' in self._forceFields.keys():
# Set up the binding site in the force field
if (self.params['dock']['site']=='Measure'):
self.params['dock']['site'] = 'Sphere'
if self.params['dock']['site_measured'] is not None:
(self.params['dock']['site_max_R'],self.params['dock']['site_center']) = \
self.params['dock']['site_measured']
else:
print '\n*** Measuring the binding site ***'
self._set_universe_evaluator(\
self._lambda(1.0, 'dock', MM=True, site=False, crossed=False))
(confs, Es) = self._get_confs_to_rescore(site=False, minimize=True)
if len(confs)>0:
# Use the center of mass for configurations
# within 20 RT of the lowest energy
cutoffE = Es['total'][-1] + 20*self.RT_TARGET
coms = []
for (conf,E) in reversed(zip(confs,Es['total'])):
if E<=cutoffE:
self.universe.setConfiguration(Configuration(self.universe,conf))
coms.append(np.array(self.universe.centerOfMass()))
else:
break
print ' %d configurations fit in the binding site'%len(coms)
coms = np.array(coms)
center = (np.min(coms,0)+np.max(coms,0))/2
max_R = max(np.ceil(np.max(np.sqrt(np.sum((coms-center)**2,1)))*10.)/10.,0.6)
self.params['dock']['site_max_R'] = max_R
self.params['dock']['site_center'] = center
self.universe.setConfiguration(Configuration(self.universe,confs[-1]))
if ((self.params['dock']['site_max_R'] is None) or \
(self.params['dock']['site_center'] is None)):
raise Exception('No binding site parameters!')
else:
self.params['dock']['site_measured'] = \
(self.params['dock']['site_max_R'], \
self.params['dock']['site_center'])
if (self.params['dock']['site']=='Sphere') and \
(self.params['dock']['site_center'] is not None) and \
(self.params['dock']['site_max_R'] is not None):
from AlGDock.ForceFields.Sphere.Sphere import SphereForceField
self._forceFields['site'] = SphereForceField(
center=self.params['dock']['site_center'],
max_R=self.params['dock']['site_max_R'], name='site')
elif (self.params['dock']['site']=='Cylinder') and \
(self.params['dock']['site_center'] is not None) and \
(self.params['dock']['site_direction'] is not None):
from AlGDock.ForceFields.Cylinder.Cylinder import CylinderForceField
self._forceFields['site'] = CylinderForceField(
origin=self.params['dock']['site_center'],
direction=self.params['dock']['site_direction'],
max_X=self.params['dock']['site_max_X'],
max_R=self.params['dock']['site_max_R'], name='site')
else:
raise Exception('Binding site type not recognized!')
fflist.append(self._forceFields['site'])
for scalable in self._scalables:
if (scalable in lambda_n.keys()) and lambda_n[scalable]>0:
# Load the force field if it has not been loaded
if not scalable in self._forceFields.keys():
loading_start_time = time.time()
grid_FN = self._FNs['grids'][{'sLJr':'LJr','sLJa':'LJa','sELE':'ELE',
'LJr':'LJr','LJa':'LJa','ELE':'ELE'}[scalable]]
grid_scaling_factor = 'scaling_factor_' + \
{'sLJr':'LJr','sLJa':'LJa','sELE':'electrostatic', \
'LJr':'LJr','LJa':'LJa','ELE':'electrostatic'}[scalable]
# Determine the grid threshold
grid_thresh = -1
if scalable!='LJr':
if scalable=='sLJr':
grid_thresh = 10.0
elif scalable=='sELE':
# The maximum value is set so that the electrostatic energy
# is less than or equal to the Lennard-Jones repulsive energy
# for every heavy atom at every grid point
scaling_factors_ELE = np.array([ \
self.molecule.getAtomProperty(a, 'scaling_factor_electrostatic') \
for a in self.molecule.atomList()],dtype=float)
scaling_factors_LJr = np.array([ \
self.molecule.getAtomProperty(a, 'scaling_factor_LJr') \
for a in self.molecule.atomList()],dtype=float)
scaling_factors_ELE = scaling_factors_ELE[scaling_factors_LJr>10]
scaling_factors_LJr = scaling_factors_LJr[scaling_factors_LJr>10]
grid_thresh = min(abs(scaling_factors_LJr*10.0/scaling_factors_ELE))
from AlGDock.ForceFields.Grid.Interpolation \
import InterpolationForceField
self._forceFields[scalable] = InterpolationForceField(grid_FN, \
name=scalable, interpolation_type='Trilinear', \
strength=lambda_n[scalable], scaling_property=grid_scaling_factor,
inv_power=-2 if scalable=='LJr' else None, \
grid_thresh=grid_thresh)
self.tee(' %s grid loaded from %s in %s'%(scalable, grid_FN, \
HMStime(time.time()-loading_start_time)))
# Set the force field strength to the desired value
self._forceFields[scalable].strength = lambda_n[scalable]
fflist.append(self._forceFields[scalable])
compoundFF = fflist[0]
for ff in fflist[1:]:
compoundFF += ff
self.universe.setForceField(compoundFF)
eval = ForceField.EnergyEvaluator(\
self.universe, self.universe._forcefield, None, None, None, None)
self.universe._evaluator[(None,None,None)] = eval
self._evaluators[evaluator_key] = eval
def _MC_translate_rotate(self, lambda_k, trials=20):
"""
Conducts Monte Carlo translation and rotation moves.
"""
# It does not seem worth creating a special evaluator
# for a small number of trials
if trials>100:
lambda_noMM = copy.deepcopy(lambda_k)
lambda_noMM['MM'] = False
lambda_noMM['site'] = False
self._set_universe_evaluator(lambda_noMM)
step_size = min(\
1.0*MMTK.Units.Ang, \
self._forceFields['site'].max_R*MMTK.Units.nm/10.)
acc = 0
xo = self.universe.copyConfiguration()
eo = self.universe.energy()
com = self.universe.centerOfMass().array
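# Each trial applies a random rigid-body rotation about the center of mass
# followed by a Gaussian translation, accepted with the Metropolis criterion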
for c in range(trials):
step = np.random.randn(3)*step_size
xn = np.dot((xo.array - com), random_rotate()) + com + step
self.universe.setConfiguration(Configuration(self.universe,xn))
en = self.universe.energy()
if ((en<eo) or (np.random.random()<np.exp(-(en-eo)/self.RT))):
acc += 1
xo = self.universe.copyConfiguration()
eo = en
com += step
else:
self.universe.setConfiguration(xo)
return acc
def _initial_sim_state(self, seeds, process, lambda_k):
"""
Initializes a state, returning the configurations and potential energy.
"""
results = []
if self._cores>1:
# Multiprocessing code
m = multiprocessing.Manager()
task_queue = m.Queue()
done_queue = m.Queue()
for k in range(len(seeds)):
task_queue.put((seeds[k], process, lambda_k, True, k))
processes = [multiprocessing.Process(target=self._sim_one_state_worker, \
args=(task_queue, done_queue)) for p in range(self._cores)]
for p in range(self._cores):
task_queue.put('STOP')
for p in processes:
p.start()
for p in processes:
p.join()
results = [done_queue.get() for seed in seeds]
for p in processes:
p.terminate()
else:
# Single process code
results = [self._sim_one_state(\
seeds[k], process, lambda_k, True, k) for k in range(len(seeds))]
confs = [result['confs'] for result in results]
potEs = [result['E_MM'] for result in results]
Ht = np.mean(np.array([result['Ht'] for result in results]))
delta_t = np.median(np.array([result['delta_t'] for result in results]))
delta_t = min(max(delta_t, 0.25*MMTK.Units.fs), 2.5*MMTK.Units.fs)
return (confs, np.array(potEs), delta_t, Ht)
def _replica_exchange(self, process):
"""
Performs a cycle of replica exchange
"""
if not process in ['dock','cool']:
raise Exception('Process must be dock or cool')
# GMC
def gMC_initial_setup():
"""
Initializes the BAT converter object.
Decides which internal coordinates to cross over. Here, only the soft torsions are crossed over.
Produces a list of replica (state) index pairs to be swapped. Only neighboring pairs are swapped.
Assumes that self.universe, self.molecule, and K (number of states) exist
as global variables when the function is called.
"""
from AlGDock.RigidBodies import identifier
import itertools
BAT_converter = identifier( self.universe, self.molecule )
BAT = BAT_converter.BAT( extended = True )
# this assumes that the torsional angles are stored in the tail of BAT
softTorsionId = [ i + len(BAT) - BAT_converter.ntorsions for i in BAT_converter._softTorsionInd ]
torsions_to_crossover = []
for i in range(1, len(softTorsionId) ):
combinations = itertools.combinations( softTorsionId, i )
for c in combinations:
torsions_to_crossover.append( list(c) )
#
BAT_converter.BAT_to_crossover = torsions_to_crossover
if len( BAT_converter.BAT_to_crossover ) == 0:
self.tee(' GMC No BAT to crossover')
state_indices = range( K )
state_indices_to_swap = zip( state_indices[0::2], state_indices[1::2] ) + \
zip( state_indices[1::2], state_indices[2::2] )
#
return BAT_converter, state_indices_to_swap
#
def do_gMC( nr_attempts, BAT_converter, state_indices_to_swap, torsion_threshold ):
"""
Assume self.universe, confs, lambdas, state_inds, inv_state_inds exist as global variables
when the function is called.
If at least one of the torsions in the combination chosen for a crossover attempt
changes by more than torsion_threshold, the crossover will be attempted.
The function will update confs.
It returns the number of attempts and the number of accepted moves.
"""
if nr_attempts < 0:
raise Exception('Number of attempts must be nonnegative!')
if torsion_threshold < 0.:
raise Exception('Torsion threshold must be nonnegative!')
#
if len( BAT_converter.BAT_to_crossover ) == 0:
return 0., 0.
#
from random import randrange
# get reduced energies and BAT for all configurations in confs
BATs = []
energies = np.zeros( K, dtype = float )
for c_ind in range(K):
s_ind = state_inds[ c_ind ]
self.universe.setConfiguration( Configuration( self.universe, confs[c_ind] ) )
BATs.append( np.array( BAT_converter.BAT( extended = True ) , dtype = float ) )
self._set_universe_evaluator( lambdas[ s_ind ] )
reduced_e = self.universe.energy() / ( R*lambdas[ s_ind ]['T'] )
energies[ c_ind ] = reduced_e
#
nr_sets_of_torsions = len( BAT_converter.BAT_to_crossover )
#
attempt_count , acc_count = 0 , 0
sweep_count = 0
while True:
sweep_count += 1
if (sweep_count * K) > (1000 * nr_attempts):
self.tee(' GMC: too many sweeps with too few attempted crossovers. Consider reducing torsion_threshold.')
return attempt_count, acc_count
#
for state_pair in state_indices_to_swap:
conf_ind_k0 = inv_state_inds[ state_pair[0] ]
conf_ind_k1 = inv_state_inds[ state_pair[1] ]
# check if it should attempt for this pair of states
ran_set_torsions = BAT_converter.BAT_to_crossover[ randrange( nr_sets_of_torsions ) ]
do_crossover = np.any(np.abs(BATs[conf_ind_k0][ran_set_torsions] - BATs[conf_ind_k1][ran_set_torsions]) >= torsion_threshold)
if do_crossover:
attempt_count += 1
# BAT and reduced energies before crossover
BAT_k0_be = copy.deepcopy( BATs[conf_ind_k0] )
BAT_k1_be = copy.deepcopy( BATs[conf_ind_k1] )
e_k0_be = energies[conf_ind_k0]
e_k1_be = energies[conf_ind_k1]
# BAT after crossover
BAT_k0_af = copy.deepcopy( BAT_k0_be )
BAT_k1_af = copy.deepcopy( BAT_k1_be )
for index in ran_set_torsions:
tmp = BAT_k0_af[ index ]
BAT_k0_af[ index ] = BAT_k1_af[ index ]
BAT_k1_af[ index ] = tmp
# Cartesian coord and reduced energies after crossover.
BAT_converter.Cartesian( BAT_k0_af )
self._set_universe_evaluator( lambdas[ state_pair[0] ] )
e_k0_af = self.universe.energy() / ( R*lambdas[ state_pair[0] ]['T'] )
conf_k0_af = copy.deepcopy( self.universe.configuration().array )
#
BAT_converter.Cartesian( BAT_k1_af )
self._set_universe_evaluator( lambdas[ state_pair[1] ] )
e_k1_af = self.universe.energy() / ( R*lambdas[ state_pair[1] ]['T'] )
conf_k1_af = copy.deepcopy( self.universe.configuration().array )
#
de = ( e_k0_be - e_k0_af ) + ( e_k1_be - e_k1_af )
# update confs, energies, BATS
if (de > 0) or ( np.random.uniform() < np.exp(de) ):
acc_count += 1
confs[conf_ind_k0] = conf_k0_af
confs[conf_ind_k1] = conf_k1_af
#
energies[conf_ind_k0] = e_k0_af
energies[conf_ind_k1] = e_k1_af
#
BATs[conf_ind_k0] = BAT_k0_af
BATs[conf_ind_k1] = BAT_k1_af
#
if attempt_count == nr_attempts:
return attempt_count, acc_count
#
self._set_lock(process)
if process=='cool':
terms = ['MM']
else:
terms = ['MM','site','misc'] + self._scalables
cycle = getattr(self,'_%s_cycle'%process)
confs = self.confs[process]['replicas']
lambdas = getattr(self,process+'_protocol')
# A list of pairs of state indices for exchange attempts
K = len(lambdas)
pairs_to_swap = []
for interval in range(1,min(5,K)):
lower_inds = []
for lowest_index in range(interval):
lower_inds += range(lowest_index,K-interval,interval)
upper_inds = np.array(lower_inds) + interval
pairs_to_swap += zip(lower_inds,upper_inds)
# Setting the force field will load grids
# before multiple processes are spawned
for k in range(K):
self._set_universe_evaluator(lambdas[k])
storage = {}
for var in ['confs','state_inds','energies']:
storage[var] = []
cycle_start_time = time.time()
if self._cores>1:
# Multiprocessing setup
m = multiprocessing.Manager()
task_queue = m.Queue()
done_queue = m.Queue()
# GMC
whether_do_gMC = self.params[process]['GMC_attempts'] > 0
if whether_do_gMC:
self.tee(' Using GMC for %s' %process)
nr_gMC_attempts = K * self.params[process]['GMC_attempts']
torsion_threshold = self.params[process]['GMC_tors_threshold']
gMC_attempt_count = 0
gMC_acc_count = 0
gMC_time = 0.0
BAT_converter, state_indices_to_swap = gMC_initial_setup()
# Do replica exchange
MC_time = 0.0
repX_time = 0.0
state_inds = range(K)
inv_state_inds = range(K)
for sweep in range(self.params[process]['sweeps_per_cycle']):
E = {}
for term in terms:
E[term] = np.zeros(K, dtype=float)
if process=='dock':
E['acc_MC'] = np.zeros(K, dtype=float)
Ht = np.zeros(K, dtype=float)
# Sample within each state
if self._cores>1:
for k in range(K):
task_queue.put((confs[k], process, lambdas[state_inds[k]], False, k))
for p in range(self._cores):
task_queue.put('STOP')
processes = [multiprocessing.Process(target=self._sim_one_state_worker, \
args=(task_queue, done_queue)) for p in range(self._cores)]
for p in processes:
p.start()
for p in processes:
p.join()
unordered_results = [done_queue.get() for k in range(K)]
results = sorted(unordered_results, key=lambda d: d['reference'])
for p in processes:
p.terminate()
else:
# Single process code
results = [self._sim_one_state(confs[k], process, \
lambdas[state_inds[k]], False, k) for k in range(K)]
# GMC
if whether_do_gMC:
gMC_start_time = time.time()
att_count, acc_count = do_gMC( nr_gMC_attempts, BAT_converter, state_indices_to_swap, torsion_threshold )
gMC_attempt_count += att_count
gMC_acc_count += acc_count
gMC_time += ( time.time() - gMC_start_time )
# Store results
for k in range(K):
if 'acc_MC' in results[k].keys():
E['acc_MC'][k] = results[k]['acc_MC']
MC_time += results[k]['MC_time']
confs[k] = results[k]['confs'] # [-1]
if process == 'cool':
E['MM'][k] = results[k]['E_MM'] # [-1]
Ht[k] += results[k]['Ht']
if process=='dock':
E = self._calc_E(confs, E) # Get energies
# Get rmsd values
if self.params['dock']['rmsd'] is not False:
E['rmsd'] = np.array([np.sqrt(((confs[k][self.molecule.heavy_atoms,:] - \
self.confs['rmsd'])**2).sum()/self.molecule.nhatoms) for k in range(K)])
# Calculate u_ij (i is the replica, and j is the configuration),
# a list of arrays
(u_ij,N_k) = self._u_kln(E, [lambdas[state_inds[c]] for c in range(K)])
# Do the replica exchange
repX_start_time = time.time()
for attempt in range(self.params[process]['attempts_per_sweep']):
for (t1,t2) in pairs_to_swap:
a = inv_state_inds[t1]
b = inv_state_inds[t2]
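# Metropolis criterion for exchanging configurations a and b between
# states t1 and t2: accept with probability min(1, exp(ddu))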
ddu = -u_ij[a][b]-u_ij[b][a]+u_ij[a][a]+u_ij[b][b]
if (ddu>0) or (np.random.uniform()<np.exp(ddu)):
u_ij[a],u_ij[b] = u_ij[b],u_ij[a]
state_inds[a],state_inds[b] = state_inds[b],state_inds[a]
inv_state_inds[state_inds[a]],inv_state_inds[state_inds[b]] = \
inv_state_inds[state_inds[b]],inv_state_inds[state_inds[a]]
repX_time += (time.time()-repX_start_time)
# Store data in local variables
storage['confs'].append(list(confs))
storage['state_inds'].append(list(state_inds))
storage['energies'].append(copy.deepcopy(E))
# GMC
if whether_do_gMC:
self.tee(' {0}/{1} crossover attempts ({2:.3g}) accepted in {3}'.format(\
gMC_acc_count, gMC_attempt_count, \
float(gMC_acc_count)/float(gMC_attempt_count) \
if gMC_attempt_count > 0 else 0, \
HMStime(gMC_time)))
# Estimate relaxation time from empirical state transition matrix
state_inds = np.array(storage['state_inds'])
Nij = np.zeros((K,K),dtype=int)
for (i,j) in zip(state_inds[:-1,:],state_inds[1:,:]):
for k in range(K):
Nij[j[k],i[k]] += 1
N = (Nij+Nij.T)
Tij = np.array(N,dtype=float)/sum(N,1)
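# The relaxation time is estimated as tau2 = 1/(1-eval[1]), taking eval[1]
# as the second-largest eigenvalue of the symmetrized state transition matrix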
(eval,evec)=np.linalg.eig(Tij)
tau2 = 1/(1-eval[1])
# Estimate relaxation time from autocorrelation
tau_ac = pymbar.timeseries.integratedAutocorrelationTimeMultiple(state_inds.T)
# There will be at least per_independent and up to sweeps_per_cycle saved samples.
# max(int(np.ceil((1+2*tau_ac)/per_independent)),1) is the minimum stride,
# which is based on per_independent samples per autocorrelation time.
# max(int(np.ceil(sweeps_per_cycle/per_independent)),1)
# is the maximum stride, which gives per_independent samples if possible.
per_independent = self.params[process]['snaps_per_independent']
stride = min(max(int(np.ceil((1+2*tau_ac)/per_independent)),1), \
max(int(np.ceil(self.params[process]['sweeps_per_cycle']/per_independent)),1))
store_indicies = np.array(\
range(min(stride-1,self.params[process]['sweeps_per_cycle']-1), self.params[process]['sweeps_per_cycle'], stride), dtype=int)
nsaved = len(store_indicies)
self.tee(" storing %d configurations for %d replicas"%(nsaved, len(confs)) + \
" in cycle %d"%cycle + \
" (tau2=%f, tau_ac=%f)"%(tau2,tau_ac))
self.tee(" with %s for MC"%(HMStime(MC_time)) + \
" and %s for replica exchange"%(HMStime(repX_time)) + \
" in " + HMStime(time.time()-cycle_start_time))
# Get indices for storing global variables
inv_state_inds = np.zeros((nsaved,K),dtype=int)
for snap in range(nsaved):
state_inds = storage['state_inds'][store_indicies[snap]]
for state in range(K):
inv_state_inds[snap][state_inds[state]] = state
# Reorder energies and replicas for storage
if process=='dock':
terms.append('acc_MC') # Make sure to save the acceptance probability
if self.params['dock']['rmsd'] is not False:
terms.append('rmsd') # Make sure to save the rmsd
Es = []
for state in range(K):
E_state = {}
if state==0:
E_state['repXpath'] = storage['state_inds']
E_state['Ht'] = Ht
for term in terms:
E_state[term] = np.array([storage['energies'][store_indicies[snap]][term][inv_state_inds[snap][state]] for snap in range(nsaved)])
Es.append([E_state])
self.confs[process]['replicas'] = \
[storage['confs'][store_indicies[-1]][inv_state_inds[-1][state]] \
for state in range(K)]
for state in range(K):
getattr(self,process+'_Es')[state].append(Es[state][0])
for state in range(K):
if self.params[process]['keep_intermediate'] or \
((process=='cool') and (state==0)) or \
(state==(K-1)):
confs = [storage['confs'][store_indicies[snap]][inv_state_inds[snap][state]] for snap in range(nsaved)]
self.confs[process]['samples'][state].append(confs)
else:
self.confs[process]['samples'][state].append([])
# TODO: Change seeds to SmartDarting integrator
setattr(self,'_%s_cycle'%process,cycle + 1)
self._save(process)
self._clear_lock(process)
def _sim_one_state_worker(self, input, output):
"""
Executes a task from the queue
"""
for args in iter(input.get, 'STOP'):
result = self._sim_one_state(*args)
output.put(result)
def _sim_one_state(self, seed, process, lambda_k, \
initialize=False, reference=None):
self.universe.setConfiguration(Configuration(self.universe, seed))
results = {}
# For initialization, the evaluator is already set
if not initialize:
self._set_universe_evaluator(lambda_k)
if 'delta_t' in lambda_k.keys():
delta_t = lambda_k['delta_t']
else:
delta_t = 1.5*MMTK.Units.fs
# Perform MCMC moves
if (process == 'dock') and (self.params['dock']['MCMC_moves']>0) \
and (lambda_k['a'] > 0.0) and (lambda_k['a'] < 0.01):
MC_start_time = time.time()
results['acc_MC'] = self._MC_translate_rotate(lambda_k, trials=20)/20.
results['MC_time'] = (time.time() - MC_start_time)
# Performs Smart Darting
self.sampler[process+'_SmartDarting'](ntrials=10, T=lambda_k['T'])
# Execute sampler
if initialize:
sampler = self.sampler['init']
steps = self.params[process]['steps_per_seed']
steps_per_trial = self.params[process]['steps_per_seed']/10
else:
sampler = self.sampler[process]
steps = self.params[process]['steps_per_sweep']
steps_per_trial = steps
(confs, potEs, Ht, delta_t) = sampler(\
steps=steps, \
steps_per_trial=steps_per_trial, \
T=lambda_k['T'], delta_t=delta_t, \
normalize=(process=='cool'),
adapt=initialize,
seed=int(time.time()+reference))
# Store and return results
results['confs'] = np.copy(confs[-1])
results['E_MM'] = potEs[-1]
results['Ht'] = Ht
results['delta_t'] = delta_t
results['reference'] = reference
return results
def _sim_process(self, process):
"""
Simulate and analyze a cooling or docking process.
As necessary, first conduct an initial cooling or docking
and then run a desired number of replica exchange cycles.
"""
if (getattr(self,process+'_protocol')==[]) or \
(not getattr(self,process+'_protocol')[-1]['crossed']):
time_left = getattr(self,'initial_'+process)()
if not time_left:
return False
# Main loop for replica exchange
if (self.params[process]['repX_cycles'] is not None) and \
((getattr(self,'_%s_cycle'%process) < \
self.params[process]['repX_cycles'])):
# Load configurations to score from another program
if (process=='dock') and (self._dock_cycle==1) and \
(self._FNs['score'] is not None) and \
(self._FNs['score']!='default'):
self._set_lock('dock')
self.tee(">>> Reinitializing replica exchange configurations")
confs = self._get_confs_to_rescore(\
nconfs=len(self.dock_protocol), site=True, minimize=True)[0]
self._clear_lock('dock')
if len(confs)>0:
self.confs['dock']['replicas'] = confs
self.tee("\n>>> Replica exchange for {0}ing, starting at {1} GMT".format(\
process, time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())), \
process=process)
mem_start = psutil.virtual_memory()
self.tee(' %f MB available / %f MB total'%(\
mem_start.available/1E6, mem_start.total/1E6))
self.timing[process+'_repX_start'] = time.time()
start_cycle = getattr(self,'_%s_cycle'%process)
cycle_times = []
while ((getattr(self,'_%s_cycle'%process) < self.params[process]['repX_cycles'])):
cycle_start_time = time.time()
self._replica_exchange(process)
cycle_times.append(time.time()-cycle_start_time)
if self.run_type=='timed':
remaining_time = self.timing['max']*60 - (time.time()-self.timing['start'])
cycle_time = np.mean(cycle_times)
self.tee(" projected cycle time: %s, remaining time: %s"%(\
HMStime(cycle_time), HMStime(remaining_time)), process=process)
if cycle_time>remaining_time:
return False
self.tee("\nElapsed time for %d cycles of replica exchange was %s"%(\
(getattr(self,'_%s_cycle'%process) - start_cycle), \
HMStime(time.time() - self.timing[process+'_repX_start'])), \
process=process)
# If there are insufficient configurations,
# do additional replica exchange on the cooling process
if (process=='cool'):
E_MM = []
for k in range(len(self.cool_Es[0])):
E_MM += list(self.cool_Es[0][k]['MM'])
while len(E_MM)<self.params['dock']['seeds_per_state']:
self.tee("More samples from high temperature ligand simulation needed", process='cool')
cycle_start_time = time.time()
self._replica_exchange('cool')
cycle_times.append(time.time()-cycle_start_time)
if self.run_type=='timed':
remaining_time = self.timing['max']*60 - (time.time()-self.timing['start'])
cycle_time = np.mean(cycle_times)
self.tee(" projected cycle time: %s, remaining time: %s"%(\
HMStime(cycle_time), HMStime(remaining_time)), process=process)
if cycle_time>remaining_time:
return False
E_MM = []
for k in range(len(self.cool_Es[0])):
E_MM += list(self.cool_Es[0][k]['MM'])
return True # The process has completed
def _get_confs_to_rescore(self, nconfs=None, site=False, minimize=True):
"""
Returns configurations to rescore and their corresponding energies
as a tuple of lists, ordered by DECREASING energy.
It is either the default configuration, or from dock6 and initial docking.
If nconfs is None, then all configurations will be unique.
If nconfs is smaller than the number of unique configurations,
then the lowest energy configurations will be retained.
If nconfs is larger than the number of unique configurations,
then the lowest energy configuration will be duplicated.
"""
# Get configurations
count = {'xtal':0, 'dock6':0, 'initial_dock':0, 'duplicated':0}
# based on the score option
if self._FNs['score']=='default':
confs = [np.copy(self.confs['ligand'])]
count['xtal'] = 1
Es = {}
if nconfs is None:
nconfs = 1
elif (self._FNs['score'] is None) or (not os.path.isfile(self._FNs['score'])):
confs = []
Es = {}
elif self._FNs['score'].endswith('.mol2') or \
self._FNs['score'].endswith('.mol2.gz'):
import AlGDock.IO
IO_dock6_mol2 = AlGDock.IO.dock6_mol2()
(confs, Es) = IO_dock6_mol2.read(self._FNs['score'], \
reorder=self.molecule.inv_prmtop_atom_order)
count['dock6'] = len(confs)
elif self._FNs['score'].endswith('.nc'):
from netCDF4 import Dataset
dock6_nc = Dataset(self._FNs['score'],'r')
confs = [dock6_nc.variables['confs'][n][self.molecule.inv_prmtop_atom_order,:] for n in range(dock6_nc.variables['confs'].shape[0])]
Es = dict([(key,dock6_nc.variables[key][:]) for key in dock6_nc.variables.keys() if key !='confs'])
dock6_nc.close()
count['dock6'] = len(confs)
elif self._FNs['score'].endswith('.pkl.gz'):
F = gzip.open(self._FNs['score'],'r')
confs = pickle.load(F)
F.close()
if not isinstance(confs, list):
confs = [confs]
Es = {}
else:
raise Exception('Input configuration format not recognized')
# based on the seeds
if self.confs['dock']['seeds'] is not None:
confs = confs + self.confs['dock']['seeds']
count['initial_dock'] = len(self.confs['dock']['seeds'])
if len(confs)==0:
return ([],{})
if site:
# Filters out configurations not in the binding site
confs_in_site = []
Es_in_site = dict([(label,[]) for label in Es.keys()])
old_eval = self.universe._evaluator[(None,None,None)]
self._set_universe_evaluator({'site':True,'T':self.T_TARGET})
for n in range(len(confs)):
self.universe.setConfiguration(Configuration(self.universe, confs[n]))
if self.universe.energy()<1.:
confs_in_site.append(confs[n])
for label in Es.keys():
Es_in_site[label].append(Es[label][n])
self.universe._evaluator[(None,None,None)] = old_eval
confs = confs_in_site
Es = Es_in_site
try:
self.universe.energy()
except ValueError:
return (confs,{})
if minimize:
Es = {}
from MMTK.Minimization import SteepestDescentMinimizer # @UnresolvedImport
minimizer = SteepestDescentMinimizer(self.universe)
for rep in range(5):
x_o = np.copy(self.universe.configuration().array)
e_o = self.universe.energy()
minimizer(steps = 50)
e_n = self.universe.energy()
if np.isnan(e_n) or (e_o-e_n)<1000:
self.universe.configuration().array = x_o
break
minimized_confs = []
min_start_time = time.time()
for conf in confs:
self.universe.setConfiguration(Configuration(self.universe, conf))
minimizer(steps = 1000)
minimized_confs.append(np.copy(self.universe.configuration().array))
confs = minimized_confs
self.tee("\n minimized %d configurations in "%len(confs) + \
HMStime(time.time()-min_start_time))
# Evaluate energies
Etot = []
for conf in confs:
self.universe.setConfiguration(Configuration(self.universe, conf))
Etot.append(self.universe.energy())
# Sort configurations by DECREASING energy
Etot, confs = (list(l) for l in zip(*sorted(zip(Etot, confs), \
key=lambda p:p[0], reverse=True)))
# Shrink or extend configuration and energy array
if nconfs is not None:
confs = confs[-nconfs:]
Etot = Etot[-nconfs:]
while len(confs)<nconfs:
confs.append(confs[-1])
Etot.append(Etot[-1])
count['duplicated'] += 1
count['nconfs'] = nconfs
else:
count['nconfs'] = len(confs)
count['minimized'] = {True:' minimized', False:''}[minimize]
Es['total'] = Etot
self.tee(" keeping {nconfs}{minimized} configurations out of {xtal} from xtal, {dock6} from dock6, {initial_dock} from initial docking, and {duplicated} duplicated\n".format(**count))
return (confs, Es)
def _run_MBAR(self,u_kln,N_k):
"""
Estimates the free energy of a transition using BAR and MBAR
"""
import pymbar
K = len(N_k)
f_k_FEPF = np.zeros(K)
f_k_FEPR = np.zeros(K)
f_k_BAR = np.zeros(K)
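# For each pair of adjacent states, estimate the free energy difference with
# forward and reverse FEP (exponential averaging) and with BAR; the forward
# FEP estimate serves as a fallback if BAR fails. Cumulative sums then give
# free energies relative to the first state, and MBAR refines them using the
# BAR estimates as the initial guess.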
for k in range(K-1):
w_F = u_kln[k,k+1,:N_k[k]] - u_kln[k,k,:N_k[k]]
min_w_F = min(w_F)
w_R = u_kln[k+1,k,:N_k[k+1]] - u_kln[k+1,k+1,:N_k[k+1]]
min_w_R = min(w_R)
f_k_FEPF[k+1] = -np.log(np.mean(np.exp(-w_F+min_w_F))) + min_w_F
f_k_FEPR[k+1] = np.log(np.mean(np.exp(-w_R+min_w_R))) - min_w_R
try:
f_k_BAR[k+1] = pymbar.BAR(w_F, w_R, relative_tolerance=0.000001, verbose=False, compute_uncertainty=False)
except:
f_k_BAR[k+1] = f_k_FEPF[k+1]
f_k_FEPF = np.cumsum(f_k_FEPF)
f_k_FEPR = np.cumsum(f_k_FEPR)
f_k_BAR = np.cumsum(f_k_BAR)
try:
f_k_MBAR = pymbar.MBAR(u_kln, N_k,
verbose = False,
initial_f_k = f_k_BAR,
maximum_iterations = 20).f_k
except:
f_k_MBAR = f_k_BAR
if np.isnan(f_k_MBAR).any():
f_k_MBAR = f_k_BAR
return (f_k_BAR,f_k_MBAR)
def _u_kln(self,eTs,lambdas,noBeta=False):
"""
Computes a reduced potential energy matrix. k is the sampled state. l is the state for which energies are evaluated.
Input:
eTs is one of:
- a dictionary (of mapped energy terms) of numpy arrays (over states)
- a list (over states) of dictionaries (of mapped energy terms) of numpy arrays (over configurations)
- a list (over states) of lists (over cycles) of dictionaries (of mapped energy terms) of numpy arrays (over configurations)
lambdas is a list of thermodynamic states
noBeta means that the energy will not be divided by RT
Output: u_kln or (u_kln, N_k)
u_kln is the matrix (as a numpy array)
N_k is an array of sample sizes
"""
L = len(lambdas)
addMM = ('MM' in lambdas[0].keys()) and (lambdas[0]['MM'])
addSite = ('site' in lambdas[0].keys()) and (lambdas[0]['site'])
probe_key = [key for key in lambdas[0].keys() if key in (['MM'] + self._scalables)][0]
if isinstance(eTs,dict):
# There is one configuration per state
K = len(eTs[probe_key])
N_k = np.ones(K, dtype=int)
u_kln = []
E_base = np.zeros(K)
if addMM:
E_base += eTs['MM']
if addSite:
E_base += eTs['site']
for l in range(L):
E = 1.*E_base
for scalable in self._scalables:
if scalable in lambdas[l].keys():
E += lambdas[l][scalable]*eTs[scalable]
if noBeta:
u_kln.append(E)
else:
u_kln.append(E/(R*lambdas[l]['T']))
elif isinstance(eTs[0],dict):
K = len(eTs)
N_k = np.array([len(eTs[k][probe_key]) for k in range(K)])
u_kln = np.zeros([K, L, N_k.max()], np.float)
for k in range(K):
E_base = 0.0
if addMM:
E_base += eTs[k]['MM']
if addSite:
E_base += eTs[k]['site']
for l in range(L):
E = 1.*E_base
for scalable in self._scalables:
if scalable in lambdas[l].keys():
E += lambdas[l][scalable]*eTs[k][scalable]
if noBeta:
u_kln[k,l,:N_k[k]] = E
else:
u_kln[k,l,:N_k[k]] = E/(R*lambdas[l]['T'])
elif isinstance(eTs[0],list):
K = len(eTs)
N_k = np.zeros(K, dtype=int)
for k in range(K):
for c in range(len(eTs[k])):
N_k[k] += len(eTs[k][c][probe_key])
u_kln = np.zeros([K, L, N_k.max()], np.float)
for k in range(K):
E_base = 0.0
C = len(eTs[k])
if addMM:
E_base += np.concatenate([eTs[k][c]['MM'] for c in range(C)])
if addSite:
E_base += np.concatenate([eTs[k][c]['site'] for c in range(C)])
for l in range(L):
E = 1.*E_base
for scalable in self._scalables:
if scalable in lambdas[l].keys():
E += lambdas[l][scalable]*np.concatenate([eTs[k][c][scalable] for c in range(C)])
if noBeta:
u_kln[k,l,:N_k[k]] = E
else:
u_kln[k,l,:N_k[k]] = E/(R*lambdas[l]['T'])
if (K==1) and (L==1):
return u_kln.ravel()
else:
return (u_kln,N_k)
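# Illustrative sketch, not part of the original module: expected shapes for the
# list-of-dictionaries input branch of _u_kln. The values are hypothetical.
#
#   eTs = [{'MM': np.zeros(10), 'LJr': np.zeros(10)},   # state 0, 10 snapshots
#          {'MM': np.zeros(12), 'LJr': np.zeros(12)}]   # state 1, 12 snapshots
#   lambdas = [{'MM': True, 'LJr': 0.5, 'T': 300.},
#              {'MM': True, 'LJr': 1.0, 'T': 300.}]
#   u_kln, N_k = self._u_kln(eTs, lambdas)
#   # u_kln.shape == (2, 2, 12), N_k == [10, 12]; entries beyond N_k[k] along
#   # the last axis are zero padding.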
def _next_dock_state(self, E=None, lambda_o=None, pow=None, undock=False):
"""
Determines the parameters for the next docking state
"""
if E is None:
E = self.dock_Es[-1]
if lambda_o is None:
lambda_o = self.dock_protocol[-1]
lambda_n = copy.deepcopy(lambda_o)
if self.params['dock']['protocol']=='Set':
raise Exception("Set protocol not currently supported")
elif self.params['dock']['protocol']=='Adaptive':
# Change grid scaling and temperature simultaneously
tL_tensor = self._tL_tensor(E,lambda_o)
crossed = lambda_o['crossed']
if pow is not None:
tL_tensor = tL_tensor*(1.25**pow)
if tL_tensor>1E-5:
dL = self.params['dock']['therm_speed']/tL_tensor
if undock:
a = lambda_o['a'] - dL
if a < 0.0:
a = 0.0
crossed = True
else:
a = lambda_o['a'] + dL
if a > 1.0:
a = 1.0
crossed = True
return self._lambda(a, crossed=crossed)
else:
# Repeats the previous stage
lambda_n['delta_t'] = lambda_o['delta_t']*(1.25**pow)
self.tee(' no variance in previous stage!' + \
' trying time step of %f'%lambda_n['delta_t'])
return lambda_n
def _tL_tensor(self, E, lambda_c, process='dock'):
T = lambda_c['T']
if process=='dock':
# Metric tensor for the thermodynamic length
a = lambda_c['a']
a_sg = 1.-4.*(a-0.5)**2
a_g = 4.*(a-0.5)**2/(1+np.exp(-100*(a-0.5)))
da_sg_da = -8*(a-0.5)
da_g_da = (400.*(a-0.5)**2*np.exp(-100.*(a-0.5)))/(1+np.exp(-100.*(a-0.5)))**2 + \
(8.*(a-0.5))/(1 + np.exp(-100.*(a-0.5)))
Psi_sg = self._u_kln([E], [{'sLJr':1,'sELE':1}], noBeta=True)
Psi_g = self._u_kln([E], [{'LJr':1,'LJa':1,'ELE':1}], noBeta=True)
U_RL_g = self._u_kln([E],
[{'MM':True, 'site':True, 'T':T,\
'sLJr':a_sg, 'sELE':a_sg, 'LJr':a_g, 'LJa':a_g, 'ELE':a_g}], noBeta=True)
return np.abs(da_sg_da)*Psi_sg.std()/(R*T) + \
np.abs(da_g_da)*Psi_g.std()/(R*T) + \
np.abs(self.T_TARGET-self.T_HIGH)*U_RL_g.std()/(R*T*T)
elif process=='cool':
return self._u_kln([E],[{'MM':True}], noBeta=True).std()/(R*T*T)
else:
raise Exception("Unknown process!")
def _lambda(self, a, process='dock', lambda_o=None, \
MM=None, site=None, crossed=None):
if (lambda_o is None) and len(getattr(self,process+'_protocol'))>0:
lambda_o = copy.deepcopy(getattr(self,process+'_protocol')[-1])
if (lambda_o is not None):
lambda_n = copy.deepcopy(lambda_o)
else:
lambda_n = {}
if MM is not None:
lambda_n['MM'] = MM
if site is not None:
lambda_n['site'] = site
if crossed is not None:
lambda_n['crossed'] = crossed
if process=='dock':
a_sg = 1.-4.*(a-0.5)**2
a_g = 4.*(a-0.5)**2/(1+np.exp(-100*(a-0.5)))
if a_g<1E-10:
a_g=0
lambda_n['a'] = a
lambda_n['sLJr'] = a_sg
lambda_n['sELE'] = a_sg
lambda_n['LJr'] = a_g
lambda_n['LJa'] = a_g
lambda_n['ELE'] = a_g
lambda_n['T'] = a*(self.T_TARGET-self.T_HIGH) + self.T_HIGH
elif process=='cool':
lambda_n['a'] = a
lambda_n['T'] = self.T_HIGH - a*(self.T_HIGH-self.T_TARGET)
else:
raise Exception("Unknown process!")
return lambda_n
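# Illustrative worked example, not part of the original module, of the docking
# schedule produced by _lambda, with a_sg = 1-4*(a-0.5)**2 and
# a_g = 4*(a-0.5)**2/(1+exp(-100*(a-0.5))):
#   a = 0.0  ->  a_sg = 0, a_g ~ 0  (grids off, T = T_HIGH)
#   a = 0.5  ->  a_sg = 1, a_g = 0  (soft-core grids fully on, T halfway)
#   a = 1.0  ->  a_sg = 0, a_g ~ 1  (full LJ/ELE grids on, T = T_TARGET)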
def _load_programs(self, phases):
# Find the necessary programs, downloading them if necessary
programs = []
for phase in phases:
if phase in ['Gas','GBSA','PBSA'] and not 'sander' in programs:
programs.append('sander')
elif phase.startswith('NAMD') and not 'namd' in programs:
programs.append('namd')
elif phase in ['APBS'] and not 'apbs' in programs:
programs.extend(['apbs','ambpdb','molsurf'])
for program in programs:
self._FNs[program] = a.findPaths([program])[program]
# TODO: This does not seem to keep the environment variables
# for sander in subprocess.
a.loadModules(programs)
def _postprocess(self,
conditions=[('original',0, 0,'R'), ('cool',-1,-1,'L'), \
('dock', -1,-1,'L'), ('dock',-1,-1,'RL')],
phases=None,
readOnly=False, redo_dock=False, debug=False):
"""
Obtains the energies of all the conditions in all the phases (with sander, NAMD, OpenMM, or APBS, depending on the phase).
Saves both the MMTK and the phase energies after the phase energies are estimated.
state == -1 means the last state
cycle == -1 means all cycles
"""
# Clear evaluators to save memory
self._evaluators = {}
if phases is None:
phases = list(set(self.params['cool']['phases'] + self.params['dock']['phases']))
updated_processes = []
if 'APBS' in phases:
updated_processes = self._combine_APBS_and_NAMD(updated_processes)
# Identify incomplete calculations
incomplete = []
for (p, state, cycle, moiety) in conditions:
# Check that the values are legitimate
if not p in ['cool','dock','original']:
raise Exception("Type should be in ['cool', 'dock', 'original']")
if not moiety in ['R','L', 'RL']:
raise Exception("Species should in ['R','L', 'RL']")
if p!='original' and getattr(self,p+'_protocol')==[]:
continue
if state==-1:
state = len(getattr(self,p+'_protocol'))-1
if cycle==-1:
cycles = range(getattr(self,'_'+p+'_cycle'))
else:
cycles = [cycle]
# Check for completeness
for c in cycles:
for phase in phases:
label = moiety+phase
# Skip postprocessing
# if the function is NOT being rerun in redo mode
# and one of the following:
# the function is being run in readOnly mode,
# the energies are already in memory.
if (not (redo_dock and p=='dock')) and \
(readOnly \
or (p == 'original' and \
(label in getattr(self,p+'_Es')[state][c].keys()) and \
(getattr(self,p+'_Es')[state][c][label] is not None)) \
or (('MM' in getattr(self,p+'_Es')[state][c].keys()) and \
(label in getattr(self,p+'_Es')[state][c].keys()) and \
(len(getattr(self,p+'_Es')[state][c]['MM'])==\
len(getattr(self,p+'_Es')[state][c][label])))):
pass
else:
incomplete.append((p, state, c, moiety, phase))
if incomplete==[]:
return True
del p, state, c, moiety, phase, cycles, label
self._load_programs([val[-1] for val in incomplete])
# Write trajectories and queue calculations
m = multiprocessing.Manager()
task_queue = m.Queue()
time_per_snap = m.dict()
for (p, state, c, moiety, phase) in incomplete:
if moiety+phase not in time_per_snap.keys():
time_per_snap[moiety+phase] = m.list()
# Decompress prmtop and inpcrd files
decompress = (self._FNs['prmtop'][moiety].endswith('.gz')) or \
(self._FNs['inpcrd'][moiety].endswith('.gz'))
if decompress:
for key in ['prmtop','inpcrd']:
if self._FNs[key][moiety].endswith('.gz'):
import shutil
shutil.copy(self._FNs[key][moiety],self._FNs[key][moiety]+'.BAK')
os.system('gunzip -f '+self._FNs[key][moiety])
os.rename(self._FNs[key][moiety]+'.BAK', self._FNs[key][moiety])
self._FNs[key][moiety] = self._FNs[key][moiety][:-3]
toClean = []
for (p, state, c, moiety, phase) in incomplete:
# Identify the configurations
if (moiety=='R'):
if not 'receptor' in self.confs.keys():
continue
confs = [self.confs['receptor']]
else:
confs = self.confs[p]['samples'][state][c]
# Identify the file names
if p=='original':
prefix = p
else:
prefix = '%s%d_%d'%(p, state, c)
p_dir = {'cool':self.dir['cool'],
'original':self.dir['dock'],
'dock':self.dir['dock']}[p]
if phase in ['Gas','GBSA','PBSA']:
traj_FN = join(p_dir,'%s.%s.mdcrd'%(prefix,moiety))
elif phase.startswith('NAMD'):
traj_FN = join(p_dir,'%s.%s.dcd'%(prefix,moiety))
elif phase.startswith('OpenMM'):
traj_FN = None
elif phase in ['APBS']:
traj_FN = join(p_dir,'%s.%s.pqr'%(prefix,moiety))
outputname = join(p_dir,'%s.%s%s'%(prefix,moiety,phase))
# Writes trajectory
self._write_traj(traj_FN, confs, moiety)
if (traj_FN is not None) and (not traj_FN in toClean):
toClean.append(traj_FN)
# Queues the calculations
task_queue.put((confs, moiety, phase, traj_FN, outputname, debug, \
(p,state,c,moiety+phase)))
# Start postprocessing
self._set_lock('dock' if 'dock' in [loc[0] for loc in incomplete] else 'cool')
self.tee("\n>>> Postprocessing, starting at " + \
time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
postprocess_start_time = time.time()
done_queue = m.Queue()
processes = [multiprocessing.Process(target=self._energy_worker, \
args=(task_queue, done_queue, time_per_snap)) \
for p in range(self._cores)]
for p in range(self._cores):
task_queue.put('STOP')
for p in processes:
p.start()
for p in processes:
p.join()
results = []
while not done_queue.empty():
results.append(done_queue.get())
for p in processes:
p.terminate()
# Clean up files
if not debug:
for FN in toClean:
if os.path.isfile(FN):
os.remove(FN)
# Clear decompressed files
if decompress:
for key in ['prmtop','inpcrd']:
if os.path.isfile(self._FNs[key][moiety]+'.gz'):
os.remove(self._FNs[key][moiety])
self._FNs[key][moiety] = self._FNs[key][moiety] + '.gz'
# Store energies
for (E,(p,state,c,label),wall_time) in results:
if p=='original':
self.original_Es[state][c][label] = E[:,-1]
else:
getattr(self,p+'_Es')[state][c][label] = E
if not p in updated_processes:
updated_processes.append(p)
# Print time per snapshot
for key in time_per_snap.keys():
if len(time_per_snap[key])>0:
mean_time_per_snap = np.mean(time_per_snap[key])
if not np.isnan(mean_time_per_snap):
self.tee(" an average of %f s per %s snapshot"%(\
mean_time_per_snap, key))
else:
self.tee(" time per snapshot in %s: "%(key) + \
', '.join(['%f'%t for t in time_per_snap[key]]))
else:
self.tee(" no snapshots postprocessed in %s"%(key))
# Combine APBS and NAMD_Gas energies
if 'APBS' in phases:
updated_processes = self._combine_APBS_and_NAMD(updated_processes)
# Save data
if 'original' in updated_processes:
for phase in phases:
if (self.params['dock']['receptor_'+phase] is None) and \
(self.original_Es[0][0]['R'+phase] is not None):
self.params['dock']['receptor_'+phase] = \
self.original_Es[0][0]['R'+phase]
self._save('dock', keys=['progress'])
if 'cool' in updated_processes:
self._save('cool')
if ('dock' in updated_processes) or ('original' in updated_processes):
self._save('dock')
if len(updated_processes)>0:
self._clear_lock('dock' if 'dock' in updated_processes else 'cool')
self.tee("\nElapsed time for postprocessing was " + \
HMStime(time.time()-postprocess_start_time))
return len(incomplete)==len(results)
def _energy_worker(self, input, output, time_per_snap):
for args in iter(input.get, 'STOP'):
(confs, moiety, phase, traj_FN, outputname, debug, reference) = args
(p, state, c, label) = reference
nsnaps = len(confs)
# Make sure there is enough time remaining
if self.run_type=='timed':
remaining_time = self.timing['max']*60 - \
(time.time()-self.timing['start'])
if len(time_per_snap[moiety+phase])>0:
mean_time_per_snap = np.mean(time_per_snap[moiety+phase])
if np.isnan(mean_time_per_snap):
return
projected_time = mean_time_per_snap*nsnaps
self.tee(" projected cycle time for %s: %s, remaining time: %s"%(\
moiety+phase, \
HMStime(projected_time), HMStime(remaining_time)), process=p)
if projected_time > remaining_time:
return
# Calculate the energy
start_time = time.time()
if phase in ['Gas','GBSA','PBSA']:
E = self._sander_Energy(*args)
elif phase.startswith('NAMD'):
E = self._NAMD_Energy(*args)
elif phase.startswith('OpenMM'):
E = self._OpenMM_Energy(*args)
elif phase in ['APBS']:
E = self._APBS_Energy(*args)
wall_time = time.time() - start_time
if not np.isinf(E).any():
self.tee(" postprocessed %s, state %d, cycle %d, %s in %s"%(\
p,state,c,label,HMStime(wall_time)))
# Store output and timings
output.put((E, reference, wall_time))
times_per_snap = time_per_snap[moiety+phase]
times_per_snap.append(wall_time/nsnaps)
time_per_snap[moiety+phase] = times_per_snap
else:
self.tee(" error in postprocessing %s, state %d, cycle %d, %s in %s"%(\
p,state,c,label,HMStime(wall_time)))
return
def _combine_APBS_and_NAMD(self, updated_processes = []):
psmc = [\
('cool', len(self.cool_protocol)-1, ['L'], range(self._cool_cycle)), \
('dock', len(self.dock_protocol)-1, ['L','RL'], range(self._dock_cycle)), \
('original', 0, ['R'], [0])]
for (p,state,moieties,cycles) in psmc:
for moiety in moieties:
label = moiety + 'APBS'
for c in cycles:
if not ((label in getattr(self,p+'_Es')[state][c]) and \
(getattr(self,p+'_Es')[state][c][label] is not None) and \
(moiety+'NAMD_Gas' in getattr(self,p+'_Es')[state][c]) and \
(getattr(self,p+'_Es')[state][c][moiety+'NAMD_Gas'] is not None)):
break
if len(getattr(self,p+'_Es')[state][c][label].shape)==1 and \
(getattr(self,p+'_Es')[state][c][label].shape[0]==2):
E_PBSA = getattr(self,p+'_Es')[state][c][label]
E_NAMD_Gas = getattr(self,p+'_Es')[state][c][moiety+'NAMD_Gas'][-1]
E_tot = np.sum(E_PBSA) + E_NAMD_Gas
getattr(self,p+'_Es')[state][c][label] = \
np.hstack((E_PBSA,E_NAMD_Gas,E_tot))
if not p in updated_processes:
updated_processes.append(p)
elif len(getattr(self,p+'_Es')[state][c][label].shape)==2 and \
(getattr(self,p+'_Es')[state][c][label].shape[1]==2):
E_PBSA = getattr(self,p+'_Es')[state][c][label]
E_NAMD_Gas = getattr(self,p+'_Es')[state][c][moiety+'NAMD_Gas']
if len(E_NAMD_Gas.shape)==1:
E_NAMD_Gas = np.array([[E_NAMD_Gas[-1]]])
else:
E_NAMD_Gas = E_NAMD_Gas[:,-1]
E_tot = np.sum(E_PBSA,1) + E_NAMD_Gas
getattr(self,p+'_Es')[state][c][label] = \
np.hstack((E_NAMD_Gas[...,None],E_PBSA,E_tot[...,None]))
if not p in updated_processes:
updated_processes.append(p)
return updated_processes
def _calc_E(self, confs, E=None, type='sampling', prefix='confs', debug=False):
"""
Calculates energies for a series of configurations
Units are the MMTK standard, kJ/mol
"""
if E is None:
E = {}
lambda_full = {'T':self.T_HIGH,'MM':True,'site':True}
for scalable in self._scalables:
lambda_full[scalable] = 1
if type=='sampling' or type=='all':
# Molecular mechanics and grid interaction energies
self._set_universe_evaluator(lambda_full)
for term in (['MM','site','misc'] + self._scalables):
E[term] = np.zeros(len(confs), dtype=float)
for c in range(len(confs)):
self.universe.setConfiguration(Configuration(self.universe,confs[c]))
eT = self.universe.energyTerms()
for (key,value) in eT.iteritems():
E[term_map[key]][c] += value
if type=='all':
self._load_programs(self.params['dock']['phases'])
toClear = []
for phase in self.params['dock']['phases']:
E['R'+phase] = self.params['dock']['receptor_'+phase]
for moiety in ['L','RL']:
outputname = join(self.dir['dock'],'%s.%s%s'%(prefix,moiety,phase))
if phase in ['Gas','GBSA','PBSA']:
traj_FN = join(self.dir['dock'],'%s.%s.mdcrd'%(prefix,moiety))
self._write_traj(traj_FN, confs, moiety)
E[moiety+phase] = self._sander_Energy(confs, moiety, phase, \
traj_FN, outputname, debug=debug)
elif phase.startswith('NAMD'):
traj_FN = join(self.dir['dock'],'%s.%s.dcd'%(prefix,moiety))
self._write_traj(traj_FN, confs, moiety)
E[moiety+phase] = self._NAMD_Energy(confs, moiety, phase, \
traj_FN, outputname, debug=debug)
elif phase.startswith('OpenMM'):
traj_FN = None
E[moiety+phase] = self._OpenMM_Energy(confs, moiety, phase, \
traj_FN, outputname, debug=debug)
elif phase in ['APBS']:
traj_FN = join(self.dir['dock'],'%s.%s.pqr'%(prefix,moiety))
E[moiety+phase] = self._APBS_Energy(confs, moiety, phase, \
traj_FN, outputname, debug=debug)
else:
raise Exception('Unknown phase!')
if not traj_FN in toClear:
toClear.append(traj_FN)
for FN in toClear:
if os.path.isfile(FN):
os.remove(FN)
return E
def _sander_Energy(self, confs, moiety, phase, AMBER_mdcrd_FN, \
outputname=None, debug=False, reference=None):
self.dir['out'] = os.path.dirname(os.path.abspath(AMBER_mdcrd_FN))
script_FN = '%s%s.in'%('.'.join(AMBER_mdcrd_FN.split('.')[:-1]),phase)
out_FN = '%s%s.out'%('.'.join(AMBER_mdcrd_FN.split('.')[:-1]),phase)
script_F = open(script_FN,'w')
if phase=='PBSA':
if moiety=='L':
fillratio = 4.
else:
fillratio = 2.
script_F.write('''Calculate PBSA energies
&cntrl
imin=5, ! read trajectory in for analysis
ntx=1, ! input is read formatted with no velocities
irest=0,
ntb=0, ! no periodicity and no PME
idecomp=0, ! no decomposition
ntc=1, ! No SHAKE
ntf=1, ! Complete interaction is calculated
ipb=2, ! Default PB dielectric model
inp=1, ! SASA non-polar
/
&pb
radiopt=0, ! Use atomic radii from the prmtop file
fillratio=%f,
sprob=1.4,
cavity_surften=0.005,
cavity_offset=0.000,
/
'''%fillratio)
elif phase=='GBSA':
script_F.write('''Calculate GBSA energies
&cntrl
imin=5, ! read trajectory in for analysis
ntx=1, ! input is read formatted with no velocities
irest=0,
ntb=0, ! no periodicity and no PME
idecomp=0, ! no decomposition
ntc=1, ! No SHAKE
ntf=1, ! Complete interaction is calculated
igb=8, ! Most recent AMBER GBn model, best agreement with PB
gbsa=2, ! recursive surface area algorithm
/
''')
elif phase=='Gas':
script_F.write('''Calculate Gas energies
&cntrl
imin=5, ! read trajectory in for analysis
ntx=1, ! input is read formatted with no velocities
irest=0,
ntb=0, ! no periodicity and no PME
idecomp=0, ! no decomposition
ntc=1, ! No SHAKE
ntf=1, ! Complete interaction is calculated
cut=9999., !
/
''')
script_F.close()
os.chdir(self.dir['out'])
import subprocess
p = subprocess.Popen([self._FNs['sander'], '-O','-i',script_FN,'-o',out_FN, \
'-p',self._FNs['prmtop'][moiety],'-c',self._FNs['inpcrd'][moiety], \
'-y', AMBER_mdcrd_FN, '-r',script_FN+'.restrt'])
p.wait()
F = open(out_FN,'r')
dat = F.read().strip().split(' BOND')
F.close()
dat.pop(0)
if len(dat)>0:
E = np.array([rec[:rec.find('\nminimization')].replace('1-4 ','1-4').split()[1::3] for rec in dat],dtype=float)*MMTK.Units.kcal/MMTK.Units.mol
E = np.hstack((E,np.sum(E,1)[...,None]))
if not debug and os.path.isfile(script_FN):
os.remove(script_FN)
if os.path.isfile(script_FN+'.restrt'):
os.remove(script_FN+'.restrt')
if not debug and os.path.isfile(out_FN):
os.remove(out_FN)
else:
E = np.array([np.inf]*11)
os.chdir(self.dir['start'])
return E
# AMBER ENERGY FIELDS:
# For Gas phase:
# 0. BOND 1. ANGLE 2. DIHEDRAL 3. VDWAALS 4. EEL
# 5. HBOND 6. 1-4 VWD 7. 1-4 EEL 8. RESTRAINT
# For GBSA phase:
# 0. BOND 1. ANGLE 2. DIHEDRAL 3. VDWAALS 4. EEL
# 5. EGB 6. 1-4 VWD 7. 1-4 EEL 8. RESTRAINT 9. ESURF
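# Illustrative sketch, not part of the original module: indexing the array
# returned by _sander_Energy. Columns follow the field lists above, and the
# final column is the row sum appended by the method (values in MMTK kJ/mol).
#
#   E = self._sander_Energy(confs, 'L', 'GBSA', traj_FN, outputname)
#   # E[:, 4] -> EEL,  E[:, 5] -> EGB,  E[:, -1] -> total energy per snapshot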
def _NAMD_Energy(self, confs, moiety, phase, dcd_FN, outputname,
debug=False, reference=None):
"""
Uses NAMD to calculate the energy of a set of configurations
Units are the MMTK standard, kJ/mol
"""
# NAMD ENERGY FIELDS:
# 0. TS 1. BOND 2. ANGLE 3. DIHED 4. IMPRP 5. ELECT 6. VDW 7. BOUNDARY
# 8. MISC 9. KINETIC 10. TOTAL 11. TEMP 12. POTENTIAL 13. TOTAL3 14. TEMPAVG
# The saved fields are energyFields=[1, 2, 3, 4, 5, 6, 8, 12],
# and thus the new indicies are
# 0. BOND 1. ANGLE 2. DIHED 3. IMPRP 4. ELECT 5. VDW 6. MISC 7. POTENTIAL
# Run NAMD
import AlGDock.NAMD
energyCalc = AlGDock.NAMD.NAMD(\
prmtop=self._FNs['prmtop'][moiety], \
inpcrd=self._FNs['inpcrd'][moiety], \
fixed={'R':self._FNs['fixed_atoms']['R'], \
'L':None, \
'RL':self._FNs['fixed_atoms']['RL']}[moiety], \
solvent={'NAMD_GBSA':'GBSA', 'NAMD_Gas':'Gas'}[phase], \
useCutoff=(phase=='NAMD_GBSA'), \
namd_command=self._FNs['namd'])
E = energyCalc.energies_PE(\
outputname, dcd_FN, energyFields=[1, 2, 3, 4, 5, 6, 8, 12], \
keepScript=debug, writeEnergyDatGZ=False)
return np.array(E, dtype=float)*MMTK.Units.kcal/MMTK.Units.mol
def _OpenMM_Energy(self, confs, moiety, phase, traj_FN=None, \
outputname=None, debug=False, reference=None):
import simtk.openmm
import simtk.openmm.app as OpenMM_app
# Set up the simulation
key = moiety+phase
if not key in self._OpenMM_sims.keys():
prmtop = OpenMM_app.AmberPrmtopFile(self._FNs['prmtop'][moiety])
inpcrd = OpenMM_app.AmberInpcrdFile(self._FNs['inpcrd'][moiety])
OMM_system = prmtop.createSystem(nonbondedMethod=OpenMM_app.NoCutoff, \
constraints=None, implicitSolvent={
'OpenMM_Gas':None,
'OpenMM_GBn':OpenMM_app.GBn,
'OpenMM_GBn2':OpenMM_app.GBn2,
'OpenMM_HCT':OpenMM_app.HCT,
'OpenMM_OBC1':OpenMM_app.OBC1,
'OpenMM_OBC2':OpenMM_app.OBC2}[phase])
dummy_integrator = simtk.openmm.LangevinIntegrator(300*simtk.unit.kelvin, \
1/simtk.unit.picosecond, 0.002*simtk.unit.picoseconds)
# platform = simtk.openmm.Platform.getPlatformByName('CPU')
self._OpenMM_sims[key] = OpenMM_app.Simulation(prmtop.topology, \
OMM_system, dummy_integrator)
# Prepare the conformations by combining with the receptor if necessary
if (moiety.find('R')>-1):
receptor_0 = self.confs['receptor'][:self._ligand_first_atom,:]
receptor_1 = self.confs['receptor'][self._ligand_first_atom:,:]
if not isinstance(confs,list):
confs = [confs]
if (moiety.find('R')>-1):
if (moiety.find('L')>-1):
confs = [np.vstack((receptor_0, \
conf[self.molecule.prmtop_atom_order,:], \
receptor_1)) for conf in confs]
else:
confs = [self.confs['receptor']]
else:
confs = [conf[self.molecule.prmtop_atom_order,:] for conf in confs]
# Calculate the energies
E = []
for conf in confs:
self._OpenMM_sims[key].context.setPositions(conf)
s = self._OpenMM_sims[key].context.getState(getEnergy=True)
E.append([0., s.getPotentialEnergy()/simtk.unit.kilojoule*simtk.unit.mole])
return np.array(E, dtype=float)*MMTK.Units.kJ/MMTK.Units.mol
def _APBS_Energy(self, confs, moiety, phase, pqr_FN, outputname,
debug=False, reference=None, factor=1.0/MMTK.Units.Ang):
"""
Uses APBS and molsurf to calculate the polar and apolar energies of a set of configurations
Units are the MMTK standard, kJ/mol
"""
# TODO: Include internal energy
# Prepare configurations for writing to crd file
if (moiety.find('R')>-1):
receptor_0 = factor*self.confs['receptor'][:self._ligand_first_atom,:]
receptor_1 = factor*self.confs['receptor'][self._ligand_first_atom:,:]
if not isinstance(confs,list):
confs = [confs]
if (moiety.find('R')>-1):
if (moiety.find('L')>-1):
full_confs = [np.vstack((receptor_0, \
conf[self.molecule.prmtop_atom_order,:]/MMTK.Units.Ang, \
receptor_1)) for conf in confs]
else:
full_confs = [factor*self.confs['receptor']]
else:
full_confs = [conf[self.molecule.prmtop_atom_order,:]/MMTK.Units.Ang \
for conf in confs]
# Write coordinates, run APBS, and store energies
apbs_dir = pqr_FN[:-4]
os.system('mkdir -p '+apbs_dir)
os.chdir(apbs_dir)
pqr_FN = os.path.join(apbs_dir, 'in.pqr')
import subprocess
import AlGDock.IO
IO_crd = AlGDock.IO.crd()
E = []
for full_conf in full_confs:
# Writes the coordinates in AMBER format
inpcrd_FN = pqr_FN[:-4]+'.crd'
IO_crd.write(inpcrd_FN, full_conf, 'title', trajectory=False)
# Converts the coordinates to a pqr file
inpcrd_F = open(inpcrd_FN,'r')
cdir = os.getcwd()
p = subprocess.Popen(\
[os.path.relpath(self._FNs['ambpdb'], cdir), \
'-p', os.path.relpath(self._FNs['prmtop'][moiety], cdir), \
'-pqr'], \
stdin=inpcrd_F, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutdata_ambpdb, stderrdata_ambpdb) = p.communicate()
p.wait()
inpcrd_F.close()
pqr_F = open(pqr_FN,'w')
pqr_F.write(stdoutdata_ambpdb)
pqr_F.close()
# Writes APBS script
apbs_in_FN = moiety+'apbs-mg-manual.in'
apbs_in_F = open(apbs_in_FN,'w')
apbs_in_F.write('READ\n mol pqr {0}\nEND\n'.format(pqr_FN))
for sdie in [80.0,2.0]:
if moiety=='L':
min_xyz = np.array([min(full_conf[:,a]) for a in range(3)]) # per-axis minimum over all atoms
max_xyz = np.array([max(full_conf[:,a]) for a in range(3)]) # per-axis maximum over all atoms
mol_range = max_xyz - min_xyz
mol_center = (min_xyz + max_xyz)/2.
def roundUpDime(x):
return (np.ceil((x.astype(float)-1)/32)*32+1).astype(int)
focus_spacing = 0.5
focus_dims = roundUpDime(mol_range*1.5/focus_spacing)
args = zip(['mdh'],[focus_dims],[mol_center],[focus_spacing])
else:
args = zip(['mdh','focus'],
self._apbs_grid['dime'], self._apbs_grid['gcent'],
self._apbs_grid['spacing'])
for (bcfl,dime,gcent,grid) in args:
apbs_in_F.write('''ELEC mg-manual
bcfl {0} # multiple debye-huckel boundary condition
chgm spl4 # quintic B-spline charge discretization
dime {1[0]} {1[1]} {1[2]}
gcent {2[0]} {2[1]} {2[2]}
grid {3} {3} {3}
lpbe # Linearized Poisson-Boltzmann
mol 1
pdie 2.0
sdens 10.0
sdie {4}
srad 1.4
srfm smol # Smoothed dielectric and ion-accessibility coefficients
swin 0.3
temp 300.0
calcenergy total
END
'''.format(bcfl,dime,gcent,grid,sdie))
apbs_in_F.write('quit\n')
apbs_in_F.close()
# Runs APBS
p = subprocess.Popen([self._FNs['apbs'], apbs_in_FN], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = p.communicate()
p.wait()
apbs_energy = [float(line.split('=')[-1][:-7]) \
for line in stdoutdata.split('\n') \
if line.startswith(' Total electrostatic energy')]
if moiety=='L' and len(apbs_energy)==2:
polar_energy = apbs_energy[0]-apbs_energy[1]
elif len(apbs_energy)==4:
polar_energy = apbs_energy[1]-apbs_energy[3]
else:
# An error has occurred in APBS
polar_energy = np.inf
self.tee(" error has occurred in APBS after %d snapshots"%len(E))
self.tee(" prmtop was "+self._FNs['prmtop'][moiety])
self.tee(" --- ambpdb stdout:")
self.tee(stdoutdata_ambpdb)
self.tee(" --- ambpdb stderr:")
self.tee(stderrdata_ambpdb)
self.tee(" --- APBS stdout:")
self.tee(stdoutdata)
self.tee(" --- APBS stderr:")
self.tee(stderrdata)
# Runs molsurf to calculate Connolly surface
apolar_energy = np.inf
p = subprocess.Popen([self._FNs['molsurf'], pqr_FN, '1.4'], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = p.communicate()
p.wait()
for line in stdoutdata.split('\n'):
if line.startswith('surface area ='):
apolar_energy = float(line.split('=')[-1]) * \
0.0072 * MMTK.Units.kcal/MMTK.Units.mol
for FN in [inpcrd_FN, pqr_FN, apbs_in_FN, 'io.mc']:
os.remove(FN)
E.append([polar_energy, apolar_energy])
if np.isinf(polar_energy) or np.isinf(apolar_energy):
break
os.chdir(self.dir['start'])
os.system('rm -rf '+apbs_dir)
return np.array(E, dtype=float)*MMTK.Units.kJ/MMTK.Units.mol
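# Illustrative note, not part of the original module: each row returned by
# _APBS_Energy is [polar, apolar]. The polar term is the difference between the
# sdie=80 and sdie=2 electrostatic energies from APBS; the apolar term is
# 0.0072 kcal/mol/A^2 times the molsurf surface area, e.g. a hypothetical
# 500 A^2 gives 3.6 kcal/mol (about 15.1 kJ/mol).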
def _write_traj(self, traj_FN, confs, moiety, \
title='', factor=1.0/MMTK.Units.Ang):
"""
Writes a trajectory file
"""
if traj_FN is None:
return
if traj_FN.endswith('.pqr'):
return
if os.path.isfile(traj_FN):
return
traj_dir = os.path.dirname(os.path.abspath(traj_FN))
if not os.path.isdir(traj_dir):
os.system('mkdir -p '+traj_dir)
import AlGDock.IO
if traj_FN.endswith('.dcd'):
IO_dcd = AlGDock.IO.dcd(self.molecule,
ligand_atom_order = self.molecule.prmtop_atom_order, \
receptorConf = self.confs['receptor'], \
ligand_first_atom = self._ligand_first_atom)
IO_dcd.write(traj_FN, confs,
includeReceptor=(moiety.find('R')>-1),
includeLigand=(moiety.find('L')>-1))
elif traj_FN.endswith('.mdcrd'):
if (moiety.find('R')>-1):
receptor_0 = factor*self.confs['receptor'][:self._ligand_first_atom,:]
receptor_1 = factor*self.confs['receptor'][self._ligand_first_atom:,:]
if not isinstance(confs,list):
confs = [confs]
if (moiety.find('R')>-1):
if (moiety.find('L')>-1):
confs = [np.vstack((receptor_0, \
conf[self.molecule.prmtop_atom_order,:]/MMTK.Units.Ang, \
receptor_1)) for conf in confs]
else:
confs = [factor*self.confs['receptor']]
else:
confs = [conf[self.molecule.prmtop_atom_order,:]/MMTK.Units.Ang \
for conf in confs]
import AlGDock.IO
IO_crd = AlGDock.IO.crd()
IO_crd.write(traj_FN, confs, title, trajectory=True)
self.tee(" wrote %d configurations to %s"%(len(confs), traj_FN))
else:
raise Exception('Unknown trajectory type')
def _load_pkl_gz(self, FN):
if os.path.isfile(FN) and os.path.getsize(FN)>0:
F = gzip.open(FN,'r')
try:
data = pickle.load(F)
except:
self.tee(' error loading '+FN)
F.close()
return None
F.close()
return data
else:
return None
def _write_pkl_gz(self, FN, data):
F = gzip.open(FN,'w')
pickle.dump(data,F)
F.close()
self.tee(" wrote to "+FN)
def _load(self, p):
progress_FN = join(self.dir[p],'%s_progress.pkl.gz'%(p))
data_FN = join(self.dir[p],'%s_data.pkl.gz'%(p))
saved = {'progress':self._load_pkl_gz(progress_FN),
'data':self._load_pkl_gz(data_FN)}
if (saved['progress'] is None) or (saved['data'] is None):
if os.path.isfile(progress_FN):
os.remove(progress_FN)
if os.path.isfile(data_FN):
os.remove(data_FN)
progress_FN = join(self.dir[p],'%s_progress.pkl.gz.BAK'%(p))
data_FN = join(self.dir[p],'%s_data.pkl.gz.BAK'%(p))
saved = {'progress':self._load_pkl_gz(progress_FN),
'data':self._load_pkl_gz(data_FN)}
if (saved['progress'] is None):
print ' no progress information for %s'%p
elif (saved['data'] is None):
saved['progress'] = None
print ' missing data in %s'%p
else:
print ' using stored progress and data in %s'%p
self._clear(p)
params = None
if saved['progress'] is not None:
params = saved['progress'][0]
setattr(self,'%s_protocol'%p,saved['progress'][1])
setattr(self,'_%s_cycle'%p,saved['progress'][2])
if saved['data'] is not None:
if p=='dock' and saved['data'][0] is not None:
(self._n_trans, self._max_n_trans, self._random_trans, \
self._n_rot, self._max_n_rot, self._random_rotT) = saved['data'][0]
self.confs[p]['replicas'] = saved['data'][1]
self.confs[p]['seeds'] = saved['data'][2]
self.confs[p]['samples'] = saved['data'][3]
# TODO: Check if conformations are empty
setattr(self,'%s_Es'%p, saved['data'][4])
if saved['data'][3] is not None:
cycle = len(saved['data'][3][-1])
setattr(self,'_%s_cycle'%p,cycle)
else:
setattr(self,'_%s_cycle'%p,0)
if getattr(self,'%s_protocol'%p)==[] or \
(not getattr(self,'%s_protocol'%p)[-1]['crossed']):
setattr(self,'_%s_cycle'%p,0)
return params
def _clear(self, p):
setattr(self,'%s_protocol'%p,[])
setattr(self,'_%s_cycle'%p,0)
self.confs[p]['replicas'] = None
self.confs[p]['seeds'] = None
self.confs[p]['samples'] = None
setattr(self,'%s_Es'%p,None)
def _save(self, p, keys=['progress','data']):
"""
Saves the protocol,
cycle counts,
random orientation parameters (for docking),
replica configurations,
sampled configurations,
and energies
"""
random_orient = None
if p=='dock' and hasattr(self,'_n_trans'):
random_orient = (self._n_trans, self._max_n_trans, self._random_trans, \
self._n_rot, self._max_n_rot, self._random_rotT)
arg_dict = dict([tp for tp in self.params[p].items() \
if not tp[0] in ['repX_cycles']])
if p=='cool':
fn_dict = convert_dictionary_relpath({
'ligand_database':self._FNs['ligand_database'],
'forcefield':self._FNs['forcefield'],
'frcmodList':self._FNs['frcmodList'],
'tarball':{'L':self._FNs['tarball']['L']},
'prmtop':{'L':self._FNs['prmtop']['L']},
'inpcrd':{'L':self._FNs['inpcrd']['L']}},
relpath_o=None, relpath_n=self.dir['cool'])
elif p=='dock':
fn_dict = convert_dictionary_relpath(
dict([tp for tp in self._FNs.items() \
if not tp[0] in ['namd','vmd','sander','convert','font']]),
relpath_o=None, relpath_n=self.dir['dock'])
params = (fn_dict,arg_dict)
saved = {
'progress': (params,
getattr(self,'%s_protocol'%p),
getattr(self,'_%s_cycle'%p)),
'data': (random_orient,
self.confs[p]['replicas'],
self.confs[p]['seeds'],
self.confs[p]['samples'],
getattr(self,'%s_Es'%p))}
for key in keys:
saved_FN = join(self.dir[p],'%s_%s.pkl.gz'%(p,key))
if not os.path.isdir(self.dir[p]):
os.system('mkdir -p '+self.dir[p])
if os.path.isfile(saved_FN):
os.rename(saved_FN,saved_FN+'.BAK')
self._write_pkl_gz(saved_FN, saved[key])
def _set_lock(self, p):
if not os.path.isdir(self.dir[p]):
os.system('mkdir -p '+self.dir[p])
lockFN = join(self.dir[p],'.lock')
if os.path.isfile(lockFN):
raise Exception(p + ' is locked')
else:
lockF = open(lockFN,'w')
lockF.close()
logFN = join(self.dir[p],p+'_log.txt')
self.log = open(logFN,'a')
def _clear_lock(self, p):
lockFN = join(self.dir[p],'.lock')
if os.path.isfile(lockFN):
os.remove(lockFN)
if hasattr(self,'log'):
self.log.close()
del self.log
def tee(self, var, process=None):
print var
if hasattr(self,'log'):
if isinstance(var,str):
self.log.write(var+'\n')
else:
self.log.write(repr(var)+'\n')
self.log.flush()
elif process is not None:
self._set_lock(process)
if isinstance(var,str):
self.log.write(var+'\n')
else:
self.log.write(repr(var)+'\n')
self.log.flush()
self._clear_lock(process)
def __del__(self):
if len(self._toClear)>0:
print '\n>>> Clearing files'
for FN in self._toClear:
if os.path.isfile(FN):
os.remove(FN)
print ' removed '+os.path.relpath(FN,self.dir['start'])
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Molecular docking with adaptively scaled alchemical interaction grids')
for key in arguments.keys():
parser.add_argument('--'+key, **arguments[key])
args = parser.parse_args()
if args.run_type in ['render_docked', 'render_intermediates']:
from AlGDock.BindingPMF_plots import BPMF_plots
self = BPMF_plots(**vars(args))
else:
self = BPMF(**vars(args))
| mit |
imsuwj/noambox | model.py | 1 | 1420 | '''
Provide the Data Model
'''
import time
class Config(object):
def __init__(self):
self.play = Playing()
self.use_netease_source = False
self.scroll_lryic = False
self.enable_rpc = False
class Playing(object):
def __init__(self):
self.title = ''
self.singer = ''
self.album = ''
self.list = ''
self.lryic = ''
self.lengeth = 0
self.pause = False
self.time = 0
self.volume = 50
self.play_mode = 0
self.stream = 0
class History(object):
def __init__(self):
self.last_time = int(time.time()*1000)
self.run_time = 0
self.douban = DoubanAccount()
class DoubanAccount(object):
def __init__(self):
self.ck = ''
self.userid = ''
self.is_pro = False
self.username = ''
self.play_record = {
'banned': 0,
'played': 0,
'liked': 0,
'fav_chls_count': 0
}
self.logined = False
self.cookies = None
self.channel_id = ''
self.song_id = ''
class DoubanChannel(object):
def __init__(self):
self.intro = ''
self.id = 0
self.name = ''
self.song_num = 0
class WsInteraction(object):
def __init__(self):
self.date = int(time.time()*1000)
self.command = ''
self.content = ''
| mit |
GageGaskins/osf.io | website/exceptions.py | 7 | 1484 | from website.tokens.exceptions import TokenError
class OSFError(Exception):
"""Base class for exceptions raised by the Osf application"""
pass
class NodeError(OSFError):
"""Raised when an action cannot be performed on a Node model"""
pass
class NodeStateError(NodeError):
"""Raised when the Node's state is not suitable for the requested action
Example: Node.remove_node() is called, but the node has non-deleted children
"""
pass
class SanctionTokenError(TokenError):
"""Base class for errors arising from the use of a sanction token."""
pass
class InvalidSanctionRejectionToken(TokenError):
"""Raised if a Sanction subclass disapproval token submitted is invalid
or associated with another admin authorizer
"""
message_short = "Invalid Token"
message_long = "This disapproval link is invalid. Are you logged into the correct account?"
class InvalidSanctionApprovalToken(TokenError):
"""Raised if a Sanction subclass approval token submitted is invalid
or associated with another admin authorizer
"""
message_short = "Invalid Token"
message_long = "This approval link is invalid. Are you logged into the correct account?"
class UserNotAffiliatedError(OSFError):
"""Raised if a user attempts to add an institution that is not currently
one of their affiliations.
"""
message_short = "User not affiliated"
message_long = "This user is not affiliated with this institution."
| apache-2.0 |
pinpong/android_kernel_htc_m7-gpe | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
bitifirefly/edx-platform | lms/djangoapps/instructor/management/tests/test_openended_commands.py | 106 | 8584 | """Test the openended_post management command."""
from datetime import datetime
import json
from mock import patch
from pytz import UTC
from django.conf import settings
from opaque_keys.edx.locations import Location
import capa.xqueue_interface as xqueue_interface
from courseware.courses import get_course_with_access
from courseware.tests.factories import StudentModuleFactory, UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.tests.test_util_open_ended import (
STATE_INITIAL, STATE_ACCESSING, STATE_POST_ASSESSMENT
)
from student.models import anonymous_id_for_user
from instructor.management.commands.openended_post import post_submission_for_student
from instructor.management.commands.openended_stats import calculate_task_statistics
from instructor.utils import get_module_for_student
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
class OpenEndedPostTest(ModuleStoreTestCase):
"""Test the openended_post management command."""
def setUp(self):
super(OpenEndedPostTest, self).setUp()
self.user = UserFactory()
store = modulestore()
course_items = import_course_from_xml(store, self.user.id, TEST_DATA_DIR, ['open_ended']) # pylint: disable=maybe-no-member
self.course = course_items[0]
self.course_id = self.course.id
self.problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
self.self_assessment_task_number = 0
self.open_ended_task_number = 1
self.student_on_initial = UserFactory()
self.student_on_accessing = UserFactory()
self.student_on_post_assessment = UserFactory()
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_initial,
grade=0,
max_grade=1,
state=STATE_INITIAL
)
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_accessing,
grade=0,
max_grade=1,
state=STATE_ACCESSING
)
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_post_assessment,
grade=0,
max_grade=1,
state=STATE_POST_ASSESSMENT
)
def test_post_submission_for_student_on_initial(self):
course = get_course_with_access(self.student_on_initial, 'load', self.course_id)
dry_run_result = post_submission_for_student(self.student_on_initial, course, self.problem_location, self.open_ended_task_number, dry_run=True)
self.assertFalse(dry_run_result)
result = post_submission_for_student(self.student_on_initial, course, self.problem_location, self.open_ended_task_number, dry_run=False)
self.assertFalse(result)
def test_post_submission_for_student_on_accessing(self):
course = get_course_with_access(self.student_on_accessing, 'load', self.course_id)
dry_run_result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, self.open_ended_task_number, dry_run=True)
self.assertFalse(dry_run_result)
with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
mock_send_to_queue.return_value = (0, "Successfully queued")
module = get_module_for_student(self.student_on_accessing, self.problem_location)
module.child_module.get_task_number(self.open_ended_task_number)
student_response = "Here is an answer."
student_anonymous_id = anonymous_id_for_user(self.student_on_accessing, None)
submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, self.open_ended_task_number, dry_run=False)
self.assertTrue(result)
mock_send_to_queue_body_arg = json.loads(mock_send_to_queue.call_args[1]['body'])
self.assertEqual(mock_send_to_queue_body_arg['max_score'], 2)
self.assertEqual(mock_send_to_queue_body_arg['student_response'], student_response)
body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info'])
self.assertEqual(body_arg_student_info['anonymous_student_id'], student_anonymous_id)
self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time)
def test_post_submission_for_student_on_post_assessment(self):
course = get_course_with_access(self.student_on_post_assessment, 'load', self.course_id)
dry_run_result = post_submission_for_student(self.student_on_post_assessment, course, self.problem_location, self.open_ended_task_number, dry_run=True)
self.assertFalse(dry_run_result)
result = post_submission_for_student(self.student_on_post_assessment, course, self.problem_location, self.open_ended_task_number, dry_run=False)
self.assertFalse(result)
def test_post_submission_for_student_invalid_task(self):
course = get_course_with_access(self.student_on_accessing, 'load', self.course_id)
result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, self.self_assessment_task_number, dry_run=False)
self.assertFalse(result)
out_of_bounds_task_number = 3
result = post_submission_for_student(self.student_on_accessing, course, self.problem_location, out_of_bounds_task_number, dry_run=False)
self.assertFalse(result)
class OpenEndedStatsTest(ModuleStoreTestCase):
"""Test the openended_stats management command."""
def setUp(self):
super(OpenEndedStatsTest, self).setUp()
self.user = UserFactory()
store = modulestore()
course_items = import_course_from_xml(store, self.user.id, TEST_DATA_DIR, ['open_ended']) # pylint: disable=maybe-no-member
self.course = course_items[0]
self.course_id = self.course.id
self.problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
self.task_number = 1
self.invalid_task_number = 3
self.student_on_initial = UserFactory()
self.student_on_accessing = UserFactory()
self.student_on_post_assessment = UserFactory()
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_initial,
grade=0,
max_grade=1,
state=STATE_INITIAL
)
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_accessing,
grade=0,
max_grade=1,
state=STATE_ACCESSING
)
StudentModuleFactory.create(
course_id=self.course_id,
module_state_key=self.problem_location,
student=self.student_on_post_assessment,
grade=0,
max_grade=1,
state=STATE_POST_ASSESSMENT
)
self.students = [self.student_on_initial, self.student_on_accessing, self.student_on_post_assessment]
def test_calculate_task_statistics(self):
course = get_course_with_access(self.student_on_accessing, 'load', self.course_id)
stats = calculate_task_statistics(self.students, course, self.problem_location, self.task_number, write_to_file=False)
self.assertEqual(stats[OpenEndedChild.INITIAL], 1)
self.assertEqual(stats[OpenEndedChild.ASSESSING], 1)
self.assertEqual(stats[OpenEndedChild.POST_ASSESSMENT], 1)
self.assertEqual(stats[OpenEndedChild.DONE], 0)
stats = calculate_task_statistics(self.students, course, self.problem_location, self.invalid_task_number, write_to_file=False)
self.assertEqual(stats[OpenEndedChild.INITIAL], 0)
self.assertEqual(stats[OpenEndedChild.ASSESSING], 0)
self.assertEqual(stats[OpenEndedChild.POST_ASSESSMENT], 0)
self.assertEqual(stats[OpenEndedChild.DONE], 0)
| agpl-3.0 |
LifeDJIK/S.H.I.V.A. | containers/shiva/hazelcast/protocol/codec/client_add_membership_listener_codec.py | 2 | 2425 | from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.client_message_type import *
from hazelcast.protocol.event_response_const import *
REQUEST_TYPE = CLIENT_ADDMEMBERSHIPLISTENER
RESPONSE_TYPE = 104
RETRYABLE = False
def calculate_size(local_only):
""" Calculates the request payload size"""
data_size = 0
data_size += BOOLEAN_SIZE_IN_BYTES
return data_size
def encode_request(local_only):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(local_only))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_bool(local_only)
client_message.update_frame_length()
return client_message
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
parameters['response'] = client_message.read_str()
return parameters
def handle(client_message, handle_event_member = None, handle_event_memberlist = None, handle_event_memberattributechange = None, to_object=None):
""" Event handler """
message_type = client_message.get_message_type()
if message_type == EVENT_MEMBER and handle_event_member is not None:
member = MemberCodec.decode(client_message, to_object)
event_type = client_message.read_int()
handle_event_member(member=member, event_type=event_type)
if message_type == EVENT_MEMBERLIST and handle_event_memberlist is not None:
members_size = client_message.read_int()
members = []
for members_index in xrange(0, members_size):
members_item = MemberCodec.decode(client_message, to_object)
members.append(members_item)
handle_event_memberlist(members=members)
if message_type == EVENT_MEMBERATTRIBUTECHANGE and handle_event_memberattributechange is not None:
uuid = client_message.read_str()
key = client_message.read_str()
operation_type = client_message.read_int()
value=None
if not client_message.read_bool():
value = client_message.read_str()
handle_event_memberattributechange(uuid=uuid, key=key, operation_type=operation_type, value=value)
| mit |
olatoft/reverse-hangman | lib/python3.5/site-packages/pip/req/req_uninstall.py | 510 | 6897 | from __future__ import absolute_import
import logging
import os
import tempfile
from pip.compat import uses_pycache, WINDOWS, cache_from_source
from pip.exceptions import UninstallationError
from pip.utils import rmtree, ask, is_local, renames, normalize_path
from pip.utils.logging import indent_log
logger = logging.getLogger(__name__)
class UninstallPathSet(object):
"""A set of file paths to be removed in the uninstallation of a
requirement."""
def __init__(self, dist):
self.paths = set()
self._refuse = set()
self.pth = {}
self.dist = dist
self.save_dir = None
self._moved_paths = []
def _permitted(self, path):
"""
Return True if the given path is one we are permitted to
remove/modify, False otherwise.
"""
return is_local(path)
def add(self, path):
head, tail = os.path.split(path)
# we normalize the head to resolve parent directory symlinks, but not
# the tail, since we only want to uninstall symlinks, not their targets
path = os.path.join(normalize_path(head), os.path.normcase(tail))
if not os.path.exists(path):
return
if self._permitted(path):
self.paths.add(path)
else:
self._refuse.add(path)
# __pycache__ files can show up after 'installed-files.txt' is created,
# due to imports
if os.path.splitext(path)[1] == '.py' and uses_pycache:
self.add(cache_from_source(path))
def add_pth(self, pth_file, entry):
pth_file = normalize_path(pth_file)
if self._permitted(pth_file):
if pth_file not in self.pth:
self.pth[pth_file] = UninstallPthEntries(pth_file)
self.pth[pth_file].add(entry)
else:
self._refuse.add(pth_file)
def compact(self, paths):
"""Compact a path set to contain the minimal number of paths
necessary to contain all paths in the set. If /a/path/ and
/a/path/to/a/file.txt are both in the set, leave only the
shorter path."""
short_paths = set()
for path in sorted(paths, key=len):
if not any([
(path.startswith(shortpath) and
path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
for shortpath in short_paths]):
short_paths.add(path)
return short_paths
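# Illustrative example, not part of pip: behaviour of compact() as described in
# the docstring above, where ups is an UninstallPathSet instance.
#
#   ups.compact(set(['/a/path/', '/a/path/to/a/file.txt', '/b/file.txt']))
#   # -> set(['/a/path/', '/b/file.txt'])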
def _stash(self, path):
return os.path.join(
self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))
def remove(self, auto_confirm=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
logger.info(
'Uninstalling %s-%s:',
self.dist.project_name, self.dist.version
)
with indent_log():
paths = sorted(self.compact(self.paths))
if auto_confirm:
response = 'y'
else:
for path in paths:
logger.info(path)
response = ask('Proceed (y/n)? ', ('y', 'n'))
if self._refuse:
logger.info('Not removing or modifying (outside of prefix):')
for path in self.compact(self._refuse):
logger.info(path)
if response == 'y':
self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
prefix='pip-')
for path in paths:
new_path = self._stash(path)
logger.debug('Removing file or directory %s', path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in self.pth.values():
pth.remove()
logger.info(
'Successfully uninstalled %s-%s',
self.dist.project_name, self.dist.version
)
def rollback(self):
"""Rollback the changes previously made by remove()."""
if self.save_dir is None:
logger.error(
"Can't roll back %s; was not uninstalled",
self.dist.project_name,
)
return False
logger.info('Rolling back uninstall of %s', self.dist.project_name)
for path in self._moved_paths:
tmp_path = self._stash(path)
logger.debug('Replacing %s', path)
renames(tmp_path, path)
for pth in self.pth.values():
pth.rollback()
def commit(self):
"""Remove temporary save dir: rollback will no longer be possible."""
if self.save_dir is not None:
rmtree(self.save_dir)
self.save_dir = None
self._moved_paths = []
class UninstallPthEntries(object):
def __init__(self, pth_file):
if not os.path.isfile(pth_file):
raise UninstallationError(
"Cannot remove entries from nonexistent file %s" % pth_file
)
self.file = pth_file
self.entries = set()
self._saved_lines = None
def add(self, entry):
entry = os.path.normcase(entry)
# On Windows, os.path.normcase converts the entry to use
# backslashes. This is correct for entries that describe absolute
# paths outside of site-packages, but all the others use forward
# slashes.
if WINDOWS and not os.path.splitdrive(entry)[0]:
entry = entry.replace('\\', '/')
self.entries.add(entry)
def remove(self):
logger.debug('Removing pth entries from %s:', self.file)
with open(self.file, 'rb') as fh:
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
lines = fh.readlines()
self._saved_lines = lines
if any(b'\r\n' in line for line in lines):
endline = '\r\n'
else:
endline = '\n'
for entry in self.entries:
try:
logger.debug('Removing entry: %s', entry)
lines.remove((entry + endline).encode("utf-8"))
except ValueError:
pass
with open(self.file, 'wb') as fh:
fh.writelines(lines)
def rollback(self):
if self._saved_lines is None:
logger.error(
'Cannot roll back changes to %s, none were made', self.file
)
return False
logger.debug('Rolling %s back to previous state', self.file)
with open(self.file, 'wb') as fh:
fh.writelines(self._saved_lines)
return True
| gpl-3.0 |
waytai/django | tests/template_tests/syntax_tests/test_with.py | 391 | 2245 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class WithTagTests(SimpleTestCase):
@setup({'with01': '{% with key=dict.key %}{{ key }}{% endwith %}'})
def test_with01(self):
output = self.engine.render_to_string('with01', {'dict': {'key': 50}})
self.assertEqual(output, '50')
@setup({'legacywith01': '{% with dict.key as key %}{{ key }}{% endwith %}'})
def test_legacywith01(self):
output = self.engine.render_to_string('legacywith01', {'dict': {'key': 50}})
self.assertEqual(output, '50')
@setup({'with02': '{{ key }}{% with key=dict.key %}'
'{{ key }}-{{ dict.key }}-{{ key }}'
'{% endwith %}{{ key }}'})
def test_with02(self):
output = self.engine.render_to_string('with02', {'dict': {'key': 50}})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID50-50-50INVALID')
else:
self.assertEqual(output, '50-50-50')
@setup({'legacywith02': '{{ key }}{% with dict.key as key %}'
'{{ key }}-{{ dict.key }}-{{ key }}'
'{% endwith %}{{ key }}'})
def test_legacywith02(self):
output = self.engine.render_to_string('legacywith02', {'dict': {'key': 50}})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID50-50-50INVALID')
else:
self.assertEqual(output, '50-50-50')
@setup({'with03': '{% with a=alpha b=beta %}{{ a }}{{ b }}{% endwith %}'})
def test_with03(self):
output = self.engine.render_to_string('with03', {'alpha': 'A', 'beta': 'B'})
self.assertEqual(output, 'AB')
@setup({'with-error01': '{% with dict.key xx key %}{{ key }}{% endwith %}'})
def test_with_error01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('with-error01', {'dict': {'key': 50}})
@setup({'with-error02': '{% with dict.key as %}{{ key }}{% endwith %}'})
def test_with_error02(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('with-error02', {'dict': {'key': 50}})
| bsd-3-clause |
sysadmin75/ansible | test/support/integration/plugins/modules/postgresql_user.py | 51 | 34084 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_user
short_description: Add or remove a user (role) from a PostgreSQL server instance
description:
- Adds or removes a user (role) from a PostgreSQL server instance
("cluster" in PostgreSQL terminology) and, optionally,
grants the user access to an existing database or tables.
- A user is a role with login privilege.
- The fundamental function of the module is to create, or delete, users from
a PostgreSQL instance. Privilege assignment, or removal, is an optional
step, which works on one database at a time. This allows for the module to
be called several times in the same playbook to modify the permissions on
different databases, or to grant permissions to already existing users.
- A user cannot be removed until all the privileges have been stripped from
the user. In such a situation, if the module tries to remove the user it
will fail. To prevent this from happening, the fail_on_user option signals
the module to try to remove the user, but if not possible keep going; the
module will report if changes happened and separately if the user was
removed or not.
version_added: '0.6'
options:
name:
description:
- Name of the user (role) to add or remove.
type: str
required: true
aliases:
- user
password:
description:
- Set the user's password, before 1.4 this was required.
- Password can be passed unhashed or hashed (MD5-hashed).
- Unhashed password will automatically be hashed when saved into the
database if C(encrypted) parameter is set, otherwise it will be saved in
plain text format.
- When passing a hashed password it must be generated with the format
C('str["md5"] + md5[ password + username ]'), resulting in a total of
35 characters. An easy way to do this is C(echo "md5$(echo -n
'verysecretpasswordJOE' | md5sum | awk '{print $1}')").
- Note that if the provided password string is already in MD5-hashed
format, then it is used as-is, regardless of C(encrypted) parameter.
type: str
db:
description:
- Name of database to connect to and where user's permissions will be granted.
type: str
aliases:
- login_db
fail_on_user:
description:
- If C(yes), fail when user (role) can't be removed. Otherwise just log and continue.
default: 'yes'
type: bool
aliases:
- fail_on_role
priv:
description:
- "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
privileges can be defined for database ( allowed options - 'CREATE',
'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
C(table:SELECT) ). Mixed example of this string:
C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
type: str
role_attr_flags:
description:
- "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
- Note that '[NO]CREATEUSER' is deprecated.
    - To create a simple role to be used as a group, use the C(NOLOGIN) flag.
type: str
choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
'[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
session_role:
version_added: '2.8'
description:
- Switch to session_role after connecting.
- The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
type: str
state:
description:
- The user (role) state.
type: str
default: present
choices: [ absent, present ]
encrypted:
description:
- Whether the password is stored hashed in the database.
- Passwords can be passed already hashed or unhashed, and postgresql
ensures the stored password is hashed when C(encrypted) is set.
- "Note: Postgresql 10 and newer doesn't support unhashed passwords."
- Previous to Ansible 2.6, this was C(no) by default.
default: 'yes'
type: bool
version_added: '1.4'
expires:
description:
- The date at which the user's password is to expire.
    - If set to C('infinity'), user's password never expires.
- Note that this value should be a valid SQL date and time type.
type: str
version_added: '1.4'
no_password_changes:
description:
- If C(yes), don't inspect database for password changes. Effective when
C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make
password changes as necessary.
default: 'no'
type: bool
version_added: '2.0'
conn_limit:
description:
- Specifies the user (role) connection limit.
type: int
version_added: '2.4'
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
version_added: '2.3'
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
aliases: [ ssl_rootcert ]
version_added: '2.3'
groups:
description:
- The list of groups (roles) that need to be granted to the user.
type: list
elements: str
version_added: '2.9'
comment:
description:
- Add a comment on the user (equal to the COMMENT ON ROLE statement result).
type: str
version_added: '2.10'
notes:
- The module creates a user (role) with login privilege by default.
Use NOLOGIN role_attr_flags to change this behaviour.
- If you specify PUBLIC as the user (role), then the privilege changes will apply to all users (roles).
You may not specify password or role_attr_flags when the PUBLIC user is specified.
seealso:
- module: postgresql_privs
- module: postgresql_membership
- module: postgresql_owner
- name: PostgreSQL database roles
description: Complete reference of the PostgreSQL database roles documentation.
link: https://www.postgresql.org/docs/current/user-manag.html
author:
- Ansible Core Team
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Connect to acme database, create django user, and grant access to database and products table
postgresql_user:
db: acme
name: django
password: ceec4eif7ya
priv: "CONNECT/products:ALL"
expires: "Jan 31 2020"
- name: Add a comment on django user
postgresql_user:
db: acme
name: django
comment: This is a test user
# Connect to default database, create rails user, set its password (MD5-hashed),
# and grant privilege to create other databases and demote rails from super user status if user exists
- name: Create rails user, set MD5-hashed password, grant privs
postgresql_user:
name: rails
password: md59543f1d82624df2b31672ec0f7050460
role_attr_flags: CREATEDB,NOSUPERUSER
- name: Connect to acme database and remove test user privileges from there
postgresql_user:
db: acme
name: test
priv: "ALL/products:ALL"
state: absent
fail_on_user: no
- name: Connect to test database, remove test user from cluster
postgresql_user:
db: test
name: test
priv: ALL
state: absent
- name: Connect to acme database and set user's password with no expire date
postgresql_user:
db: acme
name: django
password: mysupersecretword
priv: "CONNECT/products:ALL"
expires: infinity
# Example privileges string format
# INSERT,UPDATE/table:SELECT/anothertable:ALL
- name: Connect to test database and remove an existing user's password
postgresql_user:
db: test
user: test
password: ""
- name: Create user test and grant group user_ro and user_rw to it
postgresql_user:
name: test
groups:
- user_ro
- user_rw
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: list
sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
version_added: '2.8'
'''
import itertools
import re
import traceback
from hashlib import md5
try:
import psycopg2
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import pg_quote_identifier, SQLParseError
from ansible.module_utils.postgres import (
connect_to_db,
get_conn_params,
PgMembership,
postgres_common_argument_spec,
)
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.six import iteritems
FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
database=frozenset(
('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
)
# map to cope with idiosyncrasies of SUPERUSER and LOGIN
PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
executed_queries = []
class InvalidFlagsError(Exception):
pass
class InvalidPrivsError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def user_exists(cursor, user):
# The PUBLIC user is a special case that is always there
if user == 'PUBLIC':
return True
query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
cursor.execute(query, {'user': user})
return cursor.rowcount > 0
def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
"""Create a new database user (role)."""
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
# literal
query_password_data = dict(password=password, expires=expires)
query = ['CREATE USER "%(user)s"' %
{"user": user}]
if password is not None and password != '':
query.append("WITH %(crypt)s" % {"crypt": encrypted})
query.append("PASSWORD %(password)s")
if expires is not None:
query.append("VALID UNTIL %(expires)s")
if conn_limit is not None:
query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
query.append(role_attr_flags)
query = ' '.join(query)
executed_queries.append(query)
cursor.execute(query, query_password_data)
return True
def user_should_we_change_password(current_role_attrs, user, password, encrypted):
"""Check if we should change the user's password.
Compare the proposed password with the existing one, comparing
    hashes if encrypted. If we can't access the current password, assume it
    needs changing.
"""
if current_role_attrs is None:
# on some databases, E.g. AWS RDS instances, there is no access to
# the pg_authid relation to check the pre-existing password, so we
# just assume password is different
return True
# Do we actually need to do anything?
pwchanging = False
if password is not None:
# Empty password means that the role shouldn't have a password, which
# means we need to check if the current password is None.
if password == '':
if current_role_attrs['rolpassword'] is not None:
pwchanging = True
# 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
# 3: The size of the 'md5' prefix
# When the provided password looks like a MD5-hash, value of
# 'encrypted' is ignored.
elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
if password != current_role_attrs['rolpassword']:
pwchanging = True
elif encrypted == 'ENCRYPTED':
hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
if hashed_password != current_role_attrs['rolpassword']:
pwchanging = True
return pwchanging
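# Hedged illustration (the helper below is not called anywhere in this module):
# the stored hash compared above is the literal prefix 'md5' plus the 32 hex
# digits of md5(password + username), 35 characters in total, matching the note
# in DOCUMENTATION.
def _example_md5_password(password, user):
    # Mirrors the hashing performed in user_should_we_change_password()
    return 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())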
def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
"""Change user password and/or attributes. Return True if changed, False otherwise."""
changed = False
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
# literal
if user == 'PUBLIC':
if password is not None:
module.fail_json(msg="cannot change the password for PUBLIC user")
elif role_attr_flags != '':
module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
else:
return False
# Handle passwords.
if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
# Select password and all flag-like columns in order to verify changes.
try:
select = "SELECT * FROM pg_authid where rolname=%(user)s"
cursor.execute(select, {"user": user})
# Grab current role attributes.
current_role_attrs = cursor.fetchone()
except psycopg2.ProgrammingError:
current_role_attrs = None
db_connection.rollback()
pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
if current_role_attrs is None:
try:
                # AWS RDS instances do not allow the user to access pg_authid
# so try to get current_role_attrs from pg_roles tables
select = "SELECT * FROM pg_roles where rolname=%(user)s"
cursor.execute(select, {"user": user})
# Grab current role attributes from pg_roles
current_role_attrs = cursor.fetchone()
except psycopg2.ProgrammingError as e:
db_connection.rollback()
module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
role_attr_flags_changing = False
if role_attr_flags:
role_attr_flags_dict = {}
for r in role_attr_flags.split(' '):
if r.startswith('NO'):
role_attr_flags_dict[r.replace('NO', '', 1)] = False
else:
role_attr_flags_dict[r] = True
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
role_attr_flags_changing = True
if expires is not None:
cursor.execute("SELECT %s::timestamptz;", (expires,))
expires_with_tz = cursor.fetchone()[0]
expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
else:
expires_changing = False
conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
return False
alter = ['ALTER USER "%(user)s"' % {"user": user}]
if pwchanging:
if password != '':
alter.append("WITH %(crypt)s" % {"crypt": encrypted})
alter.append("PASSWORD %(password)s")
else:
alter.append("WITH PASSWORD NULL")
alter.append(role_attr_flags)
elif role_attr_flags:
alter.append('WITH %s' % role_attr_flags)
if expires is not None:
alter.append("VALID UNTIL %(expires)s")
if conn_limit is not None:
alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
query_password_data = dict(password=password, expires=expires)
try:
cursor.execute(' '.join(alter), query_password_data)
changed = True
except psycopg2.InternalError as e:
if e.pgcode == '25006':
# Handle errors due to read-only transactions indicated by pgcode 25006
# ERROR: cannot execute ALTER ROLE in a read-only transaction
changed = False
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
return changed
else:
raise psycopg2.InternalError(e)
except psycopg2.NotSupportedError as e:
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
elif no_password_changes and role_attr_flags != '':
# Grab role information from pg_roles instead of pg_authid
select = "SELECT * FROM pg_roles where rolname=%(user)s"
cursor.execute(select, {"user": user})
# Grab current role attributes.
current_role_attrs = cursor.fetchone()
role_attr_flags_changing = False
if role_attr_flags:
role_attr_flags_dict = {}
for r in role_attr_flags.split(' '):
if r.startswith('NO'):
role_attr_flags_dict[r.replace('NO', '', 1)] = False
else:
role_attr_flags_dict[r] = True
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
role_attr_flags_changing = True
if not role_attr_flags_changing:
return False
alter = ['ALTER USER "%(user)s"' %
{"user": user}]
if role_attr_flags:
alter.append('WITH %s' % role_attr_flags)
try:
cursor.execute(' '.join(alter))
except psycopg2.InternalError as e:
if e.pgcode == '25006':
# Handle errors due to read-only transactions indicated by pgcode 25006
# ERROR: cannot execute ALTER ROLE in a read-only transaction
changed = False
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
return changed
else:
raise psycopg2.InternalError(e)
# Grab new role attributes.
cursor.execute(select, {"user": user})
new_role_attrs = cursor.fetchone()
# Detect any differences between current_ and new_role_attrs.
changed = current_role_attrs != new_role_attrs
return changed
def user_delete(cursor, user):
"""Try to remove a user. Returns True if successful otherwise False"""
cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
try:
query = 'DROP USER "%s"' % user
executed_queries.append(query)
cursor.execute(query)
except Exception:
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
return False
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
return True
def has_table_privileges(cursor, user, table, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_table_privileges(cursor, user, table)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
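# Hedged example of the tuple returned above: if the user already holds
# {SELECT, UPDATE} on a table and {SELECT, INSERT} is requested, the result is
# ({SELECT}, {UPDATE}, {INSERT}): held-and-requested, held-but-not-requested,
# requested-but-not-held.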
def get_table_privileges(cursor, user, table):
if '.' in table:
schema, table = table.split('.', 1)
else:
schema = 'public'
query = ("SELECT privilege_type FROM information_schema.role_table_grants "
"WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s")
cursor.execute(query, {'user': user, 'table': table, 'schema': schema})
return frozenset([x[0] for x in cursor.fetchall()])
def grant_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
query = 'GRANT %s ON TABLE %s TO "%s"' % (
privs, pg_quote_identifier(table, 'table'), user)
executed_queries.append(query)
cursor.execute(query)
def revoke_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
privs, pg_quote_identifier(table, 'table'), user)
executed_queries.append(query)
cursor.execute(query)
def get_database_privileges(cursor, user, db):
priv_map = {
'C': 'CREATE',
'T': 'TEMPORARY',
'c': 'CONNECT',
}
query = 'SELECT datacl FROM pg_database WHERE datname = %s'
cursor.execute(query, (db,))
datacl = cursor.fetchone()[0]
if datacl is None:
return set()
r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
if r is None:
return set()
o = set()
for v in r.group(1):
o.add(priv_map[v])
return normalize_privileges(o, 'database')
def has_database_privileges(cursor, user, db, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_database_privileges(cursor, user, db)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
def grant_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
if user == "PUBLIC":
query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
privs, pg_quote_identifier(db, 'database'))
else:
query = 'GRANT %s ON DATABASE %s TO "%s"' % (
privs, pg_quote_identifier(db, 'database'), user)
executed_queries.append(query)
cursor.execute(query)
def revoke_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
if user == "PUBLIC":
query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
privs, pg_quote_identifier(db, 'database'))
else:
query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (
privs, pg_quote_identifier(db, 'database'), user)
executed_queries.append(query)
cursor.execute(query)
def revoke_privileges(cursor, user, privs):
if privs is None:
return False
revoke_funcs = dict(table=revoke_table_privileges,
database=revoke_database_privileges)
check_funcs = dict(table=has_table_privileges,
database=has_database_privileges)
changed = False
for type_ in privs:
for name, privileges in iteritems(privs[type_]):
# Check that any of the privileges requested to be removed are
# currently granted to the user
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[0]:
revoke_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
def grant_privileges(cursor, user, privs):
if privs is None:
return False
grant_funcs = dict(table=grant_table_privileges,
database=grant_database_privileges)
check_funcs = dict(table=has_table_privileges,
database=has_database_privileges)
changed = False
for type_ in privs:
for name, privileges in iteritems(privs[type_]):
# Check that any of the privileges requested for the user are
# currently missing
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[2]:
grant_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
def parse_role_attrs(cursor, role_attr_flags):
"""
Parse role attributes string for user creation.
Format:
attributes[,attributes,...]
Where:
attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
[ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB",
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
"[NO]BYPASSRLS" ]
Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
Note: "[NO]CREATEUSER" role attribute is deprecated.
"""
flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role)
valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags)))
if not flags.issubset(valid_flags):
raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
' '.join(flags.difference(valid_flags)))
return ' '.join(flags)
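# Hedged illustration: parse_role_attrs(cursor, "createdb,NOLOGIN") upper-cases
# the flags, validates them against the server version and returns them
# space-joined, e.g. "CREATEDB NOLOGIN" (order is not guaranteed because the
# flags are collected in a frozenset).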
def normalize_privileges(privs, type_):
new_privs = set(privs)
if 'ALL' in new_privs:
new_privs.update(VALID_PRIVS[type_])
new_privs.remove('ALL')
if 'TEMP' in new_privs:
new_privs.add('TEMPORARY')
new_privs.remove('TEMP')
return new_privs
def parse_privs(privs, db):
"""
Parse privilege string to determine permissions for database db.
Format:
privileges[/privileges/...]
Where:
privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
"""
if privs is None:
return privs
o_privs = {
'database': {},
'table': {}
}
for token in privs.split('/'):
if ':' not in token:
type_ = 'database'
name = db
priv_set = frozenset(x.strip().upper()
for x in token.split(',') if x.strip())
else:
type_ = 'table'
name, privileges = token.split(':', 1)
priv_set = frozenset(x.strip().upper()
for x in privileges.split(',') if x.strip())
if not priv_set.issubset(VALID_PRIVS[type_]):
raise InvalidPrivsError('Invalid privs specified for %s: %s' %
(type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
priv_set = normalize_privileges(priv_set, type_)
o_privs[type_][name] = priv_set
return o_privs
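# Hedged sketch (the helper below is not used by the module; the database name
# "acme" is hypothetical): parse_privs() splits the slash-separated string into
# database-level and table-level grants for the given database.
def _example_parse_privs():
    # Expected shape (sets after normalisation):
    #   {'database': {'acme': {'CONNECT', 'CREATE'}},
    #    'table':    {'table1': {'SELECT', 'INSERT'}}}
    return parse_privs("CONNECT,CREATE/table1:SELECT,INSERT", "acme")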
def get_valid_flags_by_version(cursor):
"""
    Some role attributes were only introduced in later PostgreSQL versions. We
    want to compile a list of valid flags against the current Postgres version.
"""
current_version = cursor.connection.server_version
return [
flag
for flag, version_introduced in FLAGS_BY_VERSION.items()
if current_version >= version_introduced
]
def get_comment(cursor, user):
"""Get user's comment."""
query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
"FROM pg_catalog.pg_roles r "
"WHERE r.rolname = %(user)s")
cursor.execute(query, {'user': user})
return cursor.fetchone()[0]
def add_comment(cursor, user, comment):
"""Add comment on user."""
if comment != get_comment(cursor, user):
query = 'COMMENT ON ROLE "%s" IS ' % user
cursor.execute(query + '%(comment)s', {'comment': comment})
executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
return True
else:
return False
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
user=dict(type='str', required=True, aliases=['name']),
password=dict(type='str', default=None, no_log=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
priv=dict(type='str', default=None),
db=dict(type='str', default='', aliases=['login_db']),
fail_on_user=dict(type='bool', default='yes', aliases=['fail_on_role']),
role_attr_flags=dict(type='str', default=''),
encrypted=dict(type='bool', default='yes'),
no_password_changes=dict(type='bool', default='no'),
expires=dict(type='str', default=None),
conn_limit=dict(type='int', default=None),
session_role=dict(type='str'),
groups=dict(type='list', elements='str'),
comment=dict(type='str', default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
user = module.params["user"]
password = module.params["password"]
state = module.params["state"]
fail_on_user = module.params["fail_on_user"]
if module.params['db'] == '' and module.params["priv"] is not None:
module.fail_json(msg="privileges require a database to be specified")
privs = parse_privs(module.params["priv"], module.params["db"])
no_password_changes = module.params["no_password_changes"]
if module.params["encrypted"]:
encrypted = "ENCRYPTED"
else:
encrypted = "UNENCRYPTED"
expires = module.params["expires"]
conn_limit = module.params["conn_limit"]
role_attr_flags = module.params["role_attr_flags"]
groups = module.params["groups"]
if groups:
groups = [e.strip() for e in groups]
comment = module.params["comment"]
conn_params = get_conn_params(module, module.params, warn_db_default=False)
db_connection = connect_to_db(module, conn_params)
cursor = db_connection.cursor(cursor_factory=DictCursor)
try:
role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
except InvalidFlagsError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
kw = dict(user=user)
changed = False
user_removed = False
if state == "present":
if user_exists(cursor, user):
try:
changed = user_alter(db_connection, module, user, password,
role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
else:
try:
changed = user_add(cursor, user, password,
role_attr_flags, encrypted, expires, conn_limit)
except psycopg2.ProgrammingError as e:
module.fail_json(msg="Unable to add user with given requirement "
"due to : %s" % to_native(e),
exception=traceback.format_exc())
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
try:
changed = grant_privileges(cursor, user, privs) or changed
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if groups:
target_roles = []
target_roles.append(user)
pg_membership = PgMembership(module, cursor, groups, target_roles)
changed = pg_membership.grant() or changed
executed_queries.extend(pg_membership.executed_queries)
if comment is not None:
try:
changed = add_comment(cursor, user, comment) or changed
except Exception as e:
module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
exception=traceback.format_exc())
else:
if user_exists(cursor, user):
if module.check_mode:
changed = True
kw['user_removed'] = True
else:
try:
changed = revoke_privileges(cursor, user, privs)
user_removed = user_delete(cursor, user)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
changed = changed or user_removed
if fail_on_user and not user_removed:
msg = "Unable to remove user"
module.fail_json(msg=msg)
kw['user_removed'] = user_removed
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
kw['changed'] = changed
kw['queries'] = executed_queries
module.exit_json(**kw)
if __name__ == '__main__':
main()
| gpl-3.0 |
turian/batchtrain | hyperparameters.py | 1 | 5497 | from locals import *
from collections import OrderedDict
import itertools
import sklearn.linear_model
import sklearn.svm
import sklearn.ensemble
import sklearn.neighbors
import sklearn.semi_supervised
import sklearn.naive_bayes
# Code from http://rosettacode.org/wiki/Power_set#Python
def list_powerset2(lst):
return reduce(lambda result, x: result + [subset + [x] for subset in result],
lst, [[]])
def powerset(s):
return frozenset(map(frozenset, list_powerset2(list(s))))
def all_hyperparameters(odict):
hyperparams = list(itertools.product(*odict.values()))
for h in hyperparams:
yield dict(zip(odict.keys(), h))
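# Hedged sketch: all_hyperparameters() expands an OrderedDict of per-parameter
# value lists into every combination (Cartesian product). The tiny grid below
# is hypothetical, not one of the model grids defined in this file.
def _example_grid_expansion():
    grid = OrderedDict([("C", [0.1, 1.0]), ("kernel", ["rbf", "linear"])])
    # Yields four settings: {'C': 0.1, 'kernel': 'rbf'}, {'C': 0.1, 'kernel': 'linear'},
    # {'C': 1.0, 'kernel': 'rbf'} and {'C': 1.0, 'kernel': 'linear'}
    return list(all_hyperparameters(grid))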
MODEL_HYPERPARAMETERS = {
"MultinomialNB": OrderedDict({
"alpha": [0.01, 0.032, 0.1, 0.32, 1.0, 10.]
}),
"SGDClassifier": OrderedDict({
"loss": ['hinge', 'log', 'modified_huber'],
"penalty": ['l2', 'l1', 'elasticnet'],
"alpha": [0.001, 0.0001, 0.00001, 0.000001],
"rho": [0.15, 0.30, 0.55, 0.85, 0.95],
# "l1_ratio": [0.05, 0.15, 0.45],
"fit_intercept": [True],
"n_iter": [1, 5, 25, 100],
"shuffle": [True, False],
# "epsilon": [
"learning_rate": ["constant", "optimal", "invscaling"],
"eta0": [0.001, 0.01, 0.1],
"power_t": [0.05, 0.1, 0.25, 0.5, 1.],
"warm_start": [True, False],
}),
"BayesianRidge": OrderedDict({
"n_iter": [100, 300, 1000],
"tol": [1e-2, 1e-3, 1e-4],
"alpha_1": [1e-5, 1e-6, 1e-7],
"alpha_2": [1e-5, 1e-6, 1e-7],
"lambda_1": [1e-5, 1e-6, 1e-7],
"lambda_2": [1e-5, 1e-6, 1e-7],
"normalize": [True, False],
}),
"Perceptron": OrderedDict({
"penalty": ["l2", "l1", "elasticnet"],
"alpha": [1e-2, 1e-3, 1e-4, 1e-5, 1e-6],
"n_iter": [1, 5, 25],
"shuffle": [True, False],
"eta0": [0.1, 1., 10.],
"warm_start": [True, False],
}),
"SVC": OrderedDict({
"C": [0.1, 1, 10, 100],
"kernel": ["rbf", "sigmoid", "linear", "poly"],
"degree": [1,2,3,4,5],
"gamma": [1e-3, 1e-5, 0.],
"probability": [False, True],
"cache_size": [CACHESIZE],
"shrinking": [False, True],
}),
"SVR": OrderedDict({
"C": [0.1, 1, 10, 100],
"epsilon": [0.001, 0.01, 0.1, 1.0],
"kernel": ["rbf", "sigmoid", "linear", "poly"],
"degree": [1,2,3,4,5],
"gamma": [1e-3, 1e-5, 0.],
"cache_size": [CACHESIZE],
"shrinking": [False, True],
}),
"GradientBoostingClassifier": OrderedDict({
'loss': ['deviance'],
#'learn_rate': [1., 0.1, 0.01],
'learn_rate': [1., 0.1],
#'n_estimators': [10, 32, 100, 320],
'n_estimators': [10, 32, 100],
'max_depth': [1, 3, None],
'min_samples_split': [1, 3],
'min_samples_leaf': [1, 3],
#'subsample': [0.032, 0.1, 0.32, 1],
'subsample': [0.1, 0.32, 1],
# 'alpha': [0.5, 0.9],
}),
"GradientBoostingRegressor": OrderedDict({
'loss': ['ls', 'lad', 'huber', 'quantile'],
'learn_rate': [1., 0.1, 0.01],
'n_estimators': [10, 32, 100, 320],
'max_depth': [1, 3, None],
'min_samples_split': [1, 3],
'min_samples_leaf': [1, 3],
'subsample': [0.032, 0.1, 0.32, 1],
}),
"RandomForestClassifier": OrderedDict({
'n_estimators': [10, 32, 100, 320],
'criterion': ['gini', 'entropy'],
'max_depth': [1, 3, None],
'min_samples_split': [1, 3],
'min_samples_leaf': [1, 3],
'min_density': [0.032, 0.1, 0.32],
'max_features': ["sqrt", "log2", None],
# 'bootstrap': [True, False],
'bootstrap': [True],
'oob_score': [True, False],
# 'verbose': [True],
}),
"RandomForestRegressor": OrderedDict({
'n_estimators': [10, 32, 100, 320],
'max_depth': [1, 3, None],
'min_samples_split': [1, 3],
'min_samples_leaf': [1, 3],
'min_density': [0.032, 0.1, 0.32],
'max_features': ["sqrt", "log2", None],
# 'bootstrap': [True, False],
'bootstrap': [True],
'oob_score': [True, False],
# 'verbose': [True],
}),
"KNeighborsClassifier": OrderedDict({
'n_neighbors': [3, 5, 7],
'weights': ['uniform', 'distance'],
'algorithm': ['ball_tree', 'kd_tree', 'brute'],
'leaf_size': [10, 30, 100],
'p': [1, 2],
}),
"LabelSpreading": OrderedDict({
'kernel': ['knn', 'rbf'],
'gamma': [10, 20, 100, 200],
'n_neighbors': [3, 5, 7, 9],
'alpha': [0, 0.02, 0.2, 1.0],
'max_iters': [3, 10, 30, 100],
'tol': [1e-5, 1e-3, 1e-1, 1.],
})
}
MODEL_NAME_TO_CLASS = {
"MultinomialNB": sklearn.naive_bayes.MultinomialNB,
"SGDClassifier": sklearn.linear_model.SGDClassifier,
"BayesianRidge": sklearn.linear_model.BayesianRidge,
"Perceptron": sklearn.linear_model.Perceptron,
"SVC": sklearn.svm.SVC,
"SVR": sklearn.svm.SVR,
"GradientBoostingClassifier": sklearn.ensemble.GradientBoostingClassifier,
"GradientBoostingRegressor": sklearn.ensemble.GradientBoostingRegressor,
"RandomForestClassifier": sklearn.ensemble.RandomForestClassifier,
"RandomForestRegressor": sklearn.ensemble.RandomForestRegressor,
"KNeighborsClassifier": sklearn.neighbors.KNeighborsClassifier,
"LabelSpreading": sklearn.semi_supervised.LabelSpreading,
}
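# Hedged sketch of how the two mappings above are presumably combined by the
# surrounding batchtrain code (not shown here); this loop is an assumption, not
# part of the original module.
def _example_models_for(model_name):
    klass = MODEL_NAME_TO_CLASS[model_name]
    for hyperparams in all_hyperparameters(MODEL_HYPERPARAMETERS[model_name]):
        yield klass(**hyperparams)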
| bsd-3-clause |
GeotrekCE/Geotrek-admin | mapentity/decorators.py | 2 | 5841 | from functools import wraps
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_control
from django.views.decorators.http import last_modified as cache_last_modified
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import PermissionDenied
from django.core.cache import caches
from django.contrib.auth.decorators import user_passes_test
from django.contrib import messages
from django.views.generic.edit import BaseUpdateView
from django.views.generic.detail import BaseDetailView
from .settings import app_settings
from .helpers import user_has_perm
from . import models as mapentity_models
def view_permission_required(login_url=None, raise_exception=None):
if raise_exception is None:
raise_exception = (login_url is None)
def check_perms(request, user, perm):
# Check both authenticated and anonymous
if user_has_perm(user, perm):
return True
if not user.is_anonymous and raise_exception:
raise PermissionDenied
        # As a last resort, redirect
msg = _(u'Access to the requested resource is restricted. You have been redirected.')
messages.warning(request, msg)
return False
def decorator(view_func):
def _wrapped_view(self, request, *args, **kwargs):
perm = self.get_view_perm()
redirect_url = login_url
if login_url in mapentity_models.ENTITY_KINDS:
is_handle_object = issubclass(self.__class__, (BaseDetailView, BaseUpdateView))
if is_handle_object:
view_subject = self.get_object()
else:
view_subject = self.get_model()
get_url_method = getattr(view_subject, 'get_{0}_url'.format(login_url))
redirect_url = get_url_method()
has_perm_decorator = user_passes_test(lambda u: check_perms(request, u, perm),
login_url=redirect_url,
redirect_field_name=None)
cbv_user_has_perm = method_decorator(has_perm_decorator)
@cbv_user_has_perm
def decorated(self, request, *args, **kwargs):
return view_func(self, request, *args, **kwargs)
return decorated(self, request, *args, **kwargs)
return _wrapped_view
return decorator
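# Hedged usage sketch (the view class below is hypothetical, not part of this
# module): the decorator wraps a class-based view method, checks the view's
# permission and, when the user lacks it, either raises PermissionDenied or
# redirects (optionally to one of the ENTITY_KINDS URLs of the viewed object).
#
#     class TrekDetail(MapEntityDetail):
#         @view_permission_required(login_url='detail')
#         def dispatch(self, request, *args, **kwargs):
#             return super(TrekDetail, self).dispatch(request, *args, **kwargs)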
def view_cache_latest():
def decorator(view_func):
def _wrapped_view(self, request, *args, **kwargs):
view_model = self.get_model()
cache_latest = cache_last_modified(lambda x: view_model.latest_updated())
cbv_cache_latest = method_decorator(cache_latest)
@method_decorator(cache_control(max_age=0, must_revalidate=True))
@cbv_cache_latest
def decorated(self, request, *args, **kwargs):
return view_func(self, request, *args, **kwargs)
return decorated(self, request, *args, **kwargs)
return _wrapped_view
return decorator
def view_cache_response_content():
def decorator(view_func):
def _wrapped_method(self, *args, **kwargs):
response_class = self.response_class
response_kwargs = dict()
            # Do not cache if filters are present
params = self.request.GET.keys()
with_filters = all([not p.startswith('_') for p in params])
if len(params) > 0 and with_filters:
return view_func(self, *args, **kwargs)
# Restore from cache or store view result
geojson_lookup = None
if hasattr(self, 'view_cache_key'):
geojson_lookup = self.view_cache_key()
elif not self.request.GET: # Do not cache filtered responses
view_model = self.get_model()
language = self.request.LANGUAGE_CODE
latest_saved = view_model.latest_updated()
if latest_saved:
geojson_lookup = '%s_%s_%s_json_layer' % (
language,
view_model._meta.model_name,
latest_saved.strftime('%y%m%d%H%M%S%f')
)
geojson_cache = caches[app_settings['GEOJSON_LAYERS_CACHE_BACKEND']]
if geojson_lookup:
content = geojson_cache.get(geojson_lookup)
if content:
return response_class(content=content, **response_kwargs)
response = view_func(self, *args, **kwargs)
if geojson_lookup:
geojson_cache.set(geojson_lookup, response.content)
return response
return _wrapped_method
return decorator
def save_history():
"""
    A decorator for class-based views, which saves navigation history in the
    session.
"""
def decorator(view_func):
@wraps(view_func)
def _wrapped_view(self, request, *args, **kwargs):
result = view_func(self, request, *args, **kwargs)
# Stack list of request paths
history = request.session.get('history', [])
# Remove previous visits of this page
history = [h for h in history if h['path'] != request.path]
# Add this one and remove extras
model = self.model or self.queryset.model
history.insert(0, dict(title=self.get_title(),
path=request.path,
modelname=model._meta.object_name.lower()))
if len(history) > app_settings['HISTORY_ITEMS_MAX']:
history.pop()
request.session['history'] = history
return result
return _wrapped_view
return decorator
| bsd-2-clause |
ybellavance/python-for-android | python3-alpha/python3-src/Lib/test/test_keywordonlyarg.py | 49 | 6367 | #!/usr/bin/env python3
"""Unit tests for the keyword only argument specified in PEP 3102."""
__author__ = "Jiwon Seo"
__email__ = "seojiwon at gmail dot com"
import unittest
from test.support import run_unittest
def posonly_sum(pos_arg1, *arg, **kwarg):
return pos_arg1 + sum(arg) + sum(kwarg.values())
def keywordonly_sum(*, k1=0, k2):
return k1 + k2
def keywordonly_nodefaults_sum(*, k1, k2):
return k1 + k2
def keywordonly_and_kwarg_sum(*, k1, k2, **kwarg):
return k1 + k2 + sum(kwarg.values())
def mixedargs_sum(a, b=0, *arg, k1, k2=0):
return a + b + k1 + k2 + sum(arg)
def mixedargs_sum2(a, b=0, *arg, k1, k2=0, **kwargs):
return a + b + k1 + k2 + sum(arg) + sum(kwargs.values())
def sortnum(*nums, reverse=False):
return sorted(list(nums), reverse=reverse)
def sortwords(*words, reverse=False, **kwargs):
return sorted(list(words), reverse=reverse)
class Foo:
def __init__(self, *, k1, k2=0):
self.k1 = k1
self.k2 = k2
def set(self, p1, *, k1, k2):
self.k1 = k1
self.k2 = k2
def sum(self):
return self.k1 + self.k2
class KeywordOnlyArgTestCase(unittest.TestCase):
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, "<test>", "single")
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testSyntaxErrorForFunctionDefinition(self):
self.assertRaisesSyntaxError("def f(p, *):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, p1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *k1, k1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, k1, k1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, k1, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, None, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p, *, (k1, k2), **kw):\n pass\n")
def testSyntaxForManyArguments(self):
fundef = "def f("
for i in range(255):
fundef += "i%d, "%i
fundef += "*, key=100):\n pass\n"
self.assertRaisesSyntaxError(fundef)
fundef2 = "def foo(i,*,"
for i in range(255):
fundef2 += "i%d, "%i
fundef2 += "lastarg):\n pass\n"
self.assertRaisesSyntaxError(fundef2)
# exactly 255 arguments, should compile ok
fundef3 = "def f(i,*,"
for i in range(253):
fundef3 += "i%d, "%i
fundef3 += "lastarg):\n pass\n"
compile(fundef3, "<test>", "single")
def testTooManyPositionalErrorMessage(self):
def f(a, b=None, *, c=None):
pass
with self.assertRaises(TypeError) as exc:
f(1, 2, 3)
expected = "f() takes at most 2 positional arguments (3 given)"
self.assertEqual(str(exc.exception), expected)
def testSyntaxErrorForFunctionCall(self):
self.assertRaisesSyntaxError("f(p, k=1, p2)")
self.assertRaisesSyntaxError("f(p, k1=50, *(1,2), k1=100)")
def testRaiseErrorFuncallWithUnexpectedKeywordArgument(self):
self.assertRaises(TypeError, keywordonly_sum, ())
self.assertRaises(TypeError, keywordonly_nodefaults_sum, ())
self.assertRaises(TypeError, Foo, ())
try:
keywordonly_sum(k2=100, non_existing_arg=200)
self.fail("should raise TypeError")
except TypeError:
pass
try:
keywordonly_nodefaults_sum(k2=2)
self.fail("should raise TypeError")
except TypeError:
pass
def testFunctionCall(self):
self.assertEqual(1, posonly_sum(1))
self.assertEqual(1+2, posonly_sum(1,**{"2":2}))
self.assertEqual(1+2+3, posonly_sum(1,*(2,3)))
self.assertEqual(1+2+3+4, posonly_sum(1,*(2,3),**{"4":4}))
self.assertEqual(1, keywordonly_sum(k2=1))
self.assertEqual(1+2, keywordonly_sum(k1=1, k2=2))
self.assertEqual(1+2, keywordonly_and_kwarg_sum(k1=1, k2=2))
self.assertEqual(1+2+3, keywordonly_and_kwarg_sum(k1=1, k2=2, k3=3))
self.assertEqual(1+2+3+4,
keywordonly_and_kwarg_sum(k1=1, k2=2,
**{"a":3,"b":4}))
self.assertEqual(1+2, mixedargs_sum(1, k1=2))
self.assertEqual(1+2+3, mixedargs_sum(1, 2, k1=3))
self.assertEqual(1+2+3+4, mixedargs_sum(1, 2, k1=3, k2=4))
self.assertEqual(1+2+3+4+5, mixedargs_sum(1, 2, 3, k1=4, k2=5))
self.assertEqual(1+2, mixedargs_sum2(1, k1=2))
self.assertEqual(1+2+3, mixedargs_sum2(1, 2, k1=3))
self.assertEqual(1+2+3+4, mixedargs_sum2(1, 2, k1=3, k2=4))
self.assertEqual(1+2+3+4+5, mixedargs_sum2(1, 2, 3, k1=4, k2=5))
self.assertEqual(1+2+3+4+5+6,
mixedargs_sum2(1, 2, 3, k1=4, k2=5, k3=6))
self.assertEqual(1+2+3+4+5+6,
mixedargs_sum2(1, 2, 3, k1=4, **{'k2':5, 'k3':6}))
self.assertEqual(1, Foo(k1=1).sum())
self.assertEqual(1+2, Foo(k1=1,k2=2).sum())
self.assertEqual([1,2,3], sortnum(3,2,1))
self.assertEqual([3,2,1], sortnum(1,2,3, reverse=True))
self.assertEqual(['a','b','c'], sortwords('a','c','b'))
self.assertEqual(['c','b','a'], sortwords('a','c','b', reverse=True))
self.assertEqual(['c','b','a'],
sortwords('a','c','b', reverse=True, ignore='ignore'))
def testKwDefaults(self):
def foo(p1,p2=0, *, k1, k2=0):
return p1 + p2 + k1 + k2
self.assertEqual(2, foo.__code__.co_kwonlyargcount)
self.assertEqual({"k2":0}, foo.__kwdefaults__)
foo.__kwdefaults__ = {"k1":0}
try:
foo(1,k1=10)
self.fail("__kwdefaults__ is not properly changed")
except TypeError:
pass
def test_kwonly_methods(self):
class Example:
def f(self, *, k1=1, k2=2):
return k1, k2
self.assertEqual(Example().f(k1=1, k2=2), (1, 2))
self.assertEqual(Example.f(Example(), k1=1, k2=2), (1, 2))
self.assertRaises(TypeError, Example.f, k1=1, k2=2)
def test_main():
run_unittest(KeywordOnlyArgTestCase)
if __name__ == "__main__":
test_main()
| apache-2.0 |
joelpinheiro/safebox-smartcard-auth | Server/veserver/lib/python2.7/site-packages/cherrypy/lib/covercp.py | 58 | 11592 | """Code-coverage tools for CherryPy.
To use this module, or the coverage tools in the test suite,
you need to download 'coverage.py', either Gareth Rees' `original
implementation <http://www.garethrees.org/2001/12/04/python-coverage/>`_
or Ned Batchelder's `enhanced version
<http://www.nedbatchelder.com/code/modules/coverage.html>`_
To turn on coverage tracing, use the following code::
cherrypy.engine.subscribe('start', covercp.start)
DO NOT subscribe anything on the 'start_thread' channel, as previously
recommended. Calling start once in the main thread should be sufficient
to start coverage on all threads. Calling start again in each thread
effectively clears any coverage data gathered up to that point.
Run your code, then use the ``covercp.serve()`` function to browse the
results in a web browser. If you run this module from the command line,
it will call ``serve()`` for you.
"""
import re
import sys
import cgi
from cherrypy._cpcompat import quote_plus
import os
import os.path
localFile = os.path.join(os.path.dirname(__file__), "coverage.cache")
the_coverage = None
try:
from coverage import coverage
the_coverage = coverage(data_file=localFile)
def start():
the_coverage.start()
except ImportError:
# Setting the_coverage to None will raise errors
# that need to be trapped downstream.
the_coverage = None
import warnings
warnings.warn(
"No code coverage will be performed; "
"coverage.py could not be imported.")
def start():
pass
start.priority = 20
TEMPLATE_MENU = """<html>
<head>
<title>CherryPy Coverage Menu</title>
<style>
body {font: 9pt Arial, serif;}
#tree {
font-size: 8pt;
font-family: Andale Mono, monospace;
white-space: pre;
}
#tree a:active, a:focus {
background-color: black;
padding: 1px;
color: white;
border: 0px solid #9999FF;
-moz-outline-style: none;
}
.fail { color: red;}
.pass { color: #888;}
#pct { text-align: right;}
h3 {
font-size: small;
font-weight: bold;
font-style: italic;
margin-top: 5px;
}
input { border: 1px solid #ccc; padding: 2px; }
.directory {
color: #933;
font-style: italic;
font-weight: bold;
font-size: 10pt;
}
.file {
color: #400;
}
a { text-decoration: none; }
#crumbs {
color: white;
font-size: 8pt;
font-family: Andale Mono, monospace;
width: 100%;
background-color: black;
}
#crumbs a {
color: #f88;
}
#options {
line-height: 2.3em;
border: 1px solid black;
background-color: #eee;
padding: 4px;
}
#exclude {
width: 100%;
margin-bottom: 3px;
border: 1px solid #999;
}
#submit {
background-color: black;
color: white;
border: 0;
margin-bottom: -9px;
}
</style>
</head>
<body>
<h2>CherryPy Coverage</h2>"""
TEMPLATE_FORM = """
<div id="options">
<form action='menu' method=GET>
<input type='hidden' name='base' value='%(base)s' />
Show percentages
<input type='checkbox' %(showpct)s name='showpct' value='checked' /><br />
Hide files over
<input type='text' id='pct' name='pct' value='%(pct)s' size='3' />%%<br />
Exclude files matching<br />
<input type='text' id='exclude' name='exclude'
value='%(exclude)s' size='20' />
<br />
<input type='submit' value='Change view' id="submit"/>
</form>
</div>"""
TEMPLATE_FRAMESET = """<html>
<head><title>CherryPy coverage data</title></head>
<frameset cols='250, 1*'>
<frame src='menu?base=%s' />
<frame name='main' src='' />
</frameset>
</html>
"""
TEMPLATE_COVERAGE = """<html>
<head>
<title>Coverage for %(name)s</title>
<style>
h2 { margin-bottom: .25em; }
p { margin: .25em; }
.covered { color: #000; background-color: #fff; }
.notcovered { color: #fee; background-color: #500; }
.excluded { color: #00f; background-color: #fff; }
table .covered, table .notcovered, table .excluded
{ font-family: Andale Mono, monospace;
font-size: 10pt; white-space: pre; }
.lineno { background-color: #eee;}
.notcovered .lineno { background-color: #000;}
      table { border-collapse: collapse; }
</style>
</head>
<body>
<h2>%(name)s</h2>
<p>%(fullpath)s</p>
<p>Coverage: %(pc)s%%</p>"""
TEMPLATE_LOC_COVERED = """<tr class="covered">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_LOC_NOT_COVERED = """<tr class="notcovered">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_LOC_EXCLUDED = """<tr class="excluded">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_ITEM = (
"%s%s<a class='file' href='report?name=%s' target='main'>%s</a>\n"
)
def _percent(statements, missing):
s = len(statements)
e = s - len(missing)
if s > 0:
return int(round(100.0 * e / s))
return 0
def _show_branch(root, base, path, pct=0, showpct=False, exclude="",
coverage=the_coverage):
# Show the directory name and any of our children
dirs = [k for k, v in root.items() if v]
dirs.sort()
for name in dirs:
newpath = os.path.join(path, name)
if newpath.lower().startswith(base):
relpath = newpath[len(base):]
yield "| " * relpath.count(os.sep)
yield (
"<a class='directory' "
"href='menu?base=%s&exclude=%s'>%s</a>\n" %
(newpath, quote_plus(exclude), name)
)
for chunk in _show_branch(
root[name], base, newpath, pct, showpct,
exclude, coverage=coverage
):
yield chunk
# Now list the files
if path.lower().startswith(base):
relpath = path[len(base):]
files = [k for k, v in root.items() if not v]
files.sort()
for name in files:
newpath = os.path.join(path, name)
pc_str = ""
if showpct:
try:
_, statements, _, missing, _ = coverage.analysis2(newpath)
except:
# Yes, we really want to pass on all errors.
pass
else:
pc = _percent(statements, missing)
pc_str = ("%3d%% " % pc).replace(' ', ' ')
if pc < float(pct) or pc == -1:
pc_str = "<span class='fail'>%s</span>" % pc_str
else:
pc_str = "<span class='pass'>%s</span>" % pc_str
yield TEMPLATE_ITEM % ("| " * (relpath.count(os.sep) + 1),
pc_str, newpath, name)
def _skip_file(path, exclude):
if exclude:
return bool(re.search(exclude, path))
def _graft(path, tree):
d = tree
p = path
atoms = []
while True:
p, tail = os.path.split(p)
if not tail:
break
atoms.append(tail)
atoms.append(p)
if p != "/":
atoms.append("/")
atoms.reverse()
for node in atoms:
if node:
d = d.setdefault(node, {})
def get_tree(base, exclude, coverage=the_coverage):
"""Return covered module names as a nested dict."""
tree = {}
runs = coverage.data.executed_files()
for path in runs:
if not _skip_file(path, exclude) and not os.path.isdir(path):
_graft(path, tree)
return tree
class CoverStats(object):
def __init__(self, coverage, root=None):
self.coverage = coverage
if root is None:
# Guess initial depth. Files outside this path will not be
# reachable from the web interface.
import cherrypy
root = os.path.dirname(cherrypy.__file__)
self.root = root
def index(self):
return TEMPLATE_FRAMESET % self.root.lower()
index.exposed = True
def menu(self, base="/", pct="50", showpct="",
exclude=r'python\d\.\d|test|tut\d|tutorial'):
# The coverage module uses all-lower-case names.
base = base.lower().rstrip(os.sep)
yield TEMPLATE_MENU
yield TEMPLATE_FORM % locals()
# Start by showing links for parent paths
yield "<div id='crumbs'>"
path = ""
atoms = base.split(os.sep)
atoms.pop()
for atom in atoms:
path += atom + os.sep
yield ("<a href='menu?base=%s&exclude=%s'>%s</a> %s"
% (path, quote_plus(exclude), atom, os.sep))
yield "</div>"
yield "<div id='tree'>"
# Then display the tree
tree = get_tree(base, exclude, self.coverage)
if not tree:
yield "<p>No modules covered.</p>"
else:
for chunk in _show_branch(tree, base, "/", pct,
showpct == 'checked', exclude,
coverage=self.coverage):
yield chunk
yield "</div>"
yield "</body></html>"
menu.exposed = True
def annotated_file(self, filename, statements, excluded, missing):
source = open(filename, 'r')
buffer = []
for lineno, line in enumerate(source.readlines()):
lineno += 1
line = line.strip("\n\r")
empty_the_buffer = True
if lineno in excluded:
template = TEMPLATE_LOC_EXCLUDED
elif lineno in missing:
template = TEMPLATE_LOC_NOT_COVERED
elif lineno in statements:
template = TEMPLATE_LOC_COVERED
else:
empty_the_buffer = False
buffer.append((lineno, line))
if empty_the_buffer:
for lno, pastline in buffer:
yield template % (lno, cgi.escape(pastline))
buffer = []
yield template % (lineno, cgi.escape(line))
def report(self, name):
filename, statements, excluded, missing, _ = self.coverage.analysis2(
name)
pc = _percent(statements, missing)
yield TEMPLATE_COVERAGE % dict(name=os.path.basename(name),
fullpath=name,
pc=pc)
yield '<table>\n'
for line in self.annotated_file(filename, statements, excluded,
missing):
yield line
yield '</table>'
yield '</body>'
yield '</html>'
report.exposed = True
def serve(path=localFile, port=8080, root=None):
if coverage is None:
raise ImportError("The coverage module could not be imported.")
from coverage import coverage
cov = coverage(data_file=path)
cov.load()
import cherrypy
cherrypy.config.update({'server.socket_port': int(port),
'server.thread_pool': 10,
'environment': "production",
})
cherrypy.quickstart(CoverStats(cov, root))
if __name__ == "__main__":
serve(*tuple(sys.argv[1:]))
| gpl-2.0 |
evandavid/dodolipet | yowsup/layers/axolotl/store/sqlite/litesessionstore.py | 53 | 2155 | from axolotl.state.sessionstore import SessionStore
from axolotl.state.sessionrecord import SessionRecord
class LiteSessionStore(SessionStore):
def __init__(self, dbConn):
"""
:type dbConn: Connection
"""
self.dbConn = dbConn
dbConn.execute("CREATE TABLE IF NOT EXISTS sessions (_id INTEGER PRIMARY KEY AUTOINCREMENT,"
"recipient_id INTEGER UNIQUE, device_id INTEGER, record BLOB, timestamp INTEGER);")
def loadSession(self, recipientId, deviceId):
q = "SELECT record FROM sessions WHERE recipient_id = ? AND device_id = ?"
c = self.dbConn.cursor()
c.execute(q, (recipientId, deviceId))
result = c.fetchone()
if result:
return SessionRecord(serialized=result[0])
else:
return SessionRecord()
def getSubDeviceSessions(self, recipientId):
q = "SELECT device_id from sessions WHERE recipient_id = ?"
c = self.dbConn.cursor()
c.execute(q, (recipientId,))
result = c.fetchall()
deviceIds = [r[0] for r in result]
return deviceIds
def storeSession(self, recipientId, deviceId, sessionRecord):
self.deleteSession(recipientId, deviceId)
q = "INSERT INTO sessions(recipient_id, device_id, record) VALUES(?,?,?)"
c = self.dbConn.cursor()
c.execute(q, (recipientId, deviceId, sessionRecord.serialize()))
self.dbConn.commit()
def containsSession(self, recipientId, deviceId):
q = "SELECT record FROM sessions WHERE recipient_id = ? AND device_id = ?"
c = self.dbConn.cursor()
c.execute(q, (recipientId, deviceId))
result = c.fetchone()
return result is not None
def deleteSession(self, recipientId, deviceId):
q = "DELETE FROM sessions WHERE recipient_id = ? AND device_id = ?"
self.dbConn.cursor().execute(q, (recipientId, deviceId))
self.dbConn.commit()
def deleteAllSessions(self, recipientId):
q = "DELETE FROM sessions WHERE recipient_id = ?"
self.dbConn.cursor().execute(q, (recipientId,))
self.dbConn.commit()
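# Hedged usage sketch (recipient/device ids below are hypothetical; assumes the
# default axolotl SessionRecord constructor, as used in loadSession() above).
def _example_roundtrip():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    store = LiteSessionStore(conn)
    record = store.loadSession(12345, 1)     # fresh, empty SessionRecord
    store.storeSession(12345, 1, record)     # persist it
    return store.containsSession(12345, 1)   # -> True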
| gpl-3.0 |
thelastpickle/python-driver | tests/integration/cqlengine/columns/test_static_column.py | 3 | 3323 | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from uuid import uuid4
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model
from tests.integration.cqlengine.base import BaseCassEngTestCase
from tests.integration import PROTOCOL_VERSION
# TODO: is this really a protocol limitation, or is it just C* version?
# good enough proxy for now
STATIC_SUPPORTED = PROTOCOL_VERSION >= 2
class TestStaticModel(Model):
partition = columns.UUID(primary_key=True, default=uuid4)
cluster = columns.UUID(primary_key=True, default=uuid4)
static = columns.Text(static=True)
text = columns.Text()
class TestStaticColumn(BaseCassEngTestCase):
def setUp(cls):
if not STATIC_SUPPORTED:
raise unittest.SkipTest("only runs against the cql3 protocol v2.0")
super(TestStaticColumn, cls).setUp()
@classmethod
def setUpClass(cls):
drop_table(TestStaticModel)
if STATIC_SUPPORTED: # setup and teardown run regardless of skip
sync_table(TestStaticModel)
@classmethod
def tearDownClass(cls):
drop_table(TestStaticModel)
def test_mixed_updates(self):
""" Tests that updates on both static and non-static columns work as intended """
instance = TestStaticModel.create()
instance.static = "it's shared"
instance.text = "some text"
instance.save()
u = TestStaticModel.get(partition=instance.partition)
u.static = "it's still shared"
u.text = "another text"
u.update()
actual = TestStaticModel.get(partition=u.partition)
assert actual.static == "it's still shared"
def test_static_only_updates(self):
""" Tests that updates on static only column work as intended """
instance = TestStaticModel.create()
instance.static = "it's shared"
instance.text = "some text"
instance.save()
u = TestStaticModel.get(partition=instance.partition)
u.static = "it's still shared"
u.update()
actual = TestStaticModel.get(partition=u.partition)
assert actual.static == "it's still shared"
def test_static_with_null_cluster_key(self):
""" Tests that save/update/delete works for static column works when clustering key is null"""
instance = TestStaticModel.create(cluster=None, static = "it's shared")
instance.save()
u = TestStaticModel.get(partition=instance.partition)
u.static = "it's still shared"
u.update()
actual = TestStaticModel.get(partition=u.partition)
assert actual.static == "it's still shared"
| apache-2.0 |
lifemapper/core | LmCompute/plugins/multi/calculate/calculate.py | 1 | 9421 | """Module containing functions to calculate PAM statistics
Todo:
Convert to use Matrix instead of numpy matrices
"""
import numpy as np
from LmCommon.common.lmconstants import PamStatKeys, PhyloTreeKeys
from LmCompute.plugins.multi.calculate import ot_phylo
from lmpy import Matrix
# .............................................................................
class PamStats:
"""This class is used to calculate statistics for a PAM
"""
# ...........................
def __init__(self, pam, tree=None):
"""Constructor
Args:
pam: A Present / Absence Matrix to compute statistics for
tree: An optional TreeWrapper object to use for additional
statistics
"""
# Ensure PAM is a Matrix object. PAM data will be shortcut to data
if isinstance(pam, Matrix):
self.pam = pam
else:
self.pam = Matrix(pam)
self.tree = tree
self.alpha = None
self.alpha_prop = None
self.c_score = None
self.lande = None
self.legendre = None
self.num_sites = None
self.num_species = None
self.omega = None
self.omega_prop = None
self.phi = None
self.phi_avg_prop = None
self.psi = None
self.psi_avg_prop = None
self.sigma_sites = None
self.sigma_species = None
self.whittaker = None
self._calculate_core_stats()
self._calculate_diversity_statistics()
# ...........................
def get_covariance_matrices(self):
"""Returns the covariance matrices for the PAM
Todo:
Add headers
"""
try:
return self.sigma_sites, self.sigma_species
except Exception:
# We haven't calculated them yet
self._calculate_covariance_matrices()
return self.sigma_sites, self.sigma_species
# ...........................
def get_diversity_statistics(self):
"""Get the (beta) diversity statistics
"""
return Matrix.concatenate(
[self.whittaker, self.lande, self.legendre, self.c_score], axis=1)
# ...........................
def get_schluter_covariances(self):
"""Calculate and return the Schluter variance ratio statistics
"""
# Try to use already computed co-variance matrices, if that fails,
# calculate them too
try:
sp_var_ratio = float(
self.sigma_species.sum()) / self.sigma_species.trace()
site_var_ratio = float(
self.sigma_sites.sum()) / self.sigma_sites.trace()
except Exception:
# If we haven't calculated sigma sites and sigma species
self._calculate_covariance_matrices()
sp_var_ratio = float(
self.sigma_species.sum()) / self.sigma_species.trace()
site_var_ratio = float(
self.sigma_sites.sum()) / self.sigma_sites.trace()
return Matrix(
np.nan_to_num(np.array([[sp_var_ratio, site_var_ratio]])),
headers={
'0': ['Value'],
'1': [PamStatKeys.SPECIES_VARIANCE_RATIO,
PamStatKeys.SITES_VARIANCE_RATIO]})
# ...........................
def get_site_statistics(self):
"""Retrieves the site statistics as a Matrix of site statistic columns
"""
num_rows = self.alpha.shape[0]
stat_columns = [
self.alpha.reshape(num_rows, 1),
self.alpha_prop.reshape(num_rows, 1),
self.phi.reshape(num_rows, 1),
self.phi_avg_prop.reshape(num_rows, 1)]
sites_headers = [
PamStatKeys.ALPHA, PamStatKeys.ALPHA_PROP, PamStatKeys.PHI,
PamStatKeys.PHI_AVG_PROP]
# Check if we have tree stats too
if self.tree is not None:
# Get phylogenetic distance matrix
phylo_dist_mtx = self.tree.get_distance_matrix()
squid_annotations = self.tree.get_annotations(PhyloTreeKeys.SQUID)
squid_dict = {squid: label for label, squid in squid_annotations}
taxon_labels = []
keep_columns = []
squids = self.pam.get_column_headers()
for i, squid in enumerate(squids):
if squid in list(squid_dict.keys()):
keep_columns.append(i)
taxon_labels.append(squid_dict[squid])
# Slice the PAM to remove missing squid columns
sl_pam = self.pam.slice(
list(range(self.pam.shape[0])), keep_columns)
stat_columns.extend([
ot_phylo.mean_nearest_taxon_distance(sl_pam, phylo_dist_mtx),
ot_phylo.mean_pairwise_distance(sl_pam, phylo_dist_mtx),
ot_phylo.pearson_correlation(sl_pam, phylo_dist_mtx),
ot_phylo.phylogenetic_diversity(
sl_pam, self.tree, taxon_labels),
ot_phylo.sum_pairwise_distance(sl_pam, phylo_dist_mtx)])
sites_headers.extend(
[PamStatKeys.MNTD, PamStatKeys.MPD, PamStatKeys.PEARSON,
PamStatKeys.PD, PamStatKeys.SPD])
# Return a matrix
return Matrix(
np.nan_to_num(np.concatenate(stat_columns, axis=1)),
headers={'0': self.pam.get_row_headers(), '1': sites_headers})
# ...........................
def get_species_statistics(self):
"""Retrieves the species statistics as a Matrix
"""
num_sp = self.omega.shape[0]
sp_data = np.concatenate(
[self.omega.reshape(num_sp, 1),
self.omega_prop.reshape(num_sp, 1),
self.psi.reshape(num_sp, 1),
self.psi_avg_prop.reshape(num_sp, 1)], axis=1)
sp_headers = {
'0': self.pam.get_column_headers(),
'1': [PamStatKeys.OMEGA, PamStatKeys.OMEGA_PROP, PamStatKeys.PSI,
PamStatKeys.PSI_AVG_PROP]}
# Return a Matrix
return Matrix(np.nan_to_num(sp_data), headers=sp_headers)
# ...........................
def _calculate_core_stats(self):
"""This function calculates the standard PAM statistics
"""
# Number of species at each site
self.alpha = Matrix(np.sum(self.pam, axis=1))
# Number of sites for each species
self.omega = Matrix(np.sum(self.pam, axis=0))
# Calculate the number of species by looking for columns that have any
# presences. This will let the stats ignore empty columns
self.num_species = np.sum(np.any(self.pam, axis=0))
# Calculate the number of sites that have at least one species present
self.num_sites = np.sum(np.any(self.pam, axis=1))
# Site statistics
self.alpha_prop = self.alpha.astype(float) / self.num_species
self.phi = self.pam.dot(self.omega)
# phi_avg_prop can contain np.nan values when there are empty rows or
# columns, so set those to zero
self.phi_avg_prop = np.nan_to_num(
self.phi.astype(float) / (self.num_sites * self.alpha))
# Species statistics
self.omega_prop = self.omega.astype(float) / self.num_sites
self.psi = self.alpha.dot(self.pam)
# psi_avg_prop can produce np.nan for empty rows or columns, so set
# those to zero
self.psi_avg_prop = np.nan_to_num(
self.psi.astype(float) / (self.num_species * self.omega))
# ...........................
def _calculate_covariance_matrices(self):
"""Calculates the sigmaSpecies and sigmaSites covariance matrices
"""
alpha = self.pam.dot(self.pam.T).astype(float) # Site by site
omega = self.pam.T.dot(self.pam).astype(float)  # Species by species
self.sigma_sites = (alpha / self.num_species) - np.outer(
self.alpha_prop, self.alpha_prop)
self.sigma_species = (omega / self.num_sites) - np.outer(
self.omega_prop, self.omega_prop)
# ...........................
def _calculate_diversity_statistics(self):
"""Calculate the (beta) diversity statistics for this PAM
"""
self.whittaker = Matrix(
np.array([[float(self.num_species) / self.omega_prop.sum()]]),
headers={'0': ['value'], '1': [PamStatKeys.WHITTAKERS_BETA]})
self.lande = Matrix(
np.array([[self.num_species - self.omega_prop.sum()]]),
headers={'0': ['value'], '1': [PamStatKeys.LANDES_ADDATIVE_BETA]})
self.legendre = Matrix(
np.array([[self.omega.sum() - (
float((self.omega ** 2).sum()) / self.num_sites)]]),
headers={'0': ['value'], '1': [PamStatKeys.LEGENDRES_BETA]})
temp = 0.0
for i in range(self.num_species):
for j in range(i, self.num_species):
# Get the number shared (where both are == 1, so sum == 2)
num_shared = len(
np.where(np.sum(self.pam[:, [i, j]], axis=1) == 2)[0])
p_1 = self.omega[i] - num_shared
p_2 = self.omega[j] - num_shared
temp += p_1 * p_2
self.c_score = Matrix(
np.array([
[2 * temp / (self.num_species * (self.num_species - 1))]]),
headers={'0': ['value'], '1': [PamStatKeys.C_SCORE]})
| gpl-3.0 |
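For orientation, here is a self-contained numpy sketch of the core quantities that PamStats computes above; it uses plain arrays instead of lmpy Matrix objects, and the toy matrix is invented:

import numpy as np

# Toy PAM: 3 sites (rows) x 4 species (columns)
pam = np.array([[1, 0, 1, 0],
                [1, 1, 0, 0],
                [0, 1, 1, 1]])

alpha = pam.sum(axis=1)  # species richness per site -> [2, 2, 3]
omega = pam.sum(axis=0)  # number of occupied sites per species -> [2, 2, 2, 1]
num_species = np.any(pam, axis=0).sum()  # 4
num_sites = np.any(pam, axis=1).sum()    # 3
omega_prop = omega / float(num_sites)

# Whittaker's beta, matching _calculate_diversity_statistics above
whittaker_beta = float(num_species) / omega_prop.sum()  # 4 / (7/3) ~ 1.71
print(alpha, omega, whittaker_beta)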
awamper/draobpilc | draobpilc/widgets/items_view.py | 1 | 13650 | #!/usr/bin/env python3
# Copyright 2015 Ivan [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import GObject
from draobpilc import common
from draobpilc.lib import utils
from draobpilc.lib import fuzzy
from draobpilc.widgets.histories_manager import HistoriesManager
from draobpilc.widgets.items_counter import ItemsCounter
class AlreadyBound(Exception):
""" raise when ItemsView already bound to HistoryItems """
class ItemsView(Gtk.Box):
AUTOSCROLL_BORDER_OFFSET = 100
AUTOSCROLL_TIMEOUT_MS = 50
AUTOSCROLL_STEP = 10
__gsignals__ = {
'item-activated': (GObject.SIGNAL_RUN_FIRST, None, (object,)),
'item-selected': (GObject.SIGNAL_RUN_FIRST, None, (object,)),
'item-entered': (GObject.SIGNAL_RUN_FIRST, None, (object,)),
'item-left': (GObject.SIGNAL_RUN_FIRST, None, (object,))
}
def __init__(self):
super().__init__()
self.set_orientation(Gtk.Orientation.VERTICAL)
self.set_valign(Gtk.Align.FILL)
self.set_halign(Gtk.Align.FILL)
self.set_vexpand(True)
self.set_hexpand(True)
self.set_name('ItemsViewBox')
self._bound_history = None
self._last_entered_item = None
self._last_selected_index = None
self._show_index = None
self._autoscroll_timeout_id = 0
self._histories_manager = HistoriesManager()
placeholder = Gtk.Label()
placeholder.set_markup(
'<span font-size="xx-large">%s</span>' % _('Nothing')
)
placeholder.show()
self._listbox = Gtk.ListBox()
self._listbox.set_name('ItemsViewList')
self._listbox.set_selection_mode(Gtk.SelectionMode.MULTIPLE)
self._listbox.set_activate_on_single_click(False)
self._listbox.set_placeholder(placeholder)
self._listbox.connect('row-selected', self._on_row_selected)
self._listbox.connect('row-activated', self._on_row_activated)
self._listbox.connect('motion-notify-event', self._on_motion_event)
self._listbox.connect('leave-notify-event', self._on_leave_event)
self._listbox.connect('button-press-event', self._on_button_press_event)
self._listbox.connect('button-release-event', self._on_button_release_event)
self._items_counter = ItemsCounter(self._listbox)
self._load_rest_btn = Gtk.LinkButton()
self._load_rest_btn.set_label('load all history')
self._load_rest_btn.set_no_show_all(True)
self._load_rest_btn.connect(
'activate-link',
lambda _: self.load_rest_items()
)
self._load_rest_btn.hide()
scrolled = Gtk.ScrolledWindow()
scrolled.set_name('ItemsViewScrolledWindow')
scrolled.set_vexpand(True)
scrolled.set_hexpand(True)
scrolled.add(self._listbox)
bottom_box = Gtk.Box()
bottom_box.set_orientation(Gtk.Orientation.HORIZONTAL)
bottom_box.add(self._items_counter)
bottom_box.add(self._load_rest_btn)
bottom_box.add(self._histories_manager)
box = Gtk.Box()
box.set_orientation(Gtk.Orientation.VERTICAL)
box.add(scrolled)
box.add(bottom_box)
self.add(box)
self.show_all()
def __len__(self):
return len(self._listbox.get_children())
def _on_leave_event(self, listbox, event):
if self._last_entered_item:
self.emit('item-left', self._last_entered_item)
self._last_entered_item = None
def _on_motion_event(self, listbox, event):
def do_autoscroll_and_selection():
adjustment = self._listbox.get_adjustment()
new_value = adjustment.get_value() + ItemsView.AUTOSCROLL_STEP
adjustment.set_value(new_value)
row = self._listbox.get_row_at_y(
new_value + adjustment.get_page_increment()
)
if not row.is_selected(): self._listbox.select_row(row)
return True
def maybe_toggle_selection(row):
if event.state == Gdk.ModifierType.BUTTON3_MASK:
self.toggle_selection(row)
if event.state == Gdk.ModifierType.BUTTON3_MASK:
adjustment = self._listbox.get_adjustment()
autoscroll_border = (
adjustment.get_value() +
adjustment.get_page_increment() -
ItemsView.AUTOSCROLL_BORDER_OFFSET
)
if event.y > autoscroll_border:
if not self._autoscroll_timeout_id:
self._autoscroll_timeout_id = GLib.timeout_add(
ItemsView.AUTOSCROLL_TIMEOUT_MS,
do_autoscroll_and_selection
)
elif event.y < autoscroll_border and self._autoscroll_timeout_id:
GLib.source_remove(self._autoscroll_timeout_id)
self._autoscroll_timeout_id = 0
row = self._listbox.get_row_at_y(event.y)
if row:
item = row.get_child().item
if not self._last_entered_item:
self._last_entered_item = item
maybe_toggle_selection(row)
self.emit('item-entered', item)
elif self._last_entered_item != item:
maybe_toggle_selection(row)
self.emit('item-left', self._last_entered_item)
self.emit('item-entered', item)
self._last_entered_item = item
elif self._last_entered_item:
self.emit('item-left', self._last_entered_item)
self._last_entered_item = None
def _on_button_press_event(self, listbox, event):
row = self._listbox.get_row_at_y(event.y)
if not row or event.button != 3: return
self.toggle_selection(row)
def _on_button_release_event(self, listbox, event):
if self._autoscroll_timeout_id:
GLib.source_remove(self._autoscroll_timeout_id)
self._autoscroll_timeout_id = 0
def _on_row_selected(self, listbox, row):
if row: self.emit('item-selected', row.get_child().item)
def _on_row_activated(self, listbox, row):
if not row: return
item = row.get_child().item
if item: self.activate_item(item)
def _on_changed(self, history_items):
self.show_items()
self.set_active_item()
self.resume_selection() or self.select_first()
self._last_selected_index = 0
def _remove(self, history_items, item=None):
self.save_selection()
result = False
if not item: return result
row = self._get_row_for_item(item)
if row:
row.remove(row.get_child())
row.destroy()
result = True
return result
def _get_row_for_item(self, item):
result = False
for row in self._listbox.get_children():
if row.get_child().item == item:
result = row
break
return result
def save_selection(self):
def get_current_index(child):
result = None
for i, ch in enumerate(self._listbox.get_children()):
if ch != child: continue
result = i
break
return result
selected_row = self._listbox.get_selected_rows()
try:
selected_row = selected_row[0]
except IndexError:
return
self._last_selected_index = get_current_index(selected_row)
def resume_selection(self):
if not self._last_selected_index: return False
children = self._listbox.get_children()
if len(children) == self._last_selected_index:
self._last_selected_index -= 1
for i, row in enumerate(children):
if i == self._last_selected_index:
self._listbox.select_row(row)
# i'm sorry
GLib.timeout_add(200, lambda *a, **ka: row.grab_focus())
break
return True
def bind(self, history_items):
if self._bound_history:
raise AlreadyBound()
self._bound_history = history_items
self._bound_history.connect('changed', self._on_changed)
self._bound_history.connect('removed', self._remove)
self._items_counter.set_history_items(self._bound_history)
self._items_counter.update()
self.show_items()
def show_items(self):
limit = common.SETTINGS[common.ITEMS_VIEW_LIMIT]
items = self._bound_history
if limit: items = items[:limit]
self.clear()
for item in items:
self._listbox.add(item.widget)
if len(items) < len(self._bound_history):
self._load_rest_btn.show()
else:
self._load_rest_btn.hide()
self.show_all()
def load_rest_items(self):
limit = common.SETTINGS[common.ITEMS_VIEW_LIMIT]
if not limit: return
for item in self._bound_history[limit:]:
self._listbox.add(item.widget)
self._load_rest_btn.hide()
self.show_all()
return True
def set_active_item(self):
def on_clipboard(clipboard, text):
for row in self._listbox.get_children():
item_widget = row.get_child()
item = item_widget.item
if item.raw != text:
row.set_activatable(True)
item_widget.set_sensitive(True)
item_widget.set_active(False)
else:
row.set_activatable(False)
item_widget.set_active(True)
if len(self) < 1: return
clipboard = Gtk.Clipboard.get_default(Gdk.Display.get_default())
text = clipboard.wait_for_text()
on_clipboard(clipboard, text)
def select_first(self, grab_focus=False):
self._listbox.unselect_all()
self.set_active_item()
for row in self._listbox.get_children():
if not row.get_activatable() or not row.get_mapped(): continue
self._listbox.select_row(row)
if grab_focus: row.grab_focus()
break
self.reset_scroll()
def get_selected(self):
result = []
rows = self._listbox.get_selected_rows()
for row in rows:
result.append(row.get_child().item)
return result
def clear(self):
self._listbox.unselect_all()
if self._autoscroll_timeout_id:
GLib.source_remove(self._autoscroll_timeout_id)
self._autoscroll_timeout_id = 0
for row in self._listbox.get_children():
child = row.get_child()
if child: row.remove(child)
row.destroy()
def reset_scroll(self):
adjustment = self._listbox.get_adjustment()
lower = adjustment.get_lower()
adjustment.set_value(lower)
def toggle_selection(self, row):
if row.is_selected(): self._listbox.unselect_row(row)
else: self._listbox.select_row(row)
def activate_item(self, item):
if item: self.emit('item-activated', item)
def get_for_shortcut(self, number):
result = None
curr_index = None
children = self._listbox.get_children()
for index, row in enumerate(children):
if row.get_child().item.index == 0: continue
visible = utils.is_visible_on_scroll(
self._listbox.get_adjustment(),
row
)
if visible:
if curr_index is None:
curr_index = 0
else:
curr_index += 1
if curr_index is not None and curr_index == number:
result = row.get_child().item
break
return result
def show_shortcut_hints(self, show):
curr_index = -1
children = self._listbox.get_children()
if show:
for index, row in enumerate(children):
if curr_index >= 8: break
if row.get_child().item.index == 0: continue
visible = utils.is_visible_on_scroll(
self._listbox.get_adjustment(),
row
)
if visible:
if curr_index == -1:
curr_index = 0
else:
curr_index += 1
row.get_child().show_shortcut_hint(curr_index + 1)
else:
for row in children:
row.get_child().show_shortcut_hint(None)
@property
def histories_manager(self):
return self._histories_manager
@property
def listbox(self):
return self._listbox
@property
def n_selected(self):
selected = self.get_selected()
return len(selected)
| gpl-3.0 |
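A hypothetical sketch of how a caller might drive this widget; only the signal names and the bind()/AlreadyBound behaviour come from the class above, while the history object and handler names are assumed:

def on_activated(view, item):
    # Fired when a row is activated (double click / Enter)
    print('activated:', item)

view = ItemsView()
view.connect('item-activated', on_activated)
view.connect('item-selected', lambda v, item: print('selected:', item))
# history_items is expected to behave like HistoryItems: emit 'changed' and
# 'removed' signals and support slicing; a second bind() raises AlreadyBound.
view.bind(history_items)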
gseismic/vnpy | vn.ctp/vnctptd/test/tdtest.py | 96 | 4886 | # encoding: UTF-8
import sys
from time import sleep
from PyQt4 import QtGui
from vnctptd import *
#----------------------------------------------------------------------
def print_dict(d):
"""按照键值打印一个字典"""
for key,value in d.items():
print key + ':' + str(value)
#----------------------------------------------------------------------
def simple_log(func):
"""简单装饰器用于输出函数名"""
def wrapper(*args, **kw):
print ""
print str(func.__name__)
return func(*args, **kw)
return wrapper
########################################################################
class TestTdApi(TdApi):
"""测试用实例"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(TestTdApi, self).__init__()
#----------------------------------------------------------------------
@simple_log
def onFrontConnected(self):
"""服务器连接"""
pass
#----------------------------------------------------------------------
@simple_log
def onFrontDisconnected(self, n):
"""服务器断开"""
print n
#----------------------------------------------------------------------
@simple_log
def onHeartBeatWarning(self, n):
"""心跳报警"""
print n
#----------------------------------------------------------------------
@simple_log
def onRspError(self, error, n, last):
"""错误"""
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
print_dict(data)
print_dict(error)
self.brokerID = data['BrokerID']
self.userID = data['UserID']
self.frontID = data['FrontID']
self.sessionID = data['SessionID']
#----------------------------------------------------------------------
@simple_log
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspQrySettlementInfo(self, data, error, n, last):
"""查询结算信息回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspSettlementInfoConfirm(self, data, error, n, last):
"""确认结算信息回报"""
print_dict(data)
print_dict(error)
#----------------------------------------------------------------------
@simple_log
def onRspQryInstrument(self, data, error, n, last):
"""查询合约回报"""
print_dict(data)
print_dict(error)
print n
print last
#----------------------------------------------------------------------
def main():
"""主测试函数,出现堵塞时可以考虑使用sleep"""
reqid = 0
# Create the Qt application object, used for the event loop
app = QtGui.QApplication(sys.argv)
# Create the API object (tested OK)
api = TestTdApi()
# Create the TraderApi object on the C++ side; the argument is the directory used to save the .con files (tested OK)
api.createFtdcTraderApi('')
# Set the data stream retransmission mode (tested OK)
api.subscribePrivateTopic(1)
api.subscribePublicTopic(1)
# Register the front server address (tested OK)
api.registerFront("tcp://qqfz-front1.ctp.shcifco.com:32305")
# Initialize the API and connect to the front server (tested OK)
api.init()
sleep(0.5)
# Log in (tested OK)
loginReq = {} # Create an empty dict
loginReq['UserID'] = '' # Parameters are passed as dictionary key/value pairs
loginReq['Password'] = '' # Key names match the C++ struct member names
loginReq['BrokerID'] = ''
reqid = reqid + 1 # The request id must remain unique
i = api.reqUserLogin(loginReq, reqid)
sleep(0.5)
## Query instruments (tested OK)
#reqid = reqid + 1
#i = api.reqQryInstrument({}, reqid)
## Query settlement info (tested OK)
#req = {}
#req['BrokerID'] = api.brokerID
#req['InvestorID'] = api.userID
#reqid = reqid + 1
#i = api.reqQrySettlementInfo(req, reqid)
#sleep(0.5)
## Confirm settlement info (tested OK)
#req = {}
#req['BrokerID'] = api.brokerID
#req['InvestorID'] = api.userID
#reqid = reqid + 1
#i = api.reqSettlementInfoConfirm(req, reqid)
#sleep(0.5)
# Keep the Qt event loop running
app.exec_()
if __name__ == '__main__':
main()
| mit |
blckshrk/Weboob | modules/gazelle/backend.py | 1 | 2167 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.torrent import ICapTorrent
from weboob.tools.backend import BaseBackend, BackendConfig
from weboob.tools.value import ValueBackendPassword, Value
from .browser import GazelleBrowser
__all__ = ['GazelleBackend']
class GazelleBackend(BaseBackend, ICapTorrent):
NAME = 'gazelle'
MAINTAINER = u'Romain Bignon'
EMAIL = '[email protected]'
VERSION = '0.h'
DESCRIPTION = 'Gazelle-based BitTorrent trackers'
LICENSE = 'AGPLv3+'
CONFIG = BackendConfig(Value('domain', label='Domain (example "ssl.what.cd")'),
Value('protocol', label='Protocol to use', choices=('http', 'https')),
Value('username', label='Username'),
ValueBackendPassword('password', label='Password'))
BROWSER = GazelleBrowser
def create_default_browser(self):
return self.create_browser(self.config['protocol'].get(), self.config['domain'].get(),
self.config['username'].get(), self.config['password'].get())
def get_torrent(self, id):
return self.browser.get_torrent(id)
def get_torrent_file(self, id):
torrent = self.browser.get_torrent(id)
if not torrent:
return None
return self.browser.openurl(torrent.url.encode('utf-8')).read()
def iter_torrents(self, pattern):
return self.browser.iter_torrents(pattern)
| agpl-3.0 |
wowadrien/SFGP | node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
working_directory: other files which may trigger the rule. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
| apache-2.0 |
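A hypothetical usage sketch for the Writer above; gyp normally passes a real MSVSVersion object, so the stub below exists only to satisfy the ProjectVersion() call made by WriteIfChanged():

class _StubVersion(object):
    def ProjectVersion(self):
        return '9.00'  # value written into the Version attribute

writer = Writer('example.vcproj.user', _StubVersion(), 'example')
writer.AddConfig('Debug|Win32')
writer.AddDebugSettings(
    'Debug|Win32',
    ['python', 'run_tests.py', '--verbose'],  # quoted by _QuoteWin32CommandLineArgs
    environment={'PYTHONPATH': 'src'},
    working_directory='build')
writer.WriteIfChanged()  # serialized through gyp.easy_xml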
rcfduarte/nmsat | projects/examples/scripts/single_neuron_dcinput.py | 1 | 7658 | __author__ = 'duarte'
from modules.parameters import ParameterSet, ParameterSpace, extract_nestvalid_dict
from modules.input_architect import EncodingLayer
from modules.net_architect import Network
from modules.io import set_storage_locations
from modules.signals import iterate_obj_list
from modules.analysis import single_neuron_dcresponse
import cPickle as pickle
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as pl
import nest
# ######################################################################################################################
# Experiment options
# ======================================================================================================================
plot = True
display = True
save = True
# ######################################################################################################################
# Extract parameters from file and build global ParameterSet
# ======================================================================================================================
params_file = '../parameters/single_neuron_fI.py'
parameter_set = ParameterSpace(params_file)[0]
parameter_set = parameter_set.clean(termination='pars')
if not isinstance(parameter_set, ParameterSet):
if isinstance(parameter_set, basestring) or isinstance(parameter_set, dict):
parameter_set = ParameterSet(parameter_set)
else:
raise TypeError("parameter_set must be ParameterSet, string with full path to parameter file or dictionary")
# ######################################################################################################################
# Setup extra variables and parameters
# ======================================================================================================================
if plot:
import modules.visualization as vis
vis.set_global_rcParams(parameter_set.kernel_pars['mpl_path'])
paths = set_storage_locations(parameter_set, save)
np.random.seed(parameter_set.kernel_pars['np_seed'])
results = dict()
# ######################################################################################################################
# Set kernel and simulation parameters
# ======================================================================================================================
print('\nRunning ParameterSet {0}'.format(parameter_set.label))
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
nest.SetKernelStatus(extract_nestvalid_dict(parameter_set.kernel_pars.as_dict(), param_type='kernel'))
# ######################################################################################################################
# Build network
# ======================================================================================================================
net = Network(parameter_set.net_pars)
# ######################################################################################################################
# Randomize initial variable values
# ======================================================================================================================
for idx, n in enumerate(list(iterate_obj_list(net.populations))):
if hasattr(parameter_set.net_pars, "randomize_neuron_pars"):
randomize = parameter_set.net_pars.randomize_neuron_pars[idx]
for k, v in randomize.items():
n.randomize_initial_states(k, randomization_function=v[0], **v[1])
# ######################################################################################################################
# Build and connect input
# ======================================================================================================================
enc_layer = EncodingLayer(parameter_set.encoding_pars)
enc_layer.connect(parameter_set.encoding_pars, net)
# ######################################################################################################################
# Set-up Analysis
# ======================================================================================================================
net.connect_devices()
# ######################################################################################################################
# Simulate
# ======================================================================================================================
if parameter_set.kernel_pars.transient_t:
net.simulate(parameter_set.kernel_pars.transient_t)
net.flush_records()
net.simulate(parameter_set.kernel_pars.sim_time + nest.GetKernelStatus()['resolution'])
# ######################################################################################################################
# Extract and store data
# ======================================================================================================================
net.extract_population_activity(t_start=parameter_set.kernel_pars.transient_t + nest.GetKernelStatus()['resolution'],
t_stop=parameter_set.kernel_pars.sim_time + parameter_set.kernel_pars.transient_t)
net.extract_network_activity()
net.flush_records()
# ######################################################################################################################
# Analyse / plot data
# ======================================================================================================================
analysis_interval = [parameter_set.kernel_pars.transient_t + nest.GetKernelStatus()['resolution'],
parameter_set.kernel_pars.sim_time + parameter_set.kernel_pars.transient_t]
for idd, nam in enumerate(net.population_names):
results.update({nam: {}})
results[nam] = single_neuron_dcresponse(net.populations[idd],
parameter_set, start=analysis_interval[0],
stop=analysis_interval[1], plot=plot,
display=display, save=paths['figures'] + paths['label'])
idx = np.min(np.where(results[nam]['output_rate']))
print("Rate range for neuron {0} = [{1}, {2}] Hz".format(
str(nam), str(np.min(results[nam]['output_rate'][results[nam]['output_rate'] > 0.])),
str(np.max(results[nam]['output_rate'][results[nam]['output_rate'] > 0.]))))
results[nam].update({'min_rate': np.min(results[nam]['output_rate'][results[nam]['output_rate'] > 0.]),
'max_rate': np.max(results[nam]['output_rate'][results[nam]['output_rate'] > 0.])})
print("Rheobase Current for neuron {0} in [{1}, {2}]".format(
str(nam), str(results[nam]['input_amplitudes'][idx - 1]), str(results[nam]['input_amplitudes'][idx])))
x = np.array(results[nam]['input_amplitudes'])
y = np.array(results[nam]['output_rate'])
iddxs = np.where(y)
slope, intercept, r_value, p_value, std_err = stats.linregress(x[iddxs], y[iddxs])
print("fI Slope for neuron {0} = {1} Hz/nA [linreg method]".format(nam, str(slope * 1000.)))
results[nam].update({'fI_slope': slope * 1000., 'I_rh': [results[nam]['input_amplitudes'][idx - 1],
results[nam]['input_amplitudes'][idx]]})
# ######################################################################################################################
# Save data
# ======================================================================================================================
if save:
with open(paths['results'] + 'Results_' + parameter_set.label, 'w') as f:
pickle.dump(results, f)
parameter_set.save(paths['parameters'] + 'Parameters_' + parameter_set.label)
| gpl-2.0 |
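The fI slope reported above is a straight-line fit of output rate against input amplitude; in isolation that step looks like the sketch below, where the data points are invented and the factor of 1000 converts the slope to Hz/nA as in the script:

import numpy as np
import scipy.stats as stats

amplitudes = np.array([0., 100., 200., 300., 400., 500.])  # pA
rates = np.array([0., 0., 5., 12., 19., 26.])              # Hz

idxs = np.where(rates)  # keep only the supra-threshold points
slope, intercept, r_value, p_value, std_err = stats.linregress(
    amplitudes[idxs], rates[idxs])
print("fI slope = {0} Hz/nA".format(slope * 1000.))  # 0.07 Hz/pA -> 70 Hz/nA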
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pip/_internal/utils/glibc.py | 5 | 3282 | from __future__ import absolute_import
import ctypes
import re
import warnings
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple
def glibc_version_string():
# type: () -> Optional[str]
"Returns glibc version string, or None if not using glibc."
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
process_namespace = ctypes.CDLL(None)
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
# Separated out from have_compatible_glibc for easier unit testing
def check_glibc_version(version_str, required_major, minimum_minor):
# type: (str, int, int) -> bool
# Parse string and check against requested version.
#
# We use a regexp instead of str.split because we want to discard any
# random junk that might come after the minor version -- this might happen
# in patched/forked versions of glibc (e.g. Linaro's version of glibc
# uses version strings like "2.20-2014.11"). See gh-3588.
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn("Expected glibc version with 2 components major.minor,"
" got: %s" % version_str, RuntimeWarning)
return False
return (int(m.group("major")) == required_major and
int(m.group("minor")) >= minimum_minor)
def have_compatible_glibc(required_major, minimum_minor):
# type: (int, int) -> bool
version_str = glibc_version_string() # type: Optional[str]
if version_str is None:
return False
return check_glibc_version(version_str, required_major, minimum_minor)
# platform.libc_ver regularly returns completely nonsensical glibc
# versions. E.g. on my computer, platform says:
#
# ~$ python2.7 -c 'import platform; print(platform.libc_ver())'
# ('glibc', '2.7')
# ~$ python3.5 -c 'import platform; print(platform.libc_ver())'
# ('glibc', '2.9')
#
# But the truth is:
#
# ~$ ldd --version
# ldd (Debian GLIBC 2.22-11) 2.22
#
# This is unfortunate, because it means that the linehaul data on libc
# versions that was generated by pip 8.1.2 and earlier is useless and
# misleading. Solution: instead of using platform, use our code that actually
# works.
def libc_ver():
# type: () -> Tuple[str, str]
"""Try to determine the glibc version
Returns a tuple of strings (lib, version) which default to empty strings
in case the lookup fails.
"""
glibc_version = glibc_version_string()
if glibc_version is None:
return ("", "")
else:
return ("glibc", glibc_version)
| apache-2.0 |
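A brief usage sketch of the helpers above; the version numbers are arbitrary examples, and on a non-glibc platform glibc_version_string() simply returns None so the compatibility check fails:

# e.g. gate a wheel that requires glibc >= 2.17
if have_compatible_glibc(2, 17):
    print('compatible glibc detected:', libc_ver())
else:
    print('no compatible glibc')

# the parsing step can be exercised on its own
assert check_glibc_version('2.22-11', 2, 17)
assert not check_glibc_version('2.5', 2, 17)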
tiborsimko/zenodo | zenodo/modules/search_ui/__init__.py | 8 | 1063 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Search interface customizations."""
from __future__ import absolute_import, print_function
| gpl-2.0 |
nttks/edx-platform | common/test/utils.py | 61 | 3533 | """
General testing utilities.
"""
import sys
from contextlib import contextmanager
from django.dispatch import Signal
from markupsafe import escape
from mock import Mock, patch
@contextmanager
def nostderr():
"""
ContextManager to suppress stderr messages
http://stackoverflow.com/a/1810086/882918
"""
savestderr = sys.stderr
class Devnull(object):
""" /dev/null incarnation as output-stream-like object """
def write(self, _):
""" Write method - just does nothing"""
pass
sys.stderr = Devnull()
try:
yield
finally:
sys.stderr = savestderr
class XssTestMixin(object):
"""
Mixin for testing XSS vulnerabilities.
"""
def assert_xss(self, response, xss_content):
"""Assert that `xss_content` is not present in the content of
`response`, and that its escaped version is present. Uses the
same `markupsafe.escape` function as Mako templates.
Args:
response (Response): The HTTP response
xss_content (str): The Javascript code to check for.
Returns:
None
"""
self.assertContains(response, escape(xss_content))
self.assertNotContains(response, xss_content)
def disable_signal(module, signal):
"""Replace `signal` inside of `module` with a dummy signal. Can be
used as a method or class decorator, as well as a context manager."""
return patch.object(module, signal, new=Signal())
class MockSignalHandlerMixin(object):
"""Mixin for testing sending of signals."""
@contextmanager
def assert_signal_sent(self, module, signal, *args, **kwargs):
"""Assert that a signal was sent with the correct arguments. Since
Django calls signal handlers with the signal as an argument,
it is added to `kwargs`.
Uses `mock.patch.object`, which requires the target to be
specified as a module along with a variable name inside that
module.
Args:
module (module): The module in which to patch the given signal name.
signal (str): The name of the signal to patch.
*args, **kwargs: The arguments which should have been passed
along with the signal. If `exclude_args` is passed as a
keyword argument, its value should be a list of keyword
arguments passed to the signal whose values should be
ignored.
"""
with patch.object(module, signal, new=Signal()) as mock_signal:
def handler(*args, **kwargs): # pylint: disable=unused-argument
"""No-op signal handler."""
pass
mock_handler = Mock(spec=handler)
mock_signal.connect(mock_handler)
yield
self.assertTrue(mock_handler.called)
mock_args, mock_kwargs = mock_handler.call_args # pylint: disable=unpacking-non-sequence
if 'exclude_args' in kwargs:
for key in kwargs['exclude_args']:
self.assertIn(key, mock_kwargs)
del mock_kwargs[key]
del kwargs['exclude_args']
self.assertEqual(mock_args, args)
self.assertEqual(mock_kwargs, dict(kwargs, signal=mock_signal))
@contextmanager
def skip_signal(signal, **kwargs):
"""
ContextManager to skip a signal by disconnecting it, yielding,
and then reconnecting the signal.
"""
signal.disconnect(**kwargs)
yield
signal.connect(**kwargs)
| agpl-3.0 |
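A self-contained sketch of the signal-assertion mixin above; the fake module and signal name are stand-ins so the example can run without a real Django app. skip_signal is used analogously, passing the same kwargs that were originally given to connect():

import types
import unittest
from django.dispatch import Signal

fake_signals = types.ModuleType('fake_signals')  # stand-in for a real signals module
fake_signals.ITEM_SAVED = Signal()

def save_item(item_id):
    fake_signals.ITEM_SAVED.send(sender=None, item_id=item_id)

class SaveItemTest(MockSignalHandlerMixin, unittest.TestCase):
    def test_sends_signal(self):
        # Patches fake_signals.ITEM_SAVED, connects a mock handler, and checks
        # that save_item() sent the signal with exactly these arguments.
        with self.assert_signal_sent(fake_signals, 'ITEM_SAVED',
                                     sender=None, item_id=42):
            save_item(42)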