code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
---|---|---|---|---|---|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
import textwrap
from twisted.internet import defer
from twisted.internet import reactor
from zope.interface import implementer
from buildbot import util
from buildbot.interfaces import IRenderable
from buildbot.process import buildstep
from buildbot.process import remotecommand
from buildbot.process import results
from buildbot.steps.source.base import Source
@implementer(IRenderable)
class RepoDownloadsFromProperties(util.ComparableMixin):
    """Renderable collecting "repo download" requests from build properties.

    Each property listed in *names* may hold one or more download
    specifications; all of them are parsed and concatenated into a single
    list of "project change/patchset" strings for the worker.
    """
    # Accepted spellings, tried in this order:
    #   "repo download <proj> <change>/<patchset>"  (gerrit web UI copy/paste)
    #   "<proj> <change>/<patchset>"
    #   "<proj>/<change>/<patchset>"
    parse_download_re = (re.compile(r"repo download ([^ ]+) ([0-9]+/[0-9]+)"),
                         re.compile(r"([^ ]+) ([0-9]+/[0-9]+)"),
                         re.compile(r"([^ ]+)/([0-9]+/[0-9]+)"),
                         )
    compare_attrs = ('names',)
    def __init__(self, names):
        self.names = names
    def getRenderingFor(self, props):
        """Collect downloads from every configured property that is set."""
        collected = []
        for name in self.names:
            value = props.getProperty(name)
            if value is not None:
                collected.extend(self.parseDownloadProperty(value))
        return collected
    def parseDownloadProperty(self, s):
        """Parse a property value into a list of download specifications.

        Supports several occurrences of "repo download proj number/patch"
        (direct copy/paste from the gerrit web site) as well as the shorter
        "proj number/patch" and "proj/number/patch" forms, so an integrator
        can build with several pending interdependent changes.
        Returns the list of repo downloads sent to the worker.
        """
        if s is None:
            return []
        found = []
        for pattern in self.parse_download_re:
            hit = pattern.search(s)
            while hit:
                found.append("{} {}".format(hit.group(1), hit.group(2)))
                # cut the matched text out so the same pattern can match again
                s = s[:hit.start(0)] + s[hit.end(0):]
                hit = pattern.search(s)
        return found
@implementer(IRenderable)
class RepoDownloadsFromChangeSource(util.ComparableMixin):
    """Renderable deriving repo downloads from the build's gerrit changes.

    Only changes carrying a "patchset-created" gerrit event are turned into
    "project change/patchset" download specifications.
    """
    compare_attrs = ('codebase',)
    def __init__(self, codebase=None):
        self.codebase = codebase
    def getRenderingFor(self, props):
        """Build the download list from the relevant source stamp's changes."""
        build = props.getBuild()
        if self.codebase is None:
            changes = build.allChanges()
        else:
            changes = build.getSourceStamp(self.codebase).changes
        result = []
        for change in changes:
            chprops = change.properties
            if ("event.type" in chprops and
                    chprops["event.type"] == "patchset-created"):
                result.append("{} {}/{}".format(chprops["event.change.project"],
                                                chprops["event.change.number"],
                                                chprops["event.patchSet.number"]))
        return result
class Repo(Source):
    """Check out sources managed by google's "repo" multi-repository tool.

    Runs ``repo init`` / ``repo sync`` on the worker, optionally seeded from
    (and cached back to) a tarball of the .repo directory, then applies any
    pending gerrit changesets via ``repo download``.
    """
    name = 'repo'
    renderables = ["manifestURL", "manifestBranch", "manifestFile", "tarball", "jobs",
                   "syncAllBranches", "updateTarballAge", "manifestOverrideUrl",
                   "repoDownloads", "depth"]
    ref_not_found_re = re.compile(r"fatal: Couldn't find remote ref")
    # BUGFIX: these alternatives used to be fused by implicit string literal
    # concatenation (missing commas), producing the single alternative
    # "error: fatal: possibly due to conflict resolution." which never matches
    # real git output; each message is now matched on its own.
    cherry_pick_error_re = re.compile(r"|".join([r"Automatic cherry-pick failed",
                                                 r"error: ",
                                                 r"fatal: ",
                                                 r"possibly due to conflict resolution."]))
    # parses "... refs/changes/NN/<change>/<patchset> -> FETCH_HEAD" lines
    re_change = re.compile(r".* refs/changes/\d\d/(\d+)/(\d+) -> FETCH_HEAD$")
    re_head = re.compile(r"^HEAD is now at ([0-9a-f]+)...")
    # number of retries, if we detect mirror desynchronization
    mirror_sync_retry = 10
    # wait 1min between retries (thus default total retry time is 10min)
    mirror_sync_sleep = 60
    def __init__(self,
                 manifestURL=None,
                 manifestBranch="master",
                 manifestFile="default.xml",
                 tarball=None,
                 jobs=None,
                 syncAllBranches=False,
                 updateTarballAge=7 * 24.0 * 3600.0,
                 manifestOverrideUrl=None,
                 repoDownloads=None,
                 depth=0,
                 syncQuietly=False,
                 **kwargs):
        """
        @type manifestURL: string
        @param manifestURL: The URL which points at the repo manifests repository.
        @type manifestBranch: string
        @param manifestBranch: The manifest branch to check out by default.
        @type manifestFile: string
        @param manifestFile: The manifest to use for sync.
        @type tarball: string
        @param tarball: optional path of a tarball used to seed and cache
                        the .repo directory between builds
        @type jobs: integer
        @param jobs: optional number of parallel fetch jobs
                     (passed as -j to repo sync)
        @type syncAllBranches: bool.
        @param syncAllBranches: true, then we must slowly synchronize all branches.
        @type updateTarballAge: float
        @param updateTarballAge: renderable to determine the update tarball policy,
                                 given properties
                                 Returns: max age of tarball in seconds, or None, if we
                                 want to skip tarball update
        @type manifestOverrideUrl: string
        @param manifestOverrideUrl: optional http URL for overriding the manifest
                                    usually coming from Property setup by a ForceScheduler
        @type repoDownloads: list of strings
        @param repoDownloads: optional repo download to perform after the repo sync
        @type depth: integer
        @param depth: optional depth parameter to repo init.
                      If specified, create a shallow clone with given depth.
        @type syncQuietly: bool.
        @param syncQuietly: true, then suppress verbose output from repo sync.
        """
        self.manifestURL = manifestURL
        self.manifestBranch = manifestBranch
        self.manifestFile = manifestFile
        self.tarball = tarball
        self.jobs = jobs
        self.syncAllBranches = syncAllBranches
        self.updateTarballAge = updateTarballAge
        self.manifestOverrideUrl = manifestOverrideUrl
        if repoDownloads is None:
            repoDownloads = []
        self.repoDownloads = repoDownloads
        self.depth = depth
        self.syncQuietly = syncQuietly
        super().__init__(**kwargs)
        assert self.manifestURL is not None
    def computeSourceRevision(self, changes):
        """Use the revision of the most recent change, if any."""
        if not changes:
            return None
        return changes[-1].revision
    def filterManifestPatches(self):
        """
        Patches to manifest projects are a bit special.
        repo does not support a way to download them automatically,
        so we need to implement the boilerplate manually.
        This code separates the manifest patches from the other patches,
        and generates commands to import those manifest patches.
        """
        manifest_unrelated_downloads = []
        manifest_related_downloads = []
        for download in self.repoDownloads:
            project, ch_ps = download.split(" ")[-2:]
            if (self.manifestURL.endswith("/" + project) or
                    self.manifestURL.endswith("/" + project + ".git")):
                # gerrit refs are sharded by the last two digits of the change number
                ch, ps = map(int, ch_ps.split("/"))
                branch = "refs/changes/%02d/%d/%d" % (ch % 100, ch, ps)
                manifest_related_downloads.append(
                    ["git", "fetch", self.manifestURL, branch])
                manifest_related_downloads.append(
                    ["git", "cherry-pick", "FETCH_HEAD"])
            else:
                manifest_unrelated_downloads.append(download)
        self.repoDownloads = manifest_unrelated_downloads
        self.manifestDownloads = manifest_related_downloads
    def _repoCmd(self, command, abandonOnFailure=True, **kwargs):
        """Run a "repo" subcommand on the worker."""
        return self._Cmd(["repo"] + command, abandonOnFailure=abandonOnFailure, **kwargs)
    @defer.inlineCallbacks
    def _Cmd(self, command, abandonOnFailure=True, workdir=None, **kwargs):
        """Run a shell command on the worker, logging to the stdio log.

        Returns the command's return code; raises BuildStepFailed if the
        command fails and abandonOnFailure is set.  The command is also
        remembered in self.lastCommand for stdout/stderr inspection.
        """
        if workdir is None:
            workdir = self.workdir
        cmd = remotecommand.RemoteShellCommand(workdir, command,
                                               env=self.env,
                                               logEnviron=self.logEnviron,
                                               timeout=self.timeout, **kwargs)
        self.lastCommand = cmd
        # does not make sense to logEnviron for each command (just for first)
        self.logEnviron = False
        cmd.useLog(self.stdio_log, False)
        yield self.stdio_log.addHeader("Starting command: {}\n".format(" ".join(command)))
        self.description = ' '.join(command[:2])
        # FIXME: enable when new style step is switched on yield self.updateSummary()
        yield self.runCommand(cmd)
        if abandonOnFailure and cmd.didFail():
            self.descriptionDone = "repo failed at: {}".format(" ".join(command[:2]))
            msg = "Source step failed while running command {}\n".format(cmd)
            yield self.stdio_log.addStderr(msg)
            raise buildstep.BuildStepFailed()
        return cmd.rc
    def repoDir(self):
        """Path of the .repo directory inside the build workdir."""
        return self.build.path_module.join(self.workdir, ".repo")
    def sourcedirIsUpdateable(self):
        """An existing .repo directory means we can sync in place."""
        return self.pathExists(self.repoDir())
    def run_vc(self, branch, revision, patch):
        return self.doStartVC()
    @defer.inlineCallbacks
    def doStartVC(self):
        """Entry point: sync, maybe update the tarball, apply downloads."""
        self.stdio_log = yield self.addLogForRemoteCommands("stdio")
        self.filterManifestPatches()
        if self.repoDownloads:
            yield self.stdio_log.addHeader("will download:\nrepo download {}\n".format(
                "\nrepo download ".join(self.repoDownloads)))
        self.willRetryInCaseOfFailure = True
        try:
            yield self.doRepoSync()
        except buildstep.BuildStepFailed as e:
            if not self.willRetryInCaseOfFailure:
                raise
            yield self.stdio_log.addStderr("got issue at first try:\n" + str(e) +
                                           "\nRetry after clobber...")
            yield self.doRepoSync(forceClobber=True)
        yield self.maybeUpdateTarball()
        # starting from here, clobbering will not help
        yield self.doRepoDownloads()
        return results.SUCCESS
    @defer.inlineCallbacks
    def doClobberStart(self):
        """Wipe the workdir and re-seed it from the tarball, if any."""
        yield self.runRmdir(self.workdir)
        yield self.runMkdir(self.workdir)
        yield self.maybeExtractTarball()
    @defer.inlineCallbacks
    def doRepoSync(self, forceClobber=False):
        """Initialize (or refresh) the checkout and run "repo sync"."""
        updatable = yield self.sourcedirIsUpdateable()
        if not updatable or forceClobber:
            # no need to re-clobber in case of failure
            self.willRetryInCaseOfFailure = False
            yield self.doClobberStart()
        yield self.doCleanup()
        yield self._repoCmd(['init',
                             '-u', self.manifestURL,
                             '-b', self.manifestBranch,
                             '-m', self.manifestFile,
                             '--depth', str(self.depth)])
        if self.manifestOverrideUrl:
            msg = "overriding manifest with {}\n".format(self.manifestOverrideUrl)
            yield self.stdio_log.addHeader(msg)
            # the override may be a file already present on the worker, or an URL
            local_path = self.build.path_module.join(self.workdir, self.manifestOverrideUrl)
            local_file = yield self.pathExists(local_path)
            if local_file:
                yield self._Cmd(["cp", "-f", self.manifestOverrideUrl, "manifest_override.xml"])
            else:
                yield self._Cmd(["wget", self.manifestOverrideUrl, "-O", "manifest_override.xml"])
            yield self._Cmd(["ln", "-sf", "../manifest_override.xml", "manifest.xml"],
                            workdir=self.build.path_module.join(self.workdir, ".repo"))
        for command in self.manifestDownloads:
            yield self._Cmd(command, workdir=self.build.path_module.join(self.workdir, ".repo",
                                                                         "manifests"))
        command = ['sync', '--force-sync']
        if self.jobs:
            command.append('-j' + str(self.jobs))
        if not self.syncAllBranches:
            # -c restricts sync to the current branch only (faster)
            command.append('-c')
        if self.syncQuietly:
            command.append('-q')
        self.description = "repo sync"
        # FIXME: enable when new style step is used: yield self.updateSummary()
        yield self.stdio_log.addHeader("synching manifest {} from branch {} from {}\n".format(
            self.manifestFile, self.manifestBranch, self.manifestURL))
        yield self._repoCmd(command)
        # record the fully-resolved manifest for reproducibility
        command = ['manifest', '-r', '-o', 'manifest-original.xml']
        yield self._repoCmd(command)
    # check whether msg matches one of the
    # compiled regexps in self.re_error_messages
    def _findErrorMessages(self, error_re):
        """Return True if error_re matches the last command's stdout/stderr."""
        for logname in ['stderr', 'stdout']:
            if not hasattr(self.lastCommand, logname):
                continue
            msg = getattr(self.lastCommand, logname)
            if not (re.search(error_re, msg) is None):
                return True
        return False
    def _sleep(self, delay):
        """Return a Deferred that fires after *delay* seconds."""
        d = defer.Deferred()
        reactor.callLater(delay, d.callback, 1)
        return d
    @defer.inlineCallbacks
    def doRepoDownloads(self):
        """Run "repo download" for each requested changeset.

        Retries on missing refs (mirror desynchronization), fails the step
        on cherry-pick conflicts, and records the downloaded changesets in
        the "repo_downloaded" property.
        """
        self.repo_downloaded = ""
        for download in self.repoDownloads:
            command = ['download'] + download.split(' ')
            yield self.stdio_log.addHeader("downloading changeset {}\n".format(download))
            retry = self.mirror_sync_retry + 1
            while retry > 0:
                yield self._repoCmd(command, abandonOnFailure=False,
                                    collectStdout=True, collectStderr=True)
                if not self._findErrorMessages(self.ref_not_found_re):
                    break
                # ref not found yet: the mirror may still be syncing; wait and retry
                retry -= 1
                yield self.stdio_log.addStderr("failed downloading changeset {}\n".format(download))
                yield self.stdio_log.addHeader("wait one minute for mirror sync\n")
                yield self._sleep(self.mirror_sync_sleep)
            if retry == 0:
                self.descriptionDone = "repo: change {} does not exist".format(download)
                raise buildstep.BuildStepFailed()
            if self.lastCommand.didFail() or self._findErrorMessages(self.cherry_pick_error_re):
                # cherry pick error! We create a diff with status current workdir
                # in stdout, which reveals the merge errors and exit
                command = ['forall', '-c', 'git', 'diff', 'HEAD']
                yield self._repoCmd(command, abandonOnFailure=False)
                self.descriptionDone = "download failed: {}".format(download)
                raise buildstep.BuildStepFailed()
            if hasattr(self.lastCommand, 'stderr'):
                lines = self.lastCommand.stderr.split("\n")
                match1 = match2 = False
                for line in lines:
                    if not match1:
                        match1 = self.re_change.match(line)
                    if not match2:
                        match2 = self.re_head.match(line)
                if match1 and match2:
                    self.repo_downloaded += "{}/{} {} ".format(match1.group(1), match1.group(2),
                                                               match2.group(1))
        self.setProperty("repo_downloaded", self.repo_downloaded, "Source")
    def computeTarballOptions(self):
        """Choose tar compression flags from the tarball file extension."""
        # Keep in mind that the compression part of tarball generation
        # can be non negligible
        tar = ['tar']
        if self.tarball.endswith("pigz"):
            tar.append('-I')
            tar.append('pigz')
        elif self.tarball.endswith("gz"):
            tar.append('-z')
        elif self.tarball.endswith("bz2") or self.tarball.endswith("bz"):
            tar.append('-j')
        elif self.tarball.endswith("lzma"):
            tar.append('--lzma')
        elif self.tarball.endswith("lzop"):
            tar.append('--lzop')
        return tar
    @defer.inlineCallbacks
    def maybeExtractTarball(self):
        """Seed the workdir from the tarball, discarding it if corrupt."""
        if self.tarball:
            tar = self.computeTarballOptions() + ['-xvf', self.tarball]
            res = yield self._Cmd(tar, abandonOnFailure=False)
            if res:  # error with tarball.. erase repo dir and tarball
                yield self._Cmd(["rm", "-f", self.tarball], abandonOnFailure=False)
                yield self.runRmdir(self.repoDir(), abandonOnFailure=False)
    @defer.inlineCallbacks
    def maybeUpdateTarball(self):
        """Recreate the tarball when it is missing or older than allowed."""
        if not self.tarball or self.updateTarballAge is None:
            return
        # tarball path is absolute, so we cannot use worker's stat command
        # stat -c%Y gives mtime in second since epoch
        res = yield self._Cmd(["stat", "-c%Y", self.tarball], collectStdout=True,
                              abandonOnFailure=False)
        if not res:
            tarball_mtime = int(self.lastCommand.stdout)
            yield self._Cmd(["stat", "-c%Y", "."], collectStdout=True)
            now_mtime = int(self.lastCommand.stdout)
            age = now_mtime - tarball_mtime
        if res or age > self.updateTarballAge:
            # rebuild the tarball (stat failed means it does not exist yet)
            tar = self.computeTarballOptions() + \
                ['-cvf', self.tarball, ".repo"]
            res = yield self._Cmd(tar, abandonOnFailure=False)
            if res:  # error with tarball.. erase tarball, but don't fail
                yield self._Cmd(["rm", "-f", self.tarball], abandonOnFailure=False)
    # a simple shell script to gather all cleanup tweaks...
    # doing them one by one just complicate the stuff
    # and mess up the stdio log
    def _getCleanupCommand(self):
        """also used by tests for expectations"""
        return textwrap.dedent("""\
            set -v
            if [ -d .repo/manifests ]
            then
                # repo just refuse to run if manifest is messed up
                # so ensure we are in a known state
                cd .repo/manifests
                rm -f .git/index.lock
                git fetch origin
                git reset --hard remotes/origin/%(manifestBranch)s
                git config branch.default.merge %(manifestBranch)s
                cd ..
                ln -sf manifests/%(manifestFile)s manifest.xml
                cd ..
            fi
            repo forall -c rm -f .git/index.lock
            repo forall -c git clean -f -d -x 2>/dev/null
            repo forall -c git reset --hard HEAD 2>/dev/null
            rm -f %(workdir)s/.repo/project.list
            """) % dict(manifestBranch=self.manifestBranch,
                        manifestFile=self.manifestFile,
                        workdir=self.workdir)
    def doCleanup(self):
        """Run the cleanup script; best effort, failures are tolerated."""
        command = self._getCleanupCommand()
        return self._Cmd(["bash", "-c", command], abandonOnFailure=False)
| tardyp/buildbot | master/buildbot/steps/source/repo.py | Python | gpl-2.0 | 19,517 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Edgewall Software
# Copyright (C) 2015 Dirk Stöcker <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
#
# Author: Dirk Stöcker <[email protected]>
import urllib2
from trac.admin import IAdminPanelProvider
from trac.config import BoolOption, IntOption
from trac.core import Component, implements
from trac.web.api import HTTPNotFound
from trac.web.chrome import (
ITemplateProvider, add_link, add_script, add_script_data, add_stylesheet)
from tracspamfilter.api import _, gettext, ngettext
from tracspamfilter.filters.akismet import AkismetFilterStrategy
from tracspamfilter.filters.blogspam import BlogSpamFilterStrategy
from tracspamfilter.filters.botscout import BotScoutFilterStrategy
from tracspamfilter.filters.fspamlist import FSpamListFilterStrategy
from tracspamfilter.filters.stopforumspam import StopForumSpamFilterStrategy
from tracspamfilter.filtersystem import FilterSystem
from tracspamfilter.model import LogEntry, Statistics
try:
from tracspamfilter.filters.bayes import BayesianFilterStrategy
except ImportError: # SpamBayes not installed
BayesianFilterStrategy = None
try:
from tracspamfilter.filters.httpbl import HttpBLFilterStrategy
from tracspamfilter.filters.ip_blacklist import IPBlacklistFilterStrategy
from tracspamfilter.filters.url_blacklist import URLBlacklistFilterStrategy
except ImportError: # DNS python not installed
HttpBLFilterStrategy = None
IPBlacklistFilterStrategy = None
URLBlacklistFilterStrategy = None
try:
from tracspamfilter.filters.mollom import MollomFilterStrategy
except ImportError: # Mollom not installed
MollomFilterStrategy = None
class SpamFilterAdminPageProvider(Component):
    """Web administration panel for configuring and monitoring the spam
    filtering system.
    """
    implements(ITemplateProvider)
    implements(IAdminPanelProvider)
    # Hard bounds for the per-page entry count on the monitoring panel.
    MAX_PER_PAGE = 10000
    MIN_PER_PAGE = 5
    # Default page size, stored in trac.ini as [spam-filter] spam_monitor_entries.
    DEF_PER_PAGE = IntOption('spam-filter', 'spam_monitor_entries', '100',
                             "How many monitor entries are displayed by default "
                             "(between 5 and 10000).", doc_domain='tracspamfilter')
    train_only = BoolOption('spam-filter', 'show_train_only', False,
                            "Show the buttons for training without deleting entry.",
                            doc_domain='tracspamfilter')
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        """Advertise the config/monitor panels according to permissions."""
        if 'SPAM_CONFIG' in req.perm:
            yield ('spamfilter', _("Spam Filtering"),
                   'config', _("Configuration"))
        if 'SPAM_MONITOR' in req.perm:
            yield ('spamfilter', _("Spam Filtering"),
                   'monitor', _("Monitoring"))
    def render_admin_panel(self, req, cat, page, path_info):
        """Dispatch to the configuration or monitoring sub-panel.

        POSTs are processed first and redirect back on success
        (post/redirect/get); GETs render the panel data.
        """
        if page == 'config':
            if req.method == 'POST':
                if self._process_config_panel(req):
                    req.redirect(req.href.admin(cat, page))
            data = self._render_config_panel(req, cat, page)
        else:
            if req.method == 'POST':
                if self._process_monitoring_panel(req):
                    # preserve the current pagination in the redirect
                    req.redirect(req.href.admin(cat, page,
                                                page=req.args.getint('page'),
                                                num=req.args.getint('num')))
            if path_info:
                # drill-down view of a single log entry
                data = self._render_monitoring_entry(req, cat, page, path_info)
                page = 'entry'
            else:
                data = self._render_monitoring_panel(req, cat, page)
                data['allowselect'] = True
                data['monitor'] = True
                add_script_data(req, {
                    'bayestext': _("SpamBayes determined spam probability "
                                   "of %s%%"),
                    # the "% ()" formatting collapses the doubled %% escapes to %
                    'sel100text': _("Select 100.00%% entries") % (),
                    'sel90text': _("Select >90.00%% entries") % (),
                    'sel10text': _("Select <10.00%% entries") % (),
                    'sel0text': _("Select 0.00%% entries") % (),
                    'selspamtext': _("Select Spam entries"),
                    'selhamtext': _('Select Ham entries')
                })
                add_script(req, 'spamfilter/adminmonitor.js')
                add_script_data(req, {'toggleform': 'spammonitorform'})
                add_script(req, 'spamfilter/toggle.js')
        add_stylesheet(req, 'spamfilter/admin.css')
        data['accmgr'] = 'ACCTMGR_USER_ADMIN' in req.perm
        # template name is admin_spamconfig / admin_spammonitor / admin_spamentry
        return 'admin_spam%s.html' % page, data
    # ITemplateProvider methods
    def get_htdocs_dirs(self):
        """Map the 'spamfilter' static-resource prefix to this package."""
        from pkg_resources import resource_filename
        return [('spamfilter', resource_filename(__name__, 'htdocs'))]
    def get_templates_dirs(self):
        """Expose this package's bundled templates directory."""
        from pkg_resources import resource_filename
        return [resource_filename(__name__, 'templates')]
    # Internal methods
    def _render_config_panel(self, req, cat, page):
        """Collect karma settings of every strategy plus global options."""
        req.perm.require('SPAM_CONFIG')
        filter_system = FilterSystem(self.env)
        strategies = []
        for strategy in filter_system.strategies:
            # every *karma_points attribute of a strategy is a tunable option
            for variable in dir(strategy):
                if variable.endswith('karma_points'):
                    strategies.append({
                        'name': strategy.__class__.__name__,
                        'karma_points': getattr(strategy, variable),
                        'variable': variable,
                        'karma_help': gettext(getattr(strategy.__class__,
                                                      variable).__doc__)
                    })
        add_script(req, 'spamfilter/adminconfig.js')
        return {
            'strategies': sorted(strategies, key=lambda x: x['name']),
            'min_karma': filter_system.min_karma,
            'authenticated_karma': filter_system.authenticated_karma,
            'attachment_karma': filter_system.attachment_karma,
            'register_karma': filter_system.register_karma,
            'trust_authenticated': filter_system.trust_authenticated,
            'logging_enabled': filter_system.logging_enabled,
            'nolog_obvious': filter_system.nolog_obvious,
            'purge_age': filter_system.purge_age,
            'spam_monitor_entries_min': self.MIN_PER_PAGE,
            'spam_monitor_entries_max': self.MAX_PER_PAGE,
            'spam_monitor_entries': self.DEF_PER_PAGE
        }
    def _process_config_panel(self, req):
        """Persist submitted configuration values; returns True when saved."""
        req.perm.require('SPAM_CONFIG')
        spam_config = self.config['spam-filter']
        # numeric options are only written when present and parseable
        min_karma = req.args.as_int('min_karma')
        if min_karma is not None:
            spam_config.set('min_karma', min_karma)
        attachment_karma = req.args.as_int('attachment_karma')
        if attachment_karma is not None:
            spam_config.set('attachment_karma', attachment_karma)
        register_karma = req.args.as_int('register_karma')
        if register_karma is not None:
            spam_config.set('register_karma', register_karma)
        authenticated_karma = req.args.as_int('authenticated_karma')
        if authenticated_karma is not None:
            spam_config.set('authenticated_karma', authenticated_karma)
        for strategy in FilterSystem(self.env).strategies:
            for variable in dir(strategy):
                if variable.endswith('karma_points'):
                    # form field name is <StrategyClass>_<variable>
                    key = strategy.__class__.__name__ + '_' + variable
                    points = req.args.get(key)
                    if points is not None:
                        option = getattr(strategy.__class__, variable)
                        self.config.set(option.section, option.name, points)
        # checkbox options: presence in the request means enabled
        logging_enabled = 'logging_enabled' in req.args
        spam_config.set('logging_enabled', logging_enabled)
        nolog_obvious = 'nolog_obvious' in req.args
        spam_config.set('nolog_obvious', nolog_obvious)
        trust_authenticated = 'trust_authenticated' in req.args
        spam_config.set('trust_authenticated', trust_authenticated)
        if logging_enabled:
            purge_age = req.args.as_int('purge_age')
            if purge_age is not None:
                spam_config.set('purge_age', purge_age)
            spam_monitor_entries = req.args.as_int('spam_monitor_entries',
                                                   min=self.MIN_PER_PAGE,
                                                   max=self.MAX_PER_PAGE)
            if spam_monitor_entries is not None:
                spam_config.set('spam_monitor_entries', spam_monitor_entries)
        self.config.save()
        return True
    def _render_monitoring_panel(self, req, cat, page):
        """Render one page of the spam log with prev/next pagination links."""
        req.perm.require('SPAM_MONITOR')
        # request args are 1-based; internal page number is 0-based
        pagenum = req.args.as_int('page', 1) - 1
        pagesize = req.args.as_int('num', self.DEF_PER_PAGE,
                                   min=self.MIN_PER_PAGE,
                                   max=self.MAX_PER_PAGE)
        total = LogEntry.count(self.env)
        if total < pagesize:
            pagenum = 0
        elif total <= pagenum * pagesize:
            # clamp to the last page
            # NOTE(review): relies on Python 2 integer division — verify
            # behaviour (use // ) if this is ever ported to Python 3
            pagenum = (total - 1) / pagesize
        offset = pagenum * pagesize
        entries = list(LogEntry.select(self.env, limit=pagesize,
                                       offset=offset))
        if pagenum > 0:
            add_link(req, 'prev',
                     req.href.admin(cat, page, page=pagenum, num=pagesize),
                     _("Previous Page"))
        if offset + pagesize < total:
            add_link(req, 'next',
                     req.href.admin(cat, page, page=pagenum + 2, num=pagesize),
                     _("Next Page"))
        return {
            'enabled': FilterSystem(self.env).logging_enabled,
            'entries': entries,
            'offset': offset + 1,
            'page': pagenum + 1,
            'num': pagesize,
            'total': total,
            'train_only': self.train_only
        }
    def _render_monitoring_entry(self, req, cat, page, entry_id):
        """Render a single log entry with prev/up/next navigation links."""
        req.perm.require('SPAM_MONITOR')
        entry = LogEntry.fetch(self.env, entry_id)
        if not entry:
            raise HTTPNotFound(_("Log entry not found"))
        previous = entry.get_previous()
        if previous:
            add_link(req, 'prev', req.href.admin(cat, page, previous.id),
                     _("Log Entry %(id)s", id=previous.id))
        add_link(req, 'up', req.href.admin(cat, page), _("Log Entry List"))
        next = entry.get_next()
        if next:
            add_link(req, 'next', req.href.admin(cat, page, next.id),
                     _("Log Entry %(id)s", id=next.id))
        return {'entry': entry, 'train_only': self.train_only}
    def _process_monitoring_panel(self, req):
        """Apply the selected train/delete action to the chosen log entries."""
        req.perm.require('SPAM_TRAIN')
        filtersys = FilterSystem(self.env)
        # which button was pressed determines spam/ham training and deletion
        spam = 'markspam' in req.args or 'markspamdel' in req.args
        train = spam or 'markham' in req.args or 'markhamdel' in req.args
        delete = 'delete' in req.args or 'markspamdel' in req.args or \
                 'markhamdel' in req.args or 'deletenostats' in req.args
        # plain "delete" also updates the statistics; "deletenostats" does not
        deletestats = 'delete' in req.args
        if train or delete:
            entries = req.args.getlist('sel')
            if entries:
                if train:
                    filtersys.train(req, entries, spam=spam, delete=delete)
                elif delete:
                    filtersys.delete(req, entries, deletestats)
        if 'deleteobvious' in req.args:
            filtersys.deleteobvious(req)
        return True
class ExternalAdminPageProvider(Component):
    """Web administration panel for configuring the External spam filters."""
    implements(IAdminPanelProvider)
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        """Advertise the panel to users with SPAM_CONFIG permission."""
        if 'SPAM_CONFIG' in req.perm:
            yield ('spamfilter', _("Spam Filtering"),
                   'external', _("External Services"))
    def render_admin_panel(self, req, cat, page, path_info):
        """Render and process the external-services configuration form.

        On POST, verifies the submitted API keys against the services and
        saves the configuration only when no verification error occurred;
        on GET, loads the current values from the filter strategies.
        """
        req.perm.require('SPAM_CONFIG')
        data = {}
        spam_config = self.config['spam-filter']
        akismet = AkismetFilterStrategy(self.env)
        stopforumspam = StopForumSpamFilterStrategy(self.env)
        botscout = BotScoutFilterStrategy(self.env)
        fspamlist = FSpamListFilterStrategy(self.env)
        ip_blacklist_default = ip6_blacklist_default = \
            url_blacklist_default = None
        # the blacklist strategies are only available when dnspython is installed
        if HttpBLFilterStrategy:
            ip_blacklist = IPBlacklistFilterStrategy(self.env)
            ip_blacklist_default = ip_blacklist.servers_default
            ip6_blacklist_default = ip_blacklist.servers6_default
            url_blacklist = URLBlacklistFilterStrategy(self.env)
            url_blacklist_default = url_blacklist.servers_default
        mollom = 0
        if MollomFilterStrategy:
            mollom = MollomFilterStrategy(self.env)
        blogspam = BlogSpamFilterStrategy(self.env)
        if req.method == 'POST':
            if 'cancel' in req.args:
                req.redirect(req.href.admin(cat, page))
            # read back every form field
            akismet_api_url = req.args.get('akismet_api_url')
            akismet_api_key = req.args.get('akismet_api_key')
            mollom_api_url = req.args.get('mollom_api_url')
            mollom_public_key = req.args.get('mollom_public_key')
            mollom_private_key = req.args.get('mollom_private_key')
            stopforumspam_api_key = req.args.get('stopforumspam_api_key')
            botscout_api_key = req.args.get('botscout_api_key')
            fspamlist_api_key = req.args.get('fspamlist_api_key')
            httpbl_api_key = req.args.get('httpbl_api_key')
            ip_blacklist_servers = req.args.get('ip_blacklist_servers')
            ip6_blacklist_servers = req.args.get('ip6_blacklist_servers')
            url_blacklist_servers = req.args.get('url_blacklist_servers')
            blogspam_api_url = req.args.get('blogspam_api_url')
            blogspam_skip_tests = req.args.get('blogspam_skip_tests')
            use_external = 'use_external' in req.args
            train_external = 'train_external' in req.args
            skip_external = req.args.get('skip_external')
            stop_external = req.args.get('stop_external')
            skip_externalham = req.args.get('skip_externalham')
            stop_externalham = req.args.get('stop_externalham')
            # verify the akismet key; a network failure is reported as an error
            try:
                verified_key = akismet.verify_key(req, akismet_api_url,
                                                  akismet_api_key)
                if akismet_api_key and not verified_key:
                    data['akismeterror'] = 'The API key is invalid'
                    data['error'] = 1
            except urllib2.URLError, e:
                # NOTE(review): 'alismeterror' looks like a typo for
                # 'akismeterror' (the key used above) — the template most
                # likely never displays this network-failure message
                data['alismeterror'] = e.reason[1]
                data['error'] = 1
            if mollom:
                try:
                    verified_key = mollom.verify_key(req, mollom_api_url,
                                                     mollom_public_key,
                                                     mollom_private_key)
                except urllib2.URLError, e:
                    data['mollomerror'] = e.reason[1]
                    data['error'] = 1
                else:
                    if mollom_public_key and mollom_private_key and \
                            not verified_key:
                        data['mollomerror'] = 'The API keys are invalid'
                        data['error'] = 1
            # only persist the settings when all verifications succeeded
            if not data.get('error', 0):
                spam_config.set('akismet_api_url', akismet_api_url)
                spam_config.set('akismet_api_key', akismet_api_key)
                spam_config.set('mollom_api_url', mollom_api_url)
                spam_config.set('mollom_public_key', mollom_public_key)
                spam_config.set('mollom_private_key', mollom_private_key)
                spam_config.set('stopforumspam_api_key', stopforumspam_api_key)
                spam_config.set('botscout_api_key', botscout_api_key)
                spam_config.set('fspamlist_api_key', fspamlist_api_key)
                spam_config.set('httpbl_api_key', httpbl_api_key)
                if HttpBLFilterStrategy:
                    # only store blacklist servers that differ from the default
                    if ip_blacklist_servers != ip_blacklist_default:
                        spam_config.set('ip_blacklist_servers',
                                        ip_blacklist_servers)
                    else:
                        spam_config.remove('ip_blacklist_servers')
                    if ip6_blacklist_servers != ip6_blacklist_default:
                        spam_config.set('ip6_blacklist_servers',
                                        ip6_blacklist_servers)
                    else:
                        spam_config.remove('ip6_blacklist_servers')
                    if url_blacklist_servers != url_blacklist_default:
                        spam_config.set('url_blacklist_servers',
                                        url_blacklist_servers)
                    else:
                        spam_config.remove('url_blacklist_servers')
                spam_config.set('blogspam_json_api_url',
                                blogspam_api_url)
                spam_config.set('blogspam_json_skip_tests',
                                blogspam_skip_tests)
                spam_config.set('use_external', use_external)
                spam_config.set('train_external', train_external)
                spam_config.set('skip_external', skip_external)
                spam_config.set('stop_external', stop_external)
                spam_config.set('skip_externalham', skip_externalham)
                spam_config.set('stop_externalham', stop_externalham)
                self.config.save()
                req.redirect(req.href.admin(cat, page))
        else:
            # GET: populate the form with the currently configured values
            filter_system = FilterSystem(self.env)
            use_external = filter_system.use_external
            train_external = filter_system.train_external
            skip_external = filter_system.skip_external
            stop_external = filter_system.stop_external
            skip_externalham = filter_system.skip_externalham
            stop_externalham = filter_system.stop_externalham
            blogspam_api_url = blogspam.api_url
            blogspam_skip_tests = ','.join(blogspam.skip_tests)
            akismet_api_url = akismet.api_url
            akismet_api_key = akismet.api_key
            mollom_public_key = mollom_private_key = mollom_api_url = None
            if MollomFilterStrategy:
                mollom_api_url = mollom.api_url
                mollom_public_key = mollom.public_key
                mollom_private_key = mollom.private_key
            stopforumspam_api_key = stopforumspam.api_key
            botscout_api_key = botscout.api_key
            fspamlist_api_key = fspamlist.api_key
            httpbl_api_key = spam_config.get('httpbl_api_key')
            ip_blacklist_servers = spam_config.get('ip_blacklist_servers')
            ip6_blacklist_servers = spam_config.get('ip6_blacklist_servers')
            url_blacklist_servers = spam_config.get('url_blacklist_servers')
        if HttpBLFilterStrategy:
            data['blacklists'] = 1
            data['ip_blacklist_default'] = ip_blacklist_default
            data['ip6_blacklist_default'] = ip6_blacklist_default
            data['url_blacklist_default'] = url_blacklist_default
        if MollomFilterStrategy:
            data['mollom'] = 1
            data['mollom_public_key'] = mollom_public_key
            data['mollom_private_key'] = mollom_private_key
            data['mollom_api_url'] = mollom_api_url
        data['blogspam_api_url'] = blogspam_api_url
        data['blogspam_skip_tests'] = blogspam_skip_tests
        data['blogspam_methods'] = blogspam.getmethods()
        data.update({
            'akismet_api_key': akismet_api_key,
            'akismet_api_url': akismet_api_url,
            'httpbl_api_key': httpbl_api_key,
            'stopforumspam_api_key': stopforumspam_api_key,
            'botscout_api_key': botscout_api_key,
            'fspamlist_api_key': fspamlist_api_key,
            'use_external': use_external,
            'train_external': train_external,
            'skip_external': skip_external,
            'stop_external': stop_external,
            'skip_externalham': skip_externalham,
            'stop_externalham': stop_externalham,
            'ip_blacklist_servers': ip_blacklist_servers,
            'ip6_blacklist_servers': ip6_blacklist_servers,
            'url_blacklist_servers': url_blacklist_servers
        })
        add_script(req, 'spamfilter/adminexternal.js')
        add_stylesheet(req, 'spamfilter/admin.css')
        return 'admin_external.html', data
class BayesAdminPageProvider(Component):
    """Web administration panel for configuring the Bayes spam filter."""
    # only register the admin panel when SpamBayes is importable
    if BayesianFilterStrategy:
        implements(IAdminPanelProvider)
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        """Advertise the Bayes panel to users with SPAM_CONFIG permission."""
        if 'SPAM_CONFIG' in req.perm:
            yield 'spamfilter', _("Spam Filtering"), 'bayes', _("Bayes")
def render_admin_panel(self, req, cat, page, path_info):
req.perm.require('SPAM_CONFIG')
bayes = BayesianFilterStrategy(self.env)
hammie = bayes._get_hammie()
data = {}
if req.method == 'POST':
if 'train' in req.args:
bayes.train(None, None, req.args['bayes_content'], '127.0.0.1',
spam='spam' in req.args['train'].lower())
req.redirect(req.href.admin(cat, page))
elif 'test' in req.args:
bayes_content = req.args['bayes_content']
data['content'] = bayes_content
try:
data['score'] = hammie.score(bayes_content.encode('utf-8'))
except Exception, e:
self.log.warn('Bayes test failed: %s', e, exc_info=True)
data['error'] = unicode(e)
else:
if 'reset' in req.args:
self.log.info('Resetting SpamBayes training database')
self.env.db_transaction("DELETE FROM spamfilter_bayes")
elif 'reduce' in req.args:
self.log.info('Reducing SpamBayes training database')
bayes.reduce()
min_training = req.args.as_int('min_training')
if min_training is not None and \
min_training != bayes.min_training:
self.config.set('spam-filter', 'bayes_min_training',
min_training)
self.config.save()
min_dbcount = req.args.as_int('min_dbcount')
if min_dbcount is not None and \
min_dbcount != bayes.min_dbcount:
self.config.set('spam-filter', 'bayes_min_dbcount',
min_dbcount)
self.config.save()
req.redirect(req.href.admin(cat, page))
ratio = ''
nspam = hammie.bayes.nspam
nham = hammie.bayes.nham
if nham and nspam:
if nspam > nham:
ratio = _("(ratio %.1f : 1)") % (float(nspam) / float(nham))
else:
ratio = _("(ratio 1 : %.1f)") % (float(nham) / float(nspam))
dblines, dblines_spamonly, dblines_hamonly, dblines_reduce = \
bayes.dblines()
dblines_mixed = dblines - dblines_hamonly - dblines_spamonly
data.update({
'min_training': bayes.min_training,
'min_dbcount': bayes.min_dbcount,
'dblines': dblines,
'dblinesreducenum': dblines_reduce,
'dblinesspamonly':
ngettext("%(num)d spam", "%(num)d spam", dblines_spamonly),
'dblineshamonly':
ngettext("%(num)d ham", "%(num)d ham", dblines_hamonly),
'dblinesreduce':
ngettext("%(num)d line", "%(num)d lines", dblines_reduce),
'dblinesmixed':
ngettext("%(num)d mixed", "%(num)d mixed", dblines_mixed),
'nspam': nspam,
'nham': nham,
'ratio': ratio
})
add_script_data(req, {'hasdata': True if nham + nspam > 0 else False})
add_script(req, 'spamfilter/adminbayes.js')
add_stylesheet(req, 'spamfilter/admin.css')
return 'admin_bayes.html', data
class StatisticsAdminPageProvider(Component):
    """Web administration panel for spam filter statistics."""
    implements(IAdminPanelProvider)
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        """Yield the statistics panel for users allowed to configure spam."""
        if 'SPAM_CONFIG' in req.perm:
            yield ('spamfilter', _("Spam Filtering"),
                   'statistics', _("Statistics"))
    def render_admin_panel(self, req, cat, page, path_info):
        """Render the statistics page; a POST purges stored statistics."""
        req.perm.require('SPAM_CONFIG')
        collector = Statistics(self.env)
        if req.method == 'POST':
            if 'clean' in req.args:
                # Purge the counters of a single strategy only.
                collector.clean(req.args['strategy'])
            elif 'cleanall' in req.args:
                collector.cleanall()
            # Redirect after POST so a refresh does not repeat the purge.
            req.redirect(req.href.admin(cat, page))
        strategies, overall = collector.getstats()
        add_stylesheet(req, 'spamfilter/admin.css')
        return 'admin_statistics.html', {'strategies': strategies,
                                         'overall': overall}
| Puppet-Finland/trac | files/spam-filter/tracspamfilter/admin.py | Python | bsd-2-clause | 26,364 |
"""Represents pricing data for a given block of time."""
class Bar:
    """One bar of pricing data covering a fixed block of time.

    The constructor records the symbol and timestamp; the OHLC fields
    (open, high, low, close), the traded volume and the trade count all
    start at zero and are filled in by the caller.  Bars order by their
    timestamp.
    """
    def __init__(self, local_symbol, milliseconds):
        """Create a zeroed bar.

        local_symbol -- ticker symbol
        milliseconds -- time in milliseconds since the Epoch
        """
        self.local_symbol = local_symbol
        self.milliseconds = milliseconds
        # Price fields default to 0.0; chained assignment is safe because
        # floats are immutable.
        self.open = self.high = self.low = self.close = 0.0
        self.volume = 0
        self.count = 0
    def __lt__(self, other):
        """Order bars chronologically by their millisecond timestamp."""
        return self.milliseconds < other.milliseconds
| larmer01/ibapipy | data/bar.py | Python | apache-2.0 | 1,107 |
# The following is a Python translation of a MATLAB file originally written principally by Mike Tipping
# as part of his SparseBayes software library. Initially published on GitHub on July 21st, 2015.
# SB2_USEROPTIONS User option specification for SPARSEBAYES
#
# OPTIONS = SB2_USEROPTIONS(parameter1, value1, parameter2, value2,...)
#
# OUTPUT ARGUMENTS:
#
# OPTIONS An options structure to pass to SPARSEBAYES
#
# INPUT ARGUMENTS:
#
# Optional number of parameter-value pairs to specify the following:
#
# ITERATIONS Number of interations to run for.
#
# TIME Time limit to run for, expressed as a space-separated
# string. e.g. '1.5 hours', '30 minutes', '1 second'.
#
# DIAGNOSTICLEVEL Integer [0,4] or string to determine the verbosity of
# diagnostic output.
# 0 or 'ZERO' or 'NONE' No output
# 1 or 'LOW' Low level of output
# 2 or 'MEDIUM' etc...
# 3 or 'HIGH'
# 4 or 'ULTRA'
#
# DIAGNOSTICFILE Filename to write diagnostics to file instead of
# the default stdout.
#
# MONITOR Integer number: diagnostic information is output
# every MONITOR iterations.
#
# FIXEDNOISE True/false whether the Gaussian noise is to be fixed
# (default: false.
#
# FREEBASIS Indices of basis vectors considered "free" and not
# constrained by the Bayesian prior (e.g. the "bias").
#
# CALLBACK External function to call each iteration of the algorithm
# (string). Intended to facilitate graphical demos etc.
#
# CALLBACKDATA Arbitrary additional data to pass to the CALLBACK
# function.
#
# EXAMPLE:
#
# OPTIONS = SB2_UserOptions('diagnosticLevel','medium',...
# 'monitor',25,...
# 'diagnosticFile', 'logfile.txt');
#
# NOTES:
#
# Each option (field of OPTIONS) is given a default value in
# SB2_USEROPTIONS. Any supplied property-value pairs over-ride those
# defaults.
#
#
# Copyright 2009, Vector Anomaly Ltd
#
# This file is part of the SPARSEBAYES library for Matlab (V2.0).
#
# SPARSEBAYES is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# SPARSEBAYES is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with SPARSEBAYES in the accompanying file "licence.txt"; if not, write to
# the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
# MA 02110-1301 USA
#
# Contact the author: m a i l [at] m i k e t i p p i n g . c o m
#
def SB2_UserOptions(*args):
    """Build the OPTIONS dictionary for SPARSEBAYES from (property, value) pairs.

    Arguments are supplied as alternating property names and values, e.g.
    SB2_UserOptions('DIAGNOSTICLEVEL', 'MEDIUM', 'MONITOR', 25).
    Property names must match the (uppercase) default keys below.
    Raises Exception on an odd argument count, an unknown property, or an
    invalid value.  Returns the populated OPTIONS dict.
    """
    # Ensure arguments are supplied in pairs
    if len(args) % 2 != 0:
        raise Exception('Arguments to SB2_UserOptions should be (property, value) pairs')
    # Any options specified?  Use floor division so this stays an int on
    # Python 3 (plain '/' yields a float and range() below would raise).
    numSettings = len(args) // 2
    ###########################################################################
    # Set defaults
    OPTIONS = {}
    # Assume we will infer the noise in the Gaussian case
    OPTIONS['FIXEDNOISE'] = False
    # Option to allow subset of the basis (e.g. bias) to be unregularised
    OPTIONS['FREEBASIS'] = []
    # Option to set max iterations to run for
    OPTIONS['ITERATIONS'] = 10000
    # Option to set max time to run for
    OPTIONS['TIME'] = 10000  # seconds
    # Set options for monitoring and recording the algorithm's progress
    OPTIONS['MONITOR'] = 0
    OPTIONS['DIAGNOSTICLEVEL'] = 0
    OPTIONS['DIAGNOSTICFID'] = 1  # stdout
    OPTIONS['DIAGNOSTICFILE'] = []
    # Option to call a function during each iteration (to create demos etc)
    OPTIONS['CALLBACK'] = False
    OPTIONS['CALLBACKFUNC'] = []
    OPTIONS['CALLBACKDATA'] = {}
    ###########################################################################
    # Parse string/variable pairs
    for n in range(numSettings):
        property_ = args[n*2]
        value = args[n*2 + 1]
        if property_ not in OPTIONS:
            raise Exception('Unrecognised user option: {0}'.format(property_))
        OPTIONS[property_] = value
        if property_ == 'DIAGNOSTICLEVEL':
            # Accept either a symbolic name or an integer in [0, 4].
            if type(value) is str:
                if value == 'ZERO' or value == 'NONE':
                    OPTIONS['DIAGNOSTICLEVEL'] = 0
                elif value == 'LOW':
                    OPTIONS['DIAGNOSTICLEVEL'] = 1
                elif value == 'MEDIUM':
                    OPTIONS['DIAGNOSTICLEVEL'] = 2
                elif value == 'HIGH':
                    OPTIONS['DIAGNOSTICLEVEL'] = 3
                elif value == 'ULTRA':
                    OPTIONS['DIAGNOSTICLEVEL'] = 4
                else:
                    raise Exception('Unrecognised textual diagnostic level: {0}'.format(value))
            elif type(value) is int:
                if value < 0 or value > 4:
                    raise Exception('Supplied level should be integer in [0,4], or one of ZERO/LOW/MEDIUM/HIGH/ULTRA')
        if property_ == 'DIAGNOSTICFILE':
            OPTIONS['DIAGNOSTICFID'] = -1  # The file will be opened later
        if property_ == 'CALLBACK':
            OPTIONS['CALLBACK'] = True
            OPTIONS['CALLBACKFUNC'] = value
            # The original test `value not in locals()` could never succeed:
            # `in` on a dict checks its *keys* (local variable names), so a
            # function object always failed it.  Validate callability instead.
            if not callable(value):
                raise Exception('Callback function {0} does not appear to exist'.format(value))
        if property_ == 'TIME':
            OPTIONS['TIME'] = timeInSeconds(value)
    return OPTIONS
##### Support function: parse time specification
def timeInSeconds(value_):
    """Convert a time string such as '30 minutes' or '1.5 hours' to seconds.

    The string is two space-separated tokens: a number followed by a unit
    (SECOND[S], MINUTE[S] or HOUR[S], case-insensitive).  Raises Exception
    for any other shape or unit.  Returns an int when the result is whole,
    otherwise a float.
    """
    args = value_.split()
    if len(args) != 2:
        raise Exception('Badly formed time string: {0}'.format(value_))
    unit = args[1].upper()
    # float(), not int(): the documented syntax explicitly allows
    # fractional values such as '1.5 hours'.
    v = float(args[0])
    if unit == 'SECONDS' or unit == 'SECOND':
        pass
    elif unit == 'MINUTES' or unit == 'MINUTE':
        v *= 60
    elif unit == 'HOURS' or unit == 'HOUR':
        v *= 3600
    else:
        raise Exception('Badly formed time string: {0}'.format(value_))
    # Preserve the original integer return type for whole values.
    return int(v) if v == int(v) else v
| jhallock7/SparseBayes-Python | SB2_UserOptions.py | Python | gpl-2.0 | 6,672 |
def double(n):
    """Return twice *n*."""
    return n + n
def halve(n):
    """Return the floor of *n* divided by two."""
    quotient, _ = divmod(n, 2)
    return quotient
def fastmult(m, n):
    """Multiply m by n in O(log n) additions (Russian peasant multiplication).

    The original version silently returned 0 for negative n; negative
    multipliers are now handled by flipping both signs (m*n == (-m)*(-n)),
    which is backward compatible for n >= 0.
    """
    if n < 0:
        # Reduce the negative case to the non-negative one.
        return fastmult(-m, -n)
    if n == 0:
        return 0
    if n % 2 == 0:
        # Even multiplier: double m and halve n.
        return fastmult(m + m, n // 2)
    # Odd multiplier: peel off one addend so n becomes even.
    return m + fastmult(m, n - 1)
#그냥 재귀 | imscs21/myuniv | 1학기/programming/basic/파이썬/파이썬 과제/5/5_6.py | Python | apache-2.0 | 271 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss functions that impose RGB and depth motion-consistency across frames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf # tf
from depth_from_video_in_the_wild import transform_utils
from tensorflow.contrib import resampler as contrib_resampler
def rgbd_consistency_loss(frame1transformed_depth, frame1rgb, frame2depth,
                          frame2rgb):
  """Computes a loss that penalizes RGB and depth inconsistencies between frames.
  This function computes 3 losses that penalize inconsistencies between two
  frames: depth, RGB, and structural similarity. It IS NOT SYMMETRIC with
  respect to both frames. In particular, to address occlusions, it only
  penalizes depth and RGB inconsistencies at pixels where frame1 is closer to
  the camera than frame2. (Why? see https://arxiv.org/abs/1904.04998). Therefore
  the intended usage pattern is running it twice - second time with the two
  frames swapped.
  Args:
    frame1transformed_depth: A transform_depth_map.TransformedDepthMap object
      representing the depth map of frame 1 after it was motion-transformed to
      frame 2, a motion transform that accounts for all camera and object motion
      that occurred between frame1 and frame2. The tensors inside
      frame1transformed_depth are of shape [B, H, W].
    frame1rgb: A tf.Tensor of shape [B, H, W, C] containing the RGB image at
      frame1.
    frame2depth: A tf.Tensor of shape [B, H, W] containing the depth map at
      frame2.
    frame2rgb: A tf.Tensor of shape [B, H, W, C] containing the RGB image at
      frame2.
  Returns:
    A dictionary from string to tf.Tensor, with the following entries:
      depth_error: A tf scalar, the depth mismatch error between the two frames.
      rgb_error: A tf scalar, the rgb mismatch error between the two frames.
      ssim_error: A tf scalar, the structural similarity mismatch error between
        the two frames.
      depth_proximity_weight: A tf.Tensor of shape [B, H, W], representing a
        function that peaks (at 1.0) for pixels where there is depth consistency
        between the two frames, and is small otherwise.
      frame1_closer_to_camera: A tf.Tensor of shape [B, H, W, 1], a mask that is
        1.0 when the depth map of frame 1 has smaller depth than frame 2.
  """
  pixel_xy = frame1transformed_depth.pixel_xy
  # Sample frame2's depth and RGB at the pixel locations where frame1's
  # pixels land after the motion transform, so both are compared in the
  # same (frame-2) pixel grid.
  frame2depth_resampled = _resample_depth(frame2depth, pixel_xy)
  frame2rgb_resampled = contrib_resampler.resampler(
      frame2rgb, pixel_xy)
  # f1td.depth is the predicted depth at [pixel_y, pixel_x] for frame2. Now we
  # generate (by interpolation) the actual depth values for frame2's depth, at
  # the same locations, so that we can compare the two depths.
  # We penalize inconsistencies between the two frames' depth maps only if the
  # transformed depth map (of frame 1) falls closer to the camera than the
  # actual depth map (of frame 2). This is intended for avoiding penalizing
  # points that become occluded because of the transform.
  # So what about depth inconsistencies where frame1's depth map is FARTHER from
  # the camera than frame2's? These will be handled when we swap the roles of
  # frame 1 and 2 (more in https://arxiv.org/abs/1904.04998).
  frame1_closer_to_camera = tf.to_float(
      tf.logical_and(
          frame1transformed_depth.mask,
          tf.less(frame1transformed_depth.depth, frame2depth_resampled)))
  depth_error = tf.reduce_mean(
      tf.abs(frame2depth_resampled - frame1transformed_depth.depth) *
      frame1_closer_to_camera)
  # L1 photometric error, masked by the same occlusion-aware weight.
  rgb_error = (
      tf.abs(frame2rgb_resampled - frame1rgb) * tf.expand_dims(
          frame1_closer_to_camera, -1))
  rgb_error = tf.reduce_mean(rgb_error)
  # We generate a weight function that peaks (at 1.0) for pixels where when the
  # depth difference is less than its standard deviation across the frame, and
  # fall off to zero otherwise. This function is used later for weighing the
  # structural similarity loss term. We only want to demand structural
  # similarity for surfaces that are close to one another in the two frames.
  depth_error_second_moment = _weighted_average(
      tf.square(frame2depth_resampled - frame1transformed_depth.depth),
      frame1_closer_to_camera) + 1e-4
  depth_proximity_weight = (
      depth_error_second_moment /
      (tf.square(frame2depth_resampled - frame1transformed_depth.depth) +
       depth_error_second_moment) * tf.to_float(frame1transformed_depth.mask))
  # If we don't stop the gradient training won't start. The reason is presumably
  # that then the network can push the depths apart instead of seeking RGB
  # consistency.
  depth_proximity_weight = tf.stop_gradient(depth_proximity_weight)
  ssim_error, avg_weight = weighted_ssim(
      frame2rgb_resampled,
      frame1rgb,
      depth_proximity_weight,
      c1=float('inf'),  # These values of c1 and c2 work better than defaults.
      c2=9e-6)
  ssim_error = tf.reduce_mean(ssim_error * avg_weight)
  endpoints = {
      'depth_error': depth_error,
      'rgb_error': rgb_error,
      'ssim_error': ssim_error,
      'depth_proximity_weight': depth_proximity_weight,
      'frame1_closer_to_camera': frame1_closer_to_camera
  }
  return endpoints
def motion_field_consistency_loss(frame1transformed_pixelxy, mask,
                                  rotation1, translation1,
                                  rotation2, translation2):
  """Computes a cycle consistency loss between two motion maps.
  Given two rotation and translation maps (of two frames), and a mapping from
  one frame to the other, this function assists in imposing that the fields at
  frame 1 represent the opposite motion of the ones in frame 2.
  In other words: At any given pixel on frame 1, if we apply the translation and
  rotation designated at that pixel, we land on some pixel in frame 2, and if we
  apply the translation and rotation designated there, we land back at the
  original pixel at frame 1.
  Args:
    frame1transformed_pixelxy: A tf.Tensor of shape [B, H, W, 2] representing
      the motion-transformed location of each pixel in frame 1. It is assumed
      (but not verified) that frame1transformed_pixelxy was obtained by properly
      applying rotation1 and translation1 on the depth map of frame 1.
    mask: A tf.Tensor of shape [B, H, W, 2] expressing the weight of each pixel
      in the calculation of the consistency loss.
    rotation1: A tf.Tensor of shape [B, 3] representing rotation angles.
    translation1: A tf.Tensor of shape [B, H, W, 3] representing translation
      vectors.
    rotation2: A tf.Tensor of shape [B, 3] representing rotation angles.
    translation2: A tf.Tensor of shape [B, H, W, 3] representing translation
      vectors.
  Returns:
    A dictionary from string to tf.Tensor, with the following entries:
      rotation_error: A tf scalar, the rotation consistency error.
      translation_error: A tf scalar, the translation consistency error.
  """
  # Sample frame2's translation field at the locations where frame1's pixels
  # land; stop_gradient so sampling coordinates are treated as constants.
  translation2resampled = contrib_resampler.resampler(
      translation2, tf.stop_gradient(frame1transformed_pixelxy))
  # Broadcast the per-frame rotation vectors to per-pixel fields.
  rotation1field = tf.broadcast_to(
      _expand_dims_twice(rotation1, -2), tf.shape(translation1))
  rotation2field = tf.broadcast_to(
      _expand_dims_twice(rotation2, -2), tf.shape(translation2))
  rotation1matrix = transform_utils.matrix_from_angles(rotation1field)
  rotation2matrix = transform_utils.matrix_from_angles(rotation2field)
  # Composing the two motions should yield the identity: a unit rotation and
  # a zero translation.
  rot_unit, trans_zero = transform_utils.combine(
      rotation2matrix, translation2resampled,
      rotation1matrix, translation1)
  eye = tf.eye(3, batch_shape=tf.shape(rot_unit)[:-2])
  # NOTE: two discarded transform_utils.matrix_from_angles(...) calls, which
  # the author had tagged "Delete this later", were removed here. Their
  # results were unused; they only duplicated the graph nodes built above.
  # We normalize the product of rotations by the product of their norms, to make
  # the loss agnostic of their magnitudes, only wanting them to be opposite in
  # directions. Otherwise the loss has a tendency to drive the rotations to
  # zero.
  rot_error = tf.reduce_mean(tf.square(rot_unit - eye), axis=(3, 4))
  rot1_scale = tf.reduce_mean(tf.square(rotation1matrix - eye), axis=(3, 4))
  rot2_scale = tf.reduce_mean(tf.square(rotation2matrix - eye), axis=(3, 4))
  rot_error /= (1e-24 + rot1_scale + rot2_scale)
  rotation_error = tf.reduce_mean(rot_error)
  def norm(x):
    return tf.reduce_sum(tf.square(x), axis=-1)
  # Here again, we normalize by the magnitudes, for the same reason.
  translation_error = tf.reduce_mean(
      mask * norm(trans_zero) /
      (1e-24 + norm(translation1) + norm(translation2)))
  return {
      'rotation_error': rotation_error,
      'translation_error': translation_error
  }
def rgbd_and_motion_consistency_loss(frame1transformed_depth, frame1rgb,
                                     frame2depth, frame2rgb, rotation1,
                                     translation1, rotation2, translation2):
  """A helper that bundles rgbd and motion consistency losses together."""
  endpoints = rgbd_consistency_loss(frame1transformed_depth, frame1rgb,
                                    frame2depth, frame2rgb)
  # The motion consistency loss is weighted by the occlusion-aware mask
  # computed above, so it is only imposed where frame1transformed_depth is
  # closer to the camera than frame2 (see rgbd_consistency_loss).
  motion_endpoints = motion_field_consistency_loss(
      frame1transformed_depth.pixel_xy,
      endpoints['frame1_closer_to_camera'],
      rotation1, translation1, rotation2, translation2)
  endpoints.update(motion_endpoints)
  return endpoints
def weighted_ssim(x, y, weight, c1=0.01**2, c2=0.03**2, weight_epsilon=0.01):
  """Computes a weighted structured image similarity measure.
  See https://en.wikipedia.org/wiki/Structural_similarity#Algorithm. The only
  difference here is that not all pixels are weighted equally when calculating
  the moments - they are weighted by a weight function.
  Args:
    x: A tf.Tensor representing a batch of images, of shape [B, H, W, C].
    y: A tf.Tensor representing a batch of images, of shape [B, H, W, C].
    weight: A tf.Tensor of shape [B, H, W], representing the weight of each
      pixel in both images when we come to calculate moments (means and
      correlations).
    c1: A floating point number, regularizes division by zero of the means.
    c2: A floating point number, regularizes division by zero of the second
      moments.
    weight_epsilon: A floating point number, used to regularize division by the
      weight.
  Returns:
    A tuple of two tf.Tensors. First, of shape [B, H-2, W-2, C], is scalar
    similarity loss per pixel per channel, and the second, of shape
    [B, H-2, W-2, 1], is the average pooled `weight`. It is needed so that we
    know how much to weigh each pixel in the first tensor. For example, if
    `weight` was very small in some area of the images, the first tensor will
    still assign a loss to these pixels, but we shouldn't take the result too
    seriously.
  """
  # Setting a regularizer to infinity cancels the corresponding SSIM term
  # (its ratio becomes 1); cancelling both would make the loss trivially 0.
  if c1 == float('inf') and c2 == float('inf'):
    raise ValueError('Both c1 and c2 are infinite, SSIM loss is zero. This is '
                     'likely unintended.')
  weight = tf.expand_dims(weight, -1)
  average_pooled_weight = _avg_pool3x3(weight)
  weight_plus_epsilon = weight + weight_epsilon
  inverse_average_pooled_weight = 1.0 / (average_pooled_weight + weight_epsilon)
  def weighted_avg_pool3x3(z):
    # Weighted local mean: pool the weighted values, then renormalize by
    # the pooled weight (both regularized by weight_epsilon).
    wighted_avg = _avg_pool3x3(z * weight_plus_epsilon)
    return wighted_avg * inverse_average_pooled_weight
  # Weighted local means and (co)variances over 3x3 windows.
  mu_x = weighted_avg_pool3x3(x)
  mu_y = weighted_avg_pool3x3(y)
  sigma_x = weighted_avg_pool3x3(x**2) - mu_x**2
  sigma_y = weighted_avg_pool3x3(y**2) - mu_y**2
  sigma_xy = weighted_avg_pool3x3(x * y) - mu_x * mu_y
  # An infinite c1/c2 drops the luminance/contrast factor respectively.
  if c1 == float('inf'):
    ssim_n = (2 * sigma_xy + c2)
    ssim_d = (sigma_x + sigma_y + c2)
  elif c2 == float('inf'):
    ssim_n = 2 * mu_x * mu_y + c1
    ssim_d = mu_x**2 + mu_y**2 + c1
  else:
    ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)
    ssim_d = (mu_x**2 + mu_y**2 + c1) * (sigma_x + sigma_y + c2)
  result = ssim_n / ssim_d
  # Map SSIM in [-1, 1] to a loss in [0, 1] (0 = identical).
  return tf.clip_by_value((1 - result) / 2, 0, 1), average_pooled_weight
def _avg_pool3x3(x):
  """3x3 average pooling, stride 1, no padding (shrinks H and W by 2)."""
  return tf.nn.avg_pool(x, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1],
                        padding='VALID')
def _weighted_average(x, w, epsilon=1.0):
  """Weighted mean of `x` over axes 1 and 2, keeping the reduced dims.

  `epsilon` regularizes the division when the weights sum to ~zero.
  """
  numerator = tf.reduce_sum(x * w, axis=(1, 2), keepdims=True)
  denominator = tf.reduce_sum(w, axis=(1, 2), keepdims=True) + epsilon
  return numerator / denominator
def _resample_depth(depth, coordinates):
  """Bilinearly samples a [B, H, W] depth map at the given pixel coordinates."""
  # The resampler expects a channel dimension; add it, sample, then drop it.
  depth = tf.expand_dims(depth, -1)
  result = contrib_resampler.resampler(depth, coordinates)
  return tf.squeeze(result, axis=3)
def _expand_dims_twice(x, dim):
  """Inserts two size-1 axes at `dim` (e.g. [B, 3] -> [B, 1, 1, 3] for dim=-2)."""
  expanded = tf.expand_dims(x, dim)
  return tf.expand_dims(expanded, dim)
| google-research/google-research | depth_from_video_in_the_wild/consistency_losses.py | Python | apache-2.0 | 13,363 |
#!flask/bin/python
import zmq
import sys
import threading
import json
import settings
import utils
import os
from db import db
from images import images
from infra import infra
class KlapiServer(threading.Thread):
    """ZeroMQ ROUTER/DEALER front-end that fans requests out to worker threads.

    Binds a TCP ROUTER socket for clients and an in-process DEALER socket
    for a pool of KlapiWorker threads, then proxies messages between the
    two until `running` is cleared.
    """
    def __init__(self, sets):
        """Initialize from a settings dict.

        Recognized keys (all optional): backend_port (default 5555),
        backend_name (default 'klapi_backend'), backend_workers (default 5).
        """
        threading.Thread.__init__(self)
        self.port = sets.get('backend_port', 5555)
        self.backend_name = sets.get('backend_name', 'klapi_backend')
        self.worker_count = sets.get('backend_workers', 5)
        # Populated by launch_workers(); run() joins these on shutdown.
        self.workers = []
        self.running = False
    def _init_connection(self):
        """Create the ZMQ context plus the frontend/backend sockets."""
        context = zmq.Context()
        server = context.socket(zmq.ROUTER)
        server.bind('tcp://*:%s' % (self.port))
        backend = context.socket(zmq.DEALER)
        backend.bind('inproc://%s' % (self.backend_name))
        return (context, server, backend)
    def _deinit_connection(self, context, server, backend):
        """Close both sockets and terminate the ZMQ context."""
        server.close()
        backend.close()
        context.term()
    def launch_workers(self, context):
        """Start the worker threads and remember them for shutdown.

        BUGFIX: the original kept the list only in a local variable, so
        run() crashed with AttributeError on `self.workers` at shutdown.
        """
        workers = []
        for _ in range(self.worker_count):
            worker = KlapiWorker(context, self.backend_name)
            worker.start()
            workers.append(worker)
        self.workers = workers
        return workers
    def init_poller(self, server, backend):
        """Return a poller watching both sockets for inbound messages."""
        poll = zmq.Poller()
        poll.register(server, zmq.POLLIN)
        poll.register(backend, zmq.POLLIN)
        return poll
    def serve(self, server, backend):
        """Proxy multipart messages between clients and workers until stopped."""
        self.running = True
        poll = self.init_poller(server, backend)
        print ('Klapi-backend on port %s (%s workers), worker IPC on "%s"' % (
            self.port, self.worker_count, self.backend_name))
        while self.running:
            sockets = dict(poll.poll())
            # Work as a simple proxy
            if server in sockets:
                data = server.recv_multipart()
                backend.send_multipart(data)
            if backend in sockets:
                data = backend.recv_multipart()
                server.send_multipart(data)
    def run(self):
        """Thread entry point: set up sockets, serve, then tear down."""
        (context, server, backend) = self._init_connection()
        self.launch_workers(context)
        self.serve(server, backend)
        self._deinit_connection(context, server, backend)
        for worker in self.workers:
            worker.join()
class KlapiWorker(threading.Thread):
    """Worker thread that serves JSON requests received over in-process ZMQ.

    Each worker connects a DEALER socket to the server's inproc endpoint,
    decodes one JSON message at a time, handles it, and echoes the (possibly
    augmented) message back to the requester.
    """
    def __init__(self, context, backend_name):
        threading.Thread.__init__ (self)
        # Shared ZMQ context from the owning KlapiServer.
        self.context = context
        self.backend_name = backend_name
        self.running = False
    def run(self):
        """Receive/handle/reply loop; exits when a 'terminate' message arrives."""
        worker = self.context.socket(zmq.DEALER)
        worker.connect('inproc://%s' % (self.backend_name))
        self.running = True
        while self.running:
            # Messages arrive as [identity, payload]; payload is JSON.
            ident, msg = worker.recv_multipart()
            msg_json = json.loads(msg)
            result = self.handle(msg_json)
            # A 'terminate' key stops this worker after replying.
            if 'terminate' in result:
                self.running = False
            new_msg = json.dumps(result)
            worker.send_multipart([ident, new_msg])
        worker.close()
    def handle(self, msg):
        """Dispatch a decoded request; returns the message as the reply."""
        if 'createMachine' in msg:
            self.createMachine(msg['createMachine'])
        return msg
    def createMachine(self, res):
        """Create and boot a VM described by `res`.

        `res` is expected to carry at least: name, image, size, cdrom,
        cpus, memory.  Builds the domain XML from the infra provider,
        starts the domain, and stores image-specific user data in the
        'machines' DB table.
        """
        extras = []
        extra = ''
        base = ''
        inf = infra.provider(settings.settings())
        # Clone the base image into a uniquely-prefixed volume.
        volume = self.get_volume_from_image(res['image'], utils.generateID() + '_', resize=res['size'])
        if volume:
            base = os.path.basename(res['image'])
            extras.append(inf.fileStorage(volume))
        cdrom = self.get_cdrom_image(res['cdrom'])
        if cdrom:
            if not base:
                base = os.path.basename(cdrom)
            extras.append(inf.cdromStorage(cdrom))
        # Prefer the volume over the cdrom when picking the image-specific
        # configuration loader.
        image_extra_loader = None
        if volume or cdrom:
            item = cdrom
            if volume:
                item = volume
            image_extra_loader = self.image_extra_config(os.path.basename(item), res['name'])
        image_extra_userdata = {}
        if image_extra_loader is not None:
            print ('Found image loader: %s' % (image_extra_loader.base()))
            extra_device = image_extra_loader.extraDeviceConfig(inf)
            if extra_device:
                extras.append(extra_device)
            image_extra = image_extra_loader.extra()
            if image_extra:
                extra += image_extra
            image_extra_userdata = image_extra_loader.userdata()
        # TODO: Support other features
        if 'network_name' in settings.settings():
            extras.append(inf.defineNetworkInterface(settings.settings()['network_name']))
        extradevices = '\n'.join(extras)
        dom_xml = inf.customDomain(res['name'], res['cpus'], res['memory'], extradevices=extradevices, extra=extra)
        dom = inf.createDomain(dom_xml)
        dom_res = dom.create()
        # Persist the image user data; single quotes are escaped for the
        # SQL string literal below.
        _db = db.connect(settings.settings())
        config_data = json.dumps(image_extra_userdata)
        config_data = config_data.replace('\'', '\\\'')
        db.update(_db, 'machines',
                  'config=\'%s\'' % (config_data),
                  where='id="%s"' % (res['name']))
    def get_volume_from_image(self, image, prefix='', resize=''):
        """Copy `image` to a new volume; returns '' (best effort) on failure."""
        img = images.provider(settings.settings())
        vol = images.volume_provider(settings.settings())
        try:
            src_img = img.get(image)
            return vol.copyFrom(src_img, prefix=prefix, resize=resize)
        except Exception as e:
            print ('ERROR: %s' % (e))
            return ''
    def get_cdrom_image(self, image):
        """Resolve a cdrom image path; returns '' when unavailable."""
        img = images.provider(settings.settings())
        try:
            return img.get(image)
        except:
            return ''
    def image_extra_config(self, name, init_name):
        """Look up the image-specific config class for `name`, or None."""
        loader = images.config.ImageConfig()
        image_class = loader.search(name)
        if image_class is None:
            return None
        return image_class(init_name)
# Script entry point: start the backend server thread and wait for it.
if __name__ == '__main__':
    server = KlapiServer(settings.settings())
    server.start()
    server.join()
| jroivas/klapi | klapi-backend.py | Python | mit | 6,207 |
from __future__ import print_function
import sys
import time
import threading
import iorio
def make_requester(bucket, stream, conn, i, id):
    """Return a closure that PATCH-appends *i* to the stream's items array.

    The closure sends a JSON Patch 'add' to /items/- and retries on any
    non-200 response, issuing at most 20 requests in total, then returns
    the last response.  *id* identifies the calling thread; it is kept for
    interface compatibility (it only fed now-removed debug tracing).
    """
    def requester():
        patch = [{"op": "add", "path": "/items/-", "value": i}]
        retries = 0
        while True:
            result = conn.send_patch(bucket, stream, patch)
            if result.status == 200:
                break
            # Non-200 presumably means a concurrent-update conflict; retry.
            retries += 1
            if retries >= 20:
                break
        return result
    return requester
def check_race_condition(bucket, stream, conn, count, id):
    """Reset the stream, fire `count` concurrent patches, verify all landed.

    Returns True when at least `count` items are present afterwards,
    False otherwise.
    """
    # Start from an empty items list.
    conn.send(bucket, stream, {'items': []})
    # Create all threads first, then start them together so the patch
    # requests overlap as much as possible.
    workers = [threading.Thread(
        target=make_requester(bucket, stream, conn, i, id))
        for i in range(count)]
    for worker in workers:
        worker.daemon = True
        worker.start()
    for worker in workers:
        worker.join()
    result = conn.query(bucket, stream)
    items = result.body[0]['data']['items']
    if len(items) < count:
        print('patch error, expected {} items got {}'.format(count, len(items)))
        return False
    print('patch ok (%d) %s' % (len(items), items))
    return True
def main():
    """Hammer the iorio server with concurrent patches until one is lost."""
    username = 'admin'
    password = 'secret'
    # Per-user bucket naming convention used by the server.
    bucket = '_user_' + username
    stream = 'patch_race_condition_test'
    host = 'localhost'
    port = 8080
    conn = iorio.Connection(host, port)
    auth_ok, auth_resp = conn.authenticate(username, password)
    count = 0
    if auth_ok:
        # Repeat rounds of 10 concurrent patches until a round fails.
        ok = True
        while ok:
            count += 1
            ok = check_race_condition(bucket, stream, conn, 10, count)
    else:
        print('error authenticating', auth_resp)
# Script entry point.
if __name__ == '__main__':
    main()
| javierdallamore/ioriodb | tools/patchtest.py | Python | mpl-2.0 | 1,904 |
import pkg_resources
# Filesystem path of the 'setup/scripts' resource directory bundled with
# this package, resolved at import time via pkg_resources.
SCRIPT_DIR = pkg_resources.resource_filename(__name__, 'setup/scripts/')
| keithhendry/treadmill | treadmill/infra/__init__.py | Python | apache-2.0 | 95 |
# leading_edge_ui.py - leading edge user interface components
#
# Copyright (C) 2013-2017 - Curtis Olson, [email protected]
# http://madesigner.flightgear.org
import sys
from PyQt5.QtWidgets import (QWidget,
QHBoxLayout, QVBoxLayout, QFrame, QFormLayout,
QPushButton, QTabWidget, QGroupBox,
QLineEdit, QTextEdit, QLabel, QScrollArea,
QInputDialog, QMenu)
from .combobox_nowheel import QComboBoxNoWheel
class LeadingEdgeUI():
    """Editor row for one leading-edge definition.

    Shows a size field plus start/end station combo boxes, and notifies
    the owner through *changefunc* whenever any value changes.
    """
    def __init__(self, changefunc):
        self.valid = True
        self.changefunc = changefunc
        self.container = self.make_page()
    def onChange(self):
        # Forward any edit to the owner's change callback.
        self.changefunc()
    def rebuild_stations(self, stations):
        """Repopulate the start/end combo boxes from a whitespace-separated
        station list, preserving the current selections when still present."""
        station_list = str(stations).split()
        start_text = self.edit_start.currentText()
        end_text = self.edit_end.currentText()
        self.edit_start.clear()
        self.edit_start.addItem("Start: Inner")
        self.edit_end.clear()
        self.edit_end.addItem("End: Outer")
        for station in station_list:
            text = "Start: " + str(station)
            self.edit_start.addItem(text)
            text = "End: " + str(station)
            self.edit_end.addItem(text)
        # BUGFIX: QComboBox.findText() returns -1 (not None) when the text
        # is absent, so the old `if index != None` always ran and
        # setCurrentIndex(-1) cleared the selection.  Restore only when found.
        index = self.edit_start.findText(start_text)
        if index >= 0:
            self.edit_start.setCurrentIndex(index)
        index = self.edit_end.findText(end_text)
        if index >= 0:
            self.edit_end.setCurrentIndex(index)
    def delete_self(self):
        """Remove this editor from the UI (idempotent) and notify the owner."""
        if self.valid:
            self.changefunc()
            self.container.deleteLater()
            self.valid = False
    def make_page(self):
        """Build and return the widget holding the edit controls."""
        # make the edit line
        page = QFrame()
        layout = QVBoxLayout()
        page.setLayout( layout )
        line1 = QFrame()
        layout1 = QHBoxLayout()
        line1.setLayout( layout1 )
        layout.addWidget( line1 )
        layout1.addWidget( QLabel("<b>Size:</b> ") )
        self.edit_size = QLineEdit()
        self.edit_size.setFixedWidth(50)
        self.edit_size.textChanged.connect(self.onChange)
        layout1.addWidget( self.edit_size )
        # Placeholder station entries; rebuild_stations() replaces them.
        self.edit_start = QComboBoxNoWheel()
        self.edit_start.addItem("-")
        self.edit_start.addItem("1")
        self.edit_start.addItem("2")
        self.edit_start.addItem("3")
        self.edit_start.currentIndexChanged.connect(self.onChange)
        layout1.addWidget(self.edit_start)
        self.edit_end = QComboBoxNoWheel()
        self.edit_end.addItem("-")
        self.edit_end.addItem("1")
        self.edit_end.addItem("2")
        self.edit_end.addItem("3")
        self.edit_end.currentIndexChanged.connect(self.onChange)
        layout1.addWidget(self.edit_end)
        layout1.addStretch(1)
        delete = QPushButton('Delete ')
        delete.clicked.connect(self.delete_self)
        layout1.addWidget(delete)
        return page
    def get_widget(self):
        return self.container
    def load(self, node):
        """Populate the controls from a property node."""
        self.edit_size.setText(node.getString('size'))
        # Same findText() fix as in rebuild_stations(): -1 means not found.
        index = self.edit_start.findText(node.getString('start_station'))
        if index >= 0:
            self.edit_start.setCurrentIndex(index)
        index = self.edit_end.findText(node.getString('end_station'))
        if index >= 0:
            self.edit_end.setCurrentIndex(index)
    def save(self, node):
        """Write the current control values back into a property node."""
        node.setString('size', self.edit_size.text())
        node.setString('start_station', self.edit_start.currentText())
        node.setString('end_station', self.edit_end.currentText())
| clolsonus/madesigner | madesigner/madgui/leading_edge_ui.py | Python | gpl-3.0 | 3,643 |
import sys
from pyAMI import *
class amiListNomenclatures:
    """
    List all ATLAS dataset nomenclature known to AMI.

    Returns the nomenclature name, the template, and a tag; the tag is used
    to associate nomenclature to projects. The first argument may be a
    substring to search on. Use the option -valid to return only valid
    nomenclature; only entries with writeStatus=valid can be used for new
    names.
    """
    def __init__(self):
        return
    def command(self, argv):
        # A leading non-option argument is treated as a substring filter and
        # is consumed (not forwarded to AMI).
        skip = 0
        like = "%"
        if len(argv) > 0 and argv[0][0] != '-':
            like = "%" + argv[0] + "%"
            skip = 1
        # -valid may appear either first or after the filter substring.
        valid = ""
        if ((len(argv) > 1 and argv[1] == "-valid") or
                (len(argv) > 0 and argv[0] == "-valid")):
            valid = "AND nomenclature.writeStatus='valid'"
        glite = ("glite=SELECT nomenclature.nomenclatureName, nomenclature.nomenclatureTemplate, nomenclature.nomenclatureTag WHERE (nomenclature.nomenclatureName like '%s') %s ORDER BY nomenclatureName " % (like, valid))
        argument = [
            "SearchQuery",
            "entity=nomenclature",
            glite,
            "project=Atlas_Production",
            "processingStep=*",
            "mode=defaultField",
        ]
        # Forward any remaining arguments untouched.
        argument.extend(argv[skip:])
        return argument
def main(argv):
    # NOTE: this module is Python 2 only (print statements and the
    # `except Exception, msg` clause below are invalid in Python 3).
    try:
        # -help / help: print the command's docstring and exit.
        if(len(argv) > 0 and ((argv[0] == "-help")or (argv[0] == "help"))):
            print amiListNomenclatures.__doc__
            return None
        # Select the AMI endpoint, authenticate, then execute the search.
        pyAMI_setEndPointType(argv)
        amiclient = AMI()
        argv=amiclient.setUserCredentials(argv)
        result = amiclient.execute(amiListNomenclatures().command(argv))
        print result.output()
    except Exception, msg:
        # Best-effort CLI behavior: report the error instead of a traceback.
        print msg
if __name__ == '__main__':
    # Entry point: strip the program name and hand the rest to main().
    main(sys.argv[1:])
| ndawe/pyAMI | devscripts/done/amiListNomenclatures.py | Python | gpl-3.0 | 1,967 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Pi-Yueh Chuang <[email protected]>
#
# Distributed under terms of the MIT license.
"""convert the output file in a batch"""
import os
import os.path as op
import sys
import argparse
# PyFR is located through the $PyFR environment variable (a source checkout)
# rather than an installed package; fail fast when it is not configured.
if os.getenv("PyFR") is None:
    raise EnvironmentError("Environmental variable PyFR is not set")
else:
    PyFRPath = os.getenv("PyFR")
    # Make the PyFR checkout importable.
    if PyFRPath not in sys.path:
        sys.path.append(PyFRPath)
try:
    import pyfr
    import pyfr.writers
except ImportError as err:
    # Augment the original error so the user knows to check $PyFR.
    err.msg += "! Please check the path set in the environmental variable PyFR."
    raise
def parseArgs(args=sys.argv[1:]):
    """Parse the command-line arguments for the batch converter.

    Args:
        args: list of argument strings. Defaults to sys.argv[1:].

    Returns:
        argparse.Namespace holding casePath, solnDir, vtuDir, mesh,
        overwrite and degree.
    """
    cli = argparse.ArgumentParser(
        description="2D Cavity Flow Post-Precessor")
    cli.add_argument(
        "casePath", metavar="path", type=str,
        help="The path to a PyFR case folder")
    cli.add_argument(
        "-s", "--soln-dir", metavar="soln-dir", dest="solnDir",
        type=str, default="solutions",
        help="The directory (under casePath) containing *.pyfrs files. "
             "(Default = solutions)")
    cli.add_argument(
        "-v", "--vtu-dir", metavar="vtu-dir", dest="vtuDir",
        type=str, default="vtu",
        help="The directory (under casePath) in where *.vtu files will be. "
             "If the folder does not exist, the script will create it. "
             "(Default = vtu)")
    cli.add_argument(
        "-m", "--mesh", metavar="mesh", dest="mesh",
        type=str, default=None,
        help="The mesh file required. "
             "The default is to use the first-found .pyfrm file in the case "
             "directory. If multiple .pyfrm files exist in the case directory, "
             "it is suggested to set the argument.")
    cli.add_argument(
        "-o", "--overwrite", dest="overwrite", action="store_true",
        help="Whether to overwrite the output files if they already exist.")
    cli.add_argument(
        "-d", "--degree", dest="degree", type=int, default=0,
        help="The level of mesh. If the solver use higher-order "
             "polynomials, than it may be necessary to set larger degree.")
    return cli.parse_args(args)
def setup_dirs(args):
    """Resolve and validate the directories used by the converter.

    Args:
        args: argparse.Namespace produced by parseArgs().

    Returns:
        The same argparse.Namespace with casePath made absolute, solnDir and
        vtuDir expanded underneath it, and mesh resolved to an existing
        .pyfrm file. The vtu directory is created if missing.

    Raises:
        RuntimeError: if the solution directory or an explicitly given mesh
            file does not exist, or when no .pyfrm file can be found.
    """
    # set up the path to case directory
    args.casePath = os.path.abspath(args.casePath)
    # set up and check the path to the solution directory
    args.solnDir = args.casePath + "/" + args.solnDir
    if not op.isdir(args.solnDir):
        raise RuntimeError(
            "The path " + args.solnDir + " does not exist.")
    # set up the path for the .pyfrm file
    if args.mesh is not None:
        args.mesh = args.casePath + "/" + args.mesh
        if not op.isfile(args.mesh):
            raise RuntimeError(
                "The input mesh file " + args.mesh + " does not exist.")
    else:
        for f in os.listdir(args.casePath):
            if f.endswith(".pyfrm"):
                args.mesh = args.casePath + "/" + f
                # BUG FIX: stop at the first match. The original kept
                # looping and therefore silently used the *last* .pyfrm
                # found, contradicting the documented "first-found" default.
                break
        if args.mesh is None:
            raise RuntimeError(
                "Could not find any .pyfrm file in the case folder " +
                args.casePath)
    # set up and create the directory for .vtu files, if it does not exist
    args.vtuDir = args.casePath + "/" + args.vtuDir
    if not op.isdir(args.vtuDir):
        os.mkdir(args.vtuDir)
    return args
def get_pyfrs_list(pyfrsDirPath):
    """Return the names of all .pyfrs files inside *pyfrsDirPath*.

    Args:
        pyfrsDirPath: path to the folder holding .pyfrs solution files.

    Returns:
        list of matching file names (not full paths).

    Raises:
        RuntimeError: when the folder contains no .pyfrs file.
    """
    matches = []
    for entry in os.listdir(pyfrsDirPath):
        # splitext (rather than endswith) deliberately ignores a bare
        # ".pyfrs" dot-file, matching the original selection rule.
        if op.splitext(entry)[1] == ".pyfrs":
            matches.append(entry)
    if not matches:
        raise RuntimeError(
            "No .pyfrs file was found in the path " + pyfrsDirPath)
    return matches
def generate_vtu(vtuPath, pyfrsPath, pyfrsList, mesh, overwrite, degree):
    """Generate .vtu files for each .pyfrs solution, skipping existing ones.

    Args:
        vtuPath: the folder that will receive the .vtu files
        pyfrsPath: the folder containing the .pyfrs files
        pyfrsList: the list of .pyfrs file names to convert
        mesh: the .pyfrm mesh file
        overwrite: whether to overwrite a .vtu file that already exists
        degree: element divisor level, forwarded to output_vtu's ``d``
    """
    vtuList = [op.splitext(f)[0] + ".vtu" for f in pyfrsList]
    for i, o in zip(pyfrsList, vtuList):
        ifile = op.join(pyfrsPath, i)
        ofile = op.join(vtuPath, o)
        if op.isfile(ofile) and not overwrite:
            print("Warning: " +
                  "the vtu file " + o + " exists " +
                  "and won't be overwrited because overwrite=False")
        else:
            # BUG FIX: output_vtu's 4th positional parameter is ``g``
            # (gradients), not the divisor, so passing ``degree``
            # positionally set gradients=degree and left the divisor at 0.
            # Pass it by keyword so it reaches ``d`` as intended.
            output_vtu(mesh, ifile, ofile, d=degree)
def output_vtu(mesh, iFile, oFile, g=True, p="double", d=0):
    """Convert a single .pyfrs file to a .vtu file with PyFR's writer.

    Args:
        mesh: mesh file (must end with .pyfrm)
        iFile: input file name (must end with .pyfrs)
        oFile: output file name (must end with .vtu)
        g: whether to export gradients
        p: floating-point precision, "single" or "double"
        d: element divisor degree (raise for higher-order polynomials)
    """
    writer_opts = argparse.Namespace(
        meshf=mesh, solnf=iFile, outf=oFile, precision=p,
        gradients=g, divisor=d)
    vtu_writer = pyfr.writers.get_writer_by_extn(".vtu", writer_opts)
    print("Converting " + iFile + " to " + oFile)
    vtu_writer.write_out()
def get_pyfrs_files(pyfrsDirPath):
    """Unimplemented placeholder; see get_pyfrs_list for the working variant."""
    pass
if __name__ == "__main__":
    # Parse CLI arguments, resolve/validate directories, then convert every
    # .pyfrs solution found into a .vtu file.
    args = parseArgs()
    args = setup_dirs(args)
    pyfrsList = get_pyfrs_list(args.solnDir)
    generate_vtu(
        args.vtuDir, args.solnDir, pyfrsList,
        args.mesh, args.overwrite, args.degree)
| piyueh/PyFR-Cases | utils/batch_conversion.py | Python | mit | 6,025 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPegas(RPackage):
    """Population and Evolutionary Genetics Analysis System
    Functions for reading, writing, plotting, analysing, and
    manipulating allelic and haplotypic data, including from VCF files,
    and for the analysis of population nucleotide sequences and
    micro-satellites including coalescent analyses, linkage
    disequilibrium, population structure (Fst, Amova) and equilibrium
    (HWE), haplotype networks, minimum spanning tree and network, and
    median-joining networks."""
    # Upstream project page and CRAN source/archive locations.
    homepage = "http://ape-package.ird.fr/pegas.html"
    url = "https://cloud.r-project.org/src/contrib/pegas_0.14.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/pegas"
    maintainers = ['dorton21']
    # Known-good release with its source-tarball checksum.
    version('0.14', sha256='7df90e6c4a69e8dbed2b3f68b18f1975182475bf6f86d4159256b52fd5332053')
    # R itself plus CRAN dependencies, needed at both build and run time.
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('r-adegenet', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/r-pegas/package.py | Python | lgpl-2.1 | 1,221 |
from django.conf.urls import url
from . import views
urlpatterns = [
    # Adds the product identified by `slug` to the cart
    # ("carrinho/adicionar" = "cart/add").
    url(
        r'^carrinho/adicionar/(?P<slug>[\w_-]+)/$', views.create_cartitem,
        name='create_cartitem'
    ),
]
# -*- coding: utf-8 -*-
"""
Runs functions in pipeline to get query reuslts and does some caching.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import six # NOQA
from os.path import exists
#from ibeis.algo.hots import query_request
#from ibeis.algo.hots import hots_query_result
#from ibeis.algo.hots import exceptions as hsexcept
from ibeis.algo.hots import chip_match
from ibeis.algo.hots import pipeline
from ibeis.algo.hots import _pipeline_helpers as plh # NOQA
# Inject the module-local print/reload/profile utilities from utool.
(print, rrr, profile) = ut.inject2(__name__, '[mc4]')
# TODO: Move to params
# Per-query chipmatch cache (disabled by --nocache-query / --noqcache).
USE_HOTSPOTTER_CACHE = pipeline.USE_HOTSPOTTER_CACHE
USE_CACHE = not ut.get_argflag(('--nocache-query', '--noqcache')) and USE_HOTSPOTTER_CACHE
# Bundled "big cache" for large query batches (disabled by --nocache-big etc.).
USE_BIGCACHE = not ut.get_argflag(('--nocache-big', '--no-bigcache-query', '--noqcache', '--nobigcache')) and ut.USE_CACHE
# When False, computed chipmatches are not written back to disk.
SAVE_CACHE = not ut.get_argflag('--nocache-save')
#MIN_BIGCACHE_BUNDLE = 20
#MIN_BIGCACHE_BUNDLE = 150
# Minimum number of query annotations before the big cache is consulted/saved.
MIN_BIGCACHE_BUNDLE = 64
# Optional CLI override for the pipeline batch size (None -> use ibs config).
HOTS_BATCH_SIZE = ut.get_argval('--hots-batch-size', type_=int, default=None)
#----------------------
# Main Query Logic
#----------------------
def empty_query(ibs, qaids):
    r"""
    Build query results for a query request with no database annotations.

    Every query annotation receives an empty ChipMatch so that downstream
    code can treat the "no daids" case uniformly.

    Args:
        ibs (ibeis.IBEISController): ibeis controller object
        qaids (list): query annotation ids

    Returns:
        tuple: (qaid2_cm, qreq_) where qaid2_cm maps each qaid to an empty
            ChipMatch and qreq_ is the corresponding (daid-less) request.

    CommandLine:
        python -m ibeis.algo.hots.match_chips4 --test-empty_query
        python -m ibeis.algo.hots.match_chips4 --test-empty_query --show
    """
    qreq_ = ibs.new_query_request(qaids, [])
    empty_cms = qreq_.make_empty_chip_matches()
    qaid2_cm = {qaid: cm for qaid, cm in zip(qaids, empty_cms)}
    return qaid2_cm, qreq_
def submit_query_request_nocache(ibs, qreq_, verbose=pipeline.VERB_PIPELINE):
    """Execute a query request bypassing all caching layers. (deprecated)"""
    assert len(qreq_.qaids) > 0, ' no current query aids'
    # With no database annotations there is nothing to match against;
    # hand back empty results instead of running the pipeline.
    if len(qreq_.daids) == 0:
        print('[mc4] WARNING no daids... returning empty query')
        qaid2_cm, _ = empty_query(ibs, qreq_.qaids)
        return qaid2_cm
    return execute_query2(ibs, qreq_, verbose, False)
@profile
def submit_query_request(ibs, qaid_list, daid_list, use_cache=None,
                         use_bigcache=None, cfgdict=None, qreq_=None,
                         verbose=None, save_qcache=None,
                         prog_hook=None):
    """
    The standard query interface.
    TODO: rename use_cache to use_qcache
    Checks a big cache for qaid2_cm. If cache miss, tries to load each cm
    individually. On an individual cache miss, it performs the query.
    Args:
        ibs (ibeis.IBEISController) : ibeis control object
        qaid_list (list): query annotation ids
        daid_list (list): database annotation ids
        use_cache (bool): read per-chipmatch cache (default: module USE_CACHE)
        use_bigcache (bool): read bundled big cache (default: USE_BIGCACHE)
        cfgdict (dict): unused here; the prebuilt qreq_ carries the config
        qreq_ (ibeis.QueryRequest): REQUIRED prebuilt query request
        verbose (bool): default pipeline.VERB_PIPELINE
        save_qcache (bool): write results back to cache (default: SAVE_CACHE)
        prog_hook: optional progress callback attached to qreq_
    Returns:
        list: cm_list, one ChipMatch per qaid in qaid_list (in order).
            Note: despite older docs, this returns a list, not a dict.
    CommandLine:
        python -m ibeis.algo.hots.match_chips4 --test-submit_query_request
    Examples:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.match_chips4 import * # NOQA
        >>> import ibeis
        >>> qaid_list = [1]
        >>> daid_list = [1, 2, 3, 4, 5]
        >>> use_bigcache = True
        >>> use_cache = True
        >>> ibs = ibeis.opendb(db='testdb1')
        >>> qreq_ = ibs.new_query_request(qaid_list, daid_list, cfgdict={}, verbose=True)
        >>> qaid2_cm = submit_query_request(ibs, qaid_list, daid_list, use_cache, use_bigcache, qreq_=qreq_)
    """
    # Get flag defaults if necessary
    if verbose is None:
        verbose = pipeline.VERB_PIPELINE
    if use_cache is None:
        use_cache = USE_CACHE
    if save_qcache is None:
        save_qcache = SAVE_CACHE
    if use_bigcache is None:
        use_bigcache = USE_BIGCACHE
    # Create new query request object to store temporary state
    if verbose:
        #print('[mc4] --- Submit QueryRequest_ --- ')
        ut.colorprint('[mc4] --- Submit QueryRequest_ --- ', 'darkyellow')
    assert qreq_ is not None, 'query request must be prebuilt'
    qreq_.prog_hook = prog_hook
    # --- BIG CACHE ---
    # Do not use bigcache single queries
    use_bigcache_ = (use_bigcache and use_cache and
                     len(qaid_list) > MIN_BIGCACHE_BUNDLE)
    if (use_bigcache_ or save_qcache) and len(qaid_list) > MIN_BIGCACHE_BUNDLE:
        # bc_* are referenced again in the save branch below; both branches
        # are guarded by the same MIN_BIGCACHE_BUNDLE length condition.
        bc_dpath, bc_fname, bc_cfgstr = qreq_.get_bigcache_info()
        if use_bigcache_:
            # Try and load directly from a big cache
            try:
                qaid2_cm = ut.load_cache(bc_dpath, bc_fname, bc_cfgstr)
                cm_list = [qaid2_cm[qaid] for qaid in qaid_list]
            except (IOError, AttributeError):
                # Cache miss / stale format: fall through and recompute.
                pass
            else:
                return cm_list
    # ------------
    # Execute query request
    qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose=verbose)
    # ------------
    if save_qcache and len(qaid_list) > MIN_BIGCACHE_BUNDLE:
        ut.save_cache(bc_dpath, bc_fname, bc_cfgstr, qaid2_cm)
    cm_list = [qaid2_cm[qaid] for qaid in qaid_list]
    return cm_list
@profile
def execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose=True, batch_size=None):
    """
    Loads cached chipmatches where possible, then executes (and optionally
    saves) the remaining queries.
    Args:
        ibs (ibeis.IBEISController):
        qreq_ (ibeis.QueryRequest):
        use_cache (bool):
    Returns:
        qaid2_cm
    CommandLine:
        python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:0
        python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:1
        python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:2
        python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:3
        python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:4
    Example0:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.match_chips4 import * # NOQA
        >>> import ibeis
        >>> cfgdict1 = dict(codename='vsmany', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> ibs = qreq_.ibs
        >>> use_cache, save_qcache, verbose = False, False, True
        >>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)
    Example1:
        >>> # SLOW_DOCTEST
        >>> from ibeis.algo.hots.match_chips4 import * # NOQA
        >>> import ibeis
        >>> cfgdict1 = dict(codename='vsone', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> ibs = qreq_.ibs
        >>> use_cache, save_qcache, verbose = False, False, True
        >>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)
    Example2:
        >>> # SLOW_DOCTEST
        >>> # TEST SAVE
        >>> from ibeis.algo.hots.match_chips4 import * # NOQA
        >>> import ibeis
        >>> cfgdict1 = dict(codename='vsmany', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> ibs = qreq_.ibs
        >>> use_cache, save_qcache, verbose = False, True, True
        >>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)
    Example3:
        >>> # SLOW_DOCTEST
        >>> # TEST LOAD
        >>> from ibeis.algo.hots.match_chips4 import * # NOQA
        >>> import ibeis
        >>> cfgdict1 = dict(codename='vsmany', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> ibs = qreq_.ibs
        >>> use_cache, save_qcache, verbose = True, True, True
        >>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)
    Example4:
        >>> # ENABLE_DOCTEST
        >>> # TEST PARTIAL HIT
        >>> from ibeis.algo.hots.match_chips4 import * # NOQA
        >>> import ibeis
        >>> cfgdict1 = dict(codename='vsmany', sv_on=False, prescore_method='csum')
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3,
        >>>                                                               4, 5, 6,
        >>>                                                               7, 8, 9])
        >>> ibs = qreq_.ibs
        >>> use_cache, save_qcache, verbose = False, True, False
        >>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache,
        >>>                                      save_qcache, verbose,
        >>>                                      batch_size=3)
        >>> cm = qaid2_cm[1]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> cm = qaid2_cm[4]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> cm = qaid2_cm[5]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> cm = qaid2_cm[6]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> print('Re-execute')
        >>> qaid2_cm_ = execute_query_and_save_L1(ibs, qreq_, use_cache,
        >>>                                       save_qcache, verbose,
        >>>                                       batch_size=3)
        >>> assert all([qaid2_cm_[qaid] == qaid2_cm[qaid] for qaid in qreq_.qaids])
        >>> [ut.delete(fpath) for fpath in qreq_.get_chipmatch_fpaths(qreq_.qaids)]
    Ignore:
        other = cm_ = qaid2_cm_[qaid]
        cm = qaid2_cm[qaid]
    """
    if use_cache:
        if ut.VERBOSE:
            print('[mc4] cache-query is on')
        if ut.DEBUG2:
            # sanity check
            qreq_.assert_self(ibs)
        # Try loading as many cached results as possible
        qaid2_cm_hit = {}
        external_qaids = qreq_.qaids
        fpath_list = qreq_.get_chipmatch_fpaths(external_qaids)
        exists_flags = [exists(fpath) for fpath in fpath_list]
        qaids_hit = ut.compress(external_qaids, exists_flags)
        fpaths_hit = ut.compress(fpath_list, exists_flags)
        fpath_iter = ut.ProgressIter(
            fpaths_hit, nTotal=len(fpaths_hit), enabled=len(fpaths_hit) > 1,
            lbl='loading cache hits', adjust=True, freq=1)
        try:
            # Fast path: assume every on-disk chipmatch loads cleanly.
            cm_hit_list = [
                chip_match.ChipMatch.load_from_fpath(fpath, verbose=False)
                for fpath in fpath_iter
            ]
            assert all([qaid == cm.qaid for qaid, cm in zip(qaids_hit, cm_hit_list)]), (
                'inconsistent')
            qaid2_cm_hit = {cm.qaid: cm for cm in cm_hit_list}
        except chip_match.NeedRecomputeError:
            # Slow path: retry one-by-one, keeping only loadable entries.
            print('NeedRecomputeError: Some cached chips need to recompute')
            fpath_iter = ut.ProgressIter(
                fpaths_hit, nTotal=len(fpaths_hit), enabled=len(fpaths_hit) > 1,
                lbl='checking chipmatch cache', adjust=True, freq=1)
            # Recompute those that fail loading
            qaid2_cm_hit = {}
            for fpath in fpath_iter:
                try:
                    cm = chip_match.ChipMatch.load_from_fpath(fpath, verbose=False)
                except chip_match.NeedRecomputeError:
                    pass
                else:
                    qaid2_cm_hit[cm.qaid] = cm
            print('%d / %d cached matches need to be recomputed' % (
                len(qaids_hit) - len(qaid2_cm_hit), len(qaids_hit)))
        if len(qaid2_cm_hit) == len(external_qaids):
            # Full cache hit: nothing left to compute.
            return qaid2_cm_hit
        else:
            if len(qaid2_cm_hit) > 0 and not ut.QUIET:
                print('... partial cm cache hit %d/%d' % (
                    len(qaid2_cm_hit), len(external_qaids)))
            cachehit_qaids = list(qaid2_cm_hit.keys())
            # mask queries that have already been executed
            qreq_.set_external_qaid_mask(cachehit_qaids)
    else:
        if ut.VERBOSE:
            print('[mc4] cache-query is off')
        qaid2_cm_hit = {}
    qaid2_cm = execute_query2(ibs, qreq_, verbose, save_qcache, batch_size)
    if ut.DEBUG2:
        # sanity check
        qreq_.assert_self(ibs)
    # Merge cache hits with computed misses
    if len(qaid2_cm_hit) > 0:
        qaid2_cm.update(qaid2_cm_hit)
    qreq_.set_external_qaid_mask(None)  # undo state changes
    return qaid2_cm
@profile
def execute_query2(ibs, qreq_, verbose, save_qcache, batch_size=None):
    """
    Breaks up query request into several subrequests
    to process "more efficiently" and safer as well.

    Each chunk of query annotations is run through the L0 pipeline as a
    shallow-copied subrequest; results are optionally written to the
    per-chipmatch cache and merged into one {qaid: ChipMatch} dict.
    """
    with ut.Timer('Timing Query'):
        if qreq_.prog_hook is not None:
            # Split the progress bar between preloading and querying.
            preload_hook, query_hook = qreq_.prog_hook.subdivide(spacing=[0, .15, .8])
            preload_hook(0, lbl='preloading')
            qreq_.prog_hook = query_hook
        else:
            preload_hook = None
        # Load features / weights for all annotations
        qreq_.lazy_preload(prog_hook=preload_hook, verbose=verbose and ut.NOT_QUIET)
        all_qaids = qreq_.qaids
        print('len(missed_qaids) = %r' % (len(all_qaids),))
        qaid2_cm = {}
        # vsone must have a chunksize of 1
        if batch_size is None:
            if HOTS_BATCH_SIZE is None:
                hots_batch_size = ibs.cfg.other_cfg.hots_batch_size
            else:
                hots_batch_size = HOTS_BATCH_SIZE
        else:
            hots_batch_size = batch_size
        chunksize = 1 if qreq_.qparams.vsone else hots_batch_size
        # Iterate over vsone queries in chunks.
        nTotalChunks = ut.get_nTotalChunks(len(all_qaids), chunksize)
        qaid_chunk_iter = ut.ichunks(all_qaids, chunksize)
        _qreq_iter = (qreq_.shallowcopy(qaids=qaids) for qaids in qaid_chunk_iter)
        sub_qreq_iter = ut.ProgressIter(_qreq_iter, nTotal=nTotalChunks, freq=1,
                                        lbl='[mc4] query chunk: ',
                                        prog_hook=qreq_.prog_hook)
        for sub_qreq_ in sub_qreq_iter:
            if ut.VERBOSE:
                print('Generating vsmany chunk')
            sub_cm_list = pipeline.request_ibeis_query_L0(ibs, sub_qreq_,
                                                          verbose=verbose)
            # Results must align one-to-one with the chunk's qaids.
            assert len(sub_qreq_.qaids) == len(sub_cm_list), 'not aligned'
            assert all([qaid == cm.qaid for qaid, cm in
                        zip(sub_qreq_.qaids, sub_cm_list)]), 'not corresonding'
            if save_qcache:
                fpath_list = qreq_.get_chipmatch_fpaths(sub_qreq_.qaids)
                _iter = zip(sub_cm_list, fpath_list)
                _iter = ut.ProgressIter(_iter, nTotal=len(sub_cm_list),
                                        lbl='saving chip matches', adjust=True, freq=1)
                for cm, fpath in _iter:
                    cm.save_to_fpath(fpath, verbose=False)
            else:
                if ut.VERBOSE:
                    print('[mc4] not saving vsmany chunk')
            qaid2_cm.update({cm.qaid: cm for cm in sub_cm_list})
    return qaid2_cm
if __name__ == '__main__':
    """
    python -m ibeis.algo.hots.match_chips4
    python -m ibeis.algo.hots.match_chips4 --allexamples --testslow
    """
    # freeze_support is required for multiprocessing on Windows frozen builds.
    import multiprocessing
    multiprocessing.freeze_support()
    ut.doctest_funcs()
| SU-ECE-17-7/ibeis | ibeis/algo/hots/match_chips4.py | Python | apache-2.0 | 15,985 |
# -*- coding: utf-8 -*-
u"""
Created on 2017-6-7
@author: cheng.li
"""
from PyFin.tests.Math.RootFinder.testBrent import TestBrent
# Public API of this test package: re-export the Brent root-finder tests.
__all__ = ['TestBrent']
| wegamekinglc/Finance-Python | PyFin/tests/Math/RootFinder/__init__.py | Python | mit | 158 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import unittest
from pathlib import Path
import nbformat
import notebook
import pytest
from nbconvert.preprocessors import CellExecutionError, ExecutePreprocessor
# Location (relative to the repo root) of the notebook exercised below.
_NB_FOLDER = "docs/notebooks"
_NB_NAME = "EventTimeline.ipynb"
class TestTimeline(unittest.TestCase):
    """Unit test class."""
    @pytest.mark.skipif(
        not os.environ.get("MSTICPY_TEST_NOSKIP"), reason="Skipped for local tests."
    )
    def test_timeline_controls(self):
        """Execute the EventTimeline notebook end-to-end; fail on any cell error."""
        nb_path = Path(_NB_FOLDER).joinpath(_NB_NAME)
        abs_path = Path(_NB_FOLDER).absolute()
        with open(nb_path) as f:
            nb = nbformat.read(f, as_version=4)
        ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
        try:
            ep.preprocess(nb, {"metadata": {"path": abs_path}})
        except CellExecutionError:
            # Save the partially executed notebook (including the traceback
            # cell) next to the original as *-err.ipynb for inspection.
            nb_err = str(nb_path).replace(".ipynb", "-err.ipynb")
            msg = f"Error executing the notebook '{nb_path}'.\n"
            msg += f"See notebook '{nb_err}' for the traceback."
            print(msg)
            with open(nb_err, mode="w", encoding="utf-8") as f:
                nbformat.write(nb, f)
            raise
| VirusTotal/msticpy | tests/test_timeline.py | Python | mit | 1,474 |
from functools import wraps
import logging
import os
from . import process
from .utils import which, tempdir
from ._compat import FileExistsError
def ensure_git(return_value=None):
    """Decorator factory: run the wrapped function only when the ``git``
    executable is available; otherwise log an error and return
    *return_value* instead."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if not which('git'):
                logging.error('git is not installed')
                return return_value
            return func(*args, **kwargs)
        return wrapper
    return decorator
@ensure_git()
def clone(url, dest=None, depth=None):
    """Clone *url* into *dest* (a fresh temp directory when omitted) and
    return the destination path. An optional *depth* produces a shallow
    clone. Raises FileExistsError when *dest* already exists."""
    if dest and os.path.exists(dest):
        raise FileExistsError('Destination already exists: %s' % dest)
    target = dest if dest else tempdir()
    cmd = ['git', 'clone', url, target]
    if depth:
        cmd.extend(['--depth', depth])
    process.call(cmd)
    return target
class Repository(object):
    """Minimal git wrapper that versions a passpie database directory.

    All commands run with cwd=self.path via process.call. Optionally pulls
    (rebase) on construction and pushes after every commit.
    """
    def __init__(self, path, autopull=None, autopush=None):
        # path: working-copy directory every git command runs in.
        # autopull: optional (remote, branch) pair pulled with --rebase now.
        # autopush: when truthy, commit() is followed by push().
        self.path = path
        self.autopush = autopush
        self.autopull = autopull
        self.author = "Passpie <passpie@localhost>"
        if autopull:
            self.pull_rebase(*autopull)
    @ensure_git()
    def init(self):
        # Create a fresh git repository at self.path.
        cmd = ['git', 'init', self.path]
        process.call(cmd)
    @ensure_git()
    def pull_rebase(self, remote='origin', branch='master'):
        # Fetch + rebase local work on top of the remote branch.
        cmd = ['git', 'pull', '--rebase', remote, branch]
        process.call(cmd, cwd=self.path)
    @ensure_git()
    def push(self, remote='origin', branch='master'):
        # Publish local commits to the remote branch.
        cmd = ['git', 'push', remote, branch]
        process.call(cmd, cwd=self.path)
    @ensure_git()
    def add(self, all=False):
        # Stage changes; with all=True also stages deletions/untracked via --all.
        if all is True:
            cmd = ['git', 'add', '--all', '.']
        else:
            cmd = ['git', 'add', '.']
        process.call(cmd, cwd=self.path)
    @ensure_git()
    def commit(self, message, add=True):
        # Commit staged changes as the fixed passpie author; optionally
        # stage everything first and push afterwards (autopush).
        author_option = "--author={}".format(self.author)
        if add:
            self.add(all=True)
        cmd = ['git', 'commit', author_option, '-m', message]
        process.call(cmd, cwd=self.path)
        if self.autopush:
            self.push()
    @ensure_git(return_value=[])
    def commit_list(self):
        # Commit subjects, oldest first (empty list when git is missing).
        cmd = ['git', 'log', '--reverse', '--pretty=format:%s']
        output, _ = process.call(cmd, cwd=self.path)
        return output.splitlines()
    @ensure_git(return_value=[])
    def sha_list(self):
        # Abbreviated commit hashes, oldest first.
        cmd = ['git', 'log', '--reverse', '--pretty=format:%h']
        output, _ = process.call(cmd, cwd=self.path)
        return output.splitlines()
    @ensure_git()
    def reset(self, to_index):
        # Hard-reset the working copy to the commit at position `to_index`
        # in the oldest-first history; out-of-range indices are logged only.
        try:
            sha = self.sha_list()[to_index]
            cmd = ['git', 'reset', '--hard', sha]
            process.call(cmd, cwd=self.path)
        except IndexError:
            logging.info('commit on index "{}" not found'.format(to_index))
| marcwebbie/passpie | passpie/history.py | Python | mit | 2,814 |
"""Unit test for sysinfo.
"""
import os
import sys
import unittest
from collections import namedtuple
# Disable W0611: Unused import
import tests.treadmill_test_deps # pylint: disable=W0611
import mock
import treadmill
import treadmill.appmgr
from treadmill import sysinfo
class SysinfoTest(unittest.TestCase):
    """treadmill.sysinfo test."""
    # NOTE: the mem/bogomips tests patch '__builtin__.open', which exists
    # only on Python 2 — this suite targets a Python 2 interpreter.
    def test_proc_info(self):
        """Proc info test."""
        proc_info = sysinfo.proc_info(os.getpid())
        # Handle running python with options, as in:
        # sys.argv[0] == 'python -m unittest'
        expected = os.path.basename(sys.argv[0].split()[0])
        # TODO: When running coverage, script is execed under python.
        # but sys.argv[0] reports as setup.py
        #
        # train starts subprocess for the test with altnose.py
        # this makes this assert unusable
        expected_progs = ['setup.py', 'altnose.py', 'sysinfo_test.py']
        if expected not in expected_progs:
            self.assertEquals(expected, proc_info.filename)
        self.assertEquals(os.getppid(), proc_info.ppid)
        # We do not check the starttime, but just verify that calling
        # proc_info twice returns same starttime, which can be used as part of
        # process signature.
        self.assertEquals(proc_info.starttime,
                          sysinfo.proc_info(os.getpid()).starttime)
    def test_mem_info(self):
        """Mock test for mem info."""
        # Canned /proc/meminfo content served through a mocked open().
        proc_meminfo = """
MemTotal: 7992596 kB
MemFree: 3572940 kB
Buffers: 202564 kB
Cached: 2371108 kB
SwapCached: 0 kB
Active: 2959388 kB
Inactive: 868476 kB
HighTotal: 0 kB
HighFree: 0 kB
LowTotal: 7992596 kB
LowFree: 3572940 kB
SwapTotal: 4064436 kB
SwapFree: 4064436 kB
Dirty: 240 kB
Writeback: 0 kB
AnonPages: 1254148 kB
Mapped: 104244 kB
Slab: 500152 kB
PageTables: 17180 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
CommitLimit: 11257772 kB
Committed_AS: 2268028 kB
VmallocTotal: 34359738367 kB
VmallocUsed: 335508 kB
VmallocChunk: 34359375019 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
Hugepagesize: 2048 kB
"""
        open_mock = mock.mock_open(read_data=proc_meminfo.strip())
        with mock.patch('__builtin__.open', open_mock, create=True):
            meminfo = sysinfo.mem_info()
        self.assertEquals(7992596, meminfo.total)
    @mock.patch('os.statvfs', mock.Mock())
    def test_disk_usage(self):
        """Mock test for disk usage."""
        # total = f_blocks * f_frsize; free = f_bavail * f_frsize.
        os.statvfs.return_value = namedtuple(
            'statvfs',
            'f_blocks f_bavail, f_frsize')(100, 20, 4)
        du = sysinfo.disk_usage('/var/tmp')
        os.statvfs.assert_called_with('/var/tmp')
        self.assertEquals(400, du.total)
        self.assertEquals(80, du.free)
    def test_bogomips(self):
        """Mock test for mem info."""
        # Canned /proc/cpuinfo with four cores; total_bogomips() should sum
        # and truncate the per-core bogomips values.
        cpuinfo = """
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 58
model name : Intel(R) Core(TM) i5-3470 CPU @ 3.20GHz
stepping : 9
cpu MHz : 1600.000
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 4
apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse
bogomips : 6385.66
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management: [8]
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 58
model name : Intel(R) Core(TM) i5-3470 CPU @ 3.20GHz
stepping : 9
cpu MHz : 1600.000
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 1
cpu cores : 4
apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse
bogomips : 6384.64
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management: [8]
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 58
model name : Intel(R) Core(TM) i5-3470 CPU @ 3.20GHz
stepping : 9
cpu MHz : 1600.000
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 2
cpu cores : 4
apicid : 4
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse
bogomips : 6385.26
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management: [8]
processor : 3
vendor_id : GenuineIntel
cpu family : 6
model : 58
model name : Intel(R) Core(TM) i5-3470 CPU @ 3.20GHz
stepping : 9
cpu MHz : 1600.000
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 3
cpu cores : 4
apicid : 6
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse
bogomips : 6384.10
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management: [8]
"""
        open_mock = mock.mock_open(read_data=cpuinfo.strip())
        with mock.patch('__builtin__.open', open_mock, create=True):
            bogomips = sysinfo.total_bogomips()
        # bogomips : 6385.66
        # bogomips : 6384.64
        # bogomips : 6385.26
        # bogomips : 6384.10
        # -------------------
        # total : 25539.659999999996
        self.assertEquals(25539, bogomips)
    @mock.patch('time.time', mock.Mock(return_value=50))
    @mock.patch('treadmill.cgroups.get_value',
                mock.Mock(return_value=42*1024**2))
    @mock.patch('treadmill.sysinfo.BMIPS_PER_CPU', 1)
    @mock.patch('treadmill.sysinfo.total_bogomips',
                mock.Mock(return_value=2))
    @mock.patch('treadmill.sysinfo._app_cpu_shares_prct',
                mock.Mock(return_value=1.0)) # 100% is available to TM apps.
    @mock.patch('treadmill.syscall.sysinfo.sysinfo',
                mock.Mock(return_value=namedtuple('mock_si', ['uptime'])(42)))
    def test_node_info(self):
        """Test node information report generation.
        """
        # Access protected members
        # pylint: disable=W0212
        mock_tm_env = mock.Mock(
            spec_set=treadmill.appmgr.AppEnvironment,
            svc_cgroup=mock.Mock(
                spec_set=treadmill.services._base_service.ResourceService,
            ),
            svc_localdisk=mock.Mock(
                spec_set=treadmill.services._base_service.ResourceService,
            ),
            svc_network=mock.Mock(
                spec_set=treadmill.services._base_service.ResourceService,
            ),
        )
        mock_tm_env.svc_localdisk.status.return_value = {
            'size': 100*1024**2,
        }
        res = sysinfo.node_info(mock_tm_env)
        mock_tm_env.svc_localdisk.status.assert_called_with(timeout=30)
        mock_tm_env.svc_cgroup.status.assert_called_with(timeout=30)
        # up_since = time.time() (50) - uptime (42) = 8.
        self.assertEquals(
            res,
            {
                'cpu': '200%', # 100% of 2 cores is available
                'memory': '42M', # As read from cgroup
                'disk': '100M', # As returned by localdisk service
                'up_since': 8,
            }
        )
    @mock.patch('treadmill.cgroups.get_value', mock.Mock())
    def test__app_cpu_shares_prct(self):
        """Test available cpu shares calculation.
        """
        # Access protected members
        # pylint: disable=W0212
        treadmill.cgroups.get_value.side_effect = [
            2500, # system
            7500, # treadmill
            2000, # core
            2000, # apps
        ]
        res = sysinfo._app_cpu_shares_prct()
        self.assertEquals(
            res,
            0.375, # 0.75 (tm/sys split) * 0.5 (core/apps split)
        )
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| toenuff/treadmill | tests/sysinfo_test.py | Python | apache-2.0 | 7,897 |
import geojson as gj
import logging
import bson.objectid as boi
import emission.core.wrapper.common_place as ecwcp
import emission.core.get_database as edb
import emission.simulation.markov_model_counter as esmmc
import emission.storage.decorations.common_trip_queries as esdctp
#################################################################################
############################ database functions #################################
#################################################################################
def save_common_place(common_place):
    """Persist *common_place* into the common place collection."""
    edb.get_common_place_db().save(common_place)
def get_common_place_from_db(common_place_id):
    """Fetch the place document with *common_place_id* and wrap it."""
    document = edb.get_common_place_db().find_one({"_id": common_place_id})
    return make_common_place(document)
def get_all_common_places_for_user(user_id):
    """Return a cursor over every common place stored for *user_id*."""
    return edb.get_common_place_db().find({"user_id": user_id})
def get_common_place_at_location(loc):
    """Look up the common place stored at location *loc* and wrap it."""
    match = edb.get_common_place_db().find_one({"location": loc})
    return make_common_place(match)
def make_new_common_place(user_id, loc):
    """Build (without saving) a new CommonPlace for *user_id* at *loc*."""
    new_place = ecwcp.CommonPlace()
    new_place.user_id = user_id
    new_place.location = loc
    return new_place
def make_common_place(props):
    # Wrap a raw mongo document into a CommonPlace wrapper.
    # NOTE(review): callers pass find_one() results, which may be None on a
    # miss -- confirm CommonPlace(None) is handled as intended.
    return ecwcp.CommonPlace(props)
def clear_existing_places(user_id):
db = edb.get_common_place_db()
db.remove({'user_id': user_id})
def get_all_place_objs(common_place):
    # NOTE(review): this function references `trip` and `dct`, neither of
    # which is defined in this scope (the `common_place` parameter is never
    # used), so calling it raises NameError.  It looks like dead or
    # half-migrated code -- confirm and either repair or remove it.
    trip.trips = [unc_trip.get_id() for unc_trip in dct["sections"]]
    place_db = edb.get_place_db()
    start_places = []
    end_places = []
    for t in trip.trips:
        # NOTE(review): trip.trips holds ids (from get_id()) yet t.start_place
        # and t.end_place are accessed as attributes -- verify the intent.
        start = place_db.find_one({"_id" : t.start_place})
        end = place_db.find_one({"_id" : t.end_place})
        start_places.append(start)
        end_places.append(end)
################################################################################
def create_places(list_of_cluster_data, user_id):
    """Create and persist CommonPlace objects for the given clusters.

    Each cluster contributes a start and an end location; successor links
    (start -> end) are recorded so the successor graph can be walked later.
    Existing places for *user_id* are cleared before the new ones are saved.
    """
    places_to_successors = {}
    places_dct = {}
    logging.debug("About to create places for %d clusters" % len(list_of_cluster_data))
    for dct in list_of_cluster_data:
        start_loc = gj.Point(dct['start_coords'].coordinate_list())
        end_loc = gj.Point(dct['end_coords'].coordinate_list())
        # Key-sorted GeoJSON strings are used as dict keys because Point
        # objects themselves are not hashable/stable.
        start_loc_str = gj.dumps(start_loc, sort_keys=True)
        end_loc_str = gj.dumps(end_loc, sort_keys=True)
        # NOTE(review): on the FIRST sighting of a start location its end_loc
        # is NOT appended as a successor (only on later sightings).  This
        # reproduces the original behaviour -- confirm it is intentional.
        if start_loc_str not in places_to_successors:
            places_to_successors[start_loc_str] = []
        else:
            places_to_successors[start_loc_str].append(end_loc)
        if end_loc_str not in places_to_successors:
            places_to_successors[end_loc_str] = []
        if start_loc_str not in places_dct:
            places_dct[start_loc_str] = dct["start_places"]
        if end_loc_str not in places_dct:
            places_dct[end_loc_str] = dct["end_places"]
    clear_existing_places(user_id)
    logging.debug("After creating map, number of places is %d" % len(places_to_successors))
    # dict.iterkeys()/iteritems() are Python-2-only; plain iteration and
    # items() behave identically here and also work on Python 3.
    for loc_str in places_to_successors:
        start = make_new_common_place(user_id, gj.loads(loc_str))
        logging.debug("Adding %d places for this place" % len(places_dct[loc_str]))
        start.places = places_dct[loc_str]
        save_common_place(start)
    for loc_str, successors in places_to_successors.items():
        start = get_common_place_at_location(gj.loads(loc_str))
        successor_places = [get_common_place_at_location(loc) for loc in successors]
        start.successors = successor_places
        save_common_place(start)
### Graph queries
def get_succesor(user_id, place_id, time):
    """Sample a successor place id for *place_id* at datetime *time*.

    Builds a counter weighted by the stored trip probabilities from *time*'s
    hour through the end of the day, then samples one successor from it.
    """
    weighted = esmmc.Counter()
    day = time.weekday()
    place = get_common_place_from_db(place_id)
    for suc in place["successors"]:
        trip = esdctp.get_common_trip_from_db(user_id, place_id, suc)
        # xrange is Python-2-only; range behaves identically here and is
        # also valid on Python 3.
        for temp_hour in range(time.hour, esdctp.HOURS_IN_DAY):
            counter_key = ("%s" % suc, temp_hour)
            weighted[counter_key] = trip.probabilites[day, temp_hour]
    return boi.ObjectId(esmmc.sampleFromCounter(weighted)[0])
def has_succesor(user_id, place_id, time):
    """Return True if *place_id* has any successor reachable after *time*."""
    day = time.weekday()
    place = get_common_place_from_db(place_id)
    for suc in place["successors"]:
        trip = esdctp.get_common_trip_from_db(user_id, place_id, suc)
        # xrange is Python-2-only; range is equivalent and Python-3 safe.
        for temp_hour in range(time.hour, esdctp.HOURS_IN_DAY):
            if trip.probabilites[day, temp_hour] > 0:
                return True
    return False
| joshzarrabi/e-mission-server | emission/storage/decorations/common_place_queries.py | Python | bsd-3-clause | 4,478 |
"""
Interactive Average
===================
The plot below uses an interval selection, which causes the chart to include an interactive brush
(shown in grey). The brush selection parameterizes the red guideline, which visualizes the average
value within the selected interval.
"""
# category: interactive charts
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
# Interval selection restricted to the x encoding: dragging across the bars
# brushes a range of months.
brush = alt.selection(type='interval', encodings=['x'])
# Monthly mean precipitation bars; bars outside the brush are dimmed.
bars = alt.Chart().mark_bar().encode(
    x='month(date):O',
    y='mean(precipitation):Q',
    opacity=alt.condition(brush, alt.OpacityValue(1), alt.OpacityValue(0.7)),
).add_selection(
    brush
)
# Rule showing the mean precipitation over only the brushed months.
line = alt.Chart().mark_rule(color='firebrick').encode(
    y='mean(precipitation):Q',
    size=alt.SizeValue(3)
).transform_filter(
    brush
)
# Layered chart; data is given at the layer level so both charts share it.
alt.layer(bars, line, data=source)
| altair-viz/altair | altair/examples/selection_layer_bar_month.py | Python | bsd-3-clause | 835 |
"""
Virt management features
Copyright 2007, Red Hat, Inc
Michael DeHaan <[email protected]>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
# warning: virt management is rather complicated
# to see a simple example of func, look at the
# service control module. API docs on how
# to use this to come.
# other modules
import os
import libvirt
import sub_process
from xml.dom import minidom
# Map libvirt domain state codes (virDomainState) to coarse state names.
VIRT_STATE_NAME_MAP = {
   0 : "running",
   1 : "running",
   2 : "running",
   3 : "paused",
   4 : "shutdown",
   5 : "shutdown",
   6 : "crashed"
}
class FuncLibvirtConnection(object):
    """Thin wrapper around a libvirt connection.

    Provides VM lookup by name plus lifecycle and introspection helpers.
    """

    def __init__(self, connection):
        conn = libvirt.open(connection)
        self.conn = conn

    def find_vm(self, vmid):
        """
        Extra bonus feature: vmid = -1 returns a list of everything
        """
        conn = self.conn
        vms = []
        # this block of code borrowed from virt-manager:
        # get working domain's name
        ids = conn.listDomainsID()
        for id in ids:
            vm = conn.lookupByID(id)
            vms.append(vm)
        # get defined domain
        names = conn.listDefinedDomains()
        for name in names:
            vm = conn.lookupByName(name)
            vms.append(vm)
        if vmid == -1:
            return vms
        for vm in vms:
            if vm.name() == vmid:
                return vm
        raise Exception("virtual machine %s not found" % vmid)

    def get_vnc_port(self, vmid):
        """Return the VNC port attribute from the domain's <graphics> XML."""
        vmxml = self.find_vm(vmid).XMLDesc(0)
        vmdoc = minidom.parseString(vmxml)
        vncelement = vmdoc.getElementsByTagName('graphics')[0]
        return vncelement.getAttribute('port')

    def get_mac_address(self, vmid):
        """Return the MAC address from the domain's first <mac> XML element."""
        vmxml = self.find_vm(vmid).XMLDesc(0)
        vmdoc = minidom.parseString(vmxml)
        macelement = vmdoc.getElementsByTagName('mac')[0]
        return macelement.getAttribute('address')

    def shutdown(self, vmid):
        return self.find_vm(vmid).shutdown()

    def pause(self, vmid):
        # BUG FIX: the original called self.suspend(self.conn, vmid), which
        # passed the connection object as the vmid (plus an extra argument)
        # and raised TypeError.  suspend() takes only the vmid.
        return self.suspend(vmid)

    def unpause(self, vmid):
        # BUG FIX: same as pause() -- resume() takes only the vmid.
        return self.resume(vmid)

    def suspend(self, vmid):
        return self.find_vm(vmid).suspend()

    def resume(self, vmid):
        return self.find_vm(vmid).resume()

    def create(self, vmid):
        return self.find_vm(vmid).create()

    def destroy(self, vmid):
        return self.find_vm(vmid).destroy()

    def undefine(self, vmid):
        return self.find_vm(vmid).undefine()

    def get_status2(self, vm):
        """Map an already-looked-up domain object to a coarse state name."""
        state = vm.info()[0]
        return VIRT_STATE_NAME_MAP.get(state, "unknown")

    def get_status(self, vmid):
        """Map the named domain's state code to a coarse state name."""
        state = self.find_vm(vmid).info()[0]
        return VIRT_STATE_NAME_MAP.get(state, "unknown")

    def nodeinfo(self):
        return self.conn.getInfo()

    def get_type(self):
        return self.conn.getType()
class Virt(object):
    """Remote management facade for one virtualization host.

    Uses libvirt (via FuncLibvirtConnection) for introspection and domain
    lifecycle, and plain ssh-as-root for host-side file operations.
    """
    def __init__(self, connection, hostname):
        # connection: libvirt connection URI; hostname: ssh target host.
        self.connection = connection
        self.hostname = hostname
    def __get_conn(self):
        # (Re)open the libvirt connection and cache it on self.conn.
        self.conn = FuncLibvirtConnection(self.connection)
        return self.conn
    def __send_ssh(self, command, **kwargs):
        # Run *command* (a list of argv items) as root on the host over ssh;
        # returns the subprocess exit code.
        ssh_args = ['/usr/bin/ssh',
                    'root@%s' % self.hostname]
        for arg in command:
            ssh_args.append(arg)
        return sub_process.call(ssh_args, **kwargs)
    def state(self):
        # Return a list of "<name> <state>" strings, one per known domain.
        vms = self.list_vms()
        state = []
        for vm in vms:
            state_blurb = self.conn.get_status(vm)
            state.append("%s %s" % (vm,state_blurb))
        return state
    def info(self):
        # Return {vm_name: {state/maxMem/memory/nrVirtCpu/cpuTime}} for all
        # known domains.
        vms = self.list_vms()
        info = dict()
        for vm in vms:
            data = self.conn.find_vm(vm).info()
            # libvirt returns maxMem, memory, and cpuTime as long()'s, which
            # xmlrpclib tries to convert to regular int's during serialization.
            # This throws exceptions, so convert them to strings here and
            # assume the other end of the xmlrpc connection can figure things
            # out or doesn't care.
            info[vm] = {
                "state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"),
                "maxMem" : str(data[1]),
                "memory" : str(data[2]),
                "nrVirtCpu" : data[3],
                "cpuTime" : str(data[4])
            }
        return info
    def nodeinfo(self):
        # Return host-level data from libvirt getInfo(), stringified for the
        # same xmlrpc serialization reason as info() above.
        self.__get_conn()
        info = dict()
        data = self.conn.nodeinfo()
        info = {
            "cpumodel" : str(data[0]),
            "phymemory" : str(data[1]),
            "cpus" : str(data[2]),
            "cpumhz" : str(data[3]),
            "numanodes" : str(data[4]),
            "sockets" : str(data[5]),
            "cpucores" : str(data[6]),
            "cputhreads" : str(data[7])
        }
        return info
    def list_vms(self):
        # Return the names of all domains (running and defined); domains
        # whose name() call fails are silently skipped.
        self.conn = self.__get_conn()
        vms = self.conn.find_vm(-1)
        results = []
        for x in vms:
            try:
                results.append(x.name())
            except:
                pass
        return results
    def virttype(self):
        # Hypervisor driver name as reported by libvirt (e.g. "Xen").
        return self.__get_conn().get_type()
    def autostart(self, vm):
        # Mark *vm* for autostart by symlinking its config on the host.
        self.conn = self.__get_conn()
        if self.conn.get_type() == "Xen":
            autostart_args = [
                "/bin/ln",
                "-s",
                "/etc/xen/%s" % vm,
                "/etc/xen/auto"
            ]
        else:
            # When using KVM, we need to make sure the autostart
            # directory exists
            mkdir_args = [
                "/bin/mkdir",
                "-p",
                "/etc/libvirt/qemu/autostart"
            ]
            self.__send_ssh(mkdir_args,shell=False,close_fds=True)
            # We aren't using virsh autostart because we want
            # the command to work even when the VM isn't running
            autostart_args = [
                "/bin/ln",
                "-s",
                "/etc/libvirt/qemu/%s.xml" % vm,
                "/etc/libvirt/qemu/autostart/%s.xml" % vm
            ]
        return self.__send_ssh(autostart_args,shell=False,close_fds=True)
    def freemem(self):
        # Estimate free memory (MB) for new guests: physical memory minus a
        # 256M reservation minus the memory of all active domains.
        self.conn = self.__get_conn()
        # Start with the physical memory and subtract
        memory = self.conn.nodeinfo()[1]
        # Take 256M off which is reserved for Domain-0
        memory = memory - 256
        vms = self.conn.find_vm(-1)
        for vm in vms:
            # Exclude stopped vms and Domain-0 by using
            # ids greater than 0
            if vm.ID() > 0:
                # This node is active - remove its memory (in bytes)
                memory = memory - int(vm.info()[2])/1024
        return memory
    def freecpus(self):
        # Estimate unassigned CPUs: host CPU count minus vcpus of active
        # domains (Domain-0 and stopped domains excluded by ID > 0).
        self.conn = self.__get_conn()
        # Start with the total number of CPUs and subtract
        cpus = self.conn.nodeinfo()[2]
        vms = self.conn.find_vm(-1)
        for vm in vms:
            # Exclude stopped vms and Domain-0 by using
            # ids greater than 0
            if vm.ID() > 0:
                # This node is active - remove its cpus
                cpus -= int(vm.info()[3])
        return cpus
    def install(self, server_name, target_name, system=False, image=False,
                virt_name=None, virt_path=None):
        """
        Install a new virt system by way of a named cobbler profile.
        """
        # Example:
        # install("bootserver.example.org", "fc7webserver", True)
        # install("bootserver.example.org", "client.example.org", True, "client-disk0", "HostVolGroup00")
        conn = self.__get_conn()
        if conn is None:
            raise Exception("no connection")
        # Pick the koan target flavour; system wins over image over profile.
        target = "profile"
        if image:
            target = "image"
        if system:
            target = "system"
        koan_args = [
            "/usr/bin/koan",
            "--virt",
            "--%s=%s" % (target, target_name),
            "--server=%s" % server_name
        ]
        if virt_name:
            koan_args.append("--virt-name=%s" % virt_name)
        if virt_path:
            koan_args.append("--virt-path=%s" % virt_path)
        rc = self.__send_ssh(koan_args,shell=False,close_fds=True)
        if rc == 0:
            return 0
        else:
            raise Exception("koan returned %d" % rc)
    def shutdown(self, vmid):
        """
        Make the machine with the given vmid stop running.
        Whatever that takes.
        """
        self.__get_conn()
        self.conn.shutdown(vmid)
        return 0
    def pause(self, vmid):
        """
        Pause the machine with the given vmid.
        """
        self.__get_conn()
        self.conn.suspend(vmid)
        return 0
    def unpause(self, vmid):
        """
        Unpause the machine with the given vmid.
        """
        self.__get_conn()
        self.conn.resume(vmid)
        return 0
    def create(self, vmid):
        """
        Start the machine via the given mac address.
        """
        self.__get_conn()
        self.conn.create(vmid)
        return 0
    def destroy(self, vmid):
        """
        Pull the virtual power from the virtual domain, giving it virtually no
        time to virtually shut down.
        """
        self.__get_conn()
        self.conn.destroy(vmid)
        return 0
    def undefine(self, vmid):
        """
        Stop a domain, and then wipe it from the face of the earth.
        by deleting the disk image and its configuration file.
        """
        self.__get_conn()
        self.conn.undefine(vmid)
        return 0
    def get_status(self, vmid):
        """
        Return a state suitable for server consumption. Aka, codes.py values, not XM output.
        """
        self.__get_conn()
        return unicode(self.conn.get_status(vmid))
    def get_number_of_guests(self):
        """
        Return the total amount of guests running on a host.
        """
        vms = self.list_vms()
        return len(vms)
    def get_mac_address(self, vmid):
        """
        Return the mac address of a supplied VM.
        """
        self.__get_conn()
        return unicode(self.conn.get_mac_address(vmid))
    def get_vnc_port(self, vmid):
        """
        Return the VNC port of a supplied VM (if running or specified in XML)
        """
        self.__get_conn()
        return unicode(self.conn.get_vnc_port(vmid))
| ssalevan/fogmachine | fogmachine/virt.py | Python | gpl-2.0 | 10,780 |
# -*- coding: UTF-8 -*-
from Products.Five.browser import BrowserView
from docpool.base.content.dpdocument import IDPDocument
from plone import api
from zope.component import getMultiAdapter
import logging
log = logging.getLogger(__name__)
class ActionHelpers(BrowserView):
    """Browser-view helpers shared by docpool action templates."""

    def can_change_password(self):
        """Return False for SSO and shared read-only accounts.

        IMIS users ("i-" prefix) authenticate via SSO, and the shared
        "-u" / "-info" accounts are used by multiple people, so none of
        them may change the password.
        """
        portal_state = getMultiAdapter(
            (self.context, self.request), name=u'plone_portal_state')
        member = portal_state.member()
        member_id = member.getId()
        # IMIS-Users uses SSO and cannot change their password
        if member_id.startswith('i-'):
            return False
        # User with only these roles should not change their password.
        # They are usually shared by multiple people.
        # FIXME: THIS DOES NOT WORK ! - also users which can add portal content in their group do only have these groups
        # roles = member.getRolesInContext(self.context)
        # read_only = ['Member', 'Authenticated', 'ELANUser', 'Reader']
        # can_change_pwd_roles = [r for r in roles if r not in read_only]
        # return bool(can_change_pwd_roles)
        # read only ELAN-Users, usually shared by multiple people.
        # endswith with a tuple replaces the original slice comparisons
        # (id[-2:] == '-u' or id[-5:] == '-info') -- same semantics.
        if member_id.endswith(('-u', '-info')):
            return False
        return True

    def is_rei_workflow(self, doc=None):
        """
        Checks if a rei workflow is activated on a dpdocument
        :param doc: dpdocument (or brain thereof); defaults to self.context
        :return: True/False, or None when doc is not a dpdocument
        """
        if not doc:
            doc = self.context
        # Catalog brains expose getObject(); resolve to the real object.
        if hasattr(doc, "getObject"):
            doc = doc.getObject()
        # rei workflow is only possible on dpdocument
        if not IDPDocument.providedBy(doc):
            log.info("Rei WF only possible on dpdocument")
            return
        wf_tool = api.portal.get_tool('portal_workflow')
        workflow = wf_tool.getWorkflowsFor(doc)[0]
        rei_wfs = ('rei_review_workflow_alternative', 'rei_review_workflow_standard')
        # Direct boolean return replaces the original if/return True/False.
        return workflow.id in rei_wfs
| OpenBfS/dokpool-plone | Plone/src/docpool.base/docpool/base/browser/actionhelpers.py | Python | gpl-3.0 | 2,062 |
#===============================================================================
# @Author: Madison Aster
# @ModuleDescription:
# @License:
# MediaApp Library - Python Package framework for developing robust Media
# Applications with Qt Library
# Copyright (C) 2013 Madison Aster
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 2.1 as published by the Free Software Foundation;
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See LICENSE in the root directory of this library for copy of
# GNU Lesser General Public License and other license details.
#===============================================================================
from Qt import QtGui, QtCore, QtWidgets
class Spacer(QtWidgets.QWidget):
    """An expanding widget that requests no minimum space of its own."""

    def __init__(self):
        super(Spacer, self).__init__()
        expanding = QtWidgets.QSizePolicy.Expanding
        self.setSizePolicy(expanding, expanding)

    def sizeHint(self):
        # Prefer zero size; the Expanding policy lets layouts stretch us.
        return QtCore.QSize(0, 0)
| ThomasMcVay/MediaApp | MediaAppKnobs/KnobElements/Spacer.py | Python | lgpl-2.1 | 1,551 |
# -*- coding: utf-8 -*-
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Run tests.
This will find all modules whose name match a given prefix in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v verbose -- run tests in verbose mode with output to stdout
-q quiet -- don't print anything except if a test fails
-t testdir -- directory where the tests will be found
-x exclude -- add a test to exclude
-p profile -- profiled execution
-d dbc -- enable design-by-contract
-m match -- only run test matching the tag pattern which follow
If no non-option arguments are present, prefixes used are 'test',
'regrtest', 'smoketest' and 'unittest'.
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
# modified copy of some functions from test/regrtest.py from PyXml
# disable camel case warning
# pylint: disable=C0103
import sys
import os, os.path as osp
import re
import traceback
import inspect
import difflib
import tempfile
import math
import warnings
from shutil import rmtree
from operator import itemgetter
from itertools import dropwhile
from inspect import isgeneratorfunction
from six import string_types
from six.moves import builtins, range, configparser, input
from logilab.common.deprecation import deprecated
import unittest as unittest_legacy
if not getattr(unittest_legacy, "__package__", None):
try:
import unittest2 as unittest
from unittest2 import SkipTest
except ImportError:
raise ImportError("You have to install python-unittest2 to use %s" % __name__)
else:
import unittest
from unittest import SkipTest
from functools import wraps
from logilab.common.debugger import Debugger, colorize_source
from logilab.common.decorators import cached, classproperty
from logilab.common import textutils
__all__ = ['main', 'unittest_main', 'find_tests', 'run_test', 'spawn']
# Module-name prefixes recognized as test modules by find_tests().
DEFAULT_PREFIXES = ('test', 'regrtest', 'smoketest', 'unittest',
                    'func', 'validation')
# Deprecated alias kept for backward compatibility.
is_generator = deprecated('[lgc 0.63] use inspect.isgeneratorfunction')(isgeneratorfunction)
# used by unittest to count the number of relevant levels in the traceback
__unittest = 1
def with_tempdir(callable):
    """A decorator ensuring no temporary file left when the function return

    Work only for temporary file created with the tempfile module.  The
    system tempdir is swapped for a private directory for the duration of
    the call (or of the generator's iteration) and removed afterwards.
    """
    if isgeneratorfunction(callable):
        # BUG FIX: this branch was missing @wraps, so decorated generator
        # functions lost their __name__/__doc__ (unlike the plain-function
        # branch below).
        @wraps(callable)
        def proxy(*args, **kwargs):
            old_tmpdir = tempfile.gettempdir()
            new_tmpdir = tempfile.mkdtemp(prefix="temp-lgc-")
            tempfile.tempdir = new_tmpdir
            try:
                for x in callable(*args, **kwargs):
                    yield x
            finally:
                try:
                    rmtree(new_tmpdir, ignore_errors=True)
                finally:
                    tempfile.tempdir = old_tmpdir
        return proxy

    @wraps(callable)
    def proxy(*args, **kargs):
        old_tmpdir = tempfile.gettempdir()
        new_tmpdir = tempfile.mkdtemp(prefix="temp-lgc-")
        tempfile.tempdir = new_tmpdir
        try:
            return callable(*args, **kargs)
        finally:
            try:
                rmtree(new_tmpdir, ignore_errors=True)
            finally:
                tempfile.tempdir = old_tmpdir
    return proxy
def in_tempdir(callable):
    """Decorator running the wrapped callable with the current working
    directory set to ``tempfile.tempdir`` (restored afterwards).
    """
    @wraps(callable)
    def wrapper(*args, **kwargs):
        saved_cwd = os.getcwd()
        os.chdir(tempfile.tempdir)
        try:
            return callable(*args, **kwargs)
        finally:
            os.chdir(saved_cwd)
    return wrapper
def within_tempdir(callable):
    """A decorator run the enclosed function inside a tmpdir removed after execution
    """
    # Compose the two decorators above: create a private temp dir
    # (with_tempdir) and chdir into it for the call (in_tempdir).
    proxy = with_tempdir(in_tempdir(callable))
    proxy.__name__ = callable.__name__
    return proxy
def find_tests(testdir,
               prefixes=DEFAULT_PREFIXES, suffix=".py",
               excludes=(),
               remove_suffix=True):
    """Return the sorted list of applicable test modules found in *testdir*.

    A file qualifies when its name carries *suffix* (if any) and starts with
    one of *prefixes*; *excludes* entries are dropped, and *remove_suffix*
    controls whether the suffix is stripped from the returned names.
    """
    matching = []
    for entry in os.listdir(testdir):
        if suffix and not entry.endswith(suffix):
            continue
        for prefix in prefixes:
            if not entry.startswith(prefix):
                continue
            if remove_suffix and entry.endswith(suffix):
                entry = entry[:-len(suffix)]
            if entry not in excludes:
                matching.append(entry)
    matching.sort()
    return matching
## PostMortem Debug facilities #####
def start_interactive_mode(result):
    """starts an interactive shell so that the user can inspect errors
    """
    debuggers = result.debuggers
    # Failure/error descriptions, in the order they were printed.
    descrs = result.error_descrs + result.fail_descrs
    if len(debuggers) == 1:
        # don't ask for test name if there's only one failure
        debuggers[0].start()
    else:
        while True:
            testindex = 0
            print("Choose a test to debug:")
            # order debuggers in the same way than errors were printed
            print("\n".join(['\t%s : %s' % (i, descr) for i, (_, descr)
                  in enumerate(descrs)]))
            print("Type 'exit' (or ^D) to quit")
            print()
            try:
                todebug = input('Enter a test name: ')
                if todebug.strip().lower() == 'exit':
                    print()
                    break
                else:
                    try:
                        # The user enters the printed index; map it back to
                        # the matching debugger via descrs.
                        testindex = int(todebug)
                        debugger = debuggers[descrs[testindex][0]]
                    except (ValueError, IndexError):
                        print("ERROR: invalid test number %r" % (todebug, ))
                    else:
                        debugger.start()
            except (EOFError, KeyboardInterrupt):
                # ^D / ^C leaves the interactive loop.
                print()
                break
# test utils ##################################################################
class SkipAwareTestResult(unittest._TextTestResult):
    """Text test result tracking skips, per-failure debuggers and colorized
    tracebacks (used by the lgc test runner).
    """
    def __init__(self, stream, descriptions, verbosity,
                 exitfirst=False, pdbmode=False, cvg=None, colorize=False):
        super(SkipAwareTestResult, self).__init__(stream,
                                                  descriptions, verbosity)
        self.skipped = []
        self.debuggers = []
        self.fail_descrs = []
        self.error_descrs = []
        self.exitfirst = exitfirst
        self.pdbmode = pdbmode
        self.cvg = cvg
        self.colorize = colorize
        self.pdbclass = Debugger
        self.verbose = verbosity > 1
    def descrs_for(self, flavour):
        # flavour is 'error' or 'fail' -> self.error_descrs / self.fail_descrs
        return getattr(self, '%s_descrs' % flavour.lower())
    def _create_pdb(self, test_descr, flavour):
        # Record the (debugger index, description) pair; only instantiate a
        # debugger when pdb mode was requested.
        self.descrs_for(flavour).append( (len(self.debuggers), test_descr) )
        if self.pdbmode:
            self.debuggers.append(self.pdbclass(sys.exc_info()[2]))
    def _iter_valid_frames(self, frames):
        """only consider non-testlib frames when formatting traceback"""
        lgc_testlib = osp.abspath(__file__)
        std_testlib = osp.abspath(unittest.__file__)
        invalid = lambda fi: osp.abspath(fi[1]) in (lgc_testlib, std_testlib)
        for frameinfo in dropwhile(invalid, frames):
            yield frameinfo
    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string.
        This method is overridden here because we want to colorize
        lines if --color is passed, and display local variables if
        --verbose is passed
        """
        exctype, exc, tb = err
        output = ['Traceback (most recent call last)']
        frames = inspect.getinnerframes(tb)
        colorize = self.colorize
        frames = enumerate(self._iter_valid_frames(frames))
        for index, (frame, filename, lineno, funcname, ctx, ctxindex) in frames:
            filename = osp.abspath(filename)
            if ctx is None: # pyc files or C extensions for instance
                source = '<no source available>'
            else:
                source = ''.join(ctx)
            if colorize:
                filename = textutils.colorize_ansi(filename, 'magenta')
                source = colorize_source(source)
            output.append(' File "%s", line %s, in %s' % (filename, lineno, funcname))
            output.append(' %s' % source.strip())
            if self.verbose:
                output.append('%r == %r' % (dir(frame), test.__module__))
                output.append('')
                output.append(' ' + ' local variables '.center(66, '-'))
                for varname, value in sorted(frame.f_locals.items()):
                    output.append(' %s: %r' % (varname, value))
                    if varname == 'self': # special handy processing for self
                        for varname, value in sorted(vars(value).items()):
                            output.append(' self.%s: %r' % (varname, value))
                output.append(' ' + '-' * 66)
                output.append('')
        output.append(''.join(traceback.format_exception_only(exctype, exc)))
        return '\n'.join(output)
    def addError(self, test, err):
        """err -> (exc_type, exc, tcbk)"""
        # SkipTest raised inside a test arrives here; reroute it to addSkip.
        exc_type, exc, _ = err
        if isinstance(exc, SkipTest):
            assert exc_type == SkipTest
            self.addSkip(test, exc)
        else:
            if self.exitfirst:
                self.shouldStop = True
            descr = self.getDescription(test)
            super(SkipAwareTestResult, self).addError(test, err)
            self._create_pdb(descr, 'error')
    def addFailure(self, test, err):
        if self.exitfirst:
            self.shouldStop = True
        descr = self.getDescription(test)
        super(SkipAwareTestResult, self).addFailure(test, err)
        self._create_pdb(descr, 'fail')
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        if self.showAll:
            self.stream.writeln("SKIPPED")
        elif self.dots:
            self.stream.write('S')
    def printErrors(self):
        super(SkipAwareTestResult, self).printErrors()
        self.printSkippedList()
    def printSkippedList(self):
        # format (test, err) compatible with unittest2
        for test, err in self.skipped:
            descr = self.getDescription(test)
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % ('SKIPPED', descr))
            self.stream.writeln("\t%s" % err)
    def printErrorList(self, flavour, errors):
        for (_, descr), (test, err) in zip(self.descrs_for(flavour), errors):
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour, descr))
            self.stream.writeln(self.separator2)
            self.stream.writeln(err)
            self.stream.writeln('no stdout'.center(len(self.separator2)))
            self.stream.writeln('no stderr'.center(len(self.separator2)))
# Add deprecation warnings about new api used by module level fixtures in unittest2
# http://www.voidspace.org.uk/python/articles/unittest2.shtml#setupmodule-and-teardownmodule
class _DebugResult(object): # simplify import statement among unittest flavors..
    "Used by the TestSuite to hold previous class when running in debug."
    _previousTestClass = None  # last test class handled by the suite
    _moduleSetUpFailed = False  # set when a module-level setUp failed
    shouldStop = False  # debug runs are never asked to stop early
# backward compatibility: TestSuite might be imported from lgc.testlib
TestSuite = unittest.TestSuite
# Marker type: parse_generative_args() treats a ``keywords`` instance among
# the yielded parameters as the **kwargs of the generated test call.
class keywords(dict):
    """Keyword args (**kwargs) support for generative tests."""
class starargs(tuple):
    """Variable positional arguments (*args) for generative tests."""
    def __new__(cls, *args):
        # Pack the call's positional arguments into the tuple itself.
        return super(starargs, cls).__new__(cls, args)
# Re-export unittest.main under the historical lgc name.
unittest_main = unittest.main
class InnerTestSkipped(SkipTest):
    """raised when a test is skipped"""
    pass
def parse_generative_args(params):
    """Split generative-test parameters into ``(args, kwargs)``.

    Plain values accumulate as positional args; a ``starargs`` instance
    extends them; a single ``keywords`` instance supplies the kwargs.
    Ordering violations raise TypeError exactly as before.
    """
    args = []
    kwargs = {}
    seen_star = False
    seen_kw = False
    for param in params:
        if isinstance(param, starargs):
            if seen_star or seen_kw:
                raise TypeError('found starargs after keywords !')
            seen_star = True
            args.extend(param)
        elif isinstance(param, keywords):
            if seen_kw:
                raise TypeError('got multiple keywords parameters')
            seen_kw = True
            kwargs = param
        elif seen_star or seen_kw:
            raise TypeError('found parameters after kwargs or args')
        else:
            args.append(param)
    return args, kwargs
class InnerTest(tuple):
    """A named tuple of parameters identifying one generated sub-test."""
    def __new__(cls, name, *data):
        # The tuple holds the data; the sub-test name rides as an attribute.
        self = super(InnerTest, cls).__new__(cls, data)
        self.name = name
        return self
class Tags(set):
    """A set of tag able validate an expression"""
    def __init__(self, *tags, **kwargs):
        # inherit defaults to True; any other keyword is rejected.
        self.inherit = kwargs.pop('inherit', True)
        if kwargs:
            raise TypeError("%s are an invalid keyword argument for this function" % kwargs.keys())
        # A single non-string argument is taken as an iterable of tags.
        if len(tags) == 1 and not isinstance(tags[0], string_types):
            tags = tags[0]
        super(Tags, self).__init__(tags, **kwargs)
    def __getitem__(self, key):
        # Lets eval() in match() resolve bare names to membership tests.
        return key in self
    def match(self, exp):
        # NOTE: exp is eval'd with this set as the namespace -- only ever
        # pass trusted tag expressions here.
        return eval(exp, {}, self)
    def __or__(self, other):
        return Tags(*super(Tags, self).__or__(other))
# duplicate definition from unittest2 of the _deprecate decorator
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
('Please use %s instead.' % original_func.__name__),
DeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
class TestCase(unittest.TestCase):
    """A unittest.TestCase extension with some additional methods."""
    maxDiff = None  # never truncate assertion diffs
    pdbclass = Debugger  # debugger class used when pdb mode is enabled
    tags = Tags()  # tag set attached to the test class
    def __init__(self, methodName='runTest'):
        super(TestCase, self).__init__(methodName)
        # Keep private references used by __call__/_proceed below.
        self.__exc_info = sys.exc_info
        self.__testMethodName = self._testMethodName
        self._current_test_descr = None
        self._options_ = None
    @classproperty
    @cached
    def datadir(cls): # pylint: disable=E0213
        """helper attribute holding the standard test's data directory
        NOTE: this is a logilab's standard
        """
        # The 'data' directory next to the module defining the test class.
        mod = sys.modules[cls.__module__]
        return osp.join(osp.dirname(osp.abspath(mod.__file__)), 'data')
    # cache it (use a class method to cache on class since TestCase is
    # instantiated for each test run)
    @classmethod
    def datapath(cls, *fname):
        """joins the object's datadir and `fname`"""
        return osp.join(cls.datadir, *fname)
    def set_description(self, descr):
        """sets the current test's description.
        This can be useful for generative tests because it allows to specify
        a description per yield
        """
        self._current_test_descr = descr
    # override default's unittest.py feature
    def shortDescription(self):
        """override default unittest shortDescription to handle correctly
        generative tests
        """
        # Prefer the per-yield description set via set_description().
        if self._current_test_descr is not None:
            return self._current_test_descr
        return super(TestCase, self).shortDescription()
    def quiet_run(self, result, func, *args, **kwargs):
        # Run func, routing exceptions into *result*; returns False when the
        # caller should abort (error or skip), True on plain success.
        try:
            func(*args, **kwargs)
        except (KeyboardInterrupt, SystemExit):
            raise
        except unittest.SkipTest as e:
            if hasattr(result, 'addSkip'):
                result.addSkip(self, str(e))
            else:
                warnings.warn("TestResult has no addSkip method, skips not reported",
                              RuntimeWarning, 2)
                result.addSuccess(self)
            return False
        except:
            result.addError(self, self.__exc_info())
            return False
        return True
    def _get_test_method(self):
        """return the test method"""
        return getattr(self, self._testMethodName)
    def optval(self, option, default=None):
        """return the option value or default if the option is not define"""
        return getattr(self._options_, option, default)
    def __call__(self, result=None, runcondition=None, options=None):
        """rewrite TestCase.__call__ to support generative tests
        This is mostly a copy/paste from unittest.py (i.e same
        variable names, same logic, except for the generative tests part)
        """
        from logilab.common.pytest import FILE_RESTART
        if result is None:
            result = self.defaultTestResult()
        result.pdbclass = self.pdbclass
        self._options_ = options
        # if result.cvg:
        #     result.cvg.start()
        testMethod = self._get_test_method()
        if (getattr(self.__class__, "__unittest_skip__", False) or
            getattr(testMethod, "__unittest_skip__", False)):
            # If the class or method was skipped.
            try:
                skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                            or getattr(testMethod, '__unittest_skip_why__', ''))
                self._addSkip(result, skip_why)
            finally:
                result.stopTest(self)
            return
        if runcondition and not runcondition(testMethod):
            return # test is skipped
        result.startTest(self)
        try:
            if not self.quiet_run(result, self.setUp):
                return
            generative = isgeneratorfunction(testMethod)
            # generative tests
            if generative:
                self._proceed_generative(result, testMethod,
                                         runcondition)
            else:
                status = self._proceed(result, testMethod)
                success = (status == 0)
            if not self.quiet_run(result, self.tearDown):
                return
            if not generative and success:
                # With --exitfirst, remember succeeded tests so a later run
                # can restart from the first failure.
                if hasattr(options, "exitfirst") and options.exitfirst:
                    # add this test to restart file
                    try:
                        restartfile = open(FILE_RESTART, 'a')
                        try:
                            descr = '.'.join((self.__class__.__module__,
                                              self.__class__.__name__,
                                              self._testMethodName))
                            restartfile.write(descr+os.linesep)
                        finally:
                            restartfile.close()
                    except Exception:
                        print("Error while saving succeeded test into",
                              osp.join(os.getcwd(), FILE_RESTART),
                              file=sys.__stderr__)
                        raise
                result.addSuccess(self)
        finally:
            # if result.cvg:
            #     result.cvg.stop()
            result.stopTest(self)
    def _proceed_generative(self, result, testfunc, runcondition=None):
        # Run a generator test: each yield describes one sub-test call.
        # cancel startTest()'s increment
        result.testsRun -= 1
        success = True
        try:
            for params in testfunc():
                if runcondition and not runcondition(testfunc,
                        skipgenerator=False):
                    if not (isinstance(params, InnerTest)
                            and runcondition(params)):
                        continue
                if not isinstance(params, (tuple, list)):
                    params = (params, )
                # First yielded item is the callable; the rest are its args.
                func = params[0]
                args, kwargs = parse_generative_args(params[1:])
                # increment test counter manually
                result.testsRun += 1
                status = self._proceed(result, func, args, kwargs)
                if status == 0:
                    result.addSuccess(self)
                    success = True
                else:
                    success = False
                    # XXX Don't stop anymore if an error occured
                    #if status == 2:
                    #    result.shouldStop = True
                if result.shouldStop: # either on error or on exitfirst + error
                    break
        except:
            # if an error occurs between two yield
            result.addError(self, self.__exc_info())
            success = False
        return success
    def _proceed(self, result, testfunc, args=(), kwargs=None):
        """proceed the actual test
        returns 0 on success, 1 on failure, 2 on error

        Note: addSuccess can't be called here because we have to wait
        for tearDown to be successfully executed to declare the test as
        successful

        :param result: test result collector
        :param testfunc: the callable under test
        :param args: positional arguments for testfunc
        :param kwargs: keyword arguments for testfunc (None means empty)
        """
        kwargs = kwargs or {}
        try:
            testfunc(*args, **kwargs)
        except self.failureException:
            result.addFailure(self, self.__exc_info())
            return 1
        # KeyboardInterrupt must propagate: the user asked to stop the run
        except KeyboardInterrupt:
            raise
        # InnerTestSkipped must come before SkipTest: it is checked first so
        # a skipped *inner* (generative) test counts as non-success (1),
        # while a regular SkipTest counts as success (0)
        except InnerTestSkipped as e:
            result.addSkip(self, e)
            return 1
        except SkipTest as e:
            result.addSkip(self, e)
            return 0
        except:
            # anything else is an error (2), not a failure
            result.addError(self, self.__exc_info())
            return 2
        return 0
def defaultTestResult(self):
"""return a new instance of the defaultTestResult"""
return SkipAwareTestResult()
skip = _deprecate(unittest.TestCase.skipTest)
assertEquals = _deprecate(unittest.TestCase.assertEqual)
assertNotEquals = _deprecate(unittest.TestCase.assertNotEqual)
assertAlmostEquals = _deprecate(unittest.TestCase.assertAlmostEqual)
assertNotAlmostEquals = _deprecate(unittest.TestCase.assertNotAlmostEqual)
def innerSkip(self, msg=None):
"""mark a generative test as skipped for the <msg> reason"""
msg = msg or 'test was skipped'
raise InnerTestSkipped(msg)
@deprecated('Please use assertDictEqual instead.')
def assertDictEquals(self, dict1, dict2, msg=None, context=None):
"""compares two dicts
If the two dict differ, the first difference is shown in the error
message
:param dict1: a Python Dictionary
:param dict2: a Python Dictionary
:param msg: custom message (String) in case of failure
"""
dict1 = dict(dict1)
msgs = []
for key, value in dict2.items():
try:
if dict1[key] != value:
msgs.append('%r != %r for key %r' % (dict1[key], value,
key))
del dict1[key]
except KeyError:
msgs.append('missing %r key' % key)
if dict1:
msgs.append('dict2 is lacking %r' % dict1)
if msg:
self.failureException(msg)
elif msgs:
if context is not None:
base = '%s\n' % context
else:
base = ''
self.fail(base + '\n'.join(msgs))
@deprecated('Please use assertCountEqual instead.')
def assertUnorderedIterableEquals(self, got, expected, msg=None):
"""compares two iterable and shows difference between both
:param got: the unordered Iterable that we found
:param expected: the expected unordered Iterable
:param msg: custom message (String) in case of failure
"""
got, expected = list(got), list(expected)
self.assertSetEqual(set(got), set(expected), msg)
if len(got) != len(expected):
if msg is None:
msg = ['Iterable have the same elements but not the same number',
'\t<element>\t<expected>i\t<got>']
got_count = {}
expected_count = {}
for element in got:
got_count[element] = got_count.get(element, 0) + 1
for element in expected:
expected_count[element] = expected_count.get(element, 0) + 1
# we know that got_count.key() == expected_count.key()
# because of assertSetEqual
for element, count in got_count.iteritems():
other_count = expected_count[element]
if other_count != count:
msg.append('\t%s\t%s\t%s' % (element, other_count, count))
self.fail(msg)
assertUnorderedIterableEqual = assertUnorderedIterableEquals
assertUnordIterEquals = assertUnordIterEqual = assertUnorderedIterableEqual
@deprecated('Please use assertSetEqual instead.')
def assertSetEquals(self,got,expected, msg=None):
"""compares two sets and shows difference between both
Don't use it for iterables other than sets.
:param got: the Set that we found
:param expected: the second Set to be compared to the first one
:param msg: custom message (String) in case of failure
"""
if not(isinstance(got, set) and isinstance(expected, set)):
warnings.warn("the assertSetEquals function if now intended for set only."\
"use assertUnorderedIterableEquals instead.",
DeprecationWarning, 2)
return self.assertUnorderedIterableEquals(got, expected, msg)
items={}
items['missing'] = expected - got
items['unexpected'] = got - expected
if any(items.itervalues()):
if msg is None:
msg = '\n'.join('%s:\n\t%s' % (key, "\n\t".join(str(value) for value in values))
for key, values in items.iteritems() if values)
self.fail(msg)
@deprecated('Please use assertListEqual instead.')
def assertListEquals(self, list_1, list_2, msg=None):
"""compares two lists
If the two list differ, the first difference is shown in the error
message
:param list_1: a Python List
:param list_2: a second Python List
:param msg: custom message (String) in case of failure
"""
_l1 = list_1[:]
for i, value in enumerate(list_2):
try:
if _l1[0] != value:
from pprint import pprint
pprint(list_1)
pprint(list_2)
self.fail('%r != %r for index %d' % (_l1[0], value, i))
del _l1[0]
except IndexError:
if msg is None:
msg = 'list_1 has only %d elements, not %s '\
'(at least %r missing)'% (i, len(list_2), value)
self.fail(msg)
if _l1:
if msg is None:
msg = 'list_2 is lacking %r' % _l1
self.fail(msg)
@deprecated('Non-standard. Please use assertMultiLineEqual instead.')
def assertLinesEquals(self, string1, string2, msg=None, striplines=False):
"""compare two strings and assert that the text lines of the strings
are equal.
:param string1: a String
:param string2: a String
:param msg: custom message (String) in case of failure
:param striplines: Boolean to trigger line stripping before comparing
"""
lines1 = string1.splitlines()
lines2 = string2.splitlines()
if striplines:
lines1 = [l.strip() for l in lines1]
lines2 = [l.strip() for l in lines2]
self.assertListEqual(lines1, lines2, msg)
assertLineEqual = assertLinesEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertXMLWellFormed(self, stream, msg=None, context=2):
"""asserts the XML stream is well-formed (no DTD conformance check)
:param context: number of context lines in standard message
(show all data if negative).
Only available with element tree
"""
try:
from xml.etree.ElementTree import parse
self._assertETXMLWellFormed(stream, parse, msg)
except ImportError:
from xml.sax import make_parser, SAXParseException
parser = make_parser()
try:
parser.parse(stream)
except SAXParseException as ex:
if msg is None:
stream.seek(0)
for _ in range(ex.getLineNumber()):
line = stream.readline()
pointer = ('' * (ex.getLineNumber() - 1)) + '^'
msg = 'XML stream not well formed: %s\n%s%s' % (ex, line, pointer)
self.fail(msg)
    @deprecated('Non-standard: please copy test method to your TestCase class')
    def assertXMLStringWellFormed(self, xml_string, msg=None, context=2):
        """asserts the XML string is well-formed (no DTD conformance check)

        :param xml_string: the XML document as a string
        :param msg: optional custom failure message
        :param context: number of context lines in standard message
                        (show all data if negative).
                        Only available with element tree
        """
        try:
            from xml.etree.ElementTree import fromstring
        except ImportError:
            # pre-2.5 interpreters shipped ElementTree as a separate package
            from elementtree.ElementTree import fromstring
        self._assertETXMLWellFormed(xml_string, fromstring, msg)
    def _assertETXMLWellFormed(self, data, parse, msg=None, context=2):
        """internal function used by /assertXML(String)?WellFormed/ functions

        :param data: xml_data (string or file-like object)
        :param parse: appropriate parser function for this data
        :param msg: error message
        :param context: number of context lines in standard message
                        (show all data if negative).
                        Only available with element tree
        """
        from xml.parsers.expat import ExpatError
        try:
            from xml.etree.ElementTree import ParseError
        except ImportError:
            # compatibility for <python2.7
            ParseError = ExpatError
        try:
            parse(data)
        except (ExpatError, ParseError) as ex:
            if msg is None:
                if hasattr(data, 'readlines'): #file like object
                    data.seek(0)
                    lines = data.readlines()
                else:
                    lines = data.splitlines(True)
                nb_lines = len(lines)
                context_lines = []
                # catch when ParseError doesn't set valid lineno
                if ex.lineno is not None:
                    # negative context means "show the whole document"
                    if context < 0:
                        start = 1
                        end = nb_lines
                    else:
                        start = max(ex.lineno-context, 1)
                        end = min(ex.lineno+context, nb_lines)
                    # right-align line numbers on the widest one shown
                    line_number_length = len('%i' % end)
                    line_pattern = " %%%ii: %%s" % line_number_length
                    for line_no in range(start, ex.lineno):
                        context_lines.append(line_pattern % (line_no, lines[line_no-1]))
                    context_lines.append(line_pattern % (ex.lineno, lines[ex.lineno-1]))
                    # caret under the failing column, offset by the
                    # " NN: " prefix width
                    context_lines.append('%s^\n' % (' ' * (1 + line_number_length + 2 +ex.offset)))
                    for line_no in range(ex.lineno+1, end+1):
                        context_lines.append(line_pattern % (line_no, lines[line_no-1]))
                    rich_context = ''.join(context_lines)
                    msg = 'XML stream not well formed: %s\n%s' % (ex, rich_context)
            self.fail(msg)
    @deprecated('Non-standard: please copy test method to your TestCase class')
    def assertXMLEqualsTuple(self, element, tup):
        """compare an ElementTree Element to a tuple formatted as follow:
        (tagname, [attrib[, children[, text[, tail]]]])

        Trailing tuple entries are optional; an entry is required as soon as
        the element carries the corresponding data (attributes, children,
        text, tail).
        """
        # check tag
        self.assertTextEquals(element.tag, tup[0])
        # check attrib
        if len(element.attrib) or len(tup)>1:
            if len(tup)<=1:
                self.fail( "tuple %s has no attributes (%s expected)"%(tup,
                        dict(element.attrib)))
            self.assertDictEqual(element.attrib, tup[1])
        # check children
        if len(element) or len(tup)>2:
            if len(tup)<=2:
                self.fail( "tuple %s has no children (%i expected)"%(tup,
                        len(element)))
            if len(element) != len(tup[2]):
                self.fail( "tuple %s has %i children%s (%i expected)"%(tup,
                        len(tup[2]),
                        ('', 's')[len(tup[2])>1], len(element)))
            # recurse pairwise on each (child element, expected tuple)
            for index in range(len(tup[2])):
                self.assertXMLEqualsTuple(element[index], tup[2][index])
        #check text
        if element.text or len(tup)>3:
            if len(tup)<=3:
                self.fail( "tuple %s has no text value (%r expected)"%(tup,
                        element.text))
            self.assertTextEquals(element.text, tup[3])
        #check tail
        if element.tail or len(tup)>4:
            if len(tup)<=4:
                self.fail( "tuple %s has no tail value (%r expected)"%(tup,
                        element.tail))
            self.assertTextEquals(element.tail, tup[4])
    def _difftext(self, lines1, lines2, junk=None, msg_prefix='Texts differ'):
        """Fail at the first line that differs between the two line lists.

        :param lines1: first sequence of lines
        :param lines2: second sequence of lines
        :param junk: characters the differ may treat as junk
            (defaults to blanks and tabs)
        :param msg_prefix: prefix put in front of the failure message
        """
        junk = junk or (' ', '\t')
        # result is a generator
        result = difflib.ndiff(lines1, lines2, charjunk=lambda x: x in junk)
        read = []
        for line in result:
            read.append(line)
            # lines that don't start with a ' ' are diff ones
            # (on failure, the rest of the generator is drained with
            # list(result) so the full remaining diff appears in the message)
            if not line.startswith(' '):
                self.fail('\n'.join(['%s\n'%msg_prefix]+read + list(result)))
@deprecated('Non-standard. Please use assertMultiLineEqual instead.')
def assertTextEquals(self, text1, text2, junk=None,
msg_prefix='Text differ', striplines=False):
"""compare two multiline strings (using difflib and splitlines())
:param text1: a Python BaseString
:param text2: a second Python Basestring
:param junk: List of Caracters
:param msg_prefix: String (message prefix)
:param striplines: Boolean to trigger line stripping before comparing
"""
msg = []
if not isinstance(text1, string_types):
msg.append('text1 is not a string (%s)'%(type(text1)))
if not isinstance(text2, string_types):
msg.append('text2 is not a string (%s)'%(type(text2)))
if msg:
self.fail('\n'.join(msg))
lines1 = text1.strip().splitlines(True)
lines2 = text2.strip().splitlines(True)
if striplines:
lines1 = [line.strip() for line in lines1]
lines2 = [line.strip() for line in lines2]
self._difftext(lines1, lines2, junk, msg_prefix)
assertTextEqual = assertTextEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertStreamEquals(self, stream1, stream2, junk=None,
msg_prefix='Stream differ'):
"""compare two streams (using difflib and readlines())"""
# if stream2 is stream2, readlines() on stream1 will also read lines
# in stream2, so they'll appear different, although they're not
if stream1 is stream2:
return
# make sure we compare from the beginning of the stream
stream1.seek(0)
stream2.seek(0)
# compare
self._difftext(stream1.readlines(), stream2.readlines(), junk,
msg_prefix)
assertStreamEqual = assertStreamEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertFileEquals(self, fname1, fname2, junk=(' ', '\t')):
"""compares two files using difflib"""
self.assertStreamEqual(open(fname1), open(fname2), junk,
msg_prefix='Files differs\n-:%s\n+:%s\n'%(fname1, fname2))
assertFileEqual = assertFileEquals
    @deprecated('Non-standard: please copy test method to your TestCase class')
    def assertDirEquals(self, path_a, path_b):
        """recursively compares two directory trees

        Directory layouts must match exactly; each file pair is then
        compared with assertFileEquals.
        """
        assert osp.exists(path_a), "%s doesn't exists" % path_a
        assert osp.exists(path_b), "%s doesn't exists" % path_b
        # walk entries as (relative path, dirnames, filenames), sorted by
        # relative path so both trees are visited in lock-step
        all_a = [ (ipath[len(path_a):].lstrip('/'), idirs, ifiles)
                    for ipath, idirs, ifiles in os.walk(path_a)]
        all_a.sort(key=itemgetter(0))
        all_b = [ (ipath[len(path_b):].lstrip('/'), idirs, ifiles)
                    for ipath, idirs, ifiles in os.walk(path_b)]
        all_b.sort(key=itemgetter(0))
        iter_a, iter_b = iter(all_a), iter(all_b)
        partial_iter = True
        # NOTE(review): data_a / datas_a / datas_b / partial_iter are assigned
        # but never read afterwards -- apparently leftovers of an older
        # implementation
        ipath_a, idirs_a, ifiles_a = data_a = None, None, None
        while True:
            try:
                ipath_a, idirs_a, ifiles_a = datas_a = next(iter_a)
                partial_iter = False
                ipath_b, idirs_b, ifiles_b = datas_b = next(iter_b)
                partial_iter = True
                self.assertTrue(ipath_a == ipath_b,
                    "unexpected %s in %s while looking %s from %s" %
                    (ipath_a, path_a, ipath_b, path_b))
                errors = {}
                sdirs_a = set(idirs_a)
                sdirs_b = set(idirs_b)
                errors["unexpected directories"] = sdirs_a - sdirs_b
                errors["missing directories"] = sdirs_b - sdirs_a
                sfiles_a = set(ifiles_a)
                sfiles_b = set(ifiles_b)
                errors["unexpected files"] = sfiles_a - sfiles_b
                errors["missing files"] = sfiles_b - sfiles_a
                msgs = [ "%s: %s"% (name, items)
                    for name, items in errors.items() if items]
                if msgs:
                    msgs.insert(0, "%s and %s differ :" % (
                        osp.join(path_a, ipath_a),
                        osp.join(path_b, ipath_b),
                        ))
                    self.fail("\n".join(msgs))
                # compare file contents pairwise in sorted order
                for files in (ifiles_a, ifiles_b):
                    files.sort()
                for index, path in enumerate(ifiles_a):
                    self.assertFileEquals(osp.join(path_a, ipath_a, path),
                                          osp.join(path_b, ipath_b, ifiles_b[index]))
            except StopIteration:
                # both walks exhausted: the trees are equal
                break
    assertDirEqual = assertDirEquals
def assertIsInstance(self, obj, klass, msg=None, strict=False):
"""check if an object is an instance of a class
:param obj: the Python Object to be checked
:param klass: the target class
:param msg: a String for a custom message
:param strict: if True, check that the class of <obj> is <klass>;
else check with 'isinstance'
"""
if strict:
warnings.warn('[API] Non-standard. Strict parameter has vanished',
DeprecationWarning, stacklevel=2)
if msg is None:
if strict:
msg = '%r is not of class %s but of %s'
else:
msg = '%r is not an instance of %s but of %s'
msg = msg % (obj, klass, type(obj))
if strict:
self.assertTrue(obj.__class__ is klass, msg)
else:
self.assertTrue(isinstance(obj, klass), msg)
@deprecated('Please use assertIsNone instead.')
def assertNone(self, obj, msg=None):
"""assert obj is None
:param obj: Python Object to be tested
"""
if msg is None:
msg = "reference to %r when None expected"%(obj,)
self.assertTrue( obj is None, msg )
@deprecated('Please use assertIsNotNone instead.')
def assertNotNone(self, obj, msg=None):
"""assert obj is not None"""
if msg is None:
msg = "unexpected reference to None"
self.assertTrue( obj is not None, msg )
@deprecated('Non-standard. Please use assertAlmostEqual instead.')
def assertFloatAlmostEquals(self, obj, other, prec=1e-5,
relative=False, msg=None):
"""compares if two floats have a distance smaller than expected
precision.
:param obj: a Float
:param other: another Float to be comparted to <obj>
:param prec: a Float describing the precision
:param relative: boolean switching to relative/absolute precision
:param msg: a String for a custom message
"""
if msg is None:
msg = "%r != %r" % (obj, other)
if relative:
prec = prec*math.fabs(obj)
self.assertTrue(math.fabs(obj - other) < prec, msg)
    def failUnlessRaises(self, excClass, callableObj=None, *args, **kwargs):
        """override default failUnlessRaises method to return the raised
        exception instance.

        Fail unless an exception of class excClass is thrown
        by callableObj when invoked with arguments args and keyword
        arguments kwargs. If a different type of exception is
        thrown, it will not be caught, and the test case will be
        deemed to have suffered an error, exactly as for an
        unexpected exception.

        CAUTION! There are subtle differences between Logilab and unittest2
        - exc is not returned in standard version
        - context capabilities in standard version
        - try/except/else construction (minor)

        :param excClass: the Exception to be raised
        :param callableObj: a callable Object which should raise <excClass>
        :param args: a List of arguments for <callableObj>
        :param kwargs: a List of keyword arguments for <callableObj>
        """
        # XXX cube vcslib : test_branches_from_app
        # without a callable, defer to the standard implementation
        # (context-manager usage)
        if callableObj is None:
            _assert = super(TestCase, self).assertRaises
            return _assert(excClass, callableObj, *args, **kwargs)
        try:
            callableObj(*args, **kwargs)
        except excClass as exc:
            # wrap the exception so any attribute access warns about the
            # deprecated "exc = self.assertRaises(...)" retrieval style
            class ProxyException:
                def __init__(self, obj):
                    self._obj = obj
                def __getattr__(self, attr):
                    warn_msg = ("This exception was retrieved with the old testlib way "
                                "`exc = self.assertRaises(Exc, callable)`, please use "
                                "the context manager instead'")
                    warnings.warn(warn_msg, DeprecationWarning, 2)
                    return self._obj.__getattribute__(attr)
            return ProxyException(exc)
        else:
            # the callable returned without raising: the test failed
            if hasattr(excClass, '__name__'):
                excName = excClass.__name__
            else:
                excName = str(excClass)
            raise self.failureException("%s not raised" % excName)
    assertRaises = failUnlessRaises
    # expose both spellings of the item/count assertion on every supported
    # interpreter: unittest renamed assertItemsEqual to assertCountEqual
    # in Python 3.2
    if sys.version_info >= (3,2):
        assertItemsEqual = unittest.TestCase.assertCountEqual
    else:
        assertCountEqual = unittest.TestCase.assertItemsEqual
    if sys.version_info < (2,7):
        # assertIsNotNone only appeared in unittest with Python 2.7
        def assertIsNotNone(self, value, *args, **kwargs):
            self.assertNotEqual(None, value, *args, **kwargs)
# re-wrap the legacy alias so using it emits a DeprecationWarning
TestCase.assertItemsEqual = deprecated('assertItemsEqual is deprecated, use assertCountEqual')(
    TestCase.assertItemsEqual)
import doctest
class SkippedSuite(unittest.TestSuite):
    """Fallback suite used when doctest.DocTestSuite cannot be built."""
    def test(self):
        """just there to trigger test execution"""
        # NOTE(review): `skipped_test` is not an attribute of
        # unittest.TestSuite; it is presumably injected at runtime --
        # confirm before relying on this method
        self.skipped_test('doctest module has no DocTestSuite class')
class DocTestFinder(doctest.DocTestFinder):
    """DocTestFinder variant able to skip doctests by object name."""

    def __init__(self, *args, **kwargs):
        # objects whose __name__ is listed here never produce a doctest
        self.skipped = kwargs.pop('skipped', ())
        super(DocTestFinder, self).__init__(*args, **kwargs)

    def _get_test(self, obj, name, module, globs, source_lines):
        """override default _get_test method to be able to skip tests
        according to skipped attribute's value
        """
        if getattr(obj, '__name__', '') in self.skipped:
            return None
        return super(DocTestFinder, self)._get_test(obj, name, module,
                                                    globs, source_lines)
class DocTest(TestCase):
    """trigger module doctest

    I don't know how to make unittest.main consider the DocTestSuite instance
    without this hack
    """
    # object names whose doctests must be skipped (see DocTestFinder)
    skipped = ()
    def __call__(self, result=None, runcondition=None, options=None):\
        # pylint: disable=W0613
        # NOTE(review): relies on a `module` attribute being provided by
        # subclasses -- confirm before instantiating DocTest directly
        try:
            finder = DocTestFinder(skipped=self.skipped)
            suite = doctest.DocTestSuite(self.module, test_finder=finder)
            # XXX iirk
            doctest.DocTestCase._TestCase__exc_info = sys.exc_info
        except AttributeError:
            suite = SkippedSuite()
        # doctest may gork the builtins dictionnary
        # This happen to the "_" entry used by gettext
        old_builtins = builtins.__dict__.copy()
        try:
            return suite.run(result)
        finally:
            # restore builtins exactly as they were before the run
            builtins.__dict__.clear()
            builtins.__dict__.update(old_builtins)
    run = __call__
    def test(self):
        """just there to trigger test execution"""
# module-level handle on the most recently created MockSMTP mailbox
MAILBOX = None

class MockSMTP:
    """fake smtplib.SMTP"""

    def __init__(self, host, port):
        """Record connection parameters and reset the shared mailbox."""
        self.host = host
        self.port = port
        global MAILBOX
        self.received = MAILBOX = []
        # backward-compatibility alias for the historical misspelling;
        # both names refer to the same list object
        self.reveived = self.received

    def set_debuglevel(self, debuglevel):
        """ignore debug level"""

    def sendmail(self, fromaddr, toaddres, body):
        """push sent mail in the mailbox"""
        self.received.append((fromaddr, toaddres, body))

    def quit(self):
        """ignore quit"""
class MockConfigParser(configparser.ConfigParser):
    """fake ConfigParser.ConfigParser"""

    def __init__(self, options):
        """Populate sections from a nested mapping.

        :param options: mapping of section name to {option: value} mappings
        """
        configparser.ConfigParser.__init__(self)
        # .items() replaces the Python 2-only .iteritems() calls, which
        # raise AttributeError on Python 3 dicts
        for section, pairs in options.items():
            self.add_section(section)
            for key, value in pairs.items():
                self.set(section, key, value)

    def write(self, _):
        # writing the fake configuration back is deliberately unsupported
        raise NotImplementedError()
class MockConnection:
    """fake DB-API 2.0 connexion AND cursor (i.e. cursor() return self)"""

    def __init__(self, results):
        self.received = []
        self.states = []
        self.results = results

    def _record_state(self, state):
        # remember the transaction event together with the number of
        # statements executed so far
        self.states.append((state, len(self.received)))

    def cursor(self):
        """Mock cursor method"""
        return self

    def execute(self, query, args=None):
        """Mock execute method"""
        self.received.append((query, args))

    def fetchone(self):
        """Mock fetchone method"""
        return self.results[0]

    def fetchall(self):
        """Mock fetchall method"""
        return self.results

    def commit(self):
        """Mock commit method"""
        self._record_state('commit')

    def rollback(self):
        """Mock rollback method"""
        self._record_state('rollback')

    def close(self):
        """Mock close method"""
        pass
def mock_object(**params):
    """creates an object using params to set attributes

    >>> option = mock_object(verbose=False, index=range(5))
    >>> option.verbose
    False
    """
    # the keyword arguments become class attributes of a throwaway type
    mock_cls = type('Mock', (), params)
    return mock_cls()
def create_files(paths, chroot):
    """Creates directories and files found in <path>.

    A path ending with a separator is treated as a directory; any other
    path is treated as a file whose parent directories are created too.

    :param paths: list of relative paths to files or directories
    :param chroot: the root directory in which paths will be created
    """
    directories = set()
    filenames = set()
    for relpath in paths:
        fullpath = osp.join(chroot, relpath)
        if osp.basename(fullpath) == '':
            # trailing separator: this is a directory path
            directories.add(fullpath)
        else:
            # file path: remember the file and its parent directory
            directories.add(osp.dirname(fullpath))
            filenames.add(fullpath)
    for directory in directories:
        if not osp.isdir(directory):
            os.makedirs(directory)
    for filename in filenames:
        open(filename, 'w').close()
class AttrObject: # XXX cf mock_object
    """Simple attribute bag: keyword arguments become instance attributes."""
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
def tag(*args, **kwargs):
    """descriptor adding tag to a function"""
    def decorator(func):
        # a function must not be tagged twice
        assert not hasattr(func, 'tags')
        func.tags = Tags(*args, **kwargs)
        return func
    return decorator
def require_version(version):
    """ Compare version of python interpreter to the given one. Skip the test
    if older.
    """
    def check_require_version(f):
        # parse "X.Y[.Z]" into a tuple of ints for comparison
        try:
            required = tuple(int(part) for part in version.split('.'))
        except ValueError:
            raise ValueError('%s is not a correct version : should be X.Y[.Z].' % version)
        current = sys.version_info[:3]
        if current >= required:
            # interpreter is recent enough: keep the test untouched
            return f
        def new_f(self, *args, **kwargs):
            self.skipTest('Need at least %s version of python. Current version is %s.' % (version, '.'.join([str(element) for element in current])))
        new_f.__name__ = f.__name__
        return new_f
    return check_require_version
def require_module(module):
    """ Check if the given module is loaded. Skip the test if not.
    """
    def check_require_module(f):
        try:
            __import__(module)
        except ImportError:
            # module unavailable: replace the test with a skipping stub
            def new_f(self, *args, **kwargs):
                self.skipTest('%s can not be imported.' % module)
            new_f.__name__ = f.__name__
            return new_f
        return f
    return check_require_module
# --- end of logilab.common.testlib excerpt; outwiker.core.application follows ---
# -*- coding: utf-8 -*-
import logging
from outwiker.core.config import Config
from outwiker.core.event import Event, CustomEvents
from outwiker.core.events import PostWikiCloseParams, PreWikiCloseParams
from outwiker.core.recent import RecentWiki
from outwiker.core.pluginsloader import PluginsLoader
from outwiker.core.pageuiddepot import PageUidDepot
logger = logging.getLogger('outwiker.core.application')
class ApplicationParams(object):
    def __init__(self):
        """
        Application-wide state container: the opened wiki, the main window,
        global config, plugins, shared data and every event other components
        subscribe to.
        """
        # Opened wiki
        self.__wikiroot = None
        # Application's main window
        self.__mainWindow = None
        # Application's global config
        self.config = None
        self.recentWiki = None
        self.actionController = None
        self.plugins = PluginsLoader(self)
        self.pageUidDepot = PageUidDepot()
        # Values for shared purpose
        self.sharedData = {}
        self.customEvents = CustomEvents()
        # Events
        # Opening wiki event
        # Parameters:
        #     root - opened wiki root (it may be None)
        self.onWikiOpen = Event()
        # Closing wiki event
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the outwiker.core.events.PreWikiCloseParams
        #         class
        self.onPreWikiClose = Event()
        # Updating page wiki event
        # Parameters:
        #     page - updated page
        #     **kwargs
        # kwargs contain 'change' key, which contain changing flags
        self.onPageUpdate = Event()
        # Creating new wiki page
        # Parameters:
        #     sender - new page
        self.onPageCreate = Event()
        # Tree updating event
        # Parameters:
        #     sender - Page, because of which the tree is updated.
        self.onTreeUpdate = Event()
        # Other page selection event
        # Parameters:
        #     sender - selected page
        self.onPageSelect = Event()
        # User want insert link to selected attached files to page
        # Parameters:
        #     fnames - selected file names (names only, without full paths)
        self.onAttachmentPaste = Event()
        # Changings in the bookmarks list event
        # Parameters:
        #     bookmark - Bookmarks class instance
        self.onBookmarksChanged = Event()
        # Removing the page event
        # Parameters:
        #     page - page is removed
        self.onPageRemove = Event()
        # Renaming page event
        # Parameters:
        #     page - page is renamed,
        #     oldSubpath - previous relative path to page
        self.onPageRename = Event()
        # Beginning complex tree updating (updating of several steps) event
        # Parameters:
        #     root - wiki tree root
        self.onStartTreeUpdate = Event()
        # Finishing complex tree updating (updating of several steps) event
        # Parameters:
        #     root - wiki tree root
        self.onEndTreeUpdate = Event()
        # Beginning HTML rendering event
        # Parameters:
        #     page - rendered page
        #     htmlView - window for HTML view
        self.onHtmlRenderingBegin = Event()
        # Finishing HTML rendering event
        # Parameters:
        #     page - rendered page
        #     htmlView - window for HTML view
        self.onHtmlRenderingEnd = Event()
        # Changing page order event
        # Parameters:
        #     page - page with changed order
        self.onPageOrderChange = Event()
        # Event for forced saving page state
        # (e.g. by the loss the focus or by timer)
        # Parameters:
        #     --
        self.onForceSave = Event()
        # The event occurs after wiki parser (Parser class) creation,
        # but before it using
        # Parameter:
        #     parser - Parser class instance
        self.onWikiParserPrepare = Event()
        # Event occurs during preferences dialog creation
        # Parameters:
        #     dialog - outwiker.gui.preferences.prefdialog.PrefDialog
        #         class instance
        self.onPreferencesDialogCreate = Event()
        # Event occurs after preferences dialog closing.
        # Parameters:
        #     dialog - outwiker.gui.preferences.prefdialog.PrefDialog
        #         class instance
        self.onPreferencesDialogClose = Event()
        # Event occurs after (!) the page view creation
        # (inside CurrentPagePanel instance)
        # Parameters:
        #     page - new selected page
        self.onPageViewCreate = Event()
        # Event occurs before(!) the page view removing
        # (inside CurrentPagePanel instance)
        # Parameters:
        #     page - Current selected page
        self.onPageViewDestroy = Event()
        # Event occurs after the popup menu creation by right mouse click
        # on the notes tree
        # Parameters:
        #     menu - created popup menu,
        #     page - the page on which was right clicked in the notes tree
        self.onTreePopupMenu = Event()
        # Event occurs before HTML generation (for wiki and HTML pages)
        # Order of the calling preprocessing events is not regulated
        # Parameters:
        #     page - page for which HTML is generated
        #     params - instance of the outwiker.core.events.PreprocessingParams
        #         class
        self.onPreprocessing = Event()
        # Event occurs after HTML generation (for wiki and HTML pages)
        # Order of the calling preprocessing events is not regulated
        # Parameters:
        #     page - page for which HTML is generated
        #     params - instance of the outwiker.core.events.PostprocessingParams
        #         class
        self.onPostprocessing = Event()
        # Event occurs after wiki parsing but before HTML improving
        # Parameters:
        #     page - page for which HTML is generated
        #     params - instance of the
        #         outwiker.core.events.PreHtmlImprovingParams class
        self.onPreHtmlImproving = Event()
        # Event occurs during HtmlImproverFactory instance creation
        # Parameters:
        #     factory - HtmlImproverFactory instance in which can add
        #         the new HtmlImprover instances by add() method
        self.onPrepareHtmlImprovers = Event()
        # Event occurs when cursor hovers under link on preview tab
        # Parameters:
        #     page - current page
        #     params - instance of the outwiker.core.events.HoverLinkParams
        #         class
        self.onHoverLink = Event()
        # Event occurs when user click to link on a page
        # Parameters:
        #     page - current page
        #     params - instance of the outwiker.core.events.LinkClickParams
        #         class
        self.onLinkClick = Event()
        # Event occurs when user click with right button in text editor
        # Parameters:
        #     page - current page
        #     params - instance of the the
        #         outwiker.core.events.EditorPopupMenuParams class
        self.onEditorPopupMenu = Event()
        # Event occurs after page dialog creation
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PageDialogInitParams class
        self.onPageDialogInit = Event()
        # Event occurs before page dialog will be destroyed
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PageDialogDestroyParams class
        self.onPageDialogDestroy = Event()
        # Event occurs after page type changing
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PageDialogPageTypeChangedParams class
        self.onPageDialogPageTypeChanged = Event()
        # Event occurs after page title changing
        # Parameters:
        #     page - current(selected) page
        #     params - instance of the PageDialogPageTitleChangedParams class
        self.onPageDialogPageTitleChanged = Event()
        # Event occurs after page style changing
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PageDialogPageStyleChangedParams class
        self.onPageDialogPageStyleChanged = Event()
        # Event occurs after page icon changing
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PageDialogPageIconChangedParams class
        self.onPageDialogPageIconChanged = Event()
        # Event occurs after page tag list changing
        # Parameters:
        #     page - current(selected) page
        #     params - instance of the PageDialogPageTagsChangedParams class
        self.onPageDialogPageTagsChanged = Event()
        # Event occurs during page dialog initialization,
        # during general panel creation. Evens sender expect what event
        # handlers will fill the page factories list with addPageFactory method
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PageDialogPageFactoriesNeededParams
        #         class
        self.onPageDialogPageFactoriesNeeded = Event()
        # Event occurs by TextEditor when it needs styles
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the EditorStyleNeededParams class
        self.onEditorStyleNeeded = Event()
        # Event forces update and render current page
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PageUpdateNeededParams class
        self.onPageUpdateNeeded = Event()
        # Event occurs before wiki opening
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PreWikiOpenParams class
        self.onPreWikiOpen = Event()
        # Event occurs after wiki opening
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PostWikiOpenParams class
        self.onPostWikiOpen = Event()
        # Event occurs after wiki closing
        # Parameters:
        #     params - instance of the PostWikiCloseParams class
        self.onPostWikiClose = Event()
        # Event occurs in the IconsPanel after generation list of
        # the icons groups.
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the IconsGroupsListInitParams class
        self.onIconsGroupsListInit = Event()
        # Event occurs after switch mode of a page: text / preview / HTML / ...
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PageModeChangeParams class
        self.onPageModeChange = Event()
        # Event occurs after change attached file list.
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the AttachListChangedParams class
        self.onAttachListChanged = Event()
        # Event occurs after key pressing in the notes text editor
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the TextEditorKeyDownParams class
        self.onTextEditorKeyDown = Event()
        # Event occurs after caret moving in a text editor
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the TextEditorCaretMoveParams class
        self.onTextEditorCaretMove = Event()
        # Event occurs after page content reading. The content can be changed
        # by event handlers
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PostContentReadingParams class
        self.onPostContentReading = Event()
        # Event occurs before page content writing. The content can be changed
        # by event handlers
        # Parameters:
        #     page - current (selected) page
        #     params - instance of the PreContentWritingParams class
        self.onPreContentWriting = Event()
    def init(self, fullConfigPath):
        """
        Initialize the application configuration.

        fullConfigPath -- full path to the main configuration file.
        Creates the Config wrapper and the recent-wiki list backed by it.
        """
        self.fullConfigPath = fullConfigPath
        self.config = Config(fullConfigPath)
        # The recent-wiki history is persisted inside the same config file.
        self.recentWiki = RecentWiki(self.config)
    def clear(self):
        """
        Release application state: detach handlers from the opened wiki
        (if any), clear all event subscribers and drop the references to
        the wiki, config and main window.
        """
        if self.wikiroot is not None:
            self.__unbindWikiEvents(self.wikiroot)
        self._unbindAllEvents()
        # NOTE: assigning None goes through the wikiroot setter, which also
        # saves and fires the close events for the current wiki.
        self.wikiroot = None
        self.config = None
        self.mainWindow = None
def _unbindAllEvents(self):
for member_name in sorted(dir(self)):
member = getattr(self, member_name)
if isinstance(member, Event):
member.clear()
for key in list(self.customEvents.getKeys()):
self.customEvents.clear(key)
    @property
    def wikiroot(self):
        """
        Root page of the currently opened wiki, or None if no wiki is open.
        """
        return self.__wikiroot
    @wikiroot.setter
    def wikiroot(self, value):
        """
        Replace the currently opened wiki with *value* (may be None).

        Closing the previous wiki fires onPreWikiClose first; a handler may
        set abortClose on the params object to cancel the switch entirely.
        """
        if self.__wikiroot is not None:
            wikiPath = self.__wikiroot.path
            preWikiCloseParams = PreWikiCloseParams(self.__wikiroot)
            self.onPreWikiClose(self.selectedPage, preWikiCloseParams)
            if preWikiCloseParams.abortClose:
                logger.debug('Wiki closing aborted: {}'.format(wikiPath))
                return
            self.__unbindWikiEvents(self.__wikiroot)
            # Best effort: a failed save must not prevent closing the wiki.
            try:
                self.__wikiroot.save()
            except OSError:
                logger.error("Can't save notes tree settings: {}".format(self.__wikiroot.path))
            self.__wikiroot = None
            postWikiCloseParams = PostWikiCloseParams(wikiPath)
            self.onPostWikiClose(postWikiCloseParams)
        self.__wikiroot = value
        if self.__wikiroot is not None:
            self.__bindWikiEvents(self.__wikiroot)
            self.pageUidDepot = PageUidDepot(self.__wikiroot)
        # Fired even when the new root is None (the wiki was just closed).
        self.onWikiOpen(self.__wikiroot)
    @property
    def mainWindow(self):
        """
        Main window instance, or None if the window has not been created.
        """
        return self.__mainWindow
    @mainWindow.setter
    def mainWindow(self, value):
        """
        Set the main window instance for the program.
        """
        self.__mainWindow = value
    def __bindWikiEvents(self, wiki):
        """
        Subscribe the application-level events to the corresponding events
        of *wiki* so wiki events are forwarded to application subscribers.
        Must be kept in sync with __unbindWikiEvents.
        """
        wiki.onPageSelect += self.onPageSelect
        wiki.onPageUpdate += self.onPageUpdate
        wiki.onTreeUpdate += self.onTreeUpdate
        wiki.onStartTreeUpdate += self.onStartTreeUpdate
        wiki.onEndTreeUpdate += self.onEndTreeUpdate
        wiki.onPageOrderChange += self.onPageOrderChange
        wiki.onPageRename += self.onPageRename
        wiki.onPageCreate += self.onPageCreate
        wiki.onPageRemove += self.onPageRemove
        wiki.onAttachListChanged += self.onAttachListChanged
        wiki.bookmarks.onBookmarksChanged += self.onBookmarksChanged
        wiki.onPostContentReading += self.onPostContentReading
        wiki.onPreContentWriting += self.onPreContentWriting
    def __unbindWikiEvents(self, wiki):
        """
        Unsubscribe from all wiki events; exact mirror of __bindWikiEvents.
        """
        wiki.onPageSelect -= self.onPageSelect
        wiki.onPageUpdate -= self.onPageUpdate
        wiki.onTreeUpdate -= self.onTreeUpdate
        wiki.onStartTreeUpdate -= self.onStartTreeUpdate
        wiki.onEndTreeUpdate -= self.onEndTreeUpdate
        wiki.onPageOrderChange -= self.onPageOrderChange
        wiki.onPageRename -= self.onPageRename
        wiki.onPageCreate -= self.onPageCreate
        wiki.onPageRemove -= self.onPageRemove
        wiki.onAttachListChanged -= self.onAttachListChanged
        wiki.bookmarks.onBookmarksChanged -= self.onBookmarksChanged
        wiki.onPostContentReading -= self.onPostContentReading
        wiki.onPreContentWriting -= self.onPreContentWriting
@property
def selectedPage(self):
"""
Return the instance of the selected page or None if no selected page.
"""
if self.__wikiroot is None:
return None
return self.__wikiroot.selectedPage
    @selectedPage.setter
    def selectedPage(self, page):
        """
        Make *page* the selected page of the opened wiki.

        No-op when no wiki is opened or the page is already selected.
        """
        if (self.__wikiroot is not None and
                self.__wikiroot.selectedPage != page):
            self.__wikiroot.selectedPage = page
def getEvent(self, name):
"""Return build-in event or custom event"""
if hasattr(self, name) and isinstance(getattr(self, name), Event):
return getattr(self, name)
return self.customEvents.get(name)
Application = ApplicationParams()
| unreal666/outwiker | src/outwiker/core/application.py | Python | gpl-3.0 | 16,964 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import pickle
try:
import skopt as sko
except ImportError:
sko = None
from ray.tune.suggest.suggestion import SuggestionAlgorithm
logger = logging.getLogger(__name__)
def _validate_warmstart(parameter_names, points_to_evaluate,
evaluated_rewards):
if points_to_evaluate:
if not isinstance(points_to_evaluate, list):
raise TypeError(
"points_to_evaluate expected to be a list, got {}.".format(
type(points_to_evaluate)))
for point in points_to_evaluate:
if not isinstance(point, list):
raise TypeError(
"points_to_evaluate expected to include list, got {}.".
format(point))
if not len(point) == len(parameter_names):
raise ValueError("Dim of point {}".format(point) +
" and parameter_names {}".format(
parameter_names) + " do not match.")
if points_to_evaluate and evaluated_rewards:
if not isinstance(evaluated_rewards, list):
raise TypeError(
"evaluated_rewards expected to be a list, got {}.".format(
type(evaluated_rewards)))
if not len(evaluated_rewards) == len(points_to_evaluate):
raise ValueError(
"Dim of evaluated_rewards {}".format(evaluated_rewards) +
" and points_to_evaluate {}".format(points_to_evaluate) +
" do not match.")
class SkOptSearch(SuggestionAlgorithm):
    """A wrapper around skopt to provide trial suggestions.
    Requires skopt to be installed.
    Parameters:
        optimizer (skopt.optimizer.Optimizer): Optimizer provided
            from skopt.
        parameter_names (list): List of parameter names. Should match
            the dimension of the optimizer output.
        max_concurrent (int): Number of maximum concurrent trials. Defaults
            to 10.
        metric (str): The training result objective value attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        points_to_evaluate (list of lists): A list of points you'd like to run
            first before sampling from the optimiser, e.g. these could be
            parameter configurations you already know work well to help
            the optimiser select good values. Each point is a list of the
            parameters using the order definition given by parameter_names.
        evaluated_rewards (list): If you have previously evaluated the
            parameters passed in as points_to_evaluate you can avoid
            re-running those trials by passing in the reward attributes
            as a list so the optimiser can be told the results without
            needing to re-compute the trial. Must be the same length as
            points_to_evaluate. (See tune/examples/skopt_example.py)
    Example:
        >>> from skopt import Optimizer
        >>> optimizer = Optimizer([(0,20),(-100,100)])
        >>> current_best_params = [[10, 0], [15, -20]]
        >>> algo = SkOptSearch(optimizer,
        >>>     ["width", "height"],
        >>>     max_concurrent=4,
        >>>     metric="mean_loss",
        >>>     mode="min",
        >>>     points_to_evaluate=current_best_params)
    """
    def __init__(self,
                 optimizer,
                 parameter_names,
                 max_concurrent=10,
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 points_to_evaluate=None,
                 evaluated_rewards=None,
                 **kwargs):
        assert sko is not None, """skopt must be installed!
            You can install Skopt with the command:
            `pip install scikit-optimize`."""
        assert type(max_concurrent) is int and max_concurrent > 0
        _validate_warmstart(parameter_names, points_to_evaluate,
                            evaluated_rewards)
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        # Backwards compatibility: reward_attr implied maximization.
        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        self._initial_points = []
        if points_to_evaluate and evaluated_rewards:
            # Rewards already known: feed them straight to the optimizer.
            optimizer.tell(points_to_evaluate, evaluated_rewards)
        elif points_to_evaluate:
            # Points without rewards are queued and suggested first.
            self._initial_points = points_to_evaluate
        self._max_concurrent = max_concurrent
        self._parameters = parameter_names
        self._metric = metric
        # Skopt internally minimizes, so "max" => -1
        if mode == "max":
            self._metric_op = -1.
        elif mode == "min":
            self._metric_op = 1.
        self._skopt_opt = optimizer
        # trial_id -> the parameter vector suggested for that trial.
        self._live_trial_mapping = {}
        super(SkOptSearch, self).__init__(**kwargs)
    def _suggest(self, trial_id):
        """Return a config dict for a new trial, or None when the
        concurrency limit has been reached.

        Warm-start points are consumed first; afterwards the skopt
        optimizer is asked for new candidates.
        """
        if self._num_live_trials() >= self._max_concurrent:
            return None
        if self._initial_points:
            suggested_config = self._initial_points[0]
            del self._initial_points[0]
        else:
            suggested_config = self._skopt_opt.ask()
        self._live_trial_mapping[trial_id] = suggested_config
        return dict(zip(self._parameters, suggested_config))
    def on_trial_result(self, trial_id, result):
        """Intermediate results are ignored; skopt only learns on completion."""
        pass
    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        """Passes the result to skopt unless early terminated or errored.
        The result is internally negated when interacting with Skopt
        so that Skopt Optimizers can "maximize" this value,
        as it minimizes on default.
        """
        skopt_trial_info = self._live_trial_mapping.pop(trial_id)
        if result:
            self._skopt_opt.tell(skopt_trial_info,
                                 self._metric_op * result[self._metric])
    def _num_live_trials(self):
        """Number of trials suggested but not yet completed."""
        return len(self._live_trial_mapping)
    def save(self, checkpoint_dir):
        """Pickle the pending warm-start points and the optimizer state
        to the given file path (despite the parameter name, this is a
        file, not a directory)."""
        trials_object = (self._initial_points, self._skopt_opt)
        with open(checkpoint_dir, "wb") as outputFile:
            pickle.dump(trials_object, outputFile)
    def restore(self, checkpoint_dir):
        """Restore state previously written by save() from a file path."""
        with open(checkpoint_dir, "rb") as inputFile:
            trials_object = pickle.load(inputFile)
        self._initial_points = trials_object[0]
        self._skopt_opt = trials_object[1]
| ujvl/ray-ng | python/ray/tune/suggest/skopt.py | Python | apache-2.0 | 6,934 |
'''
DeadlineV5Migration.py - Easily Migrate Deadline v5 -> v6 (only)
Batch migrate en mass all Deadline v5 users/slaves/pools/groups/limits to a Deadline v6 DB
For Sys Admins / IT only. Be careful as existing v6 settings can be overwritten!
Deadline v7 ships with an [IMPORT SETTINGS] WIZARD, so please use this feature for v6 -> v7 migrations in the future.
'''
from System.IO import *
from System.Collections.Specialized import *
from Deadline.Scripting import *
from DeadlineUI.Controls.Scripting.DeadlineScriptDialog import DeadlineScriptDialog
import os
try:
import xml.etree.cElementTree as xml
except ImportError:
import xml.etree.ElementTree as xml
########################################################################
# Globals
########################################################################
scriptDialog = None
########################################################################
# Main Function Called By Deadline
########################################################################
def __main__():
    """Build and show the migration dialog.

    Creates the global DeadlineScriptDialog, lays out one section per
    migration category (pools/groups, slaves, limits, users) and wires the
    checkbox/button callbacks. The "overwrite" options start disabled and
    are toggled by the corresponding *BoxButtonPressed handlers.
    """
    global scriptDialog
    dialogWidth = 500
    dialogHeight = -1
    buttonWidth = 100
    labelWidth = 300
    controlWidth = 400
    scriptDialog = DeadlineScriptDialog()
    scriptDialog.SetSize(dialogWidth, dialogHeight)
    scriptDialog.SetTitle("Deadline v5 --> v6 Migration Tool")
    # Repository path picker.
    scriptDialog.AddRow()
    scriptDialog.AddControl("RepoPathLabel", "LabelControl", "v5 Repository Root Directory", labelWidth - 100, -1, "Select the (rooted - mapped drive letter) v5 Root Directory in your Deadline Repository.")
    scriptDialog.AddSelectionControl("RepoPathBox", "FolderBrowserControl", "", "", controlWidth, -1)
    scriptDialog.EndRow()
    # Pools / Groups section.
    scriptDialog.AddControl("Separator1", "SeparatorControl", "Pools / Groups", labelWidth + controlWidth - 100, -1)
    scriptDialog.AddRow()
    scriptDialog.AddSelectionControl("PoolsBox", "CheckBoxControl", False, "Migrate Pools", labelWidth, -1)
    scriptDialog.EndRow()
    scriptDialog.AddRow()
    scriptDialog.AddSelectionControl("GroupsBox", "CheckBoxControl", False, "Migrate Groups", labelWidth, -1)
    scriptDialog.EndRow()
    # Slaves section.
    scriptDialog.AddControl("Separator2", "SeparatorControl", "Slaves", labelWidth + controlWidth - 100, -1)
    scriptDialog.AddRow()
    SlavesBoxButton = scriptDialog.AddSelectionControl("SlavesBox", "CheckBoxControl", False, "Migrate Slaves", labelWidth, -1)
    SlavesBoxButton.ValueModified.connect(SlavesBoxButtonPressed)
    scriptDialog.EndRow()
    scriptDialog.AddRow()
    scriptDialog.AddSelectionControl("OverwriteExistingSlavesBox", "CheckBoxControl", False, "Force Overwrite of Existing Identical Slaves", labelWidth + 200, -1, "If enabled, any existing v6 slaves with the same name as an imported XML file, will be overwritten!")
    scriptDialog.SetEnabled("OverwriteExistingSlavesBox", False)
    scriptDialog.EndRow()
    # Limits section.
    scriptDialog.AddControl("Separator3", "SeparatorControl", "Limits", labelWidth + controlWidth - 100, -1)
    scriptDialog.AddRow()
    LimitsBoxButton = scriptDialog.AddSelectionControl("LimitsBox", "CheckBoxControl", False, "Migrate Limits", labelWidth, -1)
    LimitsBoxButton.ValueModified.connect(LimitsBoxButtonPressed)
    scriptDialog.EndRow()
    scriptDialog.AddRow()
    scriptDialog.AddSelectionControl("OverwriteExistingLimitsBox", "CheckBoxControl", False, "Force Overwrite of Existing Identical Limits", labelWidth + 200, -1, "If enabled, any existing v6 limits with the same name, will be overwritten!")
    scriptDialog.SetEnabled("OverwriteExistingLimitsBox", False)
    scriptDialog.EndRow()
    # Users section.
    scriptDialog.AddControl("Separator4", "SeparatorControl", "Users", labelWidth + controlWidth - 100, -1)
    scriptDialog.AddRow()
    UsersBoxButton = scriptDialog.AddSelectionControl("UsersBox", "CheckBoxControl", False, "Migrate Users", labelWidth, -1)
    UsersBoxButton.ValueModified.connect(UsersBoxButtonPressed)
    scriptDialog.EndRow()
    scriptDialog.AddRow()
    scriptDialog.AddSelectionControl("OverwriteExistingUsersBox", "CheckBoxControl", False, "Force Overwrite of Existing Identical User Names", labelWidth + 200, -1, "If enabled, any existing v6 users with the same name as an imported XML file, will be overwritten!")
    scriptDialog.SetEnabled("OverwriteExistingUsersBox", False)
    scriptDialog.EndRow()
    scriptDialog.AddRow()
    scriptDialog.AddSelectionControl("MigrateGroupsBox", "CheckBoxControl", False, "Migrate v5 User Groups (Ensure 'Normal' / 'Power' groups created prior)", labelWidth + 200, -1, "If enabled, Deadline v5 User Groups: Normal & Power will be migrated and applicable users added automatically.")
    scriptDialog.SetEnabled("MigrateGroupsBox", False)
    scriptDialog.EndRow()
    # Action buttons.
    scriptDialog.AddRow()
    scriptDialog.AddControl("DummyLabel1", "LabelControl", "", dialogWidth - (buttonWidth * 2) + 95, -1)
    sendButton = scriptDialog.AddControl("SelectButton", "ButtonControl", "Execute", buttonWidth, -1)
    sendButton.ValueModified.connect(ExecuteMigration)
    closeButton = scriptDialog.AddControl("CloseButton", "ButtonControl", "Close", buttonWidth, -1)
    closeButton.ValueModified.connect(CloseButtonPressed)
    scriptDialog.EndRow()
    scriptDialog.ShowDialog(True)
########################################################################
# Helper Functions
########################################################################
def InitializeDialog(*args):
    """Sync the enabled state of the dependent checkboxes with the three
    master 'Migrate ...' checkboxes."""
    global scriptDialog
    SlavesBoxButtonPressed()
    LimitsBoxButtonPressed()
    UsersBoxButtonPressed()
def CloseButtonPressed(*args):
    """Close the migration dialog."""
    global scriptDialog
    scriptDialog.CloseDialog()
def SlavesBoxButtonPressed(*args):
    """Enable the slave-overwrite option only while 'Migrate Slaves' is checked."""
    global scriptDialog
    enabled = bool(scriptDialog.GetValue("SlavesBox"))
    scriptDialog.SetEnabled("OverwriteExistingSlavesBox", enabled)
def LimitsBoxButtonPressed(*args):
    """Enable the limit-overwrite option only while 'Migrate Limits' is checked."""
    global scriptDialog
    enabled = bool(scriptDialog.GetValue("LimitsBox"))
    scriptDialog.SetEnabled("OverwriteExistingLimitsBox", enabled)
def UsersBoxButtonPressed(*args):
    """Enable both user-related options only while 'Migrate Users' is checked."""
    global scriptDialog
    enabled = bool(scriptDialog.GetValue("UsersBox"))
    scriptDialog.SetEnabled("OverwriteExistingUsersBox", enabled)
    scriptDialog.SetEnabled("MigrateGroupsBox", enabled)
def ExecuteMigration():
    """Run the selected v5 -> v6 migrations.

    Validates that the chosen directory looks like a v5 repository, then
    migrates (in order) pools, groups, slaves, users and limits, showing a
    summary message box per category. Any unexpected exception is caught at
    the bottom and shown as a traceback dialog.
    """
    global scriptDialog
    try:
        # Check Repo Path has been specified
        repoPath = scriptDialog.GetValue("RepoPathBox")
        if repoPath == "":
            scriptDialog.ShowMessageBox("No Repository Root Directory specified!", "Error")
            return
        # Check it looks like a v5 Repo Path and if not, return
        poolsFile = (os.path.join(repoPath, "settings", "pools.ini"))
        groupsFile = (os.path.join(repoPath, "settings", "groups.ini"))
        slavesPath = (os.path.join(repoPath, "slaves"))
        usersPath = (os.path.join(repoPath, "users"))
        limitsPath = (os.path.join(repoPath, "limitGroups"))
        if(not File.Exists(poolsFile)):
            scriptDialog.ShowMessageBox("pools.ini File [%s] does not exist!\n\nAre you sure this is a valid Deadline v5 Repository Path?" % poolsFile, "Error")
            return
        if(not File.Exists(groupsFile)):
            scriptDialog.ShowMessageBox("groups.ini File [%s] does not exist!\n\nAre you sure this is a valid Deadline v5 Repository Path?" % groupsFile, "Error")
            return
        if(not Directory.Exists(slavesPath)):
            scriptDialog.ShowMessageBox("Slaves Directory [%s] does NOT exist!\n\nAre you sure this is a valid Deadline v5 Repository Path?" % slavesPath, "Error")
            return
        if(not Directory.Exists(usersPath)):
            scriptDialog.ShowMessageBox("Users Directory [%s] does NOT exist!\n\nAre you sure this is a valid Deadline v5 Repository Path?" % usersPath, "Error")
            return
        if(not Directory.Exists(limitsPath)):
            scriptDialog.ShowMessageBox("Limits Directory [%s] does NOT exist!\n\nAre you sure this is a valid Deadline v5 Repository Path?" % limitsPath, "Error")
            return
        # Read INI File
        # NOTE(review): the file handle is never closed explicitly and
        # LogWarning is not defined in this module -- TODO confirm both.
        def ReadINIFile(iniFile):
            if iniFile != "":
                Lines = open(iniFile, "r").read().splitlines()
                return Lines
            else:
                LogWarning("No [*_*.ini] File Found?")
                return ""
        # POOLS: every "name=..." line in pools.ini becomes a pool.
        migratePools = scriptDialog.GetValue("PoolsBox")
        if bool(migratePools):
            pools = []
            paths = ReadINIFile(poolsFile)
            for path in paths:
                key, val = path.split("=")
                pools.append(key)
            print("pools: %s" % pools)
            poolAdded = 0
            poolDuplicate = 0
            for pool in pools:
                # Bare except: AddPool is assumed to raise on duplicates,
                # which is counted rather than treated as an error.
                try:
                    RepositoryUtils.AddPool(pool)
                    poolAdded += 1
                except:
                    poolDuplicate += 1
            scriptDialog.ShowMessageBox("Pools injected into DB: %d\nPools failed to be injected into DB: %d" % (poolAdded, poolDuplicate), "Pool Results")
        # GROUPS: same scheme as pools, driven by groups.ini.
        migrateGroups = scriptDialog.GetValue("GroupsBox")
        if bool(migrateGroups):
            groups = []
            paths = ReadINIFile(groupsFile)
            for path in paths:
                key, val = path.split("=")
                groups.append(key)
            print("groups: %s" % groups)
            groupAdded = 0
            groupDuplicate = 0
            for group in groups:
                try:
                    RepositoryUtils.AddGroup(group)
                    groupAdded += 1
                except:
                    groupDuplicate += 1
            scriptDialog.ShowMessageBox("Groups injected into DB: %d\nGroups failed to be injected into DB: %d" % (groupAdded, groupDuplicate), "Group Results")
        # SLAVES: one *.slaveSettings XML file per v5 slave.
        migrateSlaves = scriptDialog.GetValue("SlavesBox")
        if bool(migrateSlaves):
            ssFiles = Directory.GetFiles(slavesPath, "*.slaveSettings", SearchOption.AllDirectories)
            args = StringCollection()
            args.Add("-GetSlaveNames")
            ExistingSlaves = (ClientUtils.ExecuteCommandAndGetOutput(args)).split()
            slavesAdded = 0
            slavesSkipped = 0
            # Create v6 Slave Object if not already existing for each old v5 slave name
            for ssFile in ssFiles:
                slaveName = (Path.GetFileNameWithoutExtension(ssFile)).lower()
                # Initalise for each Slave XML File
                Description = ""
                if slaveName in ExistingSlaves and (not scriptDialog.GetValue("OverwriteExistingSlavesBox")):
                    slavesSkipped += 1
                else:
                    # Parse Slave Settings XML File
                    # First pass: extract only the Description, which is set
                    # via deadlinecommand to create the v6 slave record.
                    tree = xml.ElementTree()
                    tree.parse(ssFile)
                    root = tree.getroot()
                    for child in root.getchildren():
                        if child.tag == 'Description':
                            if child.text is not None:
                                if child.text != "":
                                    Description = str(child.text)
                                else:
                                    Description = "BLANK"
                    args = StringCollection()
                    args.Add("-SetSlaveSetting")
                    args.Add("%s" % slaveName)
                    args.Add("Description %s" % Description)
                    ClientUtils.ExecuteCommand(args)
                    slaveSettings = RepositoryUtils.GetSlaveSettings(slaveName, True)
                    # Initalise for each Slave Settings XML File
                    Comment = ""
                    ConcurrentTasksLimit = 0
                    Enabled = True
                    SlaveIncludeInNoneGroup = True
                    SlaveIncludeInNonePool = True
                    NormalizedRenderTimeMultiplier = 0.0
                    NormalizedTimeoutMultiplier = 0.0
                    OverrideCpuAffinity = False
                    CpuAffinity = []
                    Pools = []
                    Groups = []
                    # Parse Slave Settings XML File
                    # Second pass: copy every supported setting onto the
                    # freshly fetched v6 slaveSettings object.
                    tree = xml.ElementTree()
                    tree.parse(ssFile)
                    root = tree.getroot()
                    for child in root.getchildren():
                        if child.tag == 'Comment':
                            if child.text is not None:
                                if child.text != "":
                                    slaveSettings.SlaveComment = str(child.text)
                        if child.tag == 'ConcurrentTasksLimit':
                            if child.text is not None:
                                slaveSettings.SlaveConcurrentTasksLimit = int(child.text)
                        if child.tag == 'Description':
                            if child.text is not None:
                                if child.text != "":
                                    slaveSettings.SlaveDescription = str(child.text)
                        if child.tag == 'Enabled':
                            if child.text is not None:
                                if child.text == "true":
                                    slaveSettings.SlaveEnabled = True
                                else:
                                    slaveSettings.SlaveEnabled = False
                        if child.tag == 'SlaveIncludeInNoneGroup':
                            if child.text is not None:
                                if child.text == "true":
                                    slaveSettings.SlaveIncludeInNoneGroup = True
                                else:
                                    slaveSettings.SlaveIncludeInNoneGroup = False
                        if child.tag == 'SlaveIncludeInNonePool':
                            if child.text is not None:
                                if child.text == "true":
                                    slaveSettings.SlaveIncludeInNonePool = True
                                else:
                                    slaveSettings.SlaveIncludeInNonePool = False
                        if child.tag == 'NormalizedRenderTimeMultiplier':
                            if child.text is not None:
                                slaveSettings.SlaveNormalizedRenderTimeMultiplier = float(child.text)
                        if child.tag == 'NormalizedTimeoutMultiplier':
                            if child.text is not None:
                                slaveSettings.SlaveNormalizedTimeoutMultiplier = float(child.text)
                        if child.tag == 'OverrideCpuAffinity':
                            if child.text is not None:
                                if child.text == "true":
                                    slaveSettings.SlaveOverrideCpuAffinity = True
                                    OverrideCpuAffinity = True
                                else:
                                    slaveSettings.SlaveOverrideCpuAffinity = False
                        if child.tag == 'CpuAffinity':
                            if child.text is not None:
                                if OverrideCpuAffinity:
                                    slaveSettings.SlaveCpuAffinity = child.text
                        if child.tag == 'Pools':
                            for step_child in child:
                                if step_child is not None:
                                    Pools.append(str(step_child.text))
                        if child.tag == 'Groups':
                            for step_child in child:
                                if step_child is not None:
                                    Groups.append(str(step_child.text))
                    # Set Pools for Slave
                    slaveSettings.SetSlavePools(Pools)
                    # Set Groups for Slave
                    slaveSettings.SetSlaveGroups(Groups)
                    # Save all the settings for this slave object
                    RepositoryUtils.SaveSlaveSettings(slaveSettings)
                    slavesAdded += 1
            scriptDialog.ShowMessageBox("Slaves injected into DB: %d\nSlaves skipped: %d\n" % (slavesAdded, slavesSkipped), "Slaves Results")
        # USERS: one *.xml file per v5 user, imported via deadlinecommand.
        migrateUsers = scriptDialog.GetValue("UsersBox")
        if bool(migrateUsers):
            userFiles = Directory.GetFiles(usersPath, "*.xml", SearchOption.TopDirectoryOnly)
            if(len(userFiles) == 0):
                scriptDialog.ShowMessageBox("No User XML file(s) found!", "Error")
                return
            if(scriptDialog.GetValue("MigrateGroupsBox")):
                result = scriptDialog.ShowMessageBox("Have you already created User Groups: 'Normal' and 'Power' in your repository?", "Warning", ("Yes", "No"))
                if(result == "No"):
                    return
            args = StringCollection()
            args.Add("-GetUserNames")
            ExistingUsers = (ClientUtils.ExecuteCommandAndGetOutput(args)).split()
            successes = 0
            failures = 0
            for userFile in userFiles:
                # Initalise for each User XML File
                UserName = ""
                Email = ""
                MachineName = ""
                NotifyByEmail = ""
                NotifyByPopupMessage = ""
                UserLevel = "Normal"
                UserName = (Path.GetFileNameWithoutExtension(userFile)).lower()
                # Pre-existing users also count as "failures" in the summary.
                if UserName in ExistingUsers and (not scriptDialog.GetValue("OverwriteExistingUsersBox")):
                    failures += 1
                else:
                    # Parse User XML File
                    tree = xml.ElementTree()
                    tree.parse(userFile)
                    root = tree.getroot()
                    for child in root.getchildren():
                        if child.tag == 'EmailAddress':
                            if child.text is not None:
                                Email = child.text
                        if child.tag == 'MachineName':
                            if child.text is not None:
                                MachineName = child.text
                        if child.tag == 'EmailNotification':
                            if child.text is not None:
                                NotifyByEmail = child.text
                        if child.tag == 'NetsendNotification':
                            if child.text is not None:
                                NotifyByPopupMessage = child.text
                        if child.tag == 'UserLevel':
                            if child.text is not None:
                                UserLevel = child.text
                    args = StringCollection()
                    args.Add("-SetUser")
                    args.Add("%s" % UserName)
                    args.Add("%s" % Email)
                    args.Add("%s" % MachineName)
                    args.Add("%s" % NotifyByEmail)
                    args.Add("%s" % NotifyByPopupMessage)
                    # The user-group argument is only passed when group
                    # migration was requested.
                    if bool(scriptDialog.GetValue("MigrateGroupsBox")) is True:
                        args.Add("%s" % UserLevel)
                    exitCode = ClientUtils.ExecuteCommand(args)
                    if(exitCode == 0):
                        successes = successes + 1
                    else:
                        failures = failures + 1
            scriptDialog.ShowMessageBox("Users successfully injected into DB: %d\nUsers failed to be injected into DB: %d" % (successes, failures), "User Account Migration Results")
        # LIMITS: one *.limitGroup XML file per v5 limit group.
        migrateLimits = scriptDialog.GetValue("LimitsBox")
        if bool(migrateLimits):
            limitsFiles = Directory.GetFiles(limitsPath, "*.limitGroup", SearchOption.TopDirectoryOnly)
            args = StringCollection()
            args.Add("-GetLimitGroupNames")
            ExistingLimits = (ClientUtils.ExecuteCommandAndGetOutput(args)).split()
            limitsAdded = 0
            limitsSkipped = 0
            limitsFailed = 0
            for limitsFile in limitsFiles:
                # Initalise for each limitGroup XML File
                Name = ""
                Limit = 0
                ReleasePercentage = -1
                IsJobSpecific = True
                ListedSlaves = []
                WhitelistFlag = False
                ExcludedSlaves = []
                # Parse User XML File
                tree = xml.ElementTree()
                tree.parse(limitsFile)
                root = tree.getroot()
                for child in root.getchildren():
                    if child.tag == 'Name':
                        if child.text is not None:
                            Name = child.text
                    if child.tag == 'Limit':
                        if child.text is not None:
                            Limit = child.text
                    if child.tag == 'ReleasePercentage':
                        if child.text is not None:
                            ReleasePercentage = child.text
                    if child.tag == 'IsJobSpecific':
                        if child.text is not None:
                            if child.text == "true":
                                IsJobSpecific = True
                            else:
                                IsJobSpecific = False
                    if child.tag == 'ListedSlaves':
                        if child.text is not None:
                            ListedSlaves = child.text
                    if child.tag == 'WhitelistFlag':
                        if child.text is not None:
                            WhitelistFlag = child.text
                    if child.tag == 'ExcludedSlaves':
                        if child.text is not None:
                            ExcludedSlaves = child.text
                if Name in ExistingLimits and (not scriptDialog.GetValue("OverwriteExistingLimitsBox")):
                    limitsSkipped += 1
                else:
                    # Job-specific limits belong to individual jobs, so only
                    # repository-wide limits are migrated.
                    if not IsJobSpecific:
                        try:
                            RepositoryUtils.SetLimitGroup(str(Name), int(Limit), ListedSlaves, bool(WhitelistFlag), ExcludedSlaves, float(ReleasePercentage))
                            limitsAdded += 1
                        except:
                            limitsFailed += 1
                    else:
                        limitsSkipped += 1
            scriptDialog.ShowMessageBox("Limits injected into DB: %d\nLimits skipped: %d\nLimits failed to be injected into DB: %d" % (limitsAdded, limitsSkipped, limitsFailed), "Limits Results")
        scriptDialog.ShowMessageBox("v5 -> v6 Migration Completed", "Completed")
    except:
        # Top-level catch-all: surface the full traceback in a dialog.
        import traceback
        scriptDialog.ShowMessageBox(str(traceback.format_exc()), "Error")
| ThinkboxSoftware/Deadline | Installation/Migration/DeadlineV5Migration.py | Python | apache-2.0 | 23,899 |
import os
import sys
import json
import random
import urllib3
from urllib3 import PoolManager, Retry, Timeout
import certifi
from random import randint
from bs4 import BeautifulSoup
from time import sleep
class WebCrawler:
    """Fetch a single page over HTTPS-verified HTTP and extract its links
    and image URLs, filtering out URLs containing any of the given
    'bad' words."""
    def __init__(self, url, bad_words):
        # Parsed page (BeautifulSoup) -- populated by fetch_and_process().
        self.soup = None
        # Last raw urllib3 response.
        self.r = None
        self.url = url
        self.bad_words = bad_words
        # Object makes HTTP and HTTPS requests.
        # Note: https://urllib3.readthedocs.org/en/latest/security.html#using-certifi-with-urllib3
        self.http = urllib3.PoolManager(
            cert_reqs='CERT_REQUIRED', # Force certificate check.
            ca_certs=certifi.where(),  # Path to the Certifi bundle.
            timeout=120.0, # Manager with 120 seconds combined timeout.
            retries=Retry(1, redirect=False), # 1 Retry and no redirects
        )
    def fetch_and_process(self):
        """Download self.url and parse it into self.soup.

        Returns True on success, False on any network/parse failure or a
        non-200 status (note: non-200 falls through and returns None,
        which is also falsy).
        """
        try:
            # Lookup the URL and get the HTML data from it and if
            # any errors occured then cancel the crawling with empty
            # array being returned.
            self.r = self.http.request('GET', self.url)
        except Exception as e:
            print("Failed getting link, reason: ", e)
            return False
        if self.r.status == 200:
            # Load up the HTML parser for the returned data or else
            # if error, return the urls that where currently processed.
            try:
                self.soup = BeautifulSoup(self.r.data, "html.parser")
                return True
            except Exception as e:
                print("Failed loading BeautifulSoup, reason:", e)
                return False
    def fetch_image(self, url):
        """
        Fetch the image at *url* and discard the body.

        Returns True on HTTP 200, False on error, or an empty list if no
        page has been parsed yet (falsy, but a different type -- callers
        should treat the result as a boolean only).
        """
        urls = []
        if self.soup is None:
            return urls
        try:
            r = self.http.request('GET', url)
        except Exception as e:
            #print("Failed getting link, reason: ", e)
            return False
        # Only process if a successful result was returned.
        if r.status == 200:
            return True
        else:
            return False
    def all_images(self):
        """Return the filtered list of image source URLs found on the
        parsed page (empty list when no page has been parsed)."""
        urls = []
        if self.soup is None:
            return urls
        try:
            html_imgs = self.soup.find_all('img')
            for img_element in html_imgs:
                src_url = img_element.get('src')
                if src_url:
                    # Cannot support 'gif' file format so skip this file.
                    if ".gif" in src_url:
                        pass
                    elif src_url[0] == '/' or src_url[0] == '?':
                        # Generate the new url by appending the newly
                        # discovered src_url.
                        src_url = self.url + src_url
                        urls.append(src_url)
                    elif src_url[0] == '#':
                        # In-page anchor, not an image URL.
                        pass
                    else:
                        urls.append(src_url)
        except Exception as e:
            print('Error at URL:{}.ERROR:{}'.format(self.url,e))
        # Return all the valid urls we can use in our application.
        return self.filter_urls(urls)
    def all_urls(self):
        """Return the filtered list of anchor href URLs found on the
        parsed page (empty list when no page has been parsed)."""
        # Only process if a successful result was returned.
        urls = []
        if self.soup is None:
            return urls
        # Find all the links on the page which are link elements
        html_links = self.soup.find_all('a')
        for a_element in html_links:
            try:
                href_url = a_element.get('href')
                if href_url:
                    if href_url[0] == '/' or href_url[0] == '?':
                        # Generate the new url by appending the newly
                        # discovered href_url.
                        href_url = self.url + href_url
                        urls.append(href_url)
                    elif href_url[0] == '#':
                        # In-page anchor, not a crawlable URL.
                        pass
                    else:
                        urls.append(href_url)
            except Exception as e:
                print('Error at URL:{}.ERROR:{}'.format(self.url,e))
        # Return all the valid urls we can use in our application.
        return self.filter_urls(urls)
    def filter_urls(self, uncertain_urls):
        """
        Lower-case every URL and drop any that contains a configured
        'bad' word, or 'javascript', '.ico' or 'data:image'.
        """
        good_urls = []
        bad_words = self.bad_words
        for url in uncertain_urls:
            url = url.lower()
            is_url_ok = True
            for bad_word in bad_words:
                if bad_word in url:
                    is_url_ok = False
            if "javascript" in url or ".ico" in url or "data:image" in url:
                is_url_ok = False
            if is_url_ok:
                good_urls.append(url)
return good_urls | bartmika/py-autobrowserbot | crawler.py | Python | mit | 5,018 |
#!/usr/bin/env python3
# -*- coding: cp1252 -*-
# created on May 21, 2014 by baloan
"""
Launch to orbit (with atmosphere)
"""
from threading import Thread
import krpc
from toolkit import ksp
from toolkit import launch
from toolkit import system
from toolkit import warp
from vessels import surveyor, stock
# Maps active vessel name -> staging routine run in a background thread by
# main(); any vessel not listed here falls back to stock.default.
STAGING_DICT = {
    "Surveyor 1": surveyor.surveyor1,
    "Kerbal X": stock.default,
}
def main():
    """Connect to kRPC and launch the active vessel to orbit while a
    background thread runs the vessel-specific staging sequence."""
    cx = ksp.connect(name='Trajectory')
    ksp.set_globals(cx)
    # system.checkvessel("Surveyor 1")
    warp.warpday()
    # setup staging
    # NOTE(review): SC is not defined in this module -- presumably a global
    # installed by ksp.set_globals(); confirm.
    try:
        staging = STAGING_DICT[SC.active_vessel.name]
    except KeyError:
        staging = stock.default
    stage = Thread(target=staging, args=["Staging", ])
    # launch to orbit
    stage.start()
    launch.ltoa()
    system.tts()
if __name__ == "__main__":
main()
| baloan/mt-krpc | krpc/lko.py | Python | mit | 899 |
"""
.. data:: MOTOR_VELOCITY_SAMPLING_TIME
Time step for calculation of motor velocity by measuring two postion
    values. Longer values will create more accurate results but reading the
velocity will take more time.
.. data:: PROGRESS_BAR
Turn on progress bar by long-lasting operations if tqdm package is present
"""
from concert.quantities import q
# Prints the exception source by fake futures
PROGRESS_BAR = True
MOTOR_VELOCITY_SAMPLING_TIME = 0.1 * q.s
# Logging
AIODEBUG = 9
PERFDEBUG = 8
| ufo-kit/concert | concert/config.py | Python | lgpl-3.0 | 514 |
# Python has no real constant mechanism, so mark constants by writing the
# variable name in UPPER CASE (and similar conventions) so they can be
# recognized. Reserved words, of course, still cannot be used as names.
# Constant
TEISUU = "定数です"
print(TEISUU)
| ekazyam/study | パーフェクトPython/pp_059_定数定義.py | Python | mit | 210 |
# -*- encoding: utf-8 -*-
def silence_last(n=1, use_multimeasure_rests=None):
    r'''Makes silence mask with last `n` indices equal to zero.

    The mask silences the final `n` divisions of whatever rhythm-maker it
    is attached to; with ``n=0`` no divisions are silenced.

    Returns silence mask.
    '''
    from abjad.tools import rhythmmakertools
    assert 0 <= n, repr(n)
    # negative indices counting back from the end: [-n, ..., -1]
    last_indices = list(range(-n, 0))
    return rhythmmakertools.SilenceMask(
        indices=last_indices,
        use_multimeasure_rests=use_multimeasure_rests,
        )
import os
import warnings
import numpy as np
from pyrates.utility.genetic_algorithm import CGSGeneticAlgorithm
from pandas import DataFrame, read_hdf
from copy import deepcopy
class CustomGOA(CGSGeneticAlgorithm):
    """Genetic optimization of the STN-GPe-striatum circuit.

    Overrides fitness evaluation to simulate every candidate
    parametrization on the cluster grid search (CGS) under several
    pharmacological conditions and to store fitness/results in-place on
    ``self.pop``.
    """
    def eval_fitness(self, target: list, **kwargs):
        """Evaluate the population against the *target* firing rates.

        Writes ``fitness`` (inverse of the worker-reported loss) and
        ``results`` (per-population firing rates) back into ``self.pop``.
        """
        # define simulation conditions
        worker_file = self.cgs_config['worker_file'] if 'worker_file' in self.cgs_config else None
        param_grid = self.pop.drop(['fitness', 'sigma', 'results'], axis=1)
        result_vars = ['r_e', 'r_p', 'r_a', 'r_m', 'r_f']
        freq_targets = [0.0, np.nan, np.nan, np.nan, np.nan]
        #param_grid, invalid_params = eval_params(param_grid)
        # each dict scales synaptic weights to mimic one drug condition
        conditions = [{},  # healthy control
                      {'k_pe': 0.2, 'k_ae': 0.2},  # AMPA blockade in GPe
                      {'k_pe': 0.2, 'k_ae': 0.2, 'k_pp': 0.2, 'k_pa': 0.2, 'k_pm': 0.2, 'k_aa': 0.2, 'k_ap': 0.2,
                       'k_am': 0.2},  # AMPA blockade and GABAA blockade in GPe
                      {'k_pp': 0.2, 'k_pa': 0.2, 'k_pm': 0.2, 'k_aa': 0.2, 'k_ap': 0.2,
                       'k_am': 0.2},  # GABAA blockade in GPe
                      {'k_pe': 0.0, 'k_ae': 0.0},  # STN blockade
                      {'k_ep': 0.2},  # GABAA blocker in STN
                      ]
        # (param, reference param, exponent) triples applied by the worker
        param_scalings = [
            ('delta_e', 'tau_e', 2.0),
            ('delta_p', 'tau_p', 2.0),
            ('delta_a', 'tau_a', 2.0),
            ('delta_m', 'tau_m', 2.0),
            ('delta_f', 'tau_f', 2.0),
            ('k_ee', 'delta_e', 0.5),
            ('k_ep', 'delta_e', 0.5),
            ('k_pe', 'delta_p', 0.5),
            ('k_pp', 'delta_p', 0.5),
            ('k_pa', 'tau_p', 0.5),
            ('k_pm', 'tau_p', 0.5),
            ('k_ae', 'tau_a', 0.5),
            ('k_ap', 'tau_a', 0.5),
            ('k_aa', 'tau_a', 0.5),
            ('k_am', 'tau_a', 0.5),
            ('k_mf', 'delta_m', 0.5),
            ('k_mm', 'delta_m', 0.5),
            ('k_fa', 'delta_f', 0.5),
            ('k_ff', 'delta_f', 0.5),
            ('eta_e', 'delta_e', 1.0),
            ('eta_p', 'delta_p', 1.0),
            ('eta_a', 'delta_a', 1.0),
            ('eta_m', 'delta_m', 1.0),
            ('eta_f', 'delta_f', 1.0),
        ]
        # per-node batch sizes, matched to the compute nodes listed in
        # cgs_config['nodes'] below
        chunk_size = [
            60,  # carpenters
            100,  # osttimor
            60,  # spanien
            100,  # animals
            60,  # kongo
            60,  # tschad
            #100,  # uganda
            # 50,  # tiber
            #50,  # giraffe
            40,  # lech
            20,  # rilke
            12,  # dinkel
            #10,  # rosmarin
            #10,  # mosambik
            # 50,  # compute servers
            # 40,
            # 30,
            # 20,
            # 10,
            # 50,
            # 40,
            # 30,
            # 20,
            # 10,
            # 50,
            # 40,
            # 30,
            # 20,
            # 10,
            # 50,
            # 40,
        ]
        # perform simulations
        if len(param_grid) > 0:
            self.gs_config['init_kwargs'].update(kwargs)
            res_file = self.cgs.run(
                circuit_template=self.gs_config['circuit_template'],
                param_grid=deepcopy(param_grid),
                param_map=self.gs_config['param_map'],
                simulation_time=self.gs_config['simulation_time'],
                dt=self.gs_config['step_size'],
                inputs=self.gs_config['inputs'],
                outputs=self.gs_config['outputs'],
                sampling_step_size=self.gs_config['sampling_step_size'],
                permute=False,
                chunk_size=chunk_size,
                worker_file=worker_file,
                worker_env=self.cgs_config['worker_env'],
                gs_kwargs={'init_kwargs': self.gs_config['init_kwargs'], 'conditions': conditions,
                           'param_scalings': param_scalings},
                worker_kwargs={'y': target, 'time_lim': 7200.0, 'freq_targets': freq_targets},
                result_concat_axis=0)
            results_tmp = read_hdf(res_file, key=f'Results/results')
            # calculate fitness
            for gene_id in param_grid.index:
                self.pop.at[gene_id, 'fitness'] = 1.0 / results_tmp.at[gene_id, 'fitness']
                self.pop.at[gene_id, 'results'] = [results_tmp.at[gene_id, v] for v in result_vars]
        # set fitness of invalid parametrizations
        #for gene_id in invalid_params.index:
        #    self.pop.at[gene_id, 'fitness'] = 0.0
        #    self.pop.at[gene_id, 'results'] = [0. for _ in result_vars]
def fitness(y, t):
    """Weighted absolute deviation of simulated rates *y* from targets *t*.

    NaN entries in *t* mean "don't care" and contribute zero error; every
    remaining deviation is weighted by 1/|target| (zero targets weighted
    as 1).
    """
    y = np.asarray(y).flatten()
    t = np.asarray(t).flatten()
    missing = np.isnan(t)
    # deviations; undefined targets are ignored
    diff = np.where(missing, 0.0, y[:t.size] - t)
    # replace NaNs/zeros so the weights are well defined
    safe_t = np.where(missing | (t == 0), 1.0, t)
    return (1.0 / np.abs(safe_t)) @ np.abs(diff)
if __name__ == "__main__":
    warnings.filterwarnings("ignore")
    pop_size = 1024
    # per-gene definitions: hard bounds (min/max), mutation width (sigma)
    # and the loc/scale used by the normal sampling functions below
    pop_genes = {
        'k_ee': {'min': 0, 'max': 15, 'size': pop_size, 'sigma': 0.1, 'loc': 1.0, 'scale': 0.5},
        'k_ae': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
        'k_pe': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
        'k_pp': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'k_ep': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
        'k_ap': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'k_aa': {'min': 0, 'max': 50, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'k_pa': {'min': 0, 'max': 50, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'k_fa': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
        'k_mm': {'min': 0, 'max': 50, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'k_am': {'min': 0, 'max': 200, 'size': pop_size, 'sigma': 0.8, 'loc': 40.0, 'scale': 4.0},
        'k_pm': {'min': 0, 'max': 200, 'size': pop_size, 'sigma': 0.5, 'loc': 5.0, 'scale': 1.0},
        'k_mf': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
        'k_ff': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'eta_e': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
        'eta_p': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
        'eta_a': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
        'eta_m': {'min': -10, 'max': 0, 'size': pop_size, 'sigma': 0.2, 'loc': -3.0, 'scale': 0.5},
        'eta_f': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
        'delta_e': {'min': 0.01, 'max': 1.0, 'size': pop_size, 'sigma': 0.05, 'loc': 0.1, 'scale': 0.1},
        'delta_p': {'min': 0.01, 'max': 1.0, 'size': pop_size, 'sigma': 0.05, 'loc': 0.2, 'scale': 0.1},
        'delta_a': {'min': 0.01, 'max': 1.5, 'size': pop_size, 'sigma': 0.05, 'loc': 0.4, 'scale': 0.1},
        'delta_m': {'min': 0.01, 'max': 1.5, 'size': pop_size, 'sigma': 0.05, 'loc': 0.2, 'scale': 0.1},
        'delta_f': {'min': 0.01, 'max': 1.5, 'size': pop_size, 'sigma': 0.05, 'loc': 0.2, 'scale': 0.1},
        # tau genes are fixed (min == max, sigma == 0)
        'tau_e': {'min': 12, 'max': 12, 'size': pop_size, 'sigma': 0.0, 'loc': 12.0, 'scale': 0.0},
        'tau_p': {'min': 24, 'max': 24, 'size': pop_size, 'sigma': 0.0, 'loc': 24.0, 'scale': 0.0},
        'tau_a': {'min': 20, 'max': 20, 'size': pop_size, 'sigma': 0.0, 'loc': 20.0, 'scale': 0.0},
        'tau_m': {'min': 20, 'max': 20, 'size': pop_size, 'sigma': 0.0, 'loc': 20.0, 'scale': 0.0},
        'tau_f': {'min': 20, 'max': 20, 'size': pop_size, 'sigma': 0.0, 'loc': 20.0, 'scale': 0.0},
        #'tau_ee_v': {'min': 0.5, 'max': 1.0, 'size': 2, 'sigma': 0.1, 'loc': 0.5, 'scale': 0.1},
        # 'tau_ei': {'min': 3.0, 'max': 5.0, 'size': 1, 'sigma': 0.1, 'loc': 4.0, 'scale': 0.1},
        #'tau_ei_v': {'min': 0.5, 'max': 1.0, 'size': 2, 'sigma': 0.1, 'loc': 1.0, 'scale': 0.2},
        # 'tau_ie': {'min': 2.0, 'max': 4.0, 'size': 1, 'sigma': 0.1, 'loc': 3.0, 'scale': 0.1},
        #'tau_ie_v': {'min': 0.8, 'max': 1.6, 'size': 2, 'sigma': 0.1, 'loc': 0.7, 'scale': 0.1},
        #'tau_ii_v': {'min': 0.5, 'max': 1.0, 'size': 2, 'sigma': 0.1, 'loc': 0.5, 'scale': 0.1},
    }
    # map gene names onto PyRates circuit variables (nodes) and weights (edges)
    param_map = {
        'k_ee': {'vars': ['weight'], 'edges': [('stn', 'stn')]},
        'k_ae': {'vars': ['weight'], 'edges': [('stn', 'gpe_a')]},
        'k_pe': {'vars': ['weight'], 'edges': [('stn', 'gpe_p')]},
        'k_pp': {'vars': ['weight'], 'edges': [('gpe_p', 'gpe_p')]},
        'k_ep': {'vars': ['weight'], 'edges': [('gpe_p', 'stn')]},
        'k_ap': {'vars': ['weight'], 'edges': [('gpe_p', 'gpe_a')]},
        'k_aa': {'vars': ['weight'], 'edges': [('gpe_a', 'gpe_a')]},
        'k_pa': {'vars': ['weight'], 'edges': [('gpe_a', 'gpe_p')]},
        'k_fa': {'vars': ['weight'], 'edges': [('gpe_a', 'fsi')]},
        'k_mm': {'vars': ['weight'], 'edges': [('msn', 'msn')]},
        'k_am': {'vars': ['weight'], 'edges': [('msn', 'gpe_a')]},
        'k_pm': {'vars': ['weight'], 'edges': [('msn', 'gpe_p')]},
        'k_ff': {'vars': ['weight'], 'edges': [('fsi', 'fsi')]},
        'k_mf': {'vars': ['weight'], 'edges': [('fsi', 'msn')]},
        'eta_e': {'vars': ['stn_op/eta_e'], 'nodes': ['stn']},
        'eta_p': {'vars': ['gpe_proto_op/eta_i'], 'nodes': ['gpe_p']},
        'eta_a': {'vars': ['gpe_arky_op/eta_a'], 'nodes': ['gpe_a']},
        'eta_m': {'vars': ['str_msn_op/eta_s'], 'nodes': ['msn']},
        'eta_f': {'vars': ['str_fsi_op/eta_f'], 'nodes': ['fsi']},
        'delta_e': {'vars': ['stn_op/delta_e'], 'nodes': ['stn']},
        'delta_p': {'vars': ['gpe_proto_op/delta_i'], 'nodes': ['gpe_p']},
        'delta_a': {'vars': ['gpe_arky_op/delta_a'], 'nodes': ['gpe_a']},
        'delta_m': {'vars': ['str_msn_op/delta_s'], 'nodes': ['msn']},
        'delta_f': {'vars': ['str_fsi_op/delta_f'], 'nodes': ['fsi']},
        'tau_e': {'vars': ['stn_op/tau_e'], 'nodes': ['stn']},
        'tau_p': {'vars': ['gpe_proto_op/tau_i'], 'nodes': ['gpe_p']},
        'tau_a': {'vars': ['gpe_arky_op/tau_a'], 'nodes': ['gpe_a']},
        'tau_m': {'vars': ['str_msn_op/tau_s'], 'nodes': ['msn']},
        'tau_f': {'vars': ['str_fsi_op/tau_f'], 'nodes': ['fsi']},
    }
    # simulation time (ms), integration and sampling step sizes
    T = 2000.
    dt = 1e-2
    dts = 1e-1
    compute_dir = f"{os.getcwd()}/stn_gpe_str_opt"
    # perform genetic optimization
    ga = CustomGOA(fitness_measure=fitness,
                   gs_config={
                       'circuit_template': f"{os.getcwd()}/config/stn_gpe/stn_gpe_str",
                       'permute_grid': True,
                       'param_map': param_map,
                       'simulation_time': T,
                       'step_size': dt,
                       'sampling_step_size': dts,
                       'inputs': {},
                       'outputs': {'r_e': "stn/stn_op/R_e", 'r_p': 'gpe_p/gpe_proto_op/R_i',
                                   'r_a': 'gpe_a/gpe_arky_op/R_a', 'r_m': 'msn/str_msn_op/R_s',
                                   'r_f': 'fsi/str_fsi_op/R_f'},
                       'init_kwargs': {'backend': 'numpy', 'solver': 'scipy', 'step_size': dt},
                   },
                   cgs_config={'nodes': [
                       'carpenters',
                       'osttimor',
                       'spanien',
                       'animals',
                       'kongo',
                       'tschad',
                       #'uganda',
                       # 'tiber',
                       #'giraffe',
                       'lech',
                       'rilke',
                       'dinkel',
                       #'rosmarin',
                       #'mosambik',
                       # 'comps06h01',
                       # 'comps06h02',
                       # 'comps06h03',
                       # 'comps06h04',
                       # 'comps06h05',
                       # 'comps06h06',
                       # 'comps06h07',
                       # 'comps06h08',
                       # 'comps06h09',
                       # 'comps06h10',
                       # 'comps06h11',
                       # 'comps06h12',
                       # 'comps06h13',
                       # 'comps06h14',
                       # 'scorpions',
                       # 'spliff',
                       # 'supertramp',
                       # 'ufo'
                   ],
                       'compute_dir': compute_dir,
                       'worker_file': f'{os.getcwd()}/stn_gpe_str_worker.py',
                       'worker_env': "/data/u_rgast_software/anaconda3/envs/pyrates/bin/python3",
                   })
    drop_save_dir = f'{compute_dir}/PopulationDrops/'
    os.makedirs(drop_save_dir, exist_ok=True)
    # target firing rates per condition; NaN entries are ignored by fitness()
    winner = ga.run(
        initial_gene_pool=pop_genes,
        gene_sampling_func=np.random.normal,
        new_member_sampling_func=np.random.normal,
        target=[[20, 60, 20, 2, 20],  # healthy control
                [np.nan, 2/3, np.nan, np.nan, np.nan],  # ampa blockade in GPe
                [np.nan, 1, np.nan, np.nan, np.nan],  # ampa and gabaa blockade in GPe
                [np.nan, 2, np.nan, np.nan, np.nan],  # GABAA blockade in GPe
                [np.nan, 1/2, np.nan, np.nan, np.nan],  # STN blockade
                [2, 2, np.nan, np.nan, np.nan],  # GABAA blockade in STN
                ],
        max_iter=100,
        enforce_max_iter=True,
        min_fit=1.0,
        n_winners=10,
        n_parent_pairs=40,
        n_new=62,
        sigma_adapt=0.05,
        candidate_save=f'{compute_dir}/GeneticCGSCandidatestn.h5',
        drop_save=drop_save_dir,
        new_pop_on_drop=True,
        pop_save=f'{drop_save_dir}/pop_summary',
        permute=False
    )
    # winner.to_hdf(f'{drop_save_dir}/winner.h5', key='data')
| Richert/BrainNetworks | BasalGanglia/stn_gpe_str_opt.py | Python | apache-2.0 | 14,060 |
import os
# Minimal Django settings used only to run the test suite.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
ROOT_URLCONF = 'theresumator.urls'
# NOTE: a hard-coded key is fine for tests but must never ship to production
SECRET_KEY = 'fake-key'
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admin',
    "tests",
    "resumator",
)
# in-memory SQLite keeps the test runs fast and side-effect free
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
        'TEST_NAME': ':memory:',
    }
}
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'site_media')
MEDIA_URL = '/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
    '/var/www/static/',
)
| AmmsA/django-resumator | tests/test_settings.py | Python | mit | 1,175 |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para dailymotion
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ---------------------------------------------------------------------------------------------------------------------
import re
from core import logger
from core import scrapertools
DEFAULT_HEADERS = [["User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0"]]
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the downloadable stream URLs for a dailymotion page.

    Returns a list of [label, url, 0, subtitle] entries, one per available
    quality/format found on the page.
    """
    logger.info("pelisalacarta.servers.dailymotion get_video_url(page_url='%s')" % page_url)
    video_urls = []
    # Fix: work on a per-call copy of the headers. The old code appended the
    # Cookie header to the module-level DEFAULT_HEADERS, so cookies from every
    # previous call accumulated across requests.
    request_headers = list(DEFAULT_HEADERS)
    data, headers = scrapertools.read_body_and_headers(page_url, headers=request_headers)
    data = data.replace("\\", "")
    '''
    "240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
    '''
    patron = '"([^"]+)":\[\{"type":"video/([^"]+)","url":"([^"]+)"\}\]'
    matches = scrapertools.find_multiple_matches(data, patron)
    subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')
    # forward any session cookies when resolving the redirected stream location
    for cookie in headers:
        if cookie[0] == "set-cookie":
            request_headers.append(['Cookie', cookie[1]])
    for stream_name, stream_type, stream_url in matches:
        stream_url = scrapertools.get_header_from_response(stream_url, header_to_get="location",
                                                           headers=request_headers)
        video_urls.append([stream_name + "p ." + stream_type + " [dailymotion]", stream_url, 0, subtitle])
    for video_url in video_urls:
        logger.info("pelisalacarta.servers.dailymotion %s - %s" % (video_url[0], video_url[1]))
    return video_urls
# Finds this server's videos in the given text
def find_videos(data):
    """Scan *data* for dailymotion links and return [title, url, server] entries,
    skipping duplicate URLs."""
    seen = set()
    results = []
    # http://www.dailymotion.com/embed/video/xrva9o
    # http://www.dailymotion.com/swf/video/xocczx
    # http://www.dailymotion.com/swf/x17idxo&related=0
    # http://www.dailymotion.com/video/xrva9o
    patronvideos = 'dailymotion.com/(?:video/|swf/(?:video/|)|)(?:embed/video/|)([A-z0-9]+)'
    logger.info("pelisalacarta.servers.dailymotion find_videos #" + patronvideos + "#")
    for video_id in re.compile(patronvideos, re.DOTALL).findall(data):
        url = "http://www.dailymotion.com/embed/video/" + video_id
        if url in seen:
            logger.info("  url duplicada=" + url)
            continue
        logger.info("  url=" + url)
        results.append(["[dailymotion]", url, 'dailymotion'])
        seen.add(url)
    return results
| ChopChopKodi/pelisalacarta | python/main-classic/servers/dailymotion.py | Python | gpl-3.0 | 2,895 |
# $Id$
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Calculation of topological/topochemical descriptors.
"""
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import Graphs
from rdkit.Chem import rdchem
from rdkit.Chem import rdMolDescriptors
# FIX: remove this dependency here and below
from rdkit.Chem import pyPeriodicTable as PeriodicTable
import numpy
import math
from rdkit.ML.InfoTheory import entropy
# shared periodic-table singleton and precomputed log(2) used by _log2 below
periodicTable = rdchem.GetPeriodicTable()
_log2val = math.log(2)
def _log2(x):
  """Return the base-2 logarithm of *x*."""
  return math.log(x) / math.log(2)
def _VertexDegrees(mat, onlyOnes=0):
  """ *Internal Use Only*

  Element-wise sum of the rows of *mat* (i.e. per-column totals, which are
  the vertex degrees for a symmetric adjacency matrix). With *onlyOnes*
  set, only entries equal to 1 are counted.

  """
  if onlyOnes:
    return sum(numpy.equal(mat, 1))
  return sum(mat)
def _NumAdjacencies(mol, dMat):
  """ *Internal Use Only*

  Number of bonds in *mol*; *dMat* is accepted for interface
  compatibility but unused.

  """
  return mol.GetNumBonds()
def _GetCountDict(arr):
  """ *Internal Use Only*

  Map each distinct value in *arr* to its number of occurrences.

  """
  counts = {}
  for value in arr:
    if value in counts:
      counts[value] += 1
    else:
      counts[value] = 1
  return counts
def _pyHallKierAlpha(m):
  """ calculate the Hall-Kier alpha value for a molecule

   From equations (58) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  """
  alphaSum = 0.0
  rC = PeriodicTable.nameTable['C'][5]  # carbon covalent radius (reference)
  for atom in m.GetAtoms():
    atNum = atom.GetAtomicNum()
    if not atNum:
      continue
    symb = atom.GetSymbol()
    alphaV = PeriodicTable.hallKierAlphas.get(symb, None)
    if alphaV is not None:
      # tabulated alpha, indexed by hybridization; missing entries fall
      # back to the last (default) value
      hyb = atom.GetHybridization() - 2
      if hyb < len(alphaV):
        alpha = alphaV[hyb]
        if alpha is None:
          alpha = alphaV[-1]
      else:
        alpha = alphaV[-1]
    else:
      # no tabulated value: covalent-radius ratio of eq. (58)
      rA = PeriodicTable.nameTable[symb][5]
      alpha = rA / rC - 1
    # fix: removed a leftover debug print(atom.GetIdx(), ...) that wrote
    # one line to stdout per atom on every call
    alphaSum += alpha
  return alphaSum
#HallKierAlpha.version="1.0.2"
def Ipc(mol, avg = 0, dMat = None, forceDMat = 0):
  """This returns the information content of the coefficients of the characteristic
    polynomial of the adjacency matrix of a hydrogen-suppressed graph of a molecule.

    'avg = 1' returns the information content divided by the total population.

    From D. Bonchev & N. Trinajstic, J. Chem. Phys. vol 67, 4517-4533 (1977)

  """
  # reuse a distance matrix cached on the molecule (under mol._adjMat)
  # unless recalculation is forced
  if forceDMat or dMat is None:
    if forceDMat:
      dMat = Chem.GetDistanceMatrix(mol,0)
      mol._adjMat = dMat
    else:
      try:
        dMat = mol._adjMat
      except AttributeError:
        dMat = Chem.GetDistanceMatrix(mol,0)
        mol._adjMat = dMat
  # adjacency = pairs at graph distance exactly 1
  adjMat = numpy.equal(dMat,1)
  cPoly = abs(Graphs.CharacteristicPolynomial(mol, adjMat))
  if avg:
    return entropy.InfoEntropy(cPoly)
  else:
    # NOTE(review): the un-averaged value grows very quickly with molecule
    # size and has been reported to overflow for large molecules; prefer
    # avg=1 in that case -- confirm against upstream guidance.
    return sum(cPoly)*entropy.InfoEntropy(cPoly)
Ipc.version="1.0.0"
def _pyKappa1(mol):
  """ Hall-Kier Kappa1 value

   From equations (58) and (59) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  """
  alpha = HallKierAlpha(mol)
  nHeavy = mol.GetNumHeavyAtoms() + alpha
  denom = mol.GetNumBonds(1) + alpha
  if not denom:
    return 0.0
  return nHeavy * (nHeavy - 1) ** 2 / denom ** 2
#Kappa1.version="1.0.0"
def _pyKappa2(mol):
  """ Hall-Kier Kappa2 value

   From equations (58) and (60) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  """
  alpha = HallKierAlpha(mol)
  nPaths2 = len(Chem.FindAllPathsOfLengthN(mol, 2))
  nAtoms = mol.GetNumHeavyAtoms()
  denom = (nPaths2 + alpha) ** 2
  if not denom:
    return 0
  return (nAtoms + alpha - 1) * (nAtoms + alpha - 2) ** 2 / denom
#Kappa2.version="1.0.0"
def _pyKappa3(mol):
  """ Hall-Kier Kappa3 value

   From equations (58), (61) and (62) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  """
  alpha = HallKierAlpha(mol)
  nPaths3 = len(Chem.FindAllPathsOfLengthN(mol, 3))
  nAtoms = mol.GetNumHeavyAtoms()
  denom = (nPaths3 + alpha) ** 2
  if not denom:
    return 0
  # odd and even heavy-atom counts use different numerators (eqs. 61/62)
  if nAtoms % 2:
    return (nAtoms + alpha - 1) * (nAtoms + alpha - 3) ** 2 / denom
  return (nAtoms + alpha - 2) * (nAtoms + alpha - 3) ** 2 / denom
#Kappa3.version="1.0.0"
# Expose the C++ implementations from rdMolDescriptors under the historical
# Python names; the _py* functions above are kept as reference
# implementations. Each descriptor carries its version string.
HallKierAlpha = lambda x:rdMolDescriptors.CalcHallKierAlpha(x)
HallKierAlpha.version=rdMolDescriptors._CalcHallKierAlpha_version
Kappa1 = lambda x:rdMolDescriptors.CalcKappa1(x)
Kappa1.version=rdMolDescriptors._CalcKappa1_version
Kappa2 = lambda x:rdMolDescriptors.CalcKappa2(x)
Kappa2.version=rdMolDescriptors._CalcKappa2_version
Kappa3 = lambda x:rdMolDescriptors.CalcKappa3(x)
Kappa3.version=rdMolDescriptors._CalcKappa3_version
def Chi0(mol):
  """ From equations (1),(9) and (10) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  """
  # filter zero-degree (isolated) atoms in a single pass instead of the
  # old quadratic `while 0 in deltas: deltas.remove(0)` loop
  deltas = numpy.array([d for d in (x.GetDegree() for x in mol.GetAtoms()) if d], 'd')
  return sum(numpy.sqrt(1. / deltas))
Chi0.version="1.0.0"
def Chi1(mol):
  """ From equations (1),(11) and (12) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  """
  # products of the end-atom degrees per bond; zeros are filtered in a
  # single pass instead of the old quadratic while/remove loop
  prods = (b.GetBeginAtom().GetDegree() * b.GetEndAtom().GetDegree() for b in mol.GetBonds())
  c1s = numpy.array([c for c in prods if c], 'd')
  return sum(numpy.sqrt(1. / c1s))
Chi1.version="1.0.0"
def _nVal(atom):
  """Outer-shell electron count of *atom* minus its total hydrogen count."""
  outerElecs = periodicTable.GetNOuterElecs(atom.GetAtomicNum())
  return outerElecs - atom.GetTotalNumHs()
def _hkDeltas(mol,skipHs=1):
  """Per-atom valence deltas used by the Chi*v descriptors.

  NOTE(review): the result is cached on ``mol._hkDeltas`` without recording
  *skipHs*, so a second call with a different *skipHs* would return the
  stale list -- confirm callers always clear the cache (as _pyChi0v does).
  """
  global periodicTable
  res = []
  if hasattr(mol,'_hkDeltas') and mol._hkDeltas is not None:
    return mol._hkDeltas
  for atom in mol.GetAtoms():
    n = atom.GetAtomicNum()
    if n>1:
      nV = periodicTable.GetNOuterElecs(n)
      nHs = atom.GetTotalNumHs()
      if n <= 10:
        # first row
        res.append(float(nV-nHs))
      else:
        # second row and up
        res.append(float(nV-nHs)/float(n-nV-1))
    elif n==1:
      # hydrogens only contribute (as 0.0) when skipHs is turned off
      if not skipHs:
        res.append(0.0)
    else:
      # atomic number 0 (presumably dummy/query atoms) always contributes 0
      res.append(0.0)
  mol._hkDeltas = res
  return res
def _pyChi0v(mol):
  """ From equations (5),(9) and (10) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  """
  nonZero = [d for d in _hkDeltas(mol) if d != 0]
  # invalidate the cache left behind by _hkDeltas
  mol._hkDeltas = None
  return sum(numpy.sqrt(1. / numpy.array(nonZero)))
def _pyChi1v(mol):
  """ From equations (5),(11) and (12) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  """
  deltas = numpy.array(_hkDeltas(mol, skipHs=0))
  total = 0.0
  for bond in mol.GetBonds():
    prod = deltas[bond.GetBeginAtomIdx()] * deltas[bond.GetEndAtomIdx()]
    if prod:
      total += numpy.sqrt(1. / prod)
  return total
def _pyChiNv_(mol, order=2):
  """ From equations (5),(15) and (16) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  **NOTE**: because the current path finding code does, by design,
  detect rings as paths (e.g. in C1CC1 there is *1* atom path of
  length 3), values of ChiNv with N >= 3 may give results that differ
  from those provided by the old code in molecules that have rings of
  size 3.

  """
  invSqrt = numpy.array([1. / numpy.sqrt(d) if d != 0.0 else 0.0
                         for d in _hkDeltas(mol, skipHs=0)])
  total = 0.0
  for path in Chem.FindAllPathsOfLengthN(mol, order + 1, useBonds=0):
    total += numpy.prod(invSqrt[numpy.array(path)])
  return total
def _pyChi2v(mol):
  """ Chi2v, from equations (5),(15) and (16) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  """
  return _pyChiNv_(mol, order=2)
def _pyChi3v(mol):
  """ Chi3v, from equations (5),(15) and (16) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  """
  return _pyChiNv_(mol, order=3)
def _pyChi4v(mol):
  """ Chi4v, from equations (5),(15) and (16) of Rev. Comp. Chem. vol 2, 367-422, (1991)

  **NOTE**: because the current path finding code does, by design,
  detect rings as paths (e.g. in C1CC1 there is *1* atom path of
  length 3), values of Chi4v may give results that differ from those
  provided by the old code in molecules that have 3 rings.

  """
  return _pyChiNv_(mol, order=4)
def _pyChi0n(mol):
  """ Similar to Hall Kier Chi0v, but uses nVal instead of valence
    This makes a big difference after we get out of the first row.

  """
  # filter zero deltas in a single pass instead of the old quadratic
  # `while deltas.count(0): deltas.remove(0)` loop
  deltas = numpy.array([d for d in (_nVal(x) for x in mol.GetAtoms()) if d], 'd')
  return sum(numpy.sqrt(1. / deltas))
def _pyChi1n(mol):
  """ Similar to Hall Kier Chi1v, but uses nVal instead of valence

  """
  nVals = numpy.array([_nVal(a) for a in mol.GetAtoms()], 'd')
  total = 0.0
  for bond in mol.GetBonds():
    prod = nVals[bond.GetBeginAtomIdx()] * nVals[bond.GetEndAtomIdx()]
    if prod:
      total += numpy.sqrt(1. / prod)
  return total
def _pyChiNn_(mol, order=2):
  """ Similar to Hall Kier ChiNv, but uses nVal instead of valence
    This makes a big difference after we get out of the first row.

  **NOTE**: because the current path finding code does, by design,
  detect rings as paths (e.g. in C1CC1 there is *1* atom path of
  length 3), values of ChiNn with N >= 3 may give results that differ
  from those provided by the old code in molecules that have rings of
  size 3.

  """
  invSqrt = numpy.array([1. / numpy.sqrt(v) if v else 0.0
                         for v in (_nVal(a) for a in mol.GetAtoms())])
  total = 0.0
  for path in Chem.FindAllPathsOfLengthN(mol, order + 1, useBonds=0):
    total += numpy.prod(invSqrt[numpy.array(path)])
  return total
def _pyChi2n(mol):
  """ Chi2n: like Chi2v but built on nVal instead of valence.
    This makes a big difference after we get out of the first row.

  """
  return _pyChiNn_(mol, order=2)
def _pyChi3n(mol):
  """ Chi3n: like Chi3v but built on nVal instead of valence.
    This makes a big difference after we get out of the first row.

  """
  return _pyChiNn_(mol, order=3)
def _pyChi4n(mol):
  """ Chi4n: like Chi4v but built on nVal instead of valence.
    This makes a big difference after we get out of the first row.

  **NOTE**: because the current path finding code does, by design,
  detect rings as paths (e.g. in C1CC1 there is *1* atom path of
  length 3), values of Chi4n may give results that differ from those
  provided by the old code in molecules that have 3 rings.

  """
  return _pyChiNn_(mol, order=4)
# Expose the C++ Chi implementations from rdMolDescriptors under the
# historical Python names; the _py* functions above are kept as reference
# implementations. Each descriptor carries its version string.
Chi0v = lambda x:rdMolDescriptors.CalcChi0v(x)
Chi0v.version=rdMolDescriptors._CalcChi0v_version
Chi1v = lambda x:rdMolDescriptors.CalcChi1v(x)
Chi1v.version=rdMolDescriptors._CalcChi1v_version
Chi2v = lambda x:rdMolDescriptors.CalcChi2v(x)
Chi2v.version=rdMolDescriptors._CalcChi2v_version
Chi3v = lambda x:rdMolDescriptors.CalcChi3v(x)
Chi3v.version=rdMolDescriptors._CalcChi3v_version
Chi4v = lambda x:rdMolDescriptors.CalcChi4v(x)
Chi4v.version=rdMolDescriptors._CalcChi4v_version
ChiNv_ = lambda x,y:rdMolDescriptors.CalcChiNv(x,y)
ChiNv_.version=rdMolDescriptors._CalcChiNv_version
Chi0n = lambda x:rdMolDescriptors.CalcChi0n(x)
Chi0n.version=rdMolDescriptors._CalcChi0n_version
Chi1n = lambda x:rdMolDescriptors.CalcChi1n(x)
Chi1n.version=rdMolDescriptors._CalcChi1n_version
Chi2n = lambda x:rdMolDescriptors.CalcChi2n(x)
Chi2n.version=rdMolDescriptors._CalcChi2n_version
Chi3n = lambda x:rdMolDescriptors.CalcChi3n(x)
Chi3n.version=rdMolDescriptors._CalcChi3n_version
Chi4n = lambda x:rdMolDescriptors.CalcChi4n(x)
Chi4n.version=rdMolDescriptors._CalcChi4n_version
ChiNn_ = lambda x,y:rdMolDescriptors.CalcChiNn(x,y)
ChiNn_.version=rdMolDescriptors._CalcChiNn_version
def BalabanJ(mol,dMat=None,forceDMat=0):
  """ Calculate Balaban's J value for a molecule

  **Arguments**

    - mol: a molecule

    - dMat: (optional) a distance/adjacency matrix for the molecule, if this
      is not provide, one will be calculated

    - forceDMat: (optional) if this is set, the distance/adjacency matrix
      will be recalculated regardless of whether or not _dMat_ is provided
      or the molecule already has one

  **Returns**

    - a float containing the J value

  We follow the notation of Balaban's paper:
    Chem. Phys. Lett. vol 89, 399-404, (1982)

  """
  # if no dMat is passed in, calculate one ourselves
  if forceDMat or dMat is None:
    if forceDMat:
      # FIX: should we be using atom weights here or not?
      dMat = Chem.GetDistanceMatrix(mol,useBO=1,useAtomWts=0,force=1)
      mol._balabanMat = dMat
      adjMat = Chem.GetAdjacencyMatrix(mol,useBO=0,emptyVal=0,force=0,prefix="NoBO")
      mol._adjMat = adjMat
    else:
      try:
        # first check if the molecule already has one
        dMat = mol._balabanMat
      except AttributeError:
        # nope, gotta calculate one
        dMat = Chem.GetDistanceMatrix(mol,useBO=1,useAtomWts=0,force=0,prefix="Balaban")
        # now store it
        mol._balabanMat = dMat
      try:
        adjMat = mol._adjMat
      except AttributeError:
        adjMat = Chem.GetAdjacencyMatrix(mol,useBO=0,emptyVal=0,force=0,prefix="NoBO")
        mol._adjMat = adjMat
  else:
    adjMat = Chem.GetAdjacencyMatrix(mol,useBO=0,emptyVal=0,force=0,prefix="NoBO")
  s = _VertexDegrees(dMat)
  q = _NumAdjacencies(mol,dMat)
  n = mol.GetNumAtoms()
  mu = q - n + 1  # cyclomatic number
  # fix: renamed the accumulator from `sum`, which shadowed the builtin
  accum = 0.
  nS = len(s)
  for i in range(nS):
    si = s[i]
    for j in range(i,nS):
      if adjMat[i,j] == 1:
        accum += 1./numpy.sqrt(si*s[j])
  if mu+1 != 0:
    J = float(q)/float(mu+1)*accum
  else:
    J = 0
  return J
BalabanJ.version="1.0.0"
#------------------------------------------------------------------------
#
# Start block of BertzCT stuff.
#
def _AssignSymmetryClasses(mol, vdList, bdMat, forceBDMat, numAtoms, cutoff):
  """
    Used by BertzCT

    vdList: the number of neighbors each atom has (unused here)
    bdMat: "balaban" distance matrix

    Atoms whose sorted distance vectors agree out to the `cutoff`th entry
    are assigned the same (1-based) symmetry class; classes are numbered
    in first-seen order.
  """
  if forceBDMat:
    bdMat = Chem.GetDistanceMatrix(mol,useBO=1,useAtomWts=0,force=1,
                                   prefix="Balaban")
    mol._balabanMat = bdMat
  # dict lookup replaces the old O(n^2) keysSeen.index() scan; the unused
  # atomIdx local was also dropped
  classOf = {}
  symList = [0]*numAtoms
  for i in range(numAtoms):
    row = sorted(bdMat[i].tolist())
    theKey = tuple('%.4f'%x for x in row[:cutoff])
    symList[i] = classOf.setdefault(theKey, len(classOf)+1)
  return tuple(symList)
def _LookUpBondOrder(atom1Id, atom2Id, bondDic):
  """
    Used by BertzCT: bond order for the (unordered) atom-id pair, with
    aromatic bonds counted as 1.5.
  """
  theKey = (atom1Id, atom2Id) if atom1Id < atom2Id else (atom2Id, atom1Id)
  order = bondDic[theKey]
  if order == Chem.BondType.AROMATIC:
    return 1.5
  return float(order)
def _CalculateEntropies(connectionDict, atomTypeDict, numAtoms):
  """
    Used by BertzCT: sum of the connection-count and atom-type
    information-entropy terms.
  """
  connectionCounts = list(connectionDict.values())
  nConnections = sum(connectionCounts)
  connectionIE = nConnections*(entropy.InfoEntropy(numpy.array(connectionCounts)) +
                               math.log(nConnections)/_log2val)
  atomTypeCounts = list(atomTypeDict.values())
  atomTypeIE = numAtoms*entropy.InfoEntropy(numpy.array(atomTypeCounts))
  return atomTypeIE + connectionIE
def _CreateBondDictEtc(mol, numAtoms):
  """ _Internal Use Only_
    Used by BertzCT

    Returns (bondDict, neighborLists, vertexDegrees): bond orders keyed by
    ordered atom-id pairs, sorted neighbor lists (None for isolated atoms)
    and per-atom neighbor counts.
  """
  bondDict = {}
  nList = [None]*numAtoms
  vdList = [0]*numAtoms
  for aBond in mol.GetBonds():
    atom1, atom2 = aBond.GetBeginAtomIdx(), aBond.GetEndAtomIdx()
    if atom1 > atom2:
      atom1, atom2 = atom2, atom1
    if aBond.GetIsAromatic():
      # mark Kekulized systems as aromatic
      bondDict[(atom1, atom2)] = Chem.BondType.AROMATIC
    else:
      bondDict[(atom1, atom2)] = aBond.GetBondType()
    if nList[atom1] is None:
      nList[atom1] = [atom2]
    elif atom2 not in nList[atom1]:
      nList[atom1].append(atom2)
    if nList[atom2] is None:
      nList[atom2] = [atom1]
    elif atom1 not in nList[atom2]:
      nList[atom2].append(atom1)
  for i, neighbors in enumerate(nList):
    if neighbors is None:
      vdList[i] = 0
    else:
      neighbors.sort()
      vdList[i] = len(neighbors)
  return bondDict, nList, vdList
def BertzCT(mol, cutoff = 100, dMat = None, forceDMat = 1):
  """ A topological index meant to quantify "complexity" of molecules.

     Consists of a sum of two terms, one representing the complexity
     of the bonding, the other representing the complexity of the
     distribution of heteroatoms.

     From S. H. Bertz, J. Am. Chem. Soc., vol 103, 3599-3601 (1981)

     "cutoff" is an integer value used to limit the computational
     expense.  A cutoff value tells the program to consider vertices
     topologically identical if their distance vectors (sets of
     distances to all other vertices) are equal out to the "cutoff"th
     nearest-neighbor.

     **NOTE**  The original implementation had the following comment:
         > this implementation treats aromatic rings as the
         > corresponding Kekule structure with alternating bonds,
         > for purposes of counting "connections".
       Upon further thought, this is the WRONG thing to do.  It
        results in the possibility of a molecule giving two different
        CT values depending on the kekulization.  For example, in the
        old implementation, these two SMILES:
           CC2=CN=C1C3=C(C(C)=C(C=N3)C)C=CC1=C2C
           CC3=CN=C2C1=NC=C(C)C(C)=C1C=CC2=C3C
        which correspond to different kekule forms, yield different
        values.

        The new implementation uses consistent (aromatic) bond orders
        for aromatic bonds.

        THIS MEANS THAT THIS IMPLEMENTATION IS NOT BACKWARDS COMPATIBLE.

        Any molecule containing aromatic rings will yield different
        values with this implementation.  The new behavior is the correct
        one, so we're going to live with the breakage.

     **NOTE** this barfs if the molecule contains a second (or
       nth) fragment that is one atom.
  """
  # atomTypeDict counts atoms per atomic number; connectionDict counts
  # "connections" keyed by symmetry-class tuples.  Both feed the two
  # entropy terms at the end.
  atomTypeDict = {}
  connectionDict = {}
  numAtoms = mol.GetNumAtoms()
  if forceDMat or dMat is None:
    if forceDMat:
      # nope, gotta calculate one
      dMat = Chem.GetDistanceMatrix(mol,useBO=0,useAtomWts=0,force=1)
      mol._adjMat = dMat
    else:
      # reuse a cached matrix if the molecule carries one
      try:
        dMat = mol._adjMat
      except AttributeError:
        dMat = Chem.GetDistanceMatrix(mol,useBO=0,useAtomWts=0,force=1)
        mol._adjMat = dMat
  # single-atom molecules have no bonding complexity (Issue 136)
  if numAtoms < 2:
    return 0
  bondDict, neighborList, vdList = _CreateBondDictEtc(mol, numAtoms)
  symmetryClasses = _AssignSymmetryClasses(mol, vdList, dMat, forceDMat, numAtoms, cutoff)
  #print('Symmm Classes:',symmetryClasses)
  for atomIdx in range(numAtoms):
    hingeAtomNumber = mol.GetAtomWithIdx(atomIdx).GetAtomicNum()
    atomTypeDict[hingeAtomNumber] = atomTypeDict.get(hingeAtomNumber,0)+1
    hingeAtomClass = symmetryClasses[atomIdx]
    numNeighbors = vdList[atomIdx]
    for i in range(numNeighbors):
      neighbor_iIdx = neighborList[atomIdx][i]
      NiClass = symmetryClasses[neighbor_iIdx]
      bond_i_order = _LookUpBondOrder(atomIdx, neighbor_iIdx, bondDict)
      #print('\t',atomIdx,i,hingeAtomClass,NiClass,bond_i_order)
      # multiple bonds contribute extra connections on their own; the
      # neighbor_iIdx > atomIdx check counts each bond only once
      if (bond_i_order > 1) and (neighbor_iIdx > atomIdx):
        numConnections = bond_i_order*(bond_i_order - 1)/2
        connectionKey = (min(hingeAtomClass, NiClass), max(hingeAtomClass, NiClass))
        connectionDict[connectionKey] = connectionDict.get(connectionKey,0)+numConnections
      # pairs of bonds sharing the hinge atom contribute the product of
      # their orders
      for j in range(i+1, numNeighbors):
        neighbor_jIdx = neighborList[atomIdx][j]
        NjClass = symmetryClasses[neighbor_jIdx]
        bond_j_order = _LookUpBondOrder(atomIdx, neighbor_jIdx, bondDict)
        numConnections = bond_i_order*bond_j_order
        connectionKey = (min(NiClass, NjClass), hingeAtomClass, max(NiClass, NjClass))
        connectionDict[connectionKey] = connectionDict.get(connectionKey,0)+numConnections
  # degenerate case (e.g. a single bond, no connections): use a dummy
  # one-entry dict so the entropy term is well defined
  if not connectionDict:
    connectionDict = {'a':1}
  return _CalculateEntropies(connectionDict, atomTypeDict, numAtoms)
BertzCT.version="2.0.0"
# Recent Revisions:
# 1.0.0 -> 2.0.0:
# - force distance matrix updates properly (Fixed as part of Issue 125)
# - handle single-atom fragments (Issue 136)
#
# End block of BertzCT stuff.
#
#------------------------------------------------------------------------
| adalke/rdkit | rdkit/Chem/GraphDescriptors.py | Python | bsd-3-clause | 19,951 |
#!/usr/bin/env python
# Copyright (C) 2009-2011 :
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import time
""" TODO : Add some comment about this class for the doc"""
class Comment:
    """A comment attached to a host or service, persisted across restarts
    unless marked non-persistent.  Mirrors the Nagios comment model."""

    # class-level counter used to hand out unique ids to new comments
    id = 1

    # Attributes serialized by __getstate__ / restored by __setstate__.
    properties = {
        'entry_time': None,
        'persistent': None,
        'author': None,
        'comment': None,
        'comment_type': None,
        'entry_type': None,
        'source': None,
        'expires': None,
        'expire_time': None,
        'can_be_deleted': None,
        # TODO: find a very good way to handle the downtime "ref"
        # ref must effectively not be in properties because it points onto a real object.
        # 'ref': None
    }

    # Adds a comment to a particular service. If the "persistent" field
    # is set to zero (0), the comment will be deleted the next time
    # Shinken is restarted. Otherwise, the comment will persist
    # across program restarts until it is deleted manually.
    def __init__(self, ref, persistent, author, comment, comment_type, entry_type, source, expires, expire_time):
        """Create a new comment on *ref* (a host or service object).

        comment_type: 1=host comment, 2=service comment
        entry_type: 1=user, 2=downtime, 3=flapping, 4=acknowledgement
        source: 0=internal, 1=external
        """
        self.id = self.__class__.id
        self.__class__.id += 1
        self.ref = ref #pointer to srv or host we are apply
        self.entry_time = int(time.time())
        self.persistent = persistent
        self.author = author
        self.comment = comment
        #Now the hidden attributes
        #HOST_COMMENT=1,SERVICE_COMMENT=2
        self.comment_type = comment_type
        #USER_COMMENT=1,DOWNTIME_COMMENT=2,FLAPPING_COMMENT=3,ACKNOWLEDGEMENT_COMMENT=4
        self.entry_type = entry_type
        #COMMENTSOURCE_INTERNAL=0,COMMENTSOURCE_EXTERNAL=1
        self.source = source
        self.expires = expires
        self.expire_time = expire_time
        self.can_be_deleted = False

    def __str__(self):
        return "Comment id=%d %s" % (self.id, self.comment)

    # Called by pickle to serialize this comment.
    # We deliberately leave 'ref' out: it points to a live host/service
    # object and must NOT end up in the pickled state!
    def __getstate__(self):
        cls = self.__class__
        # id is not in *_properties
        res = { 'id' : self.id }
        for prop in cls.properties:
            if hasattr(self, prop):
                res[prop] = getattr(self, prop)
        return res

    # Inverted funtion of getstate
    def __setstate__(self, state):
        cls = self.__class__
        # Maybe it's not a dict but a list like in the old 0.4 format
        # so we should call the 0.4 function for it
        if isinstance(state, list):
            self.__setstate_deprecated__(state)
            return
        self.id = state['id']
        for prop in cls.properties:
            if prop in state:
                setattr(self, prop, state[prop])
        # to prevent from duplicating id in comments:
        if self.id >= cls.id:
            cls.id = self.id + 1

    # This function is DEPRECATED and will be removed in a future version of
    # Shinken. It should not be useful any more after a first load/save pass.
    # Inverted funtion of getstate
    # NOTE(review): this relies on the list ordering matching the iteration
    # order of cls.properties at the time the state was produced -- fragile,
    # which is part of why it is deprecated.
    def __setstate_deprecated__(self, state):
        cls = self.__class__
        # Check if the len of this state is like the previous,
        # if not, we will do errors!
        # -1 because of the 'id' prop
        if len(cls.properties) != (len(state) - 1):
            print "Passing comment"
            return
        self.id = state.pop()
        for prop in cls.properties:
            val = state.pop()
            setattr(self, prop, val)
        if self.id >= cls.id:
            cls.id = self.id + 1
| baloo/shinken | shinken/comment.py | Python | agpl-3.0 | 4,368 |
from dimagi.ext.couchdbkit import *
from dimagi.utils.decorators.memoized import memoized
class SnapshotMixin(DocumentSchema):
    """Mixin for documents that track where they were copied from."""
    # ordered chain of ancestor document ids, oldest first
    copy_history = StringListProperty()

    @property
    def is_copy(self):
        """Whether this document was produced by copying another one."""
        return bool(self.copy_history)

    @property
    @memoized
    def copied_from(self):
        """The document this one was most recently copied from, or None."""
        doc_id = self.copy_history[-1] if self.is_copy else None
        if not doc_id:
            return None
        return self.get(doc_id)

    def get_updated_history(self):
        """The copy history extended with this document's own id."""
        return self.copy_history + [self._id]
| puttarajubr/commcare-hq | corehq/apps/appstore/models.py | Python | bsd-3-clause | 559 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyignite import Client
client = Client()
client.connect('127.0.0.1', 10800)

cache = client.create_cache('my cache')

# load 20 key/value pairs: 'key_0' -> 0 ... 'key_19' -> 19
cache.put_all({'key_{}'.format(v): v for v in range(20)})

# a scan yields (key, value) pairs in no particular order, and the
# cursor is exhausted after a single pass
for key, value in cache.scan():
    print(key, value)
# 'key_17' 17
# 'key_10' 10
# ... 20 elements in total...
# 'key_12' 12

# issue a fresh scan to materialize the whole cache as a dict
print(dict(cache.scan()))
# {
#     'key_17': 17,
#     'key_10': 10,
#     ... 20 elements in total...
#     'key_12': 12
# }

cache.destroy()
client.close()
| samaitra/ignite | modules/platforms/python/examples/scans.py | Python | apache-2.0 | 1,487 |
# Copyright (C) 2011-2012 Patrick Totzke <[email protected]>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from __future__ import absolute_import
import logging
import os
import urwid
from urwidtrees import ArrowTree, TreeBox, NestedTree
from notmuch import NotmuchError
from .settings.const import settings
from . import commands
from .walker import PipeWalker
from .helper import shorten_author_string
from .db.errors import NonexistantObjectError
from .widgets.globals import TagWidget
from .widgets.globals import HeadersList
from .widgets.globals import AttachmentWidget
from .widgets.bufferlist import BufferlineWidget
from .widgets.search import ThreadlineWidget
from .widgets.thread import ThreadTree
class Buffer(object):
    """Abstract base class for buffers."""

    # mode identifier for subclasses
    modename = None

    def __init__(self, ui, widget):
        self.ui = ui
        self.body = widget

    def __str__(self):
        return '[%s]' % self.modename

    def render(self, size, focus=False):
        """Render the wrapped body widget."""
        return self.body.render(size, focus)

    def selectable(self):
        """Delegate selectability to the body widget."""
        return self.body.selectable()

    def rebuild(self):
        """tells the buffer to (re)construct its visible content."""
        pass

    def keypress(self, size, key):
        """Forward unhandled keypresses to the body widget."""
        return self.body.keypress(size, key)

    def cleanup(self):
        """called before buffer is closed"""
        pass

    def get_info(self):
        """
        return dict of meta infos about this buffer.
        This can be requested to be displayed in the statusbar.
        """
        return {}
class BufferlistBuffer(Buffer):
    """lists all active buffers"""

    modename = 'bufferlist'

    def __init__(self, ui, filtfun=lambda x: x):
        self.filtfun = filtfun
        self.ui = ui
        self.isinitialized = False
        self.rebuild()
        Buffer.__init__(self, ui, self.body)

    def index_of(self, b):
        """
        returns the index of :class:`Buffer` `b` in the global list of
        active buffers.
        """
        return self.ui.buffers.index(b)

    def rebuild(self):
        """(re)construct the list widget from the current buffers."""
        # keep the cursor where it was on rebuilds after the first one
        focusposition = self.bufferlist.get_focus()[1] if self.isinitialized \
            else 0
        self.isinitialized = True

        shown = [buf for buf in self.ui.buffers if self.filtfun(buf)]
        lines = []
        for position, buf in enumerate(shown):
            which = 'line_even' if position % 2 == 0 else 'line_odd'
            attr = settings.get_theming_attribute('bufferlist', which)
            focus_att = settings.get_theming_attribute('bufferlist',
                                                       'line_focus')
            content = urwid.AttrMap(BufferlineWidget(buf), attr, focus_att)
            index_wgt = urwid.Text('%3d:' % self.index_of(buf))
            lines.append(urwid.Columns([('fixed', 4, index_wgt), content]))

        self.bufferlist = urwid.ListBox(urwid.SimpleListWalker(lines))
        if focusposition is not None and shown:
            self.bufferlist.set_focus(focusposition % len(shown))
        self.body = self.bufferlist

    def get_selected_buffer(self):
        """returns currently selected :class:`Buffer` element from list"""
        linewidget, _ = self.bufferlist.get_focus()
        return linewidget.get_focus().original_widget.get_buffer()

    def focus_first(self):
        """Focus the first line in the buffer list."""
        self.body.set_focus(0)
class EnvelopeBuffer(Buffer):
    """message composition mode"""
    modename = 'envelope'

    def __init__(self, ui, envelope):
        """
        :param ui: main UI
        :param envelope: the envelope (draft mail) being composed
        """
        self.ui = ui
        self.envelope = envelope
        # when True, rebuild() shows even blacklisted headers
        self.all_headers = False
        self.rebuild()
        Buffer.__init__(self, ui, self.body)

    def __str__(self):
        to = self.envelope.get('To', fallback='unset')
        return '[envelope] to: %s' % (shorten_author_string(to, 400))

    def get_info(self):
        """return buffer metadata (recipient) for the statusbar"""
        info = {}
        info['to'] = self.envelope.get('To', fallback='unset')
        return info

    def cleanup(self):
        """remove the temporary file used for editing, if any"""
        if self.envelope.tmpfile:
            os.unlink(self.envelope.tmpfile.name)

    def rebuild(self):
        """(re)construct the widget tree: headers, GPG status,
        attachments and mail body, in that display order."""
        displayed_widgets = []
        hidden = settings.get('envelope_headers_blacklist')
        # build lines
        lines = []
        for (k, vlist) in self.envelope.headers.iteritems():
            if (k not in hidden) or self.all_headers:
                for value in vlist:
                    lines.append((k, value))
        # sign/encrypt lines
        if self.envelope.sign:
            description = 'Yes'
            sign_key = self.envelope.sign_key
            if sign_key is not None and len(sign_key.subkeys) > 0:
                description += ', with key ' + sign_key.uids[0].uid
            lines.append(('GPG sign', description))
        if self.envelope.encrypt:
            description = 'Yes'
            encrypt_keys = self.envelope.encrypt_keys.values()
            if len(encrypt_keys) == 1:
                description += ', with key '
            elif len(encrypt_keys) > 1:
                description += ', with keys '
            key_ids = []
            for key in encrypt_keys:
                if key is not None and key.subkeys:
                    key_ids.append(key.uids[0].uid)
            description += ', '.join(key_ids)
            lines.append(('GPG encrypt', description))
        if self.envelope.tags:
            lines.append(('Tags', ','.join(self.envelope.tags)))
        # add header list widget iff header values exist
        if lines:
            key_att = settings.get_theming_attribute('envelope', 'header_key')
            value_att = settings.get_theming_attribute('envelope',
                                                       'header_value')
            gaps_att = settings.get_theming_attribute('envelope', 'header')
            self.header_wgt = HeadersList(lines, key_att, value_att, gaps_att)
            displayed_widgets.append(self.header_wgt)
        # display attachments
        lines = []
        for a in self.envelope.attachments:
            lines.append(AttachmentWidget(a, selectable=False))
        if lines:
            self.attachment_wgt = urwid.Pile(lines)
            displayed_widgets.append(self.attachment_wgt)
        self.body_wgt = urwid.Text(self.envelope.body)
        displayed_widgets.append(self.body_wgt)
        self.body = urwid.ListBox(displayed_widgets)

    def toggle_all_headers(self):
        """toggles visibility of all envelope headers"""
        self.all_headers = not self.all_headers
        self.rebuild()
class SearchBuffer(Buffer):
    """shows a result list of threads for a query"""
    modename = 'search'
    threads = []
    # maps each sort order to its opposite, used when rebuilding reversed
    _REVERSE = {'oldest_first': 'newest_first',
                'newest_first': 'oldest_first'}

    def __init__(self, ui, initialquery='', sort_order=None):
        """
        :param ui: main UI
        :param initialquery: notmuch query string to display results for
        :param sort_order: sort order for threads; falls back to the
                           'search_threads_sort_order' setting
        """
        self.dbman = ui.dbman
        self.ui = ui
        self.querystring = initialquery
        default_order = settings.get('search_threads_sort_order')
        self.sort_order = sort_order or default_order
        self.result_count = 0
        self.isinitialized = False
        self.proc = None  # process that fills our pipe
        self.rebuild()
        Buffer.__init__(self, ui, self.body)

    def __str__(self):
        formatstring = '[search] for "%s" (%d message%s)'
        return formatstring % (self.querystring, self.result_count,
                               's' if self.result_count > 1 else '')

    def get_info(self):
        """return query and result-count metadata for the statusbar"""
        info = {}
        info['querystring'] = self.querystring
        info['result_count'] = self.result_count
        info['result_count_positive'] = 's' if self.result_count > 1 else ''
        return info

    def cleanup(self):
        """called before buffer is closed; stops the filler process"""
        self.kill_filler_process()

    def kill_filler_process(self):
        """
        terminates the process that fills this buffer's
        :class:`~alot.walker.PipeWalker`.
        """
        if self.proc:
            if self.proc.is_alive():
                self.proc.terminate()

    def rebuild(self, reverse=False):
        """requery the database and rebuild the result list.

        :param reverse: display results in reversed sort order
        """
        self.isinitialized = True
        self.reversed = reverse
        self.kill_filler_process()
        self.result_count = self.dbman.count_messages(self.querystring)
        if reverse:
            order = self._REVERSE[self.sort_order]
        else:
            order = self.sort_order
        exclude_tags = settings.get_notmuch_setting('search', 'exclude_tags')
        if exclude_tags:
            exclude_tags = [t for t in exclude_tags.split(';') if t]
        try:
            self.pipe, self.proc = self.dbman.get_threads(self.querystring,
                                                          order,
                                                          exclude_tags)
        except NotmuchError:
            self.ui.notify('malformed query string: %s' % self.querystring,
                           'error')
            self.listbox = urwid.ListBox([])
            self.body = self.listbox
            return
        self.threadlist = PipeWalker(self.pipe, ThreadlineWidget,
                                     dbman=self.dbman,
                                     reverse=reverse)
        self.listbox = urwid.ListBox(self.threadlist)
        self.body = self.listbox

    def get_selected_threadline(self):
        """
        returns currently focused :class:`alot.widgets.ThreadlineWidget`
        from the result list.
        """
        threadlinewidget, _ = self.threadlist.get_focus()
        return threadlinewidget

    def get_selected_thread(self):
        """returns currently selected :class:`~alot.db.Thread`"""
        threadlinewidget = self.get_selected_threadline()
        thread = None
        if threadlinewidget:
            thread = threadlinewidget.get_thread()
        return thread

    def consume_pipe(self):
        """drain the result pipe so the full thread list is materialized"""
        while not self.threadlist.empty:
            self.threadlist._get_next_item()

    def focus_first(self):
        """set focus to the first result line"""
        if not self.reversed:
            self.body.set_focus(0)
        else:
            self.rebuild(reverse=False)

    def focus_last(self):
        """set focus to the last result line; for large result sets this
        rebuilds in reversed order instead of consuming the whole pipe"""
        if self.reversed:
            self.body.set_focus(0)
        elif self.result_count < 200 or self.sort_order not in self._REVERSE:
            self.consume_pipe()
            num_lines = len(self.threadlist.get_lines())
            self.body.set_focus(num_lines - 1)
        else:
            self.rebuild(reverse=True)
class ThreadBuffer(Buffer):
    """displays a thread as a tree of messages"""
    modename = 'thread'

    def __init__(self, ui, thread):
        """
        :param ui: main UI
        :type ui: :class:`~alot.ui.UI`
        :param thread: thread to display
        :type thread: :class:`~alot.db.Thread`
        """
        self.thread = thread
        self.message_count = thread.get_total_messages()
        # two semaphores for auto-removal of unread tag
        self._auto_unread_dont_touch_mids = set([])
        self._auto_unread_writing = False
        self.rebuild()
        Buffer.__init__(self, ui, self.body)

    def __str__(self):
        return '[thread] %s (%d message%s)' % (self.thread.get_subject(),
                                               self.message_count,
                                               's' * (self.message_count > 1))

    def get_info(self):
        """return subject/author/thread-id metadata for the statusbar"""
        info = {}
        info['subject'] = self.thread.get_subject()
        info['authors'] = self.thread.get_authors_string()
        info['tid'] = self.thread.get_thread_id()
        info['message_count'] = self.message_count
        return info

    def get_selected_thread(self):
        """returns the displayed :class:`~alot.db.Thread`"""
        return self.thread

    def rebuild(self):
        """(re)construct the message tree widget from the thread;
        falls back to an empty fill if the thread vanished from the db"""
        try:
            self.thread.refresh()
        except NonexistantObjectError:
            self.body = urwid.SolidFill()
            self.message_count = 0
            return
        self._tree = ThreadTree(self.thread)
        bars_att = settings.get_theming_attribute('thread', 'arrow_bars')
        heads_att = settings.get_theming_attribute('thread', 'arrow_heads')
        A = ArrowTree(
            self._tree,
            indent=2,
            childbar_offset=0,
            arrow_tip_att=heads_att,
            arrow_att=bars_att)
        self._nested_tree = NestedTree(A, interpret_covered=True)
        self.body = TreeBox(self._nested_tree)
        self.message_count = self.thread.get_total_messages()

    def render(self, size, focus=False):
        """render the tree; as a side effect, when 'auto_remove_unread'
        is set, drop the 'unread' tag from the message the cursor rests
        on (but only while the cursor is inside the message body, not on
        its summary line)"""
        if self.message_count == 0:
            return self.body.render(size, focus)
        if settings.get('auto_remove_unread'):
            logging.debug('Tbuffer: auto remove unread tag from msg?')
            msg = self.get_selected_message()
            mid = msg.get_message_id()
            focus_pos = self.body.get_focus()[1]
            summary_pos = (self.body.get_focus()[1][0], (0,))
            cursor_on_non_summary = (focus_pos != summary_pos)
            if cursor_on_non_summary:
                # the mid is "locked" until the cursor leaves the message
                # again, so the tag write happens at most once per visit
                if mid not in self._auto_unread_dont_touch_mids:
                    if 'unread' in msg.get_tags():
                        logging.debug('Tbuffer: removing unread')

                        def clear():
                            self._auto_unread_writing = False

                        self._auto_unread_dont_touch_mids.add(mid)
                        self._auto_unread_writing = True
                        msg.remove_tags(['unread'], afterwards=clear)
                        fcmd = commands.globals.FlushCommand(silent=True)
                        self.ui.apply_command(fcmd)
                    else:
                        logging.debug('Tbuffer: No, msg not unread')
                else:
                    logging.debug('Tbuffer: No, mid locked for autorm-unread')
            else:
                # back on the summary line: release the lock once the
                # pending tag write has finished
                if not self._auto_unread_writing and \
                        mid in self._auto_unread_dont_touch_mids:
                    self._auto_unread_dont_touch_mids.remove(mid)
                logging.debug('Tbuffer: No, cursor on summary')
        return self.body.render(size, focus)

    def get_selected_mid(self):
        """returns Message ID of focussed message"""
        return self.body.get_focus()[1][0]

    def get_selected_message_position(self):
        """returns position of focussed message in the thread tree"""
        return self._sanitize_position((self.get_selected_mid(),))

    def get_selected_messagetree(self):
        """returns currently focussed :class:`MessageTree`"""
        return self._nested_tree[self.body.get_focus()[1][:1]]

    def get_selected_message(self):
        """returns focussed :class:`~alot.db.message.Message`"""
        return self.get_selected_messagetree()._message

    def get_messagetree_positions(self):
        """
        returns a Generator to walk through all positions of
        :class:`MessageTree` in the :class:`ThreadTree` of this buffer.
        """
        return [(pos,) for pos in self._tree.positions()]

    def messagetrees(self):
        """
        returns a Generator of all :class:`MessageTree` in the
        :class:`ThreadTree` of this buffer.
        """
        for pos in self._tree.positions():
            yield self._tree[pos]

    def refresh(self):
        """refresh and flush caches of Thread tree"""
        self.body.refresh()

    # needed for ui.get_deep_focus..
    def get_focus(self):
        "Get the focus from the underlying body widget."
        return self.body.get_focus()

    def set_focus(self, pos):
        "Set the focus in the underlying body widget."
        logging.debug('setting focus to %s ', pos)
        self.body.set_focus(pos)

    def focus_first(self):
        """set focus to first message of thread"""
        self.body.set_focus(self._nested_tree.root)

    def focus_last(self):
        """set focus to last message of thread"""
        self.body.set_focus(next(self._nested_tree.positions(reverse=True)))

    def _sanitize_position(self, pos):
        """clamp a raw tree position to a valid one in the nested tree"""
        return self._nested_tree._sanitize_position(pos,
                                                    self._nested_tree._tree)

    def focus_selected_message(self):
        """focus the summary line of currently focussed message"""
        # move focus to summary (root of current MessageTree)
        self.set_focus(self.get_selected_message_position())

    def focus_parent(self):
        """move focus to parent of currently focussed message"""
        mid = self.get_selected_mid()
        newpos = self._tree.parent_position(mid)
        if newpos is not None:
            newpos = self._sanitize_position((newpos,))
            self.body.set_focus(newpos)

    def focus_first_reply(self):
        """move focus to first reply to currently focussed message"""
        mid = self.get_selected_mid()
        newpos = self._tree.first_child_position(mid)
        if newpos is not None:
            newpos = self._sanitize_position((newpos,))
            self.body.set_focus(newpos)

    def focus_last_reply(self):
        """move focus to last reply to currently focussed message"""
        mid = self.get_selected_mid()
        newpos = self._tree.last_child_position(mid)
        if newpos is not None:
            newpos = self._sanitize_position((newpos,))
            self.body.set_focus(newpos)

    def focus_next_sibling(self):
        """focus next sibling of currently focussed message in thread tree"""
        mid = self.get_selected_mid()
        newpos = self._tree.next_sibling_position(mid)
        if newpos is not None:
            newpos = self._sanitize_position((newpos,))
            self.body.set_focus(newpos)

    def focus_prev_sibling(self):
        """
        focus previous sibling of currently focussed message in thread tree
        """
        mid = self.get_selected_mid()
        localroot = self._sanitize_position((mid,))
        # if we are on the summary line, move to the previous sibling;
        # otherwise jump back to our own summary line first
        if localroot == self.get_focus()[1]:
            newpos = self._tree.prev_sibling_position(mid)
            if newpos is not None:
                newpos = self._sanitize_position((newpos,))
        else:
            newpos = localroot
        if newpos is not None:
            self.body.set_focus(newpos)

    def focus_next(self):
        """focus next message in depth first order"""
        mid = self.get_selected_mid()
        newpos = self._tree.next_position(mid)
        if newpos is not None:
            newpos = self._sanitize_position((newpos,))
            self.body.set_focus(newpos)

    def focus_prev(self):
        """focus previous message in depth first order"""
        mid = self.get_selected_mid()
        localroot = self._sanitize_position((mid,))
        # same summary-line logic as focus_prev_sibling
        if localroot == self.get_focus()[1]:
            newpos = self._tree.prev_position(mid)
            if newpos is not None:
                newpos = self._sanitize_position((newpos,))
        else:
            newpos = localroot
        if newpos is not None:
            self.body.set_focus(newpos)

    def focus_property(self, prop, direction):
        """does a walk in the given direction and focuses the
        first message tree that matches the given property"""
        newpos = self.get_selected_mid()
        newpos = direction(newpos)
        while newpos is not None:
            MT = self._tree[newpos]
            if prop(MT):
                newpos = self._sanitize_position((newpos,))
                self.body.set_focus(newpos)
                break
            newpos = direction(newpos)

    def focus_next_matching(self, querystring):
        """focus next matching message in depth first order"""
        self.focus_property(lambda x: x._message.matches(querystring),
                            self._tree.next_position)

    def focus_prev_matching(self, querystring):
        """focus previous matching message in depth first order"""
        self.focus_property(lambda x: x._message.matches(querystring),
                            self._tree.prev_position)

    def focus_next_unfolded(self):
        """focus next unfolded message in depth first order"""
        self.focus_property(lambda x: not x.is_collapsed(x.root),
                            self._tree.next_position)

    def focus_prev_unfolded(self):
        """focus previous unfolded message in depth first order"""
        self.focus_property(lambda x: not x.is_collapsed(x.root),
                            self._tree.prev_position)

    def expand(self, msgpos):
        """expand message at given position"""
        MT = self._tree[msgpos]
        MT.expand(MT.root)

    def messagetree_at_position(self, pos):
        """get :class:`MessageTree` for given position"""
        return self._tree[pos[0]]

    def expand_all(self):
        """expand all messages in thread"""
        for MT in self.messagetrees():
            MT.expand(MT.root)

    def collapse(self, msgpos):
        """collapse message at given position"""
        MT = self._tree[msgpos]
        MT.collapse(MT.root)
        self.focus_selected_message()

    def collapse_all(self):
        """collapse all messages in thread"""
        for MT in self.messagetrees():
            MT.collapse(MT.root)
        self.focus_selected_message()

    def unfold_matching(self, querystring, focus_first=True):
        """
        expand all messages that match a given querystring.

        :param querystring: query to match
        :type querystring: str
        :param focus_first: set the focus to the first matching message
        :type focus_first: bool
        """
        first = None
        for MT in self.messagetrees():
            msg = MT._message
            if msg.matches(querystring):
                MT.expand(MT.root)
                if first is None:
                    first = (self._tree.position_of_messagetree(MT), MT.root)
                    self.body.set_focus(first)
            else:
                MT.collapse(MT.root)
        self.body.refresh()
class TagListBuffer(Buffer):
    """lists all tagstrings present in the notmuch database"""
    modename = 'taglist'

    def __init__(self, ui, alltags=None, filtfun=lambda x: x):
        """
        :param ui: main UI
        :param alltags: complete list of tagstrings to display
        :param filtfun: predicate deciding which tags are shown
        """
        self.filtfun = filtfun
        self.ui = ui
        self.tags = alltags or []
        self.isinitialized = False
        self.rebuild()
        Buffer.__init__(self, ui, self.body)

    def rebuild(self):
        """(re)construct the list widget from the current set of tags."""
        if self.isinitialized:
            focusposition = self.taglist.get_focus()[1]
        else:
            focusposition = 0
        self.isinitialized = True
        lines = list()
        displayedtags = sorted((t for t in self.tags if self.filtfun(t)),
                               key=unicode.lower)
        for (num, b) in enumerate(displayedtags):
            if (num % 2) == 0:
                attr = settings.get_theming_attribute('taglist', 'line_even')
            else:
                attr = settings.get_theming_attribute('taglist', 'line_odd')
            focus_att = settings.get_theming_attribute('taglist', 'line_focus')
            tw = TagWidget(b, attr, focus_att)
            rows = [('fixed', tw.width(), tw)]
            if tw.hidden:
                rows.append(urwid.Text(b + ' [hidden]'))
            elif tw.translated != b:
                # compare by value, not identity ('is not'): equal strings
                # are not guaranteed to be the same object
                rows.append(urwid.Text('(%s)' % b))
            line = urwid.Columns(rows, dividechars=1)
            line = urwid.AttrMap(line, attr, focus_att)
            lines.append(line)
        self.taglist = urwid.ListBox(urwid.SimpleListWalker(lines))
        self.body = self.taglist
        # guard against an empty tag list: the modulo below would raise
        # ZeroDivisionError (same guard as BufferlistBuffer.rebuild)
        if displayedtags:
            self.taglist.set_focus(focusposition % len(displayedtags))

    def focus_first(self):
        """Focus the first line in the tag list."""
        self.body.set_focus(0)

    def focus_last(self):
        """Focus the last line in the tag list."""
        allpos = self.taglist.body.positions(reverse=True)
        if allpos:
            lastpos = allpos[0]
            self.body.set_focus(lastpos)

    def get_selected_tag(self):
        """returns selected tagstring"""
        cols, _ = self.taglist.get_focus()
        tagwidget = cols.original_widget.get_focus()
        return tagwidget.tag
| geier/alot | alot/buffers.py | Python | gpl-3.0 | 24,285 |
# Copyright (C) 2013-2014 SignalFuse, Inc.
# Copyright (C) 2015 SignalFx, Inc.
#
# Docker container orchestration utility.
from __future__ import print_function
import functools
import sys
import threading
from . import tasks
from .. import audit
from .. import exceptions
from .. import termoutput
from ..termoutput import columns, green, red, supports_color, time_ago
class BaseOrchestrationPlay:
"""Base class for orchestration plays.
Orchestration plays automatically parallelize the orchestration action
while respecting the dependencies between the containers and the dependency
order direction.
"""
# Data column sizes.
# Instance name column is bounded between 20 and 40 characters. Ship name
# column is bounded between 0 and 40 characters. We keep 60 columns for
# pending and commited output in the last column.
_COLUMNS = columns()
_INST_CSIZE = min(40, max(20, int((_COLUMNS - 60) / 3)))
_SHIP_CSIZE = min(40, max(0, _COLUMNS - _INST_CSIZE - 80))
# Header line format and titles.
HEADER_FMT = ('{{:>3s}} {{:<{}.{}s}} {{:<20.20s}} {{:<{}.{}s}} '
.format(_INST_CSIZE, _INST_CSIZE,
_SHIP_CSIZE, _SHIP_CSIZE)) + \
tasks.CONTAINER_STATUS_FMT + ' ' + tasks.TASK_RESULT_FMT
HEADERS = [' #', 'INSTANCE', 'SERVICE', 'SHIP', 'CONTAINER', 'STATUS']
# Output line format (to which the task output columns are added).
LINE_FMT = ('{{:>3d}}. {}{{:<{}.{}s}}{} {{:<20.20s}} {{:<{}.{}s}}'
.format('\033[1m' if supports_color() else '',
_INST_CSIZE, _INST_CSIZE,
'\033[0m' if supports_color() else '',
_SHIP_CSIZE, _SHIP_CSIZE))
def __init__(self, containers=[], forward=True, ignore_dependencies=False,
concurrency=None, auditor=None):
self._containers = containers
self._forward = forward
self._ignore_dependencies = ignore_dependencies
self._concurrency = threading.Semaphore(concurrency or len(containers))
self._auditor = auditor
self._play = self.__class__.__name__.lower()
self._dependencies = dict(
(c.name, self._gather_dependencies(c)) for c in containers)
self._om = termoutput.OutputManager(len(containers))
self._threads = set([])
self._done = set([])
self._error = None
self._cv = threading.Condition()
@property
def containers(self):
return self._containers
def register(self, task):
"""Register an orchestration action for a given container.
The action is automatically wrapped into a layer that limits the
concurrency to enforce the dependency order of the orchestration play.
The action is only performed once the action has been performed for all
the dependencies (or dependents, depending on the forward parameter).
Args:
task (tasks.Task): the task to execute.
"""
def act(task):
task.o.pending('waiting...')
# Wait until we can be released (or if an error occurred for
# another container).
self._cv.acquire()
while not self._satisfied(task.container) and not self._error:
self._cv.wait(1)
self._cv.release()
# Abort if needed
if self._error:
task.o.commit(red('aborted!'))
return
try:
self._concurrency.acquire(blocking=True)
task.run(auditor=self._auditor)
self._concurrency.release()
self._done.add(task.container)
except Exception:
task.o.commit(red('failed!'))
self._error = sys.exc_info()
finally:
self._cv.acquire()
self._cv.notifyAll()
self._cv.release()
t = threading.Thread(target=act, args=(tuple([task])))
t.daemon = True
t.start()
self._threads.add(t)
def _start(self):
"""Start the orchestration play."""
if self._auditor:
self._auditor.action(level=audit.INFO, action=self._play,
what=self._containers)
print(BaseOrchestrationPlay.HEADER_FMT
.format(*BaseOrchestrationPlay.HEADERS))
self._om.start()
    def _end(self):
        """End the orchestration play by waiting for all the action threads to
        complete."""
        for t in self._threads:
            try:
                # Join with a 1s timeout so a KeyboardInterrupt can get
                # through, and stop waiting early once an error is recorded.
                while not self._error and t.is_alive():
                    t.join(1)
            except KeyboardInterrupt:
                # Manual abort: synthesize an exc_info-style triple.
                self._error = (exceptions.MaestroException,
                               exceptions.MaestroException('Manual abort'),
                               None)
            except Exception:
                self._error = sys.exc_info()
            finally:
                # Wake any task threads still blocked on the condition so
                # they can observe the error and abort.
                self._cv.acquire()
                self._cv.notifyAll()
                self._cv.release()
        self._om.end()

        # Display and raise any error that occurred
        if self._error:
            if self._auditor:
                self._auditor.error(action=self._play, what=self._containers,
                                    message=str(self._error[1]))
            exceptions.raise_with_tb(self._error)
        else:
            if self._auditor:
                self._auditor.success(level=audit.INFO, action=self._play,
                                      what=self._containers)
    def _run(self):
        """Register the play's tasks; must be implemented by subclasses."""
        raise NotImplementedError
    def run(self):
        """Run the complete play: start output, register tasks, wait for
        all task threads to finish (and re-raise any recorded error)."""
        self._start()
        self._run()
        self._end()
    def _gather_dependencies(self, container):
        """Transitively gather all containers from the dependencies or
        dependent (depending on the value of the forward parameter) services
        of the service the given container is a member of. This set is limited
        to the containers involved in the orchestration play."""
        containers = set(self._containers)
        result = set([container])
        # NOTE(review): the loop rebinds `result` instead of mutating it, so
        # iteration is over the original one-element set and runs exactly
        # once — only direct dependencies are gathered despite the docstring
        # saying "transitively". Left as-is; confirm intent before changing.
        for container in result:
            deps = container.service.requires if self._forward \
                else container.service.needed_for
            # Flatten the dependency services into their member containers.
            deps = functools.reduce(lambda x, y: x.union(y),
                                    [s.containers for s in deps],
                                    set([]))
            result = result.union(deps.intersection(containers))
        # The seed container is not a dependency of itself.
        result.remove(container)
        return result
def _satisfied(self, container):
"""Returns True if all the dependencies of a given container have been
satisfied by what's been executed so far (or if it was explicitely
requested to ignore dependencies)."""
if self._ignore_dependencies:
return True
missing = self._dependencies[container.name].difference(self._done)
return len(missing) == 0
class FullStatus(BaseOrchestrationPlay):
    """A Maestro orchestration play that displays the status of the given
    services and/or instance containers.

    This orchestration play does not make use of the concurrent play execution
    features.
    """

    def __init__(self, containers=[], show_hosts=False):
        BaseOrchestrationPlay.__init__(self, containers)
        # Whether to display ship hostnames instead of raw addresses.
        self._show_hosts = show_hosts

    def _run(self):
        # Sequentially query and print the status of each container,
        # starting the display numbering at 1.
        for order, container in enumerate(self._containers, 1):
            ship_name = container.ship.address(self._show_hosts)
            o = termoutput.OutputFormatter(prefix=(
                BaseOrchestrationPlay.LINE_FMT.format(
                    order, container.name, container.service.name,
                    ship_name)))
            try:
                o.pending('checking container...')
                status = container.status()
                # Running vs. down line, with a humanized age.
                if status and status['State']['Running']:
                    o.commit(green(tasks.CONTAINER_STATUS_FMT.format(
                        container.shortid_and_tag)))
                    o.commit(green('running{}'.format(
                        time_ago(container.started_at))))
                else:
                    o.commit(red('down{}'.format(
                        time_ago(container.finished_at))))
                o.commit('\n')
                # Second line: image name and (abbreviated) image digest.
                image_info = termoutput.OutputFormatter(prefix=' ')
                image_info.commit(container.image)
                if status:
                    image_sha = status['Image']
                    # Strip a "sha256:"-style algorithm prefix, if present.
                    if ':' in image_sha:
                        image_sha = image_sha.split(':', 1)[1]
                    image_info.commit(' ({})'.format(image_sha[:7]))
                image_info.commit('\n')
                # One line per published port, with an up/down ping check.
                for name, port in container.ports.items():
                    o = termoutput.OutputFormatter(prefix=' >>')
                    o.commit('{:>15.15s}: {:>9.9s} is'
                             .format(name, port['external'][1]))
                    o.commit(green('up') if container.ping_port(name)
                             else red('down'))
                    o.commit('\n')
            except Exception:
                # Any failure talking to the ship/daemon is reported as a
                # down host rather than aborting the whole status listing.
                o.commit(tasks.CONTAINER_STATUS_FMT.format('-'))
                o.commit(red('host down'))
class Status(BaseOrchestrationPlay):
    """A less advanced, but faster (concurrent) status display orchestration
    play that only looks at the presence and status of the containers."""

    def __init__(self, containers=None, concurrency=None, show_hosts=False):
        # None instead of a mutable [] default avoids sharing one list
        # object across all instances of the play.
        BaseOrchestrationPlay.__init__(
            self, containers if containers is not None else [],
            ignore_dependencies=True,
            concurrency=concurrency)
        self._show_hosts = show_hosts

    def _run(self):
        """Register one concurrent StatusTask per container."""
        for order, container in enumerate(self._containers):
            ship_name = container.ship.address(self._show_hosts)
            o = self._om.get_formatter(order, prefix=(
                BaseOrchestrationPlay.LINE_FMT.format(
                    order + 1, container.name, container.service.name,
                    ship_name)))
            self.register(tasks.StatusTask(o, container))
class Start(BaseOrchestrationPlay):
    """A Maestro orchestration play that will execute the start sequence of the
    requested services, starting each container for each instance of the
    services, in the given start order, waiting for each container's
    application to become available before moving to the next one."""

    def __init__(self, containers=None, registries=None, refresh_images=False,
                 ignore_dependencies=True, concurrency=None, reuse=False,
                 auditor=None):
        # None defaults avoid the shared mutable-default pitfall for the
        # containers list and the registries dict.
        BaseOrchestrationPlay.__init__(
            self, containers if containers is not None else [],
            ignore_dependencies=ignore_dependencies,
            concurrency=concurrency, auditor=auditor)
        self._registries = registries if registries is not None else {}
        self._refresh_images = refresh_images
        self._reuse = reuse

    def _run(self):
        """Register one StartTask per container, in play order."""
        for order, container in enumerate(self._containers):
            o = self._om.get_formatter(order, prefix=(
                BaseOrchestrationPlay.LINE_FMT.format(
                    order + 1, container.name, container.service.name,
                    container.ship.address())))
            self.register(tasks.StartTask(o, container, self._registries,
                                          self._refresh_images, self._reuse))
class Pull(BaseOrchestrationPlay):
    """A Maestro orchestration play that will force an image pull to refresh
    images for the given services and containers."""

    def __init__(self, containers=None, registries=None,
                 ignore_dependencies=True, concurrency=None, auditor=None):
        # None defaults avoid sharing one mutable list/dict across instances.
        BaseOrchestrationPlay.__init__(
            self, containers if containers is not None else [],
            ignore_dependencies=ignore_dependencies,
            concurrency=concurrency, auditor=auditor)
        self._registries = registries if registries is not None else {}

    def _run(self):
        """Register one PullTask per container."""
        for order, container in enumerate(self._containers):
            o = self._om.get_formatter(order, prefix=(
                BaseOrchestrationPlay.LINE_FMT.format(
                    order + 1, container.name, container.service.name,
                    container.ship.address())))
            self.register(tasks.PullTask(o, container, self._registries))
class Stop(BaseOrchestrationPlay):
    """A Maestro orchestration play that will stop the containers of the
    requested services. The list of containers should be provided reversed so
    that dependent services are stopped first."""

    def __init__(self, containers=None, ignore_dependencies=True,
                 concurrency=None, auditor=None):
        # None default avoids the shared mutable-default pitfall.
        BaseOrchestrationPlay.__init__(
            self, containers if containers is not None else [],
            forward=False,
            ignore_dependencies=ignore_dependencies,
            concurrency=concurrency, auditor=auditor)

    def _run(self):
        """Register one StopTask per container; numbering counts down so it
        matches the original (forward) start order."""
        for order, container in enumerate(self._containers):
            o = self._om.get_formatter(order, prefix=(
                BaseOrchestrationPlay.LINE_FMT.format(
                    len(self._containers) - order, container.name,
                    container.service.name, container.ship.address())))
            self.register(tasks.StopTask(o, container))
class Kill(BaseOrchestrationPlay):
    """A Maestro orchestration play that kills the containers of the
    requested services (via KillTask, instead of a graceful stop). The list
    of containers should be provided reversed so that dependent services are
    taken down first."""

    def __init__(self, containers=None, ignore_dependencies=True,
                 concurrency=None, auditor=None):
        # None default avoids the shared mutable-default pitfall.
        BaseOrchestrationPlay.__init__(
            self, containers if containers is not None else [],
            forward=False,
            ignore_dependencies=ignore_dependencies,
            concurrency=concurrency, auditor=auditor)

    def _run(self):
        """Register one KillTask per container; numbering counts down so it
        matches the original (forward) start order."""
        for order, container in enumerate(self._containers):
            o = self._om.get_formatter(order, prefix=(
                BaseOrchestrationPlay.LINE_FMT.format(
                    len(self._containers) - order, container.name,
                    container.service.name, container.ship.address())))
            self.register(tasks.KillTask(o, container))
class Clean(BaseOrchestrationPlay):
    """A Maestro orchestration play that will remove stopped containers from
    Docker."""

    def __init__(self, containers=None, concurrency=None, auditor=None):
        # None default avoids the shared mutable-default pitfall.
        BaseOrchestrationPlay.__init__(
            self, containers if containers is not None else [],
            ignore_dependencies=False,
            concurrency=concurrency, auditor=auditor)

    def _run(self):
        """Register one CleanTask per container."""
        for order, container in enumerate(self._containers):
            o = self._om.get_formatter(order, prefix=(
                BaseOrchestrationPlay.LINE_FMT.format(
                    order + 1, container.name, container.service.name,
                    container.ship.address())))
            self.register(tasks.CleanTask(o, container))
class Restart(BaseOrchestrationPlay):
    """A Maestro orchestration play that restarts containers.

    By setting an appropriate concurrency level one can achieve "rolling
    restart" type orchestration."""

    def __init__(self, containers=None, registries=None, refresh_images=False,
                 ignore_dependencies=True, concurrency=None, step_delay=0,
                 stop_start_delay=0, reuse=False, only_if_changed=False,
                 auditor=None):
        # None defaults avoid sharing one mutable list/dict across instances.
        BaseOrchestrationPlay.__init__(
            self, containers if containers is not None else [],
            forward=False,
            ignore_dependencies=ignore_dependencies,
            concurrency=concurrency, auditor=auditor)
        self._registries = registries if registries is not None else {}
        self._refresh_images = refresh_images
        self._step_delay = step_delay
        self._stop_start_delay = stop_start_delay
        self._reuse = reuse
        self._only_if_changed = only_if_changed

    def _run(self):
        """Register one RestartTask per container; the inter-container step
        delay is skipped for the very first container."""
        for order, container in enumerate(self._containers):
            o = self._om.get_formatter(order, prefix=(
                BaseOrchestrationPlay.LINE_FMT.format(
                    order + 1, container.name, container.service.name,
                    container.ship.address())))
            self.register(tasks.RestartTask(
                o, container, self._registries, self._refresh_images,
                self._step_delay if order > 0 else 0, self._stop_start_delay,
                self._reuse, self._only_if_changed))
| signalfuse/maestro-ng | maestro/plays/__init__.py | Python | apache-2.0 | 16,358 |
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.config import config, configfile, getConfigListEntry
from Components.ConfigList import ConfigListScreen
from Components.SystemInfo import SystemInfo
from Components.Sources.StaticText import StaticText
from Components.Pixmap import Pixmap
from Components.Console import Console
from enigma import getDesktop
from os import access, R_OK
from boxbranding import getBoxType, getBrandOEM
def InitOsd():
SystemInfo["CanChange3DOsd"] = (access('/proc/stb/fb/3dmode', R_OK) or access('/proc/stb/fb/primary/3d', R_OK)) and True or False
SystemInfo["CanChangeOsdAlpha"] = access('/proc/stb/video/alpha', R_OK) and True or False
SystemInfo["CanChangeOsdPosition"] = access('/proc/stb/fb/dst_left', R_OK) and True or False
SystemInfo["OsdSetup"] = SystemInfo["CanChangeOsdPosition"]
if SystemInfo["CanChangeOsdAlpha"] == True or SystemInfo["CanChangeOsdPosition"] == True:
SystemInfo["OsdMenu"] = True
else:
SystemInfo["OsdMenu"] = False
if getBrandOEM() in ('fulan'):
SystemInfo["CanChangeOsdPosition"] = False
SystemInfo["CanChange3DOsd"] = False
def setOSDLeft(configElement):
if SystemInfo["CanChangeOsdPosition"]:
f = open("/proc/stb/fb/dst_left", "w")
f.write('%X' % configElement.value)
f.close()
config.osd.dst_left.addNotifier(setOSDLeft)
def setOSDWidth(configElement):
if SystemInfo["CanChangeOsdPosition"]:
f = open("/proc/stb/fb/dst_width", "w")
f.write('%X' % configElement.value)
f.close()
config.osd.dst_width.addNotifier(setOSDWidth)
def setOSDTop(configElement):
if SystemInfo["CanChangeOsdPosition"]:
f = open("/proc/stb/fb/dst_top", "w")
f.write('%X' % configElement.value)
f.close()
config.osd.dst_top.addNotifier(setOSDTop)
def setOSDHeight(configElement):
if SystemInfo["CanChangeOsdPosition"]:
f = open("/proc/stb/fb/dst_height", "w")
f.write('%X' % configElement.value)
f.close()
config.osd.dst_height.addNotifier(setOSDHeight)
print 'Setting OSD position: %s %s %s %s' % (config.osd.dst_left.value, config.osd.dst_width.value, config.osd.dst_top.value, config.osd.dst_height.value)
def setOSDAlpha(configElement):
print 'Setting OSD alpha:', str(configElement.value)
config.av.osd_alpha.setValue(configElement.value)
f = open("/proc/stb/video/alpha", "w")
f.write(str(configElement.value))
f.close()
config.osd.alpha.addNotifier(setOSDAlpha)
def set3DMode(configElement):
if SystemInfo["CanChange3DOsd"]:
print 'Setting 3D mode:',configElement.value
f = open("/proc/stb/fb/3dmode", "w")
f.write(configElement.value)
f.close()
config.osd.threeDmode.addNotifier(set3DMode)
def set3DZnorm(configElement):
if SystemInfo["CanChange3DOsd"]:
print 'Setting 3D depth:',configElement.value
f = open("/proc/stb/fb/znorm", "w")
f.write('%d' % int(configElement.value))
f.close()
config.osd.threeDznorm.addNotifier(set3DZnorm)
class UserInterfacePositioner(Screen, ConfigListScreen):
	"""Setup screen to move/resize the OSD and adjust its transparency.

	A test card is shown in the background so the user can judge the
	framebuffer destination window (left/top/width/height) visually.
	"""
	def __init__(self, session):
		Screen.__init__(self, session)
		self.setup_title = _("Position Setup")
		self.Console = Console()
		self["status"] = StaticText()
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("OK"))
		self["key_yellow"] = StaticText(_("Defaults"))
		self["actions"] = ActionMap(["SetupActions", "ColorActions"],
			{
				"cancel": self.keyCancel,
				"save": self.keySave,
				"left": self.keyLeft,
				"right": self.keyRight,
				"yellow": self.keyDefault,
			}, -2)
		self.onChangedEntry = [ ]
		self.list = []
		ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
		# Only offer the entries the hardware actually supports.
		if SystemInfo["CanChangeOsdAlpha"]:
			self.list.append(getConfigListEntry(_("User interface visibility"), config.osd.alpha, _("This option lets you adjust the transparency of the user interface")))
		if SystemInfo["CanChangeOsdPosition"]:
			self.list.append(getConfigListEntry(_("Move Left/Right"), config.osd.dst_left, _("Use the Left/Right buttons on your remote to move the user inyterface left/right")))
			self.list.append(getConfigListEntry(_("Width"), config.osd.dst_width, _("Use the Left/Right buttons on your remote to adjust the size of the user interface. Left button decreases the size, Right increases the size.")))
			self.list.append(getConfigListEntry(_("Move Up/Down"), config.osd.dst_top, _("Use the Left/Right buttons on your remote to move the user interface up/down")))
			self.list.append(getConfigListEntry(_("Height"), config.osd.dst_height, _("Use the Left/Right buttons on your remote to adjust the size of the user interface. Left button decreases the size, Right increases the size.")))
		self["config"].list = self.list
		self["config"].l.setList(self.list)
		self.onLayoutFinish.append(self.layoutFinished)
		if not self.selectionChanged in self["config"].onSelectionChanged:
			self["config"].onSelectionChanged.append(self.selectionChanged)
		self.selectionChanged()
	def selectionChanged(self):
		# Mirror the help text of the highlighted entry into the status label.
		self["status"].setText(self["config"].getCurrent()[2])
	def layoutFinished(self):
		self.setTitle(_(self.setup_title))
		# Display a test card behind the setup dialog.
		self.Console.ePopen('/usr/bin/showiframe /usr/share/enigma2/hd-testcard.mvi')
	def createSummary(self):
		from Screens.Setup import SetupSummary
		return SetupSummary
	# for summary:
	def changedEntry(self):
		for x in self.onChangedEntry:
			x()
	def getCurrentEntry(self):
		return self["config"].getCurrent()[0]
	def getCurrentValue(self):
		return str(self["config"].getCurrent()[1].getText())
	def keyLeft(self):
		ConfigListScreen.keyLeft(self)
		self.setPreviewPosition()
	def keyRight(self):
		ConfigListScreen.keyRight(self)
		self.setPreviewPosition()
	def keyDefault(self):
		# Reset to a fully opaque, full 720x576 reference-frame OSD.
		config.osd.alpha.setValue(255)
		config.osd.dst_left.setValue(0)
		config.osd.dst_width.setValue(720)
		config.osd.dst_top.setValue(0)
		config.osd.dst_height.setValue(576)
	def setPreviewPosition(self):
		size_w = getDesktop(0).size().width()
		size_h = getDesktop(0).size().height()
		# NOTE(review): int(float(...)) truncates only the numerator, so the
		# scale factors below are float ratios against the 720x576 reference
		# frame — presumably intentional; confirm before changing.
		dsk_w = int(float(size_w)) / float(720)
		dsk_h = int(float(size_h)) / float(576)
		dst_left = int(config.osd.dst_left.value)
		dst_width = int(config.osd.dst_width.value)
		dst_top = int(config.osd.dst_top.value)
		dst_height = int(config.osd.dst_height.value)
		# Shrink the window until it fits inside the reference frame.
		while dst_width + (dst_left / float(dsk_w)) >= 720.5 or dst_width + dst_left > 720:
			dst_width = int(dst_width) - 1
		while dst_height + (dst_top / float(dsk_h)) >= 576.5 or dst_height + dst_top > 576:
			dst_height = int(dst_height) - 1
		config.osd.dst_left.setValue(dst_left)
		config.osd.dst_width.setValue(dst_width)
		config.osd.dst_top.setValue(dst_top)
		config.osd.dst_height.setValue(dst_height)
		print 'Setting OSD position: %s %s %s %s' % (config.osd.dst_left.value, config.osd.dst_width.value, config.osd.dst_top.value, config.osd.dst_height.value)
	def saveAll(self):
		# Persist every config entry, then flush the settings file.
		for x in self["config"].list:
			x[1].save()
		configfile.save()
	# keySave and keyCancel are just provided in case you need them.
	# you have to call them by yourself.
	def keySave(self):
		self.saveAll()
		self.close()
	def cancelConfirm(self, result):
		if not result:
			return
		# Revert all pending changes before closing.
		for x in self["config"].list:
			x[1].cancel()
		self.close()
	def keyCancel(self):
		if self["config"].isChanged():
			from Screens.MessageBox import MessageBox
			self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"), default = False)
		else:
			self.close()
	def run(self):
		# Save only the position values and close (no confirmation dialog).
		config.osd.dst_left.save()
		config.osd.dst_width.save()
		config.osd.dst_top.save()
		config.osd.dst_height.save()
		configfile.save()
		self.close()
class OSD3DSetupScreen(Screen, ConfigListScreen):
	"""Setup screen for the OSD 3D mode, 3D depth and extensions-list entry."""
	def __init__(self, session):
		Screen.__init__(self, session)
		self.setup_title = _("OSD 3D Setup")
		self.skinName = "Setup"
		self["status"] = StaticText()
		self["HelpWindow"] = Pixmap()
		self["HelpWindow"].hide()
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("OK"))
		self["actions"] = ActionMap(["SetupActions"],
			{
				"cancel": self.keyCancel,
				"save": self.keySave,
			}, -2)
		self.onChangedEntry = [ ]
		self.list = []
		ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
		self.list.append(getConfigListEntry(_("3D Mode"), config.osd.threeDmode, _("This option lets you choose the 3D mode")))
		self.list.append(getConfigListEntry(_("Depth"), config.osd.threeDznorm, _("This option lets you adjust the 3D depth")))
		self.list.append(getConfigListEntry(_("Show in extensions list ?"), config.osd.show3dextensions, _("This option lets you show the option in the extension screen")))
		self["config"].list = self.list
		self["config"].l.setList(self.list)
		self.onLayoutFinish.append(self.layoutFinished)
		if not self.selectionChanged in self["config"].onSelectionChanged:
			self["config"].onSelectionChanged.append(self.selectionChanged)
		self.selectionChanged()
	def selectionChanged(self):
		# Mirror the help text of the highlighted entry into the status label.
		self["status"].setText(self["config"].getCurrent()[2])
	def layoutFinished(self):
		self.setTitle(_(self.setup_title))
	def createSummary(self):
		from Screens.Setup import SetupSummary
		return SetupSummary
	# for summary:
	def changedEntry(self):
		for x in self.onChangedEntry:
			x()
	def getCurrentEntry(self):
		return self["config"].getCurrent()[0]
	def getCurrentValue(self):
		return str(self["config"].getCurrent()[1].getText())
	def saveAll(self):
		# Persist every config entry, then flush the settings file.
		for x in self["config"].list:
			x[1].save()
		configfile.save()
	# keySave and keyCancel are just provided in case you need them.
	# you have to call them by yourself.
	def keySave(self):
		self.saveAll()
		self.close()
	def cancelConfirm(self, result):
		if not result:
			return
		# Revert all pending changes before closing.
		for x in self["config"].list:
			x[1].cancel()
		self.close()
	def keyCancel(self):
		if self["config"].isChanged():
			from Screens.MessageBox import MessageBox
			self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"))
		else:
			self.close()
| popazerty/EG-2 | lib/python/Screens/UserInterfacePositioner.py | Python | gpl-2.0 | 10,036 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin
import os.path
class Openshift(Plugin, RedHatPlugin):
    '''Openshift node and broker'''

    plugin_name = "openshift"
    profiles = ('virt', 'openshift')

    # The 'broker' and 'node' options are obsolete but are maintained
    # here for compatibility with external programs that call sosreport
    # with these names.
    option_list = [("broker", "Gathers broker specific files", "slow", False),
                   ("node", "Gathers node specific files", "slow", False)]

    # Software-collection path components used to locate the mcollective
    # configuration under /opt/<vendor>/<ruby>/.
    ruby = "ruby193"
    vendor = "rh"

    def is_broker(self):
        # A broker install is identified by the presence of broker.conf.
        return os.path.exists("/etc/openshift/broker.conf")

    def is_node(self):
        # A node install is identified by the presence of node.conf.
        return os.path.exists("/etc/openshift/node.conf")

    def setup(self):
        """Collect common OpenShift files, then broker/node specifics."""
        self.add_copy_spec([
            "/etc/openshift-enterprise-release",
            "/var/log/openshift",
            "/etc/openshift/*.conf",
            "/etc/openshift/upgrade",
        ])

        self.add_cmd_output("oo-diagnostics -v")

        if self.is_broker():
            self.add_copy_spec([
                "/etc/openshift/quickstarts.json",
                "/etc/openshift/plugins.d/*.conf",
                "/var/www/openshift/broker/httpd/conf.d/*.conf",
                "/var/www/openshift/console/httpd/conf.d/*.conf",
            ])

            self.add_cmd_output([
                "oo-accept-broker -v",
                "oo-admin-chk -v",
                "oo-mco ping",
            ])

        if self.is_node():
            self.add_copy_spec([
                "/etc/openshift/node-plugins.d/*.conf",
                "/etc/openshift/cart.conf.d",
                "/etc/openshift/iptables.*.rules",
                "/etc/openshift/env",
                "/opt/%s/%s/root/etc/mcollective" % (self.vendor, self.ruby),
                "/var/log/httpd/openshift_log",
                "/var/log/mcollective.log",
                "/var/log/node-web-proxy/access.log",
                "/var/log/node-web-proxy/error.log",
                "/var/log/node-web-proxy/websockets.log",
                "/var/log/node-web-proxy/supervisor.log",
            ])

            self.add_cmd_output([
                "oo-accept-node -v",
                "oo-admin-ctl-gears list",
                "ls -laZ /var/lib/openshift"
            ])

    def postproc(self):
        """Scrub credentials and session secrets from collected files."""
        # Redact broker's MongoDB credentials:
        # MONGO_PASSWORD="PasswordForOpenshiftUser"
        self.do_file_sub('/etc/openshift/broker.conf',
                         r"(MONGO_PASSWORD\s*=\s*)(.*)",
                         r"\1*******")
        # Redact session SHA keys:
        # SESSION_SECRET=0c31...a7c8
        self.do_file_sub('/etc/openshift/broker.conf',
                         r"(SESSION_SECRET\s*=\s*)(.*)",
                         r"\1*******")
        self.do_file_sub('/etc/openshift/console.conf',
                         r"(SESSION_SECRET\s*=\s*)(.*)",
                         r"\1*******")
        # Redact passwords of the form:
        # plugin.activemq.pool.1.password = Pa$sW0Rd
        self.do_file_sub("/opt/%s/%s/root/etc/mcollective/server.cfg" %
                         (self.vendor, self.ruby),
                         r"(.*password\s*=\s*)\S+",
                         r"\1********")
        self.do_file_sub("/opt/%s/%s/root/etc/mcollective/client.cfg" %
                         (self.vendor, self.ruby),
                         r"(.*password\s*=\s*)\S+",
                         r"\1********")
# vim: et ts=4 sw=4
| codificat/sos | sos/plugins/openshift.py | Python | gpl-2.0 | 4,184 |
import rppy
import numpy as np
import matplotlib.pyplot as plt

# Upper medium: velocities (m/s), density (kg/m^3) and anisotropy
# parameters, all zero here, i.e. an isotropic half-space.
vp1 = 3000
vs1 = 1500
p1 = 2000
e1_1 = 0.0
d1_1 = 0.0
y1_1 = 0.0
e2_1 = 0.0
d2_1 = 0.0
y2_1 = 0.0
d3_1 = 0.0
chi1 = 0.0
C1 = rppy.reflectivity.Cij(vp1, vs1, p1, e1_1, d1_1, y1_1, e2_1, d2_1, y2_1, d3_1)

# Lower medium: faster and denser, also isotropic.
vp2 = 4000
vs2 = 2000
p2 = 2200
e1_2 = 0.0
d1_2 = 0.0
y1_2 = 0.0
e2_2 = 0.0
d2_2 = 0.0
y2_2 = 0.0
d3_2 = 0.0
chi2 = 0.0
C2 = rppy.reflectivity.Cij(vp2, vs2, p2, e1_2, d1_2, y1_2, e2_2, d2_2, y2_2, d3_2)

phi = np.arange(0, 90, 1)
theta = np.arange(0, 90, 1)

# Sweep azimuth at a fixed 30-degree incidence angle.
loopang = phi
theta = np.array([30])

rphti = np.zeros(np.shape(loopang))
rpzoe = np.zeros(np.shape(loopang))
rprug = np.zeros(np.shape(loopang))

# Compare exact orthorhombic, Ruger HTI approximation and isotropic
# Zoeppritz reflectivities over azimuth.
for aid, val in enumerate(loopang):
    rphti[aid] = rppy.reflectivity.exact_ortho(C1, p1, C2, p2, chi1, chi2, loopang[aid], theta)
    rprug[aid] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e2_1, d2_1, y2_1, vp2, vs2, p2, e2_2, d2_2, y2_2, np.radians(theta), np.radians(loopang[aid]))
    rpzoe[aid] = rppy.reflectivity.zoeppritz(vp1, vs1, p1, vp2, vs2, p2, np.radians(theta))

plt.figure(1)
plt.plot(loopang, rphti, loopang, rprug, loopang, rpzoe)
plt.legend(['hti', 'ruger', 'zoe'])
plt.show()
#!/usr/bin/python
import unittest
import apt_pkg
import apt.progress.base
class TestCache(unittest.TestCase):
    """Test invocation of apt_pkg.Cache()"""

    def setUp(self):
        apt_pkg.init_config()
        apt_pkg.init_system()

    def test_wrong_invocation(self):
        """cache_invocation: Test wrong invocation."""
        apt_cache = apt_pkg.Cache(progress=None)
        # Anything that is not a progress object (or None) must be rejected.
        invalid_progress_values = (
            apt_cache,
            apt.progress.base.AcquireProgress(),
            0,
        )
        for bad_progress in invalid_progress_values:
            self.assertRaises(ValueError, apt_pkg.Cache, bad_progress)

    def test_proper_invocation(self):
        """cache_invocation: Test correct invocation."""
        apt_cache = apt_pkg.Cache(progress=None)
        apt_depcache = apt_pkg.DepCache(apt_cache)
# Allow running this test module directly with the stock unittest runner.
if __name__ == "__main__":
    unittest.main()
| suokko/python-apt | tests/test_cache_invocation.py | Python | gpl-2.0 | 863 |
# coding=utf-8
import logging
from django.views import generic
from news.models import FacebookPost
__author__ = 'ilov3'
# Module-level logger named after this module's import path.
logger = logging.getLogger(__name__)
class FacebookPostView(generic.ListView):
    """List view rendering the most recent Facebook posts."""

    template_name = 'facebook/posts/posts.html'
    context_object_name = 'facebook_posts'
    # Number of most-recent posts shown; hoisted out of get_queryset so it
    # can be tuned (or overridden in a subclass) without touching the logic.
    posts_limit = 20

    def get_queryset(self):
        """Return the newest posts first, capped at ``posts_limit``."""
        return FacebookPost.objects.order_by('-created_time')[:self.posts_limit]
| kwameboame/newsdex | news/views/facebook_views.py | Python | bsd-2-clause | 394 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2017 Jesús Espino <[email protected]>
# Copyright (C) 2014-2017 David Barragán <[email protected]>
# Copyright (C) 2014-2017 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Mapping of symbolic front-end page names to URL path templates.  The
# positional placeholders ({0}, {1}, ...) are filled with the attributes
# noted in each entry's trailing comment.
urls = {
    "home": "/",
    "discover": "/discover",
    "login": "/login",
    "register": "/register",
    "forgot-password": "/forgot-password",
    "new-project": "/project/new",
    "new-project-import": "/project/new/import/{0}",
    "change-password": "/change-password/{0}",  # user.token
    "change-email": "/change-email/{0}",  # user.email_token
    "cancel-account": "/cancel-account/{0}",  # auth.token.get_token_for_user(user)
    "invitation": "/invitation/{0}",  # membership.token
    "user": "/profile/{0}",  # user.username

    "project": "/project/{0}",  # project.slug

    "epics": "/project/{0}/epics/",  # project.slug
    "epic": "/project/{0}/epic/{1}",  # project.slug, epic.ref
    "backlog": "/project/{0}/backlog/",  # project.slug
    "taskboard": "/project/{0}/taskboard/{1}",  # project.slug, milestone.slug
    "kanban": "/project/{0}/kanban/",  # project.slug
    "userstory": "/project/{0}/us/{1}",  # project.slug, us.ref
    "task": "/project/{0}/task/{1}",  # project.slug, task.ref
    "issues": "/project/{0}/issues",  # project.slug
    "issue": "/project/{0}/issue/{1}",  # project.slug, issue.ref
    "wiki": "/project/{0}/wiki/{1}",  # project.slug, wikipage.slug
    "team": "/project/{0}/team/",  # project.slug

    "project-transfer": "/project/{0}/transfer/{1}",  # project.slug, project.transfer_token

    "project-admin": "/login?next=/project/{0}/admin/project-profile/details",  # project.slug

    "project-import-jira": "/project/new/import/jira?url={}",
}
| dayatz/taiga-back | taiga/front/urls.py | Python | agpl-3.0 | 2,443 |
# -*- coding: utf-8 -*-
import re
from content_error import ContentError
def strip(line, newlines = False):
    """Strip whitespace Letter tokens (and, when ``newlines`` is True,
    Linebreak tokens) from both ends of a token list.

    Returns a (possibly shorter) slice of ``line``; the input list is not
    modified.  Fixes the original right-strip, which decremented its
    counter (``i -= 1``) so the ``i > 0`` guard never fired and trailing
    whitespace was never removed.
    """
    def _strippable(e):
        if type(e) == Letter and e.value.isspace():
            return True
        return newlines and type(e) == Linebreak

    start = 0
    while start < len(line) and _strippable(line[start]):
        start += 1
    end = len(line)
    while end > start and _strippable(line[end - 1]):
        end -= 1
    return line[start:end]
# according to our assumptions: tag handle only whole lines
def _consumeLines(body, lines, repetitions, finalizer):
    """Walk a token ``body`` and append complete, stripped lines to ``lines``.

    Nested [rep] tags delegate to Tag.consumeLines, which records its span in
    ``repetitions``.  ``finalizer(lines, repetitions)`` is invoked exactly
    once, when the body is exhausted.  Raises ContentError on tags embedded
    inside a line or on text following a closing tag on the same line.
    """
    idx = 0
    while True:
        # Skip insignificant whitespace between lines.
        while len(body)>idx and is_whitespace(body[idx]): idx+=1
        if len(body) <= idx:
            finalizer(lines, repetitions)
            return
        # two possibilities:
        # 1) rep tag
        if type(body[idx]) == Tag:
            body[idx].consumeLines(lines, repetitions)
            idx += 1
            # Only trailing spaces may follow the tag on its line.
            while len(body)>idx and type(body[idx]) == Letter and body[idx].value.isspace(): idx+= 1
            # newline or end => OK; else => error
            if len(body)<=idx:
                finalizer(lines, repetitions)
                return # OK
            if type(body[idx]) == Linebreak:
                idx += 1
                continue
            raise ContentError("Newline only allowed after [/rep]",'')
        # 2) simple line
        else:
            acc = []
            # consume till Linebreak or end, fail on Tag
            while len(body)>idx and type(body[idx]) not in {Linebreak, Tag}:
                acc.append(body[idx])
                idx += 1
            if len(body)<=idx or type(body[idx]) == Linebreak:
                # Only non-empty stripped lines are kept.
                acc = strip(acc)
                if acc != []: lines.append(acc)
            if len(body)<=idx:
                finalizer(lines, repetitions)
                return # OK
            if type(body[idx]) == Linebreak:
                idx += 1
                continue
            raise ContentError("Tags inside the line not allowed",'')
class ParsingError(Exception):
    """Raised when the lycode source cannot be parsed."""

    def __init__(self, message):
        # Propagate to Exception (so str() works) and keep the attribute.
        # The original stored it under the misspelled name "massage",
        # which made __repr__ crash with an AttributeError.
        super(ParsingError, self).__init__(message)
        self.message = message

    def __repr__(self):
        return self.message
class Section:
    """A top-level document section: a heading, raw options and a token body."""

    def __init__(self, head='', options='', body=None):
        self.head = head
        self.options = options
        self.body = (body if body else [])

    def __repr__(self):
        inner = ''.join(repr(token) for token in self.body)
        return '<section: ' + self.head + '>[' + inner + ']'

    # according to our assumptions: tag handle only whole lines
    def consumeLines(self, lines, repetitions):
        """Flatten this section's body into ``lines``; no span is recorded."""
        _consumeLines(self.body, lines, repetitions, lambda l, r: None)
class Letter:
    """A single character token of the source text."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return self.value
class Tag:
    """An inline [tag]...[/tag] element with a head, raw options and a body."""

    def __init__(self, head='', options='', body=None):
        self.head = head
        self.options = options
        self.body = (body if body else [])

    def __repr__(self):
        inner = ''.join(repr(token) for token in self.body)
        return '<tag: ' + self.head + '>[' + inner + ']'

    # according to our assumptions: tag handle only whole lines
    def consumeLines(self, lines, repetitions):
        """Flatten the tag body into ``lines`` and record the repetition span
        (first line index, end line index, repetition count)."""
        start = len(lines)

        def _record(collected, reps):
            reps.append((start, len(collected), int(self.options)))

        _consumeLines(self.body, lines, repetitions, _record)
class Chord:
    """A chord token, e.g. produced from '{Am}' in the source."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return '<chord: {0}>'.format(self.value)
class Linebreak:
    """A hard line break (a newline character in the source)."""

    def __init__(self):
        pass

    def __repr__(self):
        return '\\\\'
class LinebreakSuggestion:
    """A soft (suggested) line break, written as a single backslash."""

    def __init__(self):
        pass

    def __repr__(self):
        return '\\'
class Note:
    """A free-text annotation, rendered wrapped in parentheses."""

    def __init__(self, message):
        self.value = '({0})'.format(message)

    def __repr__(self):
        return self.value
def parse(s):
    """Parse lycode source text into a list of Section objects.

    Tokenizes ``s`` one construct at a time from the front: section headers
    [# name opts], notes [(text)], opening tags [name opts], closing tags
    [/name], chords {name}, and single characters (newline -> Linebreak,
    backslash -> LinebreakSuggestion, anything else -> Letter).  Raises
    ParsingError on unbalanced tags or content before the first section.
    """
    section = Section()
    sections = [section]
    container = [] # tags here
    def top_container():
        # Append target: the innermost open tag, or the current section.
        return (section if container == [] else container[-1]).body
    def get_group(match, group, default):
        # match.group() returns None for an unmatched optional group.
        r = match.group(group)
        if r: return r
        else: return default
    while s != '':
        # Candidate matches, tried in priority order below.
        ret1 = re.match('^\[#\s*(?P<head>\w+)(\s+(?P<options>[^\]]*))?\]', s)
        ret2 = re.match('^\[\s*(?P<head>\w+)(\s+(?P<options>[^\]]*))?\]', s)
        ret3 = re.match('^\[/\s*(?P<head>\w+)\s*\]', s)
        ret4 = re.match('^\{(?P<name>[^\}]+)\}', s)
        ret5 = re.match('^\[\((?P<text>.+?)\)\]', s)
        if ret1:
            # New section header; all tags must be closed first.
            if container != []: raise ParsingError('Not closed tag')
            section = Section(ret1.group('head').strip(), get_group(ret1, 'options', '').strip())
            sections.append(section)
            s = s[ret1.end():]
        elif ret5:
            # Note must be tried before the generic opening tag pattern.
            top_container().append(Note(ret5.group('text')))
            s = s[ret5.end():]
        elif ret2:
            tag = Tag(ret2.group('head').strip(), get_group(ret2, 'options', '').strip())
            top_container().append(tag)
            container.append(tag)
            s = s[ret2.end():]
        elif ret3:
            # Closing tag must match the innermost open tag.
            if container == [] or ret3.group('head').strip() != container[-1].head:
                raise ParsingError('Wrong closing tag: '+ret3.group('head').strip())
            container.pop()
            s = s[ret3.end():]
        elif ret4:
            chord = Chord(ret4.group('name'))
            top_container().append(chord)
            s = s[ret4.end():]
        else:
            # Fall back to consuming a single character.
            if s[0] == '\n': top_container().append(Linebreak())
            elif s[0] == '\\':top_container().append(LinebreakSuggestion())
            else: top_container().append(Letter(s[0]))
            s = s[1:]
    # The implicit leading section may only hold whitespace.
    for elt in sections[0].body:
        if not is_code_whitespace(elt): raise ParsingError('Some content out of section')
    return sections[1:]
def is_whitespace(s):
    """True for any token strip() may discard: code whitespace plus soft
    line-break suggestions."""
    return type(s) == LinebreakSuggestion or is_code_whitespace(s)
def is_code_whitespace(s):
    """True for a Linebreak or a Letter holding only whitespace."""
    # Parens make the original 'or (... and ...)' precedence explicit;
    # isinstance() replaces the non-idiomatic type(...) == T checks.
    return isinstance(s, Linebreak) or (isinstance(s, Letter) and s.value.isspace())
| wojtex/cantionale | lycode.py | Python | mit | 5,554 |
#
# Copyright 2013 IBM Corp.
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware to replace the plain text message body of an error
response with one formatted so the client can parse it.
Based on pecan.middleware.errordocument
"""
import json
from lxml import etree
import webob
from ceilometer.api import hooks
from ceilometer import i18n
from ceilometer.i18n import _
from ceilometer.openstack.common import log
LOG = log.getLogger(__name__)
class ParsableErrorMiddleware(object):
    """Replace error body with something the client can parse."""

    @staticmethod
    def best_match_language(accept_language):
        """Determines best available locale from the Accept-Language header.

        :returns: the best language match or None if the 'Accept-Language'
                  header was not available in the request.
        """
        if not accept_language:
            return None
        all_languages = i18n.get_available_languages()
        return accept_language.best_match(all_languages)

    def __init__(self, app):
        # The wrapped WSGI application (a pecan app exposing .hooks).
        self.app = app

    def __call__(self, environ, start_response):
        """WSGI entry point: proxy the request, rewriting error bodies."""
        # Request for this state, modified by replace_start_response()
        # and used when an error is being reported.
        state = {}

        def replacement_start_response(status, headers, exc_info=None):
            """Overrides the default response to make errors parsable."""
            try:
                status_code = int(status.split(' ')[0])
                state['status_code'] = status_code
            except (ValueError, TypeError):  # pragma: nocover
                raise Exception((
                    'ErrorDocumentMiddleware received an invalid '
                    'status %s' % status
                ))
            else:
                # Floor division: with true division (Python 3) 404/100
                # would be 4.04 and the 2xx/3xx membership test below
                # would never match.  Identical result under Python 2.
                if (state['status_code'] // 100) not in (2, 3):
                    # Remove some headers so we can replace them later
                    # when we have the full error message and can
                    # compute the length.
                    headers = [(h, v)
                               for (h, v) in headers
                               if h not in ('Content-Length', 'Content-Type')
                               ]
                # Save the headers in case we need to modify them.
                state['headers'] = headers
                return start_response(status, headers, exc_info)

        app_iter = self.app(environ, replacement_start_response)
        if (state['status_code'] // 100) not in (2, 3):
            # Join the body exactly once: app_iter may be a one-shot
            # iterator, and the original code re-joined it in the JSON
            # fallback path, producing an empty error body.
            app_body = '\n'.join(app_iter)
            req = webob.Request(environ)
            # Find the first TranslationHook in the array of hooks and use the
            # translatable_error object from it
            error = None
            for hook in self.app.hooks:
                if isinstance(hook, hooks.TranslationHook):
                    error = hook.local_error.translatable_error
                    break
            user_locale = self.best_match_language(req.accept_language)
            if (req.accept.best_match(['application/json', 'application/xml'])
                    == 'application/xml'):
                try:
                    # simple check xml is valid
                    fault = etree.fromstring(app_body)
                    # Add the translated error to the xml data
                    if error is not None:
                        for fault_string in fault.findall('faultstring'):
                            fault_string.text = i18n.translate(error,
                                                               user_locale)
                    body = ['<error_message>' + etree.tostring(fault)
                            + '</error_message>']
                except etree.XMLSyntaxError as err:
                    LOG.error(_('Error parsing HTTP response: %s') % err)
                    # Fall back to a body carrying just the status code.
                    body = ['<error_message>%s' % state['status_code']
                            + '</error_message>']
                state['headers'].append(('Content-Type', 'application/xml'))
            else:
                try:
                    fault = json.loads(app_body)
                    if error is not None and 'faultstring' in fault:
                        fault['faultstring'] = i18n.translate(error,
                                                              user_locale)
                    body = [json.dumps({'error_message': fault})]
                except ValueError:
                    # Not JSON: wrap the raw upstream body as-is.
                    body = [json.dumps({'error_message': app_body})]
                state['headers'].append(('Content-Type', 'application/json'))
            state['headers'].append(('Content-Length', str(len(body[0]))))
        else:
            body = app_iter
        return body
| Juniper/ceilometer | ceilometer/api/middleware.py | Python | apache-2.0 | 5,264 |
#!/usr/bin/env python
###############################################################################
# $Id: ogr_sde.py 33793 2016-03-26 13:02:07Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test OGR ArcSDE driver.
# Author: Howard Butler <[email protected]>
#
###############################################################################
# Copyright (c) 2008, Howard Butler <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
import ogrtest
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
###############################################################################
# Open ArcSDE datasource.
# Connection parameters for the ArcSDE server exercised by these tests.
sde_server = '172.16.1.193'
sde_port = '5151'
sde_db = 'sde'
sde_user = 'sde'
sde_password = 'sde'
# sde_dr stays None when the SDE driver is unavailable; every test below
# checks it and returns 'skip' in that case.
gdaltest.sde_dr = None
try:
    gdaltest.sde_dr = ogr.GetDriverByName( 'SDE' )
except:
    # NOTE(review): bare except keeps the module importable if the driver
    # lookup fails for any reason; consider narrowing to Exception.
    pass
def ogr_sde_1():
    """Test basic opening of a database."""
    if gdaltest.sde_dr is None:
        return 'skip'
    conn = 'SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db,
                                   sde_user, sde_password)
    # First try a read-only open; treat failure as "server unavailable".
    datasource = ogr.Open(conn)
    if datasource is None:
        print("Could not open %s" % conn)
        gdaltest.sde_dr = None
        return 'skip'
    datasource.Destroy()
    # Then confirm an update-mode open also succeeds.
    datasource = ogr.Open(conn, update=1)
    datasource.Destroy()
    return 'success'
def ogr_sde_2():
    "Test creation of a layer"
    if gdaltest.sde_dr is None:
        return 'skip'
    base = 'SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password)
    # Source data: the standard test shapefile; kept alive on gdaltest so
    # later tests can reuse the features.
    shp_ds = ogr.Open( 'data/poly.shp' )
    gdaltest.shp_ds = shp_ds
    shp_lyr = shp_ds.GetLayer(0)
    ds = ogr.Open(base, update=1)
    lyr = ds.CreateLayer( 'SDE.TPOLY' ,geom_type=ogr.wkbPolygon, srs=shp_lyr.GetSpatialRef(),options = [ 'OVERWRITE=YES' ] )
#    lyr = ds.CreateLayer( 'SDE.TPOLY' ,geom_type=ogr.wkbPolygon)
    ogrtest.quick_create_layer_def( lyr,
                                    [ ('AREA', ogr.OFTReal),
                                      ('EAS_ID', ogr.OFTInteger),
                                      ('PRFEDEA', ogr.OFTString),
                                      ('WHEN', ogr.OFTDateTime) ] )
    #######################################################
    # Copy in poly.shp
    # NOTE(review): a single dst_feat is reused (SetFrom overwrites it)
    # for every CreateFeature call.
    dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
    feat = shp_lyr.GetNextFeature()
    gdaltest.poly_feat = []
    while feat is not None:
        gdaltest.poly_feat.append( feat )
        dst_feat.SetFrom( feat )
        lyr.CreateFeature( dst_feat )
        feat = shp_lyr.GetNextFeature()
    dst_feat.Destroy()
    return 'success'
def ogr_sde_3():
    """Test basic version locking."""
    if gdaltest.sde_dr is None:
        return 'skip'
    conn = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT'
            % (sde_server, sde_port, sde_db, sde_user, sde_password))
    first = ogr.Open(conn, update=1)
    # A second update-mode open of the same version must be refused.
    second = ogr.Open(conn, update=1)
    if second is not None:
        gdaltest.post_reason('A locked version was able to be opened')
        return 'fail'
    first.Destroy()
    return 'success'
def ogr_sde_4():
    """Test basic version creation."""
    if gdaltest.sde_dr is None:
        return 'skip'
    version_name = 'TESTING'
    # First pass overwrites the version, second reuses it.
    for overwrite in ('TRUE', 'FALSE'):
        gdal.SetConfigOption('SDE_VERSIONOVERWRITE', overwrite)
        conn = ('SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s'
                % (sde_server, sde_port, sde_db, sde_user, sde_password,
                   version_name))
        ds = ogr.Open(conn, update=1)
        ds.Destroy()
    return 'success'
def ogr_sde_5():
    """Test versioned editing: edits in a child version must not leak into
    the parent (DEFAULT) version, and datetime round-tripping must work."""
    if gdaltest.sde_dr is None:
        return 'skip'
    version_name = 'TESTING'
    gdal.SetConfigOption( 'SDE_VERSIONOVERWRITE', 'TRUE' )
    base = 'SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, version_name)
    # Edit feature 1 in the child version.
    ds = ogr.Open(base, update=1)
    l1 = ds.GetLayerByName('SDE.TPOLY')
    f1 = l1.GetFeature(1)
    f1.SetField("PRFEDEA",'SDE.TESTING')
    l1.SetFeature(f1)
    ds.Destroy()
    del ds
    default = 'DEFAULT'
    gdal.SetConfigOption( 'SDE_VERSIONOVERWRITE', 'FALSE' )
    default = 'SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password, default)
    # Edit the same feature differently in the parent version.
    ds2 = ogr.Open(default, update=1)
    l2 = ds2.GetLayerByName('SDE.TPOLY')
    f2 = l2.GetFeature(1)
    f2.SetField("PRFEDEA",'SDE.DEFAULT')
    f2.SetField("WHEN", 2008, 3, 19, 16, 15, 00, 0)
    l2.SetFeature(f2)
    ds2.Destroy()
    del ds2
    # The child version must still see its own edit.
    ds3 = ogr.Open(base)
    l3 = ds3.GetLayerByName('SDE.TPOLY')
    f3 = l3.GetFeature(1)
    if f3.GetField("PRFEDEA") != "SDE.TESTING":
        gdaltest.post_reason('versioned editing failed for child version SDE.TESTING')
        return 'fail'
    ds3.Destroy()
    del ds3
    # The parent version must see its own edit, including the datetime.
    ds4 = ogr.Open(default)
    l4 = ds4.GetLayerByName('SDE.TPOLY')
    f4 = l4.GetFeature(1)
    if f4.GetField("PRFEDEA") != "SDE.DEFAULT":
        gdaltest.post_reason('versioned editing failed for parent version SDE.DEFAULT')
        return 'fail'
    idx = f4.GetFieldIndex('WHEN')
    df = f4.GetField(idx)
    if df != '2008/03/19 16:15:00':
        gdaltest.post_reason("datetime handling did not work -- expected '2008/03/19 16:15:00' got '%s' "% df)
        # BUG FIX: previously this fell through and returned 'success'
        # even though the datetime check above had failed.
        return 'fail'
    ds4.Destroy()
    del ds4
    return 'success'
def ogr_sde_6():
    """Test forced and unforced extent fetching."""
    if gdaltest.sde_dr is None:
        return 'skip'
    base = 'SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT' % (
        sde_server, sde_port, sde_db, sde_user, sde_password)
    ds = ogr.Open(base, update=1)
    l1 = ds.GetLayerByName('SDE.TPOLY')
    # Unforced extent returns SDE's stored (sentinel) envelope.
    extent = l1.GetExtent(force=0)
    if extent != (0.0, 2147483645.0, 0.0, 2147483645.0):
        gdaltest.post_reason("unforced extent did not equal expected value")
        # BUG FIX: previously fell through without failing the test.
        return 'fail'
    # Forced extent scans the features and returns the real envelope.
    extent = l1.GetExtent(force=1)
    if extent != (478316.0, 481645.0, 4762881.0, 4765611.0):
        gdaltest.post_reason("forced extent did not equal expected value")
        # BUG FIX: previously fell through without failing the test.
        return 'fail'
    return 'success'
def ogr_sde_7():
    """Test that requesting a non-existent layer fails for every
    flavour of connection string."""
    if gdaltest.sde_dr is None:
        return 'skip'
    base = 'SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT' % (
        sde_server, sde_port, sde_db, sde_user, sde_password)
    ds = ogr.Open(base, update=1)
    l1 = ds.GetLayerByName('SDE.TPOLY2')
    if l1:
        gdaltest.post_reason("we got a layer when we should not have")
        # BUG FIX: previously fell through without failing the test.
        return 'fail'
    ds.Destroy()
    default = 'DEFAULT'
    gdal.SetConfigOption( 'SDE_VERSIONOVERWRITE', 'FALSE' )
    default = 'SDE:%s,%s,%s,%s,%s,SDE.TPOLY,SDE.DEFAULT,%s' % (
        sde_server, sde_port, sde_db, sde_user, sde_password, default)
    ds = ogr.Open(default, update=1)
    l1 = ds.GetLayerByName('SDE.TPOLY2')
    if l1:
        gdaltest.post_reason("we got a layer when we should not have")
        # BUG FIX: previously fell through without failing the test.
        return 'fail'
    ds.Destroy()
    default = 'DEFAULT'
    gdal.SetConfigOption( 'SDE_VERSIONOVERWRITE', 'FALSE' )
    default = 'SDE:%s,%s,%s,%s,%s' % (
        sde_server, sde_port, sde_db, sde_user, sde_password)
    ds = ogr.Open(default)
    l1 = ds.GetLayerByName('SDE.TPOLY2')
    if l1:
        gdaltest.post_reason("we got a layer when we should not have")
        # BUG FIX: previously fell through without failing the test.
        return 'fail'
    ds.Destroy()
    return 'success'
def ogr_sde_8():
    "Test spatial references"
    if gdaltest.sde_dr is None:
        return 'skip'
    base = 'SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db, sde_user, sde_password)
    shp_ds = ogr.Open( 'data/poly.shp' )
    gdaltest.shp_ds = shp_ds
    shp_lyr = shp_ds.GetLayer(0)
    ref = osr.SpatialReference()
    ref.ImportFromWkt('LOCAL_CS["IMAGE"]')
    ds = ogr.Open(base, update=1)
    # NOTE(review): the layer is deliberately created twice -- first with
    # a LOCAL_CS srs, then overwritten with EPSG:4326 -- exercising
    # OVERWRITE=YES with different spatial references.
    lyr = ds.CreateLayer( 'SDE.TPOLY' ,geom_type=ogr.wkbPolygon, srs=ref,options = [ 'OVERWRITE=YES' ] )
    ref.ImportFromEPSG(4326)
    lyr = ds.CreateLayer( 'SDE.TPOLY' ,geom_type=ogr.wkbPolygon, srs=ref,options = [ 'OVERWRITE=YES' ] )
    ogrtest.quick_create_layer_def( lyr,
                                    [ ('AREA', ogr.OFTReal),
                                      ('EAS_ID', ogr.OFTInteger),
                                      ('PRFEDEA', ogr.OFTString),
                                      ('WHEN', ogr.OFTDateTime) ] )
    #######################################################
    # Copy in poly.shp
    # The single dst_feat is reused (SetFrom overwrites it) per feature.
    dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
    feat = shp_lyr.GetNextFeature()
    gdaltest.poly_feat = []
    while feat is not None:
        gdaltest.poly_feat.append( feat )
        dst_feat.SetFrom( feat )
        lyr.CreateFeature( dst_feat )
        feat = shp_lyr.GetNextFeature()
    dst_feat.Destroy()
    return 'success'
def ogr_sde_cleanup():
    """Drop the TPOLY layer created by the preceding tests."""
    if gdaltest.sde_dr is None:
        return 'skip'
    conn = 'SDE:%s,%s,%s,%s,%s' % (sde_server, sde_port, sde_db,
                                   sde_user, sde_password)
    ds = ogr.Open(conn, update=1)
    ds.DeleteLayer('%s.%s' % (sde_user.upper(), 'TPOLY'))
    ds.Destroy()
    return 'success'
# Registry of test callables, in execution order, for the gdaltest harness.
gdaltest_list = [
    ogr_sde_1,
    ogr_sde_2,
    ogr_sde_3,
    ogr_sde_4,
    ogr_sde_5,
    ogr_sde_6,
    ogr_sde_7,
    ogr_sde_8,
    ogr_sde_cleanup
    ]
if __name__ == '__main__':
    # Standalone invocation: run the whole list through the harness.
    gdaltest.setup_run( 'ogr_sde' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
| nextgis-extra/tests | lib_gdal/ogr/ogr_sde.py | Python | gpl-2.0 | 10,401 |
from pyx import *
# Axis painter with outward-pointing ticks and a thick, arrow-terminated
# base path.
mypainter = graph.axis.painter.regular(outerticklength=graph.axis.painter.ticklength.normal,
                                       basepathattrs=[style.linewidth.THick, deco.earrow.large])
# Draw a linear axis (0..11) along a cubic Bezier curve with that painter.
c = graph.axis.pathaxis(path.curve(0, 0, 3, 0, 1, 4, 4, 4),
                        graph.axis.linear(min=0, max=11, title="axis title", painter=mypainter))
# Emit the example in all three supported output formats.
c.writeEPSfile("painter")
c.writePDFfile("painter")
c.writeSVGfile("painter")
| mjg/PyX-svn | examples/axis/painter.py | Python | gpl-2.0 | 445 |
# License: BSD 3 clause
# Package facade: re-export ModelHawkes as this subpackage's public API.
from .model_hawkes import ModelHawkes
__all__ = ["ModelHawkes"]
| X-DataInitiative/tick | tick/hawkes/model/base/__init__.py | Python | bsd-3-clause | 90 |
# coding: utf8
# jmdict.py
# 2/14/2014 jichi
if __name__ == '__main__':
    # Allow running this module directly from its source tree.
    import sys
    sys.path.append('..')
def get(dic):
    """
    @param dic  str such as ipadic or unidic
    @return  bool
    """
    # rc is a project-local module; runscript launches the fetch script
    # with the dictionary name as its single argument.
    import rc
    return rc.runscript('getcabocha.py', (dic,))
if __name__ == "__main__":
    get('unidic')
# EOF
| Dangetsu/vnr | Frameworks/Sakura/py/libs/scripts/cabocha.py | Python | gpl-3.0 | 307 |
#!/usr/bin/env python2
import os
import subprocess
from pulp.devel.test_runner import run_tests
# Find and eradicate any existing .pyc files, so they do not eradicate us!
PROJECT_DIR = os.path.dirname(__file__)
subprocess.call(['find', PROJECT_DIR, '-name', '*.pyc', '-delete'])
# Packages handed to the pulp test runner for discovery/coverage.
PACKAGES = [os.path.dirname(__file__), 'pulp_puppet']
# Test directories safe to run on EL5-era platforms as well as newer ones.
EL5_SAFE_TESTS = [
    'pulp_puppet_common/test/unit/',
    'pulp_puppet_extensions_admin/test/unit/',
    'pulp_puppet_extensions_consumer/test/unit/',
    'pulp_puppet_handlers/test/unit/',
]
# Test directories that require newer-than-EL5 dependencies.
NON_EL5_TESTS = [
    'pulp_puppet_plugins/test/unit/',
    'pulp_puppet_tools/test/unit/',
]
dir_safe_all_platforms = [os.path.join(os.path.dirname(__file__), x) for x in EL5_SAFE_TESTS]
dir_safe_non_rhel5 = [os.path.join(os.path.dirname(__file__), x) for x in NON_EL5_TESTS]
# The forge Django settings must be importable during the test run.
os.environ['DJANGO_SETTINGS_MODULE'] = 'pulp_puppet.forge.settings'
run_tests(PACKAGES, dir_safe_all_platforms, dir_safe_non_rhel5)
| dkliban/pulp_puppet | run-tests.py | Python | gpl-2.0 | 945 |
# Copyright (c) 2010-2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from base64 import standard_b64encode as b64encode
from base64 import standard_b64decode as b64decode
from swift.common.http import HTTP_OK
from swift.common.utils import json, public, config_true_value
from swift.common.middleware.s3api.controllers.base import Controller
from swift.common.middleware.s3api.etree import Element, SubElement, tostring, \
fromstring, XMLSyntaxError, DocumentInvalid
from swift.common.middleware.s3api.s3response import HTTPOk, S3NotImplemented, \
InvalidArgument, \
MalformedXML, InvalidLocationConstraint, NoSuchBucket, \
BucketNotEmpty, InternalError, ServiceUnavailable, NoSuchKey
from swift.common.middleware.s3api.utils import MULTIUPLOAD_SUFFIX
MAX_PUT_BUCKET_BODY_SIZE = 10240
class BucketController(Controller):
    """
    Handles bucket request.
    """
    def _delete_segments_bucket(self, req):
        """
        Before delete bucket, delete segments bucket if existing.
        """
        container = req.container_name + MULTIUPLOAD_SUFFIX
        marker = ''
        seg = ''
        try:
            resp = req.get_response(self.app, 'HEAD')
            if int(resp.sw_headers['X-Container-Object-Count']) > 0:
                raise BucketNotEmpty()
            # FIXME: This extra HEAD saves unexpected segment deletion
            # but if a complete multipart upload happen while cleanup
            # segment container below, completed object may be missing its
            # segments unfortunately. To be safer, it might be good
            # to handle if the segments can be deleted for each object.
        except NoSuchBucket:
            pass
        try:
            # Page through the segments container via 'marker' until an
            # empty listing comes back, deleting every segment object.
            while True:
                # delete all segments
                resp = req.get_response(self.app, 'GET', container,
                                        query={'format': 'json',
                                               'marker': marker})
                segments = json.loads(resp.body)
                for seg in segments:
                    try:
                        req.get_response(self.app, 'DELETE', container,
                                         seg['name'])
                    except NoSuchKey:
                        pass
                    except InternalError:
                        raise ServiceUnavailable()
                if segments:
                    marker = seg['name']
                else:
                    break
            req.get_response(self.app, 'DELETE', container)
        except NoSuchBucket:
            # Segments container never existed; nothing to clean up.
            return
        except (BucketNotEmpty, InternalError):
            raise ServiceUnavailable()

    @public
    def HEAD(self, req):
        """
        Handle HEAD Bucket (Get Metadata) request
        """
        resp = req.get_response(self.app)
        return HTTPOk(headers=resp.headers)

    @public
    def GET(self, req):
        """
        Handle GET Bucket (List Objects) request
        """
        max_keys = req.get_validated_param(
            'max-keys', self.conf.max_bucket_listing)
        # TODO: Separate max_bucket_listing and default_bucket_listing
        tag_max_keys = max_keys
        max_keys = min(max_keys, self.conf.max_bucket_listing)
        encoding_type = req.params.get('encoding-type')
        if encoding_type is not None and encoding_type != 'url':
            err_msg = 'Invalid Encoding Method specified in Request'
            raise InvalidArgument('encoding-type', encoding_type, err_msg)
        query = {
            'format': 'json',
            'limit': max_keys + 1,
        }
        if 'marker' in req.params:
            query.update({'marker': req.params['marker']})
        if 'prefix' in req.params:
            query.update({'prefix': req.params['prefix']})
        if 'delimiter' in req.params:
            query.update({'delimiter': req.params['delimiter']})
        # GET Bucket (List Objects) Version 2 parameters
        is_v2 = int(req.params.get('list-type', '1')) == 2
        fetch_owner = False
        if is_v2:
            if 'start-after' in req.params:
                query.update({'marker': req.params['start-after']})
            # continuation-token overrides start-after
            if 'continuation-token' in req.params:
                decoded = b64decode(req.params['continuation-token'])
                query.update({'marker': decoded})
            if 'fetch-owner' in req.params:
                fetch_owner = config_true_value(req.params['fetch-owner'])
        resp = req.get_response(self.app, query=query)
        objects = json.loads(resp.body)
        elem = Element('ListBucketResult')
        SubElement(elem, 'Name').text = req.container_name
        SubElement(elem, 'Prefix').text = req.params.get('prefix')
        # in order to judge that truncated is valid, check whether
        # max_keys + 1 th element exists in swift.
        is_truncated = max_keys > 0 and len(objects) > max_keys
        objects = objects[:max_keys]
        if not is_v2:
            SubElement(elem, 'Marker').text = req.params.get('marker')
            if is_truncated and 'delimiter' in req.params:
                # 'name' entries are objects, 'subdir' entries are common
                # prefixes produced by the delimiter.
                if 'name' in objects[-1]:
                    SubElement(elem, 'NextMarker').text = \
                        objects[-1]['name']
                if 'subdir' in objects[-1]:
                    SubElement(elem, 'NextMarker').text = \
                        objects[-1]['subdir']
        else:
            if is_truncated:
                # V2 listings paginate with a base64 continuation token.
                if 'name' in objects[-1]:
                    SubElement(elem, 'NextContinuationToken').text = \
                        b64encode(objects[-1]['name'])
                if 'subdir' in objects[-1]:
                    SubElement(elem, 'NextContinuationToken').text = \
                        b64encode(objects[-1]['subdir'])
            if 'continuation-token' in req.params:
                SubElement(elem, 'ContinuationToken').text = \
                    req.params['continuation-token']
            if 'start-after' in req.params:
                SubElement(elem, 'StartAfter').text = \
                    req.params['start-after']
            SubElement(elem, 'KeyCount').text = str(len(objects))
        SubElement(elem, 'MaxKeys').text = str(tag_max_keys)
        if 'delimiter' in req.params:
            SubElement(elem, 'Delimiter').text = req.params['delimiter']
        if encoding_type is not None:
            SubElement(elem, 'EncodingType').text = encoding_type
        SubElement(elem, 'IsTruncated').text = \
            'true' if is_truncated else 'false'
        for o in objects:
            if 'subdir' not in o:
                contents = SubElement(elem, 'Contents')
                SubElement(contents, 'Key').text = o['name']
                SubElement(contents, 'LastModified').text = \
                    o['last_modified'][:-3] + 'Z'
                SubElement(contents, 'ETag').text = '"%s"' % o['hash']
                SubElement(contents, 'Size').text = str(o['bytes'])
                if fetch_owner or not is_v2:
                    owner = SubElement(contents, 'Owner')
                    SubElement(owner, 'ID').text = req.user_id
                    SubElement(owner, 'DisplayName').text = req.user_id
                SubElement(contents, 'StorageClass').text = 'STANDARD'
        for o in objects:
            if 'subdir' in o:
                common_prefixes = SubElement(elem, 'CommonPrefixes')
                SubElement(common_prefixes, 'Prefix').text = o['subdir']
        body = tostring(elem, encoding_type=encoding_type)
        return HTTPOk(body=body, content_type='application/xml')

    @public
    def PUT(self, req):
        """
        Handle PUT Bucket request
        """
        xml = req.xml(MAX_PUT_BUCKET_BODY_SIZE)
        if xml:
            # check location
            try:
                elem = fromstring(
                    xml, 'CreateBucketConfiguration', self.logger)
                location = elem.find('./LocationConstraint').text
            except (XMLSyntaxError, DocumentInvalid):
                raise MalformedXML()
            except Exception as e:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                self.logger.error(e)
                # Python 2 three-argument raise: re-raises the original
                # exception with its traceback after logging it.
                raise exc_type, exc_value, exc_traceback
            if location != self.conf.location:
                # s3api cannot support multiple regions currently.
                raise InvalidLocationConstraint()
        resp = req.get_response(self.app)
        resp.status = HTTP_OK
        resp.location = '/' + req.container_name
        return resp

    @public
    def DELETE(self, req):
        """
        Handle DELETE Bucket request
        """
        if self.conf.allow_multipart_uploads:
            self._delete_segments_bucket(req)
        resp = req.get_response(self.app)
        return resp

    @public
    def POST(self, req):
        """
        Handle POST Bucket request
        """
        raise S3NotImplemented()
| matthewoliver/swift | swift/common/middleware/s3api/controllers/bucket.py | Python | apache-2.0 | 9,556 |
#!/usr/bin/env python
# coding: utf-8
#
# This file is part of mpdav.
#
# mpdav is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mpdav is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mpdav. If not, see <http://www.gnu.org/licenses/>.
from wsgiref.simple_server import make_server
import mpdav
class DavWsgiApp(object):
    """Minimal WSGI adapter around mpdav's WebDAV request handler.

    Python 2 code: relies on dict.iteritems() and str.decode().
    """
    def __init__(self):
        # Serve files from the local "data" directory.
        self.dav = mpdav.WebDavRequestHandler(mpdav.FileBackend("data"))
    def __call__(self, environ, start_response):
        # Translate the WSGI environ into the handler's call signature.
        method = environ["REQUEST_METHOD"].lower()
        host = environ["HTTP_HOST"]
        path = environ["PATH_INFO"].decode("utf-8")
        headers = self._build_headers(environ)
        body = environ["wsgi.input"]
        response = self.dav.handle(method, host, path, headers, body)
        # NOTE(review): response.status appears to be a (code, reason)
        # tuple given the "%d %s" format -- confirm against mpdav.
        start_response("%d %s" % (response.status),
                       [(k, v) for k, v in response.headers.iteritems()])
        if response.body:
            return response.body
        else:
            return []
    def _build_headers(self, environ):
        """Collect HTTP_* and CONTENT_* environ keys as request headers."""
        result = mpdav.HeadersDict()
        for k, v in environ.iteritems():
            if k.startswith("HTTP_"):
                # Strip the HTTP_ prefix and restore dashed header names.
                result[k[5:].replace("_", "-")] = v
            elif k.startswith("CONTENT_"):
                # CONTENT_TYPE / CONTENT_LENGTH have no HTTP_ prefix.
                result[k.replace("_", "-")] = v
        return result
if __name__ == "__main__":
    # Serve the WebDAV app on all interfaces, port 8000, until killed.
    s = make_server('', 8000, DavWsgiApp())
    s.serve_forever()
| mprochnow/mpdav | wsgi.py | Python | gpl-3.0 | 1,855 |
#!/usr/bin/env python
"""
Convert CSV file to libsvm format. Works only with numeric variables.
"""
import sys
import csv
import argparse
def construct_line(label, line):
    """Format one sample in sparse libsvm format.

    Feature indices are 1-based; empty and numerically-zero features are
    omitted.  A numerically-zero label is normalised to the string "0".
    Returns the line terminated with a newline.
    """
    if float(label) == 0.0:
        label = "0"
    pairs = ["%s:%s" % (index + 1, item)
             for index, item in enumerate(line)
             if item != '' and float(item) != 0.0]
    return " ".join([label] + pairs) + "\n"
# ---
# Command-line interface (Python 2 script: uses print statements and
# reader.next() below).
parser = argparse.ArgumentParser()
parser.add_argument( "input_file", help = "path to the CSV input file" )
parser.add_argument( "output_file", help = "path to the output file" )
parser.add_argument( "-l", "--label-index", help = "zero based index for the label column. If there are no labels in the file, use -1.",
    type = int, default = 0 )
parser.add_argument( "-s", "--skip-headers", help = "Use this switch if there are headers in the input file.", action = 'store_true' )
args = parser.parse_args()
#
i = open( args.input_file )
o = open( args.output_file, 'wb' )
reader = csv.reader( i )
if args.skip_headers:
    # Consume (and discard) the header row.
    headers = reader.next()
for line in reader:
    if args.label_index == -1:
        # No label column: emit a constant dummy label.
        label = "1"
    else:
        # pop() removes the label so it is not emitted as a feature.
        label = line.pop( args.label_index )
    try:
        new_line = construct_line( label, line )
        o.write( new_line )
    except ValueError:
        # Non-numeric value in the row; report it and move on.
        print "Problem with the following line, skipping..."
        print line
| zygmuntz/phraug2 | csv2libsvm.py | Python | bsd-2-clause | 1,420 |
from django.db.models import Q
from .models import Team
class TeamPermissionsBackend(object):
    """Django auth backend that grants permissions via team memberships."""

    def authenticate(self, username=None, password=None):
        # This backend only supplies permissions; it never authenticates.
        return None

    def _permissions_for_state(self, user_obj, state, field):
        """Return "app_label.codename" strings granted for *state* members.

        *field* is the Team relation holding the granted permissions:
        "permissions" for plain members, "manager_permissions" for
        managers.  Factored out of get_team_permissions(), where the two
        queries were copy-pasted duplicates.
        """
        teams = Team.objects.filter(
            Q(memberships__user=user_obj),
            Q(memberships__state=state),
        )
        perms = teams.values_list(
            "%s__content_type__app_label" % field,
            "%s__codename" % field,
        ).order_by()
        return ["%s.%s" % (ct, name) for ct, name in perms]

    def get_team_permissions(self, user_obj, obj=None):
        """
        Returns a set of permission strings that this user has through his/her
        team memberships.
        """
        # Object-level permissions are not supported by this backend.
        if user_obj.is_anonymous() or obj is not None:
            return set()
        if not hasattr(user_obj, "_team_perm_cache"):
            # Member permissions, then manager permissions.
            permissions = self._permissions_for_state(
                user_obj, "member", "permissions")
            permissions += self._permissions_for_state(
                user_obj, "manager", "manager_permissions")
            # Cache on the user object for the lifetime of the request.
            user_obj._team_perm_cache = set(permissions)
        return user_obj._team_perm_cache

    def has_perm(self, user_obj, perm, obj=None):
        """True if the active user holds *perm* through any membership."""
        if not user_obj.is_active:
            return False
        return perm in self.get_team_permissions(user_obj, obj)
| pyconau2017/symposion | symposion/teams/backends.py | Python | bsd-3-clause | 1,686 |
# Model/run configuration for this echoRD test case.
mcinif='mcini_weihera'
runname='gen_test3221i'
mcpick='gen_test3a.pickle'
pathdir='/beegfs/work/ka_oj4748/echoRD'
wdir='/beegfs/work/ka_oj4748/gen_tests'
# Forcing and discretisation overrides passed to the driver below.
update_prec=0.04
update_mf='07moist.dat'
update_part=100
# Make the echoRD package importable from its install location.
import sys
sys.path.append(pathdir)
import run_echoRD as rE
# Launch the job; hdf5pick=False presumably selects the pickle-based
# state output -- confirm in run_echoRD.
rE.echoRD_job(mcinif=mcinif,mcpick=mcpick,runname=runname,wdir=wdir,pathdir=pathdir,update_prec=update_prec,update_mf=update_mf,update_part=update_part,hdf5pick=False)
| cojacoo/testcases_echoRD | gen_test3221i.py | Python | gpl-3.0 | 441 |
# Copyright 2009-2011 Steven Robertson, Christoph Reiter
# 2020 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import collections
import collections.abc
import subprocess
from enum import Enum
from typing import Iterable, Tuple

from gi.repository import GLib, Gst

from quodlibet import _, print_d, config
from quodlibet.util.string import decode
from quodlibet.util import is_linux, is_windows
from quodlibet.player import PlayerError
class AudioSinks(Enum):
    """Relevant Gstreamer sink elements"""
    # Diagnostic sink that discards data; used here to probe linkability.
    FAKE = "fakesink"
    # Windows DirectSound output.
    DIRECTSOUND = "directsoundsink"
    PULSE = "pulsesink"
    """from plugins-good"""
    ALSA = "alsasink"
    """from plugins-base"""
    AUTO = "autoaudiosink"
    """from plugins-good"""
    JACK = "jackaudiosink"
    """from plugins-good"""
    # Windows WASAPI output.
    WASAPI = "wasapisink"
def pulse_is_running():
    """Return True if a PulseAudio server appears to be reachable."""
    # Probing a pulsesink's READY state tells us whether a server is up.
    sink = Gst.ElementFactory.make(AudioSinks.PULSE.value, None)
    if sink is not None:
        sink.set_state(Gst.State.READY)
        state_result = sink.get_state(0)[0]
        sink.set_state(Gst.State.NULL)
        return state_result != Gst.StateChangeReturn.FAILURE
    # No pulsesink element available: ask the pulseaudio binary instead.
    try:
        subprocess.check_call(["pulseaudio", "--check"])
        return True
    except (subprocess.CalledProcessError, OSError):
        return False
def jack_is_running() -> bool:
    """:returns: whether a JACK server is currently reachable"""
    sink = Gst.ElementFactory.make(AudioSinks.JACK.value, "test sink")
    if not sink:
        # jackaudiosink element not available at all.
        return False
    sink.set_state(Gst.State.READY)
    status = sink.get_state(0)[0]
    sink.set_state(Gst.State.NULL)
    return status != Gst.StateChangeReturn.FAILURE
def link_many(elements: Iterable[Gst.Element]) -> None:
    """Links all elements together

    :raises OSError: if they can't all be linked
    """
    # Materialise first: *elements* may be a one-shot iterator, and it is
    # traversed twice (once by the logging comprehension, once by the
    # linking loop below); previously a generator would link nothing.
    elements = list(elements)
    print_d(f"Attempting to link Gstreamer element(s): "
            f"{[type(e).__name__ for e in elements]}")
    last = None
    for element in elements:
        if last:
            if not Gst.Element.link(last, element):
                raise OSError(f"Failed on element: {type(element).__name__}")
        last = element
def unlink_many(elements):
    """Unlink each adjacent pair; return False on the first failure."""
    previous = None
    for element in elements:
        if previous is not None and not Gst.Element.unlink(previous, element):
            return False
        previous = element
    return True
def iter_to_list(func):
    """Drain the Gst iterator returned by *func*() into a Python list."""
    objects = []
    iterator = func()
    while True:
        status, value = iterator.next()
        if status != Gst.IteratorResult.OK:
            # Any non-OK status (DONE, ERROR, RESYNC) ends the drain.
            break
        objects.append(value)
    return objects
def find_audio_sink() -> Tuple[Gst.Element, str]:
    """Get the best audio sink available.

    Returns (element, description) or raises PlayerError.
    """

    def sink_options():
        # People with Jack running probably want it more than any other options
        if config.getboolean("player", "gst_use_jack") and jack_is_running():
            print_d("Using JACK output via Gstreamer")
            return [AudioSinks.JACK]
        elif is_windows():
            return [AudioSinks.DIRECTSOUND]
        elif is_linux() and pulse_is_running():
            return [AudioSinks.PULSE]
        else:
            # Generic fallback preference order.
            return [
                AudioSinks.AUTO,
                AudioSinks.PULSE,
                AudioSinks.ALSA,
            ]
    options = sink_options()
    for sink in options:
        element = Gst.ElementFactory.make(sink.value, "player")
        # NOTE(review): set_property is called before the None check
        # below; if jackaudiosink exists but make() returned None this
        # would raise -- confirm make() can't fail for a known factory.
        if (sink == AudioSinks.JACK
                and not config.getboolean("player", "gst_jack_auto_connect")):
            # Disable the auto-connection to outputs (e.g. maybe there's scripting)
            element.set_property("connect", "none")
        if element is not None:
            return element, sink.value
    else:
        # for/else: reached only when no candidate produced an element
        # (the loop body returns on success and never breaks).
        details = ', '.join(s.value for s in options) if options else "[]"
        raise PlayerError(_("No GStreamer audio sink found. Tried: %s") % details)
def GStreamerSink(pipeline_desc):
    """Returns a list of unlinked gstreamer elements ending with an audio sink
    and a textual description of the pipeline.

    `pipeline_desc` can be gst-launch syntax for multiple elements
    with or without an audiosink.

    In case of an error, raises PlayerError
    """
    pipe = None
    if pipeline_desc:
        try:
            # Parse each '!'-separated stage into its own element.
            pipe = [Gst.parse_launch(e) for e in pipeline_desc.split('!')]
        except GLib.GError as e:
            message = e.message
            raise PlayerError(_("Invalid GStreamer output pipeline"), message)
    if pipe:
        # In case the last element is linkable with a fakesink
        # it is not an audiosink, so we append the default one
        fake = Gst.ElementFactory.make(AudioSinks.FAKE.value, None)
        try:
            link_many([pipe[-1], fake])
        except OSError:
            pass
        else:
            # Probe link succeeded: undo it and append a real sink.
            unlink_many([pipe[-1], fake])
            default_elm, default_desc = find_audio_sink()
            pipe += [default_elm]
            pipeline_desc += " ! " + default_desc
    else:
        # No user pipeline: just use the best available sink.
        elm, pipeline_desc = find_audio_sink()
        pipe = [elm]
    return pipe, pipeline_desc
# collections.Mapping was moved to collections.abc in Python 3.3 and the
# old alias was removed in Python 3.10; fall back for ancient interpreters.
try:
    from collections.abc import Mapping as _Mapping
except ImportError:  # pragma: no cover - Python 2 only
    from collections import Mapping as _Mapping


class TagListWrapper(_Mapping):
    """Read-only mapping view over a ``Gst.TagList``.

    Keys are GStreamer tag names. Values are lists of tag values, or a
    single " - "-joined string when ``merge`` is enabled and the values
    are strings.
    """

    def __init__(self, taglist, merge=False):
        # taglist: a Gst.TagList; merge: join multi-valued string tags
        self._list = taglist
        self._merge = merge

    def __len__(self):
        return self._list.n_tags()

    def __iter__(self):
        for i in range(len(self)):
            yield self._list.nth_tag_name(i)

    def __getitem__(self, key):
        """Return all values stored for ``key``; raise KeyError if the tag
        is unknown to GStreamer or has no values."""
        if not Gst.tag_exists(key):
            raise KeyError
        values = []
        index = 0
        while True:
            value = self._list.get_value_index(key, index)
            if value is None:
                break
            values.append(value)
            index += 1
        if not values:
            raise KeyError
        if self._merge:
            try:
                return " - ".join(values)
            except TypeError:
                # Non-string values can't be joined; return the first one
                return values[0]
        return values
def parse_gstreamer_taglist(tags):
    """Takes a GStreamer taglist and returns a dict containing only
    numeric and unicode values and str keys.

    Values of repeated keys are merged into one newline-separated string;
    "extended-comment" entries are split into their vorbiscomment
    key/value pairs; Gst.DateTime values become ISO-8601 strings.
    """
    merged = {}
    for key in tags.keys():
        value = tags[key]
        # extended-comment sometimes contains a single vorbiscomment or
        # a list of them ["key=value", "key=value"]
        if key == "extended-comment":
            if not isinstance(value, list):
                value = [value]
            for val in value:
                if not isinstance(val, str):
                    continue
                # Split "key=value" on the first "=" only; a value without
                # "=" ends up duplicated as both sub_key and val
                split = val.split("=", 1)
                sub_key = split[0]
                val = split[-1]
                if sub_key in merged:
                    sub_val = merged[sub_key]
                    if not isinstance(sub_val, str):
                        continue
                    # Append only values not already present for this key
                    if val not in sub_val.split("\n"):
                        merged[sub_key] += "\n" + val
                else:
                    merged[sub_key] = val
        elif isinstance(value, Gst.DateTime):
            value = value.to_iso8601_string()
            merged[key] = value
        else:
            if isinstance(value, (int, float)):
                merged[key] = value
                continue
            if isinstance(value, bytes):
                value = decode(value)
            # Coerce any remaining non-string type to its string form
            if not isinstance(value, str):
                value = str(value)
            if key in merged:
                merged[key] += "\n" + value
            else:
                merged[key] = value
    return merged
def bin_debug(elements, depth=0, lines=None):
    """Takes a list of gst.Element that are part of a prerolled pipeline, and
    recursively gets the children and all caps between the elements.

    Returns a list of text lines suitable for printing.
    """

    from quodlibet.util.dprint import Colorise

    if lines is None:
        lines = []
    else:
        # Mark descent into a child bin
        lines.append(" " * (depth - 1) + "\\")

    indent = " " * depth
    for element in elements:
        # Show the negotiated caps on every sink pad of this element
        for sink_pad in iter_to_list(element.iterate_sink_pads):
            pad_caps = sink_pad.get_current_caps()
            if pad_caps:
                lines.append("%s| %s" % (indent, pad_caps.to_string()))
        class_label = Colorise.blue(type(element).__name__.split(".", 1)[-1])
        lines.append("%s|-%s (%s)" % (indent, class_label, element.get_name()))
        if isinstance(element, Gst.Bin):
            # Recurse into the bin's children (in reverse sorted order)
            bin_debug(reversed(iter_to_list(element.iterate_sorted)),
                      depth + 1, lines)

    return lines
| Meriipu/quodlibet | quodlibet/player/gstbe/util.py | Python | gpl-2.0 | 8,936 |
#-*- coding: utf-8 -*-
from django.conf import settings
from django.core import exceptions
from django.utils.importlib import import_module
class CartModifiersPool(object):
    """Loads and caches the cart-modifier instances configured in
    ``settings.SHOP_CART_MODIFIERS``."""

    # Set to False (e.g. in tests) to re-import the modifiers on every call.
    USE_CACHE = True

    def __init__(self):
        self._modifiers_list = []

    def get_modifiers_list(self):
        """Return the modifier instances, loading them on first use (or on
        every call when USE_CACHE is disabled)."""
        if not self.USE_CACHE or not self._modifiers_list:
            self._modifiers_list = self._load_modifiers_list()
        return self._modifiers_list

    def _load_modifiers_list(self):
        """
        Heavily inspired by django.core.handlers.base...

        Raises ImproperlyConfigured for an unparsable path, an unimportable
        module, or a module missing the named class.
        """
        # Use the stdlib importlib: django.utils.importlib was deprecated in
        # Django 1.7 and removed in 1.9 (the module-level import above should
        # be migrated too).
        from importlib import import_module as _import_module
        result = []
        if not getattr(settings, 'SHOP_CART_MODIFIERS', None):
            return result
        for modifier_path in settings.SHOP_CART_MODIFIERS:
            try:
                mod_module, mod_classname = modifier_path.rsplit('.', 1)
            except ValueError:
                raise exceptions.ImproperlyConfigured(
                    '%s isn\'t a price modifier module' % modifier_path)
            try:
                mod = _import_module(mod_module)
            except ImportError as e:
                raise exceptions.ImproperlyConfigured(
                    'Error importing modifier %s: "%s"' % (mod_module, e))
            try:
                mod_class = getattr(mod, mod_classname)
            except AttributeError:
                raise exceptions.ImproperlyConfigured(
                    'Price modifier module "%s" does not define a "%s" class' %
                    (mod_module, mod_classname))
            result.append(mod_class())
        return result

# Shared singleton used by the rest of the shop app.
cart_modifiers_pool = CartModifiersPool()
| jrutila/django-shop | shop/cart/modifiers_pool.py | Python | bsd-3-clause | 1,659 |
from pylab import plotfile, show, gca
import matplotlib.cbook as cbook
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as cm
import string
import numpy
import csv
from itertools import izip
# Matplotlib marker shapes that support fill styles (candidate plot markers).
filled_markers = ('o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd', 'P', 'X')
def rgb2gray(rgb):
    """Convert an RGB(A) array to grayscale luminance.

    Uses the ITU-R BT.601 luma weights (0.299, 0.587, 0.114); the original
    had a typo for the blue weight (0.144), so the weights did not sum to 1
    and pure white did not map to 1.0. Any alpha channel is ignored.
    """
    return numpy.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def main(argv):
    """Read the benchmark CSV named in argv[1], transpose it via pandas
    into output.csv, then generate the per-benchmark comparison plots.

    NOTE(review): Python 2 code (print statements); runs against legacy
    matplotlib (mlab.csv2rec was removed in matplotlib 3.1).
    """
    file=argv[1]
    col_names_list=[]
    row_names_list=[]
    #with open(file) as f:
    #    with open(file+"tans", 'w') as fw:
    #        csv.writer(fw, delimiter=',').writerows(izip(*csv.reader(f, delimiter=',')))
    import pandas as pd
    # Transpose the CSV so each benchmark becomes a row of output.csv
    pd.read_csv(file).T.to_csv('output.csv',header=False)
    file ="output.csv"
    #return
    with open(file) as csv_file:
        # First row: column names; first field of each later row: bench name
        for row in csv_file:
            col_names_list =[x.strip() for x in row.split(",")]
            break
        for row in csv_file:
            row_names_list.append(row.split(",")[0])
    col_names= tuple(col_names_list)
    #col_names = '.'join(col_names)
    #filter_x = lambda x,y: [xi for (xi, yi) in zip(x,y) if not numpy.isnan(yi)]
    #filter_y = lambda y: [yi for yi in y if not numpy.isnan(yi)]
    # Keep only finite samples (defined but unused below)
    filter_x = lambda x,y : x[numpy.isfinite(y)]
    filter_y = lambda y : y[numpy.isfinite(y)]
    csv_data = matplotlib.mlab.csv2rec(file,delimiter=u',', skiprows=2, missing='--',names=col_names)
    print row_names_list
    print col_names
    # Core counts used as the x axis of every plot
    xx=[1,2,4,8,16,24,32]
    print "-------------------------"
    #print csv_data
    #print len(xx)
    #print len(csv_data['taskbench_gnu_t2_n1000'])
    print "-------------------------"
    #print filter_x(lbc_768['cores'],lbc_768['lbc_pure_MPI'])
    #print filter_y(lbc_768['lbc_pure_MPI'])
    # 7 consecutive rows per (benchmark, config) group in the transposed data
    size=7
    compilers_list=["gnu","intel", "intel-17.0.1.132", "mcc.openmp","mcc.ompss","clang"]
    tasks_list=["1000", "5000"]
    # seperate(compilers_list,tasks_list,size,csv_data,xx)
    thread=["1","2","12"]
    seperate2(compilers_list,tasks_list,size,csv_data,xx,thread)
    #seperateTrans(compilers_list,tasks_list,size,csv_data,xx,thread)
    # One figure per benchmark; the last arg is the row offset of the bench
    seperateByBench(compilers_list,tasks_list,size,csv_data,xx,thread,2)
    seperateByBench(compilers_list,tasks_list,size,csv_data,xx,thread,9)
    seperateByBench(compilers_list,tasks_list,size,csv_data,xx,thread,16)
    seperateByBench(compilers_list,tasks_list,size,csv_data,xx,thread,23)
    seperateByBench(compilers_list,tasks_list,size,csv_data,xx,thread,30)
    seperateByBench(compilers_list,tasks_list,size,csv_data,xx,thread,37)
def nameFilter(input, compiler, thread):
    """Build the legend label "<compiler> threads=<thread>".

    The `input` benchmark name is accepted for call compatibility but is
    not used in the label.
    """
    return compiler + " threads=" + thread
def seperateByBench(compilers_list,tasks_list,size,csv_data,xx,threads,start):
    """Plot one log-log figure per task count for the single benchmark
    whose rows begin at index `start`, comparing every compiler and
    thread count; saves it as "<bench>_<threads>_<tasks>.pdf".
    """
    for tasks in tasks_list:
        plt.figure()
        for thread in threads:
            for compiler in compilers_list:
                # Colour/marker per compiler, line style per thread count
                line=":"
                color="red"
                marker="o"
                if compiler == "intel":
                    color="green"
                    marker="<"
                if compiler == "intel-17.0.1.132":
                    color="yellow"
                    marker=">"
                if compiler == "mcc.openmp":
                    color = "blue"
                    marker="_"
                if compiler == "mcc.ompss":
                    color = "magenta"
                    marker="|"
                if compiler == "clang":
                    color = "orange"
                    marker="s"
                # NOTE(review): `is` compares identity, not equality; works
                # here only because of CPython small-string interning --
                # should be `==`
                if thread is "2":
                    line="--"
                if thread is "12":
                    line="-."
                plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                        label=nameFilter(csv_data["bench"][start],compiler,thread), marker=marker, markersize=8, fillstyle="none",color=color,linestyle=line)
        #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
        #plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.12),ncol=2, fancybox=True, shadow=True,fontsize=10)
        plt.legend(loc='upper left',bbox_to_anchor=(-0.15, 1.13),ncol=2, fancybox=True, shadow=True,fontsize=8,markerscale=0.5)
        plt.grid()
        #gca().set_xlabel('threads/per core')
        #gca().set_ylabel('walltime in seconds')
        gca().set_xscale('log',basex=2)
        gca().set_yscale('log',basex=2)
        gca().set_xticks(xx)
        #gca().set_xticks([1,2,4,8,12,16,24])
        #gca().set_yticks([10,20,30,50,100,130,180])
        gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        gca().get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        plt.xlim(0,36)
        #plt.ylim(7,260)
        plt.plot()
        plt.ylim([0,5000])
        name =nameFilter(csv_data["bench"][start],"",thread)
        # Strip digits from the bench name to form the output file name
        # (str.translate(None, ...) is Python-2-only)
        filename = csv_data["bench"][start].translate(None, string.digits)+"_"+str(threads)+"_"+tasks+".pdf"
        plt.savefig(filename.translate(None, "[],'").replace(" ", "_"),format='pdf',dpi=600)
        #plt.show()
def seperate2(compilers_list,tasks_list,size,csv_data,xx,threads):
    """Plot one combined log-log figure per task count with the selected
    benchmarks (row offsets 2, 23, 30, 37) for every compiler/thread
    combination; saves it as "<compilers>_<tasks>.pdf".
    """
    for tasks in tasks_list:
        plt.figure()
        for thread in threads:
            for compiler in compilers_list:
                # Colour per compiler, line style per thread count
                line=":"
                color="red"
                if compiler == "intel":
                    color="green"
                if compiler == "mcc.openmp":
                    color = "blue"
                if compiler == "mcc.ompss":
                    color = "magenta"
                if compiler == "clang":
                    color = "orange"
                # NOTE(review): `is` should be `==` (identity vs equality)
                if thread is "2":
                    line="--"
                # Each `start` selects a benchmark's block of rows
                start=2
                plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                        label=nameFilter(csv_data["bench"][start],compiler,thread), marker='o', markersize=8, fillstyle="none",color=color,linestyle=line)
                start=9
                #plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                #        label=nameFilter(csv_data["bench"][start],compiler,thread), marker='v', markersize=10,color=color,linestyle=line)
                start=16
                #plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                #        label=nameFilter(csv_data["bench"][start],compiler,thread), marker='^', markersize=10,color=color,linestyle=line)
                start=23
                plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                        label=nameFilter(csv_data["bench"][start],compiler,thread), marker='<', markersize=8, fillstyle="none",color=color,linestyle=line)
                start=30
                plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                        label=nameFilter(csv_data["bench"][start],compiler,thread), marker='>', markersize=8, fillstyle="none",color=color,linestyle=line)
                start=37
                plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                        label=nameFilter(csv_data["bench"][start],compiler,thread), marker='s', markersize=8, fillstyle="none",color=color,linestyle=line)
        #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
        #plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.12),ncol=2, fancybox=True, shadow=True,fontsize=10)
        plt.legend(loc='upper left',ncol=2, fancybox=True, shadow=True,fontsize=8,markerscale=0.5)
        plt.grid()
        #gca().set_xlabel('threads/per core')
        #gca().set_ylabel('walltime in seconds')
        gca().set_xscale('log',basex=2)
        gca().set_yscale('log',basex=2)
        gca().set_xticks(xx)
        #gca().set_xticks([1,2,4,8,12,16,24])
        #gca().set_yticks([10,20,30,50,100,130,180])
        gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        gca().get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        plt.xlim(0,36)
        #plt.ylim(7,260)
        plt.plot()
        plt.ylim([0,1000])
        plt.savefig("_".join(compilers_list)+"_"+tasks+".pdf",format='pdf',dpi=600)
        #plt.show()
def seperate(compilers_list,tasks_list,size,csv_data,xx):
    """Plot one figure per (compiler, task count) pair, comparing the
    1-thread (red, dotted) and 2-thread (green, dashed) runs of each
    selected benchmark; saves it as "<compiler>_<tasks>.pdf".
    """
    for compiler in compilers_list:
        for tasks in tasks_list:
            plt.figure()
            # Each `start` selects a benchmark's block of rows
            start=2
            plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t1_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t1_n'+tasks+'_error'][start:start+size],
                    label=nameFilter(csv_data["bench"][start],"","1"), marker='o', markersize=8, fillstyle="none",color='red',linestyle=':')
            plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t2_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t2_n'+tasks+'_error'][start:start+size],
                    label=nameFilter(csv_data["bench"][start],"","2"), marker='o', markersize=8, fillstyle="none",color='green',linestyle='--')
            start=9
            #plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t1_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t1_n'+tasks+'_error'][start:start+size],
            #        label=nameFilter(csv_data["bench"][start],"","1"), marker='v', markersize=10,color='red',linestyle=':')
            #plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t2_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t2_n'+tasks+'_error'][start:start+size],
            #        label=nameFilter(csv_data["bench"][start],"","2"), marker='v', markersize=10,color='green',linestyle='--')
            start=16
            #plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t1_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t1_n'+tasks+'_error'][start:start+size],
            #        label=nameFilter(csv_data["bench"][start],"","1"), marker='^', markersize=10,color='red',linestyle=':')
            #plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t2_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t2_n'+tasks+'_error'][start:start+size],
            #        label=nameFilter(csv_data["bench"][start],"","2"), marker='^', markersize=10,color='green',linestyle='--')
            start=23
            plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t1_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t1_n'+tasks+'_error'][start:start+size],
                    label=nameFilter(csv_data["bench"][start],"","1"), marker='<', markersize=8, fillstyle="none",color='red',linestyle=':')
            plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t2_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t2_n'+tasks+'_error'][start:start+size],
                    label=nameFilter(csv_data["bench"][start],"","2"), marker='<', markersize=8, fillstyle="none",color='green',linestyle='--')
            start=30
            plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t1_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t1_n'+tasks+'_error'][start:start+size],
                    label=nameFilter(csv_data["bench"][start],"","1"), marker='>', markersize=8, fillstyle="none",color='red',linestyle=':')
            plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t2_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t2_n'+tasks+'_error'][start:start+size],
                    label=nameFilter(csv_data["bench"][start],"","2"), marker='>', markersize=8, fillstyle="none",color='green',linestyle='--')
            start=37
            plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t1_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t1_n'+tasks+'_error'][start:start+size],
                    label=nameFilter(csv_data["bench"][start],"","1"), marker='s', markersize=8, fillstyle="none",color='red',linestyle=':')
            plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t2_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t2_n'+tasks+'_error'][start:start+size],
                    label=nameFilter(csv_data["bench"][start],"","2"), marker='s', markersize=8, fillstyle="none",color='green',linestyle='--')
            #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
            #plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.12),ncol=1, fancybox=True, shadow=True,fontsize=10)
            plt.legend(loc='upper left',ncol=1, fancybox=True, shadow=True,fontsize=10,markerscale=0.5)
            plt.grid()
            gca().set_title(compiler)
            #gca().set_xlabel('threads/per core')
            #gca().set_ylabel('walltime in seconds')
            gca().set_xscale('log',basex=2)
            gca().set_yscale('log',basex=2)
            gca().set_xticks(xx)
            #gca().set_xticks([1,2,4,8,12,16,24])
            #gca().set_yticks([10,20,30,50,100,130,180])
            gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
            gca().get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
            plt.xlim(0,36)
            #plt.ylim(7,260)
            plt.plot()
            plt.savefig(compiler+"_"+tasks+".pdf",format='pdf',dpi=600)
            #plt.show()
def seperateTrans(compilers_list,tasks_list,size,csv_data,xx,threads):
    """Variant of seperate2 drawn with thick, semi-transparent unlabeled
    lines (no markers, no legend); saves "<compilers>_<tasks>.pdf" and
    shows the figure.
    """
    # Transparency for the overlapping thick lines
    alpha =0.7
    for tasks in tasks_list:
        plt.figure()
        for thread in threads:
            for compiler in compilers_list:
                # Colour per compiler, line style per thread count
                line="-"
                color="red"
                if compiler == "intel":
                    color="green"
                if compiler == "mcc.openmp":
                    color = "blue"
                if compiler == "mcc.ompss":
                    color = "magenta"
                # NOTE(review): `is` should be `==` (identity vs equality)
                if thread is "2":
                    line="--"
                # Each `start` selects a benchmark's block of rows
                start=2
                plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                        label=nameFilter(csv_data["bench"][start],compiler,thread),
                        color=color,linestyle=line,linewidth=20.0,alpha=alpha)
                start=9
                #plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                #        label=nameFilter(csv_data["bench"][start],compiler,thread), marker='v', markersize=10,color=color,linestyle=line)
                start=16
                #plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                #        label=nameFilter(csv_data["bench"][start],compiler,thread), marker='^', markersize=10,color=color,linestyle=line)
                start=23
                plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                        label=nameFilter(csv_data["bench"][start],compiler,thread),
                        color=color,linestyle=line,linewidth=20.0,alpha=alpha)
                start=30
                plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                        label=nameFilter(csv_data["bench"][start],compiler,thread),
                        color=color,linestyle=line,linewidth=20.0,alpha=alpha)
                start=37
                plt.errorbar(xx,csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks][start:start+size], yerr= csv_data['taskbench_'+compiler+'_t'+thread+'_n'+tasks+'_error'][start:start+size],
                        label=nameFilter(csv_data["bench"][start],compiler,thread),
                        color=color,linestyle=line,linewidth=20.0,alpha=alpha)
        #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
        #plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.12),ncol=2, fancybox=True, shadow=True,fontsize=10)
        #plt.legend(loc='upper left',ncol=2, fancybox=True, shadow=True,fontsize=10)
        plt.grid()
        #gca().set_xlabel('threads/per core')
        #gca().set_ylabel('walltime in seconds')
        gca().set_xscale('log',basex=2)
        gca().set_yscale('log',basex=2)
        gca().set_xticks(xx)
        #gca().set_xticks([1,2,4,8,12,16,24])
        #gca().set_yticks([10,20,30,50,100,130,180])
        gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        gca().get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        plt.xlim(0,36)
        #plt.ylim(7,260)
        plt.plot()
        plt.savefig("_".join(compilers_list)+"_"+tasks+".pdf",format='pdf',dpi=600)
        plt.show()
if __name__ == "__main__":
    # Script entry point: expects the input CSV path as argv[1].
    import sys
    main(sys.argv)
| devreal/omp-tdb | scripts/plot.py | Python | apache-2.0 | 18,729 |
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings # noqa
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'signalserver.settings')
# Celery application: RabbitMQ broker, Redis result backend.
app = Celery('signalserver',
             backend='redis://redis:6379',
             broker='amqp://guest@rmq:5672//')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
# Look for tasks.py modules in every installed Django app.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
# Re-assert the result backend via settings (overrides the constructor arg).
app.conf.update(
    CELERY_RESULT_BACKEND='redis://redis:6379'
)
@app.task(bind=True)
def debug_task(self):
    """Diagnostic task that prints its own request context.

    bind=True makes the task instance available as ``self``.
    """
    print('Request: {0!r}'.format(self.request))
| yayoiukai/signalserver | signalserver/celery.py | Python | mit | 741 |
# coding: utf-8
"""
======================================================================
Learning and Visualizing the BMS sensor-time-weather data structure
======================================================================
This example employs several unsupervised learning techniques to extract
the energy data structure from variations in Building Automation System (BAS)
and historial weather data.
The fundermental timelet for analysis are 15 min, referred to as Q.
** currently use H (Hour) as a fundermental timelet, need to change later **
The following analysis steps are designed and to be executed.
Data Pre-processing
--------------------------
- Data Retrieval and Standardization
- Outlier Detection
- Interpolation
Data Summarization
--------------------------
- Data Transformation
- Sensor Clustering
Model Discovery Bayesian Network
--------------------------
- Automatic State Classification
- Structure Discovery and Analysis
"""
#print(__doc__)
# Author: Deokwooo Jung [email protected]
##################################################################
# General Moduels
from __future__ import division # To forace float point division
import os
import sys
import numpy as np
from numpy.linalg import inv
from numpy.linalg import norm
import uuid
import pylab as pl
from scipy import signal
from scipy import stats
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from multiprocessing import Pool
#from datetime import datetime
import datetime as dt
from dateutil import tz
import shlex, subprocess
import mytool as mt
import time
import retrieve_weather as rw
import itertools
import calendar
import random
from matplotlib.collections import LineCollection
#from stackedBarGraph import StackedBarGrapher
import pprint
#import radar_chart
# Custom library
from data_tools import *
from data_retrieval import *
from pack_cluster import *
from data_preprocess import *
from shared_constants import *
from pre_bn_state_processing import *
from data_summerization import *
##################################################################
# Input/output directory locations for the Gvalley data set
PROC_OUT_DIR=Gvalley_out_dir
DATA_DIR=Gvalley_data_dir
WEATHER_DIR=Gvalley_weather_dir
# Interactive mode for plotting
plt.ion()
# Gvalley-dependent functions and parameters
# Maps the building id embedded in sensor uids to a short building name
gvalley_bgid_dict={'10110102' :'BDV1', '10110105' :'UEZ1', '10110107' :'HST',\
'10110110' :'UEZ2','10110111' :'MAT','10110113' :'ACH1','10110118' :'ACH2',
'10110119' :'ACT8','10110182' :'KLB1','10110187' :'WKR','10110188' :'ATW1',
'10110190' :'ATW2','10110191' :'EDT2','10110192' :'DPT3','10110193' :'GRH',
'10110194' :'KLS1','10110214' :'OMS','11810101' :'MPC'}
# Known measurement (data point) identifiers appearing in raw file names
gvalley_data_list=['MODEM_NO','HSHLD_INFO_SEQ','CKM_DATE','PF','MAX_DEMND_EPR',\
'CRMON_VLD_EENGY','CRMON_RACT_EENGY','LMON_VLD_EENGY','LMON_RACT_EENGY',\
'LP_FWD_VLD_EENGY','LP_GRD_RACT_EENGY','LP_TRTH_RACT_EENGY','LP_APET_RACT_EENGY',\
'LP_BWD_VLD_EENGY','EMS_GAS_USG','EMS_HTNG_USG','EMS_HOTWT_USG','EMS_TAPWT_USG','MAKEDAY',\
'CORRDAY','REC_CNT']
# Approximate sensor counts per building:
# 'BDV1' : 1500 sensors
# 'MAT': 3500 sensors
# ATW1 : 1500 sensors
# Define name conversion method
def convert_gvalley_name(id_labels):
    """Convert raw Gvalley sensor uid label(s) into readable names of the
    form "<building>_<data id>_<digits>".

    Accepts a single label or a list; labels whose embedded building id
    is not in gvalley_bgid_dict are passed through unchanged.
    """
    out_name=[]
    if isinstance(id_labels,list)==False:
        id_labels=[id_labels]
    for key_label_ in id_labels:
        # Characters 2..9 of the uid hold the building id
        if key_label_[2:10] in gvalley_bgid_dict:
            bldg_id_name=gvalley_bgid_dict[key_label_[2:10]]
            # NOTE(review): this greps id_labels[0] instead of the current
            # key_label_ -- correct only if every label carries the same
            # data id; verify against callers, otherwise a likely bug.
            cdata_id=[data_id for data_id in gvalley_data_list if len(grep(data_id,[id_labels[0]]))>0]
            # The digits just before the data-id suffix in the uid
            lastdigit_id=key_label_[len(key_label_)-len(cdata_id[0])-5:-1*len(cdata_id[0])-1]
            out_name.append(bldg_id_name+'_'+cdata_id[0]+'_'+lastdigit_id)
        else:
            out_name.append(key_label_)
    return out_name
##################################################################
# Processing Configuration Settings
##################################################################
# Analysis BLDG ID
# Optional pre-analysis: count the number of binary files per building
# and data-point id and plot the counts as bar charts (disabled by default).
BLDG_DATA_POINT_CNT=0
if BLDG_DATA_POINT_CNT==1:
    gvalley_data_id_cnt={}
    for bldg_id in gvalley_bgid_dict.keys():
        print 'process ', gvalley_bgid_dict[bldg_id], '...'
        data_id_cnt=[]
        for data_id in gvalley_data_list:
            # Count matching .bin files for this building/data id via shell
            temp=subprocess.check_output('ls '+DATA_DIR+'*'+bldg_id+'*.bin |grep '+data_id+' |wc -l', shell=True)
            data_id_cnt.append(int(shlex.split(temp)[0]))
        gvalley_data_id_cnt.update({gvalley_bgid_dict[bldg_id] :data_id_cnt})
    # Overall maximum count, used to give all subplots the same y range
    max_count=max(max(gvalley_data_id_cnt.values()))
    fig_name='Counts of Data Points'
    fig=figure(fig_name,figsize=(30.0,30.0))
    for i,bldg_id in enumerate(gvalley_bgid_dict.keys()):
        bldg_name=gvalley_bgid_dict[bldg_id]
        plt.subplot(7,3,i+1)
        x_tick_val=range(len(gvalley_data_list))
        x_tick_label=gvalley_data_list
        plt.bar(x_tick_val,gvalley_data_id_cnt[bldg_name])
        plt.ylabel('# of data points',fontsize='small')
        plt.title(bldg_name,fontsize='large')
        # Only label the x axis on the bottom row of subplots
        if i>14:
            plt.xticks(x_tick_val,x_tick_label,rotation=270, fontsize=10)
            plt.tick_params(labelsize='small')
        else:
            plt.xticks(x_tick_val,['']*len(x_tick_val),rotation=270, fontsize=10)
            plt.tick_params(labelsize='small')
        plt.ylim([-0.05,max_count*1.2])
    png_name='Gvalley'+remove_dot(fig_name)+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
    fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# Pre-processing stage: build per-building data dictionaries and summaries.
# Set PRE_BN_STAGE=1 to process all buildings; 0 skips the whole stage.
PRE_BN_STAGE=0
if PRE_BN_STAGE==0:
    bldg_key_set=[]
    print 'skip PRE_BN_STAGE....'
else:
    bldg_key_set=gvalley_bgid_dict.values()
for bldg_key in bldg_key_set:
    print '###############################################################################'
    print '###############################################################################'
    print 'Processing '+ bldg_key+'.....'
    print '###############################################################################'
    print '###############################################################################'
    # NOTE(review): bare except below silently swallows all errors for a
    # building (including KeyboardInterrupt); consider `except Exception`.
    try:
        #bldg_key='BDV1'
        #bldg_key='GRH'
        # Reverse lookup: building name -> building id
        bldg_id=[key_val[0] for key_val in gvalley_bgid_dict.items() if key_val[1]==bldg_key][0]
        data_id='CRMON_VLD_EENGY'
        #temp= subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep "+bldg_id, shell=True)
        # List this building's power-meter binary files via shell
        temp=subprocess.check_output('ls '+DATA_DIR+'*'+bldg_id+'*.bin |grep '+data_id, shell=True)
        input_files_temp =shlex.split(temp)
        # Get rid of duplicated files
        input_files_temp=list(set(input_files_temp))
        input_files=input_files_temp
        #input_files=['../gvalley/Binfiles/'+temp for temp in input_files_temp]
        IS_USING_SAVED_DICT=0
        print 'Extract a common time range...'
        # Analysis period
        ANS_START_T=dt.datetime(2013,1,1,0)
        ANS_END_T=dt.datetime(2013,12,30,0)
        # Interval of timelet, currently set to 1 Hour
        TIMELET_INV=dt.timedelta(hours=1)
        #TIMELET_INV=dt.timedelta(minutes=60)
        print TIMELET_INV, 'time slot interval is set for this data set !!'
        print '-------------------------------------------------------------------'
        PROC_AVG=True
        PROC_DIFF=False
        ###############################################################################
        # This directly searches files from bin file name
        print '###############################################################################'
        print '# Data Pre-Processing'
        print '###############################################################################'
        # define input_files  to be read
        if IS_USING_SAVED_DICT==0:
            # Build the data dictionary from scratch
            ANS_START_T,ANS_END_T,input_file_to_be_included=time_range_check(input_files,ANS_START_T,ANS_END_T,TIMELET_INV)
            print 'time range readjusted to (' ,ANS_START_T, ', ', ANS_END_T,')'
            start__dictproc_t=time.time()
            if IS_SAVING_INDIVIDUAL==True:
                data_dict,purge_list=construct_data_dict_2\
                (input_files,ANS_START_T,ANS_END_T,TIMELET_INV,binfilename='data_dict', IS_USING_PARALLEL=IS_USING_PARALLEL_OPT)
            else:
                data_dict,purge_list=\
                construct_data_dict(input_file_to_be_included,ANS_START_T,ANS_END_T,TIMELET_INV,\
                binfilename='data_dict',IS_USING_PARALLEL=IS_USING_PARALLEL_OPT)
            end__dictproc_t=time.time()
            print 'the time of construct data dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
            print '--------------------------------------'
        elif IS_USING_SAVED_DICT==1:
            # Reuse a previously saved dictionary
            print 'Loading data dictionary......'
            start__dictproc_t=time.time()
            data_dict = mt.loadObjectBinaryFast('data_dict.bin')
            end__dictproc_t=time.time()
            print 'the time of loading data dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
            print '--------------------------------------'
        else:
            print 'Skip data dict'
        # Optional sanity check of the constructed dictionary's format
        CHECK_DATA_FORMAT=0
        if CHECK_DATA_FORMAT==1:
            if IS_SAVING_INDIVIDUAL==True:
                list_of_wrong_data_format=verify_data_format_2(data_used,data_dict,time_slots)
            else:
                list_of_wrong_data_format=verify_data_format(data_used,data_dict,time_slots)
            if len(list_of_wrong_data_format)>0:
                print 'Measurement list below'
                print '----------------------------------------'
                print list_of_wrong_data_format
                raise NameError('Errors in data format')
        Data_Summarization=1
        if Data_Summarization==1:
            bldg_out=data_summerization(bldg_key,data_dict,PROC_AVG=True,PROC_DIFF=False)
    except:
        print 'error occured, pass this '
print '###############################################################################'
Model_Discovery=1
if Model_Discovery==1:
gvalley_pwr_key='CRMON_VLD_EENGY'; gvalley_gas_key='EMS_GAS_USG'
gvalley_heat_key='EMS_HTNG_USG'; gvalley_hwt_key='EMS_HOTWT_USG'
gvalley_twt_key='EMS_TAPWT_USG'; dict_dir='./Gvalley/'
LOAD_BLDG_OBJ=0
if LOAD_BLDG_OBJ==1:
print 'load Gvalley_bldg_obj.bin'
#bldg_=mt.loadObjectBinaryFast(gvalley_dict_dir_set[0]+'Gvalley_bldg_obj.bin')
bldg_=mt.loadObjectBinaryFast(PROC_OUT_DIR+'Gvalley_bldg_obj.bin')
else:
bldg_dict={}
for bldg_load_key in gvalley_bgid_dict.values():
print 'Building for ',bldg_load_key, '....'
try:
bldg_tag='Gvalley_'+bldg_load_key
bldg_load_out=mt.loadObjectBinaryFast(dict_dir+bldg_load_key+'_out.bin')
except:
print 'not found, skip....'
pass
data_dict_keys=bldg_load_out['data_dict'].keys()
del_idx=grep('CRMON_VLD_EENGY',data_dict_keys)
for idx in del_idx:
key=data_dict_keys[idx]
if key in bldg_load_out['data_dict']:
del bldg_load_out['data_dict'][key]
mt.saveObjectBinaryFast(bldg_load_out['data_dict'],dict_dir+'data_dict.bin')
if 'avgdata_dict' in bldg_load_out.keys():
mt.saveObjectBinaryFast(bldg_load_out['avgdata_dict'],dict_dir+'avgdata_dict.bin')
if 'diffdata_dict' in bldg_load_out.keys():
mt.saveObjectBinaryFast(bldg_load_out['avgdata_dict'],dict_dir+'diffdata_dict.bin')
pname_key= gvalley_pwr_key
bldg_dict.update({bldg_tag:create_bldg_obj(dict_dir,bldg_tag,pname_key)})
bldg_=obj(bldg_dict)
#cmd_str='bldg_.'+bldg_tag+'.data_out=obj(bldg_load_out)'
#exec(cmd_str)
cmd_str='bldg_.'+bldg_tag+'.gvalley_bgid_dict=gvalley_bgid_dict'
exec(cmd_str)
cmd_str='bldg_.'+bldg_tag+'.gvalley_data_list=gvalley_data_list'
exec(cmd_str)
cmd_str='bldg_obj=bldg_.'+bldg_tag
exec(cmd_str)
anal_out={}
if 'avgdata_dict' in bldg_load_out.keys():
anal_out.update({'avg':bn_prob_analysis(bldg_obj,sig_tag_='avg')})
if 'diffdata_dict' in bldg_load_out.keys():
anal_out.update({'diff':bn_prob_analysis(bldg_obj,sig_tag_='diff')})
cmd_str='bldg_.'+bldg_tag+'.anal_out=obj(anal_out)'
exec(cmd_str)
cmd_str='bldg_.'+'convert_name=convert_gvalley_name'
exec(cmd_str)
mt.saveObjectBinaryFast(bldg_ ,PROC_OUT_DIR+'Gvalley_bldg_obj.bin')
mt.saveObjectBinaryFast('LOAD_BLDG_OBJ' ,PROC_OUT_DIR+'Gvalley_bldg_obj_is_done.txt')
#######################################################################################
# Analysis For GValley
#######################################################################################
# Analysis of BN network result
BN_ANAL=1
if BN_ANAL==1:
    # Plotting individual LHs
    PLOTTING_LH=1
    if PLOTTING_LH==1:
        # Likelihood plots grouped by attribute class.
        plotting_bldg_lh(bldg_,attr_class='sensor',num_picks=30)
        plotting_bldg_lh(bldg_,attr_class='time',num_picks=30)
        plotting_bldg_lh(bldg_,attr_class='weather',num_picks=30)
    PLOTTING_BN=1
    if PLOTTING_BN==1:
        plotting_bldg_bn(bldg_)
    More_BN_ANAL=0
    if More_BN_ANAL==1:
        # Extra Steps for GValley network
        # Per-building sensor BN: hill-climb result, column names, adjacency matrix.
        bdv1_hc_b,cols,bdv1_amat=compute_bn_sensors(bldg_.Gvalley_BDV1)
        fig_name='BN for BDV1 power meters '+'CRMON_VLD_EENGY'
        plt.figure(fig_name,figsize=(25.0,25.0))
        rbn.nx_plot(bdv1_hc_b,convert_gvalley_name(cols),graph_layout='circular',node_text_size=30)
        png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
        plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
        plt.close()
        # In/out degree per node from the 0/1 adjacency matrix.
        bdv1_in=np.array([ len(np.nonzero(col==1)[0]) for col in bdv1_amat])
        bdv1_out=np.array([ len(np.nonzero(col==1)[0]) for col in bdv1_amat.T])
        bdv1_in_out=bdv1_in+bdv1_out
        Mat_hc_b,cols,Mat_amat=compute_bn_sensors(bldg_.Gvalley_MAT)
        fig_name='BN for MAT power meters '+'CRMON_VLD_EENGY'
        plt.figure(fig_name,figsize=(25.0,25.0))
        # NOTE(review): plots bdv1_hc_b again -- likely should be Mat_hc_b
        # (copy/paste from the BDV1 section above); verify.
        rbn.nx_plot(bdv1_hc_b,convert_gvalley_name(cols),graph_layout='circular',node_text_size=30)
        png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
        plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
        plt.close()
        Mat_in=np.array([ len(np.nonzero(col==1)[0]) for col in Mat_amat])
        Mat_out=np.array([ len(np.nonzero(col==1)[0]) for col in Mat_amat.T])
        Mat_in_out=Mat_in+Mat_out
        atw1_hc_b,cols,atw1_amat=compute_bn_sensors(bldg_.Gvalley_ATW1)
        fig_name='BN for ATW1 power meters '+'CRMON_VLD_EENGY'
        plt.figure(fig_name,figsize=(25.0,25.0))
        # NOTE(review): plots bdv1_hc_b again -- likely should be atw1_hc_b; verify.
        rbn.nx_plot(bdv1_hc_b,convert_gvalley_name(cols),graph_layout='circular',node_text_size=30)
        png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
        plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
        plt.close()
        atw1_in=np.array([ len(np.nonzero(col==1)[0]) for col in atw1_amat])
        atw1_out=np.array([ len(np.nonzero(col==1)[0]) for col in atw1_amat.T])
        atw1_in_out=atw1_in+atw1_out
# BN_VERIFY: cross-check BN findings against raw measurements (disabled by
# default; depends on bn_out / p_name produced by an earlier analysis run).
BN_VERIFY=0
if BN_VERIFY==1:
    # Compare with the raw data
    #-------------------------------------------
    start_t=datetime.datetime(2013, 8, 9, 0, 0, 0)
    end_t=datetime.datetime(2013, 8, 13, 0, 0, 0)
    # label_[2:] strips the 2-char signal-tag prefix from each label.
    data_x=get_data_set([label_[2:] for label_ in bn_out.all_cause_label]+[p_name[2:]],start_t,end_t)
    png_namex=plot_data_x(data_x,stype='raw',smark='-^')
    png_namex=plot_data_x(data_x,stype='diff',smark='-^')
    name_list_out=[[p_name]+bn_out.all_cause_label,convert_gsbc_name([p_name]+bn_out.all_cause_label)]
    pprint.pprint(np.array(name_list_out).T)
    pprint.pprint(name_list_out)
    # Wider window for the full label set.
    start_t=datetime.datetime(2013, 7, 1, 0, 0, 0)
    end_t=datetime.datetime(2013, 12, 31, 0, 0, 0)
    data_x=get_data_set([label_[2:] for label_ in bn_out.s_labels],start_t,end_t)
    png_namex=plot_data_x(data_x,stype='raw',smark='-^',fontsize='small',xpos=0.00)
    png_namex=plot_data_x(data_x,stype='diff',smark='-^')
    # The triple-quoted block below is dead code kept as a reference recipe
    # for the peak/low-peak likelihood heat-map figures; it is a bare string
    # literal and is never executed.
    """
    png_name=str(uuid.uuid4().get_hex().upper()[0:6])
    fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
    print '----------------------------------------'
    print 'Likelihoods '
    print '----------------------------------------'
    print cause_label+['Low Peak','High Peak']
    print '----------------------------------------'
    print np.vstack((np.int0(peak_state).T,np.int0(100*lowpeak_prob).T,np.int0(100*peak_prob).T)).T
    print '----------------------------------------'
    s_val_set=set(peak_state[:,0])
    m_val_set=set(peak_state[:,1])
    Z_peak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
    for i,s_val in enumerate(s_val_set):
        for j,m_val in enumerate(m_val_set):
            idx=np.nonzero((peak_state[:,0]==s_val)&(peak_state[:,1]==m_val))[0][0]
            Z_peak[i,j]=peak_prob[idx]
    s_val_set=set(lowpeak_state[:,0])
    m_val_set=set(lowpeak_state[:,1])
    Z_lowpeak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
    for i,s_val in enumerate(s_val_set):
        for j,m_val in enumerate(m_val_set):
            idx=np.nonzero((lowpeak_state[:,0]==s_val)&(lowpeak_state[:,1]==m_val))[0][0]
            Z_lowpeak[i,j]=lowpeak_prob[idx]
    Z_lowpeak=lowpeak_prob.reshape((len(s_val_set),len(m_val_set)))
    Z_peak=peak_prob.reshape((len(s_val_set),len(m_val_set)))
    fig1=figure()
    im = plt.imshow(Z_peak, cmap='hot',vmin=0, vmax=1,aspect='auto')
    plt.colorbar(im, orientation='horizontal')
    plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
    plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
    plt.xlabel(cause_label[1],fontsize='large')
    plt.ylabel(cause_label[0],fontsize='large')
    plt.title('Likelihood of High-Peak')
    png_name=str(uuid.uuid4().get_hex().upper()[0:6])
    fig1.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
    fig2=figure()
    im = plt.imshow(Z_lowpeak, cmap='hot',vmin=0, vmax=1,aspect='auto')
    plt.colorbar(im, orientation='horizontal')
    plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
    plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
    plt.xlabel(cause_label[1],fontsize='large')
    plt.ylabel(cause_label[0],fontsize='large')
    plt.title('Likelihood of Low-Peak')
    png_name=str(uuid.uuid4().get_hex().upper()[0:6])
    fig2.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
    """
print '**************************** End of Program ****************************'
| TinyOS-Camp/DDEA-DEV | Development/df_data_analysis_gvalley.py | Python | gpl-2.0 | 18,795 |
# This file shows a demo of showing that the MLE (precision matrix) of a GGM
# satisfies the constraints mentioned in the GGM section of the book.
import superimport

import numpy as np

from ggm_fit_htf import ggm_fit_htf

# Undirected graph structure: G[i, j] == 1 iff nodes i and j are adjacent
# (here a 4-node cycle 0-1-2-3-0).
G = np.array([0., 1., 0., 1,
              1, 0, 1, 0,
              0, 1, 0, 1,
              1, 0, 1, 0]).reshape((4, 4))

# Empirical covariance matrix of the observed data.
S = np.array([10., 1., 5., 4.,
              1., 10., 2., 6.,
              5., 2., 10., 3.,
              4., 6., 3., 10]).reshape((4, 4))

max_iter = 30

# Fit the GGM by iterative conditional fitting (HTF algorithm) and invert
# the precision matrix to recover the model covariance.
prec_mat = ggm_fit_htf(S, G, max_iter)
sigma = np.linalg.inv(prec_mat)

# Reference values (rounded to 2 decimal places) that the fit should match.
guide_sigma = np.array([10., 1., 1.31, 4,
                        1., 10., 2., 0.87,
                        1.31, 2., 10., 3,
                        4., 0.87, 3., 10.]).reshape(4, 4)

guide_prec_mat = np.array([0.12, -0.01, 0, -0.05,
                           -0.01, 0.11, -0.02, 0.,
                           0, -0.02, 0.11, -0.03,
                           -0.05, 0, -0.03, 0.13]).reshape(4, 4)

# Bug fix: the original used a one-sided comparison (x - guide < 1e-2),
# which also passes when x is arbitrarily SMALLER than the reference.
# Compare the absolute deviation instead.
assert np.all(np.abs(sigma - guide_sigma) < 1e-2)
assert np.all(np.abs(prec_mat - guide_prec_mat) < 1e-2)
| probml/pyprobml | scripts/ggm_fit_demo.py | Python | mit | 1,065 |
raise NotImplementedError("getopt is not yet implemented in Skulpt")
| ArcherSys/ArcherSys | skulpt/src/lib/getopt.py | Python | mit | 69 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: ce_command
version_added: "2.3"
author: "JackyGao2016 (@CloudEngine-Ansible)"
short_description: Run arbitrary command on HUAWEI CloudEngine devices
description:
- Sends an arbitrary command to an HUAWEI CloudEngine node and returns
the results read from the device. The ce_command module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: cloudengine
options:
commands:
description:
- The commands to send to the remote HUAWEI CloudEngine device
over the configured provider. The resulting output from the
command is returned. If the I(wait_for) argument is provided,
the module is not returned until the condition is satisfied
or the number of I(retries) has been exceeded.
required: true
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
required: false
default: null
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the I(wait_for) must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
retries:
description:
- Specifies the number of retries a command should by tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditional, the interval indicates how to long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
---
- name: run display version on remote devices
ce_command:
commands: display version
provider: "{{ cli }}"
- name: run display version and check to see if output contains HUAWEI
ce_command:
commands: display version
wait_for: result[0] contains HUAWEI
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
ce_command:
commands:
- display version
- display device
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
ce_command:
commands:
- display version
- display device
wait_for:
- result[0] contains HUAWEI
- result[1] contains Device
provider: "{{ cli }}"
- name: run commands and specify the output format
ce_command:
commands:
- command: display version
output: json
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: the set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: the conditionals that failed
returned: failed
type: list
sample: ['...', '...']
"""
from ansible.module_utils.basic import get_exception
from ansible.module_utils.network import NetworkModule, NetworkError
from ansible.module_utils.netcli import CommandRunner
from ansible.module_utils.netcli import FailedConditionsError
from ansible.module_utils.netcli import FailedConditionalError
from ansible.module_utils.netcli import AddCommandError, AddConditionError
import ansible.module_utils.cloudengine
VALID_KEYS = ['command', 'output', 'prompt', 'response']
def to_lines(stdout):
    """Yield each command response, splitting strings into lists of lines.

    Non-string items (e.g. structured JSON responses) are yielded unchanged.
    Note: ``basestring`` makes this module Python 2 only, matching the rest
    of the file.
    """
    for item in stdout:
        if isinstance(item, basestring):
            item = str(item).split('\n')
        yield item
def parse_commands(module):
    """Yield each entry of ``module.params['commands']`` as a normalized dict.

    Bare strings become ``dict(command=<str>, output=None)``.  Dict entries
    are validated: they must contain 'command', may only use keys from
    VALID_KEYS, and 'output' must be None/'text'/'json'.  Validation errors
    abort the module via fail_json.
    """
    for cmd in module.params['commands']:
        if isinstance(cmd, basestring):
            cmd = dict(command=cmd, output=None)
        elif 'command' not in cmd:
            module.fail_json(msg='command keyword argument is required')
        elif cmd.get('output') not in [None, 'text', 'json']:
            module.fail_json(msg='invalid output specified for command')
        elif not set(cmd.keys()).issubset(VALID_KEYS):
            module.fail_json(msg='unknown keyword specified')
        yield cmd
def main():
    """Module entry point: run the requested display commands and exit.

    Builds a CommandRunner from the validated command list, registers any
    wait_for conditionals, executes with the configured retries/interval/
    match policy, and exits via module.exit_json with stdout/stdout_lines
    plus any accumulated warnings.
    """
    # Ansible argument spec for this module.
    spec = dict(
        commands=dict(type='list', required=True),
        wait_for=dict(type='list', aliases=['waitfor']),
        match=dict(default='all', choices=['any', 'all']),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )
    module = NetworkModule(argument_spec=spec,
                           supports_check_mode=True)
    commands = list(parse_commands(module))
    conditionals = module.params['wait_for'] or list()
    warnings = list()
    runner = CommandRunner(module)
    for cmd in commands:
        # In check mode only read-only ('dis...') commands are executed.
        if module.check_mode and not cmd['command'].startswith('dis'):
            warnings.append('only display commands are supported when using '
                            'check mode, not executing `%s`' % cmd['command'])
        else:
            # Config-mode ('sys...') commands belong to ce_config, not here.
            if cmd['command'].startswith('sys'):
                module.fail_json(msg='ce_command does not support running '
                                     'config mode commands. Please use '
                                     'ce_config instead')
        try:
            runner.add_command(**cmd)
        except AddCommandError:
            # NOTE(review): exc is assigned but unused here; the duplicate is
            # only reported as a warning, not a failure.
            exc = get_exception()
            warnings.append('duplicate command detected: %s' % cmd)
    try:
        for item in conditionals:
            runner.add_conditional(item)
    except AddConditionError:
        exc = get_exception()
        module.fail_json(msg=str(exc), condition=exc.condition)
    runner.retries = module.params['retries']
    runner.interval = module.params['interval']
    runner.match = module.params['match']
    try:
        runner.run()
    except FailedConditionsError:
        exc = get_exception()
        module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
    except FailedConditionalError:
        exc = get_exception()
        module.fail_json(
            msg=str(exc), failed_conditional=exc.failed_conditional)
    except NetworkError:
        exc = get_exception()
        module.fail_json(msg=str(exc), **exc.kwargs)
    # Collect per-command output; ValueError marks commands skipped by
    # check mode (see warnings above).
    result = dict(changed=False)
    result['stdout'] = list()
    for cmd in commands:
        try:
            output = runner.get_command(cmd['command'], cmd.get('output'))
        except ValueError:
            output = 'command not executed due to check_mode, see warnings'
        result['stdout'].append(output)
    result['warnings'] = warnings
    result['stdout_lines'] = list(to_lines(result['stdout']))
    module.exit_json(**result)
if __name__ == '__main__':
main()
| grimmjow8/ansible | lib/ansible/modules/network/cloudengine/ce_command.py | Python | gpl-3.0 | 8,454 |
import pymssql
from enum import Enum
import random
class FakeSqlCursor(object):
    """In-memory stand-in for a DB-API cursor; no real database behind it."""

    def execute(self, sql_query):
        # Accept and discard any query text.
        return None

    def fetchone(self):
        # Mimic a one-column row holding a fresh random float.
        return [random.random()]


class FakeSqlConn(object):
    """In-memory stand-in for a DB-API connection."""

    def cursor(self):
        return FakeSqlCursor()


class TestConnection(object):
    """Context manager yielding a fake connection for tests.

    Accepts (and ignores) the same three positional arguments as
    ServerConnection so the two are interchangeable.
    """

    def __init__(self, a, b, c):
        # Placeholder arguments mirroring (server_name, user, password).
        pass

    def __enter__(self):
        self.conn = FakeSqlConn()
        return self.conn

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to release for the fake connection.
        return None
class ServerConnection(object):
    """Context manager wrapping a live pymssql connection.

    The connection is opened on ``__enter__`` and closed on ``__exit__``.
    """

    def __init__(self, server_name, user, password):
        self.server_name = server_name
        self.user = user
        self.password = password

    def __enter__(self):
        # Connect lazily, only once the with-block is entered.
        self.conn = pymssql.connect(self.server_name, self.user, self.password)
        return self.conn

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always close, whether or not the block raised.
        self.conn.close()
class AvailableConnections(Enum):
    # Known connection targets accepted by get_connection().
    test = 1  # in-memory fakes; no real database required
    bgu = 2   # SQL Server instance at BGU
def get_connection(db_type):
    """Return a connection context manager for an AvailableConnections member.

    Raises Exception when db_type is not a recognized member.
    """
    if db_type == AvailableConnections.test:
        # Fake backend: the three credentials are placeholders, never used.
        return TestConnection("a", "a", "a")
    if db_type == AvailableConnections.bgu:
        # SECURITY NOTE(review): credentials are hard-coded in source;
        # consider loading them from the environment or a secrets store.
        return ServerConnection("sqlsrv.cs.bgu.ac.il", "noamant", "1qa@WS")
    raise Exception(
        "Non-familiar DB type. DB types: {0}".format(repr(AvailableConnections)))
| DejaToris/MetaphorResearch | research_tools/DbAccess.py | Python | mit | 1,304 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Page objects for integration tests for modules/embed."""
__author__ = [
'John Cox ([email protected])',
]
from models import transforms
from tests.integration import pageobjects
from selenium.common import exceptions
from selenium.webdriver.common import by
from selenium.webdriver.support import expected_conditions
# Sentinel "state" types used as the wait_for= argument of
# DemoPage.load_embed() to name which rendering state an embed iframe must
# reach before control returns to the test.
class StateError(object):
    """Type for embeds rendering an error."""
class StateEmbed(object):
    """Type for embeds rendering embed content."""
class StateSignIn(object):
    """Type for embeds rendering the sign-in control."""
class AbstractIframePageObject(pageobjects.PageObject):
    """Base class for pages that have or are in iframes."""
    def is_embed_page(self):
        # Embed content is detected via the last <p>, which contains
        # 'Greetings' once the example embed has rendered.
        try:
            ps = self._tester.driver.find_elements_by_tag_name('p')
            return bool(ps and 'Greetings' in ps[-1].text)
        except exceptions.NoSuchElementException:
            return False
    def is_error_page(self):
        # The error view renders an <h1> containing 'Embed misconfigured'.
        try:
            header = self._tester.driver.find_element_by_tag_name('h1')
            return 'Embed misconfigured' in header.text
        except exceptions.NoSuchElementException:
            return False
    def is_sign_in_page(self):
        # The sign-in view is identified by its sign-in button element.
        try:
            return bool(self._tester.driver.find_element_by_class_name(
                'cb-embed-sign-in-button'))
        except exceptions.NoSuchElementException:
            return False
    def switch_from_iframe(self):
        # Return the driver's focus to the top-level document.
        self._tester.driver.switch_to_default_content()
    def switch_to_iframe(self, iframe):
        # Focus the driver on the given iframe element.
        self._tester.driver.switch_to_frame(iframe)
class AbstractIframeContentsPageObject(AbstractIframePageObject):
    """Base page object for pages contained in iframes."""
    def __init__(self, tester, iframe):
        super(AbstractIframeContentsPageObject, self).__init__(tester)
        # The iframe element this page object lives inside.
        self._iframe = iframe
    def switch_to_iframe(self):
        # Unlike the base class, the target iframe is fixed at construction.
        self._tester.driver.switch_to_frame(self._iframe)
class DemoPage(AbstractIframePageObject):
    # The embed states load_embed() knows how to wait for.
    _STATES = [
        StateError,
        StateEmbed,
        StateSignIn,
    ]
    def get_cb_embed_elements(self):
        # Block until at least one <cb-embed> is visible, then return all.
        self.wait().until(expected_conditions.visibility_of_element_located(
            (by.By.TAG_NAME, 'cb-embed')))
        return self._tester.driver.find_elements_by_tag_name('cb-embed')
    def get_iframe(self, cb_embed):
        # Block until the embed has populated its child iframe.
        def iframe_present(_):
            return bool(cb_embed.find_element_by_tag_name('iframe'))
        self.wait().until(iframe_present)
        return cb_embed.find_element_by_tag_name('iframe')
    def get_page(self, iframe):
        # Map whatever the iframe currently renders to its page object.
        # Caller must already have switched into the iframe.
        if self.is_embed_page():
            return ExampleEmbedPage(self._tester, iframe)
        elif self.is_error_page():
            return ErrorPage(self._tester, iframe)
        elif self.is_sign_in_page():
            return SignInPage(self._tester, iframe)
        else:
            raise TypeError('No matching page object found')
    def is_state_valid(self, state):
        return state in self._STATES
    def load(self, url):
        self.get(url)
        return self
    def load_embed(self, cb_embed, wait_for=StateEmbed):
        # Wait until the embed's iframe reaches the requested state, then
        # return the matching page object.
        if not self.is_state_valid(wait_for):
            raise ValueError('Invalid state: %s' % wait_for)
        iframe = self.get_iframe(cb_embed)
        self.switch_to_iframe(iframe)
        def iframe_populated(_):
            # Must always block until embed is in the state the caller requires.
            # Otherwise, timing issues could cause (for example) tests to run
            # against the widget in the sign-in state that expect the widget to
            # be displaying content immediately after sign-in. Note that this
            # does not replace asserts against embed contents in any particular
            # state -- it merely ensures the widget is displaying the right
            # state for the assert to run. All checks against widget contents
            # must come after these blocking calls.
            if wait_for is StateEmbed:
                return self.is_embed_page()
            elif wait_for is StateError:
                return self.is_error_page()
            elif wait_for is StateSignIn:
                return self.is_sign_in_page()
        self.wait().until(iframe_populated)
        page = self.get_page(iframe)
        self.switch_from_iframe()
        return page
    def login(self, email):
        # Sign in through the first embed's sign-in control.
        cb_embed = self.get_cb_embed_elements()[0]
        sign_in_page = self.load_embed(
            cb_embed, wait_for=StateSignIn)
        sign_in_page.click().login(email)
class ErrorPage(AbstractIframeContentsPageObject):
    def has_error(self, text):
        # True iff, once the error view has loaded, any <li> contains `text`.
        self.switch_to_iframe()
        def loaded(_):
            return self.is_error_page()
        self.wait().until(loaded)
        found = False
        for li in self._tester.driver.find_elements_by_tag_name('li'):
            if text in li.text:
                found = True
                break
        self.switch_from_iframe()
        return found
class ExampleEmbedPage(AbstractIframeContentsPageObject):
    def get_text(self):
        # Return the text of the embed's final <p> once content has loaded.
        self.switch_to_iframe()
        def loaded(_):
            return self.is_embed_page()
        self.wait().until(loaded)
        text = self._tester.driver.find_elements_by_tag_name('p')[-1].text
        self.switch_from_iframe()
        return text
class SignInPage(AbstractIframeContentsPageObject):
    def click(self):
        # Click the sign-in button inside the embed iframe; returns self so
        # callers can chain .click().login(email).
        self.switch_to_iframe()
        self._tester.driver.find_element_by_css_selector(
            '.cb-embed-sign-in-button').click()
        self.switch_from_iframe()
        return self
    def login(self, email):
        # Complete login in the popup window, then return focus to the
        # original window.
        last_window_handle = self._tester.driver.current_window_handle
        self.switch_to_login_window(last_window_handle)
        login_page = pageobjects.LoginPage(self._tester)
        login_page.login(email, post_wait=False)
        self._tester.driver.switch_to_window(last_window_handle)
    def get_text(self):
        # Return the sign-in button's label text.
        self.switch_to_iframe()
        text = self._tester.driver.find_element_by_css_selector(
            '.cb-embed-sign-in-button').text
        self.switch_from_iframe()
        return text
    def switch_to_login_window(self, from_handle):
        # Switch to the login window, which cannot be the current window. To
        # avoid interleaving with other tests, do not rely on window order.
        # Instead, cycle through candidates and pick the first with the correct
        # title. We make no attempt to guard against the test executor
        # running multiple tests in the same browser that hit login > 1 times
        # concurrently.
        get_other_handles = lambda: [
            h for h in self._tester.driver.window_handles if h != from_handle]
        def other_windows_exist(_):
            return bool(get_other_handles())
        self.wait().until(other_windows_exist)
        for candidate_handle in get_other_handles():
            self._tester.driver.switch_to_window(candidate_handle)
            if self._tester.driver.title != 'Login':
                self._tester.driver.switch_to_window(from_handle)
            else:
                return
        raise exceptions.InvalidSwitchToTargetException(
            'Unable to find login window')
class EnsureSessionExamplePage(pageobjects.PageObject):
    # Static example page served by the external static server.
    URL = 'modules/embed/ext/ensure-session-example.html'
    def load(
            self, static_server_base_url, course_builder_base_url,
            redirect=False):
        # Load the example page, passing the CB host (and optional redirect
        # flag) to the page's JS via a JSON fragment in the URL hash.
        config = {}
        config['cbHost'] = course_builder_base_url
        if redirect:
            config['redirect'] = True
        self.get('%s/%s#%s' % (
            static_server_base_url, self.URL, transforms.dumps(config)))
        if redirect:
            # Redirect mode lands on the login page first.
            return pageobjects.LoginPage(
                self._tester, continue_page=EnsureSessionExamplePage)
        else:
            return self
    def _get_start_button(self, pre_wait=True):
        # Check that the page is visible
        self._tester.assertIsNotNone(
            self.find_element_by_id('ensure-session-example-para-1'))
        buttons = self.find_elements_by_css_selector(
            '.cb-embed-sign-in-button', pre_wait=pre_wait)
        if buttons:
            return buttons[0]
        else:
            return None
    def assert_start_button_is_visible(self):
        self._tester.assertIsNotNone(self._get_start_button())
        return self
    def assert_start_button_is_not_visible(self):
        # pre_wait=False: absence should be checked without blocking.
        self._tester.assertIsNone(self._get_start_button(pre_wait=False))
        return self
    def click_start_button(self):
        # Clicking the start button leads to the login flow.
        self._get_start_button().click()
        return pageobjects.LoginPage(
            self._tester, continue_page=EnsureSessionExamplePage)
| GirlsCodePy/girlscode-coursebuilder | modules/embed/embed_pageobjects.py | Python | gpl-3.0 | 9,341 |
# -*- coding: iso-8859-1 -*-
# Copyright (c) 2000, Amit Patel
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Amit Patel nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import time
import heapq
from .. import log, LOG_PROXY
# Min-heap of pending (absolute_due_time, callback) pairs.
timers = []


def make_timer(delay, callback):
    """Schedule CALLBACK to be run DELAY seconds from now."""
    log.debug(LOG_PROXY, "Adding %s to %d timers", callback, len(timers))
    heapq.heappush(timers, (time.time() + delay, callback))


def run_timers():
    """Fire every due timer; return seconds until the next one, else None.

    A timer scheduled within the next 10 ms counts as due: select() has
    limited precision and may return slightly early, and running an event a
    few milliseconds ahead of schedule is acceptable.
    """
    while timers and timers[0][0] <= time.time() + 0.01:
        due, callback = heapq.heappop(timers)
        callback()
    if not timers:
        # None tells the caller not to time out at all.
        return None
    # Clamp to [0, 60] so the timeout is neither negative nor unbounded.
    remaining = timers[0][0] - time.time()
    return max(min(remaining, 60), 0)
| HomeRad/TorCleaner | wc/proxy/timer.py | Python | gpl-2.0 | 2,654 |
# -*- coding: utf-8 -*-
# Copyright (c) 2003, Taro Ogawa. All Rights Reserved.
# Copyright (c) 2013, Savoir-faire Linux inc. All Rights Reserved.
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
from __future__ import unicode_literals
# Number words for 0-19; index == value.
to_19 = (u'không', u'một', u'hai', u'ba', u'bốn', u'năm', u'sáu',
         u'bảy', u'tám', u'chín', u'mười', u'mười một', u'mười hai',
         u'mười ba', u'mười bốn', u'mười lăm', u'mười sáu', u'mười bảy',
         u'mười tám', u'mười chín')

# Multiples of ten; tens[i] is the word for 20 + 10*i.
tens = (u'hai mươi', u'ba mươi', u'bốn mươi', u'năm mươi',
        u'sáu mươi', u'bảy mươi', u'tám mươi', u'chín mươi')

# denom[i] names 1000**i (English placeholders beyond 10**14).
denom = ('',
         u'nghìn', u'triệu', u'tỷ', u'nghìn tỷ', u'trăm nghìn tỷ',
         'Quintillion', 'Sextillion', 'Septillion', 'Octillion', 'Nonillion',
         'Decillion', 'Undecillion', 'Duodecillion', 'Tredecillion',
         'Quattuordecillion', 'Sexdecillion', 'Septendecillion',
         'Octodecillion', 'Novemdecillion', 'Vigintillion')


class Num2Word_VI(object):
    """Convert numbers to their spelled-out Vietnamese form."""

    def _convert_nn(self, val):
        """Spell out 0 <= val < 100."""
        if val < 20:
            return to_19[val]
        for (dcap, dval) in ((k, 20 + (10 * v)) for (v, k) in enumerate(tens)):
            # dval is the tens value named by dcap; match when val is in
            # [dval, dval + 10).
            if dval + 10 > val:
                units = val % 10
                if not units:
                    return dcap
                # Irregular unit digits after a tens word:
                # 1 -> 'mốt', 5 -> 'lăm'.  (The original code also assigned
                # a dead placeholder value here, removed as a cleanup.)
                if units == 1:
                    unit_word = u'mốt'
                elif units == 5:
                    unit_word = u'lăm'
                else:
                    unit_word = to_19[units]
                return dcap + ' ' + unit_word

    def _convert_nnn(self, val):
        """Spell out 0 <= val < 1000."""
        word = ''
        (mod, rem) = (val % 100, val // 100)
        if rem > 0:
            word = to_19[rem] + u' trăm'
            if mod > 0:
                word = word + ' '
        if mod > 0 and mod < 10:
            # 'lẻ' joins a hundreds word to a lone unit digit.
            # (Replaces the original opaque `x and a or b` idiom with
            # equivalent conditional expressions.)
            if mod == 5:
                word = word + u'lẻ năm' if word != '' else u'năm'
            else:
                word = (word + u'lẻ ' + self._convert_nn(mod)
                        if word != '' else self._convert_nn(mod))
        if mod >= 10:
            word = word + self._convert_nn(mod)
        return word

    def vietnam_number(self, val):
        """Spell out an arbitrary non-negative integer."""
        if val < 100:
            return self._convert_nn(val)
        if val < 1000:
            return self._convert_nnn(val)
        for (didx, dval) in ((v - 1, 1000 ** v) for v in range(len(denom))):
            # First power of 1000 strictly above val determines the leading
            # group: val = lval * 1000**didx + r.
            if dval > val:
                mod = 1000 ** didx
                lval = val // mod
                r = val - (lval * mod)
                ret = self._convert_nnn(lval) + u' ' + denom[didx]
                if 99 >= r > 0:
                    # 'lẻ' marks the gap (zero hundreds) before a small
                    # remainder, e.g. 1005 -> 'một nghìn lẻ năm'.
                    ret = self._convert_nnn(lval) + u' ' + denom[didx] + u' lẻ'
                if r > 0:
                    ret = ret + ' ' + self.vietnam_number(r)
                return ret

    def number_to_text(self, number):
        """Spell out *number* rendered with two decimal places.

        A non-zero fractional part is read as an integer after the word
        'phẩy' (decimal separator), e.g. 1.5 -> 'một phẩy năm mươi'.
        """
        number = '%.2f' % number
        the_list = str(number).split('.')
        start_word = self.vietnam_number(int(the_list[0]))
        final_result = start_word
        if len(the_list) > 1 and int(the_list[1]) > 0:
            end_word = self.vietnam_number(int(the_list[1]))
            final_result = final_result + ' phẩy ' + end_word
        return final_result

    def to_cardinal(self, number):
        """Public entry point: the cardinal form of *number*."""
        return self.number_to_text(number)

    def to_ordinal(self, number):
        """Ordinals are not specialized in Vietnamese; same as cardinal."""
        return self.to_cardinal(number)
| savoirfairelinux/num2words | num2words/lang_VI.py | Python | lgpl-2.1 | 4,151 |
from pybuilder.core import Author, init, use_plugin
use_plugin("python.core")
use_plugin("python.distutils")
use_plugin("source_distribution")
use_plugin("python.install_dependencies")
use_plugin("python.coverage")
use_plugin("python.unittest")
use_plugin("python.flake8")
use_plugin("python.frosted")
default_task = ["clean", "publish"]
summary = "A Python implementation of the To Cry a Joust rule set."
authors = [Author("Chris Rees", "[email protected]")]
url = "https://github.com/Serneum/jousting-core"
license = "Apache License, Version 2.0"
version = "1.0"
@init
def initialize(project):
project.build_depends_on("coveralls")
project.build_depends_on("mockito")
project.set_property("verbose", True)
project.set_property("coverage_threshold_warn", 90)
project.set_property("flake8_include_test_sources", True)
project.set_property("flake8_break_build", True)
project.set_property("flake8_include_test_sources", True)
project.set_property('frosted_include_test_sources', True)
project.set_property("distutils_classifiers", [
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Games/Entertainment :: Simulation',
'Topic :: Software Development :: Libraries :: Python Modules']) | Serneum/jousting-core | build.py | Python | apache-2.0 | 1,625 |
from typing import NamedTuple
nt = NamedTuple("name", field=str) | dahlstrom-g/intellij-community | python/testData/stubs/ImportedTypingNamedTupleKwargs.py | Python | apache-2.0 | 65 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class Paths(object):
"""Paths operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
    def __init__(self, client, config, serializer, deserializer):
        # Service client used to build, format, and send HTTP requests.
        self._client = client
        # Serializer/deserializer for request parameters and response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config
def get_boolean_true(
self, bool_path=False, custom_headers=None, raw=False, **operation_config):
"""Get true Boolean value on path.
:param bool_path: true boolean value
:type bool_path: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/bool/true/{boolPath}'
path_format_arguments = {
'boolPath': self._serialize.url("bool_path", bool_path, 'bool')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_boolean_false(
self, bool_path=False, custom_headers=None, raw=False, **operation_config):
"""Get false Boolean value on path.
:param bool_path: false boolean value
:type bool_path: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/bool/false/{boolPath}'
path_format_arguments = {
'boolPath': self._serialize.url("bool_path", bool_path, 'bool')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_int_one_million(
self, int_path=1000000, custom_headers=None, raw=False, **operation_config):
"""Get '1000000' integer value.
:param int_path: '1000000' integer value
:type int_path: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/int/1000000/{intPath}'
path_format_arguments = {
'intPath': self._serialize.url("int_path", int_path, 'int')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_int_negative_one_million(
self, int_path=-1000000, custom_headers=None, raw=False, **operation_config):
"""Get '-1000000' integer value.
:param int_path: '-1000000' integer value
:type int_path: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/int/-1000000/{intPath}'
path_format_arguments = {
'intPath': self._serialize.url("int_path", int_path, 'int')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_ten_billion(
self, long_path=10000000000, custom_headers=None, raw=False, **operation_config):
"""Get '10000000000' 64 bit integer value.
:param long_path: '10000000000' 64 bit integer value
:type long_path: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/long/10000000000/{longPath}'
path_format_arguments = {
'longPath': self._serialize.url("long_path", long_path, 'long')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_negative_ten_billion(
self, long_path=-10000000000, custom_headers=None, raw=False, **operation_config):
"""Get '-10000000000' 64 bit integer value.
:param long_path: '-10000000000' 64 bit integer value
:type long_path: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/long/-10000000000/{longPath}'
path_format_arguments = {
'longPath': self._serialize.url("long_path", long_path, 'long')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def float_scientific_positive(
self, float_path=1.034E+20, custom_headers=None, raw=False, **operation_config):
"""Get '1.034E+20' numeric value.
:param float_path: '1.034E+20'numeric value
:type float_path: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/float/1.034E+20/{floatPath}'
path_format_arguments = {
'floatPath': self._serialize.url("float_path", float_path, 'float')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def float_scientific_negative(
self, float_path=-1.034E-20, custom_headers=None, raw=False, **operation_config):
"""Get '-1.034E-20' numeric value.
:param float_path: '-1.034E-20'numeric value
:type float_path: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/float/-1.034E-20/{floatPath}'
path_format_arguments = {
'floatPath': self._serialize.url("float_path", float_path, 'float')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def double_decimal_positive(
self, double_path=9999999.999, custom_headers=None, raw=False, **operation_config):
"""Get '9999999.999' numeric value.
:param double_path: '9999999.999'numeric value
:type double_path: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/double/9999999.999/{doublePath}'
path_format_arguments = {
'doublePath': self._serialize.url("double_path", double_path, 'float')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def double_decimal_negative(
self, double_path=-9999999.999, custom_headers=None, raw=False, **operation_config):
"""Get '-9999999.999' numeric value.
:param double_path: '-9999999.999'numeric value
:type double_path: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/double/-9999999.999/{doublePath}'
path_format_arguments = {
'doublePath': self._serialize.url("double_path", double_path, 'float')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_unicode(
self, string_path="啊齄丂狛狜隣郎隣兀﨩", custom_headers=None, raw=False, **operation_config):
"""Get '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value.
:param string_path: '啊齄丂狛狜隣郎隣兀﨩'multi-byte string value
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/unicode/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_url_encoded(
self, string_path="begin!*'();:@ &=+$,/?#[]end", custom_headers=None, raw=False, **operation_config):
"""Get 'begin!*'();:@ &=+$,/?#[]end.
:param string_path: 'begin!*'();:@ &=+$,/?#[]end' url encoded string
value
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/begin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_empty(
self, string_path="", custom_headers=None, raw=False, **operation_config):
"""Get ''.
:param string_path: '' string value
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/empty/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_null(
self, string_path, custom_headers=None, raw=False, **operation_config):
"""Get null (should throw).
:param string_path: null string value
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/null/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def enum_valid(
self, enum_path, custom_headers=None, raw=False, **operation_config):
"""Get using uri with 'green color' in path parameter.
:param enum_path: send the value green. Possible values include: 'red
color', 'green color', 'blue color'
:type enum_path: str or :class:`UriColor
<fixtures.acceptancetestsurl.models.UriColor>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/enum/green%20color/{enumPath}'
path_format_arguments = {
'enumPath': self._serialize.url("enum_path", enum_path, 'UriColor')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def enum_null(
self, enum_path, custom_headers=None, raw=False, **operation_config):
"""Get null (should throw on the client before the request is sent on
wire).
:param enum_path: send null should throw. Possible values include:
'red color', 'green color', 'blue color'
:type enum_path: str or :class:`UriColor
<fixtures.acceptancetestsurl.models.UriColor>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/null/{enumPath}'
path_format_arguments = {
'enumPath': self._serialize.url("enum_path", enum_path, 'UriColor')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_multi_byte(
self, byte_path, custom_headers=None, raw=False, **operation_config):
"""Get '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array.
:param byte_path: '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte
array
:type byte_path: bytearray
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/byte/multibyte/{bytePath}'
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_empty(
self, byte_path=bytearray("", encoding="utf-8"), custom_headers=None, raw=False, **operation_config):
"""Get '' as byte array.
:param byte_path: '' as byte array
:type byte_path: bytearray
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/byte/empty/{bytePath}'
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_null(
self, byte_path, custom_headers=None, raw=False, **operation_config):
"""Get null as byte array (should throw).
:param byte_path: null as byte array (should throw)
:type byte_path: bytearray
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/byte/null/{bytePath}'
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_valid(
self, date_path, custom_headers=None, raw=False, **operation_config):
"""Get '2012-01-01' as date.
:param date_path: '2012-01-01' as date
:type date_path: date
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/date/2012-01-01/{datePath}'
path_format_arguments = {
'datePath': self._serialize.url("date_path", date_path, 'date')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_null(
self, date_path, custom_headers=None, raw=False, **operation_config):
"""Get null as date - this should throw or be unusable on the client
side, depending on date representation.
:param date_path: null as date (should throw)
:type date_path: date
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/date/null/{datePath}'
path_format_arguments = {
'datePath': self._serialize.url("date_path", date_path, 'date')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_time_valid(
self, date_time_path, custom_headers=None, raw=False, **operation_config):
"""Get '2012-01-01T01:01:01Z' as date-time.
:param date_time_path: '2012-01-01T01:01:01Z' as date-time
:type date_time_path: datetime
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/datetime/2012-01-01T01%3A01%3A01Z/{dateTimePath}'
path_format_arguments = {
'dateTimePath': self._serialize.url("date_time_path", date_time_path, 'iso-8601')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_time_null(
self, date_time_path, custom_headers=None, raw=False, **operation_config):
"""Get null as date-time, should be disallowed or throw depending on
representation of date-time.
:param date_time_path: null as date-time
:type date_time_path: datetime
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/datetime/null/{dateTimePath}'
path_format_arguments = {
'dateTimePath': self._serialize.url("date_time_path", date_time_path, 'iso-8601')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def base64_url(
self, base64_url_path, custom_headers=None, raw=False, **operation_config):
"""Get 'lorem' encoded value as 'bG9yZW0' (base64url).
:param base64_url_path: base64url encoded value
:type base64_url_path: bytes
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/bG9yZW0/{base64UrlPath}'
path_format_arguments = {
'base64UrlPath': self._serialize.url("base64_url_path", base64_url_path, 'base64')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
    def array_csv_in_path(
            self, array_path, custom_headers=None, raw=False, **operation_config):
        """Get an array of string ['ArrayPath1', 'begin!*'();:@ &=+$,/?#[]end' ,
        null, ''] using the csv-array format.
        :param array_path: an array of string ['ArrayPath1', 'begin!*'();:@
         &=+$,/?#[]end' , null, ''] using the csv-array format
        :type array_path: list of str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL.  The literal segment is the percent-encoded value
        # the test server expects; {arrayPath} is substituted below.
        url = '/paths/array/ArrayPath1%2cbegin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend%2c%2c/{arrayPath}'
        path_format_arguments = {
            # div=',' joins the list elements with commas (csv-array format).
            'arrayPath': self._serialize.url("array_path", array_path, '[str]', div=',')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        # Any status other than 200 is surfaced as a service error.
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        if raw:
            # Caller asked for the raw transport response as well.
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def unix_time_url(
            self, unix_time_url_path, custom_headers=None, raw=False, **operation_config):
        """Get the date 2016-04-13 encoded value as '1460505600' (Unix time).
        :param unix_time_url_path: Unix time encoded value
        :type unix_time_url_path: datetime
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/paths/int/1460505600/{unixTimeUrlPath}'
        path_format_arguments = {
            # 'unix-time' serialization turns the datetime into epoch seconds.
            'unixTimeUrlPath': self._serialize.url("unix_time_url_path", unix_time_url_path, 'unix-time')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        # Any status other than 200 is surfaced as a service error.
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        if raw:
            # Caller asked for the raw transport response as well.
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
| John-Hart/autorest | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/Url/autoresturltestservice/operations/paths.py | Python | mit | 44,662 |
import math ## Used for math.ceil when rounding the weighted sum up to a multiple of ten
import sys ## Used for sys.exit when the user chooses to quit
print('GCSE Controlled Assesment A453\nThomas Bass 4869\nTask 1')
def start(): ## Main process
    """Top-level menu.

    Asks whether to calculate the 8th GTIN digit from 7 data digits ('c')
    or to verify a complete 8-digit GTIN number ('v'), then dispatches to
    check() with the expected input length.  Any other answer re-prompts.
    """
    # NOTE(review): the prompt's "vertify" typo is kept byte-for-byte so the
    # user-visible output is unchanged.
    ask = input('Press [c] to calculate the 8th GTIN Number from 7 numbers. \nPress [v] to vertify an 8 digit GTIN Number \n')
    if ask in ('c', 'C'):
        # Calculate mode: the user supplies only the 7 data digits.
        check(7)
    elif ask in ('v', 'V'):
        # Verify mode: the user supplies all 8 digits, check digit included.
        check(8)
    else:
        print('Error: Please enter either \'c\' or \'v\' ')
        start()
def check(length):
    """Read a GTIN of `length` digits and compute or verify its check digit.

    length == 7: compute and print the 8th (check) digit.
    length == 8: verify the supplied 8th digit against the computed one.
    Invalid input (wrong length or non-digits) re-prompts.
    """
    print('Enter the', length, 'digit GTIN number')
    gtin = input(': ')
    if len(gtin) != length or not gtin.isnumeric():
        # Reject anything that is not exactly `length` decimal digits.
        print('Error: Only', length, 'numbers are allowed. Try again ')
        check(length)
        return
    # GTIN-8 weighting over the first 7 digits: positions 1,3,5,7 (indices
    # 0,2,4,6) count triple, positions 2,4,6 (indices 1,3,5) count single.
    # The 8th digit, when present, is the check digit being verified and
    # takes no part in the sum.
    total = sum(int(d) * 3 for d in gtin[0:7:2]) + sum(int(d) for d in gtin[1:7:2])
    # The check digit is the distance from the weighted sum up to the next
    # multiple of ten (0 when the sum is already a multiple of ten).
    rounded = (int(math.ceil(total / 10.0)) * 10)
    result = (rounded - total)
    if length == 7:
        print('Final Check Digit = ', result)
        print('Whole GTIN-8 Number = ', gtin, result)
    else:
        if int(gtin[length - 1]) == result:
            print(gtin, 'is a Valid Number')
        else:
            print(gtin, 'is an Invalid Number')
    park()
def park():
    """Ask whether to run again: exit on 'n', restart on 'y', re-prompt otherwise."""
    again = input('Do you want to calculate or verify another number? \n[n] No [y] Yes: ')
    if again in ('n', 'N'):
        sys.exit()
    elif again in ('y', 'Y'):
        start()
    else:
        # Originally any other answer silently fell through and ended the
        # program; re-prompt instead so a typo does not lose the session.
        print('Error: Please enter either \'y\' or \'n\' ')
        park()
## Entry point: kick off the interactive menu loop.
start()
| electric-blue-green/GSCE-Coursework-GTIN | Task 1/Development/0.x/0.4.py | Python | apache-2.0 | 1,640 |
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import urllib
import cgi
from invenio.config import \
CFG_CERN_SITE, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_SECURE_URL, \
CFG_SITE_URL, \
CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS, \
CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS, \
CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS, \
CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SITE_RECORD
from invenio.access_control_config import CFG_EXTERNAL_AUTH_USING_SSO, \
CFG_EXTERNAL_AUTH_LOGOUT_SSO
from invenio.urlutils import make_canonical_urlargd, create_url, create_html_link
from invenio.htmlutils import escape_html, nmtoken_from_string
from invenio.messages import gettext_set_language, language_list_long
from invenio.websession_config import CFG_WEBSESSION_GROUP_JOIN_POLICY
class Template:
def tmpl_back_form(self, ln, message, url, link):
"""
A standard one-message-go-back-link page.
Parameters:
- 'ln' *string* - The language to display the interface in
- 'message' *string* - The message to display
- 'url' *string* - The url to go back to
- 'link' *string* - The link text
"""
out = """
<table>
<tr>
<td align="left">%(message)s
<a href="%(url)s">%(link)s</a></td>
</tr>
</table>
"""% {
'message' : message,
'url' : url,
'link' : link,
'ln' : ln
}
return out
def tmpl_external_setting(self, ln, key, value):
_ = gettext_set_language(ln)
out = """
<tr>
<td align="right"><strong>%s:</strong></td>
<td><i>%s</i></td>
</tr>""" % (key, value)
return out
    def tmpl_external_user_settings(self, ln, html_settings):
        """Render the read-only 'External account settings' section.

        Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'html_settings' *string* - pre-rendered <tr> rows (typically from
            tmpl_external_setting) inserted verbatim into the table
        """
        _ = gettext_set_language(ln)
        out = """
      <p><big><strong class="headline">%(external_user_settings)s</strong></big></p>
      <table>
        %(html_settings)s
      </table>
      <p><big><strong class="headline">%(external_user_groups)s</strong></big></p>
      <p>%(consult_external_groups)s</p>
      """ % {
          'external_user_settings' : _('External account settings'),
          'html_settings' : html_settings,
          'consult_external_groups' : _('You can consult the list of your external groups directly in the %(x_url_open)sgroups page%(x_url_close)s.') % {
              'x_url_open' : '<a href="../yourgroups/display?ln=%s#external_groups">' % ln,
              'x_url_close' : '</a>'
          },
          'external_user_groups' : _('External user groups'),
      }
        return out
    def tmpl_user_preferences(self, ln, email, email_disabled, password_disabled, nickname):
        """
        Displays a form for the user to change his email/password.
        Parameters:
          - 'ln' *string* - The language to display the interface in
          - 'email' *string* - The email of the user
          - 'email_disabled' *boolean* - If the user has the right to edit his email
          - 'password_disabled' *boolean* - If the user has the right to edit his password
          - 'nickname' *string* - The nickname of the user (empty string if user does not have it)
        """
        # load the right message language
        _ = gettext_set_language(ln)
        # Nickname/email form.  When the nickname is already set it is shown
        # as fixed text (see nickname_prefix/suffix below): once chosen it
        # cannot be changed because it signs comments and reviews.
        out = """
                <p><big><strong class="headline">%(edit_params)s</strong></big></p>
                <form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_logins_settings">
                <p>%(change_user)s</p>
                <table>
                  <tr><td align="right" valign="top"><strong>
                      <label for="nickname">%(nickname_label)s:</label></strong><br />
                      <small class="important">(%(mandatory)s)</small>
                    </td><td valign="top">
                      %(nickname_prefix)s%(nickname)s%(nickname_suffix)s<br />
                      <small><span class="quicknote">%(note)s:</span>
                       %(fixed_nickname_note)s
                      </small>
                    </td>
                  </tr>
                  <tr><td align="right"><strong>
                      <label for="email">%(new_email)s:</label></strong><br />
                      <small class="important">(%(mandatory)s)</small>
                    </td><td>
                      <input type="text" size="25" name="email" id="email" %(email_disabled)s value="%(email)s" /><br />
                      <small><span class="quicknote">%(example)s:</span>
                        <span class="example">[email protected]</span>
                      </small>
                    </td>
                  </tr>
                  <tr><td></td><td align="left">
                      <code class="blocknote"><input class="formbutton" type="submit" value="%(set_values)s" /></code>
                    </td></tr>
                </table>
                <input type="hidden" name="action" value="edit" />
                </form>
                """ % {
          'change_user' : _("If you want to change your email or set for the first time your nickname, please set new values in the form below."),
          'edit_params' : _("Edit login credentials"),
          'nickname_label' : _("Nickname"),
          'nickname' : nickname,
          'nickname_prefix' : nickname=='' and '<input type="text" size="25" name="nickname" id="nickname" value=""' or '',
          'nickname_suffix' : nickname=='' and '" /><br /><small><span class="quicknote">'+_("Example")+':</span><span class="example">johnd</span></small>' or '',
          'new_email' : _("New email address"),
          'mandatory' : _("mandatory"),
          'example' : _("Example"),
          'note' : _("Note"),
          'set_values' : _("Set new values"),
          'email' : email,
          'email_disabled' : email_disabled and "readonly" or "",
          'sitesecureurl': CFG_SITE_SECURE_URL,
          'fixed_nickname_note' : _('Since this is considered as a signature for comments and reviews, once set it can not be changed.')
        }
        # Password change form: only for internal accounts (not SSO) whose
        # password the user is allowed to edit.
        if not password_disabled and not CFG_EXTERNAL_AUTH_USING_SSO:
            out += """
                <form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_password">
                <p>%(change_pass)s</p>
                <table>
                  <tr>
                    <td align="right"><strong><label for="old_password">%(old_password)s:</label></strong><br />
                    </td><td align="left">
                      <input type="password" size="25" name="old_password" id="old_password" %(password_disabled)s /><br />
                      <small><span class="quicknote">%(note)s:</span>
                       %(old_password_note)s
                      </small>
                    </td>
                  </tr>
                  <tr>
                    <td align="right"><strong><label for="new_password">%(new_password)s:</label></strong><br />
                    </td><td align="left">
                      <input type="password" size="25" name="password" id="new_password" %(password_disabled)s /><br />
                      <small><span class="quicknote">%(note)s:</span>
                       %(password_note)s
                      </small>
                    </td>
                  </tr>
                  <tr>
                    <td align="right"><strong><label for="new_password2">%(retype_password)s:</label></strong></td>
                    <td align="left">
                      <input type="password" size="25" name="password2" id="new_password2" %(password_disabled)s value="" />
                    </td>
                  </tr>
                  <tr><td></td><td align="left">
                      <code class="blocknote"><input class="formbutton" type="submit" value="%(set_values)s" /></code>
                    </td></tr>
                </table>
                <input type="hidden" name="action" value="edit" />
                </form>
                """ % {
              'change_pass' : _("If you want to change your password, please enter the old one and set the new value in the form below."),
              'mandatory' : _("mandatory"),
              'old_password' : _("Old password"),
              'new_password' : _("New password"),
              'optional' : _("optional"),
              'note' : _("Note"),
              'password_note' : _("The password phrase may contain punctuation, spaces, etc."),
              'old_password_note' : _("You must fill the old password in order to set a new one."),
              'retype_password' : _("Retype password"),
              'set_values' : _("Set new password"),
              'password_disabled' : password_disabled and "disabled" or "",
              'sitesecureurl': CFG_SITE_SECURE_URL,
              }
        # CERN without SSO: lightweight accounts reset their password on the
        # external CERN registration site.
        elif not CFG_EXTERNAL_AUTH_USING_SSO and CFG_CERN_SITE:
            out += "<p>" + _("""If you are using a lightweight CERN account you can
                %(x_url_open)sreset the password%(x_url_close)s.""") % \
                {'x_url_open' : \
                    '<a href="http://cern.ch/LightweightRegistration/ResetPassword.aspx%s">' \
                        % (make_canonical_urlargd({'email': email, 'returnurl' : CFG_SITE_SECURE_URL + '/youraccount/edit' + make_canonical_urlargd({'lang' : ln}, {})}, {})), 'x_url_close' : '</a>'} + "</p>"
        # CERN with SSO: passwords are managed entirely by the CERN account
        # system, so only a pointer is shown.
        elif CFG_EXTERNAL_AUTH_USING_SSO and CFG_CERN_SITE:
            out += "<p>" + _("""You can change or reset your CERN account password by means of the %(x_url_open)sCERN account system%(x_url_close)s.""") % \
                {'x_url_open' : '<a href="https://cern.ch/login/password.aspx">', 'x_url_close' : '</a>'} + "</p>"
        return out
    def tmpl_user_bibcatalog_auth(self, bibcatalog_username="", bibcatalog_password="", ln=CFG_SITE_LANG):
        """template for setting username and pw for bibcatalog backend

        NOTE(review): the markup opens <form ...> but never emits a closing
        </form> tag -- presumably the surrounding page closes it; verify
        before reusing this snippet standalone.
        """
        _ = gettext_set_language(ln)
        out = """
        <form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_bibcatalog_settings">
        <p><big><strong class="headline">%(edit_bibcatalog_settings)s</strong></big></p>
        <table>
          <tr>
            <td> %(username)s: <input type="text" size="25" name="bibcatalog_username" value="%(bibcatalog_username)s" id="bibcatuid"></td>
            <td> %(password)s: <input type="password" size="25" name="bibcatalog_password" value="%(bibcatalog_password)s" id="bibcatpw"></td>
          </tr>
          <tr>
            <td><input class="formbutton" type="submit" value="%(update_settings)s" /></td>
          </tr>
        </table>
        """ % {
          'sitesecureurl' : CFG_SITE_SECURE_URL,
          'bibcatalog_username' : bibcatalog_username,
          'bibcatalog_password' : bibcatalog_password,
          'edit_bibcatalog_settings' : _("Edit cataloging interface settings"),
          'username' : _("Username"),
          'password' : _("Password"),
          'update_settings' : _('Update settings')
        }
        return out
    def tmpl_user_lang_edit(self, ln, preferred_lang):
        """Render the form for choosing the preferred interface language.

        Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'preferred_lang' *string* - short code of the currently preferred
            language; its <option> is pre-selected
        """
        _ = gettext_set_language(ln)
        out = """
                <form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_lang_settings">
                <p><big><strong class="headline">%(edit_lang_settings)s</strong></big></p>
                <table>
                  <tr><td align="right"><select name="lang" id="lang">
              """ % {
          'sitesecureurl' : CFG_SITE_SECURE_URL,
          'edit_lang_settings' : _("Edit language-related settings"),
        }
        # One <option> per supported language; the 'x and y or z' form is the
        # pre-ternary Python idiom for a conditional expression.
        for short_ln, long_ln in language_list_long():
            out += """<option %(selected)s value="%(short_ln)s">%(long_ln)s</option>""" % {
                'selected' : preferred_lang == short_ln and 'selected="selected"' or '',
                'short_ln' : short_ln,
                'long_ln' : escape_html(long_ln)
            }
        out += """</select></td><td valign="top"><strong><label for="lang">%(select_lang)s</label></strong></td></tr>
                 <tr><td></td><td><input class="formbutton" type="submit" value="%(update_settings)s" /></td></tr>
               </table></form>""" % {
            'select_lang' : _('Select desired language of the web interface.'),
            'update_settings' : _('Update settings')
        }
        return out
    def tmpl_user_websearch_edit(self, ln, current = 10, show_latestbox = True, show_helpbox = True):
        """Render the form with search-related display preferences.

        Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'current' *int* - currently selected number of results per page

          - 'show_latestbox' *boolean* - whether the 'latest additions' box
            checkbox starts checked

          - 'show_helpbox' *boolean* - whether the collection help boxes
            checkbox starts checked
        """
        _ = gettext_set_language(ln)
        out = """
                <form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_websearch_settings">
                <p><big><strong class="headline">%(edit_websearch_settings)s</strong></big></p>
                <table>
                  <tr><td align="right"><input type="checkbox" %(checked_latestbox)s value="1" name="latestbox" id="latestbox"/></td>
                      <td valign="top"><b><label for="latestbox">%(show_latestbox)s</label></b></td></tr>
                  <tr><td align="right"><input type="checkbox" %(checked_helpbox)s value="1" name="helpbox" id="helpbox"/></td>
                      <td valign="top"><b><label for="helpbox">%(show_helpbox)s</label></b></td></tr>
                  <tr><td align="right"><select name="group_records" id="group_records">
              """ % {
          'sitesecureurl' : CFG_SITE_SECURE_URL,
          'edit_websearch_settings' : _("Edit search-related settings"),
          'show_latestbox' : _("Show the latest additions box"),
          'checked_latestbox' : show_latestbox and 'checked="checked"' or '',
          'show_helpbox' : _("Show collection help boxes"),
          'checked_helpbox' : show_helpbox and 'checked="checked"' or '',
        }
        # Offer only page sizes up to the site-wide maximum.
        for i in 10, 25, 50, 100, 250, 500:
            if i <= CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS:
                out += """<option %(selected)s>%(i)s</option>
                """ % {
                  'selected' : current == i and 'selected="selected"' or '',
                  'i' : i
                }
        out += """</select></td><td valign="top"><strong><label for="group_records">%(select_group_records)s</label></strong></td></tr>
                <tr><td></td><td><input class="formbutton" type="submit" value="%(update_settings)s" /></td></tr>
              </table>
            </form>""" % {
                'update_settings' : _("Update settings"),
                'select_group_records' : _("Number of search results per page"),
        }
        return out
    def tmpl_user_external_auth(self, ln, methods, current, method_disabled):
        """
        Displays a form for the user to change his authentication method.
        Parameters:
          - 'ln' *string* - The language to display the interface in
          - 'methods' *array* - The methods of authentication
          - 'method_disabled' *boolean* - If the user has the right to change this
          - 'current' *string* - The currently selected method
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = """
                 <form method="post" action="%(sitesecureurl)s/youraccount/change">
                   <big><strong class="headline">%(edit_method)s</strong></big>
                   <p>%(explain_method)s:</p>
                   <table>
                     <tr><td valign="top"><b>%(select_method)s:</b></td><td>
               """ % {
                 'edit_method' : _("Edit login method"),
                 'explain_method' : _("Please select which login method you would like to use to authenticate yourself"),
                 'select_method' : _("Select method"),
                 'sitesecureurl': CFG_SITE_SECURE_URL,
               }
        # One radio button per available method; all buttons are disabled when
        # the user may not change the setting.
        for system in methods:
            out += """<input type="radio" name="login_method" value="%(system)s" id="%(id)s" %(disabled)s %(selected)s /><label for="%(id)s">%(system)s</label><br />""" % {
                     'system' : system,
                     'disabled' : method_disabled and 'disabled="disabled"' or "",
                     'selected' : current == system and 'checked="checked"' or "",
                     # nmtoken_from_string makes the method name safe for an id attribute
                     'id' : nmtoken_from_string(system),
                   }
        out += """  </td></tr>
                   <tr><td> </td>
                       <td><input class="formbutton" type="submit" value="%(select_method)s" /></td></tr></table>
                    </form>""" % {
                 'select_method' : _("Select method"),
               }
        return out
    def tmpl_lost_password_form(self, ln):
        """
        Displays a form for the user to ask for his password sent by email.
        Parameters:
          - 'ln' *string* - The language to display the interface in
          - 'msg' *string* - Explicative message on top of the form.
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = "<p>" + _("If you have lost the password for your %(sitename)s %(x_fmt_open)sinternal account%(x_fmt_close)s, then please enter your email address in the following form in order to have a password reset link emailed to you.") % {'x_fmt_open' : '<em>', 'x_fmt_close' : '</em>', 'sitename' : CFG_SITE_NAME_INTL[ln]} + "</p>"
        out += """
          <blockquote>
          <form  method="post" action="../youraccount/send_email">
          <table>
                <tr>
              <td align="right"><strong><label for="p_email">%(email)s:</label></strong></td>
              <td><input type="text" size="25" name="p_email" id="p_email" value="" />
                  <input type="hidden" name="ln" value="%(ln)s" />
                  <input type="hidden" name="action" value="lost" />
              </td>
            </tr>
            <tr><td>&nbsp;</td>
              <td><code class="blocknote"><input class="formbutton" type="submit" value="%(send)s" /></code></td>
            </tr>
          </table>

          </form>
          </blockquote>
          """ % {
            'ln': ln,
            'email' : _("Email address"),
            'send' : _("Send password reset link"),
          }
        # CERN installations additionally point lightweight-account users to
        # the external CERN password-reset page; elsewhere external-login
        # users are told to use their own provider or contact support.
        if CFG_CERN_SITE:
            out += "<p>" + _("If you have been using the %(x_fmt_open)sCERN login system%(x_fmt_close)s, then you can recover your password through the %(x_url_open)sCERN authentication system%(x_url_close)s.") % {'x_fmt_open' : '<em>', 'x_fmt_close' : '</em>', 'x_url_open' : '<a href="https://cern.ch/lightweightregistration/ResetPassword.aspx%s">' \
                % make_canonical_urlargd({'lf': 'auth', 'returnURL' : CFG_SITE_SECURE_URL + '/youraccount/login?ln='+ln}, {}), 'x_url_close' : '</a>'} + " "
        else:
            out += "<p>" + _("Note that if you have been using an external login system, then we cannot do anything and you have to ask there.") + " "
        out += _("Alternatively, you can ask %s to change your login system from external to internal.") % ("""<a href="mailto:%(email)s">%(email)s</a>""" % { 'email' : CFG_SITE_SUPPORT_EMAIL }) + "</p>"
        return out
    def tmpl_account_info(self, ln, uid, guest, CFG_CERN_SITE):
        """
        Displays the account information
        Parameters:
          - 'ln' *string* - The language to display the interface in
          - 'uid' *string* - The user id
          - 'guest' *boolean* - If the user is guest
          - 'CFG_CERN_SITE' *boolean* - If the site is a CERN site
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = """<p>%(account_offer)s</p>
                 <blockquote>
                 <dl>
              """ % {
                'account_offer' : _("%s offers you the possibility to personalize the interface, to set up your own personal library of documents, or to set up an automatic alert query that would run periodically and would notify you of search results by email.") % CFG_SITE_NAME_INTL[ln],
              }
        # The settings entry is only meaningful for logged-in (non-guest) users.
        if not guest:
            out += """
                   <dt>
                   <a href="./edit?ln=%(ln)s">%(your_settings)s</a>
                   </dt>
                   <dd>%(change_account)s</dd>""" % {
                     'ln' : ln,
                     'your_settings' : _("Your Settings"),
                     'change_account' : _("Set or change your account email address or password. Specify your preferences about the look and feel of the interface.")
                   }
        out += """
                <dt><a href="../youralerts/display?ln=%(ln)s">%(your_searches)s</a></dt>
                <dd>%(search_explain)s</dd>""" % {
                  'ln' : ln,
                  'your_searches' : _("Your Searches"),
                  'search_explain' : _("View all the searches you performed during the last 30 days."),
                }
        out += """
                <dt><a href="../yourbaskets/display?ln=%(ln)s">%(your_baskets)s</a></dt>
                <dd>%(basket_explain)s""" % {
                  'ln' : ln,
                  'your_baskets' : _("Your Baskets"),
                  'basket_explain' : _("With baskets you can define specific collections of items, store interesting records you want to access later or share with others."),
                }
        # Guests get a warning that their baskets/alerts vanish with the session.
        if guest and CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
            out += self.tmpl_warning_guest_user(ln = ln, type = "baskets")
        out += """</dd>
                <dt><a href="../youralerts/list?ln=%(ln)s">%(your_alerts)s</a></dt>
                <dd>%(explain_alerts)s""" % {
                  'ln' : ln,
                  'your_alerts' : _("Your Alerts"),
                  'explain_alerts' : _("Subscribe to a search which will be run periodically by our service. The result can be sent to you via Email or stored in one of your baskets."),
                }
        if guest and CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
            out += self.tmpl_warning_guest_user(type="alerts", ln = ln)
        out += "</dd>"
        if CFG_CERN_SITE:
            # NOTE(review): this branch emits a second closing </dd> right
            # after the one above -- looks like duplicated markup; confirm
            # against the rendered page before changing.
            out += """</dd>
                    <dt><a href="%(CFG_SITE_SECURE_URL)s/yourloans/display?ln=%(ln)s">%(your_loans)s</a></dt>
                    <dd>%(explain_loans)s</dd>""" % {
                      'your_loans' : _("Your Loans"),
                      'explain_loans' : _("Check out book you have on loan, submit borrowing requests, etc. Requires CERN ID."),
                      'ln': ln,
                      'CFG_SITE_SECURE_URL': CFG_SITE_SECURE_URL
                    }
        out += """
                </dl>
                </blockquote>"""
        return out
def tmpl_warning_guest_user(self, ln, type):
"""
Displays a warning message about the specified type
Parameters:
- 'ln' *string* - The language to display the interface in
- 'type' *string* - The type of data that will get lost in case of guest account (for the moment: 'alerts' or 'baskets')
"""
# load the right message language
_ = gettext_set_language(ln)
if (type=='baskets'):
msg = _("You are logged in as a guest user, so your baskets will disappear at the end of the current session.") + ' '
elif (type=='alerts'):
msg = _("You are logged in as a guest user, so your alerts will disappear at the end of the current session.") + ' '
msg += _("If you wish you can %(x_url_open)slogin or register here%(x_url_close)s.") % {'x_url_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/login?ln=' + ln + '">',
'x_url_close': '</a>'}
return """<table class="errorbox" summary="">
<tr>
<th class="errorboxheader">%s</th>
</tr>
</table>""" % msg
def tmpl_account_body(self, ln, user):
"""
Displays the body of the actions of the user
Parameters:
- 'ln' *string* - The language to display the interface in
- 'user' *string* - The username (nickname or email)
"""
# load the right message language
_ = gettext_set_language(ln)
out = _("You are logged in as %(x_user)s. You may want to a) %(x_url1_open)slogout%(x_url1_close)s; b) edit your %(x_url2_open)saccount settings%(x_url2_close)s.") %\
{'x_user': user,
'x_url1_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/logout?ln=' + ln + '">',
'x_url1_close': '</a>',
'x_url2_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/edit?ln=' + ln + '">',
'x_url2_close': '</a>',
}
return out + "<br /><br />"
def tmpl_account_template(self, title, body, ln, url):
"""
Displays a block of the your account page
Parameters:
- 'ln' *string* - The language to display the interface in
- 'title' *string* - The title of the block
- 'body' *string* - The body of the block
- 'url' *string* - The URL to go to the proper section
"""
out ="""
<table class="youraccountbox" width="90%%" summary="" >
<tr>
<th class="youraccountheader"><a href="%s">%s</a></th>
</tr>
<tr>
<td class="youraccountbody">%s</td>
</tr>
</table>""" % (url, title, body)
return out
    def tmpl_account_page(self, ln, warnings, warning_list, accBody, baskets, alerts, searches, messages, loans, groups, submissions, approvals, tickets, administrative):
        """
        Displays the your account page
        Parameters:
          - 'ln' *string* - The language to display the interface in
          - 'accBody' *string* - The body of the heading block
          - 'baskets' *string* - The body of the baskets block
          - 'alerts' *string* - The body of the alerts block
          - 'searches' *string* - The body of the searches block
          - 'messages' *string* - The body of the messages block
          - 'groups' *string* - The body of the groups block
          - 'submissions' *string* - The body of the submission block
          - 'approvals' *string* - The body of the approvals block
          - 'administrative' *string* - The body of the administrative block
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = ""
        # Optional warnings banner first, then one boxed section per feature.
        # Each section is emitted only when its body/flag is truthy, so the
        # page adapts to what the user actually has access to.
        if warnings == "1":
            out += self.tmpl_general_warnings(warning_list)
        out += self.tmpl_account_template(_("Your Account"), accBody, ln, '/youraccount/edit?ln=%s' % ln)
        if messages:
            out += self.tmpl_account_template(_("Your Messages"), messages, ln, '/yourmessages/display?ln=%s' % ln)
        if loans:
            out += self.tmpl_account_template(_("Your Loans"), loans, ln, '/yourloans/display?ln=%s' % ln)
        if baskets:
            out += self.tmpl_account_template(_("Your Baskets"), baskets, ln, '/yourbaskets/display?ln=%s' % ln)
        if alerts:
            out += self.tmpl_account_template(_("Your Alert Searches"), alerts, ln, '/youralerts/list?ln=%s' % ln)
        if searches:
            out += self.tmpl_account_template(_("Your Searches"), searches, ln, '/youralerts/display?ln=%s' % ln)
        if groups:
            groups_description = _("You can consult the list of %(x_url_open)syour groups%(x_url_close)s you are administering or are a member of.")
            groups_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourgroups/display?ln=' + ln + '">',
                                   'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Groups"), groups_description, ln, '/yourgroups/display?ln=%s' % ln)
        if submissions:
            submission_description = _("You can consult the list of %(x_url_open)syour submissions%(x_url_close)s and inquire about their status.")
            submission_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yoursubmissions.py?ln=' + ln + '">',
                                       'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Submissions"), submission_description, ln, '/yoursubmissions.py?ln=%s' % ln)
        if approvals:
            approval_description = _("You can consult the list of %(x_url_open)syour approvals%(x_url_close)s with the documents you approved or refereed.")
            approval_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourapprovals.py?ln=' + ln + '">',
                                     'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Approvals"), approval_description, ln, '/yourapprovals.py?ln=%s' % ln)
        #check if this user might have tickets
        if tickets:
            ticket_description = _("You can consult the list of %(x_url_open)syour tickets%(x_url_close)s.")
            ticket_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourtickets?ln=' + ln + '">',
                                   'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Tickets"), ticket_description, ln, '/yourtickets?ln=%s' % ln)
        if administrative:
            out += self.tmpl_account_template(_("Your Administrative Activities"), administrative, ln, '/admin')
        return out
def tmpl_account_emailMessage(self, ln, msg):
"""
Displays a link to retrieve the lost password
Parameters:
- 'ln' *string* - The language to display the interface in
- 'msg' *string* - Explicative message on top of the form.
"""
# load the right message language
_ = gettext_set_language(ln)
out =""
out +="""
<body>
%(msg)s <a href="../youraccount/lost?ln=%(ln)s">%(try_again)s</a>
</body>
""" % {
'ln' : ln,
'msg' : msg,
'try_again' : _("Try again")
}
return out
    def tmpl_account_reset_password_email_body(self, email, reset_key, ip_address, ln=CFG_SITE_LANG):
        """
        The body of the email that sends lost internal account
        passwords to users.
        """
        _ = gettext_set_language(ln)
        # NOTE(review): in 'intro' the %-substitution happens INSIDE the _()
        # call, so the translation catalogue is queried with the already
        # formatted string -- for non-English locales the lookup presumably
        # misses; confirm against the gettext catalogues before relying on it.
        out = """
%(intro)s

%(intro2)s

<%(link)s>

%(outro)s

%(outro2)s""" % {
            'intro': _("Somebody (possibly you) coming from %(x_ip_address)s "
                "has asked\nfor a password reset at %(x_sitename)s\nfor "
                "the account \"%(x_email)s\"." % {
                    'x_sitename' :CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME),
                    'x_email' : email,
                    'x_ip_address' : ip_address,
                }
            ),
            'intro2' : _("If you want to reset the password for this account, please go to:"),
            # The mailcookie carries the reset_key; the link lands on the
            # access handler which validates it.
            'link' : "%s/youraccount/access%s" %
                (CFG_SITE_SECURE_URL, make_canonical_urlargd({
                    'ln' : ln,
                    'mailcookie' : reset_key
                }, {})),
            'outro' : _("in order to confirm the validity of this request."),
            'outro2' : _("Please note that this URL will remain valid for about %(days)s days only.") % {'days': CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS},
        }
        return out
    def tmpl_account_address_activation_email_body(self, email, address_activation_key, ip_address, ln=CFG_SITE_LANG):
        """
        The body of the email that sends email address activation cookie
        passwords to users.
        """
        _ = gettext_set_language(ln)
        # NOTE(review): as in the reset-password variant, the %-substitution
        # of 'intro' happens inside _(), so translation lookup sees the
        # already formatted string; verify i18n behaviour before changing.
        out = """
%(intro)s

%(intro2)s

<%(link)s>

%(outro)s

%(outro2)s""" % {
            'intro': _("Somebody (possibly you) coming from %(x_ip_address)s "
                "has asked\nto register a new account at %(x_sitename)s\nfor the "
                "email address \"%(x_email)s\"." % {
                    'x_sitename' :CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME),
                    'x_email' : email,
                    'x_ip_address' : ip_address,
                }
            ),
            'intro2' : _("If you want to complete this account registration, please go to:"),
            # The mailcookie carries the activation key consumed by /access.
            'link' : "%s/youraccount/access%s" %
                (CFG_SITE_SECURE_URL, make_canonical_urlargd({
                    'ln' : ln,
                    'mailcookie' : address_activation_key
                }, {})),
            'outro' : _("in order to confirm the validity of this request."),
            'outro2' : _("Please note that this URL will remain valid for about %(days)s days only.") % {'days' : CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS},
        }
        return out
def tmpl_account_emailSent(self, ln, email):
"""
Displays a confirmation message for an email sent
Parameters:
- 'ln' *string* - The language to display the interface in
- 'email' *string* - The email to which the message has been sent
"""
# load the right message language
_ = gettext_set_language(ln)
out =""
out += _("Okay, a password reset link has been emailed to %s.") % email
return out
def tmpl_account_delete(self, ln):
"""
Displays a confirmation message about deleting the account
Parameters:
- 'ln' *string* - The language to display the interface in
"""
# load the right message language
_ = gettext_set_language(ln)
out = "<p>" + _("""Deleting your account""") + '</p>'
return out
    def tmpl_account_logout(self, ln):
        """
        Displays a confirmation message about logging out
        Parameters:
          - 'ln' *string* - The language to display the interface in
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = _("You are no longer recognized by our system.") + ' '
        # Logging out locally does not end a central SSO session, so when an
        # SSO logout URL is configured we explicitly point the user at it.
        if CFG_EXTERNAL_AUTH_USING_SSO and CFG_EXTERNAL_AUTH_LOGOUT_SSO:
            out += _("""You are still recognized by the centralized
                %(x_fmt_open)sSSO%(x_fmt_close)s system. You can
                %(x_url_open)slogout from SSO%(x_url_close)s, too.""") % \
                {'x_fmt_open' : '<strong>', 'x_fmt_close' : '</strong>',
                 'x_url_open' : '<a href="%s">' % CFG_EXTERNAL_AUTH_LOGOUT_SSO,
                 'x_url_close' : '</a>'}
            out += '<br />'
        out += _("If you wish you can %(x_url_open)slogin here%(x_url_close)s.") % \
                {'x_url_open': '<a href="./login?ln=' + ln + '">',
                 'x_url_close': '</a>'}
        return out
def tmpl_login_form(self, ln, referer, internal, register_available, methods, selected_method, msg=None):
    """
    Displays a login form

    Parameters:

    - 'ln' *string* - The language to display the interface in
    - 'referer' *string* - The referer URL - will be redirected upon after login
    - 'internal' *boolean* - If we are producing an internal authentication
    - 'register_available' *boolean* - If users can register freely in the system
    - 'methods' *array* - The available authentication methods
    - 'selected_method' *string* - The default authentication method
    - 'msg' *string* - The message to print before the form, if needed
    """
    # load the right message language
    _ = gettext_set_language(ln)
    # BUGFIX: was "if msg is \"\":" -- an identity comparison against a
    # string literal.  With the default msg=None that test is False, so the
    # code fell through to the else branch and rendered the literal text
    # "<p>None</p>".  Treat both None and "" as "no custom message".
    if not msg:
        out = "<p>%(please_login)s</p>" % {
            'please_login': cgi.escape(_("If you already have an account, please login using the form below."))
        }
        if CFG_CERN_SITE:
            out += "<p>" + _("If you don't own a CERN account yet, you can register a %(x_url_open)snew CERN lightweight account%(x_url_close)s.") % {'x_url_open': '<a href="https://www.cern.ch/lightweightregistration/RegisterAccount.aspx">', 'x_url_close': '</a>'} + "</p>"
        else:
            if register_available:
                out += "<p>" + _("If you don't own an account yet, please %(x_url_open)sregister%(x_url_close)s an internal account.") % \
                    {'x_url_open': '<a href="../youraccount/register?ln=' + ln + '">',
                     'x_url_close': '</a>'} + "</p>"
            else:
                # users cannot register accounts, so advise them
                # how to get one, or be silent about register
                # facility if account level is more than 4:
                if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 5:
                    out += "<p>" + _("If you don't own an account yet, please contact %s.") % ('<a href="mailto:%s">%s</a>' % (cgi.escape(CFG_SITE_SUPPORT_EMAIL, True), cgi.escape(CFG_SITE_SUPPORT_EMAIL))) + "</p>"
    else:
        out = "<p>%s</p>" % msg
    out += """<form method="post" action="../youraccount/login">
<table>
"""
    if len(methods) > 1:
        # more than one method, must make a select
        login_select = """<select name="login_method" id="login_method">"""
        for method in methods:
            login_select += """<option value="%(method)s" %(selected)s>%(method)s</option>""" % {
                'method': cgi.escape(method, True),
                'selected': (method == selected_method and 'selected="selected"' or "")
            }
        login_select += "</select>"
        out += """
<tr>
<td align="right"><strong><label for="login_method">%(login_title)s</label></strong></td>
<td>%(login_select)s</td>
</tr>""" % {
            'login_title': cgi.escape(_("Login method:")),
            # BUGFIX: login_select holds HTML markup we just assembled;
            # passing it through cgi.escape() displayed the raw <select>
            # tags as text instead of rendering a drop-down.
            'login_select': login_select,
        }
    else:
        # only one login method available
        out += """<input type="hidden" name="login_method" value="%s" />""" % cgi.escape(methods[0], True)
    out += """<tr>
<td align="right">
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="referer" value="%(referer)s" />
<strong><label for="p_un">%(username)s:</label></strong>
</td>
<td><input type="text" size="25" name="p_un" id="p_un" value="" /></td>
</tr>
<tr>
<td align="right"><strong><label for="p_pw">%(password)s:</label></strong></td>
<td align="left"><input type="password" size="25" name="p_pw" id="p_pw" value="" /></td>
</tr>
<tr>
<td></td>
<td align="left"><input type="checkbox" name="remember_me" id="remember_me"/><em><label for="remember_me">%(remember_me)s</label></em></td>
<tr>
<td></td>
<td align="center" colspan="3"><code class="blocknote"><input class="formbutton" type="submit" name="action" value="%(login)s" /></code>""" % {
        'ln': cgi.escape(ln, True),
        'referer': cgi.escape(referer, True),
        'username': cgi.escape(_("Username")),
        'password': cgi.escape(_("Password")),
        'remember_me': cgi.escape(_("Remember login on this computer.")),
        'login': cgi.escape(_("login")),
    }
    if internal:
        out += """ (<a href="./lost?ln=%(ln)s">%(lost_pass)s</a>)""" % {
            'ln': cgi.escape(ln, True),
            'lost_pass': cgi.escape(_("Lost your password?"))
        }
    out += """</td>
</tr>
</table></form>"""
    out += """<p><strong>%(note)s:</strong> %(note_text)s</p>""" % {
        'note': cgi.escape(_("Note")),
        'note_text': cgi.escape(_("You can use your nickname or your email address to login."))}
    return out
def tmpl_lost_your_password_teaser(self, ln=CFG_SITE_LANG):
    """Displays a short sentence to attract user to the fact that
    maybe he lost his password.  Used by the registration page.

    @param ln: interface language code
    @return: HTML link to the lost-password page
    """
    _ = gettext_set_language(ln)
    out = ""
    out += """<a href="./lost?ln=%(ln)s">%(maybe_lost_pass)s</a>""" % {
        'ln': ln,
        # BUGFIX: the teaser text was a bare parenthesized string, so it
        # was never run through gettext and never translated; wrap it in
        # _() like every other user-facing message in this class.
        'maybe_lost_pass': _("Maybe you have lost your password?")
    }
    return out
def tmpl_reset_password_form(self, ln, email, reset_key, msg=''):
    """Display a form to reset the password.

    @param ln: interface language code
    @param email: account address the new password will be set for
    @param reset_key: opaque reset token carried through as hidden field 'k'
    @param msg: optional warning text displayed above the form
    @return: HTML form string
    """
    _ = gettext_set_language(ln)
    out = ""
    out = "<p>%s</p>" % _("Your request is valid. Please set the new "
                          "desired password in the following form.")
    if msg:
        out += """<p class='warning'>%s</p>""" % msg
    # BUGFIX: the first password input carried value="123", pre-filling the
    # field with junk the user had to delete; both inputs now start empty.
    out += """
<form method="post" action="../youraccount/resetpassword?ln=%(ln)s">
<input type="hidden" name="k" value="%(reset_key)s" />
<input type="hidden" name="e" value="%(email)s" />
<input type="hidden" name="reset" value="1" />
<table>
<tr><td align="right"><strong>%(set_password_for)s</strong>:</td><td><em>%(email)s</em></td></tr>
<tr><td align="right"><strong><label for="password">%(type_new_password)s:</label></strong></td>
<td><input type="password" name="password" id="password" value="" /></td></tr>
<tr><td align="right"><strong><label for="password2">%(type_it_again)s:</label></strong></td>
<td><input type="password" name="password2" id="password2" value="" /></td></tr>
<tr><td align="center" colspan="2">
<input class="formbutton" type="submit" name="action" value="%(set_new_password)s" />
</td></tr>
</table>
</form>""" % {
        'ln': ln,
        'reset_key': reset_key,
        'email': email,
        'set_password_for': _('Set a new password for'),
        'type_new_password': _('Type the new password'),
        'type_it_again': _('Type again the new password'),
        'set_new_password': _('Set the new password')
    }
    return out
def tmpl_register_page(self, ln, referer, level):
    """
    Displays a login form

    Parameters:

    - 'ln' *string* - The language to display the interface in
    - 'referer' *string* - The referer URL - will be redirected upon after login
    - 'level' *int* - Login level (0 - all access, 1 - accounts activated, 2+ - no self-registration)
    """
    # load the right message language
    _ = gettext_set_language(ln)
    out = ""
    # level 0/1: self-registration is allowed -- render the full form;
    # level >= 2: registration is admin-only -- render a contact notice.
    if level <= 1:
        out += _("Please enter your email address and desired nickname and password:")
        if level == 1:
            # accounts need manual/email activation before first use
            out += _("It will not be possible to use the account before it has been verified and activated.")
        out += """
<form method="post" action="../youraccount/register">
<input type="hidden" name="referer" value="%(referer)s" />
<input type="hidden" name="ln" value="%(ln)s" />
<table>
<tr>
<td align="right"><strong><label for="p_email">%(email_address)s:</label></strong><br /><small class="important">(%(mandatory)s)</small></td>
<td><input type="text" size="25" name="p_email" id="p_email" value="" /><br />
<small><span class="quicknote">%(example)s:</span>
<span class="example">[email protected]</span></small>
</td>
<td></td>
</tr>
<tr>
<td align="right"><strong><label for="p_nickname">%(nickname)s:</label></strong><br /><small class="important">(%(mandatory)s)</small></td>
<td><input type="text" size="25" name="p_nickname" id="p_nickname" value="" /><br />
<small><span class="quicknote">%(example)s:</span>
<span class="example">johnd</span></small>
</td>
<td></td>
</tr>
<tr>
<td align="right"><strong><label for="p_pw">%(password)s:</label></strong><br /><small class="quicknote">(%(optional)s)</small></td>
<td align="left"><input type="password" size="25" name="p_pw" id="p_pw" value="" /><br />
<small><span class="quicknote">%(note)s:</span> %(password_contain)s</small>
</td>
<td></td>
</tr>
<tr>
<td align="right"><strong><label for="p_pw2">%(retype)s:</label></strong></td>
<td align="left"><input type="password" size="25" name="p_pw2" id="p_pw2" value="" /></td>
<td></td>
</tr>
<tr>
<td></td>
<td align="left" colspan="3"><code class="blocknote"><input class="formbutton" type="submit" name="action" value="%(register)s" /></code></td>
</tr>
</table>
</form>
<p><strong>%(note)s:</strong> %(explain_acc)s""" % {
            'referer': cgi.escape(referer),
            'ln': cgi.escape(ln),
            'email_address': _("Email address"),
            'nickname': _("Nickname"),
            'password': _("Password"),
            'mandatory': _("mandatory"),
            'optional': _("optional"),
            'example': _("Example"),
            'note': _("Note"),
            'password_contain': _("The password phrase may contain punctuation, spaces, etc."),
            'retype': _("Retype Password"),
            'register': _("register"),
            'explain_acc': _("Please do not use valuable passwords such as your Unix, AFS or NICE passwords with this service. Your email address will stay strictly confidential and will not be disclosed to any third party. It will be used to identify you for personal services of %s. For example, you may set up an automatic alert search that will look for new preprints and will notify you daily of new arrivals by email.") % CFG_SITE_NAME,
        }
    else:
        # level >=2, so users cannot register accounts
        out += "<p>" + _("It is not possible to create an account yourself. Contact %s if you want an account.") % ('<a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL)) + "</p>"
    return out
def tmpl_account_adminactivities(self, ln, uid, guest, roles, activities):
    """
    Displays the admin activities block for this user

    Parameters:

    - 'ln' *string* - The language to display the interface in
    - 'uid' *string* - The used id
    - 'guest' *boolean* - If the user is guest
    - 'roles' *array* - The current user roles
    - 'activities' *array* - The user allowed activities
    """
    # load the right message language
    _ = gettext_set_language(ln)
    out = ""
    # guest condition
    if guest:
        return _("You seem to be a guest user. You have to %(x_url_open)slogin%(x_url_close)s first.") % \
            {'x_url_open': '<a href="../youraccount/login?ln=' + ln + '">',
             # BUGFIX: was '<a/>' -- a (self-closed) *opening* anchor, which
             # left the login link unterminated in the rendered page; the
             # closing placeholder must be '</a>'.
             'x_url_close': '</a>'}
    # no rights condition
    if not roles:
        return "<p>" + _("You are not authorized to access administrative functions.") + "</p>"
    # displaying form
    out += "<p>" + _("You are enabled to the following roles: %(x_role)s.") % {'x_role': ('<em>' + ", ".join(roles) + "</em>")} + '</p>'
    if activities:
        # print proposed links, sorted case-insensitively:
        activities.sort(lambda x, y: cmp(x.lower(), y.lower()))
        tmp_out = ''
        for action in activities:
            if action == "runbibedit":
                tmp_out += """<br /> <a href="%s/%s/edit/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run Record Editor"))
            if action == "runbibeditmulti":
                tmp_out += """<br /> <a href="%s/%s/multiedit/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run Multi-Record Editor"))
            if action == "runbibcirculation":
                tmp_out += """<br /> <a href="%s/admin/bibcirculation/bibcirculationadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Run BibCirculation"))
            if action == "runbibmerge":
                tmp_out += """<br /> <a href="%s/%s/merge/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run Record Merger"))
            if action == "runbibswordclient":
                tmp_out += """<br /> <a href="%s/%s/bibsword/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run BibSword Client"))
            if action == "runbatchuploader":
                tmp_out += """<br /> <a href="%s/batchuploader/metadata?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Run Batch Uploader"))
            if action == "cfgbibformat":
                tmp_out += """<br /> <a href="%s/admin/bibformat/bibformatadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibFormat"))
                tmp_out += """<br /> <a href="%s/kb?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibKnowledge"))
            if action == "cfgoaiharvest":
                tmp_out += """<br /> <a href="%s/admin/bibharvest/oaiharvestadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure OAI Harvest"))
            if action == "cfgoairepository":
                tmp_out += """<br /> <a href="%s/admin/bibharvest/oairepositoryadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure OAI Repository"))
            if action == "cfgbibindex":
                tmp_out += """<br /> <a href="%s/admin/bibindex/bibindexadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibIndex"))
            if action == "cfgbibrank":
                tmp_out += """<br /> <a href="%s/admin/bibrank/bibrankadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibRank"))
            if action == "cfgwebaccess":
                tmp_out += """<br /> <a href="%s/admin/webaccess/webaccessadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebAccess"))
            if action == "cfgwebcomment":
                tmp_out += """<br /> <a href="%s/admin/webcomment/webcommentadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebComment"))
            if action == "cfgwebjournal":
                tmp_out += """<br /> <a href="%s/admin/webjournal/webjournaladmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebJournal"))
            if action == "cfgwebsearch":
                tmp_out += """<br /> <a href="%s/admin/websearch/websearchadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebSearch"))
            if action == "cfgwebsubmit":
                tmp_out += """<br /> <a href="%s/admin/websubmit/websubmitadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebSubmit"))
            if action == "runbibdocfile":
                tmp_out += """<br /> <a href="%s/submit/managedocfiles?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Run Document File Manager"))
        if tmp_out:
            out += _("Here are some interesting web admin links for you:") + tmp_out
    out += "<br />" + _("For more admin-level activities, see the complete %(x_url_open)sAdmin Area%(x_url_close)s.") % \
        {'x_url_open': '<a href="' + CFG_SITE_URL + '/help/admin?ln=' + ln + '">',
         'x_url_close': '</a>'}
    return out
def tmpl_create_userinfobox(self, ln, url_referer, guest, username, submitter, referee, admin, usebaskets, usemessages, usealerts, usegroups, useloans, usestats):
    """Build the small user-info box: icon plus login or account/logout links.

    Only ``ln``, ``url_referer``, ``guest`` and ``username`` affect the
    output; the remaining feature flags are retained in the signature for
    backwards compatibility (the per-feature links moved into the menus).

    @return: HTML string
    """
    # load the right message language
    _ = gettext_set_language(ln)
    pieces = ["""<img src="%s/img/user-icon-1-20x20.gif" border="0" alt=""/> """ % CFG_SITE_URL]
    if guest:
        referer_arg = url_referer and ('&referer=%s' % urllib.quote(url_referer)) or ''
        pieces.append("""%(guest_msg)s ::
<a class="userinfo" href="%(sitesecureurl)s/youraccount/login?ln=%(ln)s%(referer)s">%(login)s</a>""" % {
            'sitesecureurl': CFG_SITE_SECURE_URL,
            'ln': ln,
            'guest_msg': _("guest"),
            'referer': referer_arg,
            'login': _('login')})
    else:
        pieces.append("""
<a class="userinfo" href="%(sitesecureurl)s/youraccount/display?ln=%(ln)s">%(username)s</a> :: """ % {
            'sitesecureurl': CFG_SITE_SECURE_URL,
            'ln': ln,
            'username': username})
        pieces.append("""<a class="userinfo" href="%(sitesecureurl)s/youraccount/logout?ln=%(ln)s">%(logout)s</a>""" % {
            'sitesecureurl': CFG_SITE_SECURE_URL,
            'ln': ln,
            'logout': _("logout")})
    return "".join(pieces)
def tmpl_create_useractivities_menu(self, ln, selected, url_referer, guest, username, submitter, referee, admin, usebaskets, usemessages, usealerts, usegroups, useloans, usestats):
    """Build the "Personalize" navigation menu for the current user.

    Each menu entry is emitted only when the corresponding privilege flag
    allows it; entry order is fixed (account, alerts, approvals, baskets,
    groups, loans, messages, submissions, searches).

    @param ln: interface language code
    @param selected: whether this menu is the currently selected one
    @param guest: whether the user is a guest
    @return: HTML string with the menu
    """
    # load the right message language
    _ = gettext_set_language(ln)
    out = '''<div class="hassubmenu%(on)s">
<a hreflang="en" class="header%(selected)s" href="%(CFG_SITE_SECURE_URL)s/youraccount/display?ln=%(ln)s">%(personalize)s</a>
<ul class="subsubmenu" style="width: 13em;">''' % {
        'CFG_SITE_SECURE_URL': CFG_SITE_SECURE_URL,
        'ln': ln,
        'personalize': _("Personalize"),
        'on': selected and " on" or '',
        'selected': selected and "selected" or ''
    }
    # (visible?, URL path under the secure site root, translated label),
    # in the exact order the entries appear in the menu.
    entries = (
        (not guest, '/youraccount/display', _('Your account')),
        (usealerts or guest, '/youralerts/list', _('Your alerts')),
        (referee, '/yourapprovals.py', _('Your approvals')),
        (usebaskets or guest, '/yourbaskets/display', _('Your baskets')),
        (usegroups, '/yourgroups/display', _('Your groups')),
        (useloans, '/yourloans/display', _('Your loans')),
        (usemessages, '/yourmessages/display', _('Your messages')),
        (submitter, '/yoursubmissions.py', _('Your submissions')),
        (usealerts or guest, '/youralerts/display', _('Your searches')),
    )
    for visible, path, label in entries:
        if visible:
            out += '<li><a href="%s%s?ln=%s">%s</a></li>' % (
                CFG_SITE_SECURE_URL, path, ln, label)
    out += '</ul></div>'
    return out
def tmpl_create_adminactivities_menu(self, ln, selected, url_referer, guest, username, submitter, referee, admin, usebaskets, usemessages, usealerts, usegroups, useloans, usestats, activities):
    """Build the "Administration" navigation menu.

    Renders one entry per admin activity (sorted by name) plus an optional
    Statistics entry.  Returns an empty string when the user has no admin
    activities at all.

    @param ln: interface language code
    @param selected: whether this menu is the currently selected one
    @param activities: mapping of activity name -> URL
    @param usestats: whether to include the Statistics link
    @return: HTML string with the menu (or '')
    """
    # load the right message language
    _ = gettext_set_language(ln)
    if not activities:
        return ''
    parts = ['''<div class="hassubmenu%(on)s">
<a hreflang="en" class="header%(selected)s" href="%(CFG_SITE_SECURE_URL)s/youraccount/youradminactivities?ln=%(ln)s">%(admin)s</a>
<ul class="subsubmenu" style="width: 19em;">''' % {
        'CFG_SITE_SECURE_URL': CFG_SITE_SECURE_URL,
        'ln': ln,
        'admin': _("Administration"),
        'on': selected and " on" or '',
        'selected': selected and "selected" or ''
    }]
    for name in sorted(activities.iterkeys()):
        parts.append('<li><a href="%(url)s">%(name)s</a></li>' % {
            'url': activities[name],
            'name': name
        })
    if usestats:
        parts.append("""<li><a href="%(CFG_SITE_URL)s/stats/?ln=%(ln)s">%(stats)s</a></li>""" % {
            'CFG_SITE_URL': CFG_SITE_URL,
            'ln': ln,
            'stats': _("Statistics"),
        })
    parts.append('</ul></div>')
    return ''.join(parts)
def tmpl_warning(self, warnings, ln=CFG_SITE_LANG):
    """Render an HTML warning box for one or several warning strings.

    @param warnings: a single warning string, or a list/tuple of them;
        embedded newlines become ``<br />`` line breaks
    @param ln: language code (currently unused; kept for API symmetry)
    @return: HTML output, or "" when there is nothing to show
    """
    # a bare string is treated as a one-element list
    if type(warnings) not in (list, tuple):
        warnings = [warnings]
    if warnings == []:
        return ""
    chunks = ["<div class=\"warningbox\">\n <b>Warning:</b>\n"]
    for warning in warnings:
        lines = warning.split("\n")
        chunks.append(" <p>")
        for line in lines[:-1]:
            chunks.append(line + " <br />\n")
        chunks.append(lines[-1] + " </p>")
    chunks.append("</div><br />\n")
    return "".join(chunks)
def tmpl_error(self, error, ln=CFG_SITE_LANG):
    """Render an HTML error box.

    @param error: error text; an empty string yields empty output
    @param ln: language code (loaded but not otherwise used here)
    @return: HTML output, or "" when error is empty
    """
    _ = gettext_set_language(ln)
    if error == "":
        return ""
    return ("<div class=\"errorbox\">\n <b>Error:</b>\n"
            + " <p>"
            + error + " </p>"
            + "</div><br />\n")
def tmpl_display_all_groups(self,
                            infos,
                            admin_group_html,
                            member_group_html,
                            external_group_html=None,
                            warnings=[],
                            ln=CFG_SITE_LANG):
    """Stack the admin, member and (optionally) external group tables.

    @param infos: info messages rendered on top via tmpl_infobox
    @param admin_group_html: HTML for groups the user administers
    @param member_group_html: HTML for groups the user is a member of
    @param external_group_html: HTML for external groups, or None to omit
    @param warnings: warning messages rendered via tmpl_warning
    @param ln: interface language code
    @return: combined HTML string
    """
    _ = gettext_set_language(ln)
    group_text = self.tmpl_infobox(infos)
    group_text += self.tmpl_warning(warnings)
    # one table row per section; the external section carries the anchor
    rows = [admin_group_html, "<br />" + member_group_html]
    if external_group_html:
        rows.append("<br /><a name='external_groups'></a>" + external_group_html)
    cells = "\n".join("<tr>\n<td>%s</td>\n</tr>" % row for row in rows)
    group_text += "\n<table>\n" + cells + "\n</table>"
    return group_text
def tmpl_display_admin_groups(self, groups, ln=CFG_SITE_LANG):
    """
    Display the groups the user is admin of.

    Parameters:

    - 'ln' *string* - The language to display the interface in
    - 'groups' *list* - All the group the user is admin of, as
      (grpID, name, description) tuples
    """
    _ = gettext_set_language(ln)
    # per-row action icon template (edit group / edit members), filled in
    # for each group below
    img_link = """
<a href="%(siteurl)s/yourgroups/%(action)s?grpID=%(grpID)s&ln=%(ln)s">
<img src="%(siteurl)s/img/%(img)s" alt="%(text)s" style="border:0" width="25"
height="25" /><br /><small>%(text)s</small>
</a>"""
    out = self.tmpl_group_table_title(img="/img/group_admin.png",
                                      text=_("You are an administrator of the following groups:") )
    # table header: Group / Description plus two narrow icon columns
    out += """
<table class="mailbox">
<thead class="mailboxheader">
<tr class="inboxheader">
<td>%s</td>
<td>%s</td>
<td style="width: 20px;" >&nbsp;</td>
<td style="width: 20px;">&nbsp;</td>
</tr>
</thead>
<tfoot>
<tr style="height:0px;">
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
</tfoot>
<tbody class="mailboxbody">""" %(_("Group"), _("Description"))
    # placeholder row when the user administers no group (the loop below
    # simply adds nothing in that case)
    if len(groups) == 0:
        out += """
<tr class="mailboxrecord" style="height: 100px;">
<td colspan="4" style="text-align: center;">
<small>%s</small>
</td>
</tr>""" %(_("You are not an administrator of any groups."),)
    for group_data in groups:
        (grpID, name, description) = group_data
        edit_link = img_link % {'siteurl' : CFG_SITE_URL,
                                'grpID' : grpID,
                                'ln': ln,
                                'img':"webbasket_create_small.png",
                                'text':_("Edit group"),
                                'action':"edit"
                                }
        members_link = img_link % {'siteurl' : CFG_SITE_URL,
                                   'grpID' : grpID,
                                   'ln': ln,
                                   'img':"webbasket_usergroup.png",
                                   # NOTE(review): %s is filled with '' so the
                                   # label reads "Edit  members" -- presumably
                                   # a leftover placeholder; confirm intent.
                                   'text':_("Edit %s members") % '',
                                   'action':"members"
                                   }
        # name/description are escaped; the icon links are trusted HTML
        out += """
<tr class="mailboxrecord">
<td>%s</td>
<td>%s</td>
<td style="text-align: center;" >%s</td>
<td style="text-align: center;" >%s</td>
</tr>""" % (cgi.escape(name), cgi.escape(description), edit_link, members_link)
    # footer row with the "Create new group" button
    out += """
<tr class="mailboxfooter">
<td colspan="2">
<form name="newGroup" action="create?ln=%(ln)s" method="post">
<input type="submit" name="create_group" value="%(write_label)s" class="formbutton" />
</form>
</td>
<td>&nbsp;</td>
<td>&nbsp;</td>
<td>&nbsp;</td>
</tr>
</tbody>
</table>""" % {'ln': ln,
               'write_label': _("Create new group"),
               }
    return out
def tmpl_display_member_groups(self, groups, ln=CFG_SITE_LANG):
    """
    Display the groups the user is member of.

    Parameters:

    - 'ln' *string* - The language to display the interface in
    - 'groups' *list* - All the group the user is member of, as
      (id, name, description) tuples
    """
    _ = gettext_set_language(ln)
    group_text = self.tmpl_group_table_title(img="/img/webbasket_us.png", text=_("You are a member of the following groups:"))
    # two-column table header: Group / Description
    group_text += """
<table class="mailbox">
<thead class="mailboxheader">
<tr class="inboxheader">
<td>%s</td>
<td>%s</td>
</tr>
</thead>
<tfoot>
<tr style="height:0px;">
<td></td>
<td></td>
</tr>
</tfoot>
<tbody class="mailboxbody">""" % (_("Group"), _("Description"))
    # placeholder row when the user belongs to no group
    if len(groups) == 0:
        group_text += """
<tr class="mailboxrecord" style="height: 100px;">
<td colspan="2" style="text-align: center;">
<small>%s</small>
</td>
</tr>""" %(_("You are not a member of any groups."),)
    for group_data in groups:
        # NOTE(review): 'id' shadows the builtin; harmless here but worth
        # renaming if this loop ever grows.
        (id, name, description) = group_data
        group_text += """
<tr class="mailboxrecord">
<td>%s</td>
<td>%s</td>
</tr>""" % (cgi.escape(name), cgi.escape(description))
    # footer row with the Join / Leave buttons
    group_text += """
<tr class="mailboxfooter">
<td>
<form name="newGroup" action="join?ln=%(ln)s" method="post">
<input type="submit" name="join_group" value="%(join_label)s" class="formbutton" />
</form>
</td>
<td>
<form name="newGroup" action="leave?ln=%(ln)s" method="post">
<input type="submit" name="leave" value="%(leave_label)s" class="formbutton" />
</form>
</td>
</tr>
</tbody>
</table>
""" % {'ln': ln,
       'join_label': _("Join new group"),
       'leave_label':_("Leave group")
       }
    return group_text
def tmpl_display_external_groups(self, groups, ln=CFG_SITE_LANG):
    """Render the table of external groups the user belongs to.

    @param groups: list of (id, name, description) tuples
    @param ln: interface language code
    @return: HTML string (read-only table; no join/leave controls)
    """
    _ = gettext_set_language(ln)
    parts = [self.tmpl_group_table_title(img="/img/webbasket_us.png", text=_("You are a member of the following external groups:"))]
    parts.append("""
<table class="mailbox">
<thead class="mailboxheader">
<tr class="inboxheader">
<td>%s</td>
<td>%s</td>
</tr>
</thead>
<tfoot>
<tr style="height:0px;">
<td></td>
<td></td>
</tr>
</tfoot>
<tbody class="mailboxbody">""" % (_("Group"), _("Description")))
    if not groups:
        parts.append("""
<tr class="mailboxrecord" style="height: 100px;">
<td colspan="2" style="text-align: center;">
<small>%s</small>
</td>
</tr>""" %(_("You are not a member of any external groups."),))
    for _grp_id, grp_name, grp_description in groups:
        parts.append("""
<tr class="mailboxrecord">
<td>%s</td>
<td>%s</td>
</tr>""" % (cgi.escape(grp_name), cgi.escape(grp_description)))
    parts.append("""
</tbody>
</table>
""")
    return "".join(parts)
def tmpl_display_input_group_info(self,
                                  group_name,
                                  group_description,
                                  join_policy,
                                  act_type="create",
                                  grpID=None,
                                  warnings=[],
                                  ln=CFG_SITE_LANG):
    """
    Display group data when creating or updating a group:
    Name, description, join_policy.

    Parameters:

    - 'ln' *string* - The language to display the interface in
    - 'group_name' *string* - name of the group
    - 'group_description' *string* - description of the group
    - 'join_policy' *string* - join policy
    - 'act_type' *string* - info about action : create or edit(update)
    - 'grpID' *int* - ID of the group(not None in case of group editing)
    - 'warnings' *list* - Display warning if values are not correct

    NOTE(review): warnings=[] is a mutable default argument; harmless here
    since it is only read, but worth switching to None at the next cleanup.
    """
    _ = gettext_set_language(ln)
    # defaults are for the "create" flavour of the form; overridden below
    # when act_type == "update"
    hidden_id =""
    form_name = "create_group"
    action = CFG_SITE_URL + '/yourgroups/create'
    button_label = _("Create new group")
    button_name = "create_button"
    label = _("Create new group")
    delete_text = ""
    if act_type == "update":
        form_name = "update_group"
        action = CFG_SITE_URL + '/yourgroups/edit'
        button_label = _("Update group")
        button_name = "update"
        label = _('Edit group %s') % cgi.escape(group_name)
        # editing also offers a Delete button
        delete_text = """<input type="submit" value="%s" class="formbutton" name="%s" />"""
        delete_text %= (_("Delete group"),"delete")
        if grpID is not None:
            # carry the group id through the form submission
            hidden_id = """<input type="hidden" name="grpID" value="%s" />"""
            hidden_id %= grpID
    out = self.tmpl_warning(warnings)
    # the form skeleton; all dynamic parts are %-substituted below
    out += """
<form name="%(form_name)s" action="%(action)s" method="post">
<input type="hidden" name="ln" value="%(ln)s" />
<div style="padding:10px;">
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(logo)s" alt="%(label)s" />
</td>
<td class="bsktitle">
<b>%(label)s</b><br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
<td><label for="group_name">%(name_label)s</label></td>
<td>
<input type="text" name="group_name" id="group_name" value="%(group_name)s" />
</td>
</tr>
<tr>
<td><label for="group_description">%(description_label)s</label></td>
<td>
<input type="text" name="group_description" id="group_description" value="%(group_description)s" />
</td>
</tr>
<tr>
<td>%(join_policy_label)s</td>
<td>
%(join_policy)s
</td>
</tr>
</table>
</td>
</tr>
</tbody>
</table>
%(hidden_id)s
<table>
<tr>
<td>
<input type="submit" value="%(button_label)s" class="formbutton" name="%(button_name)s" />
</td>
<td>
%(delete_text)s
</td>
<td>
<input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
</td>
</tr>
</table>
</div>
</form>
"""
    # group_name/description come from user input -> escaped with quote=1
    # since they land inside attribute values
    out %= {'action' : action,
            'logo': CFG_SITE_URL + '/img/webbasket_create.png',
            'label': label,
            'form_name' : form_name,
            'name_label': _("Group name:"),
            'delete_text': delete_text,
            'description_label': _("Group description:"),
            'join_policy_label': _("Group join policy:"),
            'group_name': cgi.escape(group_name, 1),
            'group_description': cgi.escape(group_description, 1),
            'button_label': button_label,
            'button_name':button_name,
            'cancel_label':_("Cancel"),
            'hidden_id':hidden_id,
            'ln': ln,
            'join_policy' :self.__create_join_policy_selection_menu("join_policy",
                                                                    join_policy,
                                                                    ln)
            }
    return out
def tmpl_display_input_join_group(self,
group_list,
group_name,
group_from_search,
search,
warnings=[],
ln=CFG_SITE_LANG):
"""
Display the groups the user can join.
He can use default select list or the search box
Parameters:
- 'ln' *string* - The language to display the interface in
- 'group_list' *list* - All the group the user can join
- 'group_name' *string* - Name of the group the user is looking for
- 'group_from search' *list* - List of the group the user can join matching group_name
- 'search' *int* - User is looking for group using group_name
- 'warnings' *list* - Display warning if two group are selected
"""
_ = gettext_set_language(ln)
out = self.tmpl_warning(warnings)
search_content = ""
if search:
search_content = """<tr><td> </td><td>"""
if group_from_search != []:
search_content += self.__create_select_menu('grpID', group_from_search, _("Please select:"))
else:
search_content += _("No matching group")
search_content += """</td><td> </td></tr>"""
out += """
<form name="join_group" action="%(action)s" method="post">
<input type="hidden" name="ln" value="%(ln)s" />
<div style="padding:10px;">
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(logo)s" alt="%(label)s" />
</td>
<td class="bsktitle">
<b>%(label)s</b><br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
<td>%(list_label)s</td>
<td>
%(group_list)s
</td>
<td>
</td>
</tr>
<tr>
<td><br /><label for="group_name">%(label2)s</label></td>
<td><br /><input type="text" name="group_name" id="group_name" value="%(group_name)s" /></td>
<td><br />
<input type="submit" name="find_button" value="%(find_label)s" class="nonsubmitbutton" />
</td>
</tr>
%(search_content)s
</table>
</td>
</tr>
</tbody>
</table>
<table>
<tr>
<td>
<input type="submit" name="join_button" value="%(label)s" class="formbutton" />
</td>
<td>
<input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
</td>
</tr>
</table>
</div>
</form>
"""
out %= {'action' : CFG_SITE_URL + '/yourgroups/join',
'logo': CFG_SITE_URL + '/img/webbasket_create.png',
'label': _("Join group"),
'group_name': cgi.escape(group_name, 1),
'label2':_("or find it") + ': ',
'list_label':_("Choose group:"),
'ln': ln,
'find_label': _("Find group"),
'cancel_label':_("Cancel"),
'group_list' :self.__create_select_menu("grpID",group_list, _("Please select:")),
'search_content' : search_content
}
return out
def tmpl_display_manage_member(self,
grpID,
group_name,
members,
pending_members,
infos=[],
warnings=[],
ln=CFG_SITE_LANG):
"""Display current members and waiting members of a group.
Parameters:
- 'ln' *string* - The language to display the interface in
- 'grpID *int* - ID of the group
- 'group_name' *string* - Name of the group
- 'members' *list* - List of the current members
- 'pending_members' *list* - List of the waiting members
- 'infos' *tuple of 2 lists* - Message to inform user about his last action
- 'warnings' *list* - Display warning if two group are selected
"""
_ = gettext_set_language(ln)
out = self.tmpl_warning(warnings)
out += self.tmpl_infobox(infos)
out += """
<form name="member" action="%(action)s" method="post">
<p>%(title)s</p>
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="grpID" value="%(grpID)s"/>
<table>
<tr>
<td>
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(imgurl)s/webbasket_usergroup.png" alt="%(img_alt_header1)s" />
</td>
<td class="bsktitle">
%(header1)s<br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
%(member_text)s
</tr>
</table>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td>
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(imgurl)s/webbasket_usergroup_gray.png" alt="%(img_alt_header2)s" />
</td>
<td class="bsktitle">
%(header2)s<br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
%(pending_text)s
</tr>
</table>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td>
<table class="bskbasket" style="width: 400px">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(imgurl)s/iconpen.gif" alt="%(img_alt_header3)s" />
</td>
<td class="bsktitle">
<b>%(header3)s</b><br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
<td colspan="2" style="padding: 0 5 10 5;">%(invite_text)s</td>
</tr>
</table>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td>
<input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
</td>
</tr>
</table>
</form>
"""
if members :
member_list = self.__create_select_menu("member_id", members, _("Please select:"))
member_text = """
<td style="padding: 0 5 10 5;">%s</td>
<td style="padding: 0 5 10 5;">
<input type="submit" name="remove_member" value="%s" class="nonsubmitbutton"/>
</td>""" % (member_list,_("Remove member"))
else :
member_text = """<td style="padding: 0 5 10 5;" colspan="2">%s</td>""" % _("No members.")
if pending_members :
pending_list = self.__create_select_menu("pending_member_id", pending_members, _("Please select:"))
pending_text = """
<td style="padding: 0 5 10 5;">%s</td>
<td style="padding: 0 5 10 5;">
<input type="submit" name="add_member" value="%s" class="nonsubmitbutton"/>
</td>
<td style="padding: 0 5 10 5;">
<input type="submit" name="reject_member" value="%s" class="nonsubmitbutton"/>
</td>""" % (pending_list,_("Accept member"), _("Reject member"))
else :
pending_text = """<td style="padding: 0 5 10 5;" colspan="2">%s</td>""" % _("No members awaiting approval.")
header1 = self.tmpl_group_table_title(text=_("Current members"))
header2 = self.tmpl_group_table_title(text=_("Members awaiting approval"))
header3 = _("Invite new members")
write_a_message_url = create_url(
"%s/yourmessages/write" % CFG_SITE_URL,
{
'ln' : ln,
'msg_subject' : _('Invitation to join "%s" group' % escape_html(group_name)),
'msg_body' : _("""\
Hello:
I think you might be interested in joining the group "%(x_name)s".
You can join by clicking here: %(x_url)s.
Best regards.
""") % {'x_name': group_name,
'x_url': create_html_link("%s/yourgroups/join" % CFG_SITE_URL, { 'grpID' : grpID,
'join_button' : "1",
},
link_label=group_name, escape_urlargd=True, escape_linkattrd=True)}})
link_open = '<a href="%s">' % escape_html(write_a_message_url)
invite_text = _("If you want to invite new members to join your group, please use the %(x_url_open)sweb message%(x_url_close)s system.") % \
{'x_url_open': link_open,
'x_url_close': '</a>'}
action = CFG_SITE_URL + '/yourgroups/members?ln=' + ln
out %= {'title':_('Group: %s') % escape_html(group_name),
'member_text' : member_text,
'pending_text' :pending_text,
'action':action,
'grpID':grpID,
'header1': header1,
'header2': header2,
'header3': header3,
'img_alt_header1': _("Current members"),
'img_alt_header2': _("Members awaiting approval"),
'img_alt_header3': _("Invite new members"),
'invite_text': invite_text,
'imgurl': CFG_SITE_URL + '/img',
'cancel_label':_("Cancel"),
'ln':ln
}
return out
def tmpl_display_input_leave_group(self,
groups,
warnings=[],
ln=CFG_SITE_LANG):
"""Display groups the user can leave.
Parameters:
- 'ln' *string* - The language to display the interface in
- 'groups' *list* - List of groups the user is currently member of
- 'warnings' *list* - Display warning if no group is selected
"""
_ = gettext_set_language(ln)
out = self.tmpl_warning(warnings)
out += """
<form name="leave" action="%(action)s" method="post">
<input type="hidden" name="ln" value="%(ln)s" />
<div style="padding:10px;">
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(logo)s" alt="%(label)s" />
</td>
<td class="bsktitle">
<b>%(label)s</b><br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
<td>%(list_label)s</td>
<td>
%(groups)s
</td>
<td>
</td>
</tr>
</table>
</td>
</tr>
</tbody>
</table>
<table>
<tr>
<td>
%(submit)s
</td>
<td>
<input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
</td>
</tr>
</table>
</div>
</form>
"""
if groups:
groups = self.__create_select_menu("grpID", groups, _("Please select:"))
list_label = _("Group list")
submit = """<input type="submit" name="leave_button" value="%s" class="formbutton"/>""" % _("Leave group")
else :
groups = _("You are not member of any group.")
list_label = ""
submit = ""
action = CFG_SITE_URL + '/yourgroups/leave?ln=%s'
action %= (ln)
out %= {'groups' : groups,
'list_label' : list_label,
'action':action,
'logo': CFG_SITE_URL + '/img/webbasket_create.png',
'label' : _("Leave group"),
'cancel_label':_("Cancel"),
'ln' :ln,
'submit' : submit
}
return out
    def tmpl_confirm_delete(self, grpID, ln=CFG_SITE_LANG):
        """
        Display a confirmation page shown before deleting a group.
        @param grpID: *int* - ID of the group to delete
        @param ln: language
        @return: html output (a form posting back to /yourgroups/edit)
        """
        _ = gettext_set_language(ln)
        action = CFG_SITE_URL + '/yourgroups/edit'
        # Both table cells carry the same hidden ln/grpID fields; only the
        # "Yes" button has name="delete" (together with confirmed=1), which
        # is how the handler distinguishes confirmation from cancellation.
        out = """
<form name="delete_group" action="%(action)s" method="post">
 <table class="confirmoperation">
  <tr>
   <td colspan="2" class="confirmmessage">
    %(message)s
   </td>
  </tr>
  <tr>
   <td>
    <input type="hidden" name="confirmed" value="1" />
    <input type="hidden" name="ln" value="%(ln)s" />
    <input type="hidden" name="grpID" value="%(grpID)s" />
    <input type="submit" name="delete" value="%(yes_label)s" class="formbutton" />
   </td>
   <td>
    <input type="hidden" name="ln" value="%(ln)s" />
    <input type="hidden" name="grpID" value="%(grpID)s" />
    <input type="submit" value="%(no_label)s" class="formbutton" />
   </td>
  </tr>
 </table>
</form>"""% {'message': _("Are you sure you want to delete this group?"),
             'ln':ln,
             'yes_label': _("Yes"),
             'no_label': _("No"),
             'grpID':grpID,
             'action': action
             }
        return out
    def tmpl_confirm_leave(self, uid, grpID, ln=CFG_SITE_LANG):
        """
        Display a confirmation page shown before leaving a group.
        @param uid: id of the current user (NOTE(review): not used in this
            method; kept for caller compatibility)
        @param grpID: *int* - ID of the group to leave
        @param ln: language
        @return: html output (a form posting back to /yourgroups/leave)
        """
        _ = gettext_set_language(ln)
        action = CFG_SITE_URL + '/yourgroups/leave'
        # Only the "Yes" button carries name="leave_button" (plus
        # confirmed=1); the unnamed submit acts as "No".
        out = """
<form name="leave_group" action="%(action)s" method="post">
 <table class="confirmoperation">
  <tr>
   <td colspan="2" class="confirmmessage">
    %(message)s
   </td>
  </tr>
  <tr>
   <td>
    <input type="hidden" name="confirmed" value="1" />
    <input type="hidden" name="ln" value="%(ln)s" />
    <input type="hidden" name="grpID" value="%(grpID)s" />
    <input type="submit" name="leave_button" value="%(yes_label)s" class="formbutton" />
   </td>
   <td>
    <input type="hidden" name="ln" value="%(ln)s" />
    <input type="hidden" name="grpID" value="%(grpID)s" />
    <input type="submit" value="%(no_label)s" class="formbutton" />
   </td>
  </tr>
 </table>
</form>"""% {'message': _("Are you sure you want to leave this group?"),
             'ln':ln,
             'yes_label': _("Yes"),
             'no_label': _("No"),
             'grpID':grpID,
             'action': action
             }
        return out
def __create_join_policy_selection_menu(self, name, current_join_policy, ln=CFG_SITE_LANG):
"""Private function. create a drop down menu for selection of join policy
@param current_join_policy: join policy as defined in CFG_WEBSESSION_GROUP_JOIN_POLICY
@param ln: language
"""
_ = gettext_set_language(ln)
elements = [(CFG_WEBSESSION_GROUP_JOIN_POLICY['VISIBLEOPEN'],
_("Visible and open for new members")),
(CFG_WEBSESSION_GROUP_JOIN_POLICY['VISIBLEMAIL'],
_("Visible but new members need approval"))
]
select_text = _("Please select:")
return self.__create_select_menu(name, elements, select_text, selected_key=current_join_policy)
def __create_select_menu(self, name, elements, select_text, multiple=0, selected_key=None):
""" private function, returns a popup menu
@param name: name of HTML control
@param elements: list of (key, value)
"""
if multiple :
out = """
<select name="%s" multiple="multiple" style="width:100%%">"""% (name)
else :
out = """<select name="%s" style="width:100%%">""" % name
out += '<option value="-1">%s</option>' % (select_text)
for (key, label) in elements:
selected = ''
if key == selected_key:
selected = ' selected="selected"'
out += '<option value="%s"%s>%s</option>'% (key, selected, label)
out += '</select>'
return out
def tmpl_infobox(self, infos, ln=CFG_SITE_LANG):
"""Display len(infos) information fields
@param infos: list of strings
@param ln=language
@return: html output
"""
_ = gettext_set_language(ln)
if not((type(infos) is list) or (type(infos) is tuple)):
infos = [infos]
infobox = ""
for info in infos:
infobox += '<div><span class="info">'
lines = info.split("\n")
for line in lines[0:-1]:
infobox += line + "<br />\n"
infobox += lines[-1] + "</span></div>\n"
return infobox
def tmpl_navtrail(self, ln=CFG_SITE_LANG, title=""):
"""
display the navtrail, e.g.:
Your account > Your group > title
@param title: the last part of the navtrail. Is not a link
@param ln: language
return html formatted navtrail
"""
_ = gettext_set_language(ln)
nav_h1 = '<a class="navtrail" href="%s/youraccount/display">%s</a>'
nav_h2 = ""
if (title != ""):
nav_h2 = ' > <a class="navtrail" href="%s/yourgroups/display">%s</a>'
nav_h2 = nav_h2 % (CFG_SITE_URL, _("Your Groups"))
return nav_h1 % (CFG_SITE_URL, _("Your Account")) + nav_h2
def tmpl_group_table_title(self, img="", text="", ln=CFG_SITE_LANG):
"""
display the title of a table:
- 'img' *string* - img path
- 'text' *string* - title
- 'ln' *string* - The language to display the interface in
"""
out = "<div>"
if img:
out += """
<img src="%s" alt="" />
""" % (CFG_SITE_URL + img)
out += """
<b>%s</b>
</div>""" % text
return out
def tmpl_admin_msg(self, group_name, grpID, ln=CFG_SITE_LANG):
"""
return message content for joining group
- 'group_name' *string* - name of the group
- 'grpID' *int* - ID of the group
- 'ln' *string* - The language to display the interface in
"""
_ = gettext_set_language(ln)
subject = _("Group %s: New membership request") % group_name
url = CFG_SITE_URL + "/yourgroups/members?grpID=%s&ln=%s"
url %= (grpID, ln)
# FIXME: which user? We should show his nickname.
body = (_("A user wants to join the group %s.") % group_name) + '<br />'
body += _("Please %(x_url_open)saccept or reject%(x_url_close)s this user's request.") % {'x_url_open': '<a href="' + url + '">',
'x_url_close': '</a>'}
body += '<br />'
return subject, body
def tmpl_member_msg(self,
group_name,
accepted=0,
ln=CFG_SITE_LANG):
"""
return message content when new member is accepted/rejected
- 'group_name' *string* - name of the group
- 'accepted' *int* - 1 if new membership has been accepted, 0 if it has been rejected
- 'ln' *string* - The language to display the interface in
"""
_ = gettext_set_language(ln)
if accepted:
subject = _("Group %s: Join request has been accepted") % (group_name)
body = _("Your request for joining group %s has been accepted.") % (group_name)
else:
subject = _("Group %s: Join request has been rejected") % (group_name)
body = _("Your request for joining group %s has been rejected.") % (group_name)
url = CFG_SITE_URL + "/yourgroups/display?ln=" + ln
body += '<br />'
body += _("You can consult the list of %(x_url_open)syour groups%(x_url_close)s.") % {'x_url_open': '<a href="' + url + '">',
'x_url_close': '</a>'}
body += '<br />'
return subject, body
def tmpl_delete_msg(self,
group_name,
ln=CFG_SITE_LANG):
"""
return message content when new member is accepted/rejected
- 'group_name' *string* - name of the group
- 'ln' *string* - The language to display the interface in
"""
_ = gettext_set_language(ln)
subject = _("Group %s has been deleted") % group_name
url = CFG_SITE_URL + "/yourgroups/display?ln=" + ln
body = _("Group %s has been deleted by its administrator.") % group_name
body += '<br />'
body += _("You can consult the list of %(x_url_open)syour groups%(x_url_close)s.") % {'x_url_open': '<a href="' + url + '">',
'x_url_close': '</a>'}
body += '<br />'
return subject, body
def tmpl_group_info(self, nb_admin_groups=0, nb_member_groups=0, nb_total_groups=0, ln=CFG_SITE_LANG):
"""
display infos about groups (used by myaccount.py)
@param nb_admin_group: number of groups the user is admin of
@param nb_member_group: number of groups the user is member of
@param total_group: number of groups the user belongs to
@param ln: language
return: html output.
"""
_ = gettext_set_language(ln)
out = _("You can consult the list of %(x_url_open)s%(x_nb_total)i groups%(x_url_close)s you are subscribed to (%(x_nb_member)i) or administering (%(x_nb_admin)i).")
out %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourgroups/display?ln=' + ln + '">',
'x_nb_total': nb_total_groups,
'x_url_close': '</a>',
'x_nb_admin': nb_admin_groups,
'x_nb_member': nb_member_groups}
return out
def tmpl_general_warnings(self, warning_list, ln=CFG_SITE_LANG):
"""
display information to the admin user about possible
ssecurity problems in the system.
"""
message = ""
_ = gettext_set_language(ln)
#Try and connect to the mysql database with the default invenio password
if "warning_mysql_password_equal_to_invenio_password" in warning_list:
message += "<p><font color=red>"
message += _("Warning: The password set for MySQL root user is the same as the default Invenio password. For security purposes, you may want to change the password.")
message += "</font></p>"
#Try and connect to the invenio database with the default invenio password
if "warning_invenio_password_equal_to_default" in warning_list:
message += "<p><font color=red>"
message += _("Warning: The password set for the Invenio MySQL user is the same as the shipped default. For security purposes, you may want to change the password.")
message += "</font></p>"
#Check if the admin password is empty
if "warning_empty_admin_password" in warning_list:
message += "<p><font color=red>"
message += _("Warning: The password set for the Invenio admin user is currently empty. For security purposes, it is strongly recommended that you add a password.")
message += "</font></p>"
#Check if the admin email has been changed from the default
if "warning_site_support_email_equal_to_default" in warning_list:
message += "<p><font color=red>"
message += _("Warning: The email address set for support email is currently set to [email protected]. It is recommended that you change this to your own address.")
message += "</font></p>"
#Check for a new release
if "note_new_release_available" in warning_list:
message += "<p><font color=red>"
message += _("A newer version of Invenio is available for download. You may want to visit ")
message += "<a href=\"http://invenio-software.org/wiki/Installation/Download\">http://invenio-software.org/wiki/Installation/Download</a>"
message += "</font></p>"
#Error downloading release notes
if "error_cannot_download_release_notes" in warning_list:
message += "<p><font color=red>"
message += _("Cannot download or parse release notes from http://invenio-software.org/repo/invenio/tree/RELEASE-NOTES")
message += "</font></p>"
return message
| robk5uj/invenio | modules/websession/lib/websession_templates.py | Python | gpl-2.0 | 103,238 |
#!/usr/bin/env python
'''
NIK-HDREfexPro2.py
Mod of ShellOut.py focused on getting Google NIK to work.
ShellOut call an external program passing the active layer as a temp file.
Tested only in Ubuntu 16.04 with Gimp 2.9.5 (git) with Nik Collection 1.2.11
Author:
Erico Porto on top of the work of Rob Antonishen
Benoit Touchette modified from Erico Porto
this script is modelled after the mm extern LabCurves trace plugin
by Michael Munzert http://www.mm-log.com/lab-curves-gimp
and thanks to the folks at gimp-chat it has grown a bit ;)
License:
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
The GNU Public License is available at
http://www.gnu.org/copyleft/gpl.html
'''
from gimpfu import *
import shutil
import subprocess
import os, sys
import tempfile
TEMP_FNAME = "ShellOutTempFile"
def plugin_main(image, drawable, visible):
  """Run the external HDR Efex Pro 2 binary on the image.

  The active layer (visible == 0) or a new-from-visible layer is exported
  to a temporary TIFF, the external editor is launched on it, and the
  edited result is pasted back into the image.

  image/drawable -- image and drawable GIMP invoked the plug-in with
  visible        -- 0: operate on the current layer; otherwise on a merged
                    copy of everything visible (added as a new layer)
  """
  pdb.gimp_image_undo_group_start(image)
  # Copy so the save operations doesn't affect the original
  if visible == 0:
    # Save in temporary. Note: empty user entered file name
    temp = pdb.gimp_image_get_active_drawable(image)
  else:
    # Get the current visible
    temp = pdb.gimp_layer_new_from_visible(image, image, "HDR Efex")
    image.add_layer(temp, 0)
  buffer = pdb.gimp_edit_named_copy(temp, "ShellOutTemp")
  #save selection if one exists
  hassel = pdb.gimp_selection_is_empty(image) == 0
  if hassel:
    savedsel = pdb.gimp_selection_save(image)
  tempimage = pdb.gimp_edit_named_paste_as_new_image(buffer)
  pdb.gimp_buffer_delete(buffer)
  if not tempimage:
    raise RuntimeError
  pdb.gimp_image_undo_disable(tempimage)
  tempdrawable = pdb.gimp_image_get_active_layer(tempimage)
  # Use temp file names from gimp, it reflects the user's choices in gimp.rc
  # change as indicated if you always want to use the same temp file name
  # tempfilename = pdb.gimp_temp_name(progtorun[2])
  tempfiledir = tempfile.gettempdir()
  tempfilename = os.path.join(tempfiledir, TEMP_FNAME + "." + "tif")
  # !!! Note no run-mode first parameter, and user entered filename is empty string
  pdb.gimp_progress_set_text ("Saving a copy")
  pdb.gimp_file_save(tempimage, tempdrawable, tempfilename, tempfilename)
  # Invoke external command
  print("calling HDR Efex Pro 2...")
  pdb.gimp_progress_set_text ("calling HDR Efex Pro 2...")
  pdb.gimp_progress_pulse()
  child = subprocess.Popen([ "nik_hdrefexpro2", tempfilename ], shell=False)
  child.communicate()
  # put it as a new layer in the opened image
  try:
    newlayer2 = pdb.gimp_file_load_layer(tempimage, tempfilename)
  except Exception:
    # Bug fix: this used to read "except: RuntimeError" -- a bare except
    # whose body was a no-op expression, so the failure was silently
    # swallowed and a confusing NameError on 'newlayer2' followed.
    raise RuntimeError("could not load result file %s" % tempfilename)
  tempimage.add_layer(newlayer2,-1)
  buffer = pdb.gimp_edit_named_copy(newlayer2, "ShellOutTemp")
  if visible == 0:
    drawable.resize(newlayer2.width,newlayer2.height,0,0)
    sel = pdb.gimp_edit_named_paste(drawable, buffer, 1)
    drawable.translate((tempdrawable.width-newlayer2.width)/2,(tempdrawable.height-newlayer2.height)/2)
  else:
    temp.resize(newlayer2.width,newlayer2.height,0,0)
    sel = pdb.gimp_edit_named_paste(temp, buffer, 1)
    temp.translate((tempdrawable.width-newlayer2.width)/2,(tempdrawable.height-newlayer2.height)/2)
  pdb.gimp_buffer_delete(buffer)
  pdb.gimp_edit_clear(temp)
  pdb.gimp_floating_sel_anchor(sel)
  #load up old selection
  if hassel:
    pdb.gimp_selection_load(savedsel)
    image.remove_channel(savedsel)
  # cleanup
  os.remove(tempfilename)  # delete the temporary file
  gimp.delete(tempimage)   # delete the temporary image
  # Note the new image is dirty in Gimp and the user will be asked to save before closing.
  pdb.gimp_image_undo_group_end(image)
  gimp.displays_flush()
# Register the plug-in in GIMP's procedural database; it appears in the
# menu under Filters/NIK Collection.  The single PF_RADIO option lets the
# user choose between editing a new-from-visible layer (default) or the
# current layer in place.
register(
  "nikfilters_hdrefexpro2",
  "HDR Efex Pro 2",
  "HDR Efex Pro 2",
  "Rob Antonishen (original) & Ben Touchette",
  "(C)2011 Rob Antonishen (original) & (C)2016-2017 Ben Touchette",
  "2017",
  "<Image>/Filters/NIK Collection/HDR Efex Pro 2",
  "RGB*, GRAY*",
  [ (PF_RADIO, "visible", "Layer:", 1, (("new from visible", 1),("current layer",0))) ],
  [],
  plugin_main,
  )
# Hand control to GIMP's plug-in main loop.
main()
| draekko-rand/nik_on_gimp | plug-ins/NIK-HDREfexPro2.py | Python | apache-2.0 | 4,466 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Ask the apcupsd plugin to refresh every enabled UPS device and log the
# outcome; warn when no enabled device matches the plugin.
prId = "com.martys.apcupsd"
apcupsdPlugin = indigo.server.getPlugin(prId)
refreshed = 0
for device in indigo.devices.iter(prId):
    if not device.enabled:
        continue
    apcupsdPlugin.executeAction("readApcupsd", deviceId=device.id)
    indigo.server.log("Refreshed data for device name %s" % (device.name), type='apcupsd')
    refreshed += 1
if refreshed == 0:
    indigo.server.log("Did not find any matching enabled UPS devices", type='apcupsd')
| MartySkinner/Indigo-apcupsd | apcupsd.indigoPlugin/Contents/Menu Items/Refresh All UPSs.py | Python | unlicense | 486 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib,urllib2
from re import *
import client
"""This modules is With Access Internet (W.A.I)"""
def ip_info(target, ip):
    """Look up *ip* on whatismyipaddress.com and privmsg a summary to *target*.

    The message has two comma-joined sections -- general information and
    geolocation -- joined by a space.  NOTE(review): the numeric prefixes in
    the strings look like mIRC colour codes; confirm against the client
    module before changing them.
    """
    raw = urllib.urlopen("http://whatismyipaddress.com/ip/" + ip).read()

    def grab(pattern):
        # First capture of *pattern* in the page, trimmed at the closing
        # cell tag, or None when the pattern does not match.
        m = search(pattern, raw)
        if m:
            return m.groups()[0].split("</td></tr>")[0]
        return None

    sections = [
        ["12Informacion General:"],
        ["12Geolocalización:"],
    ]
    # (section index, page pattern, format on success, fallback text)
    fields = [
        (0, '<tr><th>IP:</th><td>([\w\W]+)</td></tr>', "15 IP:01 %s", "15 IP:04 N/A"),
        (0, '<tr><th>ISP:</th><td>([\w\W]+)</td></tr>', "15 ISP:01 %s", "15 ISP:04 N/A"),
        (0, '<tr><th>Organization:</th><td>([\w\W]+)</td></tr>', "15 Organización:01 %s", "15 Organización:01 N/A"),
        (0, '<tr><th>Services:</th><td>([\w\W]+)</td></tr>', "15 Servicios:04 %s", "15 Servicios:04 N/A"),
        (1, '<tr><th>Country:</th><td>([\w\W]+) <img src=', "15 Pais:01 %s", "15 Pais:04 N/A"),
        (1, '<tr><th>State/Region:</th><td>([\w\W]+)</td></tr>', "15 Estado:01 %s", "15 Estado:04 N/A"),
        (1, '<tr><th>City:</th><td>([\w\W]+)</td></tr>', "15 Ciudad:01 %s", "15 Ciudad:04 N/A"),
    ]
    for idx, pattern, ok_fmt, fallback in fields:
        value = grab(pattern)
        if value is not None:
            sections[idx].append(ok_fmt % value)
        else:
            sections[idx].append(fallback)
    client.privmsg(target, " ".join(",".join(part) for part in sections))
def tittle_page(query):
    """Fetch *query* (when it looks like a URL) and return its <title> text.

    Each line of the title is stripped of leading whitespace and the lines
    are joined with single spaces (a trailing space remains).  Returns
    None implicitly when *query* does not look like a URL.
    """
    if search('(.+://)(www.)?([^/]+)(.*)', query):
        raw = urllib2.urlopen(query).read()
        t = search('<title>([\w\W]+)</title>', raw).groups()[0]
        # Bug fix: the loop used to iterate over an undefined name
        # ('titulo' instead of 't') and overwrote T on every pass instead
        # of accumulating the cleaned lines.
        T = ''
        for o in t.split('\n'):
            T += o.lstrip() + ' '
        return T
| IsmaeRLGV/Modular-UserBot- | API/WAI.py | Python | apache-2.0 | 1,949 |
import re
from upload import upload
from upload import get_parser
class siv_upload(upload):
    """Upload SIV (simian immunodeficiency virus) documents.

    Thin specialization of the generic ``upload`` pipeline: it cleans up
    strain names and resolves geographic fields using SIV-specific
    synonym tables.
    """
    def __init__(self, **kwargs):
        upload.__init__(self, **kwargs)

    def fix_name(self, name):
        """Strip whitespace/punctuation from a strain name; names that are
        purely numeric after cleaning are prefixed with 'SIV'."""
        tmp_name = name.replace(' ', '').replace('\'', '').replace('(', '').replace(')', '').replace('//', '/').replace('.', '').replace(',', '')
        try:
            tmp_name = 'SIV' + str(int(tmp_name))
        except ValueError:
            # Not a bare number -- keep the cleaned name as is.
            pass
        return tmp_name

    def filter(self, documents, index, **kwargs):
        """Drop documents whose *index* field is empty or None.

        Returns a list.  (The previous implementation returned the raw
        ``filter`` object, which on Python 3 is a lazy iterator that can
        be consumed only once.)
        """
        for doc in documents:
            if doc[index] == "":
                print(doc[index])
        return [doc for doc in documents
                if doc[index] != "" and doc[index] is not None]

    def format(self, documents, exclude_virus_methods=False, **kwargs):
        '''
        format virus information in preparation to upload to database table
        '''
        self.define_regions("source-data/geo_regions.tsv")
        self.define_countries("source-data/geo_synonyms_siv.tsv")
        self.countries_siv = set()
        for doc in documents:
            if 'strain' in doc:
                doc['strain'] = self.fix_name(doc['strain'])
            self.format_date(doc)
            self.format_place(doc)
            self.format_country(doc)
            self.format_region(doc)
            self.rethink_io.check_optional_attributes(doc, [])
            self.fix_casing(doc)

    def format_country(self, v):
        '''
        Label viruses with country based on strain name
        '''
        if 'country' in v and v['country'] != '' and v['country'] is not None:
            original_country = v['country']
            result = self.determine_location(v['country'])
            if result is not None:
                v['country'], v['division'], v['location'] = result
            else:
                v['country'], v['division'], v['location'] = None, None, None
                print("couldn't parse country for ", v['strain'], original_country)
        else:
            v['country'], v['division'], v['location'] = None, None, None
            #print(v['strain'], " country field isn't defined")

    def determine_location(self, name):
        '''
        Try to determine country, division and location information from name
        Return tuple of country, division, location if found, otherwise return None
        '''
        try:
            # Whole first path component, lowercased, looked up in the
            # synonym tables loaded by define_countries().
            label = re.match(r'^([^/]+)', name).group(1).lower()
            if label in self.label_to_country:
                return (self.label_to_country[label], self.label_to_division[label], self.label_to_location[label])
            return None
        except Exception:
            # No match (re.match returned None) or unexpected input type.
            return None
if __name__=="__main__":
    # Parse the command line options shared by all vdb upload scripts.
    parser = get_parser()
    args = parser.parse_args()
    # Positional mapping of '|'-separated FASTA header fields (sample header
    # below): virus-level attributes vs. sequence-level attributes.
    virus_fasta_fields = {2:'host_species', 3:'sub_species', 4:'SIVxyz', 5:'strain', 8:'country', 9:'collection_date'}
    sequence_fasta_fields = {0:'accession', 1:'LanL_ID', 6:'sequence_length'}
    #                0                1          2              3            4    5                              6              7
    #>>accession|lanl_ID|host_species|subspecies|SIVxyz|lanl name_isolate name|sequence_length|region|country|year
    setattr(args, 'virus_fasta_fields', virus_fasta_fields)
    setattr(args, 'sequence_fasta_fields', sequence_fasta_fields)
    # Run the upload with the combined CLI + field-map configuration.
    connVDB = siv_upload(**args.__dict__)
    connVDB.upload(**args.__dict__)
| nextstrain/fauna | vdb/siv_upload.py | Python | agpl-3.0 | 3,400 |
from testfixtures import log_capture
from tests.base_test import BaseTest
from tests import config
from core.sessions import SessionURL
from core import modules
import utils
from core import messages
import subprocess
import os
import tempfile
import random
def setUpModule():
    """Build the remote fixture tree used by every test in this module:
    dir1/dir2/dir3/dir4 with one 'stringN' file per level.  string4 is made
    mode 0111 (unreadable) so grep must skip it; ownership goes to www-data
    so the web shell can access the tree."""
    subprocess.check_output("""
BASE_FOLDER="{config.base_folder}/test_file_grep/"
rm -rf "$BASE_FOLDER"
mkdir -p "$BASE_FOLDER/dir1/dir2/dir3/dir4"
echo string1 > "$BASE_FOLDER/dir1/string1"
echo string12 > "$BASE_FOLDER/dir1/dir2/string12"
echo 'string3\nSTR33' > "$BASE_FOLDER/dir1/dir2/dir3/string3"
echo string4 > "$BASE_FOLDER/dir1/dir2/dir3/dir4/string4"
chmod 0111 "$BASE_FOLDER/dir1/dir2/dir3/dir4/string4"
chown www-data: -R "$BASE_FOLDER/"
""".format(
    config = config
    ), shell=True)
class FileGrep(BaseTest):
folders_rel = [
'test_file_grep/dir1',
'test_file_grep/dir1/dir2',
'test_file_grep/dir1/dir2/dir3',
'test_file_grep/dir1/dir2/dir3/dir4',
]
files_rel = [
'test_file_grep/dir1/string1',
'test_file_grep/dir1/dir2/string12',
'test_file_grep/dir1/dir2/dir3/string3',
'test_file_grep/dir1/dir2/dir3/dir4/string4',
]
def setUp(self):
self.session = SessionURL(
self.url,
self.password,
volatile = True
)
modules.load_modules(self.session)
self.vector_list = modules.loaded['file_grep'].vectors.get_names()
self.run_argv = modules.loaded['file_grep'].run_argv
def test_file_grep(self):
for vect in self.vector_list:
# grep string1 -> string[0]
self.assertEqual(
self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring1' ])[0],
{
self.files_rel[0] : ['string1'],
self.files_rel[1] : ['string12']
}
)
# grep string3 -> []
self.assertEqual(self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring4' ])[0], {})
# grep string[2-9] -> string[3]
self.assertEqual(self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring[2-9]' ])[0], { self.files_rel[2] : ['string3'] })
# grep rpath=folder2 string -> string[3]
self.assertEqual(self.run_argv([ '-vector', vect, self.folders_rel[2], 'string.*' ])[0], { self.files_rel[2] : ['string3'] })
def test_file_grep_invert(self):
for vect in self.vector_list:
# grep -v string1 -> string3
self.assertEqual(
self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring1', '-v' ])[0],
{
self.files_rel[2]: ['string3', 'STR33'],
# self.files_rel[3] : ['string4'] # String 4 is 0111
}
)
# grep -v bogus -> string1,2,3
self.assertEqual(
self.run_argv([ '-vector', vect, self.folders_rel[0], 'bogus', '-v' ])[0],
{
self.files_rel[0] : ['string1'],
self.files_rel[1] : ['string12'],
self.files_rel[2] : ['string3', 'STR33']
}
)
# grep -v -i STR from string[2] -> string3
self.assertEqual(self.run_argv([ '-vector', vect, self.files_rel[2], '-v', '-case', 'STR' ])[0], { self.files_rel[2] : ['string3'] })
def test_file_grep_output_remote(self):
for vect in self.vector_list:
output_path = os.path.join(config.base_folder, 'test_file_grep', 'test_%s_%i' % (vect, random.randint(1, 99999)))
# grep string3 -> []
self.assertTrue(self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring4', '-output', output_path ])[1])
self.assertEqual(subprocess.check_output(
'cat "%s"' % (output_path),
shell=True
), b''
)
subprocess.check_output(
'rm -f %s' % (output_path),
shell=True)
# grep rpath=folder2 string -> string[3]
self.assertEqual(self.run_argv([ '-vector', vect, self.folders_rel[2], 'string.*', '-output', output_path ])[0], { self.files_rel[2] : ['string3'] })
self.assertEqual(subprocess.check_output(
'cat "%s"' % (output_path),
shell=True), b'string3'
)
subprocess.check_output(
'rm -f %s' % (output_path),
shell=True)
    def test_file_grep_output_local(self):
        """Write grep results to a local file (-output -local) and verify content."""
        for vect in self.vector_list:
            temp_file = tempfile.NamedTemporaryFile()
            # grep string3 -> []
            self.assertTrue(self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring4', '-output', temp_file.name, '-local' ])[1])
            with open(temp_file.name, 'r') as temp_file2:
                self.assertEqual('', temp_file2.read())
            # NOTE(review): truncate() cuts at this handle's current position
            # (0, since nothing was written through it) — appears intentional
            # to reset the file between checks; confirm if run_argv ever
            # writes through this handle.
            temp_file.truncate()
            # grep rpath=folder2 string -> string[3]
            self.assertEqual(self.run_argv([ '-vector', vect, self.folders_rel[2], 'string.*', '-output', temp_file.name, '-local' ])[0], { self.files_rel[2] : ['string3'] })
            with open(temp_file.name, 'r') as temp_file2:
                self.assertEqual('string3', temp_file2.read())
            # Closing the NamedTemporaryFile deletes it from disk.
            temp_file.close()
    @log_capture()
    def test_php_err(self, log_captured):
        """PHP vector error paths: bad rpath or bad regex yield None + warning log."""
        # wrong rpath generate None and warning print
        self.assertEqual(self.run_argv([ 'bogus', 'tring4' ])[0], None)
        self.assertEqual(messages.module_file_grep.failed_retrieve_info,
                         log_captured.records[-1].msg)
        # wrong regex generate None and warning print
        self.assertEqual(self.run_argv([ '\'', 'tring4' ])[0], None)
        self.assertEqual(messages.module_file_grep.failed_retrieve_info,
                         log_captured.records[-1].msg)
    @log_capture()
    def test_sh_err(self, log_captured):
        """Shell (grep_sh) vector error paths: bad rpath or bad regex yield None + warning log."""
        # wrong rpath generate None and warning print
        self.assertEqual(self.run_argv([ '-vector', 'grep_sh', 'bogus', 'tring4' ])[0], None)
        self.assertEqual(messages.module_file_grep.failed_retrieve_info,
                         log_captured.records[-1].msg)
        # wrong regex generate None and warning print
        self.assertEqual(self.run_argv([ '-vector', 'grep_sh', '\'', 'tring4' ])[0], None)
        self.assertEqual(messages.module_file_grep.failed_retrieve_info,
                         log_captured.records[-1].msg)
| epinna/weevely3 | tests/test_file_grep.py | Python | gpl-3.0 | 6,825 |
# URL configuration: maps the site root to the stepping_stones index view.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 — confirm the pinned Django version before upgrading.
from django.conf.urls import patterns, url
from stepping_stones import views
urlpatterns = patterns('',
    # Site root -> index view; named 'index' for {% url %} / reverse() lookups.
    url(r'^$', views.index, name='index'),
)
| hahnicity/stepping_stones | stepping_stones/urls.py | Python | mit | 152 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, fields, api, _
class PurchaseOrder(models.Model):
    """Purchase order extension that propagates Coexiste data to invoices.

    Copies the supplier reference (``partner_ref``) into the invoice's
    ``reference_coexiste`` field, both when the invoice values are prepared
    and as a default when opening the invoice creation view.
    """
    _inherit = 'purchase.order'
    @api.multi
    def _prepare_invoice(self):
        """Extend the standard invoice values with Coexiste fields."""
        vals = super(PurchaseOrder, self)._prepare_invoice()
        # Carry the supplier reference over and mark the issuer flag.
        vals['reference_coexiste'] = self.partner_ref
        vals['issuer'] = '0'
        return vals
    @api.multi
    def action_view_invoice(self):
        """Open the invoice view, pre-filling defaults when none exist yet."""
        action = super(PurchaseOrder, self).action_view_invoice()
        if not self.invoice_ids:
            # No invoice created yet: seed the creation form with the
            # order's fiscal position, Coexiste reference and payment term.
            defaults = {
                'default_fiscal_position_id': self.fiscal_position_id.id,
                'default_reference_coexiste': self.partner_ref,
                'default_payment_term_id': self.payment_term_id.id,
            }
            action['context'].update(defaults)
        return action
| thinkopensolutions/tkobr-addons | tko_coexiste_purchase/models/purchase.py | Python | agpl-3.0 | 1,907 |
from data_importers.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
    """Polling station importer for Norwich (council id NOW)."""
    council_id = "NOW"
    addresses_name = (
        "2021-03-24T11:30:58.947611/Norwich new polling_station_export-2021-03-24.csv"
    )
    stations_name = (
        "2021-03-24T11:30:58.947611/Norwich new polling_station_export-2021-03-24.csv"
    )
    elections = ["2021-05-06"]
    csv_delimiter = ","
    def address_record_to_dict(self, record):
        """Drop known-bad records, otherwise defer to the base importer."""
        uprn = record.uprn.strip().lstrip("0")
        # UPRNs excluded after manual review of the source data.
        suppressed_uprns = {
            "100091562008",  # 10 SPROWSTON ROAD, NORWICH
            "10093501268",  # 481D SPROWSTON ROAD, NORWICH
            "100091339250",  # FIRST FLOOR FLAT 70 SILVER ROAD, NORWICH
            "100090915857",  # 9 OAK STREET, NORWICH
            "100090915856",  # 7 OAK STREET, NORWICH
            "100090915855",  # 5 OAK STREET, NORWICH
            "100090915854",  # 3 OAK STREET, NORWICH
            "100090890924",  # 32 BRITANNIA ROAD, NORWICH
            "100090890926",  # 34 BRITANNIA ROAD, NORWICH
            "100090890928",  # 36 BRITANNIA ROAD, NORWICH
        }
        if uprn in suppressed_uprns:
            return None
        # Whole postcodes excluded from the import.
        if record.housepostcode in ("NR2 3AT", "NR4 7FW"):
            return None
        return super().address_record_to_dict(record)
| DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_importers/management/commands/import_norwich.py | Python | bsd-3-clause | 1,287 |
from typing import Iterable, Callable, Optional, Any, List, Iterator
from dupescan.fs._fileentry import FileEntry
from dupescan.fs._root import Root
from dupescan.types import AnyPath
FSPredicate = Callable[[FileEntry], bool]
ErrorHandler = Callable[[EnvironmentError], Any]
def catch_filter(inner_filter: FSPredicate, error_handler_func: ErrorHandler) -> FSPredicate:
    """Wrap a filesystem predicate so EnvironmentErrors are reported, not raised.

    If ``inner_filter`` is None, return a predicate that accepts everything
    (and therefore can never raise, so ``error_handler_func`` is unused).
    Otherwise return a wrapper that forwards to ``inner_filter``; when the
    filter raises EnvironmentError, the error is passed to
    ``error_handler_func`` (if any) and the wrapper answers False.
    """
    if inner_filter is None:
        # No filter supplied: include every entry unconditionally.
        def accept_all(*args, **kwargs):
            return True
        return accept_all
    def guarded(*args, **kwargs):
        try:
            return inner_filter(*args, **kwargs)
        except EnvironmentError as env_error:
            if error_handler_func is not None:
                error_handler_func(env_error)
            return False
    return guarded
def noerror(_):
    """Default error handler: silently discard the error."""
    return None
class Walker(object):
    """Walks one or more root paths, yielding FileEntry objects that pass
    the configured directory/file filters. Filter errors are routed to
    ``onerror`` instead of propagating (see catch_filter).
    """
    def __init__(
        self,
        recursive: bool,
        dir_object_filter: Optional[FSPredicate]=None,
        file_object_filter: Optional[FSPredicate]=None,
        onerror: Optional[ErrorHandler]=None
    ):
        self._recursive = bool(recursive)
        # Default to a no-op handler so _onerror can always be called.
        self._onerror = noerror if onerror is None else onerror
        self._dir_filter = catch_filter(dir_object_filter, self._onerror)
        self._file_filter = catch_filter(file_object_filter, self._onerror)
    def __call__(self, paths: Iterable[AnyPath]) -> Iterator[FileEntry]:
        """Yield matching entries for each root path in order."""
        for root_index, root_path in enumerate(paths):
            root_spec = Root(root_path, root_index)
            try:
                root_obj = FileEntry.from_path(root_path, root_spec)
            except EnvironmentError as env_error:
                # Root itself is unreadable: report and move to next root.
                self._onerror(env_error)
                continue
            if root_obj.is_dir and self._dir_filter(root_obj):
                if self._recursive:
                    yield from self._recurse_dir(root_obj)
                else:
                    # Non-recursive mode yields the directory entry itself.
                    yield root_obj
            elif root_obj.is_file and self._file_filter(root_obj):
                yield root_obj
    def _recurse_dir(self, root_obj: FileEntry):
        """Depth-first traversal below ``root_obj``, yielding matching files.

        Uses an explicit stack; subdirectories are pushed reversed so they
        are visited in their original listing order.
        """
        dir_obj_q: List[FileEntry] = [ root_obj ]
        next_dirs: List[FileEntry] = [ ]
        while len(dir_obj_q) > 0:
            dir_obj = dir_obj_q.pop()
            # Reuse the same list each iteration; cleared before refilling.
            next_dirs.clear()
            try:
                for child_obj in dir_obj.dir_content():
                    try:
                        if (
                            child_obj.is_dir and
                            not child_obj.is_symlink and
                            self._dir_filter(child_obj)
                        ):
                            # Symlinked dirs are skipped to avoid cycles.
                            next_dirs.append(child_obj)
                        elif (
                            child_obj.is_file and
                            self._file_filter(child_obj)
                        ):
                            yield child_obj
                    except EnvironmentError as query_error:
                        # Per-child stat/type query failed: report, keep going.
                        self._onerror(query_error)
            except EnvironmentError as env_error:
                # Directory listing itself failed.
                self._onerror(env_error)
            dir_obj_q.extend(reversed(next_dirs))
def flat_iterator(
    paths: Iterable[AnyPath],
    dir_object_filter: Optional[FSPredicate]=None,
    file_object_filter: Optional[FSPredicate]=None,
    onerror: Optional[ErrorHandler]=None
) -> Iterator[FileEntry]:
    """Iterate the given paths without descending into directories."""
    walker = Walker(False, dir_object_filter, file_object_filter, onerror)
    return walker(paths)
def recurse_iterator(
    paths: Iterable[AnyPath],
    dir_object_filter: Optional[FSPredicate]=None,
    file_object_filter: Optional[FSPredicate]=None,
    onerror: Optional[ErrorHandler]=None
) -> Iterator[FileEntry]:
    """Iterate the given paths, recursing into directories."""
    walker = Walker(True, dir_object_filter, file_object_filter, onerror)
    return walker(paths)
| yellcorp/dupescan | dupescan/fs/_walker.py | Python | mit | 4,089 |
"""ACME AuthHandler."""
import itertools
import logging
import time
import zope.component
from acme import challenges
from acme import messages
from letsencrypt import achallenges
from letsencrypt import constants
from letsencrypt import errors
from letsencrypt import interfaces
logger = logging.getLogger(__name__)
class AuthHandler(object):
    """ACME Authorization Handler for a client.
    :ivar dv_auth: Authenticator capable of solving
        :class:`~acme.challenges.DVChallenge` types
    :type dv_auth: :class:`letsencrypt.interfaces.IAuthenticator`
    :ivar cont_auth: Authenticator capable of solving
        :class:`~acme.challenges.ContinuityChallenge` types
    :type cont_auth: :class:`letsencrypt.interfaces.IAuthenticator`
    :ivar acme.client.Client acme: ACME client API.
    :ivar account: Client's Account
    :type account: :class:`letsencrypt.account.Account`
    :ivar dict authzr: ACME Authorization Resource dict where keys are domains
        and values are :class:`acme.messages.AuthorizationResource`
    :ivar list dv_c: DV challenges in the form of
        :class:`letsencrypt.achallenges.AnnotatedChallenge`
    :ivar list cont_c: Continuity challenges in the
        form of :class:`letsencrypt.achallenges.AnnotatedChallenge`
    """
    def __init__(self, dv_auth, cont_auth, acme, account):
        self.dv_auth = dv_auth
        self.cont_auth = cont_auth
        self.acme = acme
        self.account = account
        self.authzr = dict()
        # List must be used to keep responses straight.
        self.dv_c = []
        self.cont_c = []
    def get_authorizations(self, domains, best_effort=False):
        """Retrieve all authorizations for challenges.
        :param set domains: Domains for authorization
        :param bool best_effort: Whether or not all authorizations are
            required (this is useful in renewal)
        :returns: tuple of lists of authorization resources. Takes the
            form of (`completed`, `failed`)
        :rtype: tuple
        :raises .AuthorizationError: If unable to retrieve all
            authorizations
        """
        # Request a fresh authorization (with its challenge set) per domain.
        for domain in domains:
            self.authzr[domain] = self.acme.request_domain_challenges(
                domain, self.account.regr.new_authzr_uri)
        self._choose_challenges(domains)
        # While there are still challenges remaining...
        while self.dv_c or self.cont_c:
            cont_resp, dv_resp = self._solve_challenges()
            logger.info("Waiting for verification...")
            # Send all Responses - this modifies dv_c and cont_c
            self._respond(cont_resp, dv_resp, best_effort)
        # Just make sure all decisions are complete.
        self.verify_authzr_complete()
        # Only return valid authorizations
        return [authzr for authzr in self.authzr.values()
                if authzr.body.status == messages.STATUS_VALID]
    def _choose_challenges(self, domains):
        """Retrieve necessary challenges to satisfy server."""
        logger.info("Performing the following challenges:")
        for dom in domains:
            # Pick the cheapest combination of challenges the server accepts.
            path = gen_challenge_path(
                self.authzr[dom].body.challenges,
                self._get_chall_pref(dom),
                self.authzr[dom].body.combinations)
            dom_cont_c, dom_dv_c = self._challenge_factory(
                dom, path)
            self.dv_c.extend(dom_dv_c)
            self.cont_c.extend(dom_cont_c)
    def _solve_challenges(self):
        """Get Responses for challenges from authenticators."""
        cont_resp = []
        dv_resp = []
        try:
            if self.cont_c:
                cont_resp = self.cont_auth.perform(self.cont_c)
            if self.dv_c:
                dv_resp = self.dv_auth.perform(self.dv_c)
        # This will catch both specific types of errors.
        except errors.AuthorizationError:
            logger.critical("Failure in setting up challenges.")
            logger.info("Attempting to clean up outstanding challenges...")
            self._cleanup_challenges()
            raise
        # Authenticators must return one response per challenge, in order.
        assert len(cont_resp) == len(self.cont_c)
        assert len(dv_resp) == len(self.dv_c)
        return cont_resp, dv_resp
    def _respond(self, cont_resp, dv_resp, best_effort):
        """Send/Receive confirmation of all challenges.
        .. note:: This method also cleans up the auth_handler state.
        """
        # TODO: chall_update is a dirty hack to get around acme-spec #105
        chall_update = dict()
        active_achalls = []
        active_achalls.extend(
            self._send_responses(self.dv_c, dv_resp, chall_update))
        active_achalls.extend(
            self._send_responses(self.cont_c, cont_resp, chall_update))
        # Check for updated status...
        try:
            self._poll_challenges(chall_update, best_effort)
        finally:
            # This removes challenges from self.dv_c and self.cont_c
            self._cleanup_challenges(active_achalls)
    def _send_responses(self, achalls, resps, chall_update):
        """Send responses and make sure errors are handled.
        :param dict chall_update: parameter that is updated to hold
            authzr -> list of outstanding solved annotated challenges
        """
        active_achalls = []
        for achall, resp in itertools.izip(achalls, resps):
            # XXX: make sure that all achalls, including those
            # corresponding to None or False returned from
            # Authenticator are removed from the queue and thus avoid
            # infinite loop
            active_achalls.append(achall)
            # Don't send challenges for None and False authenticator responses
            if resp is not None and resp:
                self.acme.answer_challenge(achall.challb, resp)
                # TODO: answer_challenge returns challr, with URI,
                # that can be used in _find_updated_challr
                # comparisons...
                if achall.domain in chall_update:
                    chall_update[achall.domain].append(achall)
                else:
                    chall_update[achall.domain] = [achall]
        return active_achalls
    def _poll_challenges(
            self, chall_update, best_effort, min_sleep=3, max_rounds=15):
        """Wait for all challenge results to be determined."""
        dom_to_check = set(chall_update.keys())
        comp_domains = set()
        rounds = 0
        # Poll until every domain is decided or the round budget runs out.
        while dom_to_check and rounds < max_rounds:
            # TODO: Use retry-after...
            time.sleep(min_sleep)
            all_failed_achalls = set()
            for domain in dom_to_check:
                comp_achalls, failed_achalls = self._handle_check(
                    domain, chall_update[domain])
                if len(comp_achalls) == len(chall_update[domain]):
                    comp_domains.add(domain)
                elif not failed_achalls:
                    # Some challenges are still pending: drop the completed
                    # ones from the outstanding list and keep polling.
                    for achall, _ in comp_achalls:
                        chall_update[domain].remove(achall)
                # We failed some challenges... damage control
                else:
                    # Right now... just assume a loss and carry on...
                    if best_effort:
                        comp_domains.add(domain)
                    else:
                        all_failed_achalls.update(
                            updated for _, updated in failed_achalls)
            if all_failed_achalls:
                _report_failed_challs(all_failed_achalls)
                raise errors.FailedChallenges(all_failed_achalls)
            dom_to_check -= comp_domains
            comp_domains.clear()
            rounds += 1
    def _handle_check(self, domain, achalls):
        """Returns tuple of ('completed', 'failed')."""
        completed = []
        failed = []
        self.authzr[domain], _ = self.acme.poll(self.authzr[domain])
        if self.authzr[domain].body.status == messages.STATUS_VALID:
            return achalls, []
        # Note: if the whole authorization is invalid, the individual failed
        # challenges will be determined here...
        for achall in achalls:
            updated_achall = achall.update(challb=self._find_updated_challb(
                self.authzr[domain], achall))
            # This does nothing for challenges that have yet to be decided yet.
            if updated_achall.status == messages.STATUS_VALID:
                completed.append((achall, updated_achall))
            elif updated_achall.status == messages.STATUS_INVALID:
                failed.append((achall, updated_achall))
        return completed, failed
    def _find_updated_challb(self, authzr, achall):  # pylint: disable=no-self-use
        """Find updated challenge body within Authorization Resource.
        .. warning:: This assumes only one instance of type of challenge in
            each challenge resource.
        :param .AuthorizationResource authzr: Authorization Resource
        :param .AnnotatedChallenge achall: Annotated challenge for which
            to get status
        """
        for authzr_challb in authzr.body.challenges:
            if type(authzr_challb.chall) is type(achall.challb.chall):
                return authzr_challb
        raise errors.AuthorizationError(
            "Target challenge not found in authorization resource")
    def _get_chall_pref(self, domain):
        """Return list of challenge preferences.
        :param str domain: domain for which you are requesting preferences
        """
        # Make sure to make a copy...
        chall_prefs = []
        # Continuity challenges take precedence over DV challenges.
        chall_prefs.extend(self.cont_auth.get_chall_pref(domain))
        chall_prefs.extend(self.dv_auth.get_chall_pref(domain))
        return chall_prefs
    def _cleanup_challenges(self, achall_list=None):
        """Cleanup challenges.
        If achall_list is not provided, cleanup all achallenges.
        """
        logger.info("Cleaning up challenges")
        if achall_list is None:
            dv_c = self.dv_c
            cont_c = self.cont_c
        else:
            dv_c = [achall for achall in achall_list
                    if isinstance(achall.chall, challenges.DVChallenge)]
            cont_c = [achall for achall in achall_list if isinstance(
                achall.chall, challenges.ContinuityChallenge)]
        if dv_c:
            self.dv_auth.cleanup(dv_c)
            for achall in dv_c:
                self.dv_c.remove(achall)
        if cont_c:
            self.cont_auth.cleanup(cont_c)
            for achall in cont_c:
                self.cont_c.remove(achall)
    def verify_authzr_complete(self):
        """Verifies that all authorizations have been decided.
        :returns: Whether all authzr are complete
        :rtype: bool
        """
        for authzr in self.authzr.values():
            if (authzr.body.status != messages.STATUS_VALID and
                    authzr.body.status != messages.STATUS_INVALID):
                raise errors.AuthorizationError("Incomplete authorizations")
    def _challenge_factory(self, domain, path):
        """Construct Namedtuple Challenges
        :param str domain: domain of the enrollee
        :param list path: List of indices from `challenges`.
        :returns: dv_chall, list of DVChallenge type
            :class:`letsencrypt.achallenges.Indexed`
            cont_chall, list of ContinuityChallenge type
            :class:`letsencrypt.achallenges.Indexed`
        :rtype: tuple
        :raises .errors.Error: if challenge type is not recognized
        """
        dv_chall = []
        cont_chall = []
        for index in path:
            challb = self.authzr[domain].body.challenges[index]
            chall = challb.chall
            achall = challb_to_achall(challb, self.account.key, domain)
            if isinstance(chall, challenges.ContinuityChallenge):
                cont_chall.append(achall)
            elif isinstance(chall, challenges.DVChallenge):
                dv_chall.append(achall)
        return cont_chall, dv_chall
def challb_to_achall(challb, key, domain):
    """Converts a ChallengeBody object to an AnnotatedChallenge.
    :param challb: ChallengeBody
    :type challb: :class:`acme.messages.ChallengeBody`
    :param key: Key
    :type key: :class:`letsencrypt.le_util.Key`
    :param str domain: Domain of the challb
    :returns: Appropriate AnnotatedChallenge
    :rtype: :class:`letsencrypt.achallenges.AnnotatedChallenge`
    :raises .errors.Error: if the challenge type is not recognized
    """
    chall = challb.chall
    logger.info("%s challenge for %s", chall.typ, domain)
    if isinstance(chall, challenges.DVSNI):
        return achallenges.DVSNI(
            challb=challb, domain=domain, key=key)
    elif isinstance(chall, challenges.SimpleHTTP):
        return achallenges.SimpleHTTP(
            challb=challb, domain=domain, key=key)
    elif isinstance(chall, challenges.DNS):
        return achallenges.DNS(challb=challb, domain=domain)
    elif isinstance(chall, challenges.RecoveryToken):
        return achallenges.RecoveryToken(challb=challb, domain=domain)
    elif isinstance(chall, challenges.RecoveryContact):
        return achallenges.RecoveryContact(
            challb=challb, domain=domain)
    elif isinstance(chall, challenges.ProofOfPossession):
        return achallenges.ProofOfPossession(
            challb=challb, domain=domain)
    else:
        # Bug fix: unlike the logging API, Exception() does not apply
        # %-formatting to extra arguments, so the original two-argument call
        # produced an unformatted message tuple. Interpolate explicitly.
        raise errors.Error(
            "Received unsupported challenge of type: %s" % chall.typ)
def gen_challenge_path(challbs, preferences, combinations):
    """Generate a plan to get authority over the identity.
    .. todo:: This can be possibly be rewritten to use resolved_combinations.
    :param tuple challbs: A tuple of challenges
        (:class:`acme.messages.Challenge`) from
        :class:`acme.messages.AuthorizationResource` to be
        fulfilled by the client in order to prove possession of the
        identifier.
    :param list preferences: List of challenge preferences for domain
        (:class:`acme.challenges.Challenge` subclasses)
    :param tuple combinations: A collection of sets of challenges from
        :class:`acme.messages.Challenge`, each of which would
        be sufficient to prove possession of the identifier.
    :returns: tuple of indices from ``challenges``.
    :rtype: tuple
    :raises letsencrypt.errors.AuthorizationError: If a
        path cannot be created that satisfies the CA given the preferences and
        combinations.
    """
    # Without server-provided combinations, fall back to the dumb strategy.
    if not combinations:
        return _find_dumb_path(challbs, preferences)
    return _find_smart_path(challbs, preferences, combinations)
def _find_smart_path(challbs, preferences, combinations):
"""Find challenge path with server hints.
Can be called if combinations is included. Function uses a simple
ranking system to choose the combo with the lowest cost.
"""
chall_cost = {}
max_cost = 1
for i, chall_cls in enumerate(preferences):
chall_cost[chall_cls] = i
max_cost += i
# max_cost is now equal to sum(indices) + 1
best_combo = []
# Set above completing all of the available challenges
best_combo_cost = max_cost
combo_total = 0
for combo in combinations:
for challenge_index in combo:
combo_total += chall_cost.get(challbs[
challenge_index].chall.__class__, max_cost)
if combo_total < best_combo_cost:
best_combo = combo
best_combo_cost = combo_total
combo_total = 0
if not best_combo:
msg = ("Client does not support any combination of challenges that "
"will satisfy the CA.")
logger.fatal(msg)
raise errors.AuthorizationError(msg)
return best_combo
def _find_dumb_path(challbs, preferences):
    """Find challenge path without server hints.

    Should be called if the combinations hint is not included by the
    server. Walks the preference list in order and picks every offered
    challenge of that type that does not conflict (via is_preferred) with
    challenges already chosen.
    """
    assert len(preferences) == len(set(preferences))
    path = []
    satisfied = set()
    for preferred_cls in preferences:
        for index, candidate in enumerate(challbs):
            if (isinstance(candidate.chall, preferred_cls) and
                    is_preferred(candidate, satisfied)):
                path.append(index)
                satisfied.add(candidate)
    return path
def mutually_exclusive(obj1, obj2, groups, different=False):
    """Are two objects mutually exclusive?

    Returns False when both objects belong to the same exclusivity group
    (unless ``different`` is set and they are instances of the same class).
    """
    for group in groups:
        in_group1 = any(isinstance(obj1, member_cls) for member_cls in group)
        in_group2 = any(isinstance(obj2, member_cls) for member_cls in group)
        if in_group1 and in_group2 and (
                not different or not isinstance(obj1, obj2.__class__)):
            return False
    return True
def is_preferred(offered_challb, satisfied,
                 exclusive_groups=constants.EXCLUSIVE_CHALLENGES):
    """Return whether or not the challenge is preferred in path.

    The offered challenge is acceptable only if it is mutually exclusive
    with every already-satisfied challenge of a different class.
    """
    return all(
        mutually_exclusive(offered_challb.chall, challb.chall,
                           exclusive_groups, different=True)
        for challb in satisfied)
# Boilerplate advice appended to several error-type help messages below.
_ERROR_HELP_COMMON = (
    "To fix these errors, please make sure that your domain name was entered "
    "correctly and the DNS A/AAAA record(s) for that domain contains the "
    "right IP address.")
# Maps an ACME error type (as found in achall.error.typ) to the remediation
# text shown to the user by _generate_failed_chall_msg.
_ERROR_HELP = {
    "connection" :
        _ERROR_HELP_COMMON + " Additionally, please check that your computer "
        "has publicly routable IP address and no firewalls are preventing the "
        "server from communicating with the client.",
    "dnssec" :
        _ERROR_HELP_COMMON + " Additionally, if you have DNSSEC enabled for "
        "your domain, please ensure the signature is valid.",
    "malformed" :
        "To fix these errors, please make sure that you did not provide any "
        "invalid information to the client and try running Let's Encrypt "
        "again.",
    "serverInternal" :
        "Unfortunately, an error on the ACME server prevented you from completing "
        "authorization. Please try again later.",
    "tls" :
        _ERROR_HELP_COMMON + " Additionally, please check that you have an up "
        "to date TLS configuration that allows the server to communicate with "
        "the Let's Encrypt client.",
    "unauthorized" : _ERROR_HELP_COMMON,
    "unknownHost" : _ERROR_HELP_COMMON,}
def _report_failed_challs(failed_achalls):
    """Notifies the user about failed challenges.
    :param set failed_achalls: A set of failed
        :class:`letsencrypt.achallenges.AnnotatedChallenge`.
    """
    # Group failures by error type so each type yields one report message.
    by_error_type = dict()
    for achall in failed_achalls:
        if achall.error:
            by_error_type.setdefault(achall.error.typ, []).append(achall)
    reporter = zope.component.getUtility(interfaces.IReporter)
    for achall_group in by_error_type.itervalues():
        reporter.add_message(
            _generate_failed_chall_msg(achall_group),
            reporter.MEDIUM_PRIORITY, True)
def _generate_failed_chall_msg(failed_achalls):
    """Creates a user friendly error message about failed challenges.
    :param list failed_achalls: A list of failed
        :class:`letsencrypt.achallenges.AnnotatedChallenge` with the same error
        type.
    :returns: A formatted error message for the client.
    :rtype: str
    """
    typ = failed_achalls[0].error.typ
    parts = [
        "The following '{0}' errors were reported by the server:".format(typ)]
    # Group the affected domains under each distinct error description.
    domains_by_problem = dict()
    for achall in failed_achalls:
        domains_by_problem.setdefault(
            achall.error.description, set()).add(achall.domain)
    for description in domains_by_problem:
        parts.append("\n\nDomains: ")
        parts.append(", ".join(sorted(domains_by_problem[description])))
        parts.append("\nError: {0}".format(description))
    # Append remediation advice when we have it for this error type.
    if typ in _ERROR_HELP:
        parts.append("\n\n")
        parts.append(_ERROR_HELP[typ])
    return "".join(parts)
| tdfischer/lets-encrypt-preview | letsencrypt/auth_handler.py | Python | apache-2.0 | 20,080 |
# -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-import, unused-wildcard-import, invalid-name
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import sys
import os
import imp
from path import path
from warnings import simplefilter
from django.utils.translation import ugettext_lazy as _
from .discussionsettings import *
import dealer.git
from xmodule.modulestore.modulestore_settings import update_module_store_settings
from xmodule.mixin import LicenseMixin
from lms.djangoapps.lms_xblock.mixin import LmsBlockMixin
################################### FEATURES ###################################
# Branding: display name of the platform used in templates/emails/etc., and
# the merchant name reported to the payment processor.
PLATFORM_NAME = "IITBombayX"
CC_MERCHANT_NAME = PLATFORM_NAME
# Shown in the platform footer, e.g. "(c) COPYRIGHT_YEAR".
COPYRIGHT_YEAR = "2015"
# Platform social-media identities and links.
PLATFORM_FACEBOOK_ACCOUNT = "http://www.facebook.com/YourPlatformFacebookAccount"
PLATFORM_TWITTER_ACCOUNT = "@YourPlatform"
PLATFORM_TWITTER_URL = "http://www.twitter.com/YourPlatform"
PLATFORM_GOOGLE_PLUS_URL = "http://www.google.com/YourPlatform"
# Courseware and test-runner toggles.
COURSEWARE_ENABLED = True
ENABLE_JASMINE = False
# Discussion forum limits.
DISCUSSION_SETTINGS = {'MAX_COMMENT_DEPTH': 2}
# Feature flags: boolean (and a few scalar) switches read via FEATURES.get(...)
# throughout the LMS. Environment-specific settings files override individual
# entries; the values here are the sane defaults.
FEATURES = {
    'SAMPLE': False,
    'USE_DJANGO_PIPELINE': True,
    'DISPLAY_DEBUG_INFO_TO_STAFF': True,
    'DISPLAY_HISTOGRAMS_TO_STAFF': False,  # For large courses this slows down courseware access for staff.
    'REROUTE_ACTIVATION_EMAIL': False,  # nonempty string = address for all activation emails
    'DEBUG_LEVEL': 0,  # 0 = lowest level, least verbose, 255 = max level, most verbose
    ## DO NOT SET TO True IN THIS FILE
    ## Doing so will cause all courses to be released on production
    'DISABLE_START_DATES': False,  # When True, all courses will be active, regardless of start date
    # When True, will only publicly list courses by the subdomain.
    'SUBDOMAIN_COURSE_LISTINGS': False,
    # Expects you to define COURSE_LISTINGS, a dictionary mapping
    # subdomains to lists of course_ids
    # COURSE_LISTINGS = {
    #     'default': [
    #         'BerkeleyX/CS169.1x/2012_Fall',
    #         'HarvardX/CS50x/2012',
    #         'MITx/3.091x/2012_Fall',
    #     ],
    #     'openedx': [
    #         'BerkeleyX/CS169.1x/2012_Fall',
    #     ],
    # }
    # To see it in action, add the following to your /etc/hosts file:
    #     127.0.0.1 openedx.dev
    # When True, will override certain branding with university specific values
    # Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the
    # university to use for branding purposes
    'SUBDOMAIN_BRANDING': False,
    'FORCE_UNIVERSITY_DOMAIN': False,  # set this to the university domain to use, as an override to HTTP_HOST
    # set to None to do no university selection
    # for consistency in user-experience, keep the value of the following 3 settings
    # in sync with the corresponding ones in cms/envs/common.py
    'ENABLE_DISCUSSION_SERVICE': True,
    'ENABLE_TEXTBOOK': True,
    'ENABLE_STUDENT_NOTES': True,  # enables the student notes API and UI.
    # discussion home panel, which includes a subscription on/off setting for discussion digest emails.
    # this should remain off in production until digest notifications are online.
    'ENABLE_DISCUSSION_HOME_PANEL': False,
    # Set this to True if you want the discussion digest emails enabled automatically for new users.
    # This will be set on all new account registrations.
    # It is not recommended to enable this feature if ENABLE_DISCUSSION_HOME_PANEL is not enabled, since
    # subscribers who receive digests in that case will only be able to unsubscribe via links embedded
    # in their emails, and they will have no way to resubscribe.
    'ENABLE_DISCUSSION_EMAIL_DIGEST': False,
    'ENABLE_PSYCHOMETRICS': False,  # real-time psychometrics (eg item response theory analysis in instructor dashboard)
    'ENABLE_DJANGO_ADMIN_SITE': True,  # set true to enable django's admin site, even on prod (e.g. for course ops)
    'ENABLE_SQL_TRACKING_LOGS': False,
    'ENABLE_LMS_MIGRATION': False,
    'ENABLE_MANUAL_GIT_RELOAD': False,
    'ENABLE_MASQUERADE': True,  # allow course staff to change to student view of courseware
    'ENABLE_SYSADMIN_DASHBOARD': False,  # sysadmin dashboard, to see what courses are loaded, to delete & load courses
    'DISABLE_LOGIN_BUTTON': False,  # used in systems where login is automatic, eg MIT SSL
    # external access methods
    'ACCESS_REQUIRE_STAFF_FOR_COURSE': False,
    'AUTH_USE_OPENID': False,
    'AUTH_USE_CERTIFICATES': False,
    'AUTH_USE_OPENID_PROVIDER': False,
    # Even though external_auth is in common, shib assumes the LMS views / urls, so it should only be enabled
    # in LMS
    'AUTH_USE_SHIB': False,
    'AUTH_USE_CAS': False,
    # This flag disables the requirement of having to agree to the TOS for users registering
    # with Shib. Feature was requested by Stanford's office of general counsel
    'SHIB_DISABLE_TOS': False,
    # Toggles OAuth2 authentication provider
    'ENABLE_OAUTH2_PROVIDER': False,
    # Allows to enable an API endpoint to serve XBlock view, used for example by external applications.
    # See jquery-xblock: https://github.com/edx-solutions/jquery-xblock
    'ENABLE_XBLOCK_VIEW_ENDPOINT': False,
    # Allows to configure the LMS to provide CORS headers to serve requests from other domains
    'ENABLE_CORS_HEADERS': False,
    # Can be turned off if course lists need to be hidden. Affects views and templates.
    'COURSES_ARE_BROWSABLE': True,
    # Enables ability to restrict enrollment in specific courses by the user account login method
    'RESTRICT_ENROLL_BY_REG_METHOD': False,
    # Enables the LMS bulk email feature for course staff
    'ENABLE_INSTRUCTOR_EMAIL': True,
    # If True and ENABLE_INSTRUCTOR_EMAIL: Forces email to be explicitly turned on
    #   for each course via django-admin interface.
    # If False and ENABLE_INSTRUCTOR_EMAIL: Email will be turned on by default
    #   for all Mongo-backed courses.
    'REQUIRE_COURSE_EMAIL_AUTH': False,
    # Analytics experiments - shows instructor analytics tab in LMS instructor dashboard.
    # Enabling this feature depends on installation of a separate analytics server.
    'ENABLE_INSTRUCTOR_ANALYTICS': False,
    # enable analytics server.
    # WARNING: THIS SHOULD ALWAYS BE SET TO FALSE UNDER NORMAL
    # LMS OPERATION. See analytics.py for details about what
    # this does.
    'RUN_AS_ANALYTICS_SERVER_ENABLED': False,
    # Flip to True when the YouTube iframe API breaks (again)
    'USE_YOUTUBE_OBJECT_API': False,
    # Give a UI to show a student's submission history in a problem by the
    # Staff Debug tool.
    'ENABLE_STUDENT_HISTORY_VIEW': True,
    # Segment.io for LMS--need to explicitly turn it on for production.
    'SEGMENT_IO_LMS': False,
    # Provide a UI to allow users to submit feedback from the LMS (left-hand help modal)
    'ENABLE_FEEDBACK_SUBMISSION': False,
    # Turn on a page that lets staff enter Python code to be run in the
    # sandbox, for testing whether it's enabled properly.
    'ENABLE_DEBUG_RUN_PYTHON': False,
    # Enable URL that shows information about the status of various services
    'ENABLE_SERVICE_STATUS': False,
    # Toggle to indicate use of a custom theme
    'USE_CUSTOM_THEME': False,
    # Don't autoplay videos for students
    'AUTOPLAY_VIDEOS': False,
    # Enable instructor dash to submit background tasks
    'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
    # Enable instructor to assign individual due dates
    # Note: In order for this feature to work, you must also add
    # 'courseware.student_field_overrides.IndividualStudentOverrideProvider' to
    # the setting FIELD_OVERRIDE_PROVIDERS, in addition to setting this flag to
    # True.
    'INDIVIDUAL_DUE_DATES': False,
    # Enable Custom Courses for EdX
    'CUSTOM_COURSES_EDX': False,
    # Enable legacy instructor dashboard
    'ENABLE_INSTRUCTOR_LEGACY_DASHBOARD': True,
    # Is this an edX-owned domain? (used for edX specific messaging and images)
    'IS_EDX_DOMAIN': False,
    # Toggle to enable certificates of courses on dashboard
    'ENABLE_VERIFIED_CERTIFICATES': False,
    # Allow use of the hint management instructor view.
    'ENABLE_HINTER_INSTRUCTOR_VIEW': False,
    # for load testing
    'AUTOMATIC_AUTH_FOR_TESTING': False,
    # Toggle to enable chat availability (configured on a per-course
    # basis in Studio)
    'ENABLE_CHAT': False,
    # Allow users to enroll with methods other than just honor code certificates
    'MULTIPLE_ENROLLMENT_ROLES': False,
    # Toggle the availability of the shopping cart page
    'ENABLE_SHOPPING_CART': False,
    # Toggle storing detailed billing information
    'STORE_BILLING_INFO': False,
    # Enable flow for payments for course registration (DIFFERENT from verified student flow)
    'ENABLE_PAID_COURSE_REGISTRATION': False,
    # Enable the display of cosmetic course price display (set in course advanced settings)
    'ENABLE_COSMETIC_DISPLAY_PRICE': False,
    # Automatically approve student identity verification attempts
    'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': False,
    # Disable instructor dash buttons for downloading course data
    # when enrollment exceeds this number
    'MAX_ENROLLMENT_INSTR_BUTTONS': 200,
    # Grade calculation started from the new instructor dashboard will write
    # grades CSV files to S3 and give links for downloads.
    'ENABLE_S3_GRADE_DOWNLOADS': False,
    # whether to use password policy enforcement or not
    'ENFORCE_PASSWORD_POLICY': True,
    # Give course staff unrestricted access to grade downloads (if set to False,
    # only edX superusers can perform the downloads)
    'ALLOW_COURSE_STAFF_GRADE_DOWNLOADS': False,
    'ENABLED_PAYMENT_REPORTS': [
        "refund_report",
        "itemized_purchase_report",
        "university_revenue_share",
        "certificate_status"
    ],
    # Turn off account locking if failed login attempts exceeds a limit
    'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': True,
    # Hide any Personally Identifiable Information from application logs
    'SQUELCH_PII_IN_LOGS': True,
    # Toggles the embargo functionality, which blocks users from
    # the site or courses based on their location.
    'EMBARGO': False,
    # Whether the Wiki subsystem should be accessible via the direct /wiki/ paths. Setting this to True means
    # that people can submit content and modify the Wiki in any arbitrary manner. We're leaving this as True in the
    # defaults, so that we maintain current behavior
    'ALLOW_WIKI_ROOT_ACCESS': True,
    # Turn on/off Microsites feature
    'USE_MICROSITES': False,
    # Turn on third-party auth. Disabled for now because full implementations are not yet available. Remember to syncdb
    # if you enable this; we don't create tables by default.
    'ENABLE_THIRD_PARTY_AUTH': False,
    # Toggle to enable alternate urls for marketing links
    'ENABLE_MKTG_SITE': False,
    # Prevent concurrent logins per user
    'PREVENT_CONCURRENT_LOGINS': True,
    # Turn on Advanced Security by default
    'ADVANCED_SECURITY': True,
    # When a logged in user goes to the homepage ('/') should the user be
    # redirected to the dashboard - this is default Open edX behavior. Set to
    # False to not redirect the user
    'ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER': True,
    # When a user goes to the homepage ('/') the user see the
    # courses listed in the announcement dates order - this is default Open edX behavior.
    # Set to True to change the course sorting behavior by their start dates, latest first.
    'ENABLE_COURSE_SORTING_BY_START_DATE': False,
    # Expose Mobile REST API. Note that if you use this, you must also set
    # ENABLE_OAUTH2_PROVIDER to True
    'ENABLE_MOBILE_REST_API': False,
    'ENABLE_MOBILE_SOCIAL_FACEBOOK_FEATURES': False,
    # Enable APIs required for xBlocks on Mobile, and supported in general
    'ENABLE_RENDER_XBLOCK_API': False,
    'ENABLE_COURSE_BLOCKS_NAVIGATION_API': False,
    # Enable the combined login/registration form
    'ENABLE_COMBINED_LOGIN_REGISTRATION': False,
    # Enable organizational email opt-in
    'ENABLE_MKTG_EMAIL_OPT_IN': False,
    # Show a section in the membership tab of the instructor dashboard
    # to allow an upload of a CSV file that contains a list of new accounts to create
    # and register for course.
    'ALLOW_AUTOMATED_SIGNUPS': False,
    # Display demographic data on the analytics tab in the instructor dashboard.
    'DISPLAY_ANALYTICS_DEMOGRAPHICS': True,
    # Enable display of enrollment counts in instructor and legacy analytics dashboard
    'DISPLAY_ANALYTICS_ENROLLMENTS': True,
    # Show the mobile app links in the footer
    'ENABLE_FOOTER_MOBILE_APP_LINKS': False,
    # Let students save and manage their annotations
    'ENABLE_EDXNOTES': False,
    # Milestones application flag
    'MILESTONES_APP': False,
    # Prerequisite courses feature flag
    'ENABLE_PREREQUISITE_COURSES': False,
    # For easily adding modes to courses during acceptance testing
    'MODE_CREATION_FOR_TESTING': False,
    # Courseware search feature
    'ENABLE_COURSEWARE_SEARCH': False,
    # Dashboard search feature
    'ENABLE_DASHBOARD_SEARCH': False,
    # log all information from cybersource callbacks
    'LOG_POSTPAY_CALLBACKS': True,
    # enable beacons for video timing statistics
    'ENABLE_VIDEO_BEACON': False,
    # enable beacons for lms onload event statistics
    'ENABLE_ONLOAD_BEACON': False,
    # Toggle platform-wide course licensing
    'LICENSING': True,
    # Certificates Web/HTML Views
    'CERTIFICATES_HTML_VIEW': False,
    # Batch-Generated Certificates from Instructor Dashboard
    'CERTIFICATES_INSTRUCTOR_GENERATION': False,
    # Social Media Sharing on Student Dashboard
    'SOCIAL_SHARING_SETTINGS': {
        # Note: Ensure 'CUSTOM_COURSE_URLS' has a matching value in cms/envs/common.py
        'CUSTOM_COURSE_URLS': False,
        'DASHBOARD_FACEBOOK': False,
        'CERTIFICATE_FACEBOOK': False,
        'CERTIFICATE_FACEBOOK_TEXT': None,
        'DASHBOARD_TWITTER': False,
        'DASHBOARD_TWITTER_TEXT': None
    },
    # Course discovery feature
    'ENABLE_COURSE_DISCOVERY': False,
    # Software secure fake page feature flag
    'ENABLE_SOFTWARE_SECURE_FAKE': False,
    # Teams feature
    'ENABLE_TEAMS': False,
    # Show video bumper in LMS
    'ENABLE_VIDEO_BUMPER': False,
    # How many seconds to show the bumper again, default is 7 days:
    'SHOW_BUMPER_PERIODICITY': 7 * 24 * 3600,
    # Enable OpenBadge support. See the BADGR_* settings later in this file.
    'ENABLE_OPENBADGES': False,
    # Credit course API
    'ENABLE_CREDIT_API': False,
    # The block types to disable need to be specified in "x block disable config" in django admin.
    'ENABLE_DISABLING_XBLOCK_TYPES': True,
}
# Static asset files matching this pattern are skipped on course import
# (mac resource forks, .DS_Store, editor backup files ending in "~").
ASSET_IGNORE_REGEX = r"(^\._.*$)|(^\.DS_Store$)|(^.*~$)"
# Default user groups, used for A/B testing.
DEFAULT_GROUPS = []
# When true, fabricate random scores so the profile graphs can be debugged.
GENERATE_PROFILE_SCORES = False
# Seconds to wait between successive requests to XQueue.
XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5
############################# SET PATH INFORMATION #############################
# Filesystem layout derived from this file's location. NOTE: these are
# path.py `path` objects, so `/` performs path joining, not division.
PROJECT_ROOT = path(__file__).abspath().dirname().dirname()  # /edx-platform/lms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
ENV_ROOT = REPO_ROOT.dirname()  # virtualenv dir /edx-platform is in
COURSES_ROOT = ENV_ROOT / "data"
#Bharat MOOCs 17th Sept - display count of courses, faculty and certificates
CMS_ROOT =REPO_ROOT / "cms"
DATA_DIR = COURSES_ROOT
# Make the LMS, common, and CMS django app directories importable.
# TODO: Remove the rest of the sys.path modification here and in cms/envs/common.py
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
#Bharat MOOCs 17th Sept - display count of courses, faculty and certificates
sys.path.append(CMS_ROOT / 'djangoapps')
# For Node.js: build a NODE_PATH so node tools can resolve vendored JS.
system_node_path = os.environ.get("NODE_PATH", REPO_ROOT / 'node_modules')
node_paths = [
    COMMON_ROOT / "static/js/vendor",
    COMMON_ROOT / "static/coffee/src",
    system_node_path,
]
NODE_PATH = ':'.join(node_paths)
# For geolocation ip database (MaxMind legacy GeoIP .dat files)
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat"
# Where to look for a status message to display site-wide
STATUS_MESSAGE_PATH = ENV_ROOT / "status_message.json"
############################ OpenID Provider  ##################################
# Roots trusted to consume this instance's OpenID provider responses.
OPENID_PROVIDER_TRUSTED_ROOTS = ['cs50.net', '*.cs50.net']
############################ OAUTH2 Provider ###################################
# OpenID Connect issuer ID. Normally the URL of the authentication endpoint.
# Per the OIDC spec the issuer must be a case-sensitive https URL; this default
# had a malformed scheme ('https:/example.com/...') which is fixed here.
# Deployments override this placeholder in their environment settings.
OAUTH_OIDC_ISSUER = 'https://example.com/oauth2'
# OpenID Connect claim handlers: three generic oauth2_provider handlers plus
# the edX-specific oauth2_handler, applied in order when building tokens.
OAUTH_OIDC_ID_TOKEN_HANDLERS = (
    'oauth2_provider.oidc.handlers.BasicIDTokenHandler',
    'oauth2_provider.oidc.handlers.ProfileHandler',
    'oauth2_provider.oidc.handlers.EmailHandler',
    'oauth2_handler.IDTokenHandler'
)
# Same handler chain for the UserInfo endpoint responses.
OAUTH_OIDC_USERINFO_HANDLERS = (
    'oauth2_provider.oidc.handlers.BasicUserInfoHandler',
    'oauth2_provider.oidc.handlers.ProfileHandler',
    'oauth2_provider.oidc.handlers.EmailHandler',
    'oauth2_handler.UserInfoHandler'
)
################################## EDX WEB #####################################
# This is where we stick our compiled template files. Most of the app uses Mako
# templates
# IITBX 11-APR-2016: theme changed for hi/en language, redirecting to the
# path /edx/app/edxapp/themes/THEME_NAME (ENV_ROOT / 'themes' / THEME_NAME).
import json
# Read <SERVICE_VARIANT>.env.json at import time to discover THEME_NAME.
# NOTE(review): this opens the file without an explicit encoding and fails
# hard if env.json is absent — presumably guaranteed by the deployment; verify.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
    ENV_TOKENS = json.load(env_file)
THEME_NAME = ENV_TOKENS.get('THEME_NAME', None)
import tempfile
# Compiled Mako modules are cached in the system temp dir.
MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_lms')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [PROJECT_ROOT / 'templates',
    ENV_ROOT / 'themes'/THEME_NAME , #IITBX 11-APR-2016 redirect to theme folder
    COMMON_ROOT / 'templates',
    COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
    COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates']
# This is where Django Template lookup is defined. There are a few of these
# still left lying around.
# IITBX 11-APR-2016: theme dir inserted ahead of the common template dirs so
# theme overrides win the lookup.
TEMPLATE_DIRS = [
    PROJECT_ROOT / "templates",
    ENV_ROOT / 'themes'/THEME_NAME, #IITBX 11-APR-2016 redirect to theme folder
    COMMON_ROOT / 'templates',
    COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
    COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
    COMMON_ROOT / 'static',  # required to statically include common Underscore templates
]
# Context processors applied to every Django template render.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.i18n',
    'django.contrib.auth.context_processors.auth',  # this is required for admin
    'django.core.context_processors.csrf',
    # Added for django-wiki
    'django.core.context_processors.media',
    'django.core.context_processors.tz',
    'django.contrib.messages.context_processors.messages',
    'sekizai.context_processors.sekizai',
    # Hack to get required link URLs to password reset templates
    'edxmako.shortcuts.marketing_link_context_processor',
    # Allows the open edX footer to be leveraged in Django Templates.
    'edxmako.shortcuts.open_source_footer_context_processor',
    # Shoppingcart processor (detects if request.user has a cart)
    'shoppingcart.context_processor.user_has_cart_context_processor',
    # Allows the open edX footer to be leveraged in Django Templates.
    'edxmako.shortcuts.microsite_footer_context_processor',
)
# Bulk email (Bharat MOOCs, 10 Feb 2015): template name and sender address
# used by the course bulk-email feature.
COURSE_EMAIL_TEMPLATE_NAME = "IITBOMBAYXTEMPLATE"
COURSE_EMAIL_FROM_ADDR = "[email protected]"
# The rate-limited backend guards the login endpoint against brute force.
AUTHENTICATION_BACKENDS = (
    'ratelimitbackend.backends.RateLimitModelBackend',
)
# Limits for student file submissions.
STUDENT_FILEUPLOAD_MAX_SIZE = 4 * 1000 * 1000  # 4 MB
MAX_FILEUPLOADS_PER_INPUT = 20
# Textbook image assets; dev machines shouldn't need the book.
# BOOK_URL = '/static/book/'
BOOK_URL = 'https://mitxstatic.s3.amazonaws.com/book_images/'  # For AWS deploys
RSS_TIMEOUT = 600
# Configuration option for when we want to grab server error pages.
STATIC_GRAB = False
DEV_CONTENT = True
# URL prefix for the LMS; the login URLs are derived from it.
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/login'
LOGIN_URL = EDX_ROOT_URL + '/login'
# Legacy single-course defaults (dark code — enable only in local dev settings;
# see lib.util.views.edXhome).
COURSE_NAME = "6.002_Spring_2012"
COURSE_NUMBER = "6.002x"
COURSE_TITLE = "Circuits and Electronics"
ENABLE_MULTICOURSE = False  # set to False to disable multicourse display
WIKI_ENABLED = False
COURSE_DEFAULT = '6.002x_Fall_2012'
COURSE_SETTINGS = {
    '6.002x_Fall_2012': {
        'number': '6.002x',
        'title': 'Circuits and Electronics',
        'xmlpath': '6002x/',
        'location': 'i4x://edx/6002xs12/course/6.002x_Fall_2012',
    }
}
# IP addresses permitted to trigger course reload, etc.
# TODO (vshnayder): Will probably need to change as we get real access control in.
LMS_MIGRATION_ALLOWED_IPS = []
# Standard regexes for extracting course_ids, usage_ids, etc. from URLs, kept
# so deprecated-format (slash-separated) strings still work. They greedily
# grab all chars up to the next slash, including any pluses.
# DHM: I really wanted to ensure the separators were the same (+ or /) but all
# patterns I tried had too many inadvertent side effects :-(
COURSE_KEY_PATTERN = r'(?P<course_key_string>[^/+]+(/|\+)[^/+]+(/|\+)[^/]+)'
# The derived patterns are the same regex with a renamed (or anonymized) group.
COURSE_ID_PATTERN = COURSE_KEY_PATTERN.replace('course_key_string', 'course_id')
COURSE_KEY_REGEX = COURSE_KEY_PATTERN.replace('P<course_key_string>', ':')
USAGE_KEY_PATTERN = r'(?P<usage_key_string>(?:i4x://?[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
ASSET_KEY_PATTERN = r'(?P<asset_key_string>(?:/?c4x(:/)?/[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
USAGE_ID_PATTERN = USAGE_KEY_PATTERN.replace('usage_key_string', 'usage_id')
############################## EVENT TRACKING #################################
# FIXME: Should we be doing this truncation?
TRACK_MAX_EVENT = 50000
DEBUG_TRACK_LOG = False
# Legacy tracking pipeline: everything goes to the "tracking" logger.
TRACKING_BACKENDS = {
    'logger': {
        'ENGINE': 'track.backends.logger.LoggerBackend',
        'OPTIONS': {'name': 'tracking'},
    },
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat', r'^/segmentio/event', r'^/performance']
EVENT_TRACKING_ENABLED = True
# eventtracking pipeline. The tracking-log route shares the legacy logger but
# truncates events and applies the legacy-shim processors.
_tracking_log_backend = {
    'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
    'OPTIONS': {
        'name': 'tracking',
        'max_event_size': TRACK_MAX_EVENT,
    },
}
EVENT_TRACKING_BACKENDS = {
    'tracking_logs': {
        'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
        'OPTIONS': {
            'backends': {'logger': _tracking_log_backend},
            'processors': [
                {'ENGINE': 'track.shim.LegacyFieldMappingProcessor'},
                {'ENGINE': 'track.shim.VideoEventProcessor'},
            ],
        },
    },
    # Segment.io route: only whitelisted event names pass through, and
    # Google-Analytics-style fields are added.
    'segmentio': {
        'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
        'OPTIONS': {
            'backends': {
                'segment': {'ENGINE': 'eventtracking.backends.segment.SegmentBackend'},
            },
            'processors': [
                {
                    'ENGINE': 'eventtracking.processors.whitelist.NameWhitelistProcessor',
                    'OPTIONS': {'whitelist': []},
                },
                {'ENGINE': 'track.shim.GoogleAnalyticsProcessor'},
            ],
        },
    },
}
EVENT_TRACKING_PROCESSORS = []
# Backwards compatibility with ENABLE_SQL_TRACKING_LOGS feature flag.
# In the future, adding the backend to TRACKING_BACKENDS should be enough.
if FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
    TRACKING_BACKENDS.update({
        'sql': {
            'ENGINE': 'track.backends.django.DjangoBackend'
        }
    })
    EVENT_TRACKING_BACKENDS.update({
        'sql': {
            'ENGINE': 'track.backends.django.DjangoBackend'
        }
    })
# Segment.io server-side webhook integration. The secret must be configured
# per-deployment; only 'track'-type calls are accepted, and event names
# containing '.bi.' are dropped.
TRACKING_SEGMENTIO_WEBHOOK_SECRET = None
TRACKING_SEGMENTIO_ALLOWED_TYPES = ['track']
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES = ['.bi.']
# Map Segment.io library names to edX event-source labels.
TRACKING_SEGMENTIO_SOURCE_MAP = {
    'analytics-android': 'mobile',
    'analytics-ios': 'mobile',
}
######################## GOOGLE ANALYTICS ###########################
GOOGLE_ANALYTICS_ACCOUNT = None  # GA tracking id; configured per deployment
GOOGLE_ANALYTICS_LINKEDIN = 'GOOGLE_ANALYTICS_LINKEDIN_DUMMY'
######################## OPTIMIZELY ###########################
OPTIMIZELY_PROJECT_ID = None
######################## subdomain specific settings ###########################
# Per-subdomain course listings and branding; populated by env-specific files
# when the SUBDOMAIN_* feature flags are enabled.
COURSE_LISTINGS = {}
SUBDOMAIN_BRANDING = {}
VIRTUAL_UNIVERSITIES = []
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# These are the Mixins that should be added to every XBlock.
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin)
# Allow any XBlock in the LMS
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############# ModuleStore Configuration ##########
# The LMS only serves published content.
MODULESTORE_BRANCH = 'published-only'
CONTENTSTORE = None
# Shared MongoDB connection settings for the document stores below.
DOC_STORE_CONFIG = {
    'host': 'localhost',
    'db': 'xmodule',
    'collection': 'modulestore',
    # If 'asset_collection' defined, it'll be used
    # as the collection name for asset metadata.
    # Otherwise, a default collection name will be used.
}
# Mixed modulestore: tries split (new-style), then draft mongo, then XML
# courses loaded from DATA_DIR, in that order.
MODULESTORE = {
    'default': {
        'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
        'OPTIONS': {
            'mappings': {},
            'stores': [
                {
                    'NAME': 'split',
                    'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
                    'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
                    'OPTIONS': {
                        'default_class': 'xmodule.hidden_module.HiddenDescriptor',
                        'fs_root': DATA_DIR,
                        'render_template': 'edxmako.shortcuts.render_to_string',
                    }
                },
                {
                    'NAME': 'draft',
                    'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore',
                    'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
                    'OPTIONS': {
                        'default_class': 'xmodule.hidden_module.HiddenDescriptor',
                        'fs_root': DATA_DIR,
                        'render_template': 'edxmako.shortcuts.render_to_string',
                    }
                },
                {
                    'NAME': 'xml',
                    'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
                    'OPTIONS': {
                        'data_dir': DATA_DIR,
                        'default_class': 'xmodule.hidden_module.HiddenDescriptor',
                    }
                }
            ]
        }
    }
}
#################### Python sandbox ############################################
# Configuration for running untrusted student code in a jailed interpreter.
CODE_JAIL = {
    'python_bin': None,  # path to a sandboxed Python executable; None disables jailing
    'user': 'sandbox',   # OS user the jailed process runs as
    'limits': {
        'CPU': 1,        # CPU-seconds allowed per jailed execution
    },
}
# Courses whose id matches one of these regexes may run unsafe (unjailed)
# code. For example:
#
#   COURSES_WITH_UNSAFE_CODE = [
#       r"Harvard/XY123.1/.*"
#   ]
COURSES_WITH_UNSAFE_CODE = []
############################### DJANGO BUILT-INS ###############################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
USE_TZ = True
SESSION_COOKIE_SECURE = False
# CMS base host:port (Studio), used when building links from LMS to Studio.
CMS_BASE = 'localhost:8001'
# Site info
SITE_ID = 1
SITE_NAME = "example.com"
HTTPS = 'on'
ROOT_URLCONF = 'lms.urls'
# NOTE: Please set ALLOWED_HOSTS to some sane value, as we do not allow the default '*'
# Platform Email: console backend by default; addresses are deployment
# placeholders overridden in env-specific settings.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
TECH_SUPPORT_EMAIL = '[email protected]'
CONTACT_EMAIL = '[email protected]'
BUGS_EMAIL = '[email protected]'
UNIVERSITY_EMAIL = '[email protected]'
PRESS_EMAIL = '[email protected]'
FINANCE_EMAIL = ''
ADMINS = ()
MANAGERS = ADMINS
# Platform revision: environment variable wins; otherwise ask the git backend.
EDX_PLATFORM_REVISION = os.environ.get('EDX_PLATFORM_REVISION')
if not EDX_PLATFORM_REVISION:
    try:
        # Get git revision of the current file
        EDX_PLATFORM_REVISION = dealer.git.Backend(path=REPO_ROOT).revision
    except TypeError:
        # Not a git repository
        EDX_PLATFORM_REVISION = 'unknown'
# Static content
STATIC_URL = '/static/'
STATIC_ROOT = ENV_ROOT / "staticfiles"
STATICFILES_DIRS = [
    COMMON_ROOT / "static",
    PROJECT_ROOT / "static",
]
FAVICON_PATH = 'images/favicon.ico'
# User-uploaded content
MEDIA_ROOT = '/edx/var/edxapp/media/'
MEDIA_URL = '/media/'
# Locale/Internationalization
TIME_ZONE = 'America/New_York'  # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en'  # http://www.i18nguy.com/unicode/language-identifiers.html
# these languages display right to left
LANGUAGES_BIDI = ("he", "ar", "fa", "ur", "fa-ir", "rtl")
# Released/translatable languages, as (code, native-name) pairs.
# Sourced from http://www.localeplanet.com/icu/ and wikipedia
LANGUAGES = (
    ('en', u'English'),
    ('rtl', u'Right-to-Left Test Language'),
    ('eo', u'Dummy Language (Esperanto)'),  # Dummy language used for testing
    ('fake2', u'Fake translations'),        # Another dummy language for testing (not pushed to prod)
    ('am', u'አማርኛ'),  # Amharic
    ('ar', u'العربية'),  # Arabic
    #BharatMooc
    ('as' , u' বািতল'), # Assamese — NOTE(review): native label looks garbled; confirm against the translation files
    ('az', u'azərbaycanca'),  # Azerbaijani
    ('bg-bg', u'български (България)'),  # Bulgarian (Bulgaria)
    ('bn-bd', u'বাংলা (বাংলাদেশ)'),  # Bengali (Bangladesh)
    ('bn-in', u'বাংলা (ভারত)'),  # Bengali (India)
    ('bs', u'bosanski'),  # Bosnian
    ('ca', u'Català'),  # Catalan
    ('ca@valencia', u'Català (València)'),  # Catalan (Valencia)
    ('cs', u'Čeština'),  # Czech
    ('cy', u'Cymraeg'),  # Welsh
    ('da', u'dansk'),  # Danish
    ('de-de', u'Deutsch (Deutschland)'),  # German (Germany)
    ('el', u'Ελληνικά'),  # Greek
    ('en-uk', u'English (United Kingdom)'),  # English (United Kingdom)
    ('en@lolcat', u'LOLCAT English'),  # LOLCAT English
    ('en@pirate', u'Pirate English'),  # Pirate English
    ('es-419', u'Español (Latinoamérica)'),  # Spanish (Latin America)
    ('es-ar', u'Español (Argentina)'),  # Spanish (Argentina)
    ('es-ec', u'Español (Ecuador)'),  # Spanish (Ecuador)
    ('es-es', u'Español (España)'),  # Spanish (Spain)
    ('es-mx', u'Español (México)'),  # Spanish (Mexico)
    ('es-pe', u'Español (Perú)'),  # Spanish (Peru)
    ('et-ee', u'Eesti (Eesti)'),  # Estonian (Estonia)
    ('eu-es', u'euskara (Espainia)'),  # Basque (Spain)
    ('fa', u'فارسی'),  # Persian
    ('fa-ir', u'فارسی (ایران)'),  # Persian (Iran)
    ('fi-fi', u'Suomi (Suomi)'),  # Finnish (Finland)
    ('fil', u'Filipino'),  # Filipino
    ('fr', u'Français'),  # French
    ('gl', u'Galego'),  # Galician
    ('gu', u'ગુજરાતી'),  # Gujarati
    ('he', u'עברית'),  # Hebrew
    ('hi', u'हिन्दी'),  # Hindi
    ('hr', u'hrvatski'),  # Croatian
    ('hu', u'magyar'),  # Hungarian
    ('hy-am', u'Հայերեն (Հայաստան)'),  # Armenian (Armenia)
    ('id', u'Bahasa Indonesia'),  # Indonesian
    ('it-it', u'Italiano (Italia)'),  # Italian (Italy)
    ('ja-jp', u'日本語 (日本)'),  # Japanese (Japan)
    ('kk-kz', u'қазақ тілі (Қазақстан)'),  # Kazakh (Kazakhstan)
    ('km-kh', u'ភាសាខ្មែរ (កម្ពុជា)'),  # Khmer (Cambodia)
    ('kn', u'ಕನ್ನಡ'),  # Kannada
    ('ko-kr', u'한국어 (대한민국)'),  # Korean (Korea)
    ('lt-lt', u'Lietuvių (Lietuva)'),  # Lithuanian (Lithuania)
    ('ml', u'മലയാളം'),  # Malayalam
    ('mn', u'Монгол хэл'),  # Mongolian
    #BharatMooc
    ('mr', u'मराठी'),  # Marathi
    ('ms', u'Bahasa Melayu'),  # Malay
    ('nb', u'Norsk bokmål'),  # Norwegian Bokmål
    ('ne', u'नेपाली'),  # Nepali
    ('nl-nl', u'Nederlands (Nederland)'),  # Dutch (Netherlands)
    ('or', u'ଓଡ଼ିଆ'),  # Oriya
    ('pl', u'Polski'),  # Polish
    ('pt-br', u'Português (Brasil)'),  # Portuguese (Brazil)
    ('pt-pt', u'Português (Portugal)'),  # Portuguese (Portugal)
    ('ro', u'română'),  # Romanian
    ('ru', u'Русский'),  # Russian
    ('si', u'සිංහල'),  # Sinhala
    ('sk', u'Slovenčina'),  # Slovak
    ('sl', u'Slovenščina'),  # Slovenian
    ('sq', u'shqip'),  # Albanian
    ('sr', u'Српски'),  # Serbian
    ('sv', u'svenska'),  # Swedish
    ('sw', u'Kiswahili'),  # Swahili
    ('ta', u'தமிழ்'),  # Tamil
    #BharatMooc
    ('te', u'తెలుగు'),  # Telugu
    ('th', u'ไทย'),  # Thai
    ('tr-tr', u'Türkçe (Türkiye)'),  # Turkish (Turkey)
    ('uk', u'Українська'),  # Ukrainian
    ('ur', u'اردو'),  # Urdu
    ('vi', u'Tiếng Việt'),  # Vietnamese
    ('uz', u'Ўзбек'),  # Uzbek
    ('zh-cn', u'中文 (简体)'),  # Chinese (China)
    ('zh-hk', u'中文 (香港)'),  # Chinese (Hong Kong)
    ('zh-tw', u'中文 (台灣)'),  # Chinese (Taiwan)
)
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',)  # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Guidelines for translators
TRANSLATORS_GUIDE = 'http://edx.readthedocs.org/projects/edx-developer-guide/en/latest/internationalization/i18n_translators_guide.html'  # pylint: disable=line-too-long
#################################### GITHUB #######################################
# gitreload is used in the LMS workflow to pull content from GitHub; requests
# are only accepted from these advertised public IPs of the GitHub webhook
# servers (listed e.g. at https://github.com/edx/edx-platform/admin/hooks).
ALLOWED_GITRELOAD_IPS = ['207.97.227.253', '50.57.128.197', '108.171.174.178']
#################################### AWS #######################################
# S3BotoStorage insists on a timeout for uploaded assets. We should make it
# permanent instead, but rather than trying to figure out exactly where that
# setting is, bump the expiration to an effectively-permanent 10 years. This
# is only used if DEFAULT_FILE_STORAGE is overridden to use S3 in the global
# settings.py.
AWS_QUERYSTRING_EXPIRE = 10 * 365 * 24 * 60 * 60  # 10 years
################################# SIMPLEWIKI ###################################
SIMPLE_WIKI_REQUIRE_LOGIN_EDIT = True
SIMPLE_WIKI_REQUIRE_LOGIN_VIEW = False
################################# WIKI ###################################
# django-wiki integration; permission callables come from course_wiki.
from course_wiki import settings as course_wiki_settings
WIKI_ACCOUNT_HANDLING = False
WIKI_EDITOR = 'course_wiki.editors.CodeMirror'
WIKI_SHOW_MAX_CHILDREN = 0  # We don't use the little menu that shows children of an article in the breadcrumb
WIKI_ANONYMOUS = False  # Don't allow anonymous access until the styling is figured out
WIKI_CAN_DELETE = course_wiki_settings.CAN_DELETE
WIKI_CAN_MODERATE = course_wiki_settings.CAN_MODERATE
WIKI_CAN_CHANGE_PERMISSIONS = course_wiki_settings.CAN_CHANGE_PERMISSIONS
WIKI_CAN_ASSIGN = course_wiki_settings.CAN_ASSIGN
WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False
WIKI_LINK_LIVE_LOOKUPS = False
WIKI_LINK_DEFAULT_LEVEL = 2
##### Feedback submission mechanism #####
FEEDBACK_SUBMISSION_EMAIL = None
##### Zendesk #####
ZENDESK_URL = None
ZENDESK_USER = None
ZENDESK_API_KEY = None
##### EMBARGO #####
EMBARGO_SITE_REDIRECT_URL = None
##### shoppingcart Payment #####
PAYMENT_SUPPORT_EMAIL = '[email protected]'
##### Using cybersource by default #####
CC_PROCESSOR_NAME = 'CyberSource'
CC_PROCESSOR = {
'CyberSource': {
'SHARED_SECRET': '',
'MERCHANT_ID': '',
'SERIAL_NUMBER': '',
'ORDERPAGE_VERSION': '7',
'PURCHASE_ENDPOINT': '',
},
'CyberSource2': {
"PURCHASE_ENDPOINT": '',
"SECRET_KEY": '',
"ACCESS_KEY": '',
"PROFILE_ID": '',
}
}
# Setting for PAID_COURSE_REGISTRATION, DOES NOT AFFECT VERIFIED STUDENTS
PAID_COURSE_REGISTRATION_CURRENCY = ['usd', '$']
# Members of this group are allowed to generate payment reports
PAYMENT_REPORT_GENERATOR_GROUP = 'shoppingcart_report_access'
################################# open ended grading config #####################
#By setting up the default settings with an incorrect user name and password,
# will get an error when attempting to connect
OPEN_ENDED_GRADING_INTERFACE = {
'url': 'http://example.com/peer_grading',
'username': 'incorrect_user',
'password': 'incorrect_pass',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller'
}
# Used for testing, debugging peer grading
MOCK_PEER_GRADING = False
# Used for testing, debugging staff grading
MOCK_STAFF_GRADING = False
################################# EdxNotes config #########################
# Configure the LMS to use our stub EdxNotes implementation
EDXNOTES_PUBLIC_API = 'http://localhost:8120/api/v1'
EDXNOTES_INTERNAL_API = 'http://localhost:8120/api/v1'
########################## Parental controls config #######################
# The age at which a learner no longer requires parental consent, or None
# if parental consent is never required.
PARENTAL_CONSENT_AGE_LIMIT = 13
################################# Jasmine ##################################
JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee'
######################### Branded Footer ###################################
# Constants for the footer used on the site and shared with other sites
# (such as marketing and the blog) via the branding API.
# URL for OpenEdX displayed in the footer
FOOTER_OPENEDX_URL = "http://open.edx.org"
# URL for the OpenEdX logo image
# We use logo images served from files.edx.org so we can (roughly) track
# how many OpenEdX installations are running.
# Site operators can choose from these logo options:
# * https://files.edx.org/openedx-logos/edx-openedx-logo-tag.png
# * https://files.edx.org/openedx-logos/edx-openedx-logo-tag-light.png
# * https://files.edx.org/openedx-logos/edx-openedx-logo-tag-dark.png
FOOTER_OPENEDX_LOGO_IMAGE = "https://files.edx.org/openedx-logos/edx-openedx-logo-tag.png"
# This is just a placeholder image.
# Site operators can customize this with their organization's image.
FOOTER_ORGANIZATION_IMAGE = "images/default-theme/logo.png"
# These are referred to both by the Django asset pipeline
# AND by the branding footer API, which needs to decide which
# version of the CSS to serve.
FOOTER_CSS = {
    "openedx": {
        "ltr": "style-lms-footer",
        "rtl": "style-lms-footer-rtl",
    },
    "edx": {
        "ltr": "style-lms-footer-edx",
        "rtl": "style-lms-footer-edx-rtl",
    },
}
# Cache expiration for the version of the footer served
# by the branding API.
FOOTER_CACHE_TIMEOUT = 30 * 60
# Max age cache control header for the footer (controls browser caching).
FOOTER_BROWSER_CACHE_MAX_AGE = 5 * 60
################################# Deprecation warnings #####################
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
# NOTE(review): `simplefilter` is assumed to be imported earlier in this file
# (e.g. `from warnings import simplefilter`) — confirm. Also note 'ignore'
# silences ALL warning categories, not only DeprecationWarning.
simplefilter('ignore')
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'staticfiles.finders.FileSystemFinder',
    'staticfiles.finders.AppDirectoriesFinder',
    'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'edxmako.makoloader.MakoFilesystemLoader',
    'edxmako.makoloader.MakoAppDirectoriesLoader',
    # 'django.template.loaders.filesystem.Loader',
    # 'django.template.loaders.app_directories.Loader',
)
# Order matters here: several entries document their ordering constraints below.
MIDDLEWARE_CLASSES = (
    'request_cache.middleware.RequestCache',
    'microsite_configuration.middleware.MicrositeMiddleware',
    'django_comment_client.middleware.AjaxExceptionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # Instead of AuthenticationMiddleware, we use a cached backed version
    #'django.contrib.auth.middleware.AuthenticationMiddleware',
    'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
    'student.middleware.UserStandingMiddleware',
    'contentserver.middleware.StaticContentServer',
    'crum.CurrentRequestUserMiddleware',
    # Adds user tags to tracking events
    # Must go before TrackMiddleware, to get the context set up
    'openedx.core.djangoapps.user_api.middleware.UserTagsEventContextMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'track.middleware.TrackMiddleware',
    # CORS and CSRF
    'corsheaders.middleware.CorsMiddleware',
    'cors_csrf.middleware.CorsCSRFMiddleware',
    'cors_csrf.middleware.CsrfCrossDomainCookieMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'splash.middleware.SplashMiddleware',
    'geoinfo.middleware.CountryMiddleware',
    'embargo.middleware.EmbargoMiddleware',
    # Allows us to set user preferences
    # should be after DarkLangMiddleware
    'lang_pref.middleware.LanguagePreferenceMiddleware',
    # Allows us to dark-launch particular languages.
    # Must be after LangPrefMiddleware, so ?preview-lang query params can override
    # user's language preference. ?clear-lang resets to user's language preference.
    'dark_lang.middleware.DarkLangMiddleware',
    # Detects user-requested locale from 'accept-language' header in http request.
    # Must be after DarkLangMiddleware.
    # TODO: Re-import the Django version once we upgrade to Django 1.8 [PLAT-671]
    # 'django.middleware.locale.LocaleMiddleware',
    'django_locale.middleware.LocaleMiddleware',
    'django.middleware.transaction.TransactionMiddleware',
    # 'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django_comment_client.utils.ViewNameMiddleware',
    'codejail.django_integration.ConfigureCodeJailMiddleware',
    # catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
    'ratelimitbackend.middleware.RateLimitMiddleware',
    # needs to run after locale middleware (or anything that modifies the request context)
    'edxmako.middleware.MakoMiddleware',
    # for expiring inactive sessions
    'session_inactivity_timeout.middleware.SessionInactivityTimeout',
    # use Django built in clickjacking protection
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # to redirect unenrolled students to the course info page
    'courseware.middleware.RedirectUnenrolledMiddleware',
    'course_wiki.middleware.WikiAccessMiddleware',
    # This must be last
    'microsite_configuration.middleware.MicrositeSessionCookieDomainMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
# NOTE(review): 'ALLOW' is not a standard X-Frame-Options directive (valid
# values are 'DENY' and 'SAMEORIGIN'); browsers ignore unknown values, which
# effectively permits framing — confirm that is the intent here.
X_FRAME_OPTIONS = 'ALLOW'
############################### Pipeline #######################################
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
from openedx.core.lib.rooted_paths import rooted_glob
# The lists below enumerate source files for each django-pipeline JS bundle;
# they are referenced by name in PIPELINE_JS further down.
courseware_js = (
    [
        'coffee/src/' + pth + '.js'
        for pth in ['courseware', 'histogram', 'navigation', 'time']
    ] +
    ['js/' + pth + '.js' for pth in ['ajax-error']] +
    sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/modules/**/*.js'))
)
courseware_search_js = ['js/search/course/main.js']
# Before a student accesses courseware, we do not
# need many of the JS dependencies. This includes
# only the dependencies used everywhere in the LMS
# (including the dashboard/account/profile pages)
# Currently, this partially duplicates the "main vendor"
# JavaScript file, so only one of the two should be included
# on a page at any time.
# In the future, we will likely refactor this to use
# RequireJS and an optimizer.
base_vendor_js = [
    'js/vendor/jquery.min.js',
    'js/vendor/jquery.cookie.js',
    'js/vendor/url.min.js',
    'js/vendor/underscore-min.js',
    'js/vendor/require.js',
    'js/RequireJS-namespace-undefine.js',
]
main_vendor_js = base_vendor_js + [
    'js/vendor/json2.js',
    'js/vendor/jquery-ui.min.js',
    'js/vendor/jquery.qtip.min.js',
    'js/vendor/swfobject/swfobject.js',
    'js/vendor/jquery.ba-bbq.min.js',
    'js/vendor/URI.min.js',
]
dashboard_js = (
    sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/dashboard/**/*.js'))
)
dashboard_search_js = ['js/search/dashboard/main.js']
discussion_js = sorted(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/discussion/**/*.js'))
rwd_header_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/utils/rwd_header.js'))
staff_grading_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/staff_grading/**/*.js'))
open_ended_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/open_ended/**/*.js'))
notes_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/notes/**/*.js'))
instructor_dash_js = (
    sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/instructor_dashboard/**/*.js')) +
    sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/instructor_dashboard/**/*.js'))
)
# JavaScript used by the student account and profile pages
# These are not courseware, so they do not need many of the courseware-specific
# JavaScript modules.
student_account_js = [
    'js/utils/rwd_header.js',
    'js/utils/edx.utils.validate.js',
    'js/form.ext.js',
    'js/my_courses_dropdown.js',
    'js/toggle_login_modal.js',
    'js/sticky_filter.js',
    'js/query-params.js',
    'js/src/utility.js',
    'js/src/accessibility_tools.js',
    'js/src/ie_shim.js',
    'js/src/string_utils.js',
    'js/student_account/enrollment.js',
    'js/student_account/emailoptin.js',
    'js/student_account/shoppingcart.js',
    'js/student_account/models/LoginModel.js',
    'js/student_account/models/RegisterModel.js',
    'js/student_account/models/PasswordResetModel.js',
    'js/student_account/views/FormView.js',
    'js/student_account/views/LoginView.js',
    'js/student_account/views/HintedLoginView.js',
    'js/student_account/views/RegisterView.js',
    'js/student_account/views/PasswordResetView.js',
    'js/student_account/views/AccessView.js',
    'js/student_account/views/InstitutionLoginView.js',
    'js/student_account/accessApp.js',
]
verify_student_js = [
    'js/form.ext.js',
    'js/my_courses_dropdown.js',
    'js/toggle_login_modal.js',
    'js/sticky_filter.js',
    'js/query-params.js',
    'js/src/utility.js',
    'js/src/accessibility_tools.js',
    'js/src/ie_shim.js',
    'js/src/string_utils.js',
    'js/verify_student/models/verification_model.js',
    'js/verify_student/views/error_view.js',
    'js/verify_student/views/image_input_view.js',
    'js/verify_student/views/webcam_photo_view.js',
    'js/verify_student/views/step_view.js',
    'js/verify_student/views/intro_step_view.js',
    'js/verify_student/views/make_payment_step_view.js',
    'js/verify_student/views/payment_confirmation_step_view.js',
    'js/verify_student/views/face_photo_step_view.js',
    'js/verify_student/views/id_photo_step_view.js',
    'js/verify_student/views/review_photos_step_view.js',
    'js/verify_student/views/enrollment_confirmation_step_view.js',
    'js/verify_student/views/pay_and_verify_view.js',
    'js/verify_student/pay_and_verify.js',
]
reverify_js = [
    'js/verify_student/views/error_view.js',
    'js/verify_student/views/image_input_view.js',
    'js/verify_student/views/webcam_photo_view.js',
    'js/verify_student/views/step_view.js',
    'js/verify_student/views/face_photo_step_view.js',
    'js/verify_student/views/id_photo_step_view.js',
    'js/verify_student/views/review_photos_step_view.js',
    'js/verify_student/views/reverify_success_step_view.js',
    'js/verify_student/models/verification_model.js',
    'js/verify_student/views/reverify_view.js',
    'js/verify_student/reverify.js',
]
incourse_reverify_js = [
    'js/verify_student/views/error_view.js',
    'js/verify_student/views/image_input_view.js',
    'js/verify_student/views/webcam_photo_view.js',
    'js/verify_student/models/reverification_model.js',
    'js/verify_student/views/incourse_reverify_view.js',
    'js/verify_student/incourse_reverify.js',
]
ccx_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/ccx/**/*.js'))
discovery_js = ['js/discovery/main.js']
certificates_web_view_js = [
    'js/vendor/jquery.min.js',
    'js/vendor/jquery.cookie.js',
    'js/src/logger.js',
    'js/utils/facebook.js',
]
credit_web_view_js = [
    'js/vendor/jquery.min.js',
    'js/vendor/jquery.cookie.js',
    'js/src/logger.js',
]
# django-pipeline CSS bundles: each entry maps a bundle name to its source
# files and the compiled output path served to clients.
PIPELINE_CSS = {
    'style-vendor': {
        'source_filenames': [
            'css/vendor/font-awesome.css',
            'css/vendor/jquery.qtip.min.css',
            'css/vendor/responsive-carousel/responsive-carousel.css',
            'css/vendor/responsive-carousel/responsive-carousel.slide.css',
        ],
        'output_filename': 'css/lms-style-vendor.css',
    },
    'style-vendor-tinymce-content': {
        'source_filenames': [
            'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css'
        ],
        'output_filename': 'css/lms-style-vendor-tinymce-content.css',
    },
    'style-vendor-tinymce-skin': {
        'source_filenames': [
            'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
        ],
        'output_filename': 'css/lms-style-vendor-tinymce-skin.css',
    },
    'style-main': {
        # this is unnecessary and can be removed
        'source_filenames': [
            'css/lms-main.css',
        ],
        'output_filename': 'css/lms-main.css',
    },
    'style-main-rtl': {
        # this is unnecessary and can be removed
        'source_filenames': [
            'css/lms-main-rtl.css',
        ],
        'output_filename': 'css/lms-main-rtl.css',
    },
    'style-course-vendor': {
        'source_filenames': [
            'js/vendor/CodeMirror/codemirror.css',
            'css/vendor/jquery.treeview.css',
            'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
        ],
        'output_filename': 'css/lms-style-course-vendor.css',
    },
    'style-course': {
        'source_filenames': [
            'css/lms-course.css',
            'xmodule/modules.css',
        ],
        'output_filename': 'css/lms-course.css',
    },
    'style-course-rtl': {
        'source_filenames': [
            'css/lms-course-rtl.css',
            'xmodule/modules.css',
        ],
        'output_filename': 'css/lms-course-rtl.css',
    },
    'style-student-notes': {
        'source_filenames': [
            'css/vendor/edxnotes/annotator.min.css',
        ],
        'output_filename': 'css/lms-style-student-notes.css',
    },
    'style-xmodule-annotations': {
        'source_filenames': [
            'css/vendor/ova/annotator.css',
            'css/vendor/ova/edx-annotator.css',
            'css/vendor/ova/video-js.min.css',
            'css/vendor/ova/rangeslider.css',
            'css/vendor/ova/share-annotator.css',
            'css/vendor/ova/richText-annotator.css',
            'css/vendor/ova/tags-annotator.css',
            'css/vendor/ova/flagging-annotator.css',
            'css/vendor/ova/diacritic-annotator.css',
            'css/vendor/ova/grouping-annotator.css',
            'css/vendor/ova/ova.css',
            'js/vendor/ova/catch/css/main.css'
        ],
        'output_filename': 'css/lms-style-xmodule-annotations.css',
    },
    # Footer bundle names are taken from FOOTER_CSS (defined above) so the
    # branding API and the asset pipeline stay in sync.
    FOOTER_CSS['openedx']['ltr']: {
        'source_filenames': [
            'css/lms-footer.css',
        ],
        'output_filename': 'css/lms-footer.css',
    },
    FOOTER_CSS['openedx']['rtl']: {
        'source_filenames': [
            'css/lms-footer-rtl.css',
        ],
        'output_filename': 'css/lms-footer-rtl.css'
    },
    FOOTER_CSS['edx']['ltr']: {
        'source_filenames': [
            'css/lms-footer-edx.css',
        ],
        'output_filename': 'css/lms-footer-edx.css'
    },
    FOOTER_CSS['edx']['rtl']: {
        'source_filenames': [
            'css/lms-footer-edx-rtl.css',
        ],
        'output_filename': 'css/lms-footer-edx-rtl.css'
    },
    'style-certificates': {
        'source_filenames': [
            'certificates/css/main-ltr.css',
            'css/vendor/font-awesome.css',
        ],
        'output_filename': 'css/certificates-style.css'
    },
    'style-certificates-rtl': {
        'source_filenames': [
            'certificates/css/main-rtl.css',
            'css/vendor/font-awesome.css',
        ],
        'output_filename': 'css/certificates-style-rtl.css'
    },
}
# JS already included in one of the dedicated bundles above; it must be
# excluded from the catch-all 'application' bundle sources below.
# Computed once instead of duplicating the same set expression twice.
_bundled_js = set(
    courseware_js + discussion_js + staff_grading_js +
    open_ended_js + notes_js + instructor_dash_js
)
# All remaining coffee-compiled JS from the common and project static trees.
common_js = set(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/**/*.js')) - _bundled_js
project_js = set(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/**/*.js')) - _bundled_js
# django-pipeline JS bundles: bundle name -> source files + compiled output.
PIPELINE_JS = {
    'application': {
        # Application will contain all paths not in courseware_only_js
        'source_filenames': ['js/xblock/core.js'] + sorted(common_js) + sorted(project_js) + [
            'js/form.ext.js',
            'js/my_courses_dropdown.js',
            'js/toggle_login_modal.js',
            'js/sticky_filter.js',
            'js/query-params.js',
            'js/src/utility.js',
            'js/src/accessibility_tools.js',
            'js/src/ie_shim.js',
            'js/src/string_utils.js',
            'js/src/logger.js',
        ],
        'output_filename': 'js/lms-application.js',
    },
    'courseware': {
        'source_filenames': courseware_js,
        'output_filename': 'js/lms-courseware.js',
    },
    'courseware_search': {
        'source_filenames': courseware_search_js,
        'output_filename': 'js/lms-courseware-search.js',
    },
    'base_vendor': {
        'source_filenames': base_vendor_js,
        'output_filename': 'js/lms-base-vendor.js',
    },
    'main_vendor': {
        'source_filenames': main_vendor_js,
        'output_filename': 'js/lms-main_vendor.js',
    },
    'module-descriptor-js': {
        'source_filenames': rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js'),
        'output_filename': 'js/lms-module-descriptors.js',
    },
    'module-js': {
        'source_filenames': rooted_glob(COMMON_ROOT / 'static', 'xmodule/modules/js/*.js'),
        'output_filename': 'js/lms-modules.js',
    },
    'discussion': {
        'source_filenames': discussion_js,
        'output_filename': 'js/discussion.js',
    },
    'staff_grading': {
        'source_filenames': staff_grading_js,
        'output_filename': 'js/staff_grading.js',
    },
    'open_ended': {
        'source_filenames': open_ended_js,
        'output_filename': 'js/open_ended.js',
    },
    'notes': {
        'source_filenames': notes_js,
        'output_filename': 'js/notes.js',
    },
    'instructor_dash': {
        'source_filenames': instructor_dash_js,
        'output_filename': 'js/instructor_dash.js',
    },
    'dashboard': {
        'source_filenames': dashboard_js,
        'output_filename': 'js/dashboard.js'
    },
    'dashboard_search': {
        'source_filenames': dashboard_search_js,
        'output_filename': 'js/dashboard-search.js',
    },
    'rwd_header': {
        'source_filenames': rwd_header_js,
        'output_filename': 'js/rwd_header.js'
    },
    'student_account': {
        'source_filenames': student_account_js,
        'output_filename': 'js/student_account.js'
    },
    'verify_student': {
        'source_filenames': verify_student_js,
        'output_filename': 'js/verify_student.js'
    },
    'reverify': {
        'source_filenames': reverify_js,
        'output_filename': 'js/reverify.js'
    },
    'incourse_reverify': {
        'source_filenames': incourse_reverify_js,
        'output_filename': 'js/incourse_reverify.js'
    },
    'ccx': {
        'source_filenames': ccx_js,
        'output_filename': 'js/ccx.js'
    },
    'footer_edx': {
        'source_filenames': ['js/footer-edx.js'],
        'output_filename': 'js/footer-edx.js'
    },
    'discovery': {
        'source_filenames': discovery_js,
        'output_filename': 'js/discovery.js'
    },
    'certificates_wv': {
        'source_filenames': certificates_web_view_js,
        'output_filename': 'js/certificates/web_view.js'
    },
    'utility': {
        'source_filenames': ['js/src/utility.js'],
        'output_filename': 'js/utility.js'
    },
    'credit_wv': {
        'source_filenames': credit_web_view_js,
        'output_filename': 'js/credit/web_view.js'
    }
}
PIPELINE_DISABLE_WRAPPER = True
# Compile all coffee files in course data directories if they are out of date.
# TODO: Remove this once we move data into Mongo. This is only temporary while
# course data directories are still in use.
if os.path.isdir(DATA_DIR):
    for course_dir in os.listdir(DATA_DIR):
        js_dir = DATA_DIR / course_dir / "js"
        if not os.path.isdir(js_dir):
            continue
        for filename in os.listdir(js_dir):
            if filename.endswith('coffee'):
                new_filename = os.path.splitext(filename)[0] + ".js"
                if os.path.exists(js_dir / new_filename):
                    coffee_timestamp = os.stat(js_dir / filename).st_mtime
                    js_timestamp = os.stat(js_dir / new_filename).st_mtime
                    # Compiled output is newer than the source: nothing to do.
                    if coffee_timestamp <= js_timestamp:
                        continue
                    # Remove the stale compiled file directly instead of
                    # shelling out to `rm` (safer with spaces or shell
                    # metacharacters in course/file names, and portable).
                    os.remove(js_dir / new_filename)
                os.system("coffee -c %s" % (js_dir / filename))
# CSS is pre-compiled (sass), so no extra compressor; JS is minified by uglify.
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = "pipeline.compressors.uglifyjs.UglifyJSCompressor"
STATICFILES_IGNORE_PATTERNS = (
    "sass/*",
    "coffee/*",
    # Symlinks used by js-test-tool
    "xmodule_js",
)
PIPELINE_UGLIFYJS_BINARY = 'node_modules/.bin/uglifyjs'
# Setting that will only affect the edX version of django-pipeline until our changes are merged upstream
PIPELINE_COMPILE_INPLACE = True
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
# Queue names below are also used as routing keys (see BULK_EMAIL_ROUTING_KEY
# and GRADES_DOWNLOAD_ROUTING_KEY later in this file).
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
HIGH_MEM_QUEUE = 'edx.core.high_mem'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
    HIGH_PRIORITY_QUEUE: {},
    LOW_PRIORITY_QUEUE: {},
    DEFAULT_PRIORITY_QUEUE: {},
    HIGH_MEM_QUEUE: {},
}
# let logging work as configured:
CELERYD_HIJACK_ROOT_LOGGER = False
################################ Bulk Email ###################################
# Suffix used to construct 'from' email address for bulk emails.
# A course-specific identifier is prepended.
BULK_EMAIL_DEFAULT_FROM_EMAIL = '[email protected]'
# Parameters for breaking down course enrollment into subtasks.
BULK_EMAIL_EMAILS_PER_TASK = 100
# Initial delay used for retrying tasks. Additional retries use
# longer delays. Value is in seconds.
BULK_EMAIL_DEFAULT_RETRY_DELAY = 30
# Maximum number of retries per task for errors that are not related
# to throttling.
BULK_EMAIL_MAX_RETRIES = 5
# Maximum number of retries per task for errors that are related to
# throttling. If this is not set, then there is no cap on such retries.
BULK_EMAIL_INFINITE_RETRY_CAP = 1000
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE
# We also define a queue for smaller jobs so that large courses don't block
# smaller emails (see BULK_EMAIL_JOB_SIZE_THRESHOLD setting)
BULK_EMAIL_ROUTING_KEY_SMALL_JOBS = LOW_PRIORITY_QUEUE
# For emails with fewer than these number of recipients, send them through
# a different queue to avoid large courses blocking emails that are meant to be
# sent to self and staff
BULK_EMAIL_JOB_SIZE_THRESHOLD = 100
# Flag to indicate if individual email addresses should be logged as they are sent
# a bulk email message.
BULK_EMAIL_LOG_SENT_EMAILS = False
# Delay in seconds to sleep between individual mail messages being sent,
# when a bulk email task is retried for rate-related reasons. Choose this
# value depending on the number of workers that might be sending email in
# parallel, and what the SES rate is.
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = 0.02
############################# Email Opt In ####################################
# Minimum age for organization-wide email opt in
EMAIL_OPTIN_MINIMUM_AGE = PARENTAL_CONSENT_AGE_LIMIT
############################## Video ##########################################
YOUTUBE = {
    # YouTube JavaScript API
    'API': 'https://www.youtube.com/iframe_api',
    # URL to get YouTube metadata
    'METADATA_URL': 'https://www.googleapis.com/youtube/v3/videos/',
    # Current youtube api for requesting transcripts.
    # For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
    'TEXT_API': {
        'url': 'video.google.com/timedtext',
        'params': {
            'lang': 'en',
            'v': 'set_youtube_id_of_11_symbols_here',
        },
    },
    'IMAGE_API': 'http://img.youtube.com/vi/{youtube_id}/0.jpg', # /maxresdefault.jpg for 1920*1080
}
YOUTUBE_API_KEY = None
################################### APPS ######################################
# NOTE: app labels must be unique; 'teams' was previously listed twice
# (once under "Team API" and once under "Course teams") — deduplicated here.
INSTALLED_APPS = (
    # Standard ones that are always installed...
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.humanize',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.sites',
    'djcelery',
    'south',
    # History tables
    'simple_history',
    # Database-backed configuration
    'config_models',
    # Monitor the status of services
    'service_status',
    # For asset pipelining
    'edxmako',
    'pipeline',
    'staticfiles',
    'static_replace',
    # Our courseware
    'circuit',
    'courseware',
    'student',
    'static_template_view',
    'staticbook',
    'track',
    'eventtracking.django',
    'util',
    'certificates',
    'dashboard',
    'instructor',
    'instructor_task',
    'open_ended_grading',
    'psychometrics',
    'licenses',
    'openedx.core.djangoapps.course_groups',
    'bulk_email',
    'branding',
    # External auth (OpenID, shib)
    'external_auth',
    'django_openid_auth',
    # OAuth2 Provider
    'provider',
    'provider.oauth2',
    'oauth2_provider',
    'auth_exchange',
    # For the wiki
    'wiki', # The new django-wiki from benjaoming
    'django_notify',
    'course_wiki', # Our customizations
    'mptt',
    'sekizai',
    #'wiki.plugins.attachments',
    'wiki.plugins.links',
    'wiki.plugins.notifications',
    'course_wiki.plugins.markdownedx',
    # Foldit integration
    'foldit',
    # For testing
    'django.contrib.admin', # only used in DEBUG mode
    'django_nose',
    'debug',
    # Discussion forums
    'django_comment_client',
    'django_comment_common',
    'discussion_api',
    'notes',
    'edxnotes',
    # Splash screen
    'splash',
    # Monitoring
    'datadog',
    # User API
    'rest_framework',
    'openedx.core.djangoapps.user_api',
    # Team API / course teams
    'teams',
    # Shopping cart
    'shoppingcart',
    # Notification preferences setting
    'notification_prefs',
    'notifier_api',
    # Different Course Modes
    'course_modes',
    # Enrollment API
    'enrollment',
    # Student Identity Verification
    'verify_student',
    # Dark-launching languages
    'dark_lang',
    # Microsite configuration
    'microsite_configuration',
    # Student Identity Reverification
    'reverification',
    'embargo',
    # Monitoring functionality
    'monitoring',
    # Course action state
    'course_action_state',
    # Additional problem types
    'edx_jsme', # Molecular Structure
    # Country list
    'django_countries',
    # edX Mobile API
    'mobile_api',
    # Surveys
    'survey',
    'lms.djangoapps.lms_xblock',
    'openedx.core.djangoapps.content.course_overviews',
    'openedx.core.djangoapps.content.course_structures',
    'course_structure_api',
    # Mailchimp Syncing
    'mailing',
    # CORS and cross-domain CSRF
    'corsheaders',
    'cors_csrf',
    'commerce',
    # Credit courses
    'openedx.core.djangoapps.credit',
    'xblock_django',
)
######################### CSRF #########################################
# Forwards-compatibility with Django 1.7
# One year (52 weeks) in seconds.
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
######################### MARKETING SITE ###############################
EDXMKTG_LOGGED_IN_COOKIE_NAME = 'edxloggedin'
EDXMKTG_USER_INFO_COOKIE_NAME = 'edx-user-info'
EDXMKTG_USER_INFO_COOKIE_VERSION = 1
MKTG_URLS = {}
# Maps link identifiers used in templates to URL names on the marketing site.
MKTG_URL_LINK_MAP = {
    'ABOUT': 'about',
    'CONTACT': 'contact',
    'FAQ': 'help',
    #IITBombayX May 2016: To remove hard-coding of URL
    'COURSES': 'all_courses',
    'ROOT': 'root',
    'TOS': 'tos',
    'HONOR': 'honor', # If your site does not have an honor code, simply delete this line.
    'PRIVACY': 'privacy',
    #IITBombayX May 2016: To remove hard-coding of URL
    'HOW_IT_WORKS': 'how_it_works',
    'PARTNERS': 'partners',
    'NEWS': 'news',
    'PRESS': 'press',
    #IITBombayX
    'BLOG': 'blog',
    'DONATE': 'donate',
    #Bharat MOOCs 10 Sept 2014
    'RESEARCH': 'research_pedagogy',
    #Bharat MOOCs 14 Oct 2015
    'IITBOMBAYX_FAQ': 'iitbombayx_faq',
    # Verified Certificates
    'WHAT_IS_VERIFIED_CERT': 'verified-certificate',
    'MEMBER' : 'member',
}
################# Social Media Footer Links #######################
# The names list controls the order of social media
# links in the footer.
SOCIAL_MEDIA_FOOTER_NAMES = [
"facebook",
"twitter",
"youtube",
"linkedin",
"google_plus",
"reddit",
]
# The footer URLs dictionary maps social footer names
# to URLs defined in configuration.
SOCIAL_MEDIA_FOOTER_URLS = {}
# The display dictionary defines the title
# and icon class for each social media link.
SOCIAL_MEDIA_FOOTER_DISPLAY = {
"facebook": {
# Translators: This is the website name of www.facebook.com. Please
# translate this the way that Facebook advertises in your language.
"title": _("Facebook"),
"icon": "fa-facebook-square",
"action": _("Like {platform_name} on Facebook")
},
"twitter": {
# Translators: This is the website name of www.twitter.com. Please
# translate this the way that Twitter advertises in your language.
"title": _("Twitter"),
"icon": "fa-twitter",
"action": _("Follow {platform_name} on Twitter")
},
"linkedin": {
# Translators: This is the website name of www.linkedin.com. Please
# translate this the way that LinkedIn advertises in your language.
"title": _("LinkedIn"),
"icon": "fa-linkedin-square",
"action": _("Follow {platform_name} on LinkedIn")
},
"google_plus": {
# Translators: This is the website name of plus.google.com. Please
# translate this the way that Google+ advertises in your language.
"title": _("Google+"),
"icon": "fa-google-plus-square",
"action": _("Follow {platform_name} on Google+")
},
"tumblr": {
# Translators: This is the website name of www.tumblr.com. Please
# translate this the way that Tumblr advertises in your language.
"title": _("Tumblr"),
"icon": "fa-tumblr"
},
"meetup": {
# Translators: This is the website name of www.meetup.com. Please
# translate this the way that MeetUp advertises in your language.
"title": _("Meetup"),
"icon": "fa-calendar"
},
"reddit": {
# Translators: This is the website name of www.reddit.com. Please
# translate this the way that Reddit advertises in your language.
"title": _("Reddit"),
"icon": "fa-reddit",
"action": _("Subscribe to the {platform_name} subreddit"),
},
"vk": {
# Translators: This is the website name of https://vk.com. Please
# translate this the way that VK advertises in your language.
"title": _("VK"),
"icon": "fa-vk"
},
"weibo": {
# Translators: This is the website name of http://www.weibo.com. Please
# translate this the way that Weibo advertises in your language.
"title": _("Weibo"),
"icon": "fa-weibo"
},
"youtube": {
# Translators: This is the website name of www.youtube.com. Please
# translate this the way that YouTube advertises in your language.
"title": _("Youtube"),
"icon": "fa-youtube",
"action": _("Subscribe to the {platform_name} YouTube channel")
}
}
################# Mobile URLS ##########################
# These are URLs to the app store for mobile.
MOBILE_STORE_URLS = {
'apple': '#',
'google': '#'
}
################# Student Verification #################
VERIFY_STUDENT = {
"DAYS_GOOD_FOR": 365, # How many days is a verficiation good for?
}
### This enables the Metrics tab for the Instructor dashboard ###########
FEATURES['CLASS_DASHBOARD'] = False
if FEATURES.get('CLASS_DASHBOARD'):
INSTALLED_APPS += ('class_dashboard',)
################ Enable credit eligibility feature ####################
ENABLE_CREDIT_ELIGIBILITY = False
FEATURES['ENABLE_CREDIT_ELIGIBILITY'] = ENABLE_CREDIT_ELIGIBILITY
######################## CAS authentication ###########################
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = 'https://provide_your_cas_url_here'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
############# Cross-domain requests #################
if FEATURES.get('ENABLE_CORS_HEADERS'):
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ()
CORS_ORIGIN_ALLOW_ALL = False
# Default cache expiration for the cross-domain proxy HTML page.
# This is a static page that can be iframed into an external page
# to simulate cross-domain requests.
XDOMAIN_PROXY_CACHE_TIMEOUT = 60 * 15
###################### Registration ##################################
# For each of the fields, give one of the following values:
# - 'required': to display the field, and make it mandatory
# - 'optional': to display the field, and make it non-mandatory
# - 'hidden': to not display the field
REGISTRATION_EXTRA_FIELDS = {
    #BharatMooc
    'state':'required',
    'city': 'required',
    'pincode': 'required',
    'aadhar_id':'optional',
    #BharatMooc
    'level_of_education': 'optional',
    'gender': 'optional',
    'year_of_birth': 'optional',
    'mailing_address': 'optional',
    'goals': 'optional',
    'honor_code': 'required',
    'terms_of_service': 'hidden',
    'country': 'hidden',
}
########################## CERTIFICATE NAME ########################
CERT_NAME_SHORT = "Certificate"
CERT_NAME_LONG = "Certificate of Achievement"
############ CERTIFICATE VERIFICATION URL (STATIC FILES) ###########
CERTIFICATES_STATIC_VERIFY_URL = "https://verify-test.edx.org/cert/"
#################### Badgr OpenBadges generation #######################
# Be sure to set up images for course modes using the BadgeImageConfiguration model in the certificates app.
BADGR_API_TOKEN = None
# Do not add the trailing slash here.
BADGR_BASE_URL = "http://localhost:8005"
BADGR_ISSUER_SLUG = "example-issuer"
###################### Grade Downloads ######################
# Grade report generation is memory-heavy, so it runs on the high-mem queue.
GRADES_DOWNLOAD_ROUTING_KEY = HIGH_MEM_QUEUE
GRADES_DOWNLOAD = {
    'STORAGE_TYPE': 'localfs',
    'BUCKET': 'edx-grades',
    'ROOT_PATH': '/tmp/edx-s3/grades',
}
FINANCIAL_REPORTS = {
    'STORAGE_TYPE': 'localfs',
    'BUCKET': 'edx-financial-reports',
    'ROOT_PATH': '/tmp/edx-s3/financial_reports',
}
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = 8
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {"UPPER": 1, "LOWER": 1, "DIGITS": 1}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
##################### LinkedIn #####################
INSTALLED_APPS += ('django_openid_auth',)
############################ ORA 2 ############################################
# By default, don't use a file prefix
ORA2_FILE_PREFIX = None
# Default File Upload Storage bucket and prefix. Used by the FileUpload Service.
FILE_UPLOAD_STORAGE_BUCKET_NAME = 'edxuploads'
FILE_UPLOAD_STORAGE_PREFIX = 'submissions_attachments'
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = 'UTC'
# Source:
# http://loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt according to http://en.wikipedia.org/wiki/ISO_639-1
# Note that this is used as the set of choices to the `code` field of the
# `LanguageProficiency` model.
ALL_LANGUAGES = (
[u"aa", u"Afar"],
[u"ab", u"Abkhazian"],
[u"af", u"Afrikaans"],
[u"ak", u"Akan"],
[u"sq", u"Albanian"],
[u"am", u"Amharic"],
[u"ar", u"Arabic"],
[u"an", u"Aragonese"],
[u"hy", u"Armenian"],
[u"as", u"Assamese"],
[u"av", u"Avaric"],
[u"ae", u"Avestan"],
[u"ay", u"Aymara"],
[u"az", u"Azerbaijani"],
[u"ba", u"Bashkir"],
[u"bm", u"Bambara"],
[u"eu", u"Basque"],
[u"be", u"Belarusian"],
[u"bn", u"Bengali"],
[u"bh", u"Bihari languages"],
[u"bi", u"Bislama"],
[u"bs", u"Bosnian"],
[u"br", u"Breton"],
[u"bg", u"Bulgarian"],
[u"my", u"Burmese"],
[u"ca", u"Catalan"],
[u"ch", u"Chamorro"],
[u"ce", u"Chechen"],
[u"zh", u"Chinese"],
[u"zh_HANS", u"Simplified Chinese"],
[u"zh_HANT", u"Traditional Chinese"],
[u"cu", u"Church Slavic"],
[u"cv", u"Chuvash"],
[u"kw", u"Cornish"],
[u"co", u"Corsican"],
[u"cr", u"Cree"],
[u"cs", u"Czech"],
[u"da", u"Danish"],
[u"dv", u"Divehi"],
[u"nl", u"Dutch"],
[u"dz", u"Dzongkha"],
[u"en", u"English"],
[u"eo", u"Esperanto"],
[u"et", u"Estonian"],
[u"ee", u"Ewe"],
[u"fo", u"Faroese"],
[u"fj", u"Fijian"],
[u"fi", u"Finnish"],
[u"fr", u"French"],
[u"fy", u"Western Frisian"],
[u"ff", u"Fulah"],
[u"ka", u"Georgian"],
[u"de", u"German"],
[u"gd", u"Gaelic"],
[u"ga", u"Irish"],
[u"gl", u"Galician"],
[u"gv", u"Manx"],
[u"el", u"Greek"],
[u"gn", u"Guarani"],
[u"gu", u"Gujarati"],
[u"ht", u"Haitian"],
[u"ha", u"Hausa"],
[u"he", u"Hebrew"],
[u"hz", u"Herero"],
[u"hi", u"Hindi"],
[u"ho", u"Hiri Motu"],
[u"hr", u"Croatian"],
[u"hu", u"Hungarian"],
[u"ig", u"Igbo"],
[u"is", u"Icelandic"],
[u"io", u"Ido"],
[u"ii", u"Sichuan Yi"],
[u"iu", u"Inuktitut"],
[u"ie", u"Interlingue"],
[u"ia", u"Interlingua"],
[u"id", u"Indonesian"],
[u"ik", u"Inupiaq"],
[u"it", u"Italian"],
[u"jv", u"Javanese"],
[u"ja", u"Japanese"],
[u"kl", u"Kalaallisut"],
[u"kn", u"Kannada"],
[u"ks", u"Kashmiri"],
[u"kr", u"Kanuri"],
[u"kk", u"Kazakh"],
[u"km", u"Central Khmer"],
[u"ki", u"Kikuyu"],
[u"rw", u"Kinyarwanda"],
[u"ky", u"Kirghiz"],
[u"kv", u"Komi"],
[u"kg", u"Kongo"],
[u"ko", u"Korean"],
[u"kj", u"Kuanyama"],
[u"ku", u"Kurdish"],
[u"lo", u"Lao"],
[u"la", u"Latin"],
[u"lv", u"Latvian"],
[u"li", u"Limburgan"],
[u"ln", u"Lingala"],
[u"lt", u"Lithuanian"],
[u"lb", u"Luxembourgish"],
[u"lu", u"Luba-Katanga"],
[u"lg", u"Ganda"],
[u"mk", u"Macedonian"],
[u"mh", u"Marshallese"],
[u"ml", u"Malayalam"],
[u"mi", u"Maori"],
[u"mr", u"Marathi"],
[u"ms", u"Malay"],
[u"mg", u"Malagasy"],
[u"mt", u"Maltese"],
[u"mn", u"Mongolian"],
[u"na", u"Nauru"],
[u"nv", u"Navajo"],
[u"nr", u"Ndebele, South"],
[u"nd", u"Ndebele, North"],
[u"ng", u"Ndonga"],
[u"ne", u"Nepali"],
[u"nn", u"Norwegian Nynorsk"],
[u"nb", u"Bokmål, Norwegian"],
[u"no", u"Norwegian"],
[u"ny", u"Chichewa"],
[u"oc", u"Occitan"],
[u"oj", u"Ojibwa"],
[u"or", u"Oriya"],
[u"om", u"Oromo"],
[u"os", u"Ossetian"],
[u"pa", u"Panjabi"],
[u"fa", u"Persian"],
[u"pi", u"Pali"],
[u"pl", u"Polish"],
[u"pt", u"Portuguese"],
[u"ps", u"Pushto"],
[u"qu", u"Quechua"],
[u"rm", u"Romansh"],
[u"ro", u"Romanian"],
[u"rn", u"Rundi"],
[u"ru", u"Russian"],
[u"sg", u"Sango"],
[u"sa", u"Sanskrit"],
[u"si", u"Sinhala"],
[u"sk", u"Slovak"],
[u"sl", u"Slovenian"],
[u"se", u"Northern Sami"],
[u"sm", u"Samoan"],
[u"sn", u"Shona"],
[u"sd", u"Sindhi"],
[u"so", u"Somali"],
[u"st", u"Sotho, Southern"],
[u"es", u"Spanish"],
[u"sc", u"Sardinian"],
[u"sr", u"Serbian"],
[u"ss", u"Swati"],
[u"su", u"Sundanese"],
[u"sw", u"Swahili"],
[u"sv", u"Swedish"],
[u"ty", u"Tahitian"],
[u"ta", u"Tamil"],
[u"tt", u"Tatar"],
[u"te", u"Telugu"],
[u"tg", u"Tajik"],
[u"tl", u"Tagalog"],
[u"th", u"Thai"],
[u"bo", u"Tibetan"],
[u"ti", u"Tigrinya"],
[u"to", u"Tonga (Tonga Islands)"],
[u"tn", u"Tswana"],
[u"ts", u"Tsonga"],
[u"tk", u"Turkmen"],
[u"tr", u"Turkish"],
[u"tw", u"Twi"],
[u"ug", u"Uighur"],
[u"uk", u"Ukrainian"],
[u"ur", u"Urdu"],
[u"uz", u"Uzbek"],
[u"ve", u"Venda"],
[u"vi", u"Vietnamese"],
[u"vo", u"Volapük"],
[u"cy", u"Welsh"],
[u"wa", u"Walloon"],
[u"wo", u"Wolof"],
[u"xh", u"Xhosa"],
[u"yi", u"Yiddish"],
[u"yo", u"Yoruba"],
[u"za", u"Zhuang"],
[u"zu", u"Zulu"]
)
### Apps only installed in some instances
OPTIONAL_APPS = (
    'mentoring',
    'problem_builder',
    'edx_sga',
    # edx-ora2
    'submissions',
    'openassessment',
    'openassessment.assessment',
    'openassessment.fileupload',
    'openassessment.workflow',
    'openassessment.xblock',
    # edxval
    'edxval',
    # milestones
    'milestones',
)
# Register each optional app only when it is importable in this deployment;
# apps that cannot be found are skipped silently.
for app_name in OPTIONAL_APPS:
    # First attempt to only find the module rather than actually importing it,
    # to avoid circular references - only try to import if it can't be found
    # by find_module, which doesn't work with import hooks
    try:
        imp.find_module(app_name)
    except ImportError:
        try:
            __import__(app_name)
        except ImportError:
            continue
    INSTALLED_APPS += (app_name,)
# Stub for third_party_auth options.
# See common/djangoapps/third_party_auth/settings.py for configuration details.
THIRD_PARTY_AUTH = {}
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
### External auth usage -- prefixes for ENROLLMENT_DOMAIN
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'
### Analytics Data API + Dashboard (Insights) settings
ANALYTICS_DATA_URL = ""
ANALYTICS_DATA_TOKEN = ""
ANALYTICS_DASHBOARD_URL = ""
ANALYTICS_DASHBOARD_NAME = PLATFORM_NAME + " Insights"
# REGISTRATION CODES DISPLAY INFORMATION SUBTITUTIONS IN THE INVOICE ATTACHMENT
INVOICE_CORP_ADDRESS = "Please place your corporate address\nin this configuration"
INVOICE_PAYMENT_INSTRUCTIONS = "This is where you can\nput directions on how people\nbuying registration codes"
# Country code overrides
# Used by django-countries
COUNTRIES_OVERRIDE = {
"TW": _("Taiwan"),
}
# which access.py permission name to check in order to determine if a course is visible in
# the course catalog. We default this to the legacy permission 'see_exists'.
COURSE_CATALOG_VISIBILITY_PERMISSION = 'see_exists'
# which access.py permission name to check in order to determine if a course about page is
# visible. We default this to the legacy permission 'see_exists'.
COURSE_ABOUT_VISIBILITY_PERMISSION = 'see_exists'
# Enrollment API Cache Timeout
ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT = 60
# for Student Notes we would like to avoid too frequent token refreshes (default is 30 seconds)
if FEATURES['ENABLE_EDXNOTES']:
OAUTH_ID_TOKEN_EXPIRATION = 60 * 60
# Configuration used for generating PDF Receipts/Invoices
PDF_RECEIPT_TAX_ID = 'add here'
PDF_RECEIPT_FOOTER_TEXT = 'add your own specific footer text here'
PDF_RECEIPT_DISCLAIMER_TEXT = 'add your own specific disclaimer text here'
PDF_RECEIPT_BILLING_ADDRESS = 'add your own billing address here with appropriate line feed characters'
PDF_RECEIPT_TERMS_AND_CONDITIONS = 'add your own terms and conditions'
PDF_RECEIPT_TAX_ID_LABEL = 'Tax ID'
PDF_RECEIPT_LOGO_PATH = PROJECT_ROOT + '/static/images/openedx-logo-tag.png'
# Height of the Logo in mm
PDF_RECEIPT_LOGO_HEIGHT_MM = 12
PDF_RECEIPT_COBRAND_LOGO_PATH = PROJECT_ROOT + '/static/images/default-theme/logo.png'
# Height of the Co-brand Logo in mm
PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM = 12
# Use None for the default search engine
SEARCH_ENGINE = None
#SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
#SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
# Use LMS specific search initializer
SEARCH_INITIALIZER = "lms.lib.courseware_search.lms_search_initializer.LmsSearchInitializer"
# Use the LMS specific result processor
SEARCH_RESULT_PROCESSOR = "lms.lib.courseware_search.lms_result_processor.LmsSearchResultProcessor"
# Use the LMS specific filter generator
SEARCH_FILTER_GENERATOR = "lms.lib.courseware_search.lms_filter_generator.LmsSearchFilterGenerator"
# Override to skip enrollment start date filtering in course search
SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING = False
### PERFORMANCE EXPERIMENT SETTINGS ###
# CDN experiment/monitoring flags
CDN_VIDEO_URLS = {}
# Page onload event sampling rate (min 0.0, max 1.0)
ONLOAD_BEACON_SAMPLE_RATE = 0.0
# The configuration visibility of account fields.
ACCOUNT_VISIBILITY_CONFIGURATION = {
# Default visibility level for accounts without a specified value
# The value is one of: 'all_users', 'private'
"default_visibility": "all_users",
# The list of all fields that can be shared with other users
"shareable_fields": [
'username',
'profile_image',
'country',
'time_zone',
'language_proficiencies',
'bio',
],
# The list of account fields that are always public
"public_fields": [
'username',
'profile_image',
],
}
# E-Commerce API Configuration
ECOMMERCE_PUBLIC_URL_ROOT = None
ECOMMERCE_API_URL = None
ECOMMERCE_API_SIGNING_KEY = None
ECOMMERCE_API_TIMEOUT = 5
# Reverification checkpoint name pattern
CHECKPOINT_PATTERN = r'(?P<checkpoint_name>[^/]+)'
# For the fields override feature
# If using FEATURES['INDIVIDUAL_DUE_DATES'], you should add
# 'courseware.student_field_overrides.IndividualStudentOverrideProvider' to
# this setting.
FIELD_OVERRIDE_PROVIDERS = ()
# PROFILE IMAGE CONFIG
# WARNING: Certain django storage backends do not support atomic
# file overwrites (including the default, OverwriteStorage) - instead
# there are separate calls to delete and then write a new file in the
# storage backend. This introduces the risk of a race condition
# occurring when a user uploads a new profile image to replace an
# earlier one (the file will temporarily be deleted).
PROFILE_IMAGE_BACKEND = {
'class': 'storages.backends.overwrite.OverwriteStorage',
'options': {
'location': os.path.join(MEDIA_ROOT, 'profile-images/'),
'base_url': os.path.join(MEDIA_URL, 'profile-images/'),
},
}
PROFILE_IMAGE_DEFAULT_FILENAME = 'images/default-theme/default-profile'
PROFILE_IMAGE_DEFAULT_FILE_EXTENSION = 'png'
# This secret key is used in generating unguessable URLs to users'
# profile images. Once it has been set, changing it will make the
# platform unaware of current image URLs, resulting in reverting all
# users' profile images to the default placeholder image.
PROFILE_IMAGE_SECRET_KEY = 'placeholder secret key'
PROFILE_IMAGE_MAX_BYTES = 1024 * 1024
PROFILE_IMAGE_MIN_BYTES = 100
# This is to check the domain in case of preview.
PREVIEW_DOMAIN = 'preview'
# Sets the maximum number of courses listed on the homepage
# If set to None, all courses will be listed on the homepage
HOMEPAGE_COURSE_MAX = 12
################################ Settings for Credit Courses ################################
# Initial delay used for retrying tasks.
# Additional retries use longer delays.
# Value is in seconds.
CREDIT_TASK_DEFAULT_RETRY_DELAY = 30
# Maximum number of retries per task for errors that are not related
# to throttling.
CREDIT_TASK_MAX_RETRIES = 5
# Secret keys shared with credit providers.
# Used to digitally sign credit requests (us --> provider)
# and validate responses (provider --> us).
# Each key in the dictionary is a credit provider ID, and
# the value is the 32-character key.
CREDIT_PROVIDER_SECRET_KEYS = {}
# Maximum age in seconds of timestamps we will accept
# when a credit provider notifies us that a student has been approved
# or denied for credit.
CREDIT_PROVIDER_TIMESTAMP_EXPIRATION = 15 * 60
# Default domain for the e-mail address associated with users who are created
# via the LTI Provider feature. Note that the generated e-mail addresses are
# not expected to be active; this setting simply allows administrators to
# route any messages intended for LTI users to a common domain.
LTI_USER_EMAIL_DOMAIN = 'lti.example.com'
| openiitbombayx/edx-platform | lms/envs/common.py | Python | agpl-3.0 | 89,763 |
# Copyright 2017 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements all the services
"""
from imcsdk.imcexception import ImcOperationError, ImcValidationException
from imcsdk.imccoreutils import IMC_PLATFORM
from imcsdk.apis.utils import _is_valid_arg
def _get_mgmtif_mo_dn(handle):
    """
    Internal helper that resolves the management interface dn for the
    platform type of the given handle (classic rack vs modular chassis).
    """
    platform_to_dn = {
        IMC_PLATFORM.TYPE_CLASSIC: "sys/rack-unit-1/mgmt/if-1",
        IMC_PLATFORM.TYPE_MODULAR: "sys/chassis-1/if-1",
    }
    if handle.platform not in platform_to_dn:
        raise ImcValidationException("Invalid platform detected:%s" %
                                     handle.platform)
    return platform_to_dn[handle.platform]
def _get_mgmtif_mo(handle):
    """
    Internal helper that queries and returns the management interface mo.

    Raises:
        ImcOperationError: when the mo does not exist on the endpoint.
    """
    mgmt_if_dn = _get_mgmtif_mo_dn(handle)
    mgmt_if = handle.query_dn(mgmt_if_dn)
    if mgmt_if is None:
        raise ImcOperationError("common_prop_configure",
                                "%s does not exist." % mgmt_if_dn)
    return mgmt_if
def common_prop_configure(handle, hostname=None, ddns_enable=None,
                          ddns_domain=None):
    """
    Configures networking common properties.

    Args:
        handle (ImcHandle)
        hostname (string): Hostname for the management interface.
        ddns_enable (string): Dynamic DNS. "yes" or "no"
        ddns_domain (string): Dynamic DNS update domain.

    Returns:
        MgmtIf object

    Raises:
        ImcOperationError

    Example:
        common_prop_configure(handle, ddns_enable="yes")
    """
    # Thin wrapper over mgmt_if_configure restricted to the common options.
    return mgmt_if_configure(
        handle,
        hostname=hostname,
        ddns_enable=ddns_enable,
        ddns_domain=ddns_domain
    )
def ipv4_configure(handle, dhcp_enable=None, ext_ip=None, ext_mask=None,
                   ext_gw=None, dns_using_dhcp=None, dns_alternate=None,
                   dns_preferred=None,
                   ):
    """
    Configures networking ipv4 properties.

    Args:
        handle (ImcHandle)
        dhcp_enable (string): Use DHCP. "yes" or "no"
        ext_ip (string): Static management IPv4 address.
        ext_mask (string): Subnet mask for the management address.
        ext_gw (string): Default gateway for the management address.
        dns_using_dhcp (string): Use DHCP for DNS servers. "yes" or "no"
        dns_alternate (string): Alternate DNS server address.
        dns_preferred (string): Preferred DNS server address.

    Returns:
        MgmtIf object

    Raises:
        ImcOperationError

    Example:
        ipv4_configure(handle, dns_using_dhcp="yes")
    """
    # Thin wrapper over mgmt_if_configure restricted to the IPv4 options.
    return mgmt_if_configure(
        handle,
        dhcp_enable=dhcp_enable,
        ext_ip=ext_ip,
        ext_mask=ext_mask,
        ext_gw=ext_gw,
        dns_using_dhcp=dns_using_dhcp,
        dns_alternate=dns_alternate,
        dns_preferred=dns_preferred
    )
def ipv6_configure(handle, v6ext_enabled=None, v6dhcp_enable=None,
                   v6ext_ip=None, v6ext_gw=None, v6prefix=None,
                   v6dns_using_dhcp=None, v6dns_preferred=None,
                   v6dns_alternate=None,
                   ):
    """
    Configures networking ipv6 properties.

    Args:
        handle (ImcHandle)
        v6ext_enabled (string): Enable IPv6. "yes" or "no"
        v6dhcp_enable (string): Use DHCP. "yes" or "no"
        v6ext_ip (string): Static management IPv6 address.
        v6ext_gw (string): Default gateway for the management address.
        v6prefix (int): IPv6 prefix length.
        v6dns_using_dhcp (string): Use DHCP for DNS servers. "yes" or "no"
        v6dns_preferred (string): Preferred DNS server address.
        v6dns_alternate (string): Alternate DNS server address.

    Returns:
        MgmtIf object

    Raises:
        ImcOperationError

    Example:
        ipv6_configure(handle, v6ext_enabled="yes")
    """
    # Thin wrapper over mgmt_if_configure restricted to the IPv6 options.
    # v6prefix is an int from the caller's point of view but is stored as a
    # string on the mo, hence the str() conversion here.
    return mgmt_if_configure(
        handle,
        v6ext_enabled=v6ext_enabled,
        v6dhcp_enable=v6dhcp_enable,
        v6ext_ip=v6ext_ip,
        v6ext_gw=v6ext_gw,
        v6prefix=str(v6prefix) if v6prefix is not None else None,
        v6dns_using_dhcp=v6dns_using_dhcp,
        v6dns_preferred=v6dns_preferred,
        v6dns_alternate=v6dns_alternate
    )
def vlan_enable(handle, vlan_id=None, vlan_priority=None):
    """
    Enables the management vlan, optionally setting its id and priority.

    Args:
        handle (ImcHandle)
        vlan_id (int): VLAN Id. 1-4094
        vlan_priority (int): VLAN Priority. 0-7

    Returns:
        MgmtIf object

    Raises:
        ImcOperationError

    Example:
        vlan_enable(handle)
    """
    # vlan_id/vlan_priority are ints from the caller's point of view but are
    # stored as strings on the mo.
    args = {'vlan_enable': "yes"}
    if vlan_id is not None:
        args['vlan_id'] = str(vlan_id)
    if vlan_priority is not None:
        args['vlan_priority'] = str(vlan_priority)
    return mgmt_if_configure(handle, **args)
def vlan_exists(handle, **kwargs):
    """
    Checks if the management vlan is enabled (and matches kwargs).

    Args:
        handle (ImcHandle)
        kwargs: key-value paired arguments

    Returns:
        True/False, MO/None

    Raises:
        ImcOperationError

    Example:
        vlan_exists(handle)
    """
    exists, mo = mgmt_if_exists(handle, **kwargs)
    vlan_on = exists and mo and mo.vlan_enable.lower() in ["yes", "true"]
    return bool(vlan_on), mo
def vlan_disable(handle):
    """
    Disables the management vlan.

    Args:
        handle (ImcHandle)

    Returns:
        MgmtIf object

    Raises:
        ImcOperationError

    Example:
        vlan_disable(handle)
    """
    disable_args = {'vlan_enable': "no"}
    return mgmt_if_configure(handle, **disable_args)
def mgmt_if_configure(handle,
                      admin_duplex=None,
                      admin_net_speed=None,
                      auto_neg=None,
                      ddns_domain=None,
                      ddns_enable=None,
                      dhcp_enable=None,
                      dns_alternate=None,
                      dns_preferred=None,
                      dns_using_dhcp=None,
                      ext_gw=None,
                      ext_ip=None,
                      ext_mask=None,
                      hostname=None,
                      nic_mode=None,
                      nic_redundancy=None,
                      port_profile=None,
                      v4_ip_addr=None,
                      v4_ip_addr_bmc1=None,
                      v4_ip_addr_bmc2=None,
                      v4_ip_addr_cmc1=None,
                      v4_ip_addr_cmc2=None,
                      v6_ip_addr=None,
                      v6_ip_addr_bmc1=None,
                      v6_ip_addr_bmc2=None,
                      v6_ip_addr_cmc1=None,
                      v6_ip_addr_cmc2=None,
                      v6dhcp_enable=None,
                      v6dns_alternate=None,
                      v6dns_preferred=None,
                      v6dns_using_dhcp=None,
                      v6ext_enabled=None,
                      v6ext_gw=None,
                      v6ext_ip=None,
                      v6prefix=None,
                      v_hostname=None,
                      vic_slot=None,
                      vlan_enable=None,
                      vlan_id=None,
                      vlan_priority=None,
                      **kwargs
                      ):
    """
    This method configures the network settings of CIMC.

    Properties left at None are not modified on the endpoint; only the
    parameters that are explicitly supplied are written to the mo.

    Args:
        handle(ImcHandle)
        admin_duplex(str):
        admin_net_speed(str):
        auto_neg(str):
        ddns_domain(str):
        ddns_enable(str):
        dhcp_enable(str):
        dns_alternate(str):
        dns_preferred(str):
        dns_using_dhcp(str):
        ext_gw(str):
        ext_ip(str):
        ext_mask(str):
        hostname(str):
        nic_mode(str):
        nic_redundancy(str):
        port_profile(str):
        v4_ip_addr(str):
        v4_ip_addr_bmc1(str):
        v4_ip_addr_bmc2(str):
        v4_ip_addr_cmc1(str):
        v4_ip_addr_cmc2(str):
        v6_ip_addr(str):
        v6_ip_addr_bmc1(str):
        v6_ip_addr_bmc2(str):
        v6_ip_addr_cmc1(str):
        v6_ip_addr_cmc2(str):
        v6dhcp_enable(str):
        v6dns_alternate(str):
        v6dns_preferred(str):
        v6dns_using_dhcp(str):
        v6ext_enabled(str):
        v6ext_gw(str):
        v6ext_ip(str):
        v6prefix(str): accepts an int as well; normalized to str.
        v_hostname(str):
        vic_slot(str):
        vlan_enable(str):
        vlan_id(str): accepts an int as well; normalized to str.
        vlan_priority(str): accepts an int as well; normalized to str.

    Returns:
        MgmtIf object

    Raises:
        ImcOperationError

    Example:
        mgmt_if_configure(handle)
    """
    mo = _get_mgmtif_mo(handle)
    args = {
        'admin_duplex': admin_duplex,
        'admin_net_speed': admin_net_speed,
        'auto_neg': auto_neg,
        'ddns_domain': ddns_domain,
        'ddns_enable': ddns_enable,
        'dhcp_enable': dhcp_enable,
        'dns_alternate': dns_alternate,
        'dns_preferred': dns_preferred,
        'dns_using_dhcp': dns_using_dhcp,
        'ext_gw': ext_gw,
        'ext_ip': ext_ip,
        'ext_mask': ext_mask,
        'hostname': hostname,
        'nic_mode': nic_mode,
        'nic_redundancy': nic_redundancy,
        'port_profile': port_profile,
        'v4_ip_addr': v4_ip_addr,
        'v4_ip_addr_bmc1': v4_ip_addr_bmc1,
        'v4_ip_addr_bmc2': v4_ip_addr_bmc2,
        'v4_ip_addr_cmc1': v4_ip_addr_cmc1,
        'v4_ip_addr_cmc2': v4_ip_addr_cmc2,
        'v6_ip_addr': v6_ip_addr,
        'v6_ip_addr_bmc1': v6_ip_addr_bmc1,
        'v6_ip_addr_bmc2': v6_ip_addr_bmc2,
        'v6_ip_addr_cmc1': v6_ip_addr_cmc1,
        'v6_ip_addr_cmc2': v6_ip_addr_cmc2,
        'v6dhcp_enable': v6dhcp_enable,
        'v6dns_alternate': v6dns_alternate,
        'v6dns_preferred': v6dns_preferred,
        'v6dns_using_dhcp': v6dns_using_dhcp,
        'v6ext_enabled': v6ext_enabled,
        'v6ext_gw': v6ext_gw,
        'v6ext_ip': v6ext_ip,
        # Integer-valued properties are stored as strings on the mo.
        'v6prefix': str(v6prefix) if v6prefix is not None else None,
        'v_hostname': v_hostname,
        'vic_slot': vic_slot,
        'vlan_enable': vlan_enable,
        'vlan_id': str(vlan_id) if vlan_id is not None else None,
        'vlan_priority': str(vlan_priority) if vlan_priority is not None else None,
    }
    mo.set_prop_multiple(**args)
    # Extra kwargs are applied last, so callers can set (or override)
    # properties that are not part of the named parameter list.
    mo.set_prop_multiple(**kwargs)
    handle.set_mo(mo)
    return mo
def _match_yes_no_value(prop_name, prop_value, mo):
_ENABLE = ['true', 'yes']
prop_value = prop_value.lower()
mo_prop_value = getattr(mo, prop_name).lower()
if prop_value in _ENABLE and mo_prop_value not in _ENABLE:
return False
elif prop_value not in _ENABLE and mo_prop_value in _ENABLE:
return False
return True
def mgmt_if_exists(handle, **kwargs):
    """
    Checks whether the management interface matches the given properties.

    yes/no style properties are compared leniently ('yes' matches 'true'),
    and integer-valued properties are normalized to strings before the
    comparison, mirroring mgmt_if_configure.

    Args:
        handle (ImcHandle)
        kwargs: key-value paired arguments

    Returns:
        True/False, MO/None
    """
    try:
        mo = _get_mgmtif_mo(handle)
    # Was a bare `except:`; narrowed so ctrl-C/SystemExit are not swallowed.
    except Exception:
        return False, None

    # Properties that accept yes/no (or true/false) need a tolerant match.
    yes_no_props = ('ddns_enable', 'dhcp_enable', 'dns_using_dhcp',
                    'v6dhcp_enable', 'v6dns_using_dhcp', 'v6ext_enabled',
                    'vlan_enable')
    for prop in yes_no_props:
        value = kwargs.pop(prop, None)
        if value and not _match_yes_no_value(prop, value, mo):
            return False, mo

    # Integer-valued properties are stored as strings on the mo.
    for prop in ('v6prefix', 'vlan_id', 'vlan_priority'):
        if prop in kwargs:
            kwargs[prop] = str(kwargs[prop]) if kwargs[prop] is not None else None

    return mo.check_prop_match(**kwargs), mo
def ip_blocking_enable(handle, fail_count=None, fail_window=None,
                       penalty_time=None, **kwargs):
    """
    Enables IP Blocking and Configures.

    Args:
        handle (ImcHandle)
        fail_count (int): 3-10
        fail_window (int): 60-120
        penalty_time (int): 300-900

    Returns:
        IPBlocing object

    Raises:
        ImcOperationError

    Example:
        ip_blocking_enable(handle, fail_count=3)
    """
    dn = _get_mgmtif_mo_dn(handle) + "/ip-block"
    mo = handle.query_dn(dn)
    if mo is None:
        raise ImcOperationError("ip_blocking_enable",
                                "%s does not exist." % dn)

    args = {
        'enable': "yes",
        'fail_count': str(fail_count) if fail_count is not None else None,
        # BUGFIX: this was gated on `fail_count is not None`, which dropped
        # fail_window when fail_count was omitted and produced the string
        # 'None' when fail_count was given but fail_window was not.
        'fail_window': str(fail_window) if fail_window is not None else None,
        'penalty_time':
            str(penalty_time) if penalty_time is not None else None
    }
    mo.set_prop_multiple(**args)
    mo.set_prop_multiple(**kwargs)
    handle.set_mo(mo)
    return mo
def ip_blocking_exists(handle, **kwargs):
    """
    Checks if IP blocking is enabled and matches the given properties.

    Args:
        handle (ImcHandle)
        kwargs: key-value paired arguments

    Returns:
        True/False, MO/None

    Raises:
        None

    Example:
        ip_blocking_exists(handle, fail_count=3)
    """
    mo = handle.query_dn(_get_mgmtif_mo_dn(handle) + "/ip-block")
    if mo is None:
        return False, None
    enabled = mo.enable.lower() in ["yes", "true"]
    if not enabled:
        return False, mo
    return mo.check_prop_match(**kwargs), mo
def ip_blocking_disable(handle):
    """
    Disables IP Blocking.

    Args:
        handle (ImcHandle)

    Returns:
        IPBlocing object

    Raises:
        ImcOperationError

    Example:
        ip_blocking_disable(handle)
    """
    dn = _get_mgmtif_mo_dn(handle) + "/ip-block"
    mo = handle.query_dn(dn)
    if mo is None:
        # BUGFIX: the error was labelled "ip_blocking_enable" (copy-paste).
        raise ImcOperationError("ip_blocking_disable",
                                "%s does not exist." % dn)
    mo.enable = "no"
    handle.set_mo(mo)
    return mo
_IP_FILTERS_LIST = ["filter1", "filter2", "filter3", "filter4"]
def _get_ip_filters(ip_filters):
return {"filter" + str(x["id"]): x["ip_filter"] for x in ip_filters}
def _set_ip_filters(mo, ip_filters):
    """Apply up to four ip filter entries onto the IPFiltering mo."""
    max_filters = len(_IP_FILTERS_LIST)
    if len(ip_filters) > max_filters:
        raise ImcOperationError("Set IP Filters",
                                "Cannot specify more than %d filters"
                                % max_filters)
    mo.set_prop_multiple(**_get_ip_filters(ip_filters))
def ip_filtering_enable(handle, ip_filters=None):
    """
    Enables IP Filtering and configures the provided filters.

    Args:
        handle (ImcHandle)
        ip_filters (list): List of dictionaries in the format
                    [{"id": 1, "ip_filter": "192.168.1.1"},
                     {"id": 2, "ip_filter": "192.168.1.2-192.168.1.4"}]
                    Upto 4 ip filters can be specified.

    Returns:
        IPFiltering object

    Raises:
        ImcOperationError

    Example:
        ip_filtering_enable(handle,
                            ip_filters = [
                                {"id": 1, "ip_filter": "192.168.1.1"},
                                {"id": 2, "ip_filter": "192.168.1.2-192.168.1.4"}]
    """
    dn = _get_mgmtif_mo_dn(handle) + "/ip-filter"
    mo = handle.query_dn(dn)
    if mo is None:
        raise ImcOperationError("ip_filtering_enable",
                                "%s does not exist." % dn)
    mo.enable = 'yes'
    _set_ip_filters(mo, ip_filters)
    handle.set_mo(mo)
    return mo
def ip_filtering_disable(handle):
    """
    Disables IP Filtering.

    Args:
        handle (ImcHandle)

    Returns:
        IPFiltering object

    Raises:
        ImcOperationError

    Example:
        ip_filtering_disable(handle)
    """
    dn = _get_mgmtif_mo_dn(handle) + "/ip-filter"
    mo = handle.query_dn(dn)
    if mo is None:
        # BUGFIX: the error was labelled "ip_filtering_enable" (copy-paste).
        raise ImcOperationError("ip_filtering_disable",
                                "%s does not exist." % dn)
    mo.enable = "no"
    handle.set_mo(mo)
    return mo
def ip_filtering_exists(handle, **kwargs):
    """
    Checks if the ip filtering already exists.

    Args:
        handle (ImcHandle)
        kwargs: key-value paired arguments

    Returns:
        True/False, MO/None
    """
    mo = handle.query_dn(_get_mgmtif_mo_dn(handle) + "/ip-filter")
    if mo is None:
        return False, None
    expected = dict(kwargs)
    expected['enable'] = 'yes'
    if _is_valid_arg("ip_filters", expected):
        # Expand the ip_filters list into the individual filterN properties.
        expected.update(_get_ip_filters(expected.pop('ip_filters')))
    return mo.check_prop_match(**expected), mo
| ragupta-git/ImcSdk | imcsdk/apis/v2/admin/network.py | Python | apache-2.0 | 17,832 |
#!/usr/bin/python
import os
import sys
import json
import zlib
import base64
import shutil
import random
import tempfile
import ConfigParser
from hashlib import sha1
from nfp_log import log
from nfp_queue import get_queue
from nfp_process import TimeoutCommand, RETURN_SIGNALS
from minimize_bindiff import CGenericBinaryDiffMinimizer
try:
from lib.interfaces import vtrace_iface, gdb_iface, asan_iface, pykd_iface
has_pykd = True
except ImportError:
has_pykd = False
from lib.interfaces import vtrace_iface, gdb_iface, asan_iface
#-----------------------------------------------------------------------
class CLineMinimizer(CGenericBinaryDiffMinimizer):
def __init__(self, cfg, section):
CGenericBinaryDiffMinimizer.__init__(self, cfg, section)
self.strip_empty_lines = True
self.read_configuration()
def read_configuration(self):
CGenericBinaryDiffMinimizer.read_configuration(self)
try:
self.line_per_line = bool(self.parser.get(self.section, 'line-per-line'))
except:
self.line_per_line = False
try:
self.lines_to_rip = int(self.parser.get(self.section, 'lines-to-rip'))
except:
self.lines_to_rip = 1
try:
self.lines_percent = int(self.parser.get(self.section, 'lines-percent'))
except:
self.lines_percent = 10
try:
self.crash_path = self.parser.get(self.section, 'crash-path')
except:
self.crash_path = None
try:
self.infinite_loop = self.parser.get(self.section, 'crash-path')
except:
self.infinite_loop = False
def read_template(self, template):
l = open(template, "rb").readlines()
if self.strip_empty_lines:
tmp = []
for line in l:
if line in ["\n", "\r\n"]:
continue
tmp.append(line)
l = tmp
self.template = l
def minimize(self, template, outdir):
self.read_template(template)
log("Performing line-level test case minimization")
start_at = os.getenv("NIGHTMARE_ITERATION")
if start_at is not None:
start_at = int(start_at)
log("Starting from iteration %d\n" % start_at)
else:
start_at = 0
self.do_try(outdir, start_at)
def crash_file_exists(self):
if self.crash_path is not None:
return os.listdir(self.crash_path) > 0
return False
def remove_crash_path(self):
if self.crash_path is not None:
for f in os.listdir(self.crash_path):
print "Removing", os.path.join(self.crash_path, f)
os.remove(os.path.join(self.crash_path, f))
def do_try(self, outdir, start_at=0):
""" Try to remove a random number of lines iterating from the first
line to the last one a number of times. Basically, we calculate
a total number of lines to remove between 1 line and 10%. If the
number of lines removed produces a test-case that still crashes,
remove the lines from the template, otherwise, drop the changes
and move to the next line.
IDEAS: Remove all empty lines before starting?
"""
orig_lines = len(self.template)
current_line = 0
iteration = 0
loops = 0
while 1:
self.minimized = False
total_lines = len(self.template)
log("Starting loop %d" % loops)
current_line = 0
for i in range(len(self.template)):
self.last_crash = None
self.read_configuration()
log("Minimizing, iteration %d..." % iteration)
iteration += 1
temp_file = tempfile.mktemp(suffix=self.extension)
lines = self.template
if current_line >= len(lines):
break
if loops == 0 and not self.line_per_line:
# Rip a random number of lines between 1 and self.lines_percent
# but only at the very first iteration (when we remove most of
# the stuff).
val = (total_lines-current_line)*self.lines_percent/100
if val == 0:
val = 1
lines_to_rip = random.randint(1, val)
log("Removing %d line(s) (maximum of %d%%)" % (lines_to_rip, self.lines_percent))
else:
# For the likely final run remove only one line per try (or
# whatever is specified in the configuration file)
lines_to_rip = self.lines_to_rip
log("Removing %d line(s)" % lines_to_rip)
lines = lines[:current_line] + lines[current_line+lines_to_rip:]
buf = "".join(lines)
with open(temp_file, "wb") as f:
f.write(buf)
try:
for key in self.env:
os.putenv(key, self.env[key])
self.remove_crash_path()
if i % self.pre_iterations == 0:
if self.pre_command is not None:
log("Running pre-command %s" % self.pre_command)
os.system(self.pre_command)
if self.command.find("@@") == -1:
cmd = "%s %s" % (self.command, temp_file)
else:
cmd = self.command.replace("@@", temp_file)
ret = self.execute_command(cmd, self.timeout)
if i % self.post_iterations == 0:
if self.post_command is not None:
log("Running post-command %s" % self.post_command)
os.system(self.post_command)
if ret in RETURN_SIGNALS or (self.signal is not None and ret == self.signal) or \
self.crash_file_exists():
self.template = lines
log("Process crashed as expected...")
buf = "".join(self.template)
if not os.path.exists(outdir):
log("Directory %s does not exists, creating it..." % outdir)
os.mkdir(outdir)
filename = os.path.join(outdir, "last_minimized%d%s" % (os.getpid(), self.extension))
with open(filename, "wb") as f:
f.write(buf)
log("Last minimized test case %s written to disk." % filename)
if self.should_notify_crash():
# TODO: Write a temporary file and put an enqueue the crash
self.put_new_crash(buf)
else:
current_line += 1
self.remove_crash_path()
finally:
os.remove(temp_file)
loops += 1
if len(self.template) == total_lines:
log("File minimized from %d line(s) to %d line(s)" % (orig_lines, len(self.template)))
buf = "".join(self.template)
filename = sha1(buf).hexdigest()
filename = os.path.join(outdir, "%s%s" % (filename, self.extension))
with open(filename, "wb") as f:
f.write(buf)
log("Minimized test case %s written to disk." % filename)
self.minimized = True
break
| joxeankoret/nightmare | runtime/minimize_line.py | Python | gpl-2.0 | 6,653 |
dependencies = [
'uwosh.themebase',
'uwosh.default'
]
# Zope 2 product entry point; called by the Products machinery at startup.
def initialize(context):
    """Initializer called when used as a Zope 2 product.""" | uwosh/uwosh.thememain | uwosh/thememain/__init__.py | Python | gpl-2.0 | 148
import debug
from hubs.ha import haremote as ha
from hubs.ha.hasshub import HAnode, RegisterDomain
import logsupport
class Scene(HAnode):
    """Home Assistant 'scene' domain node.

    Scenes are activate-only: SendOnOffCommand always returns False so the
    console continues to display the scene as off.
    """

    def __init__(self, HAitem, d):
        super().__init__(HAitem, **d)
        self.Hub.RegisterEntity('scene', self.entity_id, self)

    # noinspection PyUnusedLocal
    def SendOnOffCommand(self, settoon):
        """Activate the scene via the hub; turning a scene 'off' is invalid."""
        if not settoon:
            # Scenes cannot be turned off; log the bogus request and bail.
            logsupport.Logs.Log('{} attempt to set scene {} to off'.format(self.Hub.name, self.name),
                                severity=logsupport.ConsoleWarning)
            return False
        try:
            ha.call_service(self.Hub.api, 'scene', 'turn_on', {'entity_id': '{}'.format(self.entity_id)})
            debug.debugPrint('HASSgeneral', "Scene On sent to ", self.entity_id)
        except ha.HomeAssistantError:
            # HA probably restarting
            logsupport.Logs.Log(
                "{} didn't respond to scene on for {} - probably restarting".format(self.Hub.name, self.name),
                severity=logsupport.ConsoleWarning)
        return False  # scenes always show as off for display purposes
# Make Scene the handler class for every HA entity in the 'scene' domain.
RegisterDomain('scene', Scene)
| kevinkahn/softconsole | hubs/ha/domains/scene.py | Python | apache-2.0 | 1,011 |
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from courses.forms import VoucherGenerationForm
from courses.models import *
@staff_member_required
def voucher_generation_view(request):
    """Admin-only view that bulk-creates discount vouchers.

    On a valid 'generate' POST, saves the requested number of Voucher rows
    and redirects to the voucher changelist; otherwise re-renders the form
    (bound with errors when validation failed, unbound on first visit).
    """
    form = None
    if 'generate' in request.POST:
        form = VoucherGenerationForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            # Expiry only applies when the flag is ticked.
            expiry = data['expires'] if data['expires_flag'] else None
            for _unused in range(data['number_of_vouchers']):
                Voucher(purpose=data['purpose'],
                        percentage=data['percentage'],
                        expires=expiry).save()
            return HttpResponseRedirect(reverse('admin:courses_voucher_changelist'))
    if not form:
        form = VoucherGenerationForm()
    return render(request, 'courses/auth/action_voucher_generation.html', {
        'form': form,
    }) | gitsimon/tq_website | courses/admin_views.py | Python | gpl-2.0 | 1187
# Copyright 2015, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Softwar where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Nuitka optimization test (Python 2 print statements are intentional):
# "not" applied to constants is expected to be folded at compile time:
# "not bool" -> False, "not {}" -> True, "not 7" -> False.
print not bool
print not {}
print not 7
# TODO: Needs some SSA now.
# print bool or len
# print False or dict
# type() of the Ellipsis constant; should likewise be computable statically.
print type(Ellipsis)
| wfxiang08/Nuitka | tests/optimizations/Operations.py | Python | apache-2.0 | 1,026 |
from decimal import Decimal
_ = lambda x:x  # identity stand-in for the gettext "_" helper (see import below)
#from i18n import _
from electrum.wallet import WalletStorage, Wallet
from electrum.util import format_satoshis, set_verbosity, StoreDict
from electrum.bitcoin import is_valid, COIN
from electrum.network import filter_protocol
import sys, getpass, datetime
# minimal fdisk like gui for console usage
# written by rofl0r, with some bits stolen from the text gui (ncurses)
class ElectrumGui:
    """Minimal line-oriented console UI for Electrum (Python 2).

    Reads single-letter commands from stdin in a loop (see main_command)
    and prints wallet state to stdout.  Network events are surfaced via
    callbacks registered in __init__.
    """

    def __init__(self, config, network):
        self.network = network
        self.config = config
        storage = WalletStorage(config.get_wallet_path())
        if not storage.file_exists:
            print "Wallet not found. try 'electrum create'"
            exit()
        self.done = 0
        self.last_balance = ""
        set_verbosity(False)
        # Pending payment-order fields, filled in by enter_order().
        self.str_recipient = ""
        self.str_description = ""
        self.str_amount = ""
        self.str_fee = ""
        self.wallet = Wallet(storage)
        self.wallet.start_threads(network)
        self.contacts = StoreDict(self.config, 'contacts')
        # Network callbacks just print status changes to the console.
        self.wallet.network.register_callback('updated', self.updated)
        self.wallet.network.register_callback('connected', self.connected)
        self.wallet.network.register_callback('disconnected', self.disconnected)
        self.wallet.network.register_callback('disconnecting', self.disconnecting)
        self.wallet.network.register_callback('peers', self.peers)
        self.wallet.network.register_callback('banner', self.print_banner)
        self.commands = [_("[h] - displays this help text"), \
                         _("[i] - display transaction history"), \
                         _("[o] - enter payment order"), \
                         _("[p] - print stored payment order"), \
                         _("[s] - send stored payment order"), \
                         _("[r] - show own receipt addresses"), \
                         _("[c] - display contacts"), \
                         _("[b] - print server banner"), \
                         _("[q] - quit") ]
        self.num_commands = len(self.commands)

    def main_command(self):
        """Print the balance, read one command letter, and dispatch it."""
        self.print_balance()
        c = raw_input("enter command: ")
        if c == "h" : self.print_commands()
        elif c == "i" : self.print_history()
        elif c == "o" : self.enter_order()
        elif c == "p" : self.print_order()
        elif c == "s" : self.send_order()
        elif c == "r" : self.print_addresses()
        elif c == "c" : self.print_contacts()
        elif c == "b" : self.print_banner()
        elif c == "n" : self.network_dialog()
        elif c == "e" : self.settings_dialog()
        elif c == "q" : self.done = 1
        else: self.print_commands()

    def peers(self):
        # List only servers speaking the 's' (SSL) protocol.
        print("got peers list:")
        l = filter_protocol(self.wallet.network.get_servers(), 's')
        for s in l:
            print (s)

    def connected(self):
        print ("connected")

    def disconnected(self):
        print ("disconnected")

    def disconnecting(self):
        print ("disconnecting")

    def updated(self):
        # Only reprint the balance line when it actually changed.
        s = self.get_balance()
        if s != self.last_balance:
            print(s)
        self.last_balance = s
        return True

    def print_commands(self):
        self.print_list(self.commands, "Available commands")

    def print_history(self):
        """Print the wallet history as a fixed-width four-column table."""
        width = [20, 40, 14, 14]
        delta = (80 - sum(width) - 4)/3
        # Build a "%<n>s%<n>s%<n>s%<n>s" format string from the column widths.
        format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%" \
            + "%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
        b = 0  # NOTE(review): appears unused
        messages = []
        for item in self.wallet.get_history():
            tx_hash, confirmations, value, timestamp, balance = item
            if confirmations:
                try:
                    time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
                except Exception:
                    time_str = "unknown"
            else:
                time_str = 'pending'
            label, is_default_label = self.wallet.get_label(tx_hash)
            messages.append( format_str%( time_str, label, format_satoshis(value, whitespaces=True), format_satoshis(balance, whitespaces=True) ) )
        # History comes oldest-first; print newest-first.
        self.print_list(messages[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))

    def print_balance(self):
        print(self.get_balance())

    def get_balance(self):
        """Return a one-line balance summary string."""
        if self.wallet.network.is_connected():
            if not self.wallet.up_to_date:
                msg = _( "Synchronizing..." )
            else:
                c, u, x = self.wallet.get_balance()
                msg = _("Balance")+": %f "%(Decimal(c) / COIN)
                if u:
                    msg += " [%f unconfirmed]"%(Decimal(u) / COIN)
                if x:
                    msg += " [%f unmatured]"%(Decimal(x) / COIN)
        else:
            msg = _( "Not connected" )
        return(msg)

    def print_contacts(self):
        messages = map(lambda x: "%20s %45s "%(x[0], x[1][1]), self.contacts.items())
        self.print_list(messages, "%19s %25s "%("Key", "Value"))

    def print_addresses(self):
        messages = map(lambda addr: "%30s %30s "%(addr, self.wallet.labels.get(addr,"")), self.wallet.addresses())
        self.print_list(messages, "%19s %25s "%("Address", "Label"))

    def print_order(self):
        print("send order to " + self.str_recipient + ", amount: " + self.str_amount \
            + "\nfee: " + self.str_fee + ", desc: " + self.str_description)

    def enter_order(self):
        # Fill in the pending payment order from stdin; sent by send_order().
        self.str_recipient = raw_input("Pay to: ")
        self.str_description = raw_input("Description : ")
        self.str_amount = raw_input("Amount: ")
        self.str_fee = raw_input("Fee: ")

    def send_order(self):
        self.do_send()

    def print_banner(self):
        for i, x in enumerate( self.wallet.network.banner.split('\n') ):
            print( x )

    def print_list(self, list, firstline):
        # NOTE(review): parameter 'list' shadows the builtin of the same name.
        self.maxpos = len(list)
        if not self.maxpos: return
        print(firstline)
        for i in range(self.maxpos):
            msg = list[i] if i < len(list) else ""
            print(msg)

    def main(self,url):
        # Command loop; main_command() sets self.done on 'q'.
        while self.done == 0: self.main_command()

    def do_send(self):
        """Validate the stored order, confirm with the user, sign and broadcast."""
        if not is_valid(self.str_recipient):
            print(_('Invalid Bitcoin address'))
            return
        try:
            amount = int(Decimal(self.str_amount) * COIN)
        except Exception:
            print(_('Invalid Amount'))
            return
        try:
            fee = int(Decimal(self.str_fee) * COIN)
        except Exception:
            print(_('Invalid Fee'))
            return
        if self.wallet.use_encryption:
            password = self.password_dialog()
            if not password:
                return
        else:
            password = None
        # Require an explicit "y"; "n" aborts, anything else re-prompts.
        c = ""
        while c != "y":
            c = raw_input("ok to send (y/n)?")
            if c == "n": return
        try:
            tx = self.wallet.mktx( [(self.str_recipient, amount)], password, self.config, fee)
        except Exception as e:
            print(str(e))
            return
        if self.str_description:
            self.wallet.labels[tx.hash()] = self.str_description
        h = self.wallet.send_tx(tx)
        print(_("Please wait..."))
        self.wallet.tx_event.wait()
        status, msg = self.wallet.receive_tx( h, tx )
        if status:
            print(_('Payment sent.'))
            #self.do_clear()
            #self.update_contacts_tab()
        else:
            print(_('Error'))

    def network_dialog(self):
        print("use 'electrum setconfig server/proxy' to change your network settings")
        return True

    def settings_dialog(self):
        print("use 'electrum setconfig' to change your settings")
        return True

    def password_dialog(self):
        return getpass.getpass()

    # XXX unused
    def run_receive_tab(self, c):
        #if c == 10:
        #    out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
        return

    def run_contacts_tab(self, c):
        pass
| shanew/electrum | gui/stdio.py | Python | gpl-3.0 | 8,236 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Ronald Sadlier - Oak Ridge National Laboratory
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import qitkat_swig as qitkat
class qa_ecc_golay2412_encode_bb(gr_unittest.TestCase):
    """QA harness for the Golay(24,12) encoder GNU Radio block."""

    def setUp(self):
        # Fresh flowgraph for every test case.
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001(self):
        '''
        We got our Golay2412 encoding/decoding algorithm from a reputable source, so for now
        we can assume that it works correctly.
        TODO: Add our own tests for Golay2412
        '''
        self.tb.run()
if __name__ == '__main__':
    # Run the QA suite standalone, writing results to an XML report.
    gr_unittest.run(qa_ecc_golay2412_encode_bb, "qa_ecc_golay2412_encode_bb.xml")
| RonSadlier/gr-qitkat | python/qa_ecc_golay2412_encode_bb.py | Python | gpl-3.0 | 1,421 |
from .g2sd import cmd
if __name__ == "__main__":
    # Lets "python -m g2sd" invoke the command-line entry point.
    cmd()
| thismachinechills/grub2systemd | g2sd/__main__.py | Python | agpl-3.0 | 58 |
#
# Copyright 2006-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""XLIFF classes specifically suited for handling the PO representation in
XLIFF.
This way the API supports plurals as if it was a PO file, for example.
"""
import re
from lxml import etree
from translate.misc.multistring import multistring
from translate.misc.xml_helpers import setXMLspace
from translate.storage import base, lisa, poheader, xliff
from translate.storage.placeables import general
def hasplurals(thing):
    """Return True when *thing* is a multistring carrying more than one form."""
    return isinstance(thing, multistring) and len(thing.strings) > 1
class PoXliffUnit(xliff.xliffunit):
    """A class to specifically handle the plural units created from a po file.

    A singular entry behaves like a plain xliffunit; a plural entry is an
    XLIFF <group restype="x-gettext-plurals"> whose child trans-units are
    kept in self.units.
    """

    rich_parsers = general.parsers

    def __init__(self, source=None, empty=False, **kwargs):
        self._rich_source = None
        self._rich_target = None
        self._state_n = 0
        self.units = []
        if empty:
            return
        if not hasplurals(source):
            # Singular entry: plain trans-unit handled by the base class.
            super().__init__(source)
            return
        # Plural entry: represent the unit as a <group> of trans-units.
        self.xmlelement = etree.Element(self.namespaced("group"))
        self.xmlelement.set("restype", "x-gettext-plurals")
        self.source = source

    def __eq__(self, other):
        if isinstance(other, PoXliffUnit):
            if len(self.units) != len(other.units):
                return False
            if not super().__eq__(other):
                return False
            # First sub-unit was already covered by super().__eq__; compare
            # the remaining plural forms pairwise.
            for i in range(len(self.units) - 1):
                if not self.units[i + 1] == other.units[i + 1]:
                    return False
            return True
        if len(self.units) <= 1:
            if isinstance(other, lisa.LISAunit):
                return super().__eq__(other)
            else:
                return self.source == other.source and self.target == other.target
        return False

    # XXX: We don't return language nodes correctly at the moment
    # def getlanguageNodes(self):
    #    if not self.hasplural():
    #        return super().getlanguageNodes()
    #    else:
    #        return self.units[0].getlanguageNodes()

    @property
    def source(self):
        if not self.hasplural():
            return super().source
        return multistring([unit.source for unit in self.units])

    @source.setter
    def source(self, source):
        self.setsource(source, sourcelang="en")

    def setsource(self, source, sourcelang="en"):
        # TODO: consider changing from plural to singular, etc.
        self._rich_source = None
        if not hasplurals(source):
            super().setsource(source, sourcelang)
        else:
            # Rebuild the child trans-units from scratch, then restore the
            # previous target onto the new structure.
            target = self.target
            for unit in self.units:
                try:
                    self.xmlelement.remove(unit.xmlelement)
                except ValueError:
                    pass
            self.units = []
            for s in source.strings:
                newunit = xliff.xliffunit(s)
                # newunit.namespace = self.namespace #XXX?necessary?
                self.units.append(newunit)
                self.xmlelement.append(newunit.xmlelement)
            self.target = target

    # We don't support any rich strings yet
    multistring_to_rich = base.TranslationUnit.multistring_to_rich
    rich_to_multistring = base.TranslationUnit.rich_to_multistring

    rich_source = base.TranslationUnit.rich_source
    rich_target = base.TranslationUnit.rich_target

    def gettarget(self, lang=None):
        if self.hasplural():
            strings = [unit.target for unit in self.units]
            if strings:
                return multistring(strings)
            else:
                return None
        else:
            return super().gettarget(lang)

    def settarget(self, target, lang="xx", append=False):
        self._rich_target = None
        if self.target == target:
            return
        if not self.hasplural():
            super().settarget(target, lang, append)
            return
        if not isinstance(target, multistring):
            target = multistring(target)
        source = self.source
        sourcel = len(source.strings)
        targetl = len(target.strings)
        if sourcel < targetl:
            # More target forms than source forms: pad the source by
            # repeating its last form, preserving the unit id.
            sources = source.strings + [source.strings[-1]] * (targetl - sourcel)
            targets = target.strings
            id = self.getid()
            self.source = multistring(sources)
            self.setid(id)
        elif targetl < sourcel:
            # Fewer target forms: pad the target with empty strings.
            targets = target.strings + [""] * (sourcel - targetl)
        else:
            targets = target.strings
        for i in range(len(self.units)):
            self.units[i].target = targets[i]

    def addnote(self, text, origin=None, position="append"):
        """Add a note specifically in a "note" tag"""
        note = etree.SubElement(self.xmlelement, self.namespaced("note"))
        note.text = text
        if origin:
            note.set("from", origin)
        # Propagate the note to the remaining plural forms as well.
        for unit in self.units[1:]:
            unit.addnote(text, origin)

    def getnotes(self, origin=None):
        # NOTE: We support both <context> and <note> tags in xliff files for comments
        if origin == "translator":
            notes = super().getnotes("translator")
            trancomments = self.gettranslatorcomments()
            # De-duplicate when one source of comments contains the other.
            if notes == trancomments or trancomments.find(notes) >= 0:
                notes = ""
            elif notes.find(trancomments) >= 0:
                trancomments = notes
                notes = ""
            return trancomments + notes
        elif origin in ["programmer", "developer", "source code"]:
            devcomments = super().getnotes("developer")
            autocomments = self.getautomaticcomments()
            if devcomments == autocomments or autocomments.find(devcomments) >= 0:
                devcomments = ""
            elif devcomments.find(autocomments) >= 0:
                autocomments = devcomments
                devcomments = ""
            return autocomments
        else:
            return super().getnotes(origin)

    def markfuzzy(self, value=True):
        super().markfuzzy(value)
        for unit in self.units[1:]:
            unit.markfuzzy(value)

    def marktranslated(self):
        super().marktranslated()
        for unit in self.units[1:]:
            unit.marktranslated()

    def setid(self, id):
        super().setid(id)
        # Plural forms get derived ids: "<id>[0]", "<id>[1]", ...
        if len(self.units) > 1:
            for i in range(len(self.units)):
                self.units[i].setid("%s[%d]" % (id, i))

    def getlocations(self):
        """Returns all the references (source locations)"""
        groups = self.getcontextgroups("po-reference")
        references = []
        for group in groups:
            sourcefile = ""
            linenumber = ""
            for (type, text) in group:
                if type == "sourcefile":
                    sourcefile = text
                elif type == "linenumber":
                    linenumber = text
            assert sourcefile
            if linenumber:
                sourcefile = sourcefile + ":" + linenumber
            references.append(sourcefile)
        return references

    def getautomaticcomments(self):
        """Returns the automatic comments (x-po-autocomment), which corresponds
        to the #. style po comments.
        """

        def hasautocomment(grp):
            return grp[0] == "x-po-autocomment"

        groups = self.getcontextgroups("po-entry")
        comments = []
        for group in groups:
            commentpairs = filter(hasautocomment, group)
            for (type, text) in commentpairs:
                comments.append(text)
        return "\n".join(comments)

    def gettranslatorcomments(self):
        """Returns the translator comments (x-po-trancomment), which
        corresponds to the # style po comments.
        """

        def hastrancomment(grp):
            return grp[0] == "x-po-trancomment"

        groups = self.getcontextgroups("po-entry")
        comments = []
        for group in groups:
            commentpairs = filter(hastrancomment, group)
            for (type, text) in commentpairs:
                comments.append(text)
        return "\n".join(comments)

    def isheader(self):
        return "gettext-domain-header" in (self.getrestype() or "")

    def istranslatable(self):
        return super().istranslatable() and not self.isheader()

    @classmethod
    def createfromxmlElement(cls, element, namespace=None):
        # A plain trans-unit becomes a singular unit; a <group> becomes a
        # plural unit with one sub-unit per contained trans-unit.
        if element.tag.endswith("trans-unit"):
            object = cls(None, empty=True)
            object.xmlelement = element
            object.namespace = namespace
            return object
        assert element.tag.endswith("group")
        group = cls(None, empty=True)
        group.xmlelement = element
        group.namespace = namespace
        units = list(element.iterdescendants(group.namespaced("trans-unit")))
        for unit in units:
            subunit = xliff.xliffunit.createfromxmlElement(unit)
            subunit.namespace = namespace
            group.units.append(subunit)
        return group

    def hasplural(self):
        # Plural units are stored as <group> elements (see __init__).
        return self.xmlelement.tag == self.namespaced("group")
class PoXliffFile(xliff.xlifffile, poheader.poheader):
    """a file for the po variant of Xliff files"""

    UnitClass = PoXliffUnit

    def __init__(self, *args, **kwargs):
        if "sourcelanguage" not in kwargs:
            kwargs["sourcelanguage"] = "en-US"
        xliff.xlifffile.__init__(self, *args, **kwargs)

    def createfilenode(self, filename, sourcelanguage="en-US", datatype="po"):
        # Let's ignore the sourcelanguage parameter opting for the internal
        # one. PO files will probably be one language
        return super().createfilenode(
            filename, sourcelanguage=self.sourcelanguage, datatype="po"
        )

    def _insert_header(self, header):
        """Mark *header* as the gettext domain header and add it to the store."""
        header.xmlelement.set("restype", "x-gettext-domain-header")
        header.xmlelement.set("approved", "no")
        setXMLspace(header.xmlelement, "preserve")
        self.addunit(header)

    def addheaderunit(self, target, filename):
        """Add the PO header as a non-approved, whitespace-preserving unit."""
        unit = self.addsourceunit(target, filename, True)
        unit.target = target
        unit.xmlelement.set("restype", "x-gettext-domain-header")
        unit.xmlelement.set("approved", "no")
        setXMLspace(unit.xmlelement, "preserve")
        return unit

    def addplural(self, source, target, filename, createifmissing=False):
        """This method should now be unnecessary, but is left for reference"""
        assert isinstance(source, multistring)
        if not isinstance(target, multistring):
            target = multistring(target)
        sourcel = len(source.strings)
        targetl = len(target.strings)
        if sourcel < targetl:
            # Pad the source forms by repeating the last one so both lists
            # can be zipped below.
            # BUG FIX: the repeat count must be parenthesised; the previous
            # "[x] * targetl - sourcel" raised TypeError (list minus int).
            # (Matches the padding logic in PoXliffUnit.settarget.)
            sources = source.strings + [source.strings[-1]] * (targetl - sourcel)
            targets = target.strings
        else:
            sources = source.strings
            targets = target.strings
        self._messagenum += 1
        pluralnum = 0
        group = self.creategroup(filename, True, restype="x-gettext-plural")
        for (src, tgt) in zip(sources, targets):
            unit = self.UnitClass(src)
            unit.target = tgt
            unit.setid("%d[%d]" % (self._messagenum, pluralnum))
            pluralnum += 1
            group.append(unit.xmlelement)
            self.units.append(unit)

        if pluralnum < sourcel:
            # Remaining source plural forms without a translation.
            for string in sources[pluralnum:]:
                # BUG FIX: build the unit from the current form ("string"),
                # not from the stale loop variable "src" of the loop above.
                unit = self.UnitClass(string)
                unit.xmlelement.set("translate", "no")
                unit.setid("%d[%d]" % (self._messagenum, pluralnum))
                pluralnum += 1
                group.append(unit.xmlelement)
                self.units.append(unit)

        return self.units[-pluralnum]

    def parse(self, xml):
        """Populates this object from the given xml string"""
        # TODO: Make more robust

        def ispluralgroup(node):
            """determines whether the xml node refers to a gettext plural"""
            return node.get("restype") == "x-gettext-plurals"

        def isnonpluralunit(node):
            """determines whether the xml node contains a plural like id.

            We want to filter out all the plural nodes, except the very first
            one in each group.
            """
            return re.match(r".+\[[123456]\]$", node.get("id") or "") is None

        def pluralunits(pluralgroups):
            for pluralgroup in pluralgroups:
                yield self.UnitClass.createfromxmlElement(
                    pluralgroup, namespace=self.namespace
                )

        self.filename = getattr(xml, "name", "")
        if hasattr(xml, "read"):
            xml.seek(0)
            xmlsrc = xml.read()
            xml = xmlsrc
        parser = etree.XMLParser(resolve_entities=False)
        self.document = etree.fromstring(xml, parser).getroottree()
        self.initbody()
        root_node = self.document.getroot()
        assert root_node.tag == self.namespaced(self.rootNode)
        groups = root_node.iterdescendants(self.namespaced("group"))
        pluralgroups = filter(ispluralgroup, groups)
        termEntries = root_node.iterdescendants(
            self.namespaced(self.UnitClass.rootNode)
        )

        singularunits = list(filter(isnonpluralunit, termEntries))
        if len(singularunits) == 0:
            return
        pluralunit_iter = pluralunits(pluralgroups)
        nextplural = next(pluralunit_iter, None)

        for entry in singularunits:
            term = self.UnitClass.createfromxmlElement(entry, namespace=self.namespace)
            if nextplural and str(term.getid()) == ("%s[0]" % nextplural.getid()):
                # This unit is the first form of the pending plural group:
                # add the whole group instead of the lone trans-unit.
                self.addunit(nextplural, new=False)
                nextplural = next(pluralunit_iter, None)
            else:
                self.addunit(term, new=False)
| miurahr/translate | translate/storage/poxliff.py | Python | gpl-2.0 | 14,579 |
import sys
import os
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, FileModifiedEvent
class LessCompiler(FileSystemEventHandler):
    """Watchdog handler that recompiles a LESS source file to CSS.

    The destination defaults to the source path with 'less' replaced by
    'css' (NOTE: str.replace substitutes every occurrence of 'less' in the
    path, not just the extension); an explicit destination can be given as
    the second command-line argument.
    """

    def __init__(self, source):
        self.source = source
        FileSystemEventHandler.__init__(self)

    def compile_css(self):
        """Run lessc over the watched source, writing compressed (-x) CSS."""
        if len(sys.argv) < 3:
            destination = self.source.replace('less', 'css')
        else:
            destination = sys.argv[2]
        # BUG FIX: previously referenced the module-global 'source', which
        # only exists when run via the __main__ block; use the instance
        # attribute so the handler also works when imported.
        # NOTE(review): os.system with interpolated paths breaks on spaces /
        # shell metacharacters; consider subprocess.run with a list.
        cmd = 'lessc %s > %s -x' % (self.source, os.path.abspath(destination))
        print(cmd)
        os.system(cmd)

    def on_any_event(self, event):
        # Recompile on real modifications only; paths containing '__' are
        # skipped (editor/backup artifacts).
        if '__' not in event.src_path and isinstance(event, FileModifiedEvent):
            self.compile_css()
if __name__ == "__main__":
    # Usage: watch_less.py source [destination]; exit 1 on missing args.
    if len(sys.argv) < 2:
        sys.stderr.write(
            'Usage: %s source [destination=../css/$1.css]\n' % sys.argv[0])
        sys.exit(1)

    source = os.path.abspath(sys.argv[1])
    event_handler = LessCompiler(source)

    # Run once at startup
    event_handler.compile_css()

    # Then recompile on every change under the source's directory.
    observer = Observer()
    observer.schedule(event_handler, os.path.dirname(source), recursive=True)
    observer.start()
    try:
        # Idle loop; the observer thread does the work until Ctrl-C.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
| hzlf/openbroadcast | website/tools/suit/watch_less.py | Python | gpl-3.0 | 1,306 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Default the settings module so "./manage.py <cmd>" works without env setup.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "projects.settings")

    from django.core.management import execute_from_command_line

    # Dispatch to Django's command-line handler (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| cmheisel/project-status-dashboard | manage.py | Python | mit | 251 |
Subsets and Splits