repo_name (string) | path (string) | copies (class) | size (string) | content (string) | license (class) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
solooboroten/fedora-livecd | imgcreate/kickstart.py | 1 | 20592 | #
# kickstart.py : Apply kickstart configuration to a system
#
# Copyright 2007, Red Hat Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import os.path
import shutil
import subprocess
import time
import logging
import urlgrabber
import selinux
try:
import system_config_keyboard.keyboard as keyboard
except ImportError:
import rhpl.keyboard as keyboard
import pykickstart.commands as kscommands
import pykickstart.constants as ksconstants
import pykickstart.errors as kserrors
import pykickstart.parser as ksparser
import pykickstart.version as ksversion
import imgcreate.errors as errors
import imgcreate.fs as fs
def read_kickstart(path):
"""Parse a kickstart file and return a KickstartParser instance.
This is a simple utility function which takes a path to a kickstart file,
parses it and returns a pykickstart KickstartParser instance which can
be then passed to an ImageCreator constructor.
If an error occurs, a CreatorError exception is thrown.
"""
version = ksversion.makeVersion()
ks = ksparser.KickstartParser(version)
try:
ksfile = urlgrabber.urlgrab(path)
ks.readKickstart(ksfile)
# Fallback to e.args[0] is a workaround for bugs in urlgrabber and pykickstart.
except IOError, e:
raise errors.KickstartError("Failed to read kickstart file "
"'%s' : %s" % (path, e.strerror or
e.args[0]))
except kserrors.KickstartError, e:
raise errors.KickstartError("Failed to parse kickstart file "
"'%s' : %s" % (path, e))
return ks
def build_name(kscfg, prefix = None, suffix = None, maxlen = None):
"""Construct and return an image name string.
This is a utility function to help create sensible name and fslabel
strings. The name is constructed using the sans-prefix-and-extension
kickstart filename and the supplied prefix and suffix.
If the name exceeds the maxlen length supplied, the prefix is first dropped
and then the kickstart filename portion is reduced until it fits. In other
words, the suffix takes precedence over the kickstart portion and the
kickstart portion takes precedence over the prefix.
kscfg -- a path to a kickstart file
prefix -- a prefix to prepend to the name; defaults to None, which causes
no prefix to be used
suffix -- a suffix to append to the name; defaults to None, which causes
a YYYYMMDDHHMM suffix to be used
maxlen -- the maximum length for the returned string; defaults to None,
which means there is no restriction on the name length
Note: if maxlen is less than len(suffix), you get to keep both pieces.
"""
name = os.path.basename(kscfg)
idx = name.rfind('.')
if idx >= 0:
name = name[:idx]
if prefix is None:
prefix = ""
if suffix is None:
suffix = time.strftime("%Y%m%d%H%M")
if name.startswith(prefix):
name = name[len(prefix):]
ret = prefix + name + "-" + suffix
if maxlen is not None and len(ret) > maxlen:
ret = name[:maxlen - len(suffix) - 1] + "-" + suffix
return ret
class KickstartConfig(object):
"""A base class for applying kickstart configurations to a system."""
def __init__(self, instroot):
self.instroot = instroot
def path(self, subpath):
return self.instroot + subpath
def chroot(self):
os.chroot(self.instroot)
os.chdir("/")
def call(self, args):
if not os.path.exists("%s/%s" %(self.instroot, args[0])):
raise errors.KickstartError("Unable to run %s!" %(args))
subprocess.call(args, preexec_fn = self.chroot)
def apply(self):
pass
class LanguageConfig(KickstartConfig):
"""A class to apply a kickstart language configuration to a system."""
def apply(self, kslang):
lang = kslang.lang or "en_US.UTF-8"
f = open(self.path("/etc/locale.conf"), "w+")
f.write("LANG=\"" + lang + "\"\n")
f.close()
class KeyboardConfig(KickstartConfig):
"""A class to apply a kickstart keyboard configuration to a system."""
def apply(self, kskeyboard):
vcconf_file = self.path("/etc/vconsole.conf")
DEFAULT_VC_FONT = "latarcyrheb-sun16"
if not kskeyboard.keyboard:
kskeyboard.keyboard = "us"
try:
with open(vcconf_file, "w") as f:
f.write('KEYMAP="%s"\n' % kskeyboard.keyboard)
# systemd now defaults to a font that cannot display non-ascii
# characters, so we have to tell it to use a better one
f.write('FONT="%s"\n' % DEFAULT_VC_FONT)
except IOError as e:
logging.error("Cannot write vconsole configuration file: %s" % e)
class TimezoneConfig(KickstartConfig):
"""A class to apply a kickstart timezone configuration to a system."""
def apply(self, kstimezone):
tz = kstimezone.timezone or "America/New_York"
utc = str(kstimezone.isUtc)
# /etc/localtime is a symlink with glibc > 2.15-41
# but if it exists as a file keep it as a file and fall back
# to a symlink.
localtime = self.path("/etc/localtime")
if os.path.isfile(localtime) and \
not os.path.islink(localtime):
try:
shutil.copy2(self.path("/usr/share/zoneinfo/%s" %(tz,)),
localtime)
except (OSError, shutil.Error) as e:
logging.error("Error copying timezone: %s" %(e.strerror,))
else:
if os.path.exists(localtime):
os.unlink(localtime)
os.symlink("/usr/share/zoneinfo/%s" %(tz,), localtime)
class AuthConfig(KickstartConfig):
"""A class to apply a kickstart authconfig configuration to a system."""
def apply(self, ksauthconfig):
if not os.path.exists(self.path("/usr/sbin/authconfig")):
return
auth = ksauthconfig.authconfig or "--useshadow --enablemd5"
args = ["/usr/sbin/authconfig", "--update", "--nostart"]
self.call(args + auth.split())
class FirewallConfig(KickstartConfig):
"""A class to apply a kickstart firewall configuration to a system."""
def apply(self, ksfirewall):
args = ["/usr/bin/firewall-offline-cmd"]
# enabled is None if neither --enable nor --disable is passed
# default to enabled if nothing has been set.
if ksfirewall.enabled == False:
args += ["--disabled"]
else:
args += ["--enabled"]
for dev in ksfirewall.trusts:
args += [ "--trust=%s" % (dev,) ]
for port in ksfirewall.ports:
args += [ "--port=%s" % (port,) ]
for service in ksfirewall.services:
args += [ "--service=%s" % (service,) ]
self.call(args)
class RootPasswordConfig(KickstartConfig):
"""A class to apply a kickstart root password configuration to a system."""
def lock(self):
self.call(["/usr/bin/passwd", "-l", "root"])
def set_encrypted(self, password):
self.call(["/usr/sbin/usermod", "-p", password, "root"])
def set_unencrypted(self, password):
for p in ("/bin/echo", "/usr/bin/passwd"):
if not os.path.exists("%s/%s" %(self.instroot, p)):
raise errors.KickstartError("Unable to set unencrypted password due to lack of %s" % p)
p1 = subprocess.Popen(["/bin/echo", password],
stdout = subprocess.PIPE,
preexec_fn = self.chroot)
p2 = subprocess.Popen(["/usr/bin/passwd", "--stdin", "root"],
stdin = p1.stdout,
stdout = subprocess.PIPE,
preexec_fn = self.chroot)
p2.communicate()
def apply(self, ksrootpw):
if ksrootpw.isCrypted:
self.set_encrypted(ksrootpw.password)
elif ksrootpw.password != "":
self.set_unencrypted(ksrootpw.password)
if ksrootpw.lock:
self.lock()
class ServicesConfig(KickstartConfig):
"""A class to apply a kickstart services configuration to a system."""
def apply(self, ksservices):
if not os.path.exists(self.path("/sbin/chkconfig")):
return
for s in ksservices.enabled:
self.call(["/sbin/chkconfig", s, "on"])
for s in ksservices.disabled:
self.call(["/sbin/chkconfig", s, "off"])
class XConfig(KickstartConfig):
"""A class to apply a kickstart X configuration to a system."""
RUNLEVELS = {3: 'multi-user.target', 5: 'graphical.target'}
def apply(self, ksxconfig):
if ksxconfig.defaultdesktop:
f = open(self.path("/etc/sysconfig/desktop"), "w")
f.write("DESKTOP="+ksxconfig.defaultdesktop+"\n")
f.close()
if ksxconfig.startX:
if not os.path.isdir(self.path('/etc/systemd/system')):
logging.warning("there is no /etc/systemd/system directory, cannot update default.target!")
return
default_target = self.path('/etc/systemd/system/default.target')
if os.path.islink(default_target):
os.unlink(default_target)
os.symlink('/lib/systemd/system/graphical.target', default_target)
class RPMMacroConfig(KickstartConfig):
"""A class to apply the specified rpm macros to the filesystem"""
def apply(self, ks):
if not ks:
return
f = open(self.path("/etc/rpm/macros.imgcreate"), "w+")
if exclude_docs(ks):
f.write("%_excludedocs 1\n")
if not selinux_enabled(ks):
f.write("%__file_context_path %{nil}\n")
if inst_langs(ks) != None:
f.write("%_install_langs ")
f.write(inst_langs(ks))
f.write("\n")
f.close()
class NetworkConfig(KickstartConfig):
"""A class to apply a kickstart network configuration to a system."""
def write_ifcfg(self, network):
p = self.path("/etc/sysconfig/network-scripts/ifcfg-" + network.device)
f = file(p, "w+")
os.chmod(p, 0644)
f.write("DEVICE=%s\n" % network.device)
f.write("BOOTPROTO=%s\n" % network.bootProto)
if network.bootProto.lower() == "static":
if network.ip:
f.write("IPADDR=%s\n" % network.ip)
if network.netmask:
f.write("NETMASK=%s\n" % network.netmask)
if network.onboot:
f.write("ONBOOT=on\n")
else:
f.write("ONBOOT=off\n")
if network.essid:
f.write("ESSID=%s\n" % network.essid)
if network.ethtool:
if network.ethtool.find("autoneg") == -1:
network.ethtool = "autoneg off " + network.ethtool
f.write("ETHTOOL_OPTS=%s\n" % network.ethtool)
if network.bootProto.lower() == "dhcp":
if network.hostname:
f.write("DHCP_HOSTNAME=%s\n" % network.hostname)
if network.dhcpclass:
f.write("DHCP_CLASSID=%s\n" % network.dhcpclass)
if network.mtu:
f.write("MTU=%s\n" % network.mtu)
f.close()
def write_wepkey(self, network):
if not network.wepkey:
return
p = self.path("/etc/sysconfig/network-scripts/keys-" + network.device)
f = file(p, "w+")
os.chmod(p, 0600)
f.write("KEY=%s\n" % network.wepkey)
f.close()
def write_sysconfig(self, useipv6, hostname, gateway):
path = self.path("/etc/sysconfig/network")
f = file(path, "w+")
os.chmod(path, 0644)
f.write("NETWORKING=yes\n")
if useipv6:
f.write("NETWORKING_IPV6=yes\n")
else:
f.write("NETWORKING_IPV6=no\n")
if gateway:
f.write("GATEWAY=%s\n" % gateway)
f.close()
def write_hosts(self, hostname):
localline = ""
if hostname and hostname != "localhost.localdomain":
localline += hostname + " "
l = hostname.split(".")
if len(l) > 1:
localline += l[0] + " "
localline += "localhost.localdomain localhost"
path = self.path("/etc/hosts")
f = file(path, "w+")
os.chmod(path, 0644)
f.write("127.0.0.1\t\t%s\n" % localline)
f.write("::1\t\tlocalhost6.localdomain6 localhost6\n")
f.close()
def write_hostname(self, hostname):
if not hostname:
return
path = self.path("/etc/hostname")
f = file(path, "w+")
os.chmod(path, 0644)
f.write("%s\n" % (hostname,))
f.close()
def write_resolv(self, nodns, nameservers):
if nodns or not nameservers:
return
path = self.path("/etc/resolv.conf")
f = file(path, "w+")
os.chmod(path, 0644)
for ns in (nameservers):
if ns:
f.write("nameserver %s\n" % ns)
f.close()
def apply(self, ksnet):
fs.makedirs(self.path("/etc/sysconfig/network-scripts"))
useipv6 = False
nodns = False
hostname = None
gateway = None
nameservers = None
for network in ksnet.network:
if not network.device:
raise errors.KickstartError("No --device specified with "
"network kickstart command")
if (network.onboot and network.bootProto.lower() != "dhcp" and
not (network.ip and network.netmask)):
raise errors.KickstartError("No IP address and/or netmask "
"specified with static "
"configuration for '%s'" %
network.device)
self.write_ifcfg(network)
self.write_wepkey(network)
if network.ipv6:
useipv6 = True
if network.nodns:
nodns = True
if network.hostname:
hostname = network.hostname
if network.gateway:
gateway = network.gateway
if network.nameserver:
nameservers = network.nameserver.split(",")
self.write_sysconfig(useipv6, hostname, gateway)
self.write_hosts(hostname)
self.write_hostname(hostname)
self.write_resolv(nodns, nameservers)
class SelinuxConfig(KickstartConfig):
"""A class to apply a kickstart selinux configuration to a system."""
def relabel(self, ksselinux):
# touch some files which get unhappy if they're not labeled correctly
for fn in ("/etc/resolv.conf",):
path = self.path(fn)
f = file(path, "w+")
os.chmod(path, 0644)
f.close()
if ksselinux.selinux == ksconstants.SELINUX_DISABLED:
return
if not os.path.exists(self.path("/sbin/setfiles")):
return
self.call(["/sbin/setfiles", "-p", "-e", "/proc", "-e", "/sys", "-e", "/dev", selinux.selinux_file_context_path(), "/"])
def apply(self, ksselinux):
selinux_config = "/etc/selinux/config"
if not os.path.exists(self.instroot+selinux_config):
return
if ksselinux.selinux == ksconstants.SELINUX_ENFORCING:
cmd = "SELINUX=enforcing\n"
elif ksselinux.selinux == ksconstants.SELINUX_PERMISSIVE:
cmd = "SELINUX=permissive\n"
elif ksselinux.selinux == ksconstants.SELINUX_DISABLED:
cmd = "SELINUX=disabled\n"
else:
return
# Replace the SELINUX line in the config
lines = open(self.instroot+selinux_config).readlines()
with open(self.instroot+selinux_config, "w") as f:
for line in lines:
if line.startswith("SELINUX="):
f.write(cmd)
else:
f.write(line)
self.relabel(ksselinux)
def get_image_size(ks, default = None):
__size = 0
for p in ks.handler.partition.partitions:
if p.mountpoint == "/" and p.size:
__size = p.size
if __size > 0:
return int(__size) * 1024L * 1024L
else:
return default
def get_image_fstype(ks, default = None):
for p in ks.handler.partition.partitions:
if p.mountpoint == "/" and p.fstype:
return p.fstype
return default
def get_modules(ks):
devices = []
if not hasattr(ks.handler.device, "deviceList"):
devices.append(ks.handler.device)
else:
devices.extend(ks.handler.device.deviceList)
modules = []
for device in devices:
if not device.moduleName:
continue
modules.extend(device.moduleName.split(":"))
return modules
def get_timeout(ks, default = None):
if not hasattr(ks.handler.bootloader, "timeout"):
return default
if ks.handler.bootloader.timeout is None:
return default
return int(ks.handler.bootloader.timeout)
def get_kernel_args(ks, default = "ro rd.live.image quiet"):
if not hasattr(ks.handler.bootloader, "appendLine"):
return default
if ks.handler.bootloader.appendLine is None:
return default
return "%s %s" %(default, ks.handler.bootloader.appendLine)
def get_default_kernel(ks, default = None):
if not hasattr(ks.handler.bootloader, "default"):
return default
if not ks.handler.bootloader.default:
return default
return ks.handler.bootloader.default
def get_repos(ks, repo_urls = {}):
repos = {}
for repo in ks.handler.repo.repoList:
inc = []
if hasattr(repo, "includepkgs"):
inc.extend(repo.includepkgs)
exc = []
if hasattr(repo, "excludepkgs"):
exc.extend(repo.excludepkgs)
baseurl = repo.baseurl
mirrorlist = repo.mirrorlist
proxy = repo.proxy
sslverify = not repo.noverifyssl
if repo.name in repo_urls:
baseurl = repo_urls[repo.name]
mirrorlist = None
if repos.has_key(repo.name):
logging.warn("Overriding already specified repo %s" %(repo.name,))
repos[repo.name] = (repo.name, baseurl, mirrorlist, proxy, inc, exc, repo.cost, sslverify)
return repos.values()
def convert_method_to_repo(ks):
try:
ks.handler.repo.methodToRepo()
except (AttributeError, kserrors.KickstartError):
pass
def get_packages(ks, required = []):
return ks.handler.packages.packageList + required
def get_groups(ks, required = []):
return ks.handler.packages.groupList + required
def get_excluded(ks, required = []):
return ks.handler.packages.excludedList + required
def get_partitions(ks, required = []):
return ks.handler.partition.partitions
def ignore_missing(ks):
return ks.handler.packages.handleMissing == ksconstants.KS_MISSING_IGNORE
def exclude_docs(ks):
return ks.handler.packages.excludeDocs
def inst_langs(ks):
if hasattr(ks.handler.packages, "instLange"):
return ks.handler.packages.instLange
elif hasattr(ks.handler.packages, "instLangs"):
return ks.handler.packages.instLangs
return ""
def get_pre_scripts(ks):
scripts = []
for s in ks.handler.scripts:
if s.type != ksparser.KS_SCRIPT_PRE:
continue
scripts.append(s)
return scripts
def get_post_scripts(ks):
scripts = []
for s in ks.handler.scripts:
if s.type != ksparser.KS_SCRIPT_POST:
continue
scripts.append(s)
return scripts
def selinux_enabled(ks):
return ks.handler.selinux.selinux in (ksconstants.SELINUX_ENFORCING,
ksconstants.SELINUX_PERMISSIVE)
| gpl-2.0 | 1,327,125,252,375,814,700 | 32.924217 | 128 | 0.595037 | false |
sergeneren/anima | anima/env/base.py | 1 | 17632 | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import os
from anima import logger, log_file_handler
from anima.recent import RecentFileManager
class EnvironmentBase(object):
"""Connects the environment (the host program) to Stalker.
In Stalker, an Environment is a host application like Maya, Nuke, Houdini
etc.
Generally, the GUI presented to the end user is given an environment, which
lets the Qt GUI open, save, import or export a Version without knowing the
details of the host application.
The environment object supplies **methods** like ``open``, ``save``,
``export``, ``import`` or ``reference``. The main duty of the Environment
object is to introduce the host application (Maya, Houdini, Nuke, etc.) to
Stalker and let it open, save, export, import or reference a file.
It is the pipeline developer's duty to create the environment classes for
the programs used in the studio by instantiating this class and overriding
the methods as necessary. You can find good examples in `Anima Tools`_
which is a Python package developed in `Anima Istanbul`_.
.. _Anima Tools: https://pypi.python.org/pypi/anima
.. _Anima Istanbul: http://www.animaistanbul.com
Here is a brief example for creating an environment for a generic program::
from Stalker import EnvironmentBase
class MyProgramEnv(EnvironmentBase):
\"""This is a class which will be used by the UI
\"""
def open():
\"""uses the programs own Python API to open a version of an
asset
\"""
# do anything that needs to be done before opening the file
my_programs_own_python_api.open(filepath=self.version.full_path)
def save():
\"""uses the programs own Python API to save the current file
as a new version.
\"""
# do anything that needs to be done before saving the file
my_programs_own_python_api.save(filepath=self.version.full_path)
# do anything that needs to be done after saving the file
and that is it.
The environment class by default has a property called ``version``, holding
the currently open Version. It is None for a new scene and a
:class:`~stalker.models.version.Version` instance in any other case.
"""
name = "EnvironmentBase"
representations = ['Base']
def __init__(self, name="", extensions=None, version=None):
self._name = name
if extensions is None:
extensions = []
self._extensions = extensions
self._version = version
def __str__(self):
"""the string representation of the environment
"""
return self._name
@property
def version(self):
"""returns the current Version instance which is open in the
environment
"""
return self.get_current_version()
@property
def name(self):
"""returns the environment name
"""
return self._name
@name.setter
def name(self, name):
"""sets the environment name
"""
self._name = name
def save_as(self, version):
"""The save as action of this environment. It should save the current
scene or file to the given version.full_path
"""
raise NotImplementedError
def export_as(self, version):
"""Exports the contents of the open document as the given version.
:param version: A :class:`~stalker.models.version.Version` instance
holding the desired version.
"""
raise NotImplementedError
def open(self, version, force=False, representation=None,
reference_depth=0, skip_update_check=False):
"""the open action
"""
raise NotImplementedError
def import_(self, asset):
"""the import action
"""
raise NotImplementedError
def reference(self, asset):
"""the reference action
"""
raise NotImplementedError
def trim_repo_path(self, path):
"""Trims the repository path value from the given path
:param path: The path to be trimmed
:return: str
"""
# get the repo first
repo = self.find_repo(path)
if not repo:
return path
# then try to trim the path
if path.startswith(repo.path):
return path[len(repo.path):]
elif path.startswith(repo.windows_path):
return path[len(repo.windows_path):]
elif path.startswith(repo.linux_path):
return path[len(repo.linux_path):]
elif path.startswith(repo.osx_path):
return path[len(repo.osx_path):]
return path
@classmethod
def find_repo(cls, path):
"""returns the repository from the given path
:param str path: path in a repository
:return: stalker.models.repository.Repository
"""
# path could be using environment variables so expand them
# path = os.path.expandvars(path)
# first find the repository
from stalker import Repository
repos = Repository.query.all()
found_repo = None
for repo in repos:
if path.startswith(repo.path) \
or path.startswith(repo.windows_path) \
or path.startswith(repo.linux_path) \
or path.startswith(repo.osx_path):
found_repo = repo
break
return found_repo
def get_versions_from_path(self, path):
"""Finds Version instances from the given path value.
Finds and returns the :class:`~stalker.models.version.Version`
instances from the given path value.
Returns an empty list if it can't find any matching.
This method is different than
:meth:`~stalker.env.EnvironmentBase.get_version_from_full_path`
because it returns a list of
:class:`~stalker.models.version.Version` instances which are
residing in that path. The list is ordered by the ``id``\ s of the
instances.
:param path: A path which has possible
:class:`~stalker.models.version.Version` instances.
:return: A list of :class:`~stalker.models.version.Version` instances.
"""
if not path:
return []
# convert '\\' to '/'
path = os.path.normpath(path).replace('\\', '/')
from stalker import Repository
os_independent_path = Repository.to_os_independent_path(path)
logger.debug('os_independent_path: %s' % os_independent_path)
from stalker import db, Version
# try to get all versions with that info
with db.DBSession.no_autoflush:
versions = Version.query.\
filter(Version.full_path.startswith(os_independent_path)).all()
return versions
@classmethod
def get_version_from_full_path(cls, full_path):
"""Finds the Version instance from the given full_path value.
Finds and returns a :class:`~stalker.models.version.Version` instance
from the given full_path value.
Returns None if it can't find any matching.
:param full_path: The full_path of the desired
:class:`~stalker.models.version.Version` instance.
:return: :class:`~stalker.models.version.Version`
"""
logger.debug('full_path: %s' % full_path)
# convert '\\' to '/'
full_path = os.path.normpath(
os.path.expandvars(full_path)
).replace('\\', '/')
# trim repo path
from stalker import Repository, Version
os_independent_path = Repository.to_os_independent_path(full_path)
# try to get a version with that info
logger.debug('getting a version with path: %s' % full_path)
version = Version.query\
.filter(Version.full_path == os_independent_path).first()
logger.debug('version: %s' % version)
return version
def get_current_version(self):
"""Returns the current Version instance from the environment.
:returns: :class:`~stalker.models.version.Version` instance or None
"""
raise NotImplementedError
def append_to_recent_files(self, path):
"""appends the given path to the recent files list
"""
# add the file to the recent file list
rfm = RecentFileManager()
rfm.add(self.name, path)
def get_version_from_recent_files(self):
"""This will try to create a :class:`.Version` instance by looking at
the recent files list.
It will return None if it can not find one.
:return: :class:`.Version`
"""
version = None
logger.debug("trying to get the version from recent file list")
# read the fileName from recent files list
# try to get the a valid asset file from starting the last recent file
rfm = RecentFileManager()
try:
recent_files = rfm[self.name]
except KeyError:
logger.debug('no recent files')
recent_files = None
if recent_files is not None:
for recent_file in recent_files:
version = self.get_version_from_full_path(recent_file)
if version is not None:
break
logger.debug("version from recent files is: %s" % version)
return version
def get_last_version(self):
"""Returns the last opened Version instance from the environment.
* It first looks at the current open file full path and tries to match
it with a Version instance.
* Then searches for the recent files list.
* Still not able to find any Version instances, will return the version
instance with the highest id which has the current workspace path in
its path
* Still not able to find any Version instances returns None
:returns: :class:`~stalker.models.version.Version` instance or None.
"""
version = self.get_current_version()
# read the recent file list
if version is None:
version = self.get_version_from_recent_files()
return version
def get_project(self):
"""returns the current project from environment
"""
raise NotImplementedError
def set_project(self, version):
"""Sets the project to the given Versions project.
:param version: A :class:`~stalker.models.version.Version`.
"""
raise NotImplementedError
def check_referenced_versions(self):
"""Checks the referenced versions
:returns: list of Versions
"""
raise NotImplementedError
def get_referenced_versions(self):
"""Returns the :class:`~stalker.models.version.Version` instances which
are referenced in to the current scene
:returns: list of :class:`~stalker.models.version.Version` instances.
"""
raise NotImplementedError
def get_frame_range(self):
"""Returns the frame range from the environment
:returns: a tuple of integers containing the start and end frame
numbers
"""
raise NotImplementedError
def set_frame_range(self, start_frame=1, end_frame=100,
adjust_frame_range=False):
"""Sets the frame range in the environment to the given start and end
frames
"""
raise NotImplementedError
def get_fps(self):
"""Returns the frame rate of this current environment
"""
raise NotImplementedError
def set_fps(self, fps=25):
"""Sets the frame rate of the environment. The default value is 25.
"""
raise NotImplementedError
@property
def extensions(self):
"""Returns the valid native extensions for this environment.
:returns: a list of strings
"""
return self._extensions
@extensions.setter
def extensions(self, extensions):
"""Sets the valid native extensions of this environment.
:param extensions: A list of strings holding the extensions. Ex:
["ma", "mb"] for Maya
"""
self._extensions = extensions
def has_extension(self, filename):
"""Returns True if the given file names extension is in the extensions
list false otherwise.
accepts:
* a full path with extension or not
* a file name with extension or not
* an extension with a dot on the start or not
:param filename: A string containing the filename
"""
if filename is None:
return False
return filename.split('.')[-1].lower() in self.extensions
def load_referenced_versions(self):
"""loads all the references
"""
raise NotImplementedError
def replace_version(self, source_version, target_version):
"""Replaces the source_version with the target_version
:param source_version: A
:class:`~stalker.models.version.Version` instance holding the version
to be replaced
:param target_version: A
:class:`~stalker.models.version.Version` instance holding the new
version replacing the source one.
"""
raise NotImplementedError
def replace_external_paths(self, mode=0):
"""Replaces the external paths (which are not starting with the
environment variable) with a proper path. The mode controls if the
resultant path should be absolute or relative to the project dir.
:param mode: Controls the resultant path is absolute or relative.
mode 0: absolute (a path which starts with $REPO)
mode 1: relative (to project path)
:return:
"""
raise NotImplementedError
def reference_filters(self, version, options):
"""Checks the given version against the given options
:param options: a dictionary object showing the reference options
:return:
"""
pass
@classmethod
def get_significant_name(cls, version, include_version_number=True):
"""returns a significant name starting from the closest parent which is
an Asset, Shot or Sequence and includes the Project.code
:rtype : basestring
"""
sig_name = '%s_%s' % (version.task.project.code, version.nice_name)
if include_version_number:
sig_name = '%s_v%03d' % (sig_name, version.version_number)
return sig_name
@classmethod
def local_backup_path(cls):
"""returns the local backup path
:return:
"""
# use the user home directory .stalker_local_backup
from anima import local_cache_folder
return os.path.normpath(
os.path.expanduser('%s/projects_backup' % local_cache_folder)
).replace('\\', '/')
@classmethod
def create_local_copy(cls, version):
"""Creates a local copy of the given version
:param version:
:return:
"""
output_path = os.path.join(
cls.local_backup_path(),
version.absolute_path.replace(':', '')
).replace('\\', '/')
output_full_path = os.path.join(
cls.local_backup_path(),
version.absolute_full_path.replace(':', '')
).replace('\\', '/')
# do nothing if the version and the copy is on the same drive
# (ex: do not duplicate the file)
if len(os.path.commonprefix([output_full_path,
version.absolute_full_path])):
logger.debug(
'Local copy file: %s is on the same drive with the source '
'file: %s' % (output_full_path, version.absolute_full_path)
)
logger.debug('Not duplicating it!')
return
# create intermediate folders
try:
os.makedirs(output_path)
except OSError:
# already exists
pass
import shutil
try:
shutil.copy(
version.absolute_full_path,
output_full_path
)
except IOError:
# no space left
pass
logger.debug('created copy to: %s' % output_full_path)
class Filter(object):
"""A filter class filters given options against the given versions related
task type.
:param version: :class:`~stalker.models.version.Version` instance. The
related :class:`~stalker.models.task.Task`\ s
:attr:`~stalker.models.task.Task.type` attribute is key here. It defines
which filter to apply to.
:param options: A dictionary with keys are the name of the option and the
value is the value of that option.
"""
def __init__(self):
pass
class OpenFilter(Filter):
"""A filter for Open operations
"""
pass
class ReferenceFilter(Filter):
"""A filter for Reference operations
"""
pass
class ImportFilter(Filter):
"""A filter for Import operations
"""
pass
class ExportFilter(Filter):
"""A filter for Export operations
"""
pass
class SaveAsFilter(Filter):
"""A Filter for Save As operations
"""
pass
| bsd-2-clause | -7,131,568,699,548,201,000 | 30.542039 | 80 | 0.609176 | false |
Mankee/CS419 | wsgi/openshift/schedules/views.py | 1 | 2686 | import logging
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, redirect
from django_cas.views import _service_url, _login_url
from models import CredentialsModel, Event
from django.contrib.auth import logout as auth_logout, login
import settings
def index(request):
# Obtain the context from the HTTP request.
context = RequestContext(request)
# Query the database for a list of ALL events stored per Calendar.
all_events = Event.objects.all()
context_dict = {'Events': all_events}
# Render the response and send it back!
return render_to_response('schedules/index.html', context_dict, context)
# @login_required()
def render_schedule(request, next_page=None, required=True):
if request.user.is_authenticated():
# Query the database for a list of ALL events stored per Calendar.
all_events = Event.objects.all()
data = {'Events': all_events}
return render_to_response('schedules/main.html', data)
else:
logging.error(
'redirecting to login from render_schedule... could not authenticate user ' + request.user.username)
service = _service_url(request, next_page)
return HttpResponseRedirect(_login_url(service))
# @login_required
def done(request):
"""Login complete view, displays user data"""
return render_to_response('schedules/done.html', {'user': request.user},
RequestContext(request))
def logout(request):
"""Logs out user"""
auth_logout(request)
return render_to_response('schedules/home.html', {}, RequestContext(request))
def home(request):
"""Home view, displays login mechanism"""
if request.user.is_authenticated():
return redirect('done')
return render_to_response('schedules/home.html', {
'plus_id': getattr(settings, 'SOCIAL_AUTH_GOOGLE_PLUS_KEY', None)
}, RequestContext(request))
def signup_email(request):
return render_to_response('schedules/email_signup.html', {}, RequestContext(request))
def validation_sent(request):
return render_to_response('schedules/validation_sent.html', {
'email': request.session.get('email_validation_address')
}, RequestContext(request))
def require_email(request):
if request.method == 'POST':
request.session['saved_email'] = request.POST.get('email')
backend = request.session['partial_pipeline']['backend']
return redirect('social:complete', backend=backend)
return render_to_response('schedules/email.html', RequestContext(request)) | apache-2.0 | -4,304,884,770,091,513,300 | 36.84507 | 112 | 0.702904 | false |
bfalacerda/strands_executive | task_executor/scripts/example_add_cosafe_client.py | 2 | 3508 | #! /usr/bin/env python
import rospy
import actionlib
from actionlib_msgs.msg import GoalStatus
from strands_executive_msgs.msg import ExecutePolicyAction, ExecutePolicyFeedback, ExecutePolicyGoal, MdpStateVar, StringIntPair, StringTriple, MdpAction, MdpActionOutcome, MdpDomainSpec, MdpTask, Task
from strands_executive_msgs.srv import GetGuaranteesForCoSafeTask, GetGuaranteesForCoSafeTaskRequest
import strands_executive_msgs.mdp_action_utils as mau
from strands_executive_msgs.srv import AddCoSafeTasks, SetExecutionStatus, DemandCoSafeTask
import sys
def create_metric_map_action(waypoint_name, duration=5):
action_name='wait_at_' + waypoint_name
state_var_name="executed_" + action_name
# set state_var
var=MdpStateVar(name=state_var_name,
init_val=0,
min_range=0,
max_range=1)
# set action
outcome=MdpActionOutcome(probability=1.0,
#waypoint=[], #same waypoint
post_conds=[StringIntPair(string_data=state_var_name, int_data=1)],
duration_probs=[1.0],
durations=[duration],
#status=GoalStatus #prob 1, isnt needed
#result=[] #prob 1, isnt needed
)
action = MdpAction(name=action_name,
action_server='wait_action',
waypoints=[waypoint_name],
pre_conds=[StringIntPair(string_data=state_var_name, int_data=0)],
outcomes=[outcome]
)
mau.add_time_argument(action, rospy.Time())
mau.add_duration_argument(action, rospy.Duration(duration))
return (var, action)
def get_services():
# get services necessary to do the job
add_tasks_srv_name = '/task_executor/add_co_safe_tasks'
demand_task_srv_name = '/task_executor/demand_co_safe_task'
set_exe_stat_srv_name = '/task_executor/set_execution_status'
rospy.loginfo("Waiting for task_executor service...")
rospy.wait_for_service(add_tasks_srv_name)
rospy.wait_for_service(set_exe_stat_srv_name)
rospy.loginfo("Done")
add_tasks_srv = rospy.ServiceProxy(add_tasks_srv_name, AddCoSafeTasks)
demand_task_srv = rospy.ServiceProxy(demand_task_srv_name, DemandCoSafeTask)
set_execution_status = rospy.ServiceProxy(set_exe_stat_srv_name, SetExecutionStatus)
return add_tasks_srv, demand_task_srv, set_execution_status
if __name__ == '__main__':
rospy.init_node('mdp_client_test')
n_waypoints=3
# get services to call into execution framework
add_tasks, demand_task, set_execution_status = get_services()
spec=MdpDomainSpec()
ltl_task=''
for i in range(1, n_waypoints+1):
waypoint_name="WayPoint" + str(i)
(var, action)=create_metric_map_action(waypoint_name)
spec.vars.append(var)
spec.actions.append(action)
ltl_task+='(F executed_wait_at_' + waypoint_name + '=1) & '
spec.ltl_task=ltl_task[:-3]
# print add_tasks([spec],[rospy.Time()], [rospy.get_rostime() + rospy.Duration(60 * 60)])
set_execution_status(True)
task = MdpTask()
task.mdp_spec = spec
task.start_after = rospy.get_rostime()
task.end_before = task.start_after + rospy.Duration(60 * 60)
task.priority = Task.HIGH_PRIORITY
task.is_interruptible = True
print add_tasks([task]) | mit | 7,696,492,989,779,846,000 | 37.141304 | 201 | 0.631129 | false |
cernbox/wopiserver | src/localiface.py | 1 | 7218 | '''
localiface.py
Local storage interface for the IOP WOPI server
Author: [email protected], CERN/IT-ST
'''
import time
import os
import warnings
from stat import S_ISDIR
# module-wide state
config = None
log = None
homepath = None
def _getfilepath(filepath):
'''map the given filepath into the target namespace by prepending the homepath (see storagehomepath in wopiserver.conf)'''
return os.path.normpath(homepath + os.sep + filepath)
def init(inconfig, inlog):
'''Init module-level variables'''
global config # pylint: disable=global-statement
global log # pylint: disable=global-statement
global homepath # pylint: disable=global-statement
config = inconfig
log = inlog
homepath = config.get('local', 'storagehomepath')
try:
# validate the given storagehomepath folder
mode = os.stat(homepath).st_mode
if not S_ISDIR(mode):
raise IOError('Not a directory')
except IOError as e:
raise IOError('Could not stat storagehomepath folder %s: %s' % (homepath, e))
def stat(_endpoint, filepath, _userid):
'''Stat a file and return (size, mtime) as well as other extended info. This method assumes that the given userid has access.'''
try:
tstart = time.time()
statInfo = os.stat(_getfilepath(filepath))
tend = time.time()
log.info('msg="Invoked stat" filepath="%s" elapsedTimems="%.1f"' % (_getfilepath(filepath), (tend-tstart)*1000))
if S_ISDIR(statInfo.st_mode):
raise IOError('Is a directory')
return {
'inode': str(statInfo.st_ino),
'filepath': filepath,
'userid': str(statInfo.st_uid) + ':' + str(statInfo.st_gid),
'size': statInfo.st_size,
'mtime': statInfo.st_mtime
}
except (FileNotFoundError, PermissionError) as e:
raise IOError(e)
def statx(endpoint, filepath, userid, versioninv=1): # pylint: disable=unused-argument
'''Get extended stat info (inode, filepath, userid, size, mtime). Equivalent to stat in the case of local storage.
The versioninv flag is ignored as local storage always supports version-invariant inodes (cf. CERNBOX-1216).'''
return stat(endpoint, filepath, userid)
def setxattr(_endpoint, filepath, _userid, key, value):
'''Set the extended attribute <key> to <value> on behalf of the given userid'''
try:
os.setxattr(_getfilepath(filepath), 'user.' + key, str(value).encode())
except (FileNotFoundError, PermissionError, OSError) as e:
log.warning('msg="Failed to setxattr" filepath="%s" key="%s" exception="%s"' % (filepath, key, e))
raise IOError(e)
def getxattr(_endpoint, filepath, _userid, key):
'''Get the extended attribute <key> on behalf of the given userid. Do not raise exceptions'''
try:
filepath = _getfilepath(filepath)
return os.getxattr(filepath, 'user.' + key).decode('UTF-8')
except (FileNotFoundError, PermissionError, OSError) as e:
log.warning('msg="Failed to getxattr" filepath="%s" key="%s" exception="%s"' % (filepath, key, e))
return None
def rmxattr(_endpoint, filepath, _userid, key):
'''Remove the extended attribute <key> on behalf of the given userid'''
try:
os.removexattr(_getfilepath(filepath), 'user.' + key)
except (FileNotFoundError, PermissionError, OSError) as e:
log.warning('msg="Failed to rmxattr" filepath="%s" key="%s" exception="%s"' % (filepath, key, e))
raise IOError(e)
def readfile(_endpoint, filepath, _userid):
'''Read a file on behalf of the given userid. Note that the function is a generator, managed by Flask.'''
log.debug('msg="Invoking readFile" filepath="%s"' % filepath)
try:
tstart = time.time()
filepath = _getfilepath(filepath)
chunksize = config.getint('io', 'chunksize')
with open(filepath, mode='rb', buffering=chunksize) as f:
tend = time.time()
log.info('msg="File open for read" filepath="%s" elapsedTimems="%.1f"' % (filepath, (tend-tstart)*1000))
# the actual read is buffered and managed by the Flask server
for chunk in iter(lambda: f.read(chunksize), b''):
yield chunk
except FileNotFoundError as e:
# log this case as info to keep the logs cleaner
log.info('msg="File not found on read" filepath="%s"' % filepath)
# as this is a generator, we yield the error string instead of the file's contents
yield IOError('No such file or directory')
except OSError as e:
# general case, issue a warning
log.warning('msg="Error opening the file for read" filepath="%s" error="%s"' % (filepath, e))
yield IOError(e)
def writefile(_endpoint, filepath, _userid, content, islock=False):
'''Write a file to local storage on behalf of the given userid. The entire content is written
and any pre-existing file is deleted (or moved to the previous version if supported).
With islock=True, the file is opened with O_CREAT|O_EXCL.'''
if isinstance(content, str):
content = bytes(content, 'UTF-8')
size = len(content)
filepath = _getfilepath(filepath)
log.debug('msg="Invoking writeFile" filepath="%s" size="%d"' % (filepath, size))
tstart = time.time()
if islock:
warnings.simplefilter("ignore", ResourceWarning)
try:
# apparently there's no way to pass the O_CREAT without O_TRUNC to the python f.open()!
# cf. https://stackoverflow.com/questions/38530910/python-open-flags-for-open-or-create
# so we resort to the os-level open(), with some caveats
fd = os.open(filepath, os.O_CREAT | os.O_EXCL)
f = os.fdopen(fd, mode='wb')
written = f.write(content) # os.write(fd, ...) raises EBADF?
os.close(fd) # f.close() raises EBADF! while this works
# as f goes out of scope here, we'd get a false ResourceWarning, which is ignored by the above filter
except FileExistsError:
log.info('msg="File exists on write but islock flag requested" filepath="%s"' % filepath)
raise IOError('File exists and islock flag requested')
except OSError as e:
log.warning('msg="Error writing file in O_EXCL mode" filepath="%s" error="%s"' % (filepath, e))
raise IOError(e)
else:
try:
with open(filepath, mode='wb') as f:
written = f.write(content)
except OSError as e:
log.warning('msg="Error writing file" filepath="%s" error="%s"' % (filepath, e))
raise IOError(e)
tend = time.time()
if written != size:
raise IOError('Written %d bytes but content is %d bytes' % (written, size))
log.info('msg="File written successfully" filepath="%s" elapsedTimems="%.1f" islock="%s"' % \
(filepath, (tend-tstart)*1000, islock))
def renamefile(_endpoint, origfilepath, newfilepath, _userid):
'''Rename a file from origfilepath to newfilepath on behalf of the given userid.'''
try:
os.rename(_getfilepath(origfilepath), _getfilepath(newfilepath))
except (FileNotFoundError, PermissionError, OSError) as e:
raise IOError(e)
def removefile(_endpoint, filepath, _userid, _force=0):
'''Remove a file on behalf of the given userid.
The force argument is irrelevant and ignored for local storage.'''
try:
os.remove(_getfilepath(filepath))
except (FileNotFoundError, PermissionError, IsADirectoryError, OSError) as e:
raise IOError(e)
| gpl-3.0 | 3,878,232,960,349,696,500 | 40.245714 | 131 | 0.681629 | false |
haracejacob/darknet-python-flask | app.py | 1 | 6131 | # -*- coding: utf-8 -*-
import os
from os.path import splitext, basename
import time
import datetime
import logging
import flask
import werkzeug
import optparse
import tornado.wsgi
import tornado.httpserver
from flask import jsonify
from PIL import Image
from PIL import ImageDraw
import cStringIO as StringIO
import urllib
from urlparse import urlparse
import darknet as dn
REPO_DIRNAME = os.path.abspath(os.path.dirname('./'))
UPLOAD_FOLDER = '/tmp/darknet_flask'
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])
# Obtain the flask app object
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('index.html', has_result=False)
@app.route('/classify_url', methods=['GET'])
def classify_url():
imageurl = flask.request.args.get('imageurl', '')
try:
disassembled = urlparse(imageurl)
print(disassembled)
image_name, image_ext = splitext(basename(disassembled.path))
print(image_name, image_ext)
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(image_name+image_ext)
filename = os.path.join(UPLOAD_FOLDER, filename_)
urllib.urlretrieve(imageurl, filename)
logging.info('Saving to %s.', filename)
except Exception as err:
# For any exception we encounter in reading the image, we will just
# not continue.
logging.info('URL Image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open image from URL.')
)
logging.info('Image: %s', imageurl)
result = app.clf.classify_image(filename)
dr_image = result[-1]
result = result[:-1]
return flask.render_template(
'index.html', has_result=True, result=result,
imagesrc=embed_image_html(filename),
proc_imagesrc=embed_image_html(dr_image)
)
@app.route('/classify_upload', methods=['POST'])
def classify_upload():
try:
# We will save the file to disk for possible data collection.
imagefile = flask.request.files['imagefile']
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(imagefile.filename)
filename = os.path.join(UPLOAD_FOLDER, filename_)
imagefile.save(filename)
logging.info('Saving to %s.', filename)
except Exception as err:
logging.info('Uploaded image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open uploaded image.')
)
result = app.clf.classify_image(filename)
dr_image = result[-1]
result = result[:-1]
return flask.render_template(
'index.html', has_result=True, result=result,
imagesrc=embed_image_html(filename),
proc_imagesrc=embed_image_html(dr_image)
)
@app.route('/classify_rest', methods=['POST'])
def classify_rest():
try:
# We will save the file to disk for possible data collection.
imagefile = flask.request.files['imagefile']
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(imagefile.filename)
filename = os.path.join(UPLOAD_FOLDER, filename_)
imagefile.save(filename)
logging.info('Saving to %s.', filename)
except Exception as err:
logging.info('Uploaded image open error: %s', err)
return jsonify(val = 'Cannot open uploaded image.')
result = app.clf.classify_image(filename)
result = result[:-1]
return jsonify(val = result)
def embed_image_html(image):
"""Creates an image embedded in HTML base64 format."""
im = Image.open(image)
string_buf = StringIO.StringIO()
im.save(string_buf, format='png')
data = string_buf.getvalue().encode('base64').replace('\n', '')
return 'data:image/png;base64,' + data
class ImageDetector(object) :
def __init__(self, cfg_file, weights_file, meta_file) :
self.net = dn.load_net(cfg_file, weights_file, 0)
self.meta = dn.load_meta(meta_file)
print("Success to load darknet")
def classify_image(self, image) :
print("Classfy : ", image)
res = dn.detect(self.net, self.meta, image)
img = Image.open(image)
dr = ImageDraw.Draw(img)
for data in res :
x,y,w,h = data[2]
for i in range(5) :
dr.rectangle((x-w/2+i,y-h/2+i,x+w/2-i,y+h/2-i), outline=(46, 254, 46))
dr_image = image[:image.rfind('.')]+'_detect'+image[image.rfind('.'):]
img.save(dr_image)
res.append(dr_image)
return res
def start_tornado(app, port=5000):
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(port)
print("Tornado server starting on port {}".format(port))
tornado.ioloop.IOLoop.instance().start()
def start_from_terminal(app):
"""
Parse command line options and start the server.
"""
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug',
help="enable debug mode",
action="store_true", default=False)
parser.add_option(
'-p', '--port',
help="which port to serve content on",
type='int', default=5000)
parser.add_option(
'-c', '--cfg',
help="choose cfg file",
action='store', default="cfg/tiny-yolo.cfg")
parser.add_option(
'-w', '--weights',
help="choose weights file",
action='store', default="tiny-yolo.weights")
parser.add_option(
'-m', '--meta',
help="choose meta file",
action='store', default="cfg/coco.data")
# Initialize classifier + warm start by forward for allocation
opts, args = parser.parse_args()
app.clf = ImageDetector(opts.cfg, opts.weights, opts.meta)
if opts.debug:
app.run(debug=True, host='0.0.0.0', port=opts.port)
else:
start_tornado(app, opts.port)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
start_from_terminal(app)
| mit | -8,258,458,316,918,674,000 | 32.502732 | 76 | 0.63709 | false |
jdcald13/Winning_Texas_Holdem_Strategy | app/hand.py | 1 | 2626 | from app.deck import Deck
class Hand(Deck):
"""In real play we would also need to know the total number of players so that we could deal the cards out in the correct order. However, here we are only interested in our hand (we never fully know our
opponent's hand), which will later be compared to an opening range based on our position. Therefore, it doesn't matter which two cards we get as long as they are valid, random, and different."""
def __init__(self):
super(Hand, self).__init__()
self.hole_cards = []
def get_hand(self, deck):
self.hole_cards.append(deck.deal_card())
self.hole_cards.append(deck.deal_card())
return self.hole_cards
def order_hand(self):
"""Need to order the hand from highest to lowest value; i.e. 3s Ah needs to be reordered to Ah 3s to compare it to the hand type of A3o."""
if self.hole_cards[0].get_rank() < self.hole_cards[1].get_rank(): # This is similar to a reverse sort which gets the majority of hands in the correct order.
self.hole_cards = [self.hole_cards[1], self.hole_cards[0]]
if self.hole_cards[1].get_rank() == "A": # Now need to get the A-T in correct order b/c cannot use the alphabetical sort order.
self.hole_cards = [self.hole_cards[1], self.hole_cards[0]]
elif self.hole_cards[1].get_rank() == "K":
self.hole_cards = [self.hole_cards[1], self.hole_cards[0]]
elif self.hole_cards[1].get_rank() == "Q":
self.hole_cards = [self.hole_cards[1], self.hole_cards[0]]
elif self.hole_cards[1].get_rank() == "J" and self.hole_cards[0].get_rank() == "T":
self.hole_cards = [self.hole_cards[1], self.hole_cards[0]]
return self.hole_cards
def hand_type(self):
"""The 3 hand types are pairs (5h5s is 55), suited (KhQh is KQs; s = suited here; not to be confused as 'spades'), and unsuited (5d4c is 54o; o = off-suit)."""
if self.hole_cards[0].get_rank() == self.hole_cards[1].get_rank():
return str(self.hole_cards[0].get_rank()) + str(self.hole_cards[1].get_rank())
elif self.hole_cards[0].get_suit() == self.hole_cards[1].get_suit():
return str(self.hole_cards[0].get_rank()) + str(self.hole_cards[1].get_rank() + "s")
else:
return str(self.hole_cards[0].get_rank()) + str(self.hole_cards[1].get_rank() + "o")
def __str__(self):
if self.hole_cards:
c = ""
for card in self.hole_cards:
c += str(card) + " "
else:
c = "No cards in hand."
return c
| mit | -5,951,038,077,747,133,000 | 54.87234 | 206 | 0.606626 | false |
TomTranter/OpenPNM | openpnm/network/__init__.py | 1 | 5934 | r"""
**openpnm.network**
----
This module contains the ``GenericNetwork`` class, whose main purpose is to
manage the topological representation of the Network. It also houses a
collection of Network generators.
----
**Available Network Generators**
OpenPNM includes a variety of Network generators. The basically include two
families of topology: periodic lattices and tessellations of random points.
+---------------------+-------------------------------------------------------+
| Generator Name | Description |
+=====================+=======================================================+
| Cubic | Simple cubic lattice with connectivity from 6 to 26 |
+---------------------+-------------------------------------------------------+
| CubicDual | Body centered cubic lattice plus face centered nodes |
| | on the surfaces |
+---------------------+-------------------------------------------------------+
| CubicTemplate | Simple cubic lattice with arbitrary domain shape |
| | specified by a template image |
+---------------------+-------------------------------------------------------+
| Bravais | Crystal lattice types including fcc, bcc, sc, and hcp |
+---------------------+-------------------------------------------------------+
| Delaunay | Random network formed by Delaunay tessellation of |
| | arbitrary base points |
+---------------------+-------------------------------------------------------+
| Voronoi | Random network formed by Voronoi tessellation of |
| | arbitrary base points |
+---------------------+-------------------------------------------------------+
| Gabriel | Random network formed by Gabriel tessellation of |
| | arbitrary base points |
+---------------------+-------------------------------------------------------+
| DelaunayVoronoiDual | Combined and interconnected Voronoi and Delaunay |
| | tessellations |
+---------------------+-------------------------------------------------------+
----
**The GenericNetwork Class**
All of the above Network classes derive from the GenericNetwork class. It is
a subclass of ``Base`` so contains methods for retrieving sets of pores based
on labels and so forth, but also contains the following additional methods
that are used solely for topological queries.
that are used soley for topological queries.
Pore networks require two essential pieces of information:
- the spatial location of pores
- the connectivity of which throats connect which pores
The ``GenericNetwork`` class and it's subclasses are responsible for storing,
managing, and utilizing this information.
Network topology is stored using `adjacency matrices
<https://en.wikipedia.org/wiki/Adjacency_matrix>`_. Moreover, this is stored
using a `sparse matrix format <https://en.wikipedia.org/wiki/Sparse_matrix>`_
known as COO. All network objects store the COO matrix as ``'throat.conns'``.
The spatial location of each pore is stored in Cartesian coordinates [x, y, z],
under ``'pore.coords'``. All networks must be 3D, so even a 2D network must
have a z-component (but set to 0).
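As a minimal illustration (a hypothetical 3-pore chain, not produced by any
particular generator above), these two arrays could look like::

    net['throat.conns']   # [[0, 1], [1, 2]] -- one [pore_1, pore_2] pair per throat
    net['pore.coords']    # [[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]] -- x, y, z per pore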
The following methods are implemented on ``GenericNetwork``, and look into
the ``'throat.conns'`` and ``'pore.coords'`` as needed.
+-------------------------+---------------------------------------------------+
| Method | Description |
+=========================+===================================================+
| num_neighbors | Counts the number of neighbors with a given label |
+-------------------------+---------------------------------------------------+
| find_neighbor_pores | Gets indices of pores neighboring a given pore |
+-------------------------+---------------------------------------------------+
| find_neighbor_throats | Gets indices of neighbor throats to a given pore |
+-------------------------+---------------------------------------------------+
| find_connected_pores | Gets indices of pores connected by a given throat |
+-------------------------+---------------------------------------------------+
| find_connecting_throat | Gets indices of the throat joining pairs of pores |
+-------------------------+---------------------------------------------------+
| find_nearby_pores | Find all pores within given distance of given pore|
+-------------------------+---------------------------------------------------+
| create_adjacency_matrix | Generates a weighted adjacency matrix |
+-------------------------+---------------------------------------------------+
| create_incidence_matrix | Creates a weighted incidence matrix |
+-------------------------+---------------------------------------------------+
| get_adjacency_matrix | Returns an adjacency matrix with default weights |
+-------------------------+---------------------------------------------------+
| get_incidence_matrix | Returns an incidence matrix with default weights |
+-------------------------+---------------------------------------------------+
| check_network_health | Check various aspects of topology for problems |
+-------------------------+---------------------------------------------------+
"""
from .GenericNetwork import GenericNetwork
from .Cubic import Cubic
from .CubicDual import CubicDual
from .Bravais import Bravais
from .CubicTemplate import CubicTemplate
from .DelaunayVoronoiDual import DelaunayVoronoiDual
from .Voronoi import Voronoi
from .Delaunay import Delaunay
from .Gabriel import Gabriel
| mit | 3,996,471,672,911,377,400 | 53.944444 | 79 | 0.454331 | false |
ErenTD/Eren-s-Music-Bot | musicbot/playlist.py | 1 | 12272 | import os.path
import datetime
import traceback
from random import shuffle
from itertools import islice
from collections import deque
from urllib.error import URLError
from youtube_dl.utils import ExtractorError, DownloadError, UnsupportedError
from .utils import get_header
from .lib.event_emitter import EventEmitter
from .entry import URLPlaylistEntry, StreamPlaylistEntry
from .exceptions import ExtractionError, WrongEntryTypeError
class Playlist(EventEmitter):
"""
    A playlist manages the list of songs that will be played.
"""
def __init__(self, bot):
super().__init__()
self.bot = bot
self.loop = bot.loop
self.downloader = bot.downloader
self.entries = deque()
def __iter__(self):
return iter(self.entries)
def shuffle(self):
shuffle(self.entries)
def clear(self):
self.entries.clear()
async def add_entry(self, song_url, **meta):
"""
Validates and adds a song_url to be played. This does not start the download of the song.
Returns the entry & the position it is in the queue.
:param song_url: The song url to add to the playlist.
:param meta: Any additional metadata to add to the playlist entry.
"""
try:
info = await self.downloader.extract_info(self.loop, song_url, download=False)
except Exception as e:
raise ExtractionError('Could not extract information from {}\n\n{}'.format(song_url, e))
if not info:
raise ExtractionError('Could not extract information from %s' % song_url)
# TODO: Sort out what happens next when this happens
if info.get('_type', None) == 'playlist':
raise WrongEntryTypeError("This is a playlist.", True, info.get('webpage_url', None) or info.get('url', None))
if info.get('is_live', False):
return await self.add_stream_entry(song_url, info=info, **meta)
# TODO: Extract this to its own function
if info['extractor'] in ['generic', 'Dropbox']:
try:
# unfortunately this is literally broken
# https://github.com/KeepSafe/aiohttp/issues/758
# https://github.com/KeepSafe/aiohttp/issues/852
content_type = await get_header(self.bot.aiosession, info['url'], 'CONTENT-TYPE')
print("Got content type", content_type)
except Exception as e:
print("[Warning] Failed to get content type for url %s (%s)" % (song_url, e))
content_type = None
if content_type:
if content_type.startswith(('application/', 'image/')):
if '/ogg' not in content_type: # How does a server say `application/ogg` what the actual fuck
raise ExtractionError("Invalid content type \"%s\" for url %s" % (content_type, song_url))
elif not content_type.startswith(('audio/', 'video/')):
print("[Warning] Questionable content type \"%s\" for url %s" % (content_type, song_url))
entry = URLPlaylistEntry(
self,
song_url,
info.get('title', 'Untitled'),
info.get('duration', 0) or 0,
self.downloader.ytdl.prepare_filename(info),
**meta
)
self._add_entry(entry)
return entry, len(self.entries)
async def add_stream_entry(self, song_url, info=None, **meta):
if info is None:
try:
info = {'title': song_url, 'extractor': None}
info = await self.downloader.extract_info(self.loop, song_url, download=False)
except DownloadError as e:
if e.exc_info[0] == UnsupportedError: # ytdl doesn't like it but its probably a stream
print("Assuming content is a direct stream")
elif e.exc_info[0] == URLError:
if os.path.exists(os.path.abspath(song_url)):
raise ExtractionError("This is not a stream, this is a file path.")
else: # it might be a file path that just doesn't exist
raise ExtractionError("Invalid input: {0.exc_info[0]}: {0.exc_info[1].reason}".format(e))
else:
traceback.print_exc()
raise ExtractionError("Unknown error: {}".format(e))
except Exception as e:
traceback.print_exc()
print('Could not extract information from {} ({}), falling back to direct'.format(song_url, e))
if info.get('extractor', None) == 'twitch:stream': # may need to add other twitch types
title = info.get('description')
else:
title = info.get('title', 'Untitled')
# TODO: A bit more validation, "~stream some_url" should not be :ok_hand:
# TODO: You'd think that this would be able to play youtube videos and the like
# TODO: or rather anything ytdl can parse. I'm not quite sure how to handle that yet.
entry = StreamPlaylistEntry(
self,
song_url,
title,
direct = not info.get('is_live', False),
**meta
)
self._add_entry(entry)
return entry, len(self.entries)
async def import_from(self, playlist_url, **meta):
"""
Imports the songs from `playlist_url` and queues them to be played.
Returns a list of `entries` that have been enqueued.
:param playlist_url: The playlist url to be cut into individual urls and added to the playlist
:param meta: Any additional metadata to add to the playlist entry
"""
position = len(self.entries) + 1
entry_list = []
try:
info = await self.downloader.safe_extract_info(self.loop, playlist_url, download=False)
except Exception as e:
raise ExtractionError('Could not extract information from {}\n\n{}'.format(playlist_url, e))
if not info:
raise ExtractionError('Could not extract information from %s' % playlist_url)
# Once again, the generic extractor fucks things up.
if info.get('extractor', None) == 'generic':
url_field = 'url'
else:
url_field = 'webpage_url'
baditems = 0
for items in info['entries']:
if items:
try:
entry = URLPlaylistEntry(
self,
items[url_field],
items.get('title', 'Untitled'),
items.get('duration', 0) or 0,
self.downloader.ytdl.prepare_filename(items),
**meta
)
self._add_entry(entry)
entry_list.append(entry)
except:
baditems += 1
# Once I know more about what's happening here I can add a proper message
traceback.print_exc()
print(items)
print("Could not add item")
else:
baditems += 1
if baditems:
print("Skipped %s bad entries" % baditems)
return entry_list, position
async def async_process_youtube_playlist(self, playlist_url, **meta):
"""
        Processes youtube playlist links from `playlist_url` in a questionable, async fashion.
:param playlist_url: The playlist url to be cut into individual urls and added to the playlist
:param meta: Any additional metadata to add to the playlist entry
"""
try:
info = await self.downloader.safe_extract_info(self.loop, playlist_url, download=False, process=False)
except Exception as e:
raise ExtractionError('Could not extract information from {}\n\n{}'.format(playlist_url, e))
if not info:
raise ExtractionError('Could not extract information from %s' % playlist_url)
gooditems = []
baditems = 0
for entry_data in info['entries']:
if entry_data:
baseurl = info['webpage_url'].split('playlist?list=')[0]
song_url = baseurl + 'watch?v=%s' % entry_data['id']
try:
entry, elen = await self.add_entry(song_url, **meta)
gooditems.append(entry)
except ExtractionError:
baditems += 1
except Exception as e:
baditems += 1
print("There was an error adding the song {}: {}: {}\n".format(
entry_data['id'], e.__class__.__name__, e))
else:
baditems += 1
if baditems:
print("Skipped %s bad entries" % baditems)
return gooditems
async def async_process_sc_bc_playlist(self, playlist_url, **meta):
"""
        Processes soundcloud set and bandcamp album links from `playlist_url` in a questionable, async fashion.
:param playlist_url: The playlist url to be cut into individual urls and added to the playlist
:param meta: Any additional metadata to add to the playlist entry
"""
try:
info = await self.downloader.safe_extract_info(self.loop, playlist_url, download=False, process=False)
except Exception as e:
raise ExtractionError('Could not extract information from {}\n\n{}'.format(playlist_url, e))
if not info:
raise ExtractionError('Could not extract information from %s' % playlist_url)
gooditems = []
baditems = 0
for entry_data in info['entries']:
if entry_data:
song_url = entry_data['url']
try:
entry, elen = await self.add_entry(song_url, **meta)
gooditems.append(entry)
except ExtractionError:
baditems += 1
except Exception as e:
baditems += 1
print("There was an error adding the song {}: {}: {}\n".format(
entry_data['id'], e.__class__.__name__, e))
else:
baditems += 1
if baditems:
print("Skipped %s bad entries" % baditems)
return gooditems
def _add_entry(self, entry):
self.entries.append(entry)
self.emit('entry-added', playlist=self, entry=entry)
if self.peek() is entry:
entry.get_ready_future()
def remove_entry(self, index):
del self.entries[index]
async def get_next_entry(self, predownload_next=True):
"""
A coroutine which will return the next song or None if no songs left to play.
Additionally, if predownload_next is set to True, it will attempt to download the next
song to be played - so that it's ready by the time we get to it.
"""
if not self.entries:
return None
entry = self.entries.popleft()
if predownload_next:
next_entry = self.peek()
if next_entry:
next_entry.get_ready_future()
return await entry.get_ready_future()
def peek(self):
"""
Returns the next entry that should be scheduled to be played.
"""
if self.entries:
return self.entries[0]
async def estimate_time_until(self, position, player):
"""
        (very) Roughly estimates the time until the queue reaches 'position'
"""
estimated_time = sum([e.duration for e in islice(self.entries, position - 1)])
# When the player plays a song, it eats the first playlist item, so we just have to add the time back
if not player.is_stopped and player.current_entry:
estimated_time += player.current_entry.duration - player.progress
return datetime.timedelta(seconds=estimated_time)
def count_for_user(self, user):
return sum(1 for e in self.entries if e.meta.get('author', None) == user)
| mit | 4,468,694,328,803,419,600 | 36.644172 | 122 | 0.563233 | false |
Pettythug/ForThisClass | assignment7/broker.py | 1 | 2885 | from network import Listener, Handler, poll
handlers = {} # map client handler to user name
names = {} # map name to handler
subs = {} # map tag to list of subscribed user names
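
# Message protocol handled by MyHandler.on_msg (inferred from the code below):
#   {'join': name}               -> register the sender under `name`
#   {'speak': name, 'txt': txt}  -> chat message; each word of `txt` may carry a prefix:
#       +tag   subscribe `name` to `tag`
#       -tag   unsubscribe `name` from `tag`
#       #tag   forward this message to every subscriber of `tag`
#       @user  forward this message only to `user`
#   a message with no prefixed words is broadcast to everyone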
def broadcast(msg):
for h in handlers.keys():
h.do_send(msg)
class MyHandler(Handler):
    count = 0
def on_open(self):
handlers[self] = None
def on_close(self):
name = handlers[self]
del handlers[self]
broadcast({'leave': name, 'users': handlers.values()})
def on_msg(self, msg):
count = 0
check = True
personal = False
if 'join' in msg:
name = msg['join']
handlers[self] = name
broadcast({'join': name, 'users': handlers.values()})
elif 'speak' in msg:
name, txt = msg['speak'], msg['txt']
count = len(txt.split())
for x in txt.split():
count -= 1
word = x[1:]
if x.startswith('+') and personal == False:
count = 0
check = False
if word in subs.keys():
if name not in subs.get(word):
subs.setdefault(word,[]).append(name)
else:
subs.update({word:[name]})
elif x.startswith("#") and personal == False and check == True:
count = 0
check = False
while not check:
if word in subs:
for value in subs.get(word):
for h in handlers:
                                    if handlers.get(h) == value and h != self:  # deliver to the subscriber's handler, not back to the sender
h.do_send(msg)
check = True
elif x.startswith('-') and personal == False:
count = 0
check = False
if word in subs.keys():
if name in subs.get(word):
subs.setdefault(word,[]).remove(name)
elif x.startswith("@"):
for h in handlers:
if word in handlers.get(h):
if h != self:
h.do_send(msg)
check = False
                    personal = True  # mark as a direct message so it is not broadcast again below
elif count == 0 and check == True and personal == False:
broadcast({'speak': name, 'txt': txt})
Listener(8888, MyHandler)
while 1:
poll(0.05)
| mit | 3,542,157,346,691,514,000 | 35.987179 | 82 | 0.375737 | false |
airanmehr/Scan | Libs/Util.py | 1 | 2178 | '''
Copyleft Mar 08, 2017 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected]
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import seaborn as sns
import pylab as plt;
import matplotlib as mpl
import os;
import UTILS.Util as utl
import UTILS.Estimate as est
import UTILS.Plots as pplt
from multiprocessing import Pool
def loadChrom(CHROM,pop=None):
a=pd.read_pickle(utl.dataPath1000GP+'dataframe/chr{}.df'.format(CHROM))
a.index=a.index.droplevel([2,3,4])
if pop is not None:return a[pop]
return a
def scanChrom(args):
CHROM,winSize,pop,n=args
if isinstance(CHROM,str) or isinstance(CHROM,int):CHROM=loadChrom(CHROM,pop)
return utl.scanGenome(CHROM,uf=lambda x: est.Estimate.getAllEstimatesX(x,n=n*2),winSize=winSize)
def scanGenome(winSize,genome,n,nProc=10):
if isinstance(genome,str):
args=map(lambda x: (x,winSize,genome,n),range(1,23))
else:
args=map(lambda x: (genome.loc[[x]],winSize,None,n),range(1,23))
if nProc>1:
return pd.concat(Pool(nProc).map(scanChrom,args))
else:
return pd.concat(map(scanChrom,args))
def scan1000GP(pop,wins=[50,200,500,1000]):
if pop=='ALL':n=2504
else:n=utl.VCF.loadPanel().groupby('pop').size()[pop]
df=pd.concat(map(lambda w: scanGenome(w*1000,pop,n),wins),1,keys=wins)
df.to_pickle(utl.parentdir(utl.dataPath1000GP)+'/scan/{}.SFS.df'.format(pop))
def scan1000GPAll():
pops=utl.VCF.loadPanel()
pops=['ALL']+ pops['super_pop'].unique().tolist()+pops['pop'].unique().tolist()
map(scan1000GP,pops)
def genesA():
pop='CEU'
genes=loadGenes().loc[pop]
scan=pd.read_pickle(utl.parentdir(utl.dataPath1000GP)+'/scan/{}.SFS.df'.format(pop))
scan.columns=[50,200,500,1000]
# a=scan[500].dropna().unstack('method')['FayWu']
# # I=range(1,5)+range(7,23)a=a.loc[I]
# pplt.Manhattan(a,shade=genes)
# for _,row in genes.iterrows():plt.annotate('{}'.format(row['name']), xy=(row.loc['gstart'], (a.max())), xytext=(row.loc['gstart'], 5),fontsize=22)
| mit | 4,540,748,270,361,257,000 | 30.114286 | 148 | 0.68641 | false |
jreback/pandas | pandas/tests/frame/indexing/test_setitem.py | 1 | 13480 | import numpy as np
import pytest
from pandas.core.dtypes.base import registry as ea_registry
from pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype, PeriodDtype
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
b1 = df._mgr.blocks[1]
b2 = df._mgr.blocks[2]
tm.assert_extension_array_equal(b1.values, b2.values)
b1base = b1.values._data.base
b2base = b2.values._data.base
assert b1base is None or (id(b1base) != id(b2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_setitem_complete_column_with_array(self):
# GH#37954
df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])
df[["c", "d"]] = arr
expected = DataFrame(
{
"a": ["one", "two", "three"],
"b": [1, 2, 3],
"c": [1, 3, 5],
"d": [1, 1, 1],
}
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_setitem_bool_with_numeric_index(self, dtype):
# GH#36319
cols = Index([1, 2, 3], dtype=dtype)
df = DataFrame(np.random.randn(3, 3), columns=cols)
df[False] = ["a", "b", "c"]
expected_cols = Index([1, 2, 3, False], dtype=object)
if dtype == "f8":
expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
tm.assert_index_equal(df.columns, expected_cols)
class TestDataFrameSetItemWithExpansion:
def test_setitem_listlike_views(self):
# GH#38148
df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
# get one column as a view of df
ser = df["a"]
# add columns with list-like indexer
df[["c", "d"]] = np.array([[0.1, 0.2], [0.3, 0.4], [0.4, 0.5]])
# edit in place the first column to check view semantics
df.iloc[0, 0] = 100
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
# GH#31469
df = DataFrame(np.zeros((100, 1)))
df[-4:] = 1
arr = np.zeros((100, 1))
arr[-4:] = 1
expected = DataFrame(arr)
tm.assert_frame_equal(df, expected)
class TestDataFrameSetItemCallable:
def test_setitem_callable(self):
# GH#12533
df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]})
df[lambda x: "A"] = [11, 12, 13, 14]
exp = DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]})
tm.assert_frame_equal(df, exp)
class TestDataFrameSetItemBooleanMask:
@pytest.mark.parametrize(
"mask_type",
[lambda df: df > np.abs(df) / 2, lambda df: (df > np.abs(df) / 2).values],
ids=["dataframe", "array"],
)
def test_setitem_boolean_mask(self, mask_type, float_frame):
# Test for issue #18582
df = float_frame.copy()
mask = mask_type(df)
# index with boolean mask
result = df.copy()
result[mask] = np.nan
expected = df.copy()
expected.values[np.array(mask)] = np.nan
tm.assert_frame_equal(result, expected)
| bsd-3-clause | 8,025,559,734,795,429,000 | 32.784461 | 88 | 0.528116 | false |
ceibal-tatu/pygobject | gi/pygtkcompat.py | 1 | 14279 | # -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2011-2012 Johan Dahlin <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
"""
PyGTK compatibility layer.
This module goes a bit further than the normal overrides system to maintain
PyGTK compatibility.
It is recommended to not depend on this layer, but only use it as an
intermediate step when porting your application to PyGI.
Compatibility might never be 100%, but the aim is to make it possible to run
a well behaved PyGTK application mostly unmodified on top of PyGI.
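
A typical usage sketch (assuming this module is installed so that it is
importable as ``gi.pygtkcompat``):

    from gi import pygtkcompat

    pygtkcompat.enable()                   # install glib/gobject/gio aliases
    pygtkcompat.enable_gtk(version='3.0')  # install gtk, gtk.gdk, pango, ...

    # Legacy imports now resolve to the GI-based modules registered above
    import gtk
    import gobject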
"""
import sys
import warnings
try:
# Python 3
from collections import UserList
from imp import reload
except ImportError:
# Python 2 ships that in a different module
from UserList import UserList
import gi
from gi.repository import GObject
def _install_enums(module, dest=None, strip=''):
if dest is None:
dest = module
modname = dest.__name__.rsplit('.', 1)[1].upper()
for attr in dir(module):
try:
obj = getattr(module, attr, None)
except:
continue
try:
if issubclass(obj, GObject.GEnum):
for value, enum in obj.__enum_values__.items():
name = enum.value_name
name = name.replace(modname + '_', '')
if strip and name.startswith(strip):
name = name[len(strip):]
setattr(dest, name, enum)
except TypeError:
continue
try:
if issubclass(obj, GObject.GFlags):
for value, flag in obj.__flags_values__.items():
name = flag.value_names[-1].replace(modname + '_', '')
setattr(dest, name, flag)
except TypeError:
continue
def enable():
# gobject
from gi.repository import GLib
sys.modules['glib'] = GLib
# gobject
from gi.repository import GObject
sys.modules['gobject'] = GObject
from gi._gobject import propertyhelper
sys.modules['gobject.propertyhelper'] = propertyhelper
# gio
from gi.repository import Gio
sys.modules['gio'] = Gio
_unset = object()
def enable_gtk(version='2.0'):
# set the default encoding like PyGTK
reload(sys)
if sys.version_info < (3,0):
sys.setdefaultencoding('utf-8')
# atk
gi.require_version('Atk', '1.0')
from gi.repository import Atk
sys.modules['atk'] = Atk
_install_enums(Atk)
# pango
gi.require_version('Pango', '1.0')
from gi.repository import Pango
sys.modules['pango'] = Pango
_install_enums(Pango)
# pangocairo
gi.require_version('PangoCairo', '1.0')
from gi.repository import PangoCairo
sys.modules['pangocairo'] = PangoCairo
# gdk
gi.require_version('Gdk', version)
gi.require_version('GdkPixbuf', '2.0')
from gi.repository import Gdk
from gi.repository import GdkPixbuf
sys.modules['gtk.gdk'] = Gdk
_install_enums(Gdk)
_install_enums(GdkPixbuf, dest=Gdk)
Gdk._2BUTTON_PRESS = 5
Gdk.BUTTON_PRESS = 4
Gdk.screen_get_default = Gdk.Screen.get_default
Gdk.Pixbuf = GdkPixbuf.Pixbuf
Gdk.pixbuf_new_from_file = GdkPixbuf.Pixbuf.new_from_file
Gdk.PixbufLoader = GdkPixbuf.PixbufLoader.new_with_type
orig_get_frame_extents = Gdk.Window.get_frame_extents
def get_frame_extents(window):
try:
try:
rect = Gdk.Rectangle(0, 0, 0, 0)
except TypeError:
rect = Gdk.Rectangle()
orig_get_frame_extents(window, rect)
except TypeError:
rect = orig_get_frame_extents(window)
return rect
Gdk.Window.get_frame_extents = get_frame_extents
orig_get_origin = Gdk.Window.get_origin
def get_origin(self):
return orig_get_origin(self)[1:]
Gdk.Window.get_origin = get_origin
# gtk
gi.require_version('Gtk', version)
from gi.repository import Gtk
sys.modules['gtk'] = Gtk
Gtk.gdk = Gdk
Gtk.pygtk_version = (2, 99, 0)
Gtk.gtk_version = (Gtk.MAJOR_VERSION,
Gtk.MINOR_VERSION,
Gtk.MICRO_VERSION)
_install_enums(Gtk)
# Action
def set_tool_item_type(menuaction, gtype):
warnings.warn('set_tool_item_type() is not supported',
DeprecationWarning, stacklevel=2)
Gtk.Action.set_tool_item_type = classmethod(set_tool_item_type)
# Alignment
orig_Alignment = Gtk.Alignment
class Alignment(orig_Alignment):
def __init__(self, xalign=0.0, yalign=0.0, xscale=0.0, yscale=0.0):
orig_Alignment.__init__(self)
self.props.xalign = xalign
self.props.yalign = yalign
self.props.xscale = xscale
self.props.yscale = yscale
Gtk.Alignment = Alignment
# Box
orig_pack_end = Gtk.Box.pack_end
def pack_end(self, child, expand=True, fill=True, padding=0):
orig_pack_end(self, child, expand, fill, padding)
Gtk.Box.pack_end = pack_end
orig_pack_start = Gtk.Box.pack_start
def pack_start(self, child, expand=True, fill=True, padding=0):
orig_pack_start(self, child, expand, fill, padding)
Gtk.Box.pack_start = pack_start
# TreeViewColumn
orig_tree_view_column_pack_end = Gtk.TreeViewColumn.pack_end
def tree_view_column_pack_end(self, cell, expand=True):
orig_tree_view_column_pack_end(self, cell, expand)
Gtk.TreeViewColumn.pack_end = tree_view_column_pack_end
orig_tree_view_column_pack_start = Gtk.TreeViewColumn.pack_start
def tree_view_column_pack_start(self, cell, expand=True):
orig_tree_view_column_pack_start(self, cell, expand)
Gtk.TreeViewColumn.pack_start = tree_view_column_pack_start
# TreeView
def insert_column_with_attributes(view, position, title, cell, *args, **kwargs):
pass
Gtk.TreeView.insert_column_with_attributes = insert_column_with_attributes
# CellLayout
orig_cell_pack_end = Gtk.CellLayout.pack_end
def cell_pack_end(self, cell, expand=True):
orig_cell_pack_end(self, cell, expand)
Gtk.CellLayout.pack_end = cell_pack_end
orig_cell_pack_start = Gtk.CellLayout.pack_start
def cell_pack_start(self, cell, expand=True):
orig_cell_pack_start(self, cell, expand)
Gtk.CellLayout.pack_start = cell_pack_start
orig_set_cell_data_func = Gtk.CellLayout.set_cell_data_func
def set_cell_data_func(self, cell, func, user_data=_unset):
def callback(*args):
if args[-1] == _unset:
args = args[:-1]
return func(*args)
orig_set_cell_data_func(self, cell, callback, user_data)
Gtk.CellLayout.set_cell_data_func = set_cell_data_func
# CellRenderer
class GenericCellRenderer(Gtk.CellRenderer):
pass
Gtk.GenericCellRenderer = GenericCellRenderer
# ComboBox
orig_combo_row_separator_func = Gtk.ComboBox.set_row_separator_func
def combo_row_separator_func(self, func, user_data=_unset):
def callback(*args):
if args[-1] == _unset:
args = args[:-1]
return func(*args)
orig_combo_row_separator_func(self, callback, user_data)
Gtk.ComboBox.set_row_separator_func = combo_row_separator_func
# ComboBoxEntry
class ComboBoxEntry(Gtk.ComboBox):
def __init__(self, **kwds):
Gtk.ComboBox.__init__(self, has_entry=True, **kwds)
def set_text_column (self, text_column):
self.set_entry_text_column(text_column)
def get_text_column (self):
return self.get_entry_text_column()
Gtk.ComboBoxEntry = ComboBoxEntry
def combo_box_entry_new():
return Gtk.ComboBoxEntry()
Gtk.combo_box_entry_new = combo_box_entry_new
def combo_box_entry_new_with_model(model):
return Gtk.ComboBoxEntry(model=model)
Gtk.combo_box_entry_new_with_model = combo_box_entry_new_with_model
# Container
def install_child_property(container, flag, pspec):
warnings.warn('install_child_property() is not supported',
DeprecationWarning, stacklevel=2)
Gtk.Container.install_child_property = classmethod(install_child_property)
def new_text():
combo = Gtk.ComboBox()
model = Gtk.ListStore(str)
combo.set_model(model)
combo.set_entry_text_column(0)
return combo
Gtk.combo_box_new_text = new_text
def append_text(self, text):
model = self.get_model()
model.append([text])
Gtk.ComboBox.append_text = append_text
Gtk.expander_new_with_mnemonic = Gtk.Expander.new_with_mnemonic
Gtk.icon_theme_get_default = Gtk.IconTheme.get_default
Gtk.image_new_from_pixbuf = Gtk.Image.new_from_pixbuf
Gtk.image_new_from_stock = Gtk.Image.new_from_stock
Gtk.settings_get_default = Gtk.Settings.get_default
Gtk.window_set_default_icon = Gtk.Window.set_default_icon
# Scale
orig_HScale = Gtk.HScale
orig_VScale = Gtk.VScale
class HScale(orig_HScale):
def __init__(self, adjustment=None):
orig_HScale.__init__(self, adjustment=adjustment)
Gtk.HScale = HScale
class VScale(orig_VScale):
def __init__(self, adjustment=None):
orig_VScale.__init__(self, adjustment=adjustment)
Gtk.VScale = VScale
Gtk.stock_add = lambda items: None
# Widget
Gtk.widget_get_default_direction = Gtk.Widget.get_default_direction
orig_size_request = Gtk.Widget.size_request
def size_request(widget):
class SizeRequest(UserList):
def __init__(self, req):
self.height = req.height
self.width = req.width
UserList.__init__(self, [self.width,
self.height])
return SizeRequest(orig_size_request(widget))
Gtk.Widget.size_request = size_request
Gtk.Widget.hide_all = Gtk.Widget.hide
class BaseGetter(object):
def __init__(self, context):
self.context = context
def __getitem__(self, state):
color = self.context.get_background_color(state)
return Gdk.Color(red=color.red,
green=color.green,
blue=color.blue)
class Styles(object):
def __init__(self, widget):
context = widget.get_style_context()
self.base = BaseGetter(context)
self.black = Gdk.Color(red=0, green=0, blue=0)
class StyleDescriptor(object):
def __get__(self, instance, class_):
return Styles(instance)
Gtk.Widget.style = StyleDescriptor()
# gtk.unixprint
class UnixPrint(object):
pass
unixprint = UnixPrint()
sys.modules['gtkunixprint'] = unixprint
# gtk.keysyms
class Keysyms(object):
pass
keysyms = Keysyms()
sys.modules['gtk.keysyms'] = keysyms
Gtk.keysyms = keysyms
for name in dir(Gdk):
if name.startswith('KEY_'):
target = name[4:]
if target[0] in '0123456789':
target = '_' + target
value = getattr(Gdk, name)
setattr(keysyms, target, value)
def enable_vte():
gi.require_version('Vte', '0.0')
from gi.repository import Vte
sys.modules['vte'] = Vte
def enable_poppler():
gi.require_version('Poppler', '0.18')
from gi.repository import Poppler
sys.modules['poppler'] = Poppler
Poppler.pypoppler_version = (1, 0, 0)
def enable_webkit(version='1.0'):
gi.require_version('WebKit', version)
from gi.repository import WebKit
sys.modules['webkit'] = WebKit
WebKit.WebView.get_web_inspector = WebKit.WebView.get_inspector
def enable_gudev():
gi.require_version('GUdev', '1.0')
from gi.repository import GUdev
sys.modules['gudev'] = GUdev
def enable_gst():
gi.require_version('Gst', '0.10')
from gi.repository import Gst
sys.modules['gst'] = Gst
_install_enums(Gst)
Gst.registry_get_default = Gst.Registry.get_default
Gst.element_register = Gst.Element.register
Gst.element_factory_make = Gst.ElementFactory.make
Gst.caps_new_any = Gst.Caps.new_any
Gst.get_pygst_version = lambda : (0, 10, 19)
Gst.get_gst_version = lambda : (0, 10, 40)
from gi.repository import GstInterfaces
sys.modules['gst.interfaces'] = GstInterfaces
_install_enums(GstInterfaces)
from gi.repository import GstAudio
sys.modules['gst.audio'] = GstAudio
_install_enums(GstAudio)
from gi.repository import GstVideo
sys.modules['gst.video'] = GstVideo
_install_enums(GstVideo)
from gi.repository import GstBase
sys.modules['gst.base'] = GstBase
_install_enums(GstBase)
Gst.BaseTransform = GstBase.BaseTransform
Gst.BaseSink = GstBase.BaseSink
from gi.repository import GstController
sys.modules['gst.controller'] = GstController
_install_enums(GstController, dest=Gst)
from gi.repository import GstPbutils
sys.modules['gst.pbutils'] = GstPbutils
_install_enums(GstPbutils)
def enable_goocanvas():
gi.require_version('GooCanvas', '2.0')
from gi.repository import GooCanvas
sys.modules['goocanvas'] = GooCanvas
_install_enums(GooCanvas, strip='GOO_CANVAS_')
GooCanvas.ItemSimple = GooCanvas.CanvasItemSimple
GooCanvas.Item = GooCanvas.CanvasItem
GooCanvas.Image = GooCanvas.CanvasImage
GooCanvas.Group = GooCanvas.CanvasGroup
GooCanvas.Rect = GooCanvas.CanvasRect
| lgpl-2.1 | 8,445,934,827,913,439,000 | 30.451542 | 84 | 0.640101 | false |
h3biomed/ansible | lib/ansible/modules/network/cloudengine/ce_switchport.py | 1 | 27421 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_switchport
version_added: "2.4"
short_description: Manages Layer 2 switchport interfaces on HUAWEI CloudEngine switches.
description:
- Manages Layer 2 switchport interfaces on HUAWEI CloudEngine switches.
author: QijunPan (@QijunPan)
notes:
- When C(state=absent), VLANs can be added/removed from trunk links and
      the existing access VLAN can be 'unconfigured', leaving just VLAN 1
on that interface.
    - When working with trunk VLANs, the keywords add/remove are always sent
in the C(port trunk allow-pass vlan) command. Use verbose mode to see
commands sent.
- When C(state=unconfigured), the interface will result with having a default
Layer 2 interface, i.e. vlan 1 in access mode.
options:
interface:
description:
- Full name of the interface, i.e. 40GE1/0/22.
required: true
mode:
description:
- The link type of an interface.
choices: ['access','trunk']
access_vlan:
description:
- If C(mode=access), used as the access VLAN ID, in the range from 1 to 4094.
native_vlan:
description:
- If C(mode=trunk), used as the trunk native VLAN ID, in the range from 1 to 4094.
trunk_vlans:
description:
- If C(mode=trunk), used as the VLAN range to ADD or REMOVE
from the trunk, such as 2-10 or 2,5,10-15, etc.
state:
description:
- Manage the state of the resource.
default: present
choices: ['present', 'absent', 'unconfigured']
'''
EXAMPLES = '''
- name: switchport module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Ensure 10GE1/0/22 is in its default switchport state
ce_switchport:
interface: 10GE1/0/22
state: unconfigured
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 is configured for access vlan 20
ce_switchport:
interface: 10GE1/0/22
mode: access
access_vlan: 20
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 only has vlans 5-10 as trunk vlans
ce_switchport:
interface: 10GE1/0/22
mode: trunk
native_vlan: 10
trunk_vlans: 5-10
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged)
ce_switchport:
interface: 10GE1/0/22
mode: trunk
native_vlan: 10
trunk_vlans: 2-50
provider: '{{ cli }}'
- name: Ensure these VLANs are not being tagged on the trunk
ce_switchport:
interface: 10GE1/0/22
mode: trunk
trunk_vlans: 51-4000
state: absent
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"access_vlan": "20", "interface": "10GE1/0/22", "mode": "access"}
existing:
description: k/v pairs of existing switchport
returned: always
type: dict
sample: {"access_vlan": "10", "interface": "10GE1/0/22",
"mode": "access", "switchport": "enable"}
end_state:
description: k/v pairs of switchport after module execution
returned: always
type: dict
sample: {"access_vlan": "20", "interface": "10GE1/0/22",
"mode": "access", "switchport": "enable"}
updates:
description: command string sent to the device
returned: always
type: list
sample: ["10GE1/0/22", "port default vlan 20"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_INTF = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<isL2SwitchPort></isL2SwitchPort>
</interface>
</interfaces>
</ifm>
</filter>
"""
CE_NC_GET_PORT_ATTR = """
<filter type="subtree">
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf>
<ifName>%s</ifName>
<l2Enable></l2Enable>
<l2Attribute>
<linkType></linkType>
<pvid></pvid>
<trunkVlans></trunkVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</filter>
"""
CE_NC_SET_ACCESS_PORT = """
<config>
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>access</linkType>
<pvid>%s</pvid>
<trunkVlans></trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</config>
"""
CE_NC_SET_TRUNK_PORT_MODE = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_TRUNK_PORT_PVID = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
<pvid>%s</pvid>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_TRUNK_PORT_VLANS = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
<trunkVlans>%s:%s</trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_DEFAULT_PORT = """
<config>
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>access</linkType>
<pvid>1</pvid>
<trunkVlans></trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</config>
"""
SWITCH_PORT_TYPE = ('ge', '10ge', '25ge',
'4x10ge', '40ge', '100ge', 'eth-trunk')
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
def is_portswitch_enalbed(iftype):
""""[undo] portswitch"""
return bool(iftype in SWITCH_PORT_TYPE)
def vlan_bitmap_undo(bitmap):
"""convert vlan bitmap to undo bitmap"""
vlan_bit = ['F'] * 1024
if not bitmap or len(bitmap) == 0:
return ''.join(vlan_bit)
bit_len = len(bitmap)
for num in range(bit_len):
undo = (~int(bitmap[num], 16)) & 0xF
vlan_bit[num] = hex(undo)[2]
return ''.join(vlan_bit)
def is_vlan_bitmap_empty(bitmap):
"""check vlan bitmap empty"""
if not bitmap or len(bitmap) == 0:
return True
bit_len = len(bitmap)
for num in range(bit_len):
if bitmap[num] != '0':
return False
return True
class SwitchPort(object):
"""
Manages Layer 2 switchport interfaces.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# interface and vlan info
self.interface = self.module.params['interface']
self.mode = self.module.params['mode']
self.state = self.module.params['state']
self.access_vlan = self.module.params['access_vlan']
self.native_vlan = self.module.params['native_vlan']
self.trunk_vlans = self.module.params['trunk_vlans']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
self.intf_info = dict() # interface vlan info
self.intf_type = None # loopback tunnel ...
def init_module(self):
""" init module """
required_if = [('state', 'absent', ['mode']), ('state', 'present', ['mode'])]
self.module = AnsibleModule(
argument_spec=self.spec, required_if=required_if, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_interface_dict(self, ifname):
""" get one interface attributes dict."""
intf_info = dict()
conf_str = CE_NC_GET_PORT_ATTR % ifname
rcv_xml = get_nc_config(self.module, conf_str)
if "<data/>" in rcv_xml:
return intf_info
intf = re.findall(
r'.*<ifName>(.*)</ifName>.*\s*<l2Enable>(.*)</l2Enable>.*', rcv_xml)
if intf:
intf_info = dict(ifName=intf[0][0],
l2Enable=intf[0][1],
linkType="",
pvid="",
trunkVlans="")
if intf_info["l2Enable"] == "enable":
attr = re.findall(
r'.*<linkType>(.*)</linkType>.*.*\s*<pvid>(.*)'
r'</pvid>.*\s*<trunkVlans>(.*)</trunkVlans>.*', rcv_xml)
if attr:
intf_info["linkType"] = attr[0][0]
intf_info["pvid"] = attr[0][1]
intf_info["trunkVlans"] = attr[0][2]
return intf_info
def is_l2switchport(self):
"""Check layer2 switch port"""
return bool(self.intf_info["l2Enable"] == "enable")
def merge_access_vlan(self, ifname, access_vlan):
"""Merge access interface vlan"""
change = False
conf_str = ""
self.updates_cmd.append("interface %s" % ifname)
if self.state == "present":
if self.intf_info["linkType"] == "access":
if access_vlan and self.intf_info["pvid"] != access_vlan:
self.updates_cmd.append(
"port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, access_vlan)
change = True
else: # not access
self.updates_cmd.append("port link-type access")
if access_vlan:
self.updates_cmd.append(
"port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, access_vlan)
else:
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
elif self.state == "absent":
if self.intf_info["linkType"] == "access":
if access_vlan and self.intf_info["pvid"] == access_vlan and access_vlan != "1":
self.updates_cmd.append(
"undo port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
else: # not access
self.updates_cmd.append("port link-type access")
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
if not change:
self.updates_cmd.pop() # remove interface
return
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "MERGE_ACCESS_PORT")
self.changed = True
def merge_trunk_vlan(self, ifname, native_vlan, trunk_vlans):
"""Merge trunk interface vlan"""
change = False
xmlstr = ""
self.updates_cmd.append("interface %s" % ifname)
if trunk_vlans:
vlan_list = self.vlan_range_to_list(trunk_vlans)
vlan_map = self.vlan_list_to_bitmap(vlan_list)
if self.state == "present":
if self.intf_info["linkType"] == "trunk":
if native_vlan and self.intf_info["pvid"] != native_vlan:
self.updates_cmd.append(
"port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, native_vlan)
change = True
if trunk_vlans:
add_vlans = self.vlan_bitmap_add(
self.intf_info["trunkVlans"], vlan_map)
if not is_vlan_bitmap_empty(add_vlans):
self.updates_cmd.append(
"port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, add_vlans, add_vlans)
change = True
else: # not trunk
self.updates_cmd.append("port link-type trunk")
change = True
if native_vlan:
self.updates_cmd.append(
"port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, native_vlan)
if trunk_vlans:
self.updates_cmd.append(
"port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, vlan_map, vlan_map)
if not native_vlan and not trunk_vlans:
xmlstr += CE_NC_SET_TRUNK_PORT_MODE % ifname
self.updates_cmd.append(
"undo port trunk allow-pass vlan 1")
elif self.state == "absent":
if self.intf_info["linkType"] == "trunk":
if native_vlan and self.intf_info["pvid"] == native_vlan and native_vlan != '1':
self.updates_cmd.append(
"undo port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, 1)
change = True
if trunk_vlans:
del_vlans = self.vlan_bitmap_del(
self.intf_info["trunkVlans"], vlan_map)
if not is_vlan_bitmap_empty(del_vlans):
self.updates_cmd.append(
"undo port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
undo_map = vlan_bitmap_undo(del_vlans)
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, undo_map, del_vlans)
change = True
else: # not trunk
self.updates_cmd.append("port link-type trunk")
self.updates_cmd.append("undo port trunk allow-pass vlan 1")
xmlstr += CE_NC_SET_TRUNK_PORT_MODE % ifname
change = True
if not change:
self.updates_cmd.pop()
return
conf_str = "<config>" + xmlstr + "</config>"
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "MERGE_TRUNK_PORT")
self.changed = True
def default_switchport(self, ifname):
"""Set interface default or unconfigured"""
change = False
if self.intf_info["linkType"] != "access":
self.updates_cmd.append("interface %s" % ifname)
self.updates_cmd.append("port link-type access")
self.updates_cmd.append("port default vlan 1")
change = True
else:
if self.intf_info["pvid"] != "1":
self.updates_cmd.append("interface %s" % ifname)
self.updates_cmd.append("port default vlan 1")
change = True
if not change:
return
conf_str = CE_NC_SET_DEFAULT_PORT % ifname
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "DEFAULT_INTF_VLAN")
self.changed = True
def vlan_series(self, vlanid_s):
""" convert vlan range to vlan list """
vlan_list = []
peerlistlen = len(vlanid_s)
if peerlistlen != 2:
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
for num in range(peerlistlen):
if not vlanid_s[num].isdigit():
self.module.fail_json(
msg='Error: Format of vlanid is invalid.')
if int(vlanid_s[0]) > int(vlanid_s[1]):
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
elif int(vlanid_s[0]) == int(vlanid_s[1]):
vlan_list.append(str(vlanid_s[0]))
return vlan_list
for num in range(int(vlanid_s[0]), int(vlanid_s[1])):
vlan_list.append(str(num))
vlan_list.append(vlanid_s[1])
return vlan_list
def vlan_region(self, vlanid_list):
""" convert vlan range to vlan list """
vlan_list = []
peerlistlen = len(vlanid_list)
for num in range(peerlistlen):
if vlanid_list[num].isdigit():
vlan_list.append(vlanid_list[num])
else:
vlan_s = self.vlan_series(vlanid_list[num].split('-'))
vlan_list.extend(vlan_s)
return vlan_list
def vlan_range_to_list(self, vlan_range):
""" convert vlan range to vlan list """
vlan_list = self.vlan_region(vlan_range.split(','))
return vlan_list
def vlan_list_to_bitmap(self, vlanlist):
""" convert vlan list to vlan bitmap """
vlan_bit = ['0'] * 1024
bit_int = [0] * 1024
vlan_list_len = len(vlanlist)
for num in range(vlan_list_len):
tagged_vlans = int(vlanlist[num])
if tagged_vlans <= 0 or tagged_vlans > 4094:
self.module.fail_json(
msg='Error: Vlan id is not in the range from 1 to 4094.')
            j = tagged_vlans // 4  # integer division; a float index breaks list indexing under Python 3
bit_int[j] |= 0x8 >> (tagged_vlans % 4)
vlan_bit[j] = hex(bit_int[j])[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
def vlan_bitmap_add(self, oldmap, newmap):
"""vlan add bitmap"""
vlan_bit = ['0'] * 1024
if len(newmap) != 1024:
self.module.fail_json(msg='Error: New vlan bitmap is invalid.')
if len(oldmap) != 1024 and len(oldmap) != 0:
self.module.fail_json(msg='Error: old vlan bitmap is invalid.')
if len(oldmap) == 0:
return newmap
for num in range(1024):
new_tmp = int(newmap[num], 16)
old_tmp = int(oldmap[num], 16)
add = (~(new_tmp & old_tmp)) & new_tmp
vlan_bit[num] = hex(add)[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
def vlan_bitmap_del(self, oldmap, delmap):
"""vlan del bitmap"""
vlan_bit = ['0'] * 1024
if not oldmap or len(oldmap) == 0:
return ''.join(vlan_bit)
if len(oldmap) != 1024 or len(delmap) != 1024:
self.module.fail_json(msg='Error: vlan bitmap is invalid.')
for num in range(1024):
tmp = int(delmap[num], 16) & int(oldmap[num], 16)
vlan_bit[num] = hex(tmp)[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
def check_params(self):
"""Check all input params"""
# interface type check
if self.interface:
self.intf_type = get_interface_type(self.interface)
if not self.intf_type:
self.module.fail_json(
msg='Error: Interface name of %s is error.' % self.interface)
if not self.intf_type or not is_portswitch_enalbed(self.intf_type):
            self.module.fail_json(msg='Error: Interface %s is error.' % self.interface)
# check access_vlan
if self.access_vlan:
if not self.access_vlan.isdigit():
self.module.fail_json(msg='Error: Access vlan id is invalid.')
if int(self.access_vlan) <= 0 or int(self.access_vlan) > 4094:
self.module.fail_json(
msg='Error: Access vlan id is not in the range from 1 to 4094.')
# check native_vlan
if self.native_vlan:
if not self.native_vlan.isdigit():
self.module.fail_json(msg='Error: Native vlan id is invalid.')
if int(self.native_vlan) <= 0 or int(self.native_vlan) > 4094:
self.module.fail_json(
msg='Error: Native vlan id is not in the range from 1 to 4094.')
# get interface info
self.intf_info = self.get_interface_dict(self.interface)
if not self.intf_info:
self.module.fail_json(msg='Error: Interface does not exist.')
if not self.is_l2switchport():
self.module.fail_json(
msg='Error: Interface is not layer2 switch port.')
def get_proposed(self):
"""get proposed info"""
self.proposed['state'] = self.state
self.proposed['interface'] = self.interface
self.proposed['mode'] = self.mode
self.proposed['access_vlan'] = self.access_vlan
self.proposed['native_vlan'] = self.native_vlan
self.proposed['trunk_vlans'] = self.trunk_vlans
def get_existing(self):
"""get existing info"""
if self.intf_info:
self.existing["interface"] = self.intf_info["ifName"]
self.existing["mode"] = self.intf_info["linkType"]
self.existing["switchport"] = self.intf_info["l2Enable"]
self.existing['access_vlan'] = self.intf_info["pvid"]
self.existing['native_vlan'] = self.intf_info["pvid"]
self.existing['trunk_vlans'] = self.intf_info["trunkVlans"]
def get_end_state(self):
"""get end state info"""
if self.intf_info:
end_info = self.get_interface_dict(self.interface)
if end_info:
self.end_state["interface"] = end_info["ifName"]
self.end_state["mode"] = end_info["linkType"]
self.end_state["switchport"] = end_info["l2Enable"]
self.end_state['access_vlan'] = end_info["pvid"]
self.end_state['native_vlan'] = end_info["pvid"]
self.end_state['trunk_vlans'] = end_info["trunkVlans"]
def work(self):
"""worker"""
self.check_params()
if not self.intf_info:
self.module.fail_json(msg='Error: interface does not exist.')
self.get_existing()
self.get_proposed()
# present or absent
if self.state == "present" or self.state == "absent":
if self.mode == "access":
self.merge_access_vlan(self.interface, self.access_vlan)
elif self.mode == "trunk":
self.merge_trunk_vlan(
self.interface, self.native_vlan, self.trunk_vlans)
# unconfigured
else:
self.default_switchport(self.interface)
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
interface=dict(required=True, type='str'),
mode=dict(choices=['access', 'trunk'], required=False),
access_vlan=dict(type='str', required=False),
native_vlan=dict(type='str', required=False),
trunk_vlans=dict(type='str', required=False),
state=dict(choices=['absent', 'present', 'unconfigured'],
default='present')
)
argument_spec.update(ce_argument_spec)
switchport = SwitchPort(argument_spec)
switchport.work()
if __name__ == '__main__':
main()
| gpl-3.0 | 8,632,139,386,570,024,000 | 33.063354 | 123 | 0.553773 | false |
yunojuno/django-juno-testrunner | setup.py | 1 | 1383 | import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-juno-testrunner',
version='0.4.1',
description='A more useful (and slightly more glamorous) test runner for Django 1.6+ from the folks at YunoJuno',
long_description=README,
author='Steve Jalim, Hugo Rodger-Brown',
author_email='[email protected], [email protected]',
url='https://github.com/yunojuno/django-juno-testrunner.git',
license='MIT',
packages=['junorunner'],
install_requires=['colorama'],
extras_require={'junorunner': ['colorama', ]},
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
)
| mit | 3,243,954,591,741,133,000 | 38.514286 | 117 | 0.610991 | false |
HackLab-Almeria/clubpythonalm-taller-bots-telegram | 01-Chat Bot/Teclados - Api 2.0/Teclado1.py | 1 | 2361 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Ejemplo: Teclado Virtual 1 (Api 2.0)
Libreria: pyTelegramBotAPI 2.0 [ok]
Python: 3.5.1
"""
from telebot import types # Importamos el API correspondiente para usar teclados virtuales
import telebot
import sys
TOKEN='AQUÍ EL NUMERO DE VUESTRO TOKEN' # Identificador único del bot
telegram = telebot.TeleBot(TOKEN) # Activamos el bot asociado al Token
# Preparamos un menú de instrucciones
instrucciones="Instrucciones:\n 1.- Saludar \n 2.- Despedirse\n 3.- Ayuda"
'Esta es la revisión de creación del teclado para el API 2.0 de Telegram'
teclado_virtual = types.ReplyKeyboardMarkup() # Activamos el teclado virtual
tecla1 = types.KeyboardButton('1')
tecla2 = types.KeyboardButton('2')
tecla_ayuda= types.KeyboardButton("Ayuda")
teclado_virtual.add(tecla1,tecla2,tecla_ayuda)
def listener(mensaje_telegram):
'Definimos la función que estará atenta a los mensajes que envíe el bot de Telegram.'
nombre_bot="Taller_Python_Almeria"
nombre_usuario="Taller_Python_Almeria_bot"
info_bot="Nombre del bot:" +nombre_bot+"\n"+"Nombre de Usuario: "+nombre_usuario+"\n"+"Token: "+TOKEN+"\n"
for mensaje in mensaje_telegram: # Bucle de captura de los mensajes que envía el bot
chatID = mensaje.chat.id # Identificativo del chat. IMPRESCINDIBLE para enviar mensajes
if mensaje.content_type == 'text': #Esperamos mensajes de texto
if mensaje.text=='1': # Si es el 1
telegram.send_message(chatID, "Hola") # Saludamos
if mensaje.text=='2': # Si es el 2
telegram.send_message(chatID, "Adiós") # Nos despedimos
if mensaje.text=="Ayuda": # Pulsa 'Ayuda' y mostramos el menú de instrucciones
telegram.send_message(chatID,instrucciones)
return 0 # Fin de la función y su valor de retorno
try:
info_api=telegram.get_me() # Comprobar que el bot está activo
print ("Conectando con el Bot de Telegram... [OK]")
print ("-CTR + C para detener el Bot -") # Para salir desde consola
telegram.send_message(chat_id=chatID,text=instrucciones,reply_markup=teclado_virtual ) # Mandamos el teclado diseñado
telegram.set_update_listener(listener) # Actualizamos el escuchador (listener)
telegram.polling() # Activamos el bucle de sondeo de mensajes
sys.exit(0)
except Exception as e:
print ("Conectando con Bot de Telegram -> ERROR")
print (e)
sys.exit(1)
| mit | 6,601,520,652,176,615,000 | 36.854839 | 118 | 0.73285 | false |
polyaxon/polyaxon | core/polyaxon/k8s/custom_resources/operation.py | 1 | 1229 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from polyaxon.k8s.custom_resources.crd import get_custom_object
KIND = "Operation"
PLURAL = "operations"
API_VERSION = "v1"
GROUP = "core.polyaxon.com"
def get_operation_custom_object(
resource_name: str,
namespace: str,
custom_object: Dict,
annotations: Dict[str, str],
labels: Dict[str, str],
) -> Dict:
return get_custom_object(
resource_name=resource_name,
namespace=namespace,
kind=KIND,
api_version="{}/{}".format(GROUP, API_VERSION),
labels=labels,
annotations=annotations,
custom_object=custom_object,
)
| apache-2.0 | 1,260,142,692,166,638,600 | 28.261905 | 74 | 0.701383 | false |
devilry/devilry-django | devilry/devilry_admin/tests/subject_for_period_admin/test_subject_redirect.py | 1 | 2259 |
import mock
from django.conf import settings
from django.http import Http404
from django.test import TestCase
from cradmin_legacy import cradmin_testhelpers
from model_bakery import baker
from devilry.devilry_admin.views.subject_for_period_admin import subject_redirect
class TestSubjectRedirect(TestCase, cradmin_testhelpers.TestCaseMixin):
viewclass = subject_redirect.SubjectRedirectView
def test_404(self):
testsubject = baker.make('core.Subject')
with self.assertRaises(Http404):
self.mock_http302_getrequest(cradmin_role=testsubject)
def test_user_is_not_periodadmin_or_subjectadmin(self):
testsubject = baker.make('core.Subject')
testuser = baker.make(settings.AUTH_USER_MODEL)
with self.assertRaises(Http404):
self.mock_http302_getrequest(cradmin_role=testsubject, requestuser=testuser)
def test_redirect_to_overview_for_periodadmin(self):
testperiod = baker.make('core.Period')
periodpermissiongroup = baker.make('devilry_account.PeriodPermissionGroup', period=testperiod)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('devilry_account.PermissionGroupUser',
user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)
mock_cradmin_instance = mock.MagicMock()
self.mock_http302_getrequest(
cradmin_role=testperiod.parentnode,
cradmin_instance=mock_cradmin_instance,
requestuser=testuser
)
mock_cradmin_instance.rolefrontpage_url.assert_called_once_with(roleid=testperiod.parentnode.id)
def test_redirect_to_overview_for_subject_admins(self):
testsubject = baker.make('core.Subject')
subjectpermissiongroup = baker.make('devilry_account.SubjectPermissionGroup', subject=testsubject)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('devilry_account.PermissionGroupUser',
user=testuser, permissiongroup=subjectpermissiongroup.permissiongroup)
mockresponse = self.mock_http302_getrequest(cradmin_role=testsubject, requestuser=testuser)
self.assertEqual('/devilry_admin/subject/{}/overview/'.format(testsubject.id), mockresponse.response.url)
| bsd-3-clause | 3,487,144,100,941,223,000 | 46.0625 | 113 | 0.729969 | false |
triump0870/RemindMeLater | src/profiles/migrations/0001_initial.py | 1 | 1204 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import uuid
class Migration(migrations.Migration):
dependencies = [
('authtools', '0003_auto_20160128_0912'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('user', models.OneToOneField(primary_key=True,
serialize=False, to=settings.AUTH_USER_MODEL)),
('slug', models.UUIDField(
default=uuid.uuid4, editable=False, blank=True)),
('picture', models.ImageField(upload_to='profile_pics/%Y-%m-%d/',
null=True, verbose_name='Profile picture', blank=True)),
('bio', models.CharField(max_length=200,
null=True, verbose_name='Short Bio', blank=True)),
('email_verified', models.BooleanField(
default=False, verbose_name='Email verified')),
],
options={
'abstract': False,
},
),
]
| mit | -7,514,481,914,761,045,000 | 34.411765 | 102 | 0.503322 | false |
brianhouse/housepy | osc.py | 1 | 3521 | #!/usr/bin/env python3
"""
Example:
import time
from osc import *
try:
address = sys.argv[1]
except:
address = "/hello/world"
try:
data = sys.argv[2]
except:
data = None
def message_handler(location, address, data):
print(location)
print(address)
print(data)
receiver = Receiver(23232, message_handler)
Sender(23232).send(address, data)
time.sleep(0.1)
"""
import sys, os, time, json, threading, queue
from .log import log
from .lib import OSC
verbose = True
class Sender(object):
# note: OSC.OSCMultiClient() does not appear to work, so writing the equivalent in wrappers here
def __init__(self, *location):
self.targets = {}
self.add_target(*location)
def add_target(self, *location):
location = self._get_host_port(*location)
if not location:
return
if verbose:
log.info("OSC adding target %s:%s" % location)
self.targets[location] = OSC.OSCClient()
self.targets[location].connect(location)
def remove_target(self, *location):
location = self._get_host_port(*location)
if not location or not location in self.targets:
return
if verbose:
log.info("OSC removing target %s:%s" % location)
del self.targets[location]
def send(self, address, data=None):
message = OSC.OSCMessage()
message.setAddress(address)
if data is not None:
if type(data) is dict:
data = json.dumps(data)
if type(data) is tuple:
data = list(data)
message.extend(data)
if verbose:
log.info("OSC send (%s): %s" % (len(self.targets), message))
for location, target in self.targets.items():
try:
target.send(message, timeout=1)
except Exception as e:
log.error("%s (%s)" % (log.exc(e), message))
def _get_host_port(self, *target):
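        # Accepts either (port,) meaning localhost, or (host, port); returns a
        # (host, port) tuple, or None when nothing usable was supplied.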
host, port = None, None
assert len(target) <= 2
if len(target) == 1:
host, port = '127.0.0.1', target[0]
if len(target) == 2:
host, port = target
if host is not None and port is not None:
return host, port
return None
class Receiver(threading.Thread):
def __init__(self, port, message_handler=None, blocking=False):
threading.Thread.__init__(self)
self.daemon = True
self.message_handler = message_handler
self.location = '0.0.0.0', port
self.server = OSC.OSCServer(self.location)
if verbose:
log.info("Started OSC Receiver on port %s" % port)
self.start()
if blocking:
try:
while True:
time.sleep(5)
except (KeyboardInterrupt, SystemExit):
pass
def run(self):
self.server.addMsgHandler("default", self.default_handler)
while True:
try:
self.server.handle_request()
except Exception as e:
log.error(log.exc(e))
def default_handler(self, address, tags, data, location): # tags show data type ('i': integer, 's': string)
if verbose:
log.info("%s: %s %s" % (location[0], address, data))
if self.message_handler is not None:
self.message_handler(location[0], address, data)
| mit | -2,130,635,583,753,808,100 | 28.341667 | 112 | 0.550412 | false |
mmpagani/oq-hazardlib | openquake/hazardlib/calc/hazard_curve.py | 1 | 5948 | # coding: utf-8
# The Hazard Library
# Copyright (C) 2012-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`openquake.hazardlib.calc.hazard_curve` implements
:func:`hazard_curves`.
"""
import sys
import numpy
from openquake.hazardlib.calc import filters
from openquake.hazardlib.imt import from_string
from openquake.hazardlib.gsim.base import deprecated
@deprecated('Use calc_hazard_curves instead')
def hazard_curves(
sources, sites, imts, gsims, truncation_level,
source_site_filter=filters.source_site_noop_filter,
rupture_site_filter=filters.rupture_site_noop_filter):
"""
    Deprecated. It does the same job as
:func:`openquake.hazardlib.calc.hazard_curve.calc_hazard_curves`,
with the only difference that the intensity measure types in input
and output are hazardlib objects instead of simple strings.
"""
imtls = {str(imt): imls for imt, imls in imts.iteritems()}
curves_by_imt = calc_hazard_curves(
sources, sites, imtls, gsims, truncation_level,
        source_site_filter=source_site_filter,
        rupture_site_filter=rupture_site_filter)
return {from_string(imt): curves
for imt, curves in curves_by_imt.iteritems()}
def calc_hazard_curves(
sources, sites, imtls, gsims, truncation_level,
source_site_filter=filters.source_site_noop_filter,
rupture_site_filter=filters.rupture_site_noop_filter):
"""
Compute hazard curves on a list of sites, given a set of seismic sources
and a set of ground shaking intensity models (one per tectonic region type
considered in the seismic sources).
Probability of ground motion exceedance is computed using the following
formula ::
P(X≥x|T) = 1 - ∏ ∏ Prup_ij(X<x|T)
where ``P(X≥x|T)`` is the probability that the ground motion parameter
``X`` is exceeding level ``x`` one or more times in a time span ``T``, and
``Prup_ij(X<x|T)`` is the probability that the j-th rupture of the i-th
source is not producing any ground motion exceedance in time span ``T``.
The first product ``∏`` is done over sources, while the second one is done
over ruptures in a source.
The above formula computes the probability of having at least one ground
motion exceedance in a time span as 1 minus the probability that none of
the ruptures in none of the sources is causing a ground motion exceedance
in the same time span. The basic assumption is that seismic sources are
independent, and ruptures in a seismic source are also independent.
:param sources:
An iterator of seismic sources objects (instances of subclasses
of :class:`~openquake.hazardlib.source.base.BaseSeismicSource`).
:param sites:
Instance of :class:`~openquake.hazardlib.site.SiteCollection` object,
representing sites of interest.
:param imtls:
Dictionary mapping intensity measure type strings
to lists of intensity measure levels.
:param gsims:
Dictionary mapping tectonic region types (members
of :class:`openquake.hazardlib.const.TRT`) to
:class:`~openquake.hazardlib.gsim.base.GMPE` or
:class:`~openquake.hazardlib.gsim.base.IPE` objects.
:param truncation_level:
Float, number of standard deviations for truncation of the intensity
distribution.
:param source_site_filter:
Optional source-site filter function. See
:mod:`openquake.hazardlib.calc.filters`.
:param rupture_site_filter:
Optional rupture-site filter function. See
:mod:`openquake.hazardlib.calc.filters`.
:returns:
Dictionary mapping intensity measure type strings (same keys
as in parameter ``imtls``) to 2d numpy arrays of float, where
first dimension differentiates sites (the order and length
are the same as in ``sites`` parameter) and the second one
differentiates IMLs (the order and length are the same as
        corresponding value in the ``imtls`` dict).
"""
imts = {from_string(imt): imls for imt, imls in imtls.iteritems()}
curves = dict((imt, numpy.ones([len(sites), len(imtls[imt])]))
for imt in imtls)
sources_sites = ((source, sites) for source in sources)
for source, s_sites in source_site_filter(sources_sites):
try:
ruptures_sites = ((rupture, s_sites)
for rupture in source.iter_ruptures())
for rupture, r_sites in rupture_site_filter(ruptures_sites):
gsim = gsims[rupture.tectonic_region_type]
sctx, rctx, dctx = gsim.make_contexts(r_sites, rupture)
for imt in imts:
poes = gsim.get_poes(sctx, rctx, dctx, imt, imts[imt],
truncation_level)
pno = rupture.get_probability_no_exceedance(poes)
curves[str(imt)] *= r_sites.expand(pno, placeholder=1)
except Exception, err:
etype, err, tb = sys.exc_info()
msg = 'An error occurred with source id=%s. Error: %s'
msg %= (source.source_id, err.message)
raise etype, msg, tb
for imt in imtls:
curves[imt] = 1 - curves[imt]
return curves
| agpl-3.0 | 7,406,639,656,955,995,000 | 43.646617 | 78 | 0.675312 | false |
uai/VZI | 09/py/sorting.py | 1 | 1314 | # sorting some stuff 2
# https://www.youtube.com/watch?v=-xXvxj0SaQ4
import sys
import random
import time
# import timeit
# v pripade max. recursion limitu
# sys.setrecursionlimit(1500)
# viz. https://stackoverflow.com/questions/3323001/maximum-recursion-depth
nums = list()
n = 10
for i in range(0, n):
# print(random.random())
# nums.append(round(random.uniform(0, 100)))
nums.append(round(random.uniform(0, n)))
def swap(lst, a, b):
tmp = lst[a]
lst[a] = lst[b]
lst[b] = tmp
def print_list(lst):
    for i in range(0, len(lst)):
        print(lst[i])  # pprint was never imported; plain print is enough here
def merge(left, right):
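    # Merge two already-sorted lists into one sorted list by repeatedly taking
    # the smaller head element; whatever remains is appended at the end.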
result = list()
while (len(left) > 0) and (len(right) > 0):
if left[0] > right[0]:
result.append(right.pop(0))
else:
result.append(left.pop(0))
result.extend(left+right)
return result
def merge_sort(lst):
    if len(lst) <= 1:
        return lst
    else:
        middle = len(lst) // 2
        left = lst[:middle]
        right = lst[middle:]
        left = merge_sort(left)
        right = merge_sort(right)
        result = merge(left, right)
        return result
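# Classic top-down merge sort: O(n log n) comparisons and O(n) extra space for
# the temporary lists built in merge(); the commented time.time() calls below
# can be re-enabled to measure it.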
print(nums)
# s = time.time()
lst = merge_sort(nums)
print(lst)
# e = time.time()
# print(nums)
# print(e-s)
| bsd-3-clause | 6,548,840,141,799,127,000 | 17.507042 | 74 | 0.582953 | false |
kartikshah1/Test | concept/views.py | 1 | 1365 | """
Views for Concept API
"""
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404
from elearning_academy.permissions import InInstructorOrContentDeveloperMode
from courseware.permissions import IsOwner
from courseware.models import Concept
@login_required
def view_concept(request, concept_id):
"""
View Concept Page
"""
concept = get_object_or_404(Concept, pk=concept_id)
inInstructorOrContentDeveloperModeObject = InInstructorOrContentDeveloperMode()
isOwnerObj = IsOwner()
if inInstructorOrContentDeveloperModeObject.has_permission(request, None):
return view_content_developer(request, concept_id)
elif (concept.is_published or
isOwnerObj.has_object_permission(request=request, obj=concept.group.course, view=None)):
return view_student(request, concept_id)
else:
        return render(request, 'error.html', {'error': 'Concept does not exist!'})
@login_required
def view_student(request, concept_id):
""" Course Progress of Student """
return render(request, 'concept/student.html', {'conceptId': concept_id})
@login_required
def view_content_developer(request, concept_id):
""" Concept edit page for content developer """
return render(request, 'concept/content_developer.html', {'conceptId': concept_id})
| mit | -4,125,520,641,924,915,000 | 33.125 | 98 | 0.737729 | false |
wavesoft/robob | robob/pipe/app.py | 1 | 1444 |
import shlex
from robob.pipe import PipeBase
class Pipe(PipeBase):
"""
Implementation of the application pipe item
"""
def configure(self, config):
"""
Configure cmdline
"""
# Prepare
self.binary = config['binary']
self.args = []
self.stdin = None
# Parse arguments
if 'args' in config:
if isinstance(config['args'], str):
self.args = shlex.split(config['args'])
elif isinstance(config['args'], list):
self.args = config['args']
else:
raise AssertionError("Application's arguments must be a string or list!")
# Process stdin
if 'stdin' in config:
self.stdin = config['stdin']
# Parse application environment
self.env = {}
if 'env' in config:
n = "env.%s" % config['env']
if not n in self.context:
raise AssertionError("Unknown environment '%s' in application specs" % config['env'])
self.env = self.context[ n ]
def pipe_stdin(self):
"""
Return app stdin buffer (if any)
"""
return self.stdin
def pipe_cmdline(self):
"""
Pipe local arguments to command-line
"""
# Prepare args
args = [ self.binary ]
args += self.args
# Append child command-lines
args += super(Pipe, self).pipe_cmdline()
# If we have environment wrap in 'env'
if self.env:
env_prefix = [ 'env' ]
for k,v in self.env.items():
env_prefix.append( "%s=%s" % (k,v) )
# Update with prefix
args = env_prefix + args
# Return new arguments
return args
| apache-2.0 | 7,895,432,236,760,590,000 | 19.628571 | 89 | 0.639197 | false |
ask/flakeplus | flakeplus/__init__.py | 1 | 7179 | # -*- coding: utf-8 -*-
"""Additional pyflakes"""
# :copyright: (c) 2012-2016 by Ask Solem.
# :license: BSD, see LICENSE for more details.
from __future__ import absolute_import
from __future__ import with_statement
VERSION = (1, 1, 0)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'Ask Solem'
__contact__ = '[email protected]'
__homepage__ = 'http://github.com/ask/flakeplus'
__docformat__ = 'restructuredtext'
# -eof meta-
import os
import re
import sys
from collections import defaultdict
from optparse import OptionParser, make_option as Option
from unipath import Path
EX_USAGE = getattr(os, 'EX_USAGE', 0x40)
RE_COMMENT = r'^\s*\#'
RE_NOQA = r'.+?\#\s+noqa+'
RE_MULTILINE_COMMENT_O = r'^\s*(?:\'\'\'|""").+?(?:\'\'\'|""")'
RE_MULTILINE_COMMENT_S = r'^\s*(?:\'\'\'|""")'
RE_MULTILINE_COMMENT_E = r'(?:^|.+?)(?:\'\'\'|""")'
RE_WITH = r'(?:^|\s+)with\s+'
RE_WITH_IMPORT = r'''from\s+ __future__\s+ import\s+ with_statement'''
RE_PRINT = r'''(?:^|\s+)print\((?:"|')(?:\W+?)?[A-Z0-9:]{2,}'''
RE_ABS_IMPORT = r'''from\s+ __future__\s+ import\s+.*?absolute_import'''
RE_UNI_IMPORT = r'''from\s+ __future__\s+ import.*?\s+unicode_literals'''
acc = defaultdict(lambda: {
'abs': False,
'print': False,
'uni': False,
})
def compile(regex):
return re.compile(regex, re.VERBOSE)
class FlakePP(object):
re_comment = compile(RE_COMMENT)
re_ml_comment_o = compile(RE_MULTILINE_COMMENT_O)
re_ml_comment_s = compile(RE_MULTILINE_COMMENT_S)
re_ml_comment_e = compile(RE_MULTILINE_COMMENT_E)
re_abs_import = compile(RE_ABS_IMPORT)
re_uni_import = compile(RE_UNI_IMPORT)
re_print = compile(RE_PRINT)
re_with_import = compile(RE_WITH_IMPORT)
re_with = compile(RE_WITH)
re_noqa = compile(RE_NOQA)
map = {
'abs': False,
'print': False,
'with': False,
'with-used': False,
'uni': False,
}
def __init__(self, verbose=False, use_26=False, use_27=False, quiet=False):
self.verbose = verbose # XXX unused
self.quiet = quiet
self.use_26 = use_26 or use_27
self.use_27 = use_27
self.steps = (
('abs', self.re_abs_import),
('uni', self.re_uni_import),
('with', self.re_with_import),
('with-used', self.re_with),
('print', self.re_print),
)
def analyze_fh(self, fh):
steps = self.steps
filename = fh.name
acc = dict(self.map)
index = 0
errors = [0]
def error(fmt, **kwargs):
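            # errors is a one-element list so this nested function can bump the
            # count without ``nonlocal``, which Python 2 does not have.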
errors[0] += 1
self.announce(fmt, **dict(kwargs, filename=filename))
for index, line in enumerate(self.strip_comments(fh)):
for key, pattern in steps:
if pattern.match(line):
acc[key] = True
if index:
if not acc['abs']:
                error('%(filename)s: missing absolute_import import')
if not self.use_26 and acc['with-used'] and not acc['with']:
error('%(filename)s: missing with import')
if self.use_27 and not acc['uni']:
error('%(filename)s: missing unicode_literals import')
if acc['print']:
error('%(filename)s: left over print statement')
return filename, errors[0], acc
def analyze_file(self, filename):
with open(filename) as fh:
return self.analyze_fh(fh)
def analyze_tree(self, dir):
for dirpath, _, filenames in os.walk(dir):
for path in (Path(dirpath, f) for f in filenames):
if path.endswith('.py'):
yield self.analyze_file(path)
def analyze(self, *paths):
for path in map(Path, paths):
if path.isdir():
for res in self.analyze_tree(path):
yield res
else:
yield self.analyze_file(path)
def strip_comments(self, fh):
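        # Yield only the lines of fh that are real code: single-line comments,
        # "# noqa"-marked lines and the bodies of triple-quoted blocks are all
        # skipped, so the regex checks in analyze_fh never match inside them.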
re_comment = self.re_comment
re_ml_comment_o = self.re_ml_comment_o
re_ml_comment_s = self.re_ml_comment_s
re_ml_comment_e = self.re_ml_comment_e
re_noqa = self.re_noqa
in_ml = False
for line in fh.readlines():
if in_ml:
if re_ml_comment_e.match(line):
in_ml = False
else:
if re_noqa.match(line) or re_ml_comment_o.match(line):
pass
elif re_ml_comment_s.match(line):
in_ml = True
elif re_comment.match(line):
pass
else:
yield line
def announce(self, fmt, **kwargs):
if not self.quiet:
sys.stderr.write((fmt + '\n') % kwargs)
class Command(object):
FlakePP = FlakePP
Parser = OptionParser
args = 'dir1 .. dirN'
version = __version__
def run(self, *files, **kwargs):
exitcode = 0
for _, errors, _ in self.FlakePP(**kwargs).analyze(*files):
if errors:
exitcode = 1
return exitcode
def get_options(self):
return (
Option('--2.6',
default=False, action='store_true', dest='use_26',
help='Specify support of Python 2.6 and up'),
Option('--2.7',
default=False, action='store_true', dest='use_27',
help='Specify support of Python 2.7 and up'),
Option('--verbose', '-v',
default=False, action='store_true', dest='verbose',
help='Show more output.'),
Option('--quiet', '-q',
default=False, action='store_true', dest='quiet',
help='Silence output'),
)
def usage(self):
return '%%prog [options] %s' % (self.args, )
def die(self, msg):
self.usage()
sys.stderr.write('%s\n' % msg)
return EX_USAGE
def expanduser(self, value):
if isinstance(value, basestring):
return os.path.expanduser(value)
return value
def handle_argv(self, prog_name, argv):
options, args = self.parse_options(prog_name, argv)
options = dict((k, self.expanduser(v))
for k, v in vars(options).iteritems()
if not k.startswith('_'))
argv = map(self.expanduser, argv)
if not argv:
return self.die('No input files/directories')
return self.run(*args, **options)
def parse_options(self, prog_name, argv):
parser = self.Parser(prog=prog_name,
usage=self.usage(),
version=self.version,
option_list=self.get_options())
return parser.parse_args(argv)
def execute_from_commandline(self, argv=None):
if argv is None:
argv = list(sys.argv)
prog_name = os.path.basename(argv[0])
return self.handle_argv(prog_name, argv[1:])
def main(argv=sys.argv):
sys.exit(Command().execute_from_commandline(argv))
if __name__ == '__main__':
main()
| bsd-3-clause | -723,099,010,128,184,200 | 30.765487 | 79 | 0.531411 | false |
skosukhin/spack | var/spack/repos/builtin/packages/snbone/package.py | 1 | 2737 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Snbone(MakefilePackage):
"""This application targets the primary computational solve burden of a SN,
continuous finite element based transport equation solver."""
homepage = "https://github.com/ANL-CESAR/"
url = "https://github.com/ANL-CESAR/SNbone.git"
version('develop', git='https://github.com/ANL-CESAR/SNbone.git')
tags = ['proxy-app']
depends_on('metis')
def build(self, spec, prefix):
working_dirs = ['src_c', 'src_fortran', 'src_makemesh',
'src_processmesh']
for wdir in working_dirs:
with working_dir(wdir, create=False):
if self.compiler.name == 'gcc' and wdir == 'src_processmesh':
make('COMPILER=gfortran', 'METISLIB={0}'
.format(spec['metis'].prefix + '/lib/libmetis.so'))
elif self.compiler.name == 'intel':
make('COMPILER=intel', 'LDFLAGS=-lm')
else:
make('COMPILER=gfortran', 'LDFLAGS=-lm')
def install(self, spec, prefix):
mkdirp(prefix.bin.C)
mkdirp(prefix.bin.Fortran)
mkdirp(prefix.bin.MakeMesh)
mkdirp(prefix.bin.ProcessMesh)
install('src_c/SNaCFE.x', prefix.bin.C)
install('src_fortran/SNaCFE.x', prefix.bin.Fortran)
install('src_makemesh/makemesh.x', prefix.bin.MakeMesh)
install('src_processmesh/processmesh.x', prefix.bin.ProcessMesh)
| lgpl-2.1 | -3,598,552,296,907,631,600 | 41.765625 | 79 | 0.620022 | false |
timdiels/zeroinstall | zeroinstall/zerostore/cli.py | 1 | 7291 | """Code for the B{0store} command-line interface."""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import sys, os
from zeroinstall.zerostore.manifest import verify, get_algorithm, copy_tree_with_verify
from zeroinstall import zerostore, SafeException, support
stores = None
def init_stores():
global stores
assert stores is None
if stores is None:
stores = zerostore.Stores()
class UsageError(SafeException): pass
def do_manifest(args):
"""manifest DIRECTORY [ALGORITHM]"""
if len(args) < 1 or len(args) > 2: raise UsageError(_("Wrong number of arguments"))
if len(args) == 2:
alg = get_algorithm(args[1])
else:
# If no algorithm was given, guess from the directory name
name = os.path.basename(args[0])
if '=' in name:
alg = get_algorithm(name.split('=', 1)[0])
else:
alg = get_algorithm('sha1new')
digest = alg.new_digest()
for line in alg.generate_manifest(args[0]):
print line
digest.update(line + '\n')
print alg.getID(digest)
sys.exit(0)
def do_find(args):
"""find DIGEST"""
if len(args) != 1: raise UsageError(_("Wrong number of arguments"))
try:
print stores.lookup(args[0])
sys.exit(0)
except zerostore.BadDigest as ex:
print >>sys.stderr, ex
except zerostore.NotStored as ex:
print >>sys.stderr, ex
sys.exit(1)
def do_add(args):
"""add DIGEST (DIRECTORY | (ARCHIVE [EXTRACT]))"""
from zeroinstall.zerostore import unpack
if len(args) < 2: raise UsageError(_("Missing arguments"))
digest = args[0]
if os.path.isdir(args[1]):
if len(args) > 2: raise UsageError(_("Too many arguments"))
stores.add_dir_to_cache(digest, args[1])
elif os.path.isfile(args[1]):
if len(args) > 3: raise UsageError(_("Too many arguments"))
if len(args) > 2:
extract = args[2]
else:
extract = None
type = unpack.type_from_url(args[1])
if not type:
raise SafeException(_("Unknown extension in '%s' - can't guess MIME type") % args[1])
unpack.check_type_ok(type)
stores.add_archive_to_cache(digest, file(args[1]), args[1], extract, type = type)
else:
try:
os.stat(args[1])
except OSError as ex:
if ex.errno != 2: # No such file or directory
raise UsageError(str(ex)) # E.g. permission denied
raise UsageError(_("No such file or directory '%s'") % args[1])
def do_optimise(args):
"""optimise [ CACHE ]"""
if len(args) == 1:
cache_dir = args[0]
else:
cache_dir = stores.stores[0].dir
cache_dir = os.path.realpath(cache_dir)
import stat
info = os.stat(cache_dir)
if not stat.S_ISDIR(info.st_mode):
raise UsageError(_("Not a directory: '%s'") % cache_dir)
impl_name = os.path.basename(cache_dir)
if impl_name != 'implementations':
raise UsageError(_("Cache directory should be named 'implementations', not\n"
"'%(name)s' (in '%(cache_dir)s')") % {'name': impl_name, 'cache_dir': cache_dir})
print _("Optimising"), cache_dir
from . import optimise
uniq_size, dup_size, already_linked, man_size = optimise.optimise(cache_dir)
print _("Original size : %(size)s (excluding the %(manifest_size)s of manifests)") % {'size': support.pretty_size(uniq_size + dup_size), 'manifest_size': support.pretty_size(man_size)}
print _("Already saved : %s") % support.pretty_size(already_linked)
if dup_size == 0:
print _("No duplicates found; no changes made.")
else:
print _("Optimised size : %s") % support.pretty_size(uniq_size)
perc = (100 * float(dup_size)) / (uniq_size + dup_size)
print _("Space freed up : %(size)s (%(percentage).2f%%)") % {'size': support.pretty_size(dup_size), 'percentage': perc}
print _("Optimisation complete.")
def do_verify(args):
"""verify (DIGEST | (DIRECTORY [DIGEST])"""
if len(args) == 2:
required_digest = args[1]
root = args[0]
elif len(args) == 1:
root = get_stored(args[0])
required_digest = None # Get from name
else:
raise UsageError(_("Missing DIGEST or DIRECTORY"))
print _("Verifying"), root
try:
verify(root, required_digest)
print _("OK")
except zerostore.BadDigest as ex:
print str(ex)
if ex.detail:
print
print ex.detail
sys.exit(1)
def do_audit(args):
"""audit [DIRECTORY]"""
if len(args) == 0:
audit_stores = stores.stores
else:
audit_stores = [zerostore.Store(x) for x in args]
audit_ls = []
total = 0
for a in audit_stores:
if os.path.isdir(a.dir):
items = sorted(os.listdir(a.dir))
audit_ls.append((a.dir, items))
total += len(items)
elif len(args):
raise SafeException(_("No such directory '%s'") % a.dir)
verified = 0
failures = []
i = 0
for root, impls in audit_ls:
print _("Scanning %s") % root
for required_digest in impls:
i += 1
path = os.path.join(root, required_digest)
if '=' not in required_digest:
print _("Skipping non-implementation directory %s") % path
continue
try:
msg = _("[%(done)d / %(total)d] Verifying %(digest)s") % {'done': i, 'total': total, 'digest': required_digest}
print msg,
sys.stdout.flush()
verify(path, required_digest)
print "\r" + (" " * len(msg)) + "\r",
verified += 1
except zerostore.BadDigest as ex:
print
failures.append(path)
print str(ex)
if ex.detail:
print
print ex.detail
if failures:
print '\n' + _("List of corrupted or modified implementations:")
for x in failures:
print x
print
print _("Checked %d items") % i
print _("Successfully verified implementations: %d") % verified
print _("Corrupted or modified implementations: %d") % len(failures)
if failures:
sys.exit(1)
def do_list(args):
"""list"""
if args: raise UsageError(_("List takes no arguments"))
print _("User store (writable) : %s") % stores.stores[0].dir
for s in stores.stores[1:]:
print _("System store : %s") % s.dir
if len(stores.stores) < 2:
print _("No system stores.")
def get_stored(dir_or_digest):
if os.path.isdir(dir_or_digest):
return dir_or_digest
else:
try:
return stores.lookup(dir_or_digest)
except zerostore.NotStored as ex:
print >>sys.stderr, ex
sys.exit(1)
def do_copy(args):
"""copy SOURCE [ TARGET ]"""
if len(args) == 2:
source, target = args
elif len(args) == 1:
source = args[0]
target = stores.stores[0].dir
else:
raise UsageError(_("Wrong number of arguments."))
if not os.path.isdir(source):
raise UsageError(_("Source directory '%s' not found") % source)
if not os.path.isdir(target):
raise UsageError(_("Target directory '%s' not found") % target)
manifest_path = os.path.join(source, '.manifest')
if not os.path.isfile(manifest_path):
raise UsageError(_("Source manifest '%s' not found") % manifest_path)
required_digest = os.path.basename(source)
manifest_data = file(manifest_path, 'rb').read()
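	# copy_tree_with_verify re-checks every file against this manifest while
	# copying, so a corrupted or tampered source tree is rejected before it can
	# reach the target store.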
copy_tree_with_verify(source, target, manifest_data, required_digest)
def do_manage(args):
"""manage"""
if args:
raise UsageError(_("manage command takes no arguments"))
import pygtk
pygtk.require('2.0')
import gtk
from zeroinstall.gtkui import cache
from zeroinstall.injector.iface_cache import iface_cache
cache_explorer = cache.CacheExplorer(iface_cache)
cache_explorer.window.connect('destroy', gtk.main_quit)
cache_explorer.show()
gtk.main()
commands = [do_add, do_audit, do_copy, do_find, do_list, do_manifest, do_optimise, do_verify, do_manage]
| lgpl-2.1 | -8,336,675,680,161,674,000 | 28.759184 | 186 | 0.667124 | false |
datapythonista/pandas | pandas/tests/frame/test_stack_unstack.py | 1 | 72472 | from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
        # GH#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("w", "b", "j")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
        # Fill with non-category results in a TypeError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = MultiIndex(levels, codes)
data = np.arange(8)
df = DataFrame(data.reshape(4, 2), index=idx)
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = MultiIndex.from_product([[0, 1], col_level])
expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
        # GH 32624: Error when using a lot of indices to unstack.
        # The error occurred only if a lot of indices were used.
df = DataFrame(
[[1]],
columns=MultiIndex.from_tuples([[0]], names=["c1"]),
index=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
)
result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
expected = DataFrame(
[[1]],
columns=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
index=Index([0], name="i1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
        # GH 24729: Unstack a df with multi level columns
df = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
        # GH 28306: Unstack df with multi level cols and rows
df = DataFrame(
[[1, 2], [3, 4], [-1, -2], [-3, -4]],
columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
index=MultiIndex.from_tuples(
[
["m1", "P3", 222],
["m1", "A5", 111],
["m2", "P3", 222],
["m2", "A5", 111],
],
names=["i1", "i2", "i3"],
),
)
result = df.unstack(["i3", "i2"])
expected = df.unstack(["i3"]).unstack(["i2"])
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index1(self):
# GH7466
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split("."))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(map(cast, right))
assert left == right
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf["jolie"])
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
for idx in itertools.permutations(["1st", "2nd", "3rd"]):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ["4th", "5th"]:
verify(udf[col])
def test_unstack_nan_index2(self):
# GH7403
df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [
[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index3(self, using_array_manager):
# GH7401
df = DataFrame(
{
"A": list("aaaaabbbbb"),
"B": (date_range("2012-01-01", periods=5).tolist() * 2),
"C": np.arange(10),
}
)
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(["a", "b"], name="A")
cols = MultiIndex(
levels=[["C"], date_range("2012-01-01", periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, "B"],
)
right = DataFrame(vals, columns=cols, index=idx)
if using_array_manager:
# INFO(ArrayManager) with ArrayManager preserve dtype where possible
cols = right.columns[[1, 2, 3, 5]]
right[cols] = right[cols].astype(df["C"].dtype)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index4(self):
# GH4862
vals = [
["Hg", np.nan, np.nan, 680585148],
["U", 0.0, np.nan, 680585148],
["Pb", 7.07e-06, np.nan, 680585148],
["Sn", 2.3614e-05, 0.0133, 680607017],
["Ag", 0.0, 0.0133, 680607017],
["Hg", -0.00015, 0.0133, 680607017],
]
df = DataFrame(
vals,
columns=["agent", "change", "dosage", "s_id"],
index=[17263, 17264, 17265, 17266, 17267, 17268],
)
left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack()
vals = [
[np.nan, np.nan, 7.07e-06, np.nan, 0.0],
[0.0, -0.00015, np.nan, 2.3614e-05, np.nan],
]
idx = MultiIndex(
levels=[[680585148, 680607017], [0.0133]],
codes=[[0, 1], [-1, 0]],
names=["s_id", "dosage"],
)
cols = MultiIndex(
levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]],
codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, "agent"],
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"])
tm.assert_frame_equal(left.unstack(), right)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) MultiIndex bug
def test_unstack_nan_index5(self):
# GH9497 - multiple unstack with nulls
df = DataFrame(
{
"1st": [1, 2, 1, 2, 1, 2],
"2nd": date_range("2014-02-01", periods=6, freq="D"),
"jim": 100 + np.arange(6),
"joe": (np.random.randn(6) * 10).round(2),
}
)
df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02")
df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan
df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan
left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"])
assert left.notna().values.sum() == 2 * len(df)
for col in ["jim", "joe"]:
for _, r in df.iterrows():
key = r["1st"], (col, r["2nd"], r["3rd"])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ("B",)])
ecols = MultiIndex.from_tuples([(t, "A")])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
tm.assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(
np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
columns=multiindex,
)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(
df.columns.to_numpy(), names=df.columns.names
)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples(
[("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
names=["Upper", "Lower"],
)
for multiindex_columns in (
[0, 1, 2, 3, 4],
[0, 1, 2, 3],
[0, 1, 2, 4],
[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[0, 1],
[0, 2],
[0, 3],
[0],
[2],
[4],
):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame(
[[0, 2], [1, np.nan], [3, 5], [4, np.nan]],
index=MultiIndex(
levels=[[0, 1], ["u", "x", "y", "z"]],
codes=[[0, 0, 1, 1], [1, 3, 1, 3]],
names=[None, "Lower"],
),
columns=Index(["B", "C"], name="Upper"),
dtype=df.dtypes[0],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize("labels", [list("yxz"), list("yxy")])
def test_stack_preserve_categorical_dtype(self, ordered, labels):
# GH13854
cidx = pd.CategoricalIndex(labels, categories=list("xyz"), ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
# `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"labels,data",
[
(list("xyz"), [10, 11, 12, 13, 14, 15]),
(list("zyx"), [14, 15, 12, 13, 10, 11]),
],
)
def test_stack_multi_preserve_categorical_dtype(self, ordered, labels, data):
# GH-36991
cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered)
cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered)
midx = MultiIndex.from_product([cidx, cidx2])
df = DataFrame([sorted(data)], columns=midx)
result = df.stack([0, 1])
s_cidx = pd.CategoricalIndex(sorted(labels), ordered=ordered)
expected = Series(data, index=MultiIndex.from_product([[0], s_cidx, cidx2]))
tm.assert_series_equal(result, expected)
def test_stack_preserve_categorical_dtype_values(self):
# GH-23077
cat = pd.Categorical(["a", "a", "b", "c"])
df = DataFrame({"A": cat, "B": cat})
result = df.stack()
index = MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]])
expected = Series(
pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index, columns",
[
([0, 0, 1, 1], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 0, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 1, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
],
)
def test_stack_multi_columns_non_unique_index(self, index, columns):
# GH-28301
df = DataFrame(index=index, columns=columns).fillna(1)
stacked = df.stack()
new_index = MultiIndex.from_tuples(stacked.index.to_numpy())
expected = DataFrame(
stacked.to_numpy(), index=new_index, columns=stacked.columns
)
tm.assert_frame_equal(stacked, expected)
stacked_codes = np.asarray(stacked.index.codes)
expected_codes = np.asarray(new_index.codes)
tm.assert_numpy_array_equal(stacked_codes, expected_codes)
@pytest.mark.parametrize("level", [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"])
df = DataFrame(
{
"A": pd.array([0, 1, None], dtype="Int64"),
"B": pd.Categorical(["a", "a", "b"]),
},
index=index,
)
result = df.unstack(level=level)
expected = df.astype(object).unstack(level=level)
expected_dtypes = Series(
[df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns
)
tm.assert_series_equal(result.dtypes, expected_dtypes)
tm.assert_frame_equal(result.astype(object), expected)
@pytest.mark.parametrize("level", [0, "baz"])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
mi = MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"])
df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"])
df.columns.name = "foo"
expected = DataFrame(
[[3, 1, 2, 0]],
columns=MultiIndex.from_tuples(
[("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"]
),
)
expected.index.name = "bar"
result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = Series(["a", "b", "c", "a"], dtype="object")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
# Fill with any value replaces missing values as expected
result = data.unstack(fill_value="d")
expected = DataFrame(
{"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
def test_unstack_timezone_aware_values():
# GH 18338
df = DataFrame(
{
"timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")],
"a": ["a"],
"b": ["b"],
"c": ["c"],
},
columns=["timestamp", "a", "b", "c"],
)
result = df.set_index(["a", "b"]).unstack()
expected = DataFrame(
[[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]],
index=Index(["a"], name="a"),
columns=MultiIndex(
levels=[["timestamp", "c"], ["b"]],
codes=[[0, 1], [0, 0]],
names=[None, "b"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_timezone_aware_values():
# GH 19420
ts = date_range(freq="D", start="20180101", end="20180103", tz="America/New_York")
df = DataFrame({"A": ts}, index=["a", "b", "c"])
result = df.stack()
expected = Series(
ts,
index=MultiIndex(levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_stack_empty_frame(dropna):
# GH 36113
expected = Series(index=MultiIndex([[], []], [[], []]), dtype=np.float64)
result = DataFrame(dtype=np.float64).stack(dropna=dropna)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_stack_unstack_empty_frame(dropna, fill_value):
# GH 36113
result = (
DataFrame(dtype=np.int64).stack(dropna=dropna).unstack(fill_value=fill_value)
)
expected = DataFrame(dtype=np.int64)
tm.assert_frame_equal(result, expected)
def test_unstack_single_index_series():
# GH 36113
msg = r"index must be a MultiIndex to unstack.*"
with pytest.raises(ValueError, match=msg):
Series(dtype=np.int64).unstack()
def test_unstacking_multi_index_df():
# see gh-30740
df = DataFrame(
{
"name": ["Alice", "Bob"],
"score": [9.5, 8],
"employed": [False, True],
"kids": [0, 0],
"gender": ["female", "male"],
}
)
df = df.set_index(["name", "employed", "kids", "gender"])
df = df.unstack(["gender"], fill_value=0)
expected = df.unstack("employed", fill_value=0).unstack("kids", fill_value=0)
result = df.unstack(["employed", "kids"], fill_value=0)
expected = DataFrame(
[[9.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0]],
index=Index(["Alice", "Bob"], name="name"),
columns=MultiIndex.from_tuples(
[
("score", "female", False, 0),
("score", "female", True, 0),
("score", "male", False, 0),
("score", "male", True, 0),
],
names=[None, "gender", "employed", "kids"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_positional_level_duplicate_column_names():
# https://github.com/pandas-dev/pandas/issues/36353
columns = MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"])
df = DataFrame([[1, 1, 1, 1]], columns=columns)
result = df.stack(0)
new_columns = Index(["y", "z"], name="a")
new_index = MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"])
expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns)
tm.assert_frame_equal(result, expected)
class TestStackUnstackMultiLevel:
def test_unstack(self, multiindex_year_month_day_dataframe_random_data):
# just check that it works for now
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack()
unstacked.unstack()
# test that ints work
ymd.astype(int).unstack()
# test that int32 work
ymd.astype(np.int32).unstack()
@pytest.mark.parametrize(
"result_rows,result_columns,index_product,expected_row",
[
(
[[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]],
["ix1", "ix2", "col1", "col2", "col3", "col4"],
2,
[None, None, 30.0, None],
),
(
[[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
2,
[None, None, 30.0],
),
(
[[1, 1, None, None, 30.0], [2, None, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
None,
[None, None, 30.0],
),
],
)
def test_unstack_partial(
self, result_rows, result_columns, index_product, expected_row
):
# check for regressions on this issue:
# https://github.com/pandas-dev/pandas/issues/19351
        # make sure DataFrame.unstack() works when it's run on a subset of the DataFrame
# and the Index levels contain values that are not present in the subset
result = DataFrame(result_rows, columns=result_columns).set_index(
["ix1", "ix2"]
)
result = result.iloc[1:2].unstack("ix2")
expected = DataFrame(
[expected_row],
columns=MultiIndex.from_product(
[result_columns[2:], [index_product]], names=[None, "ix2"]
),
index=Index([2], name="ix1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples(
[(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)]
)
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
def test_stack(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
# regular roundtrip
unstacked = ymd.unstack()
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
unlexsorted = ymd.sort_index(level=2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
# columns unsorted
unstacked = ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
# more than 2 levels in the columns
unstacked = ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = ymd.unstack()
tm.assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = ymd.unstack(1)
tm.assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = ymd.stack().unstack(1).unstack(1)
tm.assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = ymd.unstack(2).loc[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = ymd.stack()
tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
        # stack with negative number
        result = ymd.unstack(0).stack(-2)
        expected = ymd.unstack(0).stack(0)
        tm.assert_frame_equal(result, expected)
# GH10417
def check(left, right):
tm.assert_series_equal(left, right)
assert left.index.is_unique is False
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(
np.arange(12).reshape(4, 3),
index=list("abab"),
columns=["1st", "2nd", "3rd"],
)
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd", "3rd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile(np.arange(3), 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ["1st", "2nd", "1st"]
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ("a", 2), ("b", 1), ("a", 1), ("b", 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(
levels=[["a", "b"], [1, 2], ["1st", "2nd"]],
codes=[
np.tile(np.arange(2).repeat(3), 2),
np.repeat([1, 0, 1], [3, 6, 3]),
np.tile([0, 1, 0], 4),
],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thu,Dinner,No,3.0,1
Thu,Lunch,No,117.32,44
Thu,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(["day", "time", "smoker"])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
tm.assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T
df["foo", "four"] = "foo"
df = df.sort_index(level=1, axis=1)
stacked = df.stack()
result = df["foo"].stack().sort_index()
tm.assert_series_equal(stacked["foo"], result, check_names=False)
assert result.name is None
assert stacked["bar"].dtype == np.float_
def test_unstack_bug(self):
df = DataFrame(
{
"state": ["naive", "naive", "naive", "active", "active", "active"],
"exp": ["a", "b", "b", "b", "a", "a"],
"barcode": [1, 2, 3, 4, 1, 3],
"v": ["hi", "hi", "bye", "bye", "bye", "peace"],
"extra": np.arange(6.0),
}
)
result = df.groupby(["state", "exp", "barcode", "v"]).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack()
assert unstacked.index.name == "first"
assert unstacked.columns.names == ["exp", "second"]
restacked = unstacked.stack()
assert restacked.index.names == frame.index.names
@pytest.mark.parametrize("method", ["stack", "unstack"])
def test_stack_unstack_wrong_level_name(
self, method, multiindex_dataframe_random_data
):
# GH 18303 - wrong level name should raise
frame = multiindex_dataframe_random_data
# A DataFrame with flat axes:
df = frame.loc["foo"]
with pytest.raises(KeyError, match="does not match index name"):
getattr(df, method)("mistake")
if method == "unstack":
# Same on a Series:
s = df.iloc[:, 0]
with pytest.raises(KeyError, match="does not match index name"):
getattr(s, method)("mistake")
def test_unstack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.unstack("second")
expected = frame.unstack(level=1)
tm.assert_frame_equal(result, expected)
def test_stack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack("second")
result = unstacked.stack("exp")
expected = frame.unstack().stack(0)
tm.assert_frame_equal(result, expected)
result = frame.stack("exp")
expected = frame.stack()
tm.assert_series_equal(result, expected)
def test_stack_unstack_multiple(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
expected = ymd.unstack("year").unstack("month")
tm.assert_frame_equal(unstacked, expected)
assert unstacked.columns.names == expected.columns.names
# series
s = ymd["A"]
s_unstacked = s.unstack(["year", "month"])
tm.assert_frame_equal(s_unstacked, expected["A"])
restacked = unstacked.stack(["year", "month"])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sort_index(level=0)
tm.assert_frame_equal(restacked, ymd)
assert restacked.index.names == ymd.index.names
# GH #451
unstacked = ymd.unstack([1, 2])
expected = ymd.unstack(1).unstack(1).dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
unstacked = ymd.unstack([2, 1])
expected = ymd.unstack(2).unstack(1).dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns])
def test_stack_names_and_numbers(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
# Can't use mixture of names and numbers to stack
with pytest.raises(ValueError, match="level should contain"):
unstacked.stack([0, "month"])
def test_stack_multiple_out_of_bounds(
self, multiindex_year_month_day_dataframe_random_data
):
# nlevels == 3
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
with pytest.raises(IndexError, match="Too many levels"):
unstacked.stack([2, 3])
with pytest.raises(IndexError, match="not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH4342
idx1 = pd.PeriodIndex(
["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
freq="M",
name="period",
)
idx2 = Index(["A", "B"] * 3, name="str")
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
["2013-01", "2013-02", "2013-03"], freq="M", name="period"
)
expected = DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6]}, index=e_idx, columns=["A", "B"]
)
expected.columns.name = "str"
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(
["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
freq="M",
name="period1",
)
idx2 = pd.PeriodIndex(
["2013-12", "2013-11", "2013-10", "2013-09", "2013-08", "2013-07"],
freq="M",
name="period2",
)
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
["2013-01", "2013-02", "2013-03"], freq="M", name="period1"
)
e_cols = pd.PeriodIndex(
["2013-07", "2013-08", "2013-09", "2013-10", "2013-11", "2013-12"],
freq="M",
name="period2",
)
expected = DataFrame(
[
[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan],
],
index=e_idx,
columns=e_cols,
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH4342
idx1 = pd.PeriodIndex(
["2014-01", "2014-02", "2014-02", "2014-02", "2014-01", "2014-01"],
freq="M",
name="period1",
)
idx2 = pd.PeriodIndex(
["2013-12", "2013-12", "2014-02", "2013-10", "2013-10", "2014-02"],
freq="M",
name="period2",
)
value = {"A": [1, 2, 3, 4, 5, 6], "B": [6, 5, 4, 3, 2, 1]}
idx = MultiIndex.from_arrays([idx1, idx2])
df = DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(["2014-01", "2014-02"], freq="M", name="period1")
e_2 = pd.PeriodIndex(
["2013-10", "2013-12", "2014-02", "2013-10", "2013-12", "2014-02"],
freq="M",
name="period2",
)
e_cols = MultiIndex.from_arrays(["A A A B B B".split(), e_2])
expected = DataFrame(
[[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]], index=e_1, columns=e_cols
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(
["2014-01", "2014-02", "2014-01", "2014-02"], freq="M", name="period1"
)
e_2 = pd.PeriodIndex(
["2013-10", "2013-12", "2014-02"], freq="M", name="period2"
)
e_cols = MultiIndex.from_arrays(["A A B B".split(), e_1])
expected = DataFrame(
[[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]], index=e_2, columns=e_cols
)
tm.assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
# bug when some uniques are not present in the data GH#3170
id_col = ([1] * 3) + ([2] * 3)
name = (["a"] * 3) + (["b"] * 3)
date = pd.to_datetime(["2013-01-03", "2013-01-04", "2013-01-05"] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame({"ID": id_col, "NAME": name, "DATE": date, "VAR1": var1})
multi = df.set_index(["DATE", "ID"])
multi.columns.name = "Params"
unst = multi.unstack("ID")
down = unst.resample("W-THU").mean()
rs = down.stack("ID")
xp = unst.loc[:, ["VAR1"]].resample("W-THU").mean().stack("ID")
xp.columns.name = "Params"
tm.assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH#3997
df = DataFrame({"A": ["a1", "a2"], "B": ["b1", "b2"], "C": [1, 1]})
df = df.set_index(["A", "B"])
stacked = df.unstack().stack(dropna=False)
assert len(stacked) > len(stacked.dropna())
stacked = df.unstack().stack(dropna=True)
tm.assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(
index=[
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1],
],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]],
)
df.index.names = ["a", "b", "c"]
df.columns.names = ["d", "e"]
# it works!
df.unstack(["b", "c"])
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl GH#2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame(
{
"A": np.random.randint(100, size=NUM_ROWS),
"B": np.random.randint(300, size=NUM_ROWS),
"C": np.random.randint(-7, 7, size=NUM_ROWS),
"D": np.random.randint(-19, 19, size=NUM_ROWS),
"E": np.random.randint(3000, size=NUM_ROWS),
"F": np.random.randn(NUM_ROWS),
}
)
idf = df.set_index(["A", "B", "C", "D", "E"])
# it works! is sufficient
idf.unstack("E")
def test_unstack_unobserved_keys(self):
# related to GH#2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, codes)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
assert len(result.columns) == 4
recons = result.stack()
tm.assert_frame_equal(recons, df)
@pytest.mark.slow
def test_unstack_number_of_levels_larger_than_int32(self):
# GH#20601
df = DataFrame(
np.random.randn(2 ** 16, 2), index=[np.arange(2 ** 16), np.arange(2 ** 16)]
)
with pytest.raises(ValueError, match="int32 overflow"):
df.unstack()
def test_stack_order_with_unsorted_levels(self):
# GH#16323
def manual_compare_stacked(df, df_stacked, lev0, lev1):
assert all(
df.loc[row, col] == df_stacked.loc[(row, col[lev0]), col[lev1]]
for row in df.index
for col in df.columns
)
# deep check for 1-row case
for width in [2, 3]:
levels_poss = itertools.product(
itertools.permutations([0, 1, 2], width), repeat=2
)
for levels in levels_poss:
columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(columns=columns, data=[range(4)])
for stack_lev in range(2):
df_stacked = df.stack(stack_lev)
manual_compare_stacked(df, df_stacked, stack_lev, 1 - stack_lev)
# check multi-row case
mi = MultiIndex(
levels=[["A", "C", "B"], ["B", "A", "C"]],
codes=[np.repeat(range(3), 3), np.tile(range(3), 3)],
)
df = DataFrame(
columns=mi, index=range(5), data=np.arange(5 * len(mi)).reshape(5, -1)
)
manual_compare_stacked(df, df.stack(0), 0, 1)
def test_stack_unstack_unordered_multiindex(self):
# GH# 18265
values = np.arange(5)
data = np.vstack(
[
[f"b{x}" for x in values], # b0, b1, ..
[f"a{x}" for x in values], # a0, a1, ..
]
)
df = DataFrame(data.T, columns=["b", "a"])
df.columns.name = "first"
second_level_dict = {"x": df}
multi_level_df = pd.concat(second_level_dict, axis=1)
multi_level_df.columns.names = ["second", "first"]
df = multi_level_df.reindex(sorted(multi_level_df.columns), axis=1)
result = df.stack(["first", "second"]).unstack(["first", "second"])
expected = DataFrame(
[["a0", "b0"], ["a1", "b1"], ["a2", "b2"], ["a3", "b3"], ["a4", "b4"]],
index=[0, 1, 2, 3, 4],
columns=MultiIndex.from_tuples(
[("a", "x"), ("b", "x")], names=["first", "second"]
),
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_types(
self, multiindex_year_month_day_dataframe_random_data
):
# GH#403
ymd = multiindex_year_month_day_dataframe_random_data
ymd["E"] = "foo"
ymd["F"] = 2
unstacked = ymd.unstack("month")
assert unstacked["A", 1].dtype == np.float64
assert unstacked["E", 1].dtype == np.object_
assert unstacked["F", 1].dtype == np.float64
def test_unstack_group_index_overflow(self):
codes = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(
levels=[level] * 8 + [[0, 1]],
codes=[codes] * 8 + [np.arange(2).repeat(500)],
)
s = Series(np.arange(1000), index=index)
result = s.unstack()
assert result.shape == (500, 2)
# test roundtrip
stacked = result.stack()
tm.assert_series_equal(s, stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(
levels=[[0, 1]] + [level] * 8,
codes=[np.arange(2).repeat(500)] + [codes] * 8,
)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
assert result.shape == (500, 2)
# put it in middle
index = MultiIndex(
levels=[level] * 4 + [[0, 1]] + [level] * 4,
codes=([codes] * 4 + [np.arange(2).repeat(500)] + [codes] * 4),
)
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
assert result.shape == (500, 2)
def test_unstack_with_missing_int_cast_to_float(self, using_array_manager):
# https://github.com/pandas-dev/pandas/issues/37115
df = DataFrame(
{
"a": ["A", "A", "B"],
"b": ["ca", "cb", "cb"],
"v": [10] * 3,
}
).set_index(["a", "b"])
# add another int column to get 2 blocks
df["is_"] = 1
if not using_array_manager:
assert len(df._mgr.blocks) == 2
result = df.unstack("b")
result[("is_", "ca")] = result[("is_", "ca")].fillna(0)
expected = DataFrame(
[[10.0, 10.0, 1.0, 1.0], [np.nan, 10.0, 0.0, 1.0]],
index=Index(["A", "B"], dtype="object", name="a"),
columns=MultiIndex.from_tuples(
[("v", "ca"), ("v", "cb"), ("is_", "ca"), ("is_", "cb")],
names=[None, "b"],
),
)
if using_array_manager:
# INFO(ArrayManager) with ArrayManager preserve dtype where possible
expected[("v", "cb")] = expected[("v", "cb")].astype("int64")
expected[("is_", "cb")] = expected[("is_", "cb")].astype("int64")
tm.assert_frame_equal(result, expected)
def test_unstack_with_level_has_nan(self):
# GH 37510
df1 = DataFrame(
{
"L1": [1, 2, 3, 4],
"L2": [3, 4, 1, 2],
"L3": [1, 1, 1, 1],
"x": [1, 2, 3, 4],
}
)
df1 = df1.set_index(["L1", "L2", "L3"])
new_levels = ["n1", "n2", "n3", None]
df1.index = df1.index.set_levels(levels=new_levels, level="L1")
df1.index = df1.index.set_levels(levels=new_levels, level="L2")
result = df1.unstack("L3")[("x", 1)].sort_index().index
expected = MultiIndex(
levels=[["n1", "n2", "n3", None], ["n1", "n2", "n3", None]],
codes=[[0, 1, 2, 3], [2, 3, 0, 1]],
names=["L1", "L2"],
)
tm.assert_index_equal(result, expected)
def test_stack_nan_in_multiindex_columns(self):
# GH#39481
df = DataFrame(
np.zeros([1, 5]),
columns=MultiIndex.from_tuples(
[
(0, None, None),
(0, 2, 0),
(0, 2, 1),
(0, 3, 0),
(0, 3, 1),
],
),
)
result = df.stack(2)
expected = DataFrame(
[[0.0, np.nan, np.nan], [np.nan, 0.0, 0.0], [np.nan, 0.0, 0.0]],
index=Index([(0, None), (0, 0), (0, 1)]),
columns=Index([(0, None), (0, 2), (0, 3)]),
)
tm.assert_frame_equal(result, expected)
def test_multi_level_stack_categorical(self):
# GH 15239
midx = MultiIndex.from_arrays(
[
["A"] * 2 + ["B"] * 2,
pd.Categorical(list("abab")),
pd.Categorical(list("ccdd")),
]
)
df = DataFrame(np.arange(8).reshape(2, 4), columns=midx)
result = df.stack([1, 2])
expected = DataFrame(
[
[0, np.nan],
[np.nan, 2],
[1, np.nan],
[np.nan, 3],
[4, np.nan],
[np.nan, 6],
[5, np.nan],
[np.nan, 7],
],
columns=["A", "B"],
index=MultiIndex.from_arrays(
[
[0] * 4 + [1] * 4,
pd.Categorical(list("aabbaabb")),
pd.Categorical(list("cdcdcdcd")),
]
),
)
tm.assert_frame_equal(result, expected)
def test_stack_nan_level(self):
# GH 9406
df_nan = DataFrame(
np.arange(4).reshape(2, 2),
columns=MultiIndex.from_tuples(
[("A", np.nan), ("B", "b")], names=["Upper", "Lower"]
),
index=Index([0, 1], name="Num"),
dtype=np.float64,
)
result = df_nan.stack()
expected = DataFrame(
[[0.0, np.nan], [np.nan, 1], [2.0, np.nan], [np.nan, 3.0]],
columns=Index(["A", "B"], name="Upper"),
index=MultiIndex.from_tuples(
[(0, np.nan), (0, "b"), (1, np.nan), (1, "b")], names=["Num", "Lower"]
),
)
tm.assert_frame_equal(result, expected)
def test_unstack_categorical_columns(self):
# GH 14018
idx = MultiIndex.from_product([["A"], [0, 1]])
df = DataFrame({"cat": pd.Categorical(["a", "b"])}, index=idx)
result = df.unstack()
expected = DataFrame(
{
0: pd.Categorical(["a"], categories=["a", "b"]),
1: pd.Categorical(["b"], categories=["a", "b"]),
},
index=["A"],
)
expected.columns = MultiIndex.from_tuples([("cat", 0), ("cat", 1)])
tm.assert_frame_equal(result, expected)
| bsd-3-clause | 6,102,745,059,497,143,000 | 34.044487 | 88 | 0.485443 | false |
polysquare/cmake-module-common | conanfile.py | 1 | 1305 | from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.12"
class CMakeModuleCommonConan(ConanFile):
name = "cmake-module-common"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
url = "http://github.com/polysquare/cmake-module-common"
license = "MIT"
requires = ("cmake-unit/master@smspillaz/cmake-unit",
"cmake-linter-cmake/master@smspillaz/cmake-linter-cmake",
"style-linter-cmake/master@smspillaz/style-linter-cmake")
options = {
"dev": [True, False]
}
default_options = "dev=False"
def source(self):
zip_name = "cmake-module-common.zip"
download("https://github.com/polysquare/"
"cmake-module-common/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="Find*.cmake",
dst="",
src="cmake-module-common-" + VERSION,
keep_path=True)
self.copy(pattern="*.cmake",
dst="cmake/cmake-module-common",
src="cmake-module-common-" + VERSION,
keep_path=True)
| mit | 8,907,373,018,090,672,000 | 32.461538 | 73 | 0.574713 | false |
Yangqing/caffe2 | caffe2/python/operator_test/ceil_op_test.py | 1 | 1592 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import hypothesis.strategies as st
import caffe2.python.hypothesis_test_util as hu
import numpy as np
import unittest
class TestCeil(hu.HypothesisTestCase):
@given(X=hu.tensor(),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_ceil(self, X, gc, dc, engine):
op = core.CreateOperator("Ceil", ["X"], ["Y"], engine=engine)
def ceil_ref(X):
return (np.ceil(X),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=ceil_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -2,875,960,689,764,800,000 | 29.615385 | 78 | 0.638191 | false |
abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv4_telnet_cfg.py | 1 | 5518 | """ Cisco_IOS_XR_ipv4_telnet_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv4\-telnet package configuration.
This module contains definitions
for the following management objects\:
ipv6\-telnet\: IPv6 telnet configuration
ipv4\-telnet\: ipv4 telnet
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class Ipv6Telnet(object):
"""
IPv6 telnet configuration
.. attribute:: client
Telnet client configuration
**type**\: :py:class:`Client <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_telnet_cfg.Ipv6Telnet.Client>`
"""
_prefix = 'ipv4-telnet-cfg'
_revision = '2015-11-09'
def __init__(self):
self.client = Ipv6Telnet.Client()
self.client.parent = self
class Client(object):
"""
Telnet client configuration
.. attribute:: source_interface
Source interface for telnet sessions
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
"""
_prefix = 'ipv4-telnet-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.source_interface = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-telnet-cfg:ipv6-telnet/Cisco-IOS-XR-ipv4-telnet-cfg:client'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.source_interface is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_cfg as meta
return meta._meta_table['Ipv6Telnet.Client']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-telnet-cfg:ipv6-telnet'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.client is not None and self.client._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_cfg as meta
return meta._meta_table['Ipv6Telnet']['meta_info']
class Ipv4Telnet(object):
"""
ipv4 telnet
.. attribute:: client
Telnet client configuration
**type**\: :py:class:`Client <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_telnet_cfg.Ipv4Telnet.Client>`
"""
_prefix = 'ipv4-telnet-cfg'
_revision = '2015-11-09'
def __init__(self):
self.client = Ipv4Telnet.Client()
self.client.parent = self
class Client(object):
"""
Telnet client configuration
.. attribute:: source_interface
Source interface for telnet sessions
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
"""
_prefix = 'ipv4-telnet-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.source_interface = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-telnet-cfg:ipv4-telnet/Cisco-IOS-XR-ipv4-telnet-cfg:client'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.source_interface is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_cfg as meta
return meta._meta_table['Ipv4Telnet.Client']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-telnet-cfg:ipv4-telnet'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.client is not None and self.client._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_cfg as meta
return meta._meta_table['Ipv4Telnet']['meta_info']
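# A minimal usage sketch of these generated bindings (not part of the original
# module; the interface name and the service-provider step are illustrative
# assumptions, not values taken from this file):
#
#   telnet_cfg = Ipv4Telnet()
#   telnet_cfg.client.source_interface = 'Loopback0'
#   # the populated object would then typically be encoded and pushed to a
#   # device through a YDK service provider (e.g. CRUDService with a
#   # NetconfServiceProvider)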
| apache-2.0 | 1,648,462,373,414,770,000 | 26.04902 | 321 | 0.546756 | false |
Marcdnd/electrum-cesc | plugins/qrscanner.py | 1 | 13378 | from electrum_cesc.util import print_error
from urlparse import urlparse, parse_qs
from PyQt4.QtGui import QPushButton, QMessageBox, QDialog, QVBoxLayout, QHBoxLayout, QGridLayout, QLabel, QLineEdit, QComboBox
from PyQt4.QtCore import Qt
from electrum_cesc.i18n import _
import re
import os
from electrum_cesc import Transaction
from electrum_cesc.bitcoin import MIN_RELAY_TX_FEE, is_valid
from electrum_cesc_gui.qt.qrcodewidget import QRCodeWidget
from electrum_cesc import bmp
from electrum_cesc_gui.qt import HelpButton, EnterButton
import json
try:
import zbar
except ImportError:
zbar = None
from electrum_cesc import BasePlugin
class Plugin(BasePlugin):
def fullname(self): return 'QR scans'
def description(self): return "QR Scans.\nInstall the zbar package (http://zbar.sourceforge.net/download.html) to enable this plugin"
def __init__(self, gui, name):
BasePlugin.__init__(self, gui, name)
self._is_available = self._init()
def _init(self):
if not zbar:
return False
try:
proc = zbar.Processor()
proc.init(video_device=self.video_device())
except zbar.SystemError:
# Cannot open video device
pass
#return False
return True
def load_wallet(self, wallet):
b = QPushButton(_("Scan QR code"))
b.clicked.connect(self.fill_from_qr)
self.send_tab_grid.addWidget(b, 1, 5)
b2 = QPushButton(_("Scan TxQR"))
b2.clicked.connect(self.read_raw_qr)
if not wallet.seed:
b3 = QPushButton(_("Show unsigned TxQR"))
b3.clicked.connect(self.show_raw_qr)
self.send_tab_grid.addWidget(b3, 7, 1)
self.send_tab_grid.addWidget(b2, 7, 2)
else:
self.send_tab_grid.addWidget(b2, 7, 1)
def is_available(self):
return self._is_available
def create_send_tab(self, grid):
self.send_tab_grid = grid
def scan_qr(self):
proc = zbar.Processor()
try:
proc.init(video_device=self.video_device())
except zbar.SystemError, e:
            QMessageBox.warning(self.gui.main_window, _('Error'), str(e), _('OK'))
return
proc.visible = True
while True:
try:
proc.process_one()
except Exception:
# User closed the preview window
return {}
for r in proc.results:
if str(r.type) != 'QRCODE':
continue
return r.data
def show_raw_qr(self):
r = unicode( self.gui.main_window.payto_e.text() )
r = r.strip()
# label or alias, with address in brackets
m = re.match('(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>', r)
to_address = m.group(2) if m else r
if not is_valid(to_address):
QMessageBox.warning(self.gui.main_window, _('Error'), _('Invalid Cryptoescudo Address') + ':\n' + to_address, _('OK'))
return
try:
amount = self.gui.main_window.read_amount(unicode( self.gui.main_window.amount_e.text()))
except Exception:
QMessageBox.warning(self.gui.main_window, _('Error'), _('Invalid Amount'), _('OK'))
return
try:
fee = self.gui.main_window.read_amount(unicode( self.gui.main_window.fee_e.text()))
except Exception:
QMessageBox.warning(self.gui.main_window, _('Error'), _('Invalid Fee'), _('OK'))
return
try:
tx = self.gui.main_window.wallet.mktx( [(to_address, amount)], None, fee)
except Exception as e:
self.gui.main_window.show_message(str(e))
return
if fee < tx.required_fee(self.gui.main_window.wallet.verifier):
QMessageBox.warning(self.gui.main_window, _('Error'), _("This transaction requires a higher fee, or it will not be propagated by the network."), _('OK'))
return
try:
out = {
"hex" : tx.hash(),
"complete" : "false"
}
input_info = []
except Exception as e:
self.gui.main_window.show_message(str(e))
try:
json_text = json.dumps(tx.as_dict()).replace(' ', '')
self.show_tx_qrcode(json_text, 'Unsigned Transaction')
except Exception as e:
self.gui.main_window.show_message(str(e))
def show_tx_qrcode(self, data, title):
if not data: return
d = QDialog(self.gui.main_window)
d.setModal(1)
d.setWindowTitle(title)
d.setMinimumSize(250, 525)
vbox = QVBoxLayout()
qrw = QRCodeWidget(data)
vbox.addWidget(qrw, 0)
hbox = QHBoxLayout()
hbox.addStretch(1)
def print_qr(self):
filename = "qrcode.bmp"
            bmp.save_qrcode(qrw.qr, filename)
QMessageBox.information(None, _('Message'), _("QR code saved to file") + " " + filename, _('OK'))
b = QPushButton(_("Save"))
hbox.addWidget(b)
b.clicked.connect(print_qr)
b = QPushButton(_("Close"))
hbox.addWidget(b)
b.clicked.connect(d.accept)
b.setDefault(True)
vbox.addLayout(hbox, 1)
d.setLayout(vbox)
d.exec_()
def read_raw_qr(self):
qrcode = self.scan_qr()
if qrcode:
tx = self.gui.main_window.tx_from_text(qrcode)
if tx:
self.create_transaction_details_window(tx)
def create_transaction_details_window(self, tx):
dialog = QDialog(self.gui.main_window)
dialog.setMinimumWidth(500)
dialog.setWindowTitle(_('Process Offline transaction'))
dialog.setModal(1)
l = QGridLayout()
dialog.setLayout(l)
l.addWidget(QLabel(_("Transaction status:")), 3,0)
l.addWidget(QLabel(_("Actions")), 4,0)
if tx.is_complete == False:
l.addWidget(QLabel(_("Unsigned")), 3,1)
if self.gui.main_window.wallet.seed :
b = QPushButton("Sign transaction")
b.clicked.connect(lambda: self.sign_raw_transaction(tx, tx.inputs, dialog))
l.addWidget(b, 4, 1)
else:
l.addWidget(QLabel(_("Wallet is de-seeded, can't sign.")), 4,1)
else:
l.addWidget(QLabel(_("Signed")), 3,1)
b = QPushButton("Broadcast transaction")
def broadcast(tx):
result, result_message = self.gui.main_window.wallet.sendtx( tx )
if result:
self.gui.main_window.show_message(_("Transaction successfully sent:")+' %s' % (result_message))
if dialog:
dialog.done(0)
else:
self.gui.main_window.show_message(_("There was a problem sending your transaction:") + '\n %s' % (result_message))
b.clicked.connect(lambda: broadcast( tx ))
l.addWidget(b,4,1)
closeButton = QPushButton(_("Close"))
closeButton.clicked.connect(lambda: dialog.done(0))
l.addWidget(closeButton, 4,2)
dialog.exec_()
def do_protect(self, func, args):
if self.gui.main_window.wallet.use_encryption:
password = self.gui.main_window.password_dialog()
if not password:
return
else:
password = None
if args != (False,):
args = (self,) + args + (password,)
else:
args = (self,password)
apply( func, args)
def protected(func):
return lambda s, *args: s.do_protect(func, args)
@protected
def sign_raw_transaction(self, tx, input_info, dialog ="", password = ""):
try:
self.gui.main_window.wallet.signrawtransaction(tx, input_info, [], password)
txtext = json.dumps(tx.as_dict()).replace(' ', '')
self.show_tx_qrcode(txtext, 'Signed Transaction')
except Exception as e:
self.gui.main_window.show_message(str(e))
def fill_from_qr(self):
qrcode = parse_uri(self.scan_qr())
if not qrcode:
return
if 'address' in qrcode:
self.gui.main_window.payto_e.setText(qrcode['address'])
if 'amount' in qrcode:
self.gui.main_window.amount_e.setText(str(qrcode['amount']))
if 'label' in qrcode:
self.gui.main_window.message_e.setText(qrcode['label'])
if 'message' in qrcode:
self.gui.main_window.message_e.setText("%s (%s)" % (self.gui.main_window.message_e.text(), qrcode['message']))
def video_device(self):
device = self.config.get("video_device", "default")
if device == 'default':
device = ''
return device
def requires_settings(self):
return True
def settings_widget(self, window):
return EnterButton(_('Settings'), self.settings_dialog)
def _find_system_cameras(self):
device_root = "/sys/class/video4linux"
devices = {} # Name -> device
if os.path.exists(device_root):
for device in os.listdir(device_root):
name = open(os.path.join(device_root, device, 'name')).read()
devices[name] = os.path.join("/dev",device)
return devices
def settings_dialog(self):
system_cameras = self._find_system_cameras()
d = QDialog()
layout = QGridLayout(d)
layout.addWidget(QLabel("Choose a video device:"),0,0)
# Create a combo box with the available video devices:
combo = QComboBox()
# on change trigger for video device selection, makes the
# manual device selection only appear when needed:
def on_change(x):
combo_text = str(combo.itemText(x))
combo_data = combo.itemData(x)
if combo_text == "Manually specify a device":
custom_device_label.setVisible(True)
self.video_device_edit.setVisible(True)
if self.config.get("video_device") == "default":
self.video_device_edit.setText("")
else:
self.video_device_edit.setText(self.config.get("video_device"))
else:
custom_device_label.setVisible(False)
self.video_device_edit.setVisible(False)
self.video_device_edit.setText(combo_data.toString())
# on save trigger for the video device selection window,
# stores the chosen video device on close.
def on_save():
device = str(self.video_device_edit.text())
self.config.set_key("video_device", device)
d.accept()
custom_device_label = QLabel("Video device: ")
custom_device_label.setVisible(False)
layout.addWidget(custom_device_label,1,0)
self.video_device_edit = QLineEdit()
self.video_device_edit.setVisible(False)
layout.addWidget(self.video_device_edit, 1,1,2,2)
combo.currentIndexChanged.connect(on_change)
combo.addItem("Default","default")
for camera, device in system_cameras.items():
combo.addItem(camera, device)
combo.addItem("Manually specify a device",self.config.get("video_device"))
# Populate the previously chosen device:
index = combo.findData(self.config.get("video_device"))
combo.setCurrentIndex(index)
layout.addWidget(combo,0,1)
self.accept = QPushButton(_("Done"))
self.accept.clicked.connect(on_save)
layout.addWidget(self.accept,4,2)
if d.exec_():
return True
else:
return False
def parse_uri(uri):
if ':' not in uri:
# It's just an address (not BIP21)
return {'address': uri}
if '//' not in uri:
# Workaround for urlparse, it don't handle bitcoin: URI properly
uri = uri.replace(':', '://')
uri = urlparse(uri)
result = {'address': uri.netloc}
if uri.query.startswith('?'):
params = parse_qs(uri.query[1:])
else:
params = parse_qs(uri.query)
for k,v in params.items():
if k in ('amount', 'label', 'message'):
result[k] = v[0]
return result
if __name__ == '__main__':
# Run some tests
assert(parse_uri('LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx') ==
{'address': 'LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx'})
assert(parse_uri('cryptoescudo://LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx') ==
{'address': 'LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx'})
assert(parse_uri('cryptoescudo:LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx') ==
{'address': 'LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx'})
assert(parse_uri('cryptoescudo:LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx?amount=10') ==
{'amount': '10', 'address': 'LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx'})
assert(parse_uri('cryptoescudo:LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx?amount=10&label=devfund&message=Donation%20to%20the%20dev%20fund') ==
{'amount': '10', 'label': 'devfund', 'message': 'Donation to the dev fund', 'address': 'LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx'})
| mit | 2,894,233,677,867,080,000 | 33.658031 | 165 | 0.5746 | false |
OptimalPayments/Python_SDK | src/PythonNetBanxSDK/ThreeDSecure/EnrollmentChecks.py | 1 | 3265 | '''
Created on 18-Apr-2016
@author: Asawari.Vaidya
'''
from PythonNetBanxSDK.CardPayments.Card import Card
from PythonNetBanxSDK.common.DomainObject import DomainObject
from PythonNetBanxSDK.common.Error import Error
from PythonNetBanxSDK.common.Link import Link
class EnrollmentChecks(DomainObject):
def __init__(self, obj):
'''
Constructor
'''
# Handler dictionary
handler = dict()
handler['card'] = self.card
handler['links'] = self.links
handler['error'] = self.error
if obj is not None:
self.setProperties(obj, handler=handler)
else:
pass
'''
Property Id
'''
def id(self,id_):
self.__dict__['id'] = id_
'''
Property Merchant Reference Number
'''
def merchantRefNum(self,merchant_ref_num):
self.__dict__['merchantRefNum']= merchant_ref_num
'''
Property Amount
'''
def amount(self,amount):
self.__dict__['amount'] = amount
'''
Property Currency
'''
def currency(self, currency):
self.__dict__['currency'] = currency
'''
Property Card
@param: Card Object
'''
def card(self, card):
if isinstance(card, Card):
self.__dict__['card'] = card
else:
p = Card(card)
self.__dict__['card'] = p
'''
Property Customer Ip
'''
def customerIp(self,customer_ip):
self.__dict__['customerIp'] = customer_ip
'''
Property User Agent
'''
def userAgent(self, user_agent):
self.__dict__['userAgent'] = user_agent
'''
Property Accept Header
'''
def acceptHeader(self, accept_header):
self.__dict__['acceptHeader'] = accept_header
'''
Property Merchant Uri
'''
def merchantUrl(self, merchant_uri):
self.__dict__['merchantUrl'] = merchant_uri
'''
Property Txn Time
'''
def txnTime(self, txn_time):
self.__dict__['txnTime'] = txn_time
'''
Property status
'''
def status(self, status):
self.__dict__['status'] = status
'''
Property ACS URL
'''
def acsURL(self, acs_url):
self.__dict__['acsURL'] = acs_url
'''
Property paReq
'''
def paReq(self, paReq):
self.__dict__['paReq'] = paReq
'''
Property eci
'''
def eci(self, eci):
self.__dict__['eci'] = eci
'''
Property threeDEnrollment
'''
def threeDEnrollment(self, three_d_enrollment):
self.__dict__['threeDEnrollment'] = three_d_enrollment
'''
Property Link
@param: Link Object, List of Link Objects
'''
def links(self, links):
if isinstance(links, Link):
l = Link(links)
self.__dict__['links'] = l
else:
for count in range(0, links.__len__()):
l = Link(links[count])
self.__dict__.setdefault('links', []).append(l)
'''
Property Error
@param: Error Object
'''
def error(self, error):
e = Error(error)
self.__dict__['error'] = e | mit | 6,267,941,849,149,330,000 | 21.680556 | 63 | 0.515773 | false |
Shatnerz/ringplus | ringplus/auth.py | 1 | 4820 | """OAuth2 handlers and some utility functions for RingPlus."""
from __future__ import print_function
import requests
from requests_oauthlib import OAuth2, OAuth2Session
from bs4 import BeautifulSoup
class OAuthHandler(object):
"""OAuth Authentication Handler.
OAuthHandler is used to simplify the OAuth2 authentication process.
All authentication into the RingPlus API must been done using OAuth 2.0
over HTTPS. It allows applications to access user details without needing
their password, allows limiting access to only what the application
requires, and can be revoked by users at any time.
All application developers need to register their application by visiting
the Application tab on their Settings page. A registered application is
assigned a Client ID and Client Secret. Your Client Secret should not be
shared with anyone.
Using your Client ID and Secret, you will be able to get an Authorization
Token for a user, and make requests to the API for their data.
"""
AUTHORIZATION_BASE_URL = 'https://my.ringplus.net/oauth/authorize'
TOKEN_URL = 'https://my.ringplus.net/oauth/token'
def __init__(self, client_id, client_secret, redirect_uri):
"""OAuthHandler instance contructor.
Args:
client_id: Client ID associated with the app.
client_secret: Client secret.
redirect_uri: The redirect URI exactly as listed on RingPlus.
"""
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.access_token = None
self.oauth = OAuth2Session(client_id, redirect_uri=redirect_uri)
def get_authorization_url(self, **kwargs):
"""Returns the authorization URL to redirect users."""
response = self.oauth.authorization_url(self.AUTHORIZATION_BASE_URL,
**kwargs)
authorization_url, state = response
return authorization_url
def fetch_token(self, authorization_response):
"""Use the authorization response url to fetch a token.
Returns:
dict: A dictionary representing the token.
"""
token = self.oauth.fetch_token(
self.TOKEN_URL,
authorization_response=authorization_response,
client_secret=self.client_secret)
return token
def refresh_token(self):
"""Refresh the current access token."""
data = {'grant_type': 'refresh_token',
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.access_token['refresh_token']}
post = requests.post(self.TOKEN_URL, data=data)
self.access_token = post.json()
def login(self, username, password, **kwargs):
"""Hackish method to sign into RingPlus without going to site.
This sets the access token for the OAutherHandler instance.
Args:
username: Username used to login to Ring Plus (likely your email).
password: The password used to login to Ring Plus.
"""
session = requests.Session()
params = {'response_type': 'code',
'client_id': self.client_id,
'redirect_uri': self.redirect_uri}
# Go to authorization url and get the necessary details
r1 = session.get(self.get_authorization_url(**kwargs), params=params)
data = self._get_input_data_from_html(r1.content)
self._set_username_and_password(data, username, password)
# Login with username and password
r2 = session.post(r1.url, data=data)
r2.raise_for_status()
self.access_token = self.fetch_token(r2.url)
def get_account_id(self):
"""Return the account id associated with the access token."""
raise NotImplementedError
def get_user_id(self):
"""Return the first user id associated with the access token."""
raise NotImplementedError
def apply_auth(self):
return OAuth2(self.client_id, token=self.access_token)
def _get_input_data_from_html(self, html):
"""Return the params needed to login from html."""
soup = BeautifulSoup(html, 'html.parser')
input_tags = soup.find_all('input')
# Get the data from the tags
data = {}
for tag in input_tags:
data[tag.attrs['name']] = tag.attrs.get('value', None)
return data
def _set_username_and_password(self, data, username, password):
"""Adds username and password to input dictionary."""
for key in data.keys():
if 'email' in key:
data[key] = username
if 'password' in key:
data[key] = password
| mit | -689,304,542,838,251,500 | 36.952756 | 78 | 0.636929 | false |
jimsize/PySolFC | pysollib/games/wavemotion.py | 1 | 4084 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
# imports
# PySol imports
from pysollib.gamedb import registerGame, GameInfo, GI
from pysollib.game import Game
from pysollib.layout import Layout
from pysollib.util import ANY_RANK
from pysollib.stack import \
AC_RowStack, \
InitialDealTalonStack, \
OpenStack, \
isAlternateColorSequence, \
isSameSuitSequence, \
SS_RowStack
# ************************************************************************
# * Wave Motion
# ************************************************************************
class WaveMotion(Game):
RowStack_Class = SS_RowStack
#
# game layout
#
def createGame(self, rows=8, reserves=8, playcards=7):
# create layout
l, s = Layout(self), self.s
# set window
max_rows = max(rows, reserves)
w, h = l.XM + max_rows*l.XS, l.YM + 2*l.YS + (12+playcards)*l.YOFFSET
self.setSize(w, h)
# create stacks
x, y = l.XM + (max_rows-rows)*l.XS//2, l.YM
for i in range(rows):
stack = self.RowStack_Class(x, y, self, base_rank=ANY_RANK)
stack.getBottomImage = stack._getReserveBottomImage
s.rows.append(stack)
x += l.XS
x, y = l.XM + (max_rows-reserves)*l.XS//2, l.YM+l.YS+12*l.YOFFSET
for i in range(reserves):
stack = OpenStack(x, y, self, max_accept=0)
s.reserves.append(stack)
stack.CARD_XOFFSET, stack.CARD_YOFFSET = 0, l.YOFFSET
x += l.XS
s.talon = InitialDealTalonStack(l.XM, l.YM, self)
# default
l.defaultAll()
#
# game overrides
#
def startGame(self):
for i in range(5):
self.s.talon.dealRow(rows=self.s.reserves, frames=0)
self.startDealSample()
self.s.talon.dealRow(rows=self.s.reserves)
self.s.talon.dealRow(rows=self.s.reserves[:4])
def isGameWon(self):
for s in self.s.rows:
if s.cards:
if len(s.cards) != 13 or not isSameSuitSequence(s.cards):
return False
return True
shallHighlightMatch = Game._shallHighlightMatch_SS
# ************************************************************************
# * Flourish
# ************************************************************************
class Flourish(WaveMotion):
RowStack_Class = AC_RowStack
def createGame(self):
WaveMotion.createGame(self, rows=7, reserves=8, playcards=7)
def isGameWon(self):
for s in self.s.rows:
if s.cards:
if len(s.cards) != 13 or not isAlternateColorSequence(s.cards):
return False
return True
shallHighlightMatch = Game._shallHighlightMatch_AC
# register the game
registerGame(GameInfo(314, WaveMotion, "Wave Motion",
GI.GT_1DECK_TYPE | GI.GT_OPEN, 1, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(753, Flourish, "Flourish",
GI.GT_1DECK_TYPE | GI.GT_OPEN | GI.GT_ORIGINAL, 1, 0,
GI.SL_MOSTLY_SKILL))
| gpl-3.0 | 563,173,693,865,608,500 | 31.15748 | 79 | 0.55142 | false |
Mathew/psychoanalysis | psychoanalysis/apps/pa/migrations/0002_auto__del_participant.py | 1 | 7476 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Participant'
db.delete_table(u'pa_participant')
# Removing M2M table for field user on 'Participant'
db.delete_table('pa_participant_user')
# Adding M2M table for field user on 'ReportingPeriod'
db.create_table(u'pa_reportingperiod_user', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('reportingperiod', models.ForeignKey(orm[u'pa.reportingperiod'], null=False)),
('user', models.ForeignKey(orm[u'pa.user'], null=False))
))
db.create_unique(u'pa_reportingperiod_user', ['reportingperiod_id', 'user_id'])
def backwards(self, orm):
# Adding model 'Participant'
db.create_table(u'pa_participant', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('reporting_period', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pa.ReportingPeriod'])),
))
db.send_create_signal(u'pa', ['Participant'])
# Adding M2M table for field user on 'Participant'
db.create_table(u'pa_participant_user', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('participant', models.ForeignKey(orm[u'pa.participant'], null=False)),
('user', models.ForeignKey(orm[u'pa.user'], null=False))
))
db.create_unique(u'pa_participant_user', ['participant_id', 'user_id'])
# Removing M2M table for field user on 'ReportingPeriod'
db.delete_table('pa_reportingperiod_user')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pa.activity': {
'Meta': {'object_name': 'Activity'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Category']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'pa.activityentry': {
'Meta': {'object_name': 'ActivityEntry'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Activity']"}),
'day': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'hour': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.User']"})
},
u'pa.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'grouping': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reporting_period': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.ReportingPeriod']"})
},
u'pa.profession': {
'Meta': {'object_name': 'Profession'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
u'pa.reportingperiod': {
'Meta': {'object_name': 'ReportingPeriod'},
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'slots_per_hour': ('django.db.models.fields.IntegerField', [], {}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pa.User']", 'symmetrical': 'False'})
},
u'pa.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pa.Profession']", 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['pa'] | mit | 6,745,956,246,311,314,000 | 60.286885 | 187 | 0.560059 | false |
plum-umd/pasket | pasket/encoder.py | 1 | 48157 | import math
import cStringIO
import os
import copy as cp
from itertools import chain, ifilter, ifilterfalse
from functools import partial
import re
import operator as op
from string import Template as T
import logging
from lib.typecheck import *
import lib.const as C
from lib.enum import enum
import util
import sample
from meta import methods, classes, class_lookup
from meta.template import Template
from meta.clazz import Clazz, find_fld, find_mtds_by_name, find_mtds_by_sig, find_base
from meta.method import Method, sig_match
from meta.field import Field
from meta.statement import Statement
import meta.statement as st
from meta.expression import Expression, typ_of_e
import meta.expression as exp
# constants regarding sketch
C.SK = enum(z=u"bit", self=u"self")
# global constants that should be placed at every sketch file
_const = u''
# among class declarations in the template
# exclude subclasses so that only the base class remains
# (will make a virtual struct representing all the classes in that hierarchy)
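# e.g., given declarations for A, B < A, and C < B, only A survives here;
# to_v_struct below then folds B's and C's members into A's struct
# (behavior inferred from the code that follows)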
@takes(list_of(Clazz))
@returns(list_of(Clazz))
def rm_subs(clss):
# { cname: Clazz(cname, ...), ... }
decls = { cls.name: cls for cls in clss }
# remove subclasses
for cname in decls.keys():
if util.is_collection(cname): continue
cls = class_lookup(cname)
if not cls.is_class: continue
if cls.is_aux: continue # virtual relations; don't remove sub classes
for sub in cls.subs:
if sub.name in decls:
logging.debug("{} < {}".format(sub.name, cname))
del decls[sub.name]
for sup in util.ffilter([cls.sup]):
if sup in decls and cname in decls:
logging.debug("{} < {}".format(cname, sup))
del decls[cname]
return decls.values()
# convert the given type name into a newer one
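# e.g., boolean -> bit, byte/short/long -> int, Character -> char,
# Map<K,V> -> Map_K_V, and T[] -> T[...] (see the cases handled below)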
_ty = {} # { tname : new_tname }
@takes(dict_of(unicode, unicode))
@returns(nothing)
def add_ty_map(m):
global _ty
for key in m: _ty[key] = m[key]
@takes(unicode)
@returns(unicode)
def trans_ty(tname):
_tname = util.sanitize_ty(tname.strip())
array_regex = r"([^ \[\]]+)((\[\])+)"
m = re.match(array_regex, _tname)
global _ty
r_ty = _tname
# to avoid primitive types that Sketch doesn't support
if _tname == C.J.z: r_ty = C.SK.z
elif _tname in [C.J.b, C.J.s, C.J.j]: r_ty = C.J.i
# unboxing primitive Classes, e.g., Character -> char
elif _tname in C.autoboxing: r_ty = util.unboxing(_tname)
# TODO: parameterize len?
elif _tname in [C.J.c+"[]"]: r_ty = u"{}[51]".format(C.J.c)
elif _tname in [C.J.B, C.J.S, C.J.J, C.J.I]: r_ty = C.J.i
# array bounds
elif m:
r_ty = trans_ty(m.group(1)) + \
"[{}]".format(len(methods())) * len(re.findall(r"\[\]", m.group(2)))
# use memoized type conversion
elif _tname in _ty: r_ty = _ty[_tname]
# convert Java collections into an appropriate struct name
# Map<K,V> / List<T> / ... -> Map_K_V / List_T / ...
elif util.is_collection(_tname):
r_ty = '_'.join(util.of_collection(_tname))
logging.debug("{} => {}".format(_tname, r_ty))
_ty[_tname] = r_ty
return r_ty
# check whether the given type is replaced due to class hierarchy
@takes(unicode)
@returns(bool)
def is_replaced(tname):
return tname != trans_ty(tname)
# sanitize method name
# e.g., JComboBox(E[]) => JComboBox_JComboBox_E[] => JComboBox_JComboBox_Es
@takes(unicode)
@returns(unicode)
def sanitize_mname(mname):
return mname.replace("[]",'s')
# convert the given method name into a new one
# considering parameterized types (e.g., collections) and inheritances
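# e.g., List<String>.add(String) -> add_List_String_String, and an
# unresolved/ambiguous Foo.bar falls back to bar_Foo (see the fallback below)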
_mtds = {} # { cname_mname_... : new_mname }
@takes(unicode, unicode, list_of(unicode))
@returns(unicode)
def trans_mname(cname, mname, arg_typs=[]):
global _mtds
r_mtd = mname
mid = u'_'.join([cname, mname] + arg_typs)
# use memoized method name conversion
if mid in _mtds:
return _mtds[mid]
# methods of Java collections
elif util.is_collection(cname):
_arg_typs = map(trans_ty, arg_typs)
r_mtd = u'_'.join([mname, trans_ty(cname)] + _arg_typs)
else:
if is_replaced(cname):
tr_name = trans_ty(cname)
cls = class_lookup(tr_name)
if cls and cls.is_aux: cname = tr_name
mtds = find_mtds_by_sig(cname, mname, arg_typs)
if mtds and 1 == len(mtds):
r_mtd = unicode(repr(mtds[0]))
else: # ambiguous or not found
r_mtd = '_'.join([mname, util.sanitize_ty(cname)])
r_mtd = sanitize_mname(r_mtd)
_mtds[mid] = r_mtd
return r_mtd
# basic Java libraries
@takes(nothing)
@returns(unicode)
def trans_lib():
return u''
# to avoid duplicate structs for collections
_collections = set([])
# Java collections -> C-style struct (along with basic functions)
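# e.g., List<T> becomes: struct List_T { int idx; T[S] elts; }
# together with add/remove/get/isEmpty helpers (Map/Stack/Queue analogues below)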
@takes(Clazz)
@returns(unicode)
def col_to_struct(cls):
buf = cStringIO.StringIO()
cname = cls.name
sname = trans_ty(cname)
global _collections
if sname in _collections:
logging.debug("collection: {} (duplicated)".format(cname))
return u''
else:
_collections.add(sname)
logging.debug("collection: " + cname)
buf.write("struct ${sname} {\n int idx;\n")
if C.J.MAP in cname:
_, k, v = util.of_collection(cname)
k = trans_ty(k)
v = trans_ty(v)
# Map<K,V> -> struct Map_K_V { int idx; K[S] key; V[S] val; }
buf.write(" ${k}[S] key;\n ${v}[S] val;\n}\n")
# Map<K,V>.containsKey -> containsKey_Map_K_V
buf.write("""
bit {} (${{sname}} map, ${{k}} k) {{
int i;
for (i = 0; map.val[i] != null && i < S; i++) {{
if (map.key[i] == k) return 1;
}}
return 0;
}}
""".format(trans_mname(cname, u"containsKey", [k])))
# Map<K,V>.get -> get_Map_K_V
buf.write("""
${{v}} {} (${{sname}} map, ${{k}} k) {{
int i;
for (i = 0; map.val[i] != null && i < S; i++) {{
if (map.key[i] == k) return map.val[i];
}}
return null;
}}
""".format(trans_mname(cname, u"get", [k])))
# Map<K,V>.put -> put_Map_K_V
buf.write("""
void {} (${{sname}} map, ${{k}} k, ${{v}} v) {{
map.key[map.idx] = k;
map.val[map.idx] = v;
map.idx = (map.idx + 1) % S;
}}
""".format(trans_mname(cname, u"put", [k, v])))
# Map<K,V>.clear -> clear_Map_K_V
if util.is_class_name(k): default_k = "null"
else: default_k = "0"
buf.write("""
void {} (${{sname}} map) {{
map.idx = 0;
for (int i = 0; i < S; i++) {{
map.key[i] = {};
map.val[i] = null;
}}
}}
""".format(trans_mname(cname, u"clear", []), default_k))
else:
collection, t = util.of_collection(cname)
t = trans_ty(t)
if C.J.QUE in collection: buf.write(" int head;\n")
# Collection<T> -> struct Collection_T { int idx; T[S] elts; }
buf.write(" ${t}[S] elts;\n}\n")
if C.J.STK in collection:
# Stack<T>.peek -> peek_Stack_T
buf.write("""
${{t}} {} (${{sname}} stk) {{
if (stk.idx == 0) return null;
${{t}} top = stk.elts[stk.idx - 1];
return top;
}}
""".format(trans_mname(cname, u"peek", [])))
# Stack<T>.push -> push_Stack_T
buf.write("""
${{t}} {} (${{sname}} stk, ${{t}} elt) {{
stk.elts[stk.idx] = elt;
stk.idx = (stk.idx + 1) % S;
return elt;
}}
""".format(trans_mname(cname, u"push", [t])))
# Stack<T>.pop -> pop_Stack_T
buf.write("""
${{t}} {} (${{sname}} stk) {{
if (stk.idx == 0) return null;
stk.idx = stk.idx - 1;
${{t}} top = stk.elts[stk.idx];
stk.elts[stk.idx] = null;
return top;
}}
""".format(trans_mname(cname, u"pop", [])))
elif C.J.QUE in collection:
# Queue<T>.add -> add_Queue_T
buf.write("""
bit {} (${{sname}} que, ${{t}} elt) {{
que.elts[que.idx] = elt;
que.idx = (que.idx + 1) % S;
return true;
}}
""".format(trans_mname(cname, u"add", [t])))
# Queue<T>.remove -> remove_Queue_T
buf.write("""
${{t}} {} (${{sname}} que) {{
if (que.head == que.idx) return null;
${{t}} top = que.elts[que.head];
que.elts[que.head] = null;
que.head = (que.head + 1) % S;
return top;
}}
""".format(trans_mname(cname, u"remove", [])))
# Queue<T>.isEmpty -> isEmpty_Queue_T
buf.write("""
bit {} (${{sname}} que) {{
return que.head == que.idx;
}}
""".format(trans_mname(cname, u"isEmpty", [])))
elif C.J.LST in collection:
# List<T>.add -> add_List_T
buf.write("""
bit {} (${{sname}} lst, ${{t}} elt) {{
lst.elts[lst.idx] = elt;
lst.idx = (lst.idx + 1) % S;
return true;
}}
""".format(trans_mname(cname, u"add", [t])))
# List<T>.remove(T) -> remove_List_T_T
buf.write("""
bit {} (${{sname}} lst, ${{t}} elt) {{
int i;
for (i = 0; lst.elts[i] != null && i < S; i++) {{
if (lst.elts[i] == elt) {{
lst.elts[i] = null;
int j;
for (j = i + 1; lst.elts[j] != null && j < lst.idx; j++) {{
lst.elts[j-1] = lst.elts[j];
}}
lst.idx = (lst.idx - 1) % S;
return true;
}}
}}
return false;
}}
""".format(trans_mname(cname, u"remove", [t])))
# List<T>.remove(int) -> remove_List_T_int
buf.write("""
${{t}} {} (${{sname}} lst, int index) {{
${{t}} res = null;
if (0 <= index && index < lst.idx) {{
res = lst.elts[index];
lst.elts[index] = null;
int i;
for (i = index + 1; lst.elts[i] != null && i < lst.idx; i++) {{
lst.elts[i-1] = lst.elts[i];
}}
lst.idx = (lst.idx - 1) % S;
}}
return res;
}}
""".format(trans_mname(cname, u"remove", [C.J.i])))
# List<T>.get -> get_List_T
buf.write("""
${{t}} {} (${{sname}} lst, int index) {{
${{t}} res = null;
if (0 <= index && index < lst.idx) {{
res = lst.elts[index];
}}
return res;
}}
""".format(trans_mname(cname, u"get", [C.J.i])))
# List<T>.isEmpty -> isEmpty_List_T
buf.write("""
bit {} (${{sname}} lst) {{
return lst.idx == 0;
}}
""".format(trans_mname(cname, u"isEmpty", [])))
return T(buf.getvalue()).safe_substitute(locals())
_flds = {} # { cname.fname : new_fname }
_s_flds = {} # { cname.fname : accessor }
# from the given base class,
# generate a virtual struct that encompasses all the classes in the hierarchy
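# e.g., for B < A, the struct for A gains an int __cid tag plus the renamed
# fields of the whole family (f1_A, f2_B, ...), and _ty/_flds/_s_flds record
# how the original names map to the new ones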
@takes(Clazz)
@returns(Clazz)
def to_v_struct(cls):
cls_v = Clazz(name=cls.name)
fld_ty = Field(clazz=cls_v, typ=C.J.i, name=u"__cid")
cls_v.flds.append(fld_ty)
global _ty, _flds, _s_flds
@takes(dict_of(unicode, Field), Clazz)
@returns(nothing)
def per_cls(sup_flds, cls):
aux_name = None
  # if this class is supposed to be replaced (due to pattern rewriting)
# apply that replacement first, and then replace that aux type as well
if not cls.is_aux and cls.name in _ty:
aux_name = _ty[cls.name]
logging.debug("{} => {}".format(aux_name, cls_v.name))
# check that aux type is already involved in this family
if aux_name not in _ty: _ty[aux_name] = cls_v.name
# keep mappings from original subclasses to the representative
# so that subclasses can refer to the representative
# e.g., for C < B < A, { B : A, C : A }
cname = util.sanitize_ty(cls.name)
if cname != cls_v.name: # exclude the root of this family
logging.debug("{} => {}".format(cname, cls_v.name))
_ty[cname] = cls_v.name
if cls.is_inner: # to handle inner class w/ outer class name
logging.debug("{} => {}".format(repr(cls), cls_v.name))
_ty[unicode(repr(cls))] = cls_v.name
# if this class implements an interface which has constants,
# then copy those constants
for itf in cls.itfs:
cls_i = class_lookup(itf)
if not cls_i or not cls_i.flds: continue
for fld in cls_i.flds:
sup_flds[fld.name] = fld
# also, keep mappings from original member fields to newer ones
# so that usage of old fields can be replaced accordingly
# e.g., for A.f1 and B.f2, { A.f1 : f1_A, B.f1 : f1_A, B.f2 : f2_B }
for sup_fld in sup_flds.keys():
fld = sup_flds[sup_fld]
fname = unicode(repr(fld))
fid = '.'.join([cname, sup_fld])
logging.debug("{} => {}".format(fid, fname))
if fld.is_static: _s_flds[fid] = fname
else: _flds[fid] = fname # { ..., B.f1 : f1_A }
cur_flds = cp.deepcopy(sup_flds) # { f1 : f1_A }
@takes(Field)
@returns(nothing)
def cp_fld(fld):
cur_flds[fld.name] = fld # { ..., f2 : f2_B }
fname = unicode(repr(fld))
fld_v = cp.deepcopy(fld)
fld_v.clazz = cls_v
fld_v.name = fname
cls_v.flds.append(fld_v)
def upd_flds(cname):
fid = '.'.join([cname, fld.name])
# if A.f1 exists and B redefines f1, then B.f1 : f1_A
# except for enum, which can (re)define its own fields
# e.g., SwingConstands.LEADING vs. GroupLayout.Alignment.LEADING
if not cls.is_enum and (fid in _s_flds or fid in _flds): return
logging.debug("{} => {}".format(fid, fname))
if fld.is_static: _s_flds[fid] = fname
else: _flds[fid] = fname # { ..., B.f2 : f2_B }
upd_flds(cname)
if aux_name: upd_flds(aux_name)
map(cp_fld, cls.flds)
# subclass relations of aux types are virtual, so do not visit further
if not cls.is_aux:
map(partial(per_cls, cur_flds), cls.subs)
per_cls({}, cls)
return cls_v
@takes(Field)
@returns(str)
def trans_fld(fld):
buf = cStringIO.StringIO()
buf.write(' '.join([trans_ty(fld.typ), fld.name]))
if fld.is_static and fld.init and \
not fld.init.has_call and not fld.init.has_str and not fld.is_aliasing:
buf.write(" = " + trans_e(None, fld.init))
buf.write(';')
return buf.getvalue()
# Java class (along with subclasses) -> C-style struct
@takes(Clazz)
@returns(str)
def to_struct(cls):
# make mappings from static fields to corresponding accessors
def gen_s_flds_accessors(cls):
s_flds = filter(op.attrgetter("is_static"), cls.flds)
global _s_flds
for fld in ifilterfalse(op.attrgetter("is_private"), s_flds):
cname = fld.clazz.name
fid = '.'.join([cname, fld.name])
fname = unicode(repr(fld))
logging.debug("{} => {}".format(fid, fname))
_s_flds[fid] = fname
cname = util.sanitize_ty(cls.name)
global _ty
# if this is an interface, merge this into another family of classes
# as long as classes that implement this interface are in the same family
if cls.is_itf:
# interface may have static constants
gen_s_flds_accessors(cls)
subss = util.flatten_classes(cls.subs, "subs")
bases = util.rm_dup(map(lambda sub: find_base(sub), subss))
# filter out interfaces that extend other interfaces, e.g., Action
base_clss, _ = util.partition(op.attrgetter("is_class"), bases)
if not base_clss:
logging.debug("no implementer of {}".format(cname))
elif len(base_clss) > 1:
logging.debug("ambiguous inheritance of {}: {}".format(cname, base_clss))
else: # len(base_clss) == 1
base = base_clss[0]
base_name = base.name
logging.debug("{} => {}".format(cname, base_name))
_ty[cname] = base_name
if cls.is_inner: # to handle inner interface w/ outer class name
logging.debug("{} => {}".format(repr(cls), base_name))
_ty[unicode(repr(cls))] = base_name
return ''
# if this is the base class having subclasses,
# make a virtual struct first
if cls.subs and not cls.is_aux:
cls = to_v_struct(cls)
cname = cls.name
# cls can be modified above, thus generate static fields accessors here
gen_s_flds_accessors(cls)
# for unique class numbering, add an identity mapping
if cname not in _ty: _ty[cname] = cname
buf = cStringIO.StringIO()
buf.write("struct " + cname + " {\n int hash;\n")
# to avoid static fields, which will be bound to a class-representing package
_, i_flds = util.partition(op.attrgetter("is_static"), cls.flds)
buf.write('\n'.join(map(trans_fld, i_flds)))
if len(i_flds) > 0: buf.write('\n')
buf.write("}\n")
return buf.getvalue()
# convert the given field name into a newer one
# only if the field belongs to a virtual representative struct
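# e.g., A.f1 -> f1_A when A's family was merged into a virtual struct;
# names that were never remapped are returned unchanged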
@takes(unicode, unicode, optional(bool))
@returns(unicode)
def trans_fname(cname, fname, is_static=False):
global _flds, _s_flds
r_fld = fname
fid = '.'.join([cname, fname])
if is_static:
if fid in _s_flds: r_fld = _s_flds[fid]
else:
if fid in _flds: r_fld = _flds[fid]
return r_fld
# collect method/field declarations in the given class and its inner classes
@takes(Clazz)
@returns(list_of((Method, Field)))
def collect_decls(cls, attr):
clss = util.flatten_classes([cls], "inners")
declss = map(op.attrgetter(attr), clss)
return util.flatten(declss)
# TODO: no longer used?
# translate class <init> into sketch's initializer with named parameters
@takes(unicode, list_of(unicode), list_of(unicode))
@returns(str)
def trans_init(cls_name, arg_typs, args):
buf = cStringIO.StringIO()
cls = class_lookup(cls_name)
if util.is_collection(cls_name) or not cls:
buf.write(trans_ty(cls_name) + "()")
elif is_replaced(cls_name):
buf.write(trans_ty(cls_name) + "(hash=nonce())")
else:
add_on = []
if args:
# NOTE: assume the order of arguments is same as that of fields
# NOTE: for missing fields, just call default constructors
# TODO: use template.sig_match
kwargs = zip(cls.flds, args)
if kwargs: assigned, _ = zip(*kwargs)
else: assigned = []
not_assigned = [fld for fld in cls.flds if fld not in assigned]
if not_assigned:
def default_init(fld):
if util.is_class_name(fld.typ):
return C.J.NEW + ' ' + trans_init(fld.typ, [], [])
else: return '0'
add_on = map(default_init, not_assigned)
# else: # means, default constructor
flds = ["hash"] + map(op.attrgetter("name"), cls.flds)
vals = ["nonce()"] + args + add_on
kwargs = map(lambda (f, v): "{}={}".format(f, v), zip(flds, vals))
buf.write('_'.join([cls_name] + arg_typs))
buf.write('(' + ", ".join(kwargs) + ')')
return buf.getvalue()
# sanitize id by removing package name
# e.g., javax.swing.SwingUtilities.invokeLater -> SwingUtilities.invokeLater
@takes(unicode)
@returns(unicode)
def sanitize_id(dot_id):
pkg, cls, mtd = util.explode_mname(dot_id)
if cls and util.is_class_name(cls) and class_lookup(cls):
clazz = class_lookup(cls)
if clazz.pkg and pkg and clazz.pkg != pkg: # to avoid u'' != None
raise Exception("wrong package", pkg, clazz.pkg)
return '.'.join([cls, mtd])
return dot_id
# need to check log conformity except for calls inside the platform
# i.e., client -> client, platform -> client or vice versa
# also discard super calls towards the platform, e.g.,
# class MyActivity extends Activity {
# ... onCreate(...) { super.onCreate(...); ... }
# }
@takes(Method, Method)
@returns(bool)
def check_logging(caller, callee):
return (caller.clazz.client or callee.clazz.client) and \
not caller.is_supercall(callee)
@takes(optional(Method), Expression)
@returns(str)
def trans_e(mtd, e):
curried = partial(trans_e, mtd)
buf = cStringIO.StringIO()
if e.kind == C.E.ANNO:
anno = e.anno
if anno.name == C.A.NEW: pass # TODO
elif anno.name == C.A.OBJ:
buf.write("retrieve_{}@log({})".format(util.sanitize_ty(anno.typ), anno.idx))
# @Compare(exps) => {| exps[0] (< | <= | == | != | >= | >) exps[1] |}
# @CompareString(exps) => exps[0].eqauls(exps[1])
elif anno.name in [C.A.CMP, C.A.CMP_STR]:
le = curried(anno.exps[0])
re = curried(anno.exps[1])
if anno.name == C.A.CMP:
buf.write("{| " + le + " (< | <= | == | != | >= | >) " + re + " |}")
else:
buf.write("{}({},{})".format(trans_mname(C.J.STR, u"equals"), le, re))
elif e.kind == C.E.GEN:
if e.es:
buf.write("{| ")
buf.write(" | ".join(map(curried, e.es)))
buf.write(" |}")
else:
buf.write(C.T.HOLE)
elif e.kind == C.E.ID:
if hasattr(e, "ty"): buf.write(trans_ty(e.ty) + ' ')
fld = None
if mtd and e.id not in mtd.param_vars:
fld = find_fld(mtd.clazz.name, e.id)
if fld: # fname -> self.new_fname (unless the field is static)
new_fname = trans_fname(fld.clazz.name, e.id, fld.is_static)
if fld.is_static:
# access to the static field inside the same class
if fld.clazz.name == mtd.clazz.name: buf.write(e.id)
# o.w., e.g., static constant in an interface, call the accessor
else: buf.write(new_fname + "()")
else: buf.write('.'.join([C.SK.self, new_fname]))
elif e.id in [C.J.THIS, C.J.SUP]: buf.write(C.SK.self)
elif util.is_str(e.id): # constant string, such as "Hello, World"
str_init = trans_mname(C.J.STR, C.J.STR, [u"char[]", C.J.i, C.J.i])
s_hash = hash(e.id) % 256 # hash string value itself
buf.write("{}(new Object(hash={}), {}, 0, {})".format(str_init, s_hash, e.id, len(e.id)))
else: buf.write(e.id)
elif e.kind == C.E.UOP:
buf.write(' '.join([e.op, curried(e.e)]))
elif e.kind == C.E.BOP:
buf.write(' '.join([curried(e.le), e.op, curried(e.re)]))
elif e.kind == C.E.DOT:
# with package names, e.g., javax.swing.SwingUtilities
if util.is_class_name(e.re.id) and class_lookup(e.re.id):
buf.write(curried(e.re))
elif e.re.id == C.J.THIS: # ClassName.this
buf.write(C.SK.self)
else:
rcv_ty = typ_of_e(mtd, e.le)
fld = find_fld(rcv_ty, e.re.id)
new_fname = trans_fname(rcv_ty, e.re.id, fld.is_static)
if fld.is_static:
# access to the static field inside the same class
if mtd and rcv_ty == mtd.clazz.name: buf.write(e.re.id)
# o.w., e.g., static constant in an interface, call the accessor
else: buf.write(new_fname + "()")
else: buf.write('.'.join([curried(e.le), new_fname]))
elif e.kind == C.E.IDX:
buf.write(curried(e.e) + '[' + curried(e.idx) + ']')
elif e.kind == C.E.NEW:
if e.e.kind == C.E.CALL:
ty = typ_of_e(mtd, e.e.f)
cls = class_lookup(ty)
if cls and cls.has_init:
arg_typs = map(partial(typ_of_e, mtd), e.e.a)
mname = trans_mname(cls.name, cls.name, arg_typs)
obj = "alloc@log({})".format(cls.id)
args = [obj] + map(unicode, map(curried, e.e.a))
buf.write("{}({})".format(mname, ", ".join(args)))
else: # collection or Object
buf.write(C.J.NEW + ' ' + trans_ty(ty) + "()")
else: # o.w., array initialization, e.g., new int[] { ... }
buf.write(str(e.init))
elif e.kind == C.E.CALL:
arg_typs = map(partial(typ_of_e, mtd), e.a)
def trans_call(callee, rcv_ty, rcv):
if callee.is_static: rcv = None
logging = None
if not util.is_collection(callee.clazz.name):
logging = str(check_logging(mtd, callee)).lower()
args = util.rm_none([rcv] + map(curried, e.a) + [logging])
mid = trans_mname(rcv_ty, callee.name, arg_typs)
return u"{}({})".format(mid, ", ".join(args))
def dynamic_dispatch(rcv_ty, rcv, acc, callee):
_dispatched = trans_call(callee, callee.clazz.name, rcv)
_guarded = "{}.__cid == {} ? {}".format(rcv, callee.clazz.id, _dispatched)
return "({} : {})".format(_guarded, acc)
if e.f.kind == C.E.DOT: # rcv.mid
rcv_ty = typ_of_e(mtd, e.f.le)
rcv = curried(e.f.le)
mname = e.f.re.id
mtd_callees = find_mtds_by_sig(rcv_ty, mname, arg_typs)
if mtd_callees and 1 < len(mtd_callees): # needs dynamic dispatch
curried_dispatch = partial(dynamic_dispatch, rcv_ty, rcv)
# TODO: use least upper bound?
default_v = util.default_value(mtd_callees[0].typ)
buf.write(reduce(curried_dispatch, mtd_callees, default_v))
elif mtd_callees and 1 == len(mtd_callees):
mtd_callee = mtd_callees[0]
buf.write(trans_call(mtd_callee, rcv_ty, rcv))
else: # unresolved, maybe library method
mid = trans_mname(rcv_ty, mname, arg_typs)
args = util.rm_none([rcv] + map(curried, e.a))
buf.write("{}({})".format(mid, ", ".join(args)))
else: # mid
mname = e.f.id
# pre-defined meta information or Sketch primitive functions
if mname in C.typ_arrays + [u"minimize"]:
mid = mname
rcv = None
args = util.rm_none([rcv] + map(curried, e.a))
buf.write("{}({})".format(mid, ", ".join(args)))
elif mname == C.J.SUP and mtd.is_init: # super(...) inside <init>
sup = class_lookup(mtd.clazz.sup)
mid = trans_mname(sup.name, sup.name, arg_typs)
rcv = C.SK.self
args = util.rm_none([rcv] + map(curried, e.a))
buf.write("{}({})".format(mid, ", ".join(args)))
else: # member methods
mtd_callees = find_mtds_by_sig(mtd.clazz.name, mname, arg_typs)
if mtd_callees and 1 < len(mtd_callees): # needs dynamic dispatch
curried_dispatch = partial(dynamic_dispatch, mtd.clazz.name, C.SK.self)
# TODO: use least upper bound?
default_v = util.default_value(mtd_callees[0].typ)
buf.write(reduce(curried_dispatch, mtd_callees, default_v))
elif mtd_callees and 1 == len(mtd_callees):
mtd_callee = mtd_callees[0]
buf.write(trans_call(mtd_callee, mtd.clazz.name, C.SK.self))
        else: # unresolved, maybe library method
          mid = trans_mname(mtd.clazz.name, mname, arg_typs)
          # no receiver was computed on this path; assume the implicit receiver
          # (previously "rcv" was read here while unbound, raising NameError)
          rcv = C.SK.self
          args = util.rm_none([rcv] + map(curried, e.a))
          buf.write("{}({})".format(mid, ", ".join(args)))
elif e.kind == C.E.CAST:
# since a family of classes is merged, simply ignore the casting
buf.write(curried(e.e))
elif e.kind == C.E.INS_OF:
ty = typ_of_e(mtd, e.ty)
cls = class_lookup(ty)
if cls:
buf.write(curried(e.e) + ".__cid == " + str(cls.id))
else:
logging.debug("unknown type: {}".format(ty))
buf.write("0")
else: buf.write(str(e))
return buf.getvalue()
@takes(Method, Statement)
@returns(str)
def trans_s(mtd, s):
curried_e = partial(trans_e, mtd)
curried_s = partial(trans_s, mtd)
buf = cStringIO.StringIO()
if s.kind == C.S.IF:
e = curried_e(s.e)
t = '\n'.join(map(curried_s, s.t))
f = '\n'.join(map(curried_s, s.f))
buf.write("if (" + e + ") {\n" + t + "\n}")
if f: buf.write("\nelse {\n" + f + "\n}")
elif s.kind == C.S.WHILE:
e = curried_e(s.e)
b = '\n'.join(map(curried_s, s.b))
buf.write("while (" + e + ") {\n" + b + "\n}")
elif s.kind == C.S.REPEAT:
e = curried_e(s.e)
b = '\n'.join(map(curried_s, s.b))
if e == "??": buf.write("minrepeat {\n" + b + "\n}")
else: buf.write("repeat (" + e + ") {\n" + b + "\n}")
elif s.kind == C.S.MINREPEAT:
b = '\n'.join(map(curried_s, s.b))
buf.write("minrepeat {\n" + b + "\n}")
elif s.kind == C.S.FOR:
# assume "for" is used for List<T> and LinkedList<T> only
col = mtd.vars[s.init.id]
if not util.is_collection(col) or \
util.of_collection(col)[0] not in [C.J.LST, C.J.LNK]:
raise Exception("not iterable type", col)
# if this is about observers, let sketch choose iteration direction
is_obs = hasattr(class_lookup(util.of_collection(col)[1]), "obs")
s_init = curried_e(s.init)
if is_obs: init = "{{| 0 | {}.idx - 1 |}}".format(s_init)
else: init = '0'
buf.write(" int idx = {};".format(init))
s_i_typ = trans_ty(s.i.ty)
buf.write("""
while (0 <= idx && idx < S && {s_init}.elts[idx] != null) {{
{s_i_typ} {s.i.id} = {s_init}.elts[idx];
""".format(**locals()))
buf.write('\n'.join(map(curried_s, s.b)))
if is_obs: upd = "{| idx (+ | -) 1 |}"
else: upd = "idx + 1"
buf.write("""
idx = {};
}}
""".format(upd))
elif s.kind == C.S.TRY:
# NOTE: no idea how to handle catch blocks
# at this point, just walk through try/finally blocks
buf.write('\n'.join(map(curried_s, s.b + s.fs)))
else: buf.write(s.__str__(curried_e))
return buf.getvalue()
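# For a single (type, name) parameter, produce a (retrieval-code, hash-expr)
# pair for logging: object-typed names yield a null-guarded "<name>_hash",
# bit/primitive values pass through as-is, and everything else (including the
# literal null) is dropped from the log tuple.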
@takes(tuple_of(unicode))
@returns(tuple_of(unicode))
def log_param( (ty, nm) ):
ty = trans_ty(ty)
if util.is_class_name(ty):
if nm == C.J.N:
return (u'', u'')
else:
nm_hash = nm + u"_hash"
retrival = u"""
int {nm_hash} = 0;
if ({nm} != null) {{ {nm_hash} = {nm}.hash; }}
""".format(**locals())
return (retrival, nm_hash)
elif ty in [C.SK.z] + C.primitives:
return (u'', nm)
else:
return (u'', u'')
# Java member method -> C-style function
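# roughly: "int bar(int x)" on class Foo becomes
#   "int <mangled-name>(Foo self, int x, bit logging) { ... }"
# with check_log@log(...) calls emitted at entry/exit when the method is logged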
_mids = set([]) # to maintain which methods are logged
_inits = set([]) # to maintain which <init> are translated
@takes(list_of(sample.Sample), Method)
@returns(str)
def to_func(smpls, mtd):
buf = cStringIO.StringIO()
if C.mod.GN in mtd.mods: buf.write(C.mod.GN + ' ')
elif C.mod.HN in mtd.mods: buf.write(C.mod.HN + ' ')
ret_ty = trans_ty(mtd.typ)
cname = unicode(repr(mtd.clazz))
mname = mtd.name
arg_typs = mtd.param_typs
buf.write(ret_ty + ' ' + trans_mname(cname, mname, arg_typs) + '(')
@takes(tuple_of(unicode))
@returns(unicode)
def trans_param( (ty, nm) ):
return ' '.join([trans_ty(ty), nm])
# for instance methods, add "this" pointer into parameters
if mtd.is_static:
params = mtd.params[:]
else:
self_ty = trans_ty(unicode(repr(mtd.clazz)))
params = [ (self_ty, C.SK.self) ] + mtd.params[:]
# add "logging" flag into parameters
# to check log conformity only if invocations cross the boundary
if not mtd.is_init and not mtd.is_clinit:
params.append( (C.SK.z, u"logging") )
if len(params) > 0:
buf.write(", ".join(map(trans_param, params)))
buf.write(") {\n")
# once function signature is dumped out, remove "logging" flag
if not mtd.is_init and not mtd.is_clinit:
params.pop()
clss = util.flatten_classes([mtd.clazz], "subs")
logged = (not mtd.is_init) and sample.mtd_appears(smpls, clss, mtd.name)
mid = unicode(repr(mtd))
m_ent = mid + "_ent()"
m_ext = mid + "_ext()"
if logged:
global _mids
_mids.add(mid)
if logged: # logging method entry (>)
_log_params = map(log_param, params)
_retrievals, _hashes = util.split([(u'', m_ent)] + _log_params)
ent_retrievals = util.ffilter(_retrievals)
ent_hashes = util.ffilter(_hashes)
buf.write("""{}
int[P] __params = {{ {} }};
if (logging) check_log@log(__params);
""".format(u''.join(ent_retrievals), u", ".join(ent_hashes)))
is_void = C.J.v == mtd.typ
if mtd.body:
if not is_void and not mtd.is_init:
bodies = mtd.body[:-1] # exclude the last 'return' statement
else: bodies = mtd.body
buf.write('\n'.join(map(partial(trans_s, mtd), bodies)))
if logged: # logging method exit (<)
_log_params = []
if mtd.body and not is_void and not mtd.is_init:
ret_v = mtd.body[-1].e
ret_u = unicode(trans_e(mtd, ret_v))
# retrieve the return value to a temporary variable
buf.write(u"""
{} __ret = {};
""".format(ret_ty, ret_u))
# then, try to obtain a hash from that temporary variable
_log_params.append(log_param( (ret_ty, u"__ret") ))
_retrievals, _hashes = util.split([(u'', m_ext)] + _log_params)
ext_retrievals = util.ffilter(_retrievals)
ext_hashes = util.ffilter(_hashes)
buf.write("""{}
__params = {{ {} }};
if (logging) check_log@log(__params);
""".format(u''.join(ext_retrievals), u", ".join(ext_hashes)))
if mtd.body and not is_void and not mtd.is_init:
buf.write(os.linesep)
if logged:
# return the return value stored at the temporary variable
buf.write("return __ret;")
else:
buf.write(trans_s(mtd, mtd.body[-1]))
if mtd.is_init:
evt_srcs = map(util.sanitize_ty, sample.evt_sources(smpls))
cname = unicode(repr(mtd.clazz))
if cname in evt_srcs:
global _inits
_inits.add(cname)
buf.write("\nreturn {};".format(C.SK.self))
buf.write("\n}\n")
return buf.getvalue()
# generate type.sk
@takes(str, list_of(Clazz))
@returns(nothing)
def gen_type_sk(sk_dir, bases):
buf = cStringIO.StringIO()
buf.write("package type;\n")
buf.write(_const)
buf.write(trans_lib())
buf.write('\n')
cols, decls = util.partition(lambda c: util.is_collection(c.name), bases)
decls = filter(lambda c: not util.is_array(c.name), decls)
itfs, clss = util.partition(op.attrgetter("is_itf"), decls)
logging.debug("# interface(s): {}".format(len(itfs)))
logging.debug("# class(es): {}".format(len(clss)))
# convert interfaces first, then usual classes
buf.write('\n'.join(util.ffilter(map(to_struct, itfs))))
buf.write('\n'.join(util.ffilter(map(to_struct, clss))))
# convert collections at last
logging.debug("# collection(s): {}".format(len(cols)))
buf.write('\n'.join(map(col_to_struct, cols)))
# argument number of methods
arg_num = map(lambda mtd: len(mtd.params), methods())
buf.write("""
#define _{0} {{ {1} }}
int {0}(int id) {{
return _{0}[id];
}}
""".format(C.typ.argNum, ", ".join(map(str, arg_num))))
# argument types of methods
def get_args_typ(mtd):
def get_arg_typ(param): return str(class_lookup(param[0]).id)
return '{' + ", ".join(map(get_arg_typ, mtd.params)) + '}'
args_typ = map(get_args_typ, methods())
buf.write("""
#define _{0} {{ {1} }}
int {0}(int id, int idx) {{
return _{0}[id][idx];
}}
""".format(C.typ.argType, ", ".join(args_typ)))
# return type of methods
def get_ret_typ(mtd):
cls = class_lookup(mtd.typ)
if cls: return cls.id
else: return -1
ret_typ = map(get_ret_typ, methods())
buf.write("""
#define _{0} {{ {1} }}
int {0}(int id) {{
return _{0}[id];
}}
""".format(C.typ.retType, ", ".join(map(str, ret_typ))))
# belonging class of methods
belongs = map(lambda mtd: mtd.clazz.id, methods())
buf.write("""
#define _{0} {{ {1} }}
int {0}(int id) {{
return _{0}[id];
}}
""".format(C.typ.belongsTo, ", ".join(map(str, belongs))))
subcls = \
map(lambda cls_i: '{' + ", ".join( \
map(lambda cls_j: str(cls_i <= cls_j).lower(), classes()) \
) + '}', classes())
buf.write("""
#define _{0} {{ {1} }}
bit {0}(int i, int j) {{
return _{0}[i][j];
}}
""".format(C.typ.subcls, ", ".join(subcls)))
## sub type relations
#subcls = []
#for cls_i in classes():
# row = []
# for cls_j in classes():
# row.append(int(cls_i <= cls_j))
# subcls.append(row)
## sub type relations in yale format
#_, IA, JA = util.yale_format(subcls)
#li, lj = len(IA), len(JA)
#si = ", ".join(map(str, IA))
#sj = ", ".join(map(str, JA))
#buf.write("""
# #define _iA {{ {si} }}
# #define _jA {{ {sj} }}
# int iA(int i) {{
# return _iA[i];
# }}
# int jA(int j) {{
# return _jA[j];
# }}
# bit subcls(int i, int j) {{
# int col_i = iA(i);
# int col_j = iA(i+1);
# for (int col = col_i; col < col_j; col++) {{
# if (j == jA(col)) return true;
# }}
# return false;
# }}
#""".format(**locals()))
with open(os.path.join(sk_dir, "type.sk"), 'w') as f:
f.write(buf.getvalue())
logging.info("encoding " + f.name)
buf.close()
# generate cls.sk
@takes(str, list_of(sample.Sample), Clazz)
@returns(optional(unicode))
def gen_cls_sk(sk_dir, smpls, cls):
mtds = collect_decls(cls, "mtds")
flds = collect_decls(cls, "flds")
s_flds = filter(op.attrgetter("is_static"), flds)
if cls.is_class:
if not mtds and not s_flds: return None
else: # cls.is_itf or cls.is_enum
if not s_flds: return None
cname = util.sanitize_ty(cls.name)
buf = cStringIO.StringIO()
buf.write("package {};\n".format(cname))
buf.write(_const)
# static fields
buf.write('\n'.join(map(trans_fld, s_flds)))
if len(s_flds) > 0: buf.write('\n')
# migrating static fields' initialization to <clinit>
for fld in ifilter(op.attrgetter("init"), s_flds):
if not fld.init.has_call and not fld.init.has_str and not fld.is_aliasing: continue
# retrieve (or declare) <clinit>
clinit = fld.clazz.get_or_declare_clinit()
if clinit not in mtds: mtds.append(clinit)
# add assignment
assign = st.gen_S_assign(exp.gen_E_id(fld.name), fld.init)
clinit.body.append(assign)
# accessors for static fields
for fld in ifilterfalse(op.attrgetter("is_private"), s_flds):
fname = fld.name
accessor = trans_fname(fld.clazz.name, fname, True)
buf.write("""
{0} {1}() {{ return {2}; }}
""".format(trans_ty(fld.typ), accessor, fname))
# methods
clinits, mtds = util.partition(lambda m: m.is_clinit, mtds)
inits, mtds = util.partition(lambda m: m.is_init, mtds)
# <init>/<clinit> should be dumped out in any case
buf.write('\n'.join(map(partial(to_func, smpls), clinits)))
buf.write('\n'.join(map(partial(to_func, smpls), inits)))
for mtd in mtds:
# interface won't have method bodies
if mtd.clazz.is_itf: continue
buf.write(to_func(smpls, mtd) + os.linesep)
cls_sk = cname + ".sk"
with open(os.path.join(sk_dir, cls_sk), 'w') as f:
f.write(buf.getvalue())
logging.info("encoding " + f.name)
return cls_sk
# max # of objects in samples
max_objs = 0
# generate sample_x.sk
@takes(str, sample.Sample, Template, Method)
@returns(nothing)
def gen_smpl_sk(sk_path, smpl, tmpl, main):
buf = cStringIO.StringIO()
buf.write("package {};\n".format(smpl.name))
buf.write(_const)
buf.write("harness void {} () {{\n".format(smpl.name))
# insert call-return sequences
buf.write("""
clear_log@log();
int[P] log = { 0 };
""")
global _mids
obj_cnt = 0
objs = { C.J.N: 0, C.J.FALSE: 0, C.J.TRUE: 1, } # { @Obj...aaa : 2, ... }
for i in xrange(10):
objs[str(i)] = i
obj_cnt = obj_cnt + 1
call_stack = []
for io in smpl.IOs:
# ignore <init>
if io.is_init: continue
elif isinstance(io, sample.CallExt):
# ignore method exits whose counterparts are missed
if not call_stack: continue
mid = call_stack.pop()
# ignore methods that are not declared in the template
if not mid: continue
else: # sample.CallEnt
mid = None
# TODO: retrieve arg types
mtd = None # find_mtd_by_sig(io.cls, io.mtd, ...)
if mtd: # found the method that matches the argument types
mid = repr(mtd)
if mid not in _mids: continue
else: # try other possible methods
mtds = find_mtds_by_name(io.cls, io.mtd)
argn = len(io.vals)
min_gap = argn
for mtd in mtds:
_gap = abs((argn - (0 if mtd.is_static else 1)) - len(mtd.params))
if _gap <= min_gap: # eq is needed for zero parameter
min_gap = _gap
mid = repr(mtd)
if mid not in _mids: mid = None
call_stack.append(mid)
# ignore methods that are not declared in the template
if not mid: continue
if isinstance(io, sample.CallEnt):
mid = mid + "_ent()"
else: # sample.CallExt
mid = mid + "_ext()"
vals = []
for val in io.vals:
kind = sample.kind(val)
if type(kind) is type: val = str(val)
# every occurrence of constant string will be uniquely allocated,
# hence different hash => assign unique obj_cnt
# also, primitive value doesn't have hash,
# so we can't compare via obj array; just assign unique obj_cnt
## 1) primitive, including string
# 2) this object never occurs
#if type(kind) is type or val not in objs:
if val not in objs:
obj_cnt = obj_cnt + 1
objs[val] = obj_cnt
vals.append(str(objs[val]))
buf.write("""
log = (int[P]){{ {} }};
write_log@log(log);
""".format(", ".join([mid] + vals)))
buf.write("""
int len_log = get_log_cnt@log();
reset_log_cnt@log();
""")
global max_objs
max_objs = max(max_objs, obj_cnt)
# invoke class initializers
for cls in util.flatten_classes(tmpl.classes, "inners"):
clinit = cls.mtd_by_sig(C.J.CLINIT)
if not clinit: continue
# to only call the base class's <clinit>
if clinit.clazz != cls: continue
buf.write(" {}();\n".format(trans_mname(unicode(repr(cls)), clinit.name)))
# execute template's *main*
cname = unicode(repr(main.clazz))
mname = main.name
arg_typs = main.param_typs
params = main.params + [ (C.J.z, u"logging") ]
args = ", ".join(sig_match(params, []))
buf.write("\n {}({});\n".format(trans_mname(cname, mname, arg_typs), args))
buf.write("assert len_log == get_log_cnt@log();")
buf.write("\n}\n")
with open(sk_path, 'w') as f:
f.write(buf.getvalue())
logging.info("encoding " + f.name)
buf.close()
# generate log.sk
@takes(str, Template)
@returns(nothing)
def gen_log_sk(sk_dir, tmpl):
buf = cStringIO.StringIO()
buf.write("package log;\n")
buf.write(_const)
global max_objs
buf.write("int O = {}; // # of objects\n".format(max_objs + 1))
buf.write("""
int log_cnt = 0;
int[P][N] ev;
int[O] obj;
// to enforce the length of logs
int get_log_cnt() {
return log_cnt;
}
// after writing logs, reset the cursor in order to check logs in order
void reset_log_cnt() {
log_cnt = 0;
}
// to clean up the logs totally
void clear_log() {
reset_log_cnt();
ev = {};
obj = {};
}
// to write the log from samples
void write_log (int[P] params) {
ev[log_cnt++] = params;
}
// to check whether control-flow conforms to the samples
@Native("{ std::cout << \\\"log::check_log::\\\" << params[0] << std::endl; }")
void check_log (int[P] params) {
assert params[0] == ev[log_cnt][0]; // check mid
for (int i = 1; i < P; i++) {
if (ev[log_cnt][i] != 0) {
if (obj[ev[log_cnt][i]] == 0) { // not set yet
obj[ev[log_cnt][i]] = params[i];
}
else { // o.w. check obj eq.
assert obj[ev[log_cnt][i]] == params[i];
}
}
}
log_cnt++; // advance
}
// distinct hash values for runtime objects
int obj_cnt = 0;
int nonce () {
return obj_cnt++;
}
""")
global _inits
reg_codes = []
for ty in _inits:
cls = class_lookup(ty)
if not cls: continue
buf.write("""
int obj_{0}_cnt = 0;
{1}[O] obj_{0};
// to register runtime instances of {0}
void register_{0} ({1} {2}) {{
if (obj_{0}_cnt < O) {{
obj_{0}[obj_{0}_cnt++] = {2};
}}
}}
// to access to a certain instance of {0}
{1} retrieve_{0} (int idx) {{
if (0 <= idx && idx < obj_{0}_cnt) {{
return obj_{0}[idx];
}}
else {{
return null;
}}
}}
""".format(ty, trans_ty(ty), ty.lower()))
reg_code = "if (ty == {0}) register_{1}@log({2});".format(cls.id, repr(cls), C.SK.self)
reg_codes.append(reg_code)
# factory of Object
buf.write("""
// factory of Object
Object alloc(int ty) {{
Object {0} = new Object(hash=nonce(), __cid=ty);
{1}
return {0};
}}
""".format(C.SK.self, "\nelse ".join(reg_codes)))
  global _ty
_clss = []
for ty in _ty.keys():
if util.is_collection(ty): continue
if util.is_array(ty): continue
cls = class_lookup(ty)
if not cls: continue # to avoid None definition
# inner class may appear twice: w/ and w/o outer class name
if cls not in _clss: _clss.append(cls)
buf.write("\n// distinct class IDs\n")
for cls in _clss:
buf.write("int {cls!r} () {{ return {cls.id}; }}\n".format(**locals()))
buf.write("\n// distinct method IDs\n")
for cls in tmpl.classes:
mtds = collect_decls(cls, "mtds")
if not mtds: continue
for mtd in mtds:
mname = sanitize_mname(unicode(repr(mtd)))
buf.write("""
int {mname}_ent () {{ return {mtd.id}; }}
int {mname}_ext () {{ return -{mtd.id}; }}
""".format(**locals()))
with open(os.path.join(sk_dir, "log.sk"), 'w') as f:
f.write(buf.getvalue())
logging.info("encoding " + f.name)
buf.close()
# reset global variables
@takes(nothing)
@returns(nothing)
def reset():
global _ty, _mtds, _flds, _s_flds
global _collections, _mids, _inits
global max_objs
_ty = {}
_mtds = {}
_flds = {}
_s_flds = {}
_collections = set([])
_mids = set([])
_inits = set([])
max_objs = 0
# translate the high-level templates into low-level sketches
# using information at the samples
@takes(str, list_of(sample.Sample), Template, str)
@returns(nothing)
def to_sk(cmd, smpls, tmpl, sk_dir):
# clean up result directory
if os.path.isdir(sk_dir): util.clean_dir(sk_dir)
else: os.makedirs(sk_dir)
# reset global variables so that we can run this encoding phase per demo
reset()
# update global constants
def logged(mtd):
if mtd.is_init: return False
clss = util.flatten_classes([mtd.clazz], "subs")
return sample.mtd_appears(smpls, clss, mtd.name)
mtds = filter(logged, methods())
if mtds:
n_params = 2 + max(map(len, map(op.attrgetter("params"), mtds)))
else: # no meaningful logs in the sample?
n_params = 2
n_evts = sample.max_evts(smpls)
if cmd == "android":
n_views = sample.max_views(smpls)
magic_S = max(3, n_evts + 1, n_views)
else:
magic_S = max(5, n_evts + 1) # at least 5, just in case
n_ios = sample.max_IOs(smpls)
global _const
_const = u"""
int P = {}; // length of parameters (0: (>|<)mid, 1: receiver, 2...)
int S = {}; // length of arrays for Java collections
int N = {}; // length of logs
""".format(n_params, magic_S, n_ios)
# type.sk
logging.info("building class hierarchy")
tmpl.consist()
# merge all classes and interfaces, except for primitive types
clss, _ = util.partition(lambda c: util.is_class_name(c.name), classes())
bases = rm_subs(clss)
gen_type_sk(sk_dir, bases)
# cls.sk
cls_sks = []
for cls in tmpl.classes:
# skip the collections, which will be encoded at type.sk
if repr(cls).split('_')[0] in C.collections: continue
cls_sk = gen_cls_sk(sk_dir, smpls, cls)
if cls_sk: cls_sks.append(cls_sk)
# sample_x.sk
smpl_sks = []
for smpl in smpls:
smpl_sk = "sample_" + smpl.name + ".sk"
smpl_sks.append(smpl_sk)
sk_path = os.path.join(sk_dir, smpl_sk)
gen_smpl_sk(sk_path, smpl, tmpl, tmpl.harness(smpl.name))
# log.sk
gen_log_sk(sk_dir, tmpl)
# sample.sk that imports all the other sketch files
buf = cStringIO.StringIO()
# deprecated as we use regex generator for class/method roles
## --bnd-cbits: the number of bits for integer holes
#bits = max(5, int(math.ceil(math.log(len(methods()), 2))))
#buf.write("pragma options \"--bnd-cbits {}\";\n".format(bits))
# --bnd-unroll-amnt: the unroll amount for loops
unroll_amnt = max(n_params, magic_S)
buf.write("pragma options \"--bnd-unroll-amnt {}\";\n".format(unroll_amnt))
# --bnd-inline-amnt: bounds inlining to n levels of recursion
inline_amnt = None # use a default value if not set
if cmd == "android":
#inline_amnt = 2 # depth of View hierarchy (at findViewByTraversal)
inline_amnt = 1 # no recursion for flat Views
elif cmd == "gui":
# setting it 1 means there is no recursion
inline_amnt = 1
if inline_amnt:
buf.write("pragma options \"--bnd-inline-amnt {}\";\n".format(inline_amnt))
buf.write("pragma options \"--bnd-bound-mode CALLSITE\";\n")
sks = ["log.sk", "type.sk"] + cls_sks + smpl_sks
for sk in sks:
buf.write("include \"{}\";\n".format(sk))
with open(os.path.join(sk_dir, "sample.sk"), 'w') as f:
f.write(buf.getvalue())
logging.info("encoding " + f.name)
buf.close()
| mit | -3,296,324,012,821,895,000 | 30.475163 | 95 | 0.578628 | false |
cloudrain21/memcached-1 | testsuite/breakdancer/engine_test.py | 1 | 7385 | #!/usr/bin/env python
import os
import sys
import breakdancer
from breakdancer import Condition, Effect, Action, Driver
TESTKEY = 'testkey'
######################################################################
# Conditions
######################################################################
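# Conditions inspect the simulated key/value state (a plain dict) and gate
# which Actions may run; Effects below mutate that state, and Actions tie a
# precondition/effect/postcondition triple together.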
class ExistsCondition(Condition):
def __call__(self, state):
return TESTKEY in state
class ExistsAsNumber(Condition):
def __call__(self, state):
try:
int(state[TESTKEY])
return True
except:
return False
class MaybeExistsAsNumber(ExistsAsNumber):
def __call__(self, state):
return TESTKEY not in state or ExistsAsNumber.__call__(self, state)
class DoesNotExistCondition(Condition):
def __call__(self, state):
return TESTKEY not in state
class NothingExistsCondition(Condition):
def __call__(self, state):
return not bool(state)
######################################################################
# Effects
######################################################################
class StoreEffect(Effect):
def __init__(self, v='0'):
self.v = v
def __call__(self, state):
state[TESTKEY] = self.v
class DeleteEffect(Effect):
def __call__(self, state):
del state[TESTKEY]
class FlushEffect(Effect):
def __call__(self, state):
state.clear()
class AppendEffect(Effect):
suffix = '-suffix'
def __call__(self, state):
state[TESTKEY] = state[TESTKEY] + self.suffix
class PrependEffect(Effect):
prefix = 'prefix-'
def __call__(self, state):
state[TESTKEY] = self.prefix + state[TESTKEY]
class ArithmeticEffect(Effect):
default = '0'
def __init__(self, by=1):
self.by = by
def __call__(self, state):
if TESTKEY in state:
state[TESTKEY] = str(max(0, int(state[TESTKEY]) + self.by))
else:
state[TESTKEY] = self.default
######################################################################
# Actions
######################################################################
class Set(Action):
effect = StoreEffect()
postconditions = [ExistsCondition()]
class Add(Action):
preconditions = [DoesNotExistCondition()]
effect = StoreEffect()
postconditions = [ExistsCondition()]
class Delete(Action):
preconditions = [ExistsCondition()]
effect = DeleteEffect()
postconditions = [DoesNotExistCondition()]
class Flush(Action):
effect = FlushEffect()
postconditions = [NothingExistsCondition()]
class Delay(Flush):
pass
class Append(Action):
preconditions = [ExistsCondition()]
effect = AppendEffect()
postconditions = [ExistsCondition()]
class Prepend(Action):
preconditions = [ExistsCondition()]
effect = PrependEffect()
postconditions = [ExistsCondition()]
class Incr(Action):
preconditions = [ExistsAsNumber()]
effect = ArithmeticEffect(1)
postconditions = [ExistsAsNumber()]
class Decr(Action):
preconditions = [ExistsAsNumber()]
effect = ArithmeticEffect(-1)
postconditions = [ExistsAsNumber()]
class IncrWithDefault(Action):
preconditions = [MaybeExistsAsNumber()]
effect = ArithmeticEffect(1)
postconditions = [ExistsAsNumber()]
class DecrWithDefault(Action):
preconditions = [MaybeExistsAsNumber()]
effect = ArithmeticEffect(-1)
postconditions = [ExistsAsNumber()]
######################################################################
# Driver
######################################################################
class TestFile(object):
def __init__(self, path, n=10):
self.tmpfilenames = ["%s_%d.c.tmp" % (path, i) for i in range(n)]
self.files = [open(tfn, "w") for tfn in self.tmpfilenames]
self.seq = [list() for f in self.files]
self.index = 0
def finish(self):
for f in self.files:
f.close()
for tfn in self.tmpfilenames:
nfn = tfn[:-4]
assert (nfn + '.tmp') == tfn
if os.path.exists(nfn):
os.remove(nfn)
os.rename(tfn, nfn)
def nextfile(self):
self.index += 1
if self.index >= len(self.files):
self.index = 0
def write(self, s):
self.files[self.index].write(s)
def addseq(self, seq):
self.seq[self.index].append(seq)
class EngineTestAppDriver(Driver):
def __init__(self, writer=sys.stdout):
self.writer = writer
def output(self, s):
self.writer.write(s)
def preSuite(self, seq):
files = [self.writer]
if isinstance(self.writer, TestFile):
files = self.writer.files
for f in files:
f.write('/* DO NOT EDIT.. GENERATED SOURCE */\n\n')
f.write('#include "testsuite/breakdancer/disable_optimize.h"\n')
f.write('#include "testsuite/breakdancer/suite_stubs.h"\n\n')
def testName(self, seq):
return 'test_' + '_'.join(a.name for a in seq)
def startSequence(self, seq):
if isinstance(self.writer, TestFile):
self.writer.nextfile()
self.writer.addseq(seq)
f = "static enum test_result %s" % self.testName(seq)
self.output(("%s(ENGINE_HANDLE *h,\n%sENGINE_HANDLE_V1 *h1) {\n"
% (f, " " * (len(f) + 1))))
def startAction(self, action):
if isinstance(action, Delay):
s = " delay(expiry+1);"
elif isinstance(action, Flush):
s = " flush(h, h1);"
elif isinstance(action, Delete):
s = ' del(h, h1);'
else:
s = ' %s(h, h1);' % (action.name)
self.output(s + "\n")
def _writeList(self, writer, fname, seq):
writer.write("""engine_test_t* %s(void) {
static engine_test_t tests[] = {
""" % fname)
for seq in sorted(seq):
writer.write(' TEST_CASE("%s",\n %s,\n test_setup, teardown, NULL, NULL, NULL),\n' % (
', '.join(a.name for a in seq),
self.testName(seq)))
writer.write(""" TEST_CASE(NULL, NULL, NULL, NULL, NULL, NULL, NULL)
};
return tests;
}
""")
def postSuite(self, seq):
if isinstance(self.writer, TestFile):
for i, v in enumerate(self.writer.files):
self._writeList(v, 'get_tests', self.writer.seq[i])
else:
self._writeList(self.writer, 'get_tests', seq)
def endSequence(self, seq, state):
val = state.get(TESTKEY)
if val:
self.output(' checkValue(h, h1, "%s");\n' % val)
else:
self.output(' assertNotExists(h, h1);\n')
self.output(" return SUCCESS;\n")
self.output("}\n\n")
def endAction(self, action, state, errored):
value = state.get(TESTKEY)
if value:
vs = ' /* value is "%s" */\n' % value
else:
vs = ' /* value is not defined */\n'
if errored:
self.output(" assertHasError();" + vs)
else:
self.output(" assertHasNoError();" + vs)
if __name__ == '__main__':
w = TestFile('generated_suite')
breakdancer.runTest(breakdancer.findActions(globals().values()),
EngineTestAppDriver(w))
w.finish()
| bsd-3-clause | -7,667,654,003,206,512,000 | 25.281139 | 121 | 0.53067 | false |
artemix9/mlpy | mlpy.py | 1 | 2876 | # Copyright (C) 2016 Artem Kozlov. All rights reserved. Contacts: <[email protected]>
# Multilayer perceptron module for Python 3
from copy import deepcopy as new
import numpy as np
import json
class Neuron:
inputs = []
weights = []
output = 0.0
# The 'weights' variable must be a list of neuron weights.
    def __init__(self, inputs_count, weights=None):
        self.inputs = [0.0 for i in range(inputs_count)]
        if weights is None:
            self.weights = [np.random.random() * 2 - 1 for w in range(inputs_count)]
        elif len(weights) != inputs_count:
            print('Error in ' + str(self) + '. Wrong weights')
            return
        else:
            self.weights = list(weights)  # keep the weights that were passed in
@staticmethod
def f(x):
return 1 / (1 + np.exp(-x))
@staticmethod
def fdx(x):
# return np.exp(-x) / ((np.exp(-x) + 1) ** 2)
return Neuron.f(x) * (1 - Neuron.f(x))
def calculate(self):
self.output = Neuron.f(sum(np.array(self.inputs) * np.array(self.weights)))
def pprint(self):
for i in range(len(self.inputs)):
print('in ' + str(i) + ': ' + str(round(self.inputs[i], 3)) + ' * ' + str(round(self.weights[i], 3)) + ', ')
print('out: ' + str(round(self.output, 3)) + ';')
class MLP:
net_inputs = []
neurons = [[]]
net_outputs = []
def __init__(self, structure): # [inputs, lay0 neurons, lay1 neurons, ...]
self.neurons = [[Neuron(structure[lay - 1]) for neuron in range(structure[lay])]
for lay in range(1, len(structure))]
    def get_inputs_count(self, lay=0):  # Returns the inputs count of the given layer.
return len(self.neurons[lay][0].inputs)
def get_lays_count(self):
return len(self.neurons)
def get_neurons_count(self, lay):
return len(self.neurons[lay])
def get_structure(self):
structure = [self.get_inputs_count()]
structure.extend([self.get_neurons_count(l) for l in range(self.get_lays_count())])
return structure
def run(self):
for lay in range(self.get_lays_count()):
for neuron in range(self.get_neurons_count(lay)):
if lay == 0:
self.neurons[lay][neuron].inputs = new(self.net_inputs)
else:
self.neurons[lay][neuron].inputs = [self.neurons[lay - 1][out].output
for out in range(self.get_neurons_count(lay - 1))]
self.neurons[lay][neuron].calculate()
        self.net_outputs = [self.neurons[-1][out].output for out in range(self.get_neurons_count(-1))]
def save(self, filename):
array = [[self.neurons[lay][neuron].weights for neuron in range(self.get_neurons_count(lay))] for lay in range(self.get_lays_count())]
f = open(filename, 'w')
json.dump(array, f, sort_keys=True)
f.close()
| gpl-3.0 | 1,807,188,544,151,770,000 | 32.835294 | 142 | 0.567455 | false |
ithinksw/philo | philo/signals.py | 1 | 2901 | from django.dispatch import Signal
#: Sent whenever an Entity subclass has been "prepared" -- that is, after the processing necessary to make :mod:`.AttributeProxyField`\ s work has been completed. This will fire after :obj:`django.db.models.signals.class_prepared`.
#:
#: Arguments that are sent with this signal:
#:
#: ``sender``
#: The model class.
entity_class_prepared = Signal(providing_args=['class'])
#: Sent when a :class:`~philo.models.nodes.View` instance is about to render. This allows you, for example, to modify the ``extra_context`` dictionary used in rendering.
#:
#: Arguments that are sent with this signal:
#:
#: ``sender``
#: The :class:`~philo.models.nodes.View` instance
#:
#: ``request``
#: The :class:`HttpRequest` instance which the :class:`~philo.models.nodes.View` is rendering in response to.
#:
#: ``extra_context``
#: A dictionary which will be passed into :meth:`~philo.models.nodes.View.actually_render_to_response`.
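#:
#: A minimal receiver sketch (the receiver name and the ``greeting`` key are
#: hypothetical, shown for illustration only)::
#:
#:     from philo.signals import view_about_to_render
#:
#:     def add_greeting(sender, request, extra_context, **kwargs):
#:         # 'greeting' is an arbitrary example key
#:         extra_context['greeting'] = 'hello'
#:
#:     view_about_to_render.connect(add_greeting)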
view_about_to_render = Signal(providing_args=['request', 'extra_context'])
#: Sent when a view instance has finished rendering.
#:
#: Arguments that are sent with this signal:
#:
#: ``sender``
#: The :class:`~philo.models.nodes.View` instance
#:
#: ``response``
#: The :class:`HttpResponse` instance which :class:`~philo.models.nodes.View` view has rendered to.
view_finished_rendering = Signal(providing_args=['response'])
#: Sent when a :class:`~philo.models.pages.Page` instance is about to render as a string. If the :class:`~philo.models.pages.Page` is rendering as a response, this signal is sent after :obj:`view_about_to_render` and serves a similar function. However, there are situations where a :class:`~philo.models.pages.Page` may be rendered as a string without being rendered as a response afterwards.
#:
#: Arguments that are sent with this signal:
#:
#: ``sender``
#: The :class:`~philo.models.pages.Page` instance
#:
#: ``request``
#: The :class:`HttpRequest` instance which the :class:`~philo.models.pages.Page` is rendering in response to (if any).
#:
#: ``extra_context``
#: A dictionary which will be passed into the :class:`Template` context.
page_about_to_render_to_string = Signal(providing_args=['request', 'extra_context'])
#: Sent when a :class:`~philo.models.pages.Page` instance has just finished rendering as a string. If the :class:`~philo.models.pages.Page` is rendering as a response, this signal is sent before :obj:`view_finished_rendering` and serves a similar function. However, there are situations where a :class:`~philo.models.pages.Page` may be rendered as a string without being rendered as a response afterwards.
#:
#: Arguments that are sent with this signal:
#:
#: ``sender``
#: The :class:`~philo.models.pages.Page` instance
#:
#: ``string``
#: The string which the :class:`~philo.models.pages.Page` has rendered to.
page_finished_rendering_to_string = Signal(providing_args=['string']) | isc | 5,403,604,978,678,147,000 | 47.366667 | 405 | 0.725957 | false |
beiko-lab/gengis | bin/Lib/site-packages/numpy/oldnumeric/fix_default_axis.py | 1 | 8326 | """
This module adds the default axis argument to code which did not specify it
for the functions where the default was changed in NumPy.
The functions changed are
add axis=-1 (added as the second argument to all of these)
======
nansum
nanmax
nanmin
nanargmax
nanargmin
argmax
argmin
compress 3
add axis=0
======
take 3
repeat 3
sum # might cause problems with builtin.
product
sometrue
alltrue
cumsum
cumproduct
average
ptp
cumprod
prod
std
mean
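For example (illustrative only), a call written without an axis such as
    x = sum(a)
is rewritten by this tool as
    x = sum(a,axis=0)
while passing import_change=True to convertfile/convertall instead redirects
the imports of these names to numpy.oldnumeric.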
"""
__all__ = ['convertfile', 'convertall', 'converttree']
import sys
import os
import re
import glob
_args3 = ['compress', 'take', 'repeat']
_funcm1 = ['nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin',
'argmax', 'argmin', 'compress']
_func0 = ['take', 'repeat', 'sum', 'product', 'sometrue', 'alltrue',
'cumsum', 'cumproduct', 'average', 'ptp', 'cumprod', 'prod',
'std', 'mean']
_all = _func0 + _funcm1
func_re = {}
for name in _all:
_astr = r"""%s\s*[(]"""%name
func_re[name] = re.compile(_astr)
import string
disallowed = '_' + string.uppercase + string.lowercase + string.digits
def _add_axis(fstr, name, repl):
alter = 0
if name in _args3:
allowed_comma = 1
else:
allowed_comma = 0
newcode = ""
last = 0
for obj in func_re[name].finditer(fstr):
nochange = 0
start, end = obj.span()
if fstr[start-1] in disallowed:
continue
if fstr[start-1] == '.' \
and fstr[start-6:start-1] != 'numpy' \
and fstr[start-2:start-1] != 'N' \
and fstr[start-9:start-1] != 'numarray' \
and fstr[start-8:start-1] != 'numerix' \
and fstr[start-8:start-1] != 'Numeric':
continue
if fstr[start-1] in ['\t',' ']:
k = start-2
while fstr[k] in ['\t',' ']:
k -= 1
if fstr[k-2:k+1] == 'def' or \
fstr[k-4:k+1] == 'class':
continue
k = end
stack = 1
ncommas = 0
N = len(fstr)
while stack:
if k>=N:
nochange =1
break
if fstr[k] == ')':
stack -= 1
elif fstr[k] == '(':
stack += 1
elif stack == 1 and fstr[k] == ',':
ncommas += 1
if ncommas > allowed_comma:
nochange = 1
break
k += 1
if nochange:
continue
alter += 1
newcode = "%s%s,%s)" % (newcode, fstr[last:k-1], repl)
last = k
if not alter:
newcode = fstr
else:
newcode = "%s%s" % (newcode, fstr[last:])
return newcode, alter
def _import_change(fstr, names):
# Four possibilities
# 1.) import numpy with subsequent use of numpy.<name>
# change this to import numpy.oldnumeric as numpy
# 2.) import numpy as XXXX with subsequent use of
# XXXX.<name> ==> import numpy.oldnumeric as XXXX
# 3.) from numpy import *
# with subsequent use of one of the names
# 4.) from numpy import ..., <name>, ... (could span multiple
# lines. ==> remove all names from list and
# add from numpy.oldnumeric import <name>
num = 0
# case 1
importstr = "import numpy"
ind = fstr.find(importstr)
if (ind > 0):
found = 0
for name in names:
ind2 = fstr.find("numpy.%s" % name, ind)
if (ind2 > 0):
found = 1
break
if found:
fstr = "%s%s%s" % (fstr[:ind], "import numpy.oldnumeric as numpy",
fstr[ind+len(importstr):])
num += 1
# case 2
importre = re.compile("""import numpy as ([A-Za-z0-9_]+)""")
modules = importre.findall(fstr)
if len(modules) > 0:
for module in modules:
found = 0
for name in names:
ind2 = fstr.find("%s.%s" % (module, name))
if (ind2 > 0):
found = 1
break
if found:
importstr = "import numpy as %s" % module
ind = fstr.find(importstr)
fstr = "%s%s%s" % (fstr[:ind],
"import numpy.oldnumeric as %s" % module,
fstr[ind+len(importstr):])
num += 1
# case 3
importstr = "from numpy import *"
ind = fstr.find(importstr)
if (ind > 0):
found = 0
for name in names:
ind2 = fstr.find(name, ind)
if (ind2 > 0) and fstr[ind2-1] not in disallowed:
found = 1
break
if found:
fstr = "%s%s%s" % (fstr[:ind],
"from numpy.oldnumeric import *",
fstr[ind+len(importstr):])
num += 1
# case 4
ind = 0
importstr = "from numpy import"
N = len(importstr)
while 1:
ind = fstr.find(importstr, ind)
if (ind < 0):
break
ind += N
ptr = ind+1
stack = 1
while stack:
if fstr[ptr] == '\\':
stack += 1
elif fstr[ptr] == '\n':
stack -= 1
ptr += 1
substr = fstr[ind:ptr]
found = 0
substr = substr.replace('\n',' ')
substr = substr.replace('\\','')
importnames = [x.strip() for x in substr.split(',')]
# determine if any of names are in importnames
addnames = []
for name in names:
if name in importnames:
importnames.remove(name)
addnames.append(name)
if len(addnames) > 0:
fstr = "%s%s\n%s\n%s" % \
(fstr[:ind],
"from numpy import %s" % \
", ".join(importnames),
"from numpy.oldnumeric import %s" % \
", ".join(addnames),
fstr[ptr:])
num += 1
return fstr, num
def add_axis(fstr, import_change=False):
total = 0
if not import_change:
for name in _funcm1:
fstr, num = _add_axis(fstr, name, 'axis=-1')
total += num
for name in _func0:
fstr, num = _add_axis(fstr, name, 'axis=0')
total += num
return fstr, total
else:
fstr, num = _import_change(fstr, _funcm1+_func0)
return fstr, num
def makenewfile(name, filestr):
fid = file(name, 'w')
fid.write(filestr)
fid.close()
def getfile(name):
fid = file(name)
filestr = fid.read()
fid.close()
return filestr
def copyfile(name, fstr):
base, ext = os.path.splitext(name)
makenewfile(base+'.orig', fstr)
return
def convertfile(filename, import_change=False):
"""Convert the filename given from using Numeric to using NumPy
Copies the file to filename.orig and then over-writes the file
with the updated code
"""
filestr = getfile(filename)
newstr, total = add_axis(filestr, import_change)
if total > 0:
print "Changing ", filename
copyfile(filename, filestr)
makenewfile(filename, newstr)
sys.stdout.flush()
def fromargs(args):
filename = args[1]
convertfile(filename)
def convertall(direc=os.path.curdir, import_change=False):
"""Convert all .py files in the directory given
For each file, a backup of <usesnumeric>.py is made as
<usesnumeric>.py.orig. A new file named <usesnumeric>.py
is then written with the updated code.
"""
files = glob.glob(os.path.join(direc,'*.py'))
for afile in files:
convertfile(afile, import_change)
def _func(arg, dirname, fnames):
convertall(dirname, import_change=arg)
def converttree(direc=os.path.curdir, import_change=False):
"""Convert all .py files in the tree given
"""
os.path.walk(direc, _func, import_change)
if __name__ == '__main__':
fromargs(sys.argv)
| gpl-3.0 | -6,043,871,192,939,689,000 | 26.611684 | 78 | 0.490632 | false |
redhat-openstack/heat | heat/engine/resources/software_config/cloud_config.py | 1 | 2132 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.template_format import yaml
from heat.common.template_format import yaml_dumper
from heat.engine import properties
from heat.engine.resources.software_config import software_config
class CloudConfig(software_config.SoftwareConfig):
'''
A configuration resource for representing cloud-init cloud-config.
This resource allows cloud-config YAML to be defined and stored by the
config API. Any intrinsic functions called in the config will be resolved
before storing the result.
This resource will generally be referenced by OS::Nova::Server user_data,
or OS::Heat::MultipartMime parts config. Since cloud-config is boot-only
configuration, any changes to the definition will result in the
replacement of all servers which reference it.
'''
PROPERTIES = (
CLOUD_CONFIG
) = (
'cloud_config'
)
properties_schema = {
CLOUD_CONFIG: properties.Schema(
properties.Schema.MAP,
_('Map representing the cloud-config data structure which will '
'be formatted as YAML.')
)
}
def handle_create(self):
props = {self.NAME: self.physical_resource_name()}
cloud_config = yaml.dump(self.properties.get(
self.CLOUD_CONFIG), Dumper=yaml_dumper)
props[self.CONFIG] = '#cloud-config\n%s' % cloud_config
sc = self.heat().software_configs.create(**props)
self.resource_id_set(sc.id)
def resource_mapping():
return {
'OS::Heat::CloudConfig': CloudConfig,
}
| apache-2.0 | -692,358,450,047,824,900 | 34.533333 | 78 | 0.689024 | false |
jrichte43/ProjectEuler | Problem-0194/solutions.py | 1 | 1346 |
__problem_title__ = "Coloured Configurations"
__problem_url___ = "https://projecteuler.net/problem=194"
__problem_description__ = "Consider graphs built with the units A: and B: , where the units are " \
"glued along the vertical edges as in the graph . A configuration of " \
"type ( , , ) is a graph thus built of units A and units B, where the " \
"graph's vertices are coloured using up to colours, so that no two " \
"adjacent vertices have the same colour. The compound graph above is " \
"an example of a configuration of type (2,2,6), in fact of type (2,2, " \
") for all ≥ 4. Let N( , , ) be the number of configurations of type ( " \
", , ). For example, N(1,0,3) = 24, N(0,2,4) = 92928 and N(2,2,3) = " \
"20736. Find the last 8 digits of N(25,75,1984)."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
| gpl-3.0 | -4,165,816,755,729,612,000 | 39.727273 | 100 | 0.540179 | false |
ThiefMaster/sqlalchemy | lib/sqlalchemy/dialects/postgresql/psycopg2.py | 1 | 26982 | # postgresql/psycopg2.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
:connectstring: postgresql+psycopg2://user:password@host:port/dbname\
[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2/
psycopg2 Connect Arguments
-----------------------------------
psycopg2-specific keyword arguments which are accepted by
:func:`.create_engine()` are:
* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
``connection.cursor('some name')``, which has the effect that result rows
are not immediately pre-fetched and buffered after statement execution, but
are instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
Note that the ``stream_results=True`` execution option is a more targeted
way of enabling this mode on a per-execution basis.
* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode
per connection. True by default.
.. seealso::
:ref:`psycopg2_disable_native_unicode`
* ``isolation_level``: This option, available for all PostgreSQL dialects,
includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
dialect.
.. seealso::
:ref:`psycopg2_isolation_level`
* ``client_encoding``: sets the client encoding in a libpq-agnostic way,
using psycopg2's ``set_client_encoding()`` method.
.. seealso::
:ref:`psycopg2_unicode`
Unix Domain Connections
------------------------
psycopg2 supports connecting via Unix domain connections. When the ``host``
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, the socket file used is to connect to a Unix-domain socket
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?\
host=/var/lib/postgresql")
See also:
`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/\
libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
.. _psycopg2_execution_options:
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
* ``isolation_level`` - Set the transaction isolation level for the lifespan of a
:class:`.Connection` (can only be set on a connection, not a statement
or query). See :ref:`psycopg2_isolation_level`.
* ``stream_results`` - Enable or disable usage of psycopg2 server side cursors -
this feature makes use of "named" cursors in combination with special
result handling methods so that result rows are not fully buffered.
If ``None`` or not set, the ``server_side_cursors`` option of the
:class:`.Engine` is used.
* ``max_row_buffer`` - when using ``stream_results``, an integer value that
specifies the maximum number of rows to buffer at a time. This is
interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the
buffer will grow to ultimately store 1000 rows at a time.
.. versionadded:: 1.0.6
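A minimal sketch of streaming a large result set (the table name and the
buffer size here are illustrative)::
    with engine.connect() as conn:
        result = conn.execution_options(
            stream_results=True, max_row_buffer=500).execute(
            "SELECT * FROM big_table")
        for row in result:
            print(row)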
.. _psycopg2_unicode:
Unicode with Psycopg2
----------------------
By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
extension, such that the DBAPI receives and returns all strings as Python
Unicode objects directly - SQLAlchemy passes these values through without
change. Psycopg2 here will encode/decode string values based on the
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf8``, as a more useful default::
# postgresql.conf file
# client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's
:meth:`psycopg2:connection.set_client_encoding` method
on all new connections based on the value passed to
:func:`.create_engine` using the ``client_encoding`` parameter::
# set_client_encoding() setting;
# works for *all* Postgresql versions
engine = create_engine("postgresql://user:pass@host/dbname",
client_encoding='utf8')
This overrides the encoding specified in the Postgresql client configuration.
When using the parameter in this way, the psycopg2 driver emits
``SET client_encoding TO 'utf8'`` on the connection explicitly, and works
in all Postgresql versions.
Note that the ``client_encoding`` setting as passed to :func:`.create_engine`
is **not the same** as the more recently added ``client_encoding`` parameter
now supported by libpq directly. This is enabled when ``client_encoding``
is passed directly to ``psycopg2.connect()``, and from SQLAlchemy is passed
using the :paramref:`.create_engine.connect_args` parameter::
# libpq direct parameter setting;
# only works for Postgresql **9.1 and above**
engine = create_engine("postgresql://user:pass@host/dbname",
connect_args={'client_encoding': 'utf8'})
# using the query string is equivalent
engine = create_engine("postgresql://user:pass@host/dbname?client_encoding=utf8")
The above parameter was only added to libpq as of version 9.1 of Postgresql,
so using the previous method is better for cross-version support.
.. _psycopg2_disable_native_unicode:
Disabling Native Unicode
^^^^^^^^^^^^^^^^^^^^^^^^
SQLAlchemy can also be instructed to skip the usage of the psycopg2
``UNICODE`` extension and to instead utilize its own unicode encode/decode
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False`` to
:func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
SQLAlchemy will instead encode data itself into Python bytestrings on the way
in and coerce from bytes on the way back,
using the value of the :func:`.create_engine` ``encoding`` parameter, which
defaults to ``utf-8``.
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as most DBAPIs now support unicode fully.
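A sketch of disabling the extension (connection values are placeholders)::
    engine = create_engine("postgresql+psycopg2://user:pass@host/dbname",
                           use_native_unicode=False)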
Bound Parameter Styles
----------------------
The default parameter style for the psycopg2 dialect is "pyformat", where
SQL is rendered using ``%(paramname)s`` style. This format has the limitation
that it does not accommodate the unusual case of parameter names that
actually contain percent or parenthesis symbols; as SQLAlchemy in many cases
generates bound parameter names based on the name of a column, the presence
of these characters in a column name can lead to problems.
There are two solutions to the issue of a :class:`.schema.Column` that contains
one of these characters in its name. One is to specify the
:paramref:`.schema.Column.key` for columns that have such names::
measurement = Table('measurement', metadata,
Column('Size (meters)', Integer, key='size_meters')
)
Above, an INSERT statement such as ``measurement.insert()`` will use
``size_meters`` as the parameter name, and a SQL expression such as
``measurement.c.size_meters > 10`` will derive the bound parameter name
from the ``size_meters`` key as well.
.. versionchanged:: 1.0.0 - SQL expressions will use :attr:`.Column.key`
as the source of naming when anonymous bound parameters are created
in SQL expressions; previously, this behavior only applied to
:meth:`.Table.insert` and :meth:`.Table.update` parameter names.
The other solution is to use a positional format; psycopg2 allows use of the
"format" paramstyle, which can be passed to
:paramref:`.create_engine.paramstyle`::
engine = create_engine(
'postgresql://scott:tiger@localhost:5432/test', paramstyle='format')
With the above engine, instead of a statement like::
INSERT INTO measurement ("Size (meters)") VALUES (%(Size (meters))s)
{'Size (meters)': 1}
we instead see::
INSERT INTO measurement ("Size (meters)") VALUES (%s)
(1, )
Where above, the dictionary style is converted into a tuple with positional
style.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
.. _psycopg2_isolation_level:
Psycopg2 Transaction Isolation Level
-------------------------------------
As discussed in :ref:`postgresql_isolation_level`,
all Postgresql dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to :func:`.create_engine`,
as well as the ``isolation_level`` argument used by
:meth:`.Connection.execution_options`. When using the psycopg2 dialect, these
options make use of psycopg2's ``set_isolation_level()`` connection method,
rather than emitting a Postgresql directive; this is because psycopg2's
API-level setting is always emitted at the start of each transaction in any
case.
The psycopg2 dialect supports these constants for isolation level:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
.. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using
psycopg2.
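For example (a sketch; the connection URL is a placeholder)::
    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
                           isolation_level="AUTOCOMMIT")
    # or per-connection:
    conn = engine.connect().execution_options(isolation_level="SERIALIZABLE")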
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`pg8000_isolation_level`
NOTICE logging
---------------
The psycopg2 dialect will log Postgresql NOTICE messages via the
``sqlalchemy.dialects.postgresql`` logger::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
.. _psycopg2_hstore::
HSTORE type
------------
The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of
the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
by default when psycopg2 version 2.4 or greater is used, and
it is detected that the target database has the HSTORE type set up for use.
In other words, when the dialect makes the first
connection, a sequence like the following is performed:
1. Request the available HSTORE oids using
``psycopg2.extras.HstoreAdapter.get_oids()``.
If this function returns a list of HSTORE identifiers, we then determine
that the ``HSTORE`` extension is present.
This function is **skipped** if the version of psycopg2 installed is
less than version 2.4.
2. If the ``use_native_hstore`` flag is at its default of ``True``, and
we've detected that ``HSTORE`` oids are available, the
``psycopg2.extensions.register_hstore()`` extension is invoked for all
connections.
The ``register_hstore()`` extension has the effect of **all Python
dictionaries being accepted as parameters regardless of the type of target
column in SQL**. The dictionaries are converted by this extension into a
textual HSTORE expression. If this behavior is not desired, disable the
use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
follows::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False)
The ``HSTORE`` type is **still supported** when the
``psycopg2.extensions.register_hstore()`` extension is not used. It merely
means that the coercion between Python dictionaries and the HSTORE
string format, on both the parameter side and the result side, will take
place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2``
which may be more performant.
"""
from __future__ import absolute_import
import re
import logging
from ... import util, exc
import decimal
from ... import processors
from ...engine import result as _result
from ...sql import expression
from ... import types as sqltypes
from .base import PGDialect, PGCompiler, \
PGIdentifierPreparer, PGExecutionContext, \
ENUM, _DECIMAL_TYPES, _FLOAT_TYPES,\
_INT_TYPES, UUID
from .hstore import HSTORE
from .json import JSON, JSONB
try:
from uuid import UUID as _python_UUID
except ImportError:
_python_UUID = None
logger = logging.getLogger('sqlalchemy.dialects.postgresql')
class _PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal,
self._effective_decimal_return_scale)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
class _PGEnum(ENUM):
def result_processor(self, dialect, coltype):
if self.native_enum and util.py2k and self.convert_unicode is True:
# we can't easily use PG's extensions here because
# the OID is on the fly, and we need to give it a python
# function anyway - not really worth it.
self.convert_unicode = "force_nocheck"
return super(_PGEnum, self).result_processor(dialect, coltype)
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).bind_processor(dialect)
def result_processor(self, dialect, coltype):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).result_processor(dialect, coltype)
class _PGJSON(JSON):
def result_processor(self, dialect, coltype):
if dialect._has_native_json:
return None
else:
return super(_PGJSON, self).result_processor(dialect, coltype)
class _PGJSONB(JSONB):
def result_processor(self, dialect, coltype):
if dialect._has_native_jsonb:
return None
else:
return super(_PGJSONB, self).result_processor(dialect, coltype)
class _PGUUID(UUID):
def bind_processor(self, dialect):
if not self.as_uuid and dialect.use_native_uuid:
nonetype = type(None)
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
def result_processor(self, dialect, coltype):
if not self.as_uuid and dialect.use_native_uuid:
def process(value):
if value is not None:
value = str(value)
return value
return process
# When we're handed literal SQL, ensure it's a SELECT query. Since
# 8.3, combining cursors and "FOR UPDATE" has been fine.
SERVER_SIDE_CURSOR_RE = re.compile(
r'\s*SELECT',
re.I | re.UNICODE)
_server_side_id = util.counter()
class PGExecutionContext_psycopg2(PGExecutionContext):
def create_cursor(self):
# TODO: coverage for server side cursors + select.for_update()
if self.dialect.server_side_cursors:
is_server_side = \
self.execution_options.get('stream_results', True) and (
(self.compiled and isinstance(self.compiled.statement,
expression.Selectable)
or
(
(not self.compiled or
isinstance(self.compiled.statement,
expression.TextClause))
and self.statement and SERVER_SIDE_CURSOR_RE.match(
self.statement))
)
)
else:
is_server_side = \
self.execution_options.get('stream_results', False)
self.__is_server_side = is_server_side
if is_server_side:
# use server-side cursors:
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
ident = "c_%s_%s" % (hex(id(self))[2:],
hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
else:
return self._dbapi_connection.cursor()
def get_result_proxy(self):
# TODO: ouch
if logger.isEnabledFor(logging.INFO):
self._log_notices(self.cursor)
if self.__is_server_side:
return _result.BufferedRowResultProxy(self)
else:
return _result.ResultProxy(self)
def _log_notices(self, cursor):
for notice in cursor.connection.notices:
# NOTICE messages have a
# newline character at the end
logger.info(notice.rstrip())
cursor.connection.notices[:] = []
class PGCompiler_psycopg2(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return self.process(binary.left, **kw) + " %% " + \
self.process(binary.right, **kw)
def post_process_text(self, text):
return text.replace('%', '%%')
class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace('%', '%%')
class PGDialect_psycopg2(PGDialect):
driver = 'psycopg2'
if util.py2k:
supports_unicode_statements = False
default_paramstyle = 'pyformat'
# set to true based on psycopg2 version
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
psycopg2_version = (0, 0)
FEATURE_VERSION_MAP = dict(
native_json=(2, 5),
native_jsonb=(2, 5, 4),
sane_multi_rowcount=(2, 0, 9),
array_oid=(2, 4, 3),
hstore_adapter=(2, 4)
)
_has_native_hstore = False
_has_native_json = False
_has_native_jsonb = False
engine_config_types = PGDialect.engine_config_types.union([
('use_native_unicode', util.asbool),
])
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: _PGNumeric,
ENUM: _PGEnum, # needs force_unicode
sqltypes.Enum: _PGEnum, # needs force_unicode
HSTORE: _PGHStore,
JSON: _PGJSON,
sqltypes.JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID
}
)
def __init__(self, server_side_cursors=False, use_native_unicode=True,
client_encoding=None,
use_native_hstore=True, use_native_uuid=True,
**kwargs):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
self.use_native_unicode = use_native_unicode
self.use_native_hstore = use_native_hstore
self.use_native_uuid = use_native_uuid
self.supports_unicode_binds = use_native_unicode
self.client_encoding = client_encoding
if self.dbapi and hasattr(self.dbapi, '__version__'):
m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
int(x)
for x in m.group(1, 2, 3)
if x is not None)
def initialize(self, connection):
super(PGDialect_psycopg2, self).initialize(connection)
self._has_native_hstore = self.use_native_hstore and \
self._hstore_oids(connection.connection) \
is not None
self._has_native_json = \
self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_json']
self._has_native_jsonb = \
self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_jsonb']
# http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
self.supports_sane_multi_rowcount = \
self.psycopg2_version >= \
self.FEATURE_VERSION_MAP['sane_multi_rowcount']
@classmethod
def dbapi(cls):
import psycopg2
return psycopg2
@classmethod
def _psycopg2_extensions(cls):
from psycopg2 import extensions
return extensions
@classmethod
def _psycopg2_extras(cls):
from psycopg2 import extras
return extras
@util.memoized_property
def _isolation_lookup(self):
extensions = self._psycopg2_extensions()
return {
'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT,
'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED,
'READ UNCOMMITTED': extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ,
'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE
}
def set_isolation_level(self, connection, level):
try:
level = self._isolation_lookup[level.replace('_', ' ')]
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
connection.set_isolation_level(level)
def on_connect(self):
extras = self._psycopg2_extras()
extensions = self._psycopg2_extensions()
fns = []
if self.client_encoding is not None:
def on_connect(conn):
conn.set_client_encoding(self.client_encoding)
fns.append(on_connect)
if self.isolation_level is not None:
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
fns.append(on_connect)
if self.dbapi and self.use_native_uuid:
def on_connect(conn):
extras.register_uuid(None, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_unicode:
def on_connect(conn):
extensions.register_type(extensions.UNICODE, conn)
extensions.register_type(extensions.UNICODEARRAY, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_hstore:
def on_connect(conn):
hstore_oids = self._hstore_oids(conn)
if hstore_oids is not None:
oid, array_oid = hstore_oids
kw = {'oid': oid}
if util.py2k:
kw['unicode'] = True
if self.psycopg2_version >= \
self.FEATURE_VERSION_MAP['array_oid']:
kw['array_oid'] = array_oid
extras.register_hstore(conn, **kw)
fns.append(on_connect)
if self.dbapi and self._json_deserializer:
def on_connect(conn):
if self._has_native_json:
extras.register_default_json(
conn, loads=self._json_deserializer)
if self._has_native_jsonb:
extras.register_default_jsonb(
conn, loads=self._json_deserializer)
fns.append(on_connect)
if fns:
def on_connect(conn):
for fn in fns:
fn(conn)
return on_connect
else:
return None
@util.memoized_instancemethod
def _hstore_oids(self, conn):
if self.psycopg2_version >= self.FEATURE_VERSION_MAP['hstore_adapter']:
extras = self._psycopg2_extras()
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
return oids[0:2]
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if 'port' in opts:
opts['port'] = int(opts['port'])
opts.update(url.query)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
# check the "closed" flag. this might not be
# present on old psycopg2 versions. Also,
# this flag doesn't actually help in a lot of disconnect
# situations, so don't rely on it.
if getattr(connection, 'closed', False):
return True
# checks based on strings. in the case that .closed
# didn't cut it, fall back onto these.
str_e = str(e).partition("\n")[0]
for msg in [
# these error messages from libpq: interfaces/libpq/fe-misc.c
# and interfaces/libpq/fe-secure.c.
'terminating connection',
'closed the connection',
'connection not open',
'could not receive data from server',
'could not send data to server',
                # psycopg2 client errors, psycopg2/connection.h,
# psycopg2/cursor.h
'connection already closed',
'cursor already closed',
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
'losed the connection unexpectedly',
# these can occur in newer SSL
'connection has been closed unexpectedly',
'SSL SYSCALL error: Bad file descriptor',
'SSL SYSCALL error: EOF detected',
]:
idx = str_e.find(msg)
if idx >= 0 and '"' not in str_e[:idx]:
return True
return False
dialect = PGDialect_psycopg2
| mit | 5,201,154,178,368,682,000 | 36.114168 | 85 | 0.641687 | false |
bhaugen/nova | distribution/view_helpers.py | 1 | 28699 | from decimal import *
import datetime
from operator import attrgetter
from django.forms.formsets import formset_factory
from django.contrib.sites.models import Site
from models import *
from forms import *
try:
from notification import models as notification
except ImportError:
notification = None
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def create_pricing_masterboard_forms(delivery_date, data=None):
fn = food_network()
forms = []
pricing_objects = fn.producer_product_prices_for_date(delivery_date)
for pp in pricing_objects:
content_type = pp.__class__.__name__
prefix = "".join([content_type, str(pp.id)])
form = PricingMasterboardForm(
prefix = prefix,
data=data,
initial={
'id': pp.id,
'producer_id': pp.producer.id,
'product_id': pp.product.id,
'producer_price': pp.decide_producer_price(),
'producer_fee': pp.decide_producer_fee(),
'pay_price': pp.compute_pay_price(),
'markup_percent': pp.decide_markup(),
'selling_price': pp.compute_selling_price(),
'content_type': content_type,
}
)
form.product = pp.product.name_with_method()
form.producer = pp.producer
changes = ""
if isinstance(pp, ProducerPriceChange):
changes = pp.what_changed()
form.changes = changes
forms.append(form)
return forms
def create_producer_product_price_forms(product, data=None):
pps = product.product_producers.all()
form_list = []
for pp in pps:
prefix = "-".join(["PP", str(pp.id)])
form = ProducerProductPriceForm(data, prefix=prefix, instance=pp)
form.producer = pp.producer.short_name
form_list.append(form)
return form_list
def create_order_item_price_forms(product, data=None):
items = OrderItem.objects.filter(
product=product,
).exclude(order__state__contains="Paid").exclude(order__state="Unsubmitted")
form_list = []
for item in items:
prefix = "-".join(["OI", str(item.id)])
form = OrderItemPriceForm(data, prefix=prefix, instance=item)
form.order = item.order
form.producer = item.producer
form_list.append(form)
return form_list
def create_inventory_item_price_forms(product, data=None):
items = InventoryItem.objects.filter(
product=product,
remaining__gt=0,
)
form_list = []
for item in items:
prefix = "-".join(["II", str(item.id)])
form = InventoryItemPriceForm(data, prefix=prefix, instance=item)
form.lot = item.lot_id
form_list.append(form)
return form_list
def weekly_production_plans(week_date):
monday = week_date - datetime.timedelta(days=datetime.date.weekday(week_date))
saturday = monday + datetime.timedelta(days=5)
plans = ProductPlan.objects.select_related(depth=1).filter(
role="producer",
from_date__lte=week_date,
to_date__gte=saturday)
for plan in plans:
plan.category = plan.product.parent_string()
plan.product_name = plan.product.short_name
plans = sorted(plans, key=attrgetter('category',
'product_name'))
return plans
def plan_columns(from_date, to_date):
columns = []
wkdate = from_date
while wkdate <= to_date:
columns.append(wkdate.strftime('%Y-%m-%d'))
wkdate = wkdate + datetime.timedelta(days=7)
return columns
def sd_columns(from_date, to_date):
columns = []
wkdate = from_date
while wkdate <= to_date:
columns.append(wkdate.strftime('%Y_%m_%d'))
wkdate = wkdate + datetime.timedelta(days=7)
return columns
# should plan_weeks go to the view and include headings?
# somebody needs headings!
def create_weekly_plan_forms(rows, data=None):
form_list = []
PlanCellFormSet = formset_factory(PlanCellForm, extra=0)
for row in rows:
product = row[0]
row_form = PlanRowForm(data, prefix=product.id, initial={'product_id': product.id})
row_form.product = product.long_name
cells = row[1:len(row)]
initial_data = []
for cell in cells:
plan_id = ""
if cell.plan:
plan_id = cell.plan.id
dict = {
'plan_id': plan_id,
'product_id': cell.product.id,
'from_date': cell.from_date,
'to_date': cell.to_date,
'quantity': cell.quantity,
}
initial_data.append(dict)
row_form.formset = PlanCellFormSet(data, prefix=product.id, initial=initial_data)
form_list.append(row_form)
return form_list
class SupplyDemandTable(object):
def __init__(self, columns, rows):
self.columns = columns
self.rows = rows
def supply_demand_table(from_date, to_date, member=None):
plans = ProductPlan.objects.all()
cps = ProducerProduct.objects.filter(
inventoried=False,
default_avail_qty__gt=0,
)
constants = {}
for cp in cps:
constants.setdefault(cp.product, Decimal("0"))
constants[cp.product] += cp.default_avail_qty
if member:
plans = plans.filter(member=member)
rows = {}
for plan in plans:
wkdate = from_date
product = plan.product.supply_demand_product()
constant = Decimal('0')
cp = constants.get(product)
if cp:
constant = cp
row = []
while wkdate <= to_date:
row.append(constant)
wkdate = wkdate + datetime.timedelta(days=7)
row.insert(0, product)
rows.setdefault(product, row)
wkdate = from_date
week = 0
while wkdate <= to_date:
if plan.from_date <= wkdate and plan.to_date >= wkdate:
if plan.role == "producer":
rows[product][week + 1] += plan.quantity
else:
rows[product][week + 1] -= plan.quantity
wkdate = wkdate + datetime.timedelta(days=7)
week += 1
label = "Product/Weeks"
columns = [label]
wkdate = from_date
while wkdate <= to_date:
columns.append(wkdate)
wkdate = wkdate + datetime.timedelta(days=7)
rows = rows.values()
rows.sort(lambda x, y: cmp(x[0].short_name, y[0].short_name))
sdtable = SupplyDemandTable(columns, rows)
return sdtable
def supply_demand_rows(from_date, to_date, member=None):
plans = ProductPlan.objects.select_related(depth=1).all()
cps = ProducerProduct.objects.filter(
inventoried=False,
default_avail_qty__gt=0,
)
constants = {}
rows = {}
#import pdb; pdb.set_trace()
#todo: what if some NIPs and some inventoried for same product?
    #does the code allow for that?
for cp in cps:
constants.setdefault(cp.product, Decimal("0"))
constant = cp.default_avail_qty
product = cp.product
constants[product] += constant
row = {}
row["product"] = product.long_name
row["id"] = product.id
rows.setdefault(product, row)
wkdate = from_date
while wkdate <= to_date:
row[wkdate.strftime('%Y_%m_%d')] = str(constant)
wkdate = wkdate + datetime.timedelta(days=7)
if member:
plans = plans.filter(member=member)
#todo:
# spread storage items over many weeks
# if plan.product expiration_days > 1 week:
# spread remainder over weeks until consumed or expired.
# means plannable parents cd determine expiration.
# may require another pass thru storage plans...
for plan in plans:
wkdate = from_date
#this is too slow:
#product = plan.product.supply_demand_product()
product = plan.product
#constant = Decimal('0')
#constant = ""
#cp = constants.get(product)
#if cp:
# constant = str(cp)
row = {}
#while wkdate <= to_date:
# row[wkdate.strftime('%Y_%m_%d')] = str(constant)
# wkdate = wkdate + datetime.timedelta(days=7)
row["product"] = product.long_name
row["id"] = product.id
rows.setdefault(product, row)
#import pdb; pdb.set_trace()
wkdate = from_date
while wkdate <= to_date:
if plan.from_date <= wkdate and plan.to_date >= wkdate:
key = wkdate.strftime('%Y_%m_%d')
try:
value = rows[product][key]
except KeyError:
value = Decimal("0")
if value == "":
value = Decimal("0")
else:
value = Decimal(value)
if plan.role == "producer":
value += plan.quantity
else:
value -= plan.quantity
rows[product][key] = str(value)
wkdate = wkdate + datetime.timedelta(days=7)
rows = rows.values()
rows.sort(lambda x, y: cmp(x["product"], y["product"]))
return rows
def supply_demand_weekly_table(week_date):
plans = ProductPlan.objects.filter(
from_date__lte=week_date,
to_date__gte=week_date,
).order_by("-role", "member__short_name")
columns = []
rows = {}
cps = ProducerProduct.objects.filter(
inventoried=False,
default_avail_qty__gt=0,
)
for cp in cps:
if not cp.producer in columns:
columns.append(cp.producer)
for plan in plans:
if not plan.member in columns:
columns.append(plan.member)
columns.insert(0, "Product\Member")
columns.append("Balance")
for cp in cps:
if not rows.get(cp.product):
row = []
for i in range(0, len(columns)-1):
row.append(Decimal("0"))
row.insert(0, cp.product)
rows[cp.product] = row
rows[cp.product][columns.index(cp.producer)] += cp.default_avail_qty
rows[cp.product][len(columns)-1] += cp.default_avail_qty
for plan in plans:
if not rows.get(plan.product):
row = []
for i in range(0, len(columns)-1):
row.append(Decimal("0"))
row.insert(0, plan.product)
rows[plan.product] = row
if plan.role == "producer":
rows[plan.product][columns.index(plan.member)] += plan.quantity
rows[plan.product][len(columns)-1] += plan.quantity
else:
rows[plan.product][columns.index(plan.member)] -= plan.quantity
rows[plan.product][len(columns)-1] -= plan.quantity
rows = rows.values()
rows.sort(lambda x, y: cmp(x[0].short_name, y[0].short_name))
sdtable = SupplyDemandTable(columns, rows)
return sdtable
def dojo_supply_demand_weekly_table(week_date):
plans = ProductPlan.objects.filter(
from_date__lte=week_date,
to_date__gte=week_date,
).order_by("-role", "member__short_name")
# for columns: product, member.short_name(s), balance
# but only members are needed here...product and balance can be added in
# template
# for rows: dictionaries with the above keys
columns = []
rows = {}
cps = ProducerProduct.objects.filter(
inventoried=False,
default_avail_qty__gt=0,
)
for cp in cps:
if not cp.producer in columns:
columns.append(cp.producer.short_name)
for plan in plans:
if not plan.member.short_name in columns:
columns.append(plan.member.short_name)
columns.append("Balance")
for cp in cps:
if not rows.get(cp.product):
row = {}
for column in columns:
row[column] = 0
row["product"] = cp.product.long_name
row["id"] = cp.product.id
row["Balance"] = 0
rows[cp.product] = row
rows[cp.product][cp.producer.short_name] += int(cp.default_avail_qty)
rows[cp.product]["Balance"] += int(cp.default_avail_qty)
for plan in plans:
if not rows.get(plan.product):
row = {}
for column in columns:
row[column] = 0
row["product"] = plan.product.long_name
row["id"] = plan.product.id
row["Balance"] = 0
rows[plan.product] = row
if plan.role == "producer":
rows[plan.product][plan.member.short_name] += int(plan.quantity)
rows[plan.product]["Balance"] += int(plan.quantity)
else:
rows[plan.product][plan.member.short_name] -= int(plan.quantity)
rows[plan.product]["Balance"] -= int(plan.quantity)
rows = rows.values()
rows.sort(lambda x, y: cmp(x["product"], y["product"]))
sdtable = SupplyDemandTable(columns, rows)
return sdtable
class SuppliableDemandCell(object):
def __init__(self, supply, demand):
self.supply = supply
self.demand = demand
def suppliable(self):
answer = Decimal("0")
if self.supply and self.demand:
if self.supply > self.demand:
answer = self.demand
else:
answer = self.supply
return answer
def suppliable_demand(from_date, to_date, member=None):
#import pdb; pdb.set_trace()
plans = ProductPlan.objects.all()
if member:
plans = plans.filter(member=member)
rows = {}
for plan in plans:
wkdate = from_date
row = []
while wkdate <= to_date:
row.append(SuppliableDemandCell(Decimal("0"), Decimal("0")))
wkdate = wkdate + datetime.timedelta(days=7)
product = plan.product.supply_demand_product()
row.insert(0, product)
rows.setdefault(product, row)
wkdate = from_date
week = 0
while wkdate <= to_date:
if plan.from_date <= wkdate and plan.to_date >= wkdate:
if plan.role == "producer":
rows[product][week + 1].supply += plan.quantity
else:
rows[product][week + 1].demand += plan.quantity
wkdate = wkdate + datetime.timedelta(days=7)
week += 1
rows = rows.values()
cust_fee = customer_fee()/100
producer_fee = default_producer_fee()/100
for row in rows:
for x in range(1, len(row)):
sd = row[x].suppliable()
if sd >= 0:
income = sd * row[0].selling_price
row[x] = income
else:
row[x] = Decimal("0")
income_rows = []
for row in rows:
base = Decimal("0")
total = Decimal("0")
for x in range(1, len(row)):
cell = row[x]
base += cell
cell += cell * cust_fee
total += cell
row[x] = cell.quantize(Decimal('.1'), rounding=ROUND_UP)
if total:
net = base * cust_fee + (base * producer_fee)
net = net.quantize(Decimal('1.'), rounding=ROUND_UP)
total = total.quantize(Decimal('1.'), rounding=ROUND_UP)
row.append(total)
row.append(net)
income_rows.append(row)
label = "Item\Weeks"
columns = [label]
wkdate = from_date
while wkdate <= to_date:
columns.append(wkdate)
wkdate = wkdate + datetime.timedelta(days=7)
columns.append("Total")
columns.append("Net")
income_rows.sort(lambda x, y: cmp(x[0].long_name, y[0].short_name))
sdtable = SupplyDemandTable(columns, income_rows)
return sdtable
#todo: does not use constants (NIPs)
#or correct logic for storage items
def json_income_rows(from_date, to_date, member=None):
#import pdb; pdb.set_trace()
plans = ProductPlan.objects.all()
if member:
plans = plans.filter(member=member)
rows = {}
for plan in plans:
wkdate = from_date
row = {}
while wkdate <= to_date:
row[wkdate.strftime('%Y_%m_%d')] = SuppliableDemandCell(Decimal("0"), Decimal("0"))
wkdate = wkdate + datetime.timedelta(days=7)
product = plan.product.supply_demand_product()
row["product"] = product.long_name
row["id"] = product.id
row["price"] = product.selling_price
rows.setdefault(product, row)
wkdate = from_date
while wkdate <= to_date:
key = wkdate.strftime('%Y_%m_%d')
if plan.from_date <= wkdate and plan.to_date >= wkdate:
if plan.role == "producer":
rows[product][key].supply += plan.quantity
else:
rows[product][key].demand += plan.quantity
wkdate = wkdate + datetime.timedelta(days=7)
rows = rows.values()
cust_fee = customer_fee()/100
producer_fee = default_producer_fee()/100
#import pdb; pdb.set_trace()
for row in rows:
wkdate = from_date
while wkdate <= to_date:
key = wkdate.strftime('%Y_%m_%d')
sd = row[key].suppliable()
if sd > 0:
income = sd * row["price"]
row[key] = income
else:
row[key] = Decimal("0")
wkdate = wkdate + datetime.timedelta(days=7)
income_rows = []
for row in rows:
base = Decimal("0")
total = Decimal("0")
wkdate = from_date
while wkdate <= to_date:
key = wkdate.strftime('%Y_%m_%d')
cell = row[key]
base += cell
cell += cell * cust_fee
total += cell
row[key] = str(cell.quantize(Decimal('.1'), rounding=ROUND_UP))
wkdate = wkdate + datetime.timedelta(days=7)
if total:
net = base * cust_fee + (base * producer_fee)
net = net.quantize(Decimal('1.'), rounding=ROUND_UP)
total = total.quantize(Decimal('1.'), rounding=ROUND_UP)
row["total"] = str(total)
row["net"] = str(net)
row["price"] = str(row["price"])
income_rows.append(row)
income_rows.sort(lambda x, y: cmp(x["product"], y["product"]))
return income_rows
class PlannedWeek(object):
def __init__(self, product, from_date, to_date, quantity):
self.product = product
self.from_date = from_date
self.to_date = to_date
self.quantity = quantity
self.plan = None
def plan_weeks(member, products, from_date, to_date):
plans = ProductPlan.objects.filter(member=member)
#if member.is_customer():
# products = CustomerProduct.objects.filter(customer=member, planned=True)
#else:
# products = ProducerProduct.objects.filter(producer=member, planned=True)
#if not products:
# products = Product.objects.filter(plannable=True)
rows = {}
for pp in products:
try:
product = pp.product
except:
product = pp
wkdate = from_date
row = [product]
while wkdate <= to_date:
enddate = wkdate + datetime.timedelta(days=6)
row.append(PlannedWeek(product, wkdate, enddate, Decimal("0")))
wkdate = enddate + datetime.timedelta(days=1)
#row.insert(0, product)
rows.setdefault(product, row)
for plan in plans:
product = plan.product
wkdate = from_date
week = 0
while wkdate <= to_date:
enddate = wkdate + datetime.timedelta(days=6)
if plan.from_date <= wkdate and plan.to_date >= wkdate:
rows[product][week + 1].quantity = plan.quantity
rows[product][week + 1].plan = plan
wkdate = wkdate + datetime.timedelta(days=7)
week += 1
label = "Product/Weeks"
columns = [label]
wkdate = from_date
while wkdate <= to_date:
columns.append(wkdate)
wkdate = wkdate + datetime.timedelta(days=7)
rows = rows.values()
rows.sort(lambda x, y: cmp(x[0].short_name, y[0].short_name))
sdtable = SupplyDemandTable(columns, rows)
return sdtable
def plans_for_dojo(member, products, from_date, to_date):
#import pdb; pdb.set_trace()
plans = ProductPlan.objects.filter(member=member)
rows = {}
for pp in products:
yearly = 0
try:
product = pp.product
yearly = pp.qty_per_year
except:
product = pp
if not yearly:
try:
pp = ProducerProduct.objects.get(producer=member, product=product)
yearly = pp.qty_per_year
except:
pass
wkdate = from_date
row = {}
row["product"] = product.long_name
row["yearly"] = int(yearly)
row["id"] = product.id
row["member_id"] = member.id
row["from_date"] = from_date.strftime('%Y-%m-%d')
row["to_date"] = to_date.strftime('%Y-%m-%d')
while wkdate <= to_date:
enddate = wkdate + datetime.timedelta(days=6)
row[wkdate.strftime('%Y-%m-%d')] = "0"
wkdate = enddate + datetime.timedelta(days=1)
rows.setdefault(product, row)
#import pdb; pdb.set_trace()
for plan in plans:
product = plan.product
wkdate = from_date
week = 0
while wkdate <= to_date:
enddate = wkdate + datetime.timedelta(days=6)
if plan.from_date <= wkdate and plan.to_date >= wkdate:
rows[product][wkdate.strftime('%Y-%m-%d')] = str(plan.quantity)
rows[product][":".join([wkdate.strftime('%Y-%m-%d'), "plan_id"])] = plan.id
wkdate = wkdate + datetime.timedelta(days=7)
week += 1
rows = rows.values()
rows.sort(lambda x, y: cmp(x["product"], y["product"]))
return rows
def create_all_inventory_item_forms(avail_date, plans, items, data=None):
item_dict = {}
for item in items:
# This means one lot per producer per product per week
item_dict["-".join([str(item.product.id), str(item.producer.id)])] = item
form_list = []
for plan in plans:
#import pdb; pdb.set_trace()
custodian_id = ""
try:
member = plan.member
except:
member = plan.producer
try:
item = item_dict["-".join([str(plan.product.id),
str(member.id)])]
if item.custodian:
custodian_id = item.custodian.id
except KeyError:
item = False
try:
plan_qty = plan.quantity
except:
plan_qty = 0
#import pdb; pdb.set_trace()
if item:
pref = "-".join(["item", str(item.id)])
the_form = AllInventoryItemForm(data, prefix=pref, initial={
'item_id': item.id,
'product_id': item.product.id,
'producer_id': item.producer.id,
'freeform_lot_id': item.freeform_lot_id,
'field_id': item.field_id,
'custodian': custodian_id,
'inventory_date': item.inventory_date,
'expiration_date': item.expiration_date,
'planned': item.planned,
'received': item.received,
'notes': item.notes})
else:
pref = "-".join(["plan", str(plan.id)])
expiration_date = avail_date + datetime.timedelta(days=plan.product.expiration_days)
the_form = AllInventoryItemForm(data, prefix=pref, initial={
'item_id': 0,
'product_id': plan.product.id,
'producer_id': member.id,
'inventory_date': avail_date,
'expiration_date': expiration_date,
'planned': 0,
'received': 0,
'notes': ''})
the_form.description = plan.product.long_name
the_form.producer = member.short_name
the_form.plan_qty = plan_qty
form_list.append(the_form)
#import pdb; pdb.set_trace()
#form_list.sort(lambda x, y: cmp(x.producer, y.producer))
form_list = sorted(form_list, key=attrgetter('producer', 'description'))
return form_list
def create_delivery_cycle_selection_forms(data=None):
dcs = DeliveryCycle.objects.all()
form_list = []
for dc in dcs:
form = DeliveryCycleSelectionForm(data, prefix=dc.id)
form.cycle = dc
form.delivery_date = dc.next_delivery_date_using_closing()
form_list.append(form)
return form_list
def create_avail_item_forms(avail_date, data=None):
fn = food_network()
items = fn.avail_items_for_customer(avail_date)
form_list = []
for item in items:
pref = "-".join(["item", str(item.id)])
the_form = AvailableItemForm(data, prefix=pref, initial={
'item_id': item.id,
'inventory_date': item.inventory_date,
'expiration_date': item.expiration_date,
'quantity': item.avail_qty(),
})
the_form.description = item.product.name_with_method()
the_form.producer = item.producer.short_name
the_form.ordered = item.product.total_ordered_for_timespan(
item.inventory_date, item.expiration_date)
form_list.append(the_form)
form_list = sorted(form_list, key=attrgetter('description', 'producer'))
return form_list
def send_avail_emails(cycle):
fn = food_network()
food_network_name = fn.long_name
delivery_date = cycle.next_delivery_date_using_closing()
fresh_list = fn.email_availability(delivery_date)
users = []
for customer in cycle.customers.all():
users.append(customer)
for contact in customer.contacts.all():
if contact.email != customer.email:
users.append(contact)
oc = fn.order_contact()
if oc:
users.append(oc)
if fn.email != oc.email:
if fn.email:
users.append(fn)
users = list(set(users))
intro = avail_email_intro()
domain = Site.objects.get_current().domain
notification.send(users, "distribution_fresh_list", {
"intro": intro.message,
"domain": domain,
"fresh_list": fresh_list,
"delivery_date": delivery_date,
"food_network_name": food_network_name,
"cycle": cycle,
})
def create_order_item_forms_by_producer(order, delivery_date, data=None):
form_list = []
item_dict = {}
items = []
if order:
items = order.orderitem_set.all()
for item in items:
key = "-".join([str(item.product.id), str(item.producer.id)])
item_dict[key] = item
fn = food_network()
avail = fn.staff_availability_by_producer(delivery_date)
for prod in avail:
totavail = prod.avail
totordered = prod.ordered
producer = prod.producer
key = "-".join([str(prod.product.id), str(prod.producer.id)])
item = item_dict.get(key)
if item:
initial_data = {
'product_id': prod.product.id,
'producer_id': prod.producer.id,
'avail': totavail,
'unit_price': item.formatted_unit_price(),
'ordered': totordered,
}
prefix = "".join([str(item.product.id), str(item.producer.id)])
oiform = OrderItemForm(data, prefix=prefix, instance=item,
initial=initial_data)
oiform.producer = producer
oiform.description = prod.product.long_name
oiform.parents = prod.category
oiform.growing_method = prod.product.growing_method
form_list.append(oiform)
else:
#fee = prod.decide_fee()
prefix = "".join([str(prod.product.id), str(prod.producer.id)])
oiform = OrderItemForm(data, prefix=prefix, initial={
'product_id': prod.product.id,
'producer_id': prod.producer.id,
'avail': totavail,
'ordered': totordered,
'unit_price': prod.price,
#'fee': fee,
'quantity': 0})
oiform.description = prod.product.long_name
oiform.producer = producer
oiform.parents = prod.category
oiform.growing_method = prod.product.growing_method
form_list.append(oiform)
return form_list
| mit | -3,354,612,000,780,010,000 | 35.327848 | 96 | 0.559671 | false |
tensorflow/models | official/vision/beta/projects/movinet/configs/movinet.py | 1 | 4058 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for MoViNet structures.
Reference: "MoViNets: Mobile Video Networks for Efficient Video Recognition"
https://arxiv.org/pdf/2103.11511.pdf
MoViNets are efficient video classification networks that are part of a model
family, ranging from the smallest model, MoViNet-A0, to the largest model,
MoViNet-A6. Each model has various width, depth, input resolution, and input
frame-rate associated with them. See the main paper for more details.
"""
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.vision.beta.configs import backbones_3d
from official.vision.beta.configs import common
from official.vision.beta.configs import video_classification
@dataclasses.dataclass
class Movinet(hyperparams.Config):
"""Backbone config for Base MoViNet."""
model_id: str = 'a0'
causal: bool = False
use_positional_encoding: bool = False
# Choose from ['3d', '2plus1d', '3d_2plus1d']
# 3d: default 3D convolution
# 2plus1d: (2+1)D convolution with Conv2D (2D reshaping)
# 3d_2plus1d: (2+1)D convolution with Conv3D (no 2D reshaping)
conv_type: str = '3d'
activation: str = 'swish'
gating_activation: str = 'sigmoid'
stochastic_depth_drop_rate: float = 0.2
use_external_states: bool = False
@dataclasses.dataclass
class MovinetA0(Movinet):
"""Backbone config for MoViNet-A0.
Represents the smallest base MoViNet searched by NAS.
Reference: https://arxiv.org/pdf/2103.11511.pdf
"""
model_id: str = 'a0'
@dataclasses.dataclass
class MovinetA1(Movinet):
"""Backbone config for MoViNet-A1."""
model_id: str = 'a1'
@dataclasses.dataclass
class MovinetA2(Movinet):
"""Backbone config for MoViNet-A2."""
model_id: str = 'a2'
@dataclasses.dataclass
class MovinetA3(Movinet):
"""Backbone config for MoViNet-A3."""
model_id: str = 'a3'
@dataclasses.dataclass
class MovinetA4(Movinet):
"""Backbone config for MoViNet-A4."""
model_id: str = 'a4'
@dataclasses.dataclass
class MovinetA5(Movinet):
"""Backbone config for MoViNet-A5.
Represents the largest base MoViNet searched by NAS.
"""
model_id: str = 'a5'
@dataclasses.dataclass
class MovinetT0(Movinet):
"""Backbone config for MoViNet-T0.
MoViNet-T0 is a smaller version of MoViNet-A0 for even faster processing.
"""
model_id: str = 't0'
@dataclasses.dataclass
class Backbone3D(backbones_3d.Backbone3D):
"""Configuration for backbones.
Attributes:
    type: 'str', type of backbone to be used, one of the fields below.
movinet: movinet backbone config.
"""
type: str = 'movinet'
movinet: Movinet = Movinet()
@dataclasses.dataclass
class MovinetModel(video_classification.VideoClassificationModel):
"""The MoViNet model config."""
model_type: str = 'movinet'
backbone: Backbone3D = Backbone3D()
norm_activation: common.NormActivation = common.NormActivation(
activation='swish',
norm_momentum=0.99,
norm_epsilon=1e-3,
use_sync_bn=True)
output_states: bool = False
@exp_factory.register_config_factory('movinet_kinetics600')
def movinet_kinetics600() -> cfg.ExperimentConfig:
"""Video classification on Videonet with MoViNet backbone."""
exp = video_classification.video_classification_kinetics600()
exp.task.train_data.dtype = 'bfloat16'
exp.task.validation_data.dtype = 'bfloat16'
model = MovinetModel()
exp.task.model = model
return exp
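# Minimal lookup sketch (illustrative; assumes the Model Garden accessor
# `exp_factory.get_exp_config` for registered experiment names):
#   config = exp_factory.get_exp_config('movinet_kinetics600')
#   assert config.task.model.model_type == 'movinet'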
| apache-2.0 | -9,146,474,159,486,402,000 | 27.780142 | 77 | 0.73928 | false |
andela/codango | codango/resources/models.py | 1 | 1889 | from cloudinary.models import CloudinaryField
from community.models import Community
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
class Resource(models.Model):
LANGUAGE_TAGS = (
('PYTHON', 'Python'),
('RUBY', 'Ruby'),
('ANDROID', 'Android'),
('MARKUP', 'HTML/CSS'),
('JAVA', 'Java'),
('PHP', 'PHP'),
('IOS', 'IOS'),
('JAVASCRIPT', 'Javascript'),
('C', 'C')
)
author = models.ForeignKey(User)
community = models.ForeignKey(Community, blank=True, null=True,
related_name='resources')
text = models.TextField(null=True, blank=False)
language_tags = models.CharField(
max_length=30, choices=LANGUAGE_TAGS, default='Untagged')
resource_file = CloudinaryField(
'resource_file', null=True, blank=True)
resource_file_name = models.CharField(max_length=100, null=True)
resource_file_size = models.IntegerField(default=0)
snippet_text = models.TextField(null=True, blank=True)
date_added = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
def __str__(self):
return self.text
def get_absolute_url(self):
return reverse('detail', args=[str(self.id)])
def upvotes(self):
liked_ids = [
vote.user.id for vote in self.votes.all() if vote.vote is True]
return liked_ids
def downvotes(self):
unliked_ids = [
vote.user.id for vote in self.votes.all() if vote.vote is False]
return unliked_ids
class NotificationQueue(models.Model):
user = models.ForeignKey(User)
notification_type = models.CharField(max_length=20)
first_interaction = models.CharField(max_length=20)
count = models.IntegerField(default=0)
| mit | -2,677,900,237,199,745,000 | 31.016949 | 76 | 0.637374 | false |
TribeMedia/synapse | synapse/storage/client_ips.py | 2 | 4803 | # -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from twisted.internet import defer
from ._base import Cache
from . import background_updates
logger = logging.getLogger(__name__)
# Number of msec of granularity to store the user IP 'last seen' time. Smaller
# times give more inserts into the database even for readonly API hits
# 120 seconds == 2 minutes
LAST_SEEN_GRANULARITY = 120 * 1000
class ClientIpStore(background_updates.BackgroundUpdateStore):
def __init__(self, hs):
self.client_ip_last_seen = Cache(
name="client_ip_last_seen",
keylen=4,
)
super(ClientIpStore, self).__init__(hs)
self.register_background_index_update(
"user_ips_device_index",
index_name="user_ips_device_id",
table="user_ips",
columns=["user_id", "device_id", "last_seen"],
)
@defer.inlineCallbacks
def insert_client_ip(self, user, access_token, ip, user_agent, device_id):
now = int(self._clock.time_msec())
key = (user.to_string(), access_token, ip)
try:
last_seen = self.client_ip_last_seen.get(key)
except KeyError:
last_seen = None
# Rate-limited inserts
if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
defer.returnValue(None)
self.client_ip_last_seen.prefill(key, now)
# It's safe not to lock here: a) no unique constraint,
# b) LAST_SEEN_GRANULARITY makes concurrent updates incredibly unlikely
yield self._simple_upsert(
"user_ips",
keyvalues={
"user_id": user.to_string(),
"access_token": access_token,
"ip": ip,
"user_agent": user_agent,
"device_id": device_id,
},
values={
"last_seen": now,
},
desc="insert_client_ip",
lock=False,
)
@defer.inlineCallbacks
def get_last_client_ip_by_device(self, devices):
"""For each device_id listed, give the user_ip it was last seen on
Args:
devices (iterable[(str, str)]): list of (user_id, device_id) pairs
Returns:
defer.Deferred: resolves to a dict, where the keys
are (user_id, device_id) tuples. The values are also dicts, with
keys giving the column names
"""
res = yield self.runInteraction(
"get_last_client_ip_by_device",
self._get_last_client_ip_by_device_txn,
retcols=(
"user_id",
"access_token",
"ip",
"user_agent",
"device_id",
"last_seen",
),
devices=devices
)
ret = {(d["user_id"], d["device_id"]): d for d in res}
defer.returnValue(ret)
@classmethod
def _get_last_client_ip_by_device_txn(cls, txn, devices, retcols):
where_clauses = []
bindings = []
for (user_id, device_id) in devices:
if device_id is None:
where_clauses.append("(user_id = ? AND device_id IS NULL)")
bindings.extend((user_id, ))
else:
where_clauses.append("(user_id = ? AND device_id = ?)")
bindings.extend((user_id, device_id))
inner_select = (
"SELECT MAX(last_seen) mls, user_id, device_id FROM user_ips "
"WHERE %(where)s "
"GROUP BY user_id, device_id"
) % {
"where": " OR ".join(where_clauses),
}
sql = (
"SELECT %(retcols)s FROM user_ips "
"JOIN (%(inner_select)s) ips ON"
" user_ips.last_seen = ips.mls AND"
" user_ips.user_id = ips.user_id AND"
" (user_ips.device_id = ips.device_id OR"
" (user_ips.device_id IS NULL AND ips.device_id IS NULL)"
" )"
) % {
"retcols": ",".join("user_ips." + c for c in retcols),
"inner_select": inner_select,
}
txn.execute(sql, bindings)
return cls.cursor_to_dict(txn)
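# Illustrative shape of get_last_client_ip_by_device's result (values invented):
#   {("@alice:example.com", "ABCDEFG"): {
#       "user_id": "@alice:example.com", "device_id": "ABCDEFG",
#       "access_token": "...", "ip": "10.0.0.1",
#       "user_agent": "Mozilla/5.0", "last_seen": 1500000000000}}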
| apache-2.0 | 1,665,801,080,157,915,600 | 32.124138 | 79 | 0.553196 | false |
openspending/ckanext-budgets | ckanext/budgets/controllers.py | 1 | 2310 | import json
import ckan.plugins.toolkit as toolkit
import ckan.model
import pylons
import dateutil.parser
from budgetdatapackage import BudgetDataPackage, BudgetResource
import logging
log = logging.getLogger(__name__)
class BudgetDataPackageController(toolkit.BaseController):
def descriptor(self, id, resource_id):
# Set context
context = {'model': ckan.model, 'session': ckan.model.Session,
'user': pylons.c.user or pylons.c.author}
# Get package
package = toolkit.get_action('package_show')(
context, {'id': id})
# Get resource
resource = toolkit.get_action('resource_show')(
context, {'id': resource_id})
if not resource.get('BudgetDataPackage', False):
raise toolkit.ObjectNotFound(
'No descriptor file for this resource')
# If last modified hasn't been set we set it as time of creation
last_modified = resource.get('last_modified')
if last_modified is None:
last_modified = resource['created']
# Create the budget data package resource
bdgt_resource = BudgetResource(
name=resource['id'],
schema=resource['schema'],
url=resource['url'],
currency=resource['currency'],
fiscalYear=resource['year'],
granularity=resource['granularity'],
type=resource['type'],
status=resource['status'],
datePublished=dateutil.parser.parse(
resource['created']).date().isoformat(),
dateLastUpdated=dateutil.parser.parse(
last_modified).date().isoformat(),
location=resource['country'])
# datapackage_uri and is_local are added but not needed
# so we clean up our budget resource
del bdgt_resource['datapackage_uri']
del bdgt_resource['is_local']
# Create the budget data package
bdpkg = BudgetDataPackage(
name=id,
title=package['title'],
description=package['notes'],
resources=[bdgt_resource]
)
# Return the budget data package descriptor (json)
toolkit.response.headers['Content-Type'] = 'application/json'
return json.dumps(bdpkg)
| agpl-3.0 | 7,718,835,142,215,778,000 | 33.477612 | 72 | 0.608225 | false |
yunify/qingcloud-cli | qingcloud/cli/iaas_client/actions/notification/verify_notification_item.py | 1 | 2163 | # =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.iaas_client.actions.base import BaseAction
class VerifyNotificationItemAction(BaseAction):
action = 'VerifyNotificationItem'
command = 'verify-notification-item'
usage = '%(prog)s [-c --notification_item_content...] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-c', '--notification-item-content', dest='notification_item_content',
action='store', type=str, default='',
help='The content of notification item which will be verified.')
parser.add_argument('-v', '--verification-code', dest='verification_code',
action='store', type=str, default='',
help='The verification code.')
@classmethod
def build_directive(cls, options):
if options.notification_item_content == '':
print('error: notification_item_content should be specified.')
return None
if options.verification_code == '':
print('error: verification_code should be specified.')
return None
directive = {
"notification_item_content": options.notification_item_content,
"verification_code": options.verification_code,
}
return directive
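# Illustrative invocation (assuming the standard `qingcloud iaas` entry point
# dispatches to this action via its `command` attribute):
#   qingcloud iaas verify-notification-item \
#       -c "[email protected]" -v "123456" -f ~/.qingcloud/config.yaml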
| apache-2.0 | -4,494,124,505,304,918,000 | 43.142857 | 98 | 0.585761 | false |
EderSantana/seya | seya/layers/imageproc.py | 1 | 4991 | """Note: this code was modified from:
https://github.com/lpigou/Theano-3D-ConvNet/blob/master/LICENSE
by @lpigou and collaborators
"""
import numpy as np
import theano.tensor as T
import keras.backend as K
from keras.layers.core import Layer
class NormLayer(Layer):
""" Normalization layer """
def __init__(self, method="lcn", kernel_size=9, threshold=1e-4,
nb_channels=3,
use_divisor=True, **kwargs):
"""
method: "lcn", "gcn", "mean"
LCN: local contrast normalization
kwargs:
kernel_size=9, threshold=1e-4, use_divisor=True
GCN: global contrast normalization
kwargs:
scale=1., subtract_mean=True, use_std=False, sqrt_bias=0.,
min_divisor=1e-8
MEAN: local mean subtraction
kwargs:
kernel_size=5
"""
super(NormLayer, self).__init__(**kwargs)
self.method = method
self.kernel_size = kernel_size
self.threshold = threshold
self.use_divisor = use_divisor
self.nb_channels = nb_channels
self.input = K.placeholder(ndim=4)
def get_output(self, train=False):
X = self.get_input()
out = []
if self.method == "lcn":
for i in range(self.nb_channels):
XX = X[:, i:i+1, :, :]
out += [self.lecun_lcn(XX, self.kernel_size, self.threshold,
self.use_divisor)]
out = K.concatenate(out, axis=1)
elif self.method == "gcn":
out = self.global_contrast_normalize(X)
elif self.method == "mean":
out = self.local_mean_subtraction(X, self.kernel_size)
else:
raise NotImplementedError()
return out
def lecun_lcn(self, X, kernel_size=7, threshold=1e-4, use_divisor=True):
"""
Yann LeCun's local contrast normalization
Orginal code in Theano by: Guillaume Desjardins
"""
filter_shape = (1, 1, kernel_size, kernel_size)
filters = self.gaussian_filter(
kernel_size).reshape(filter_shape)
# filters = shared(_asarray(filters, dtype=floatX), borrow=True)
filters = K.variable(filters)
convout = K.conv2d(X, filters, filter_shape=filter_shape,
border_mode='same')
# For each pixel, remove mean of kernel_sizexkernel_size neighborhood
new_X = X - convout
if use_divisor:
# Scale down norm of kernel_sizexkernel_size patch
sum_sqr_XX = K.conv2d(K.pow(K.abs(new_X), 2), filters,
filter_shape=filter_shape, border_mode='same')
denom = T.sqrt(sum_sqr_XX)
per_img_mean = denom.mean(axis=[2, 3])
divisor = T.largest(per_img_mean.dimshuffle(0, 1, 'x', 'x'), denom)
divisor = T.maximum(divisor, threshold)
new_X /= divisor
return new_X
def local_mean_subtraction(self, X, kernel_size=5):
filter_shape = (1, 1, kernel_size, kernel_size)
filters = self.mean_filter(kernel_size).reshape(filter_shape)
filters = K.variable(filters)
mean = K.conv2d(X, filters, filter_shape=filter_shape,
border_mode='same')
return X - mean
def global_contrast_normalize(self, X, scale=1., subtract_mean=True,
use_std=False, sqrt_bias=0., min_divisor=1e-6):
ndim = X.ndim
if ndim not in [3, 4]:
raise NotImplementedError("X.dim>4 or X.ndim<3")
scale = float(scale)
mean = X.mean(axis=ndim-1)
new_X = X.copy()
if subtract_mean:
if ndim == 3:
new_X = X - mean[:, :, None]
else:
new_X = X - mean[:, :, :, None]
if use_std:
normalizers = T.sqrt(sqrt_bias + X.var(axis=ndim-1)) / scale
else:
normalizers = T.sqrt(sqrt_bias + (new_X ** 2).sum(axis=ndim-1)) / scale
# Don't normalize by anything too small.
T.set_subtensor(normalizers[(normalizers < min_divisor).nonzero()], 1.)
if ndim == 3:
new_X /= (normalizers[:, :, None] + 1e-6)
else:
new_X /= (normalizers[:, :, :, None] + 1e-6)
return new_X
def gaussian_filter(self, kernel_shape):
x = np.zeros((kernel_shape, kernel_shape), dtype='float32')
def gauss(x, y, sigma=2.0):
Z = 2 * np.pi * sigma**2
return 1./Z * np.exp(-(x**2 + y**2) / (2. * sigma**2))
mid = np.floor(kernel_shape / 2.)
for i in xrange(0, kernel_shape):
for j in xrange(0, kernel_shape):
x[i, j] = gauss(i-mid, j-mid)
        return x / x.sum()
def mean_filter(self, kernel_size):
s = kernel_size**2
x = np.repeat(1./s, s).reshape((kernel_size, kernel_size))
return x
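# Illustrative usage sketch (assumes the pre-1.0 Keras Sequential API this layer
# was written against; arguments are examples only):
#   model.add(NormLayer(method="lcn", kernel_size=9, nb_channels=3))
#   model.add(NormLayer(method="gcn"))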
| bsd-3-clause | 2,430,868,583,431,697,400 | 32.952381 | 83 | 0.537167 | false |
NMGRL/pychron | pychron/git/hosts/local.py | 1 | 1669 | # ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
from git import Repo
from pychron.git.hosts import BaseGitHostService
from pychron.paths import paths
class LocalGitHostService(BaseGitHostService):
def make_url(self):
return
def create_repo(self, name, **kw):
self.create_empty_repo(name)
return True
def create_empty_repo(self, name):
root = paths.repository_dataset_dir
p = os.path.join(root, name)
if not os.path.isdir(p):
os.mkdir(p)
repo = Repo.init(p)
def get_repository_names(self, organization):
root = paths.repository_dataset_dir
names = [n for n in os.listdir(root) if os.path.isdir(os.path.join(root, n))]
return names
def get_repos(self, organization):
names = self.get_repository_names(organization)
repos = [{'name': n} for n in names]
return repos
# ============= EOF =============================================
| apache-2.0 | 922,089,579,292,431,400 | 32.38 | 85 | 0.59257 | false |
cooldome/Nim-gdb | nim.py | 1 | 8490 |
import gdb
import re
#################################################################################################
##### Type pretty printers
#################################################################################################
class NimTypePrinter(gdb.types.TypePrinter):
"Nim type printer, one printer for all Nim types"
type_hash_regex = re.compile("^(.+?)_([A-Za-z0-9]+)$")
type_map_static = {
    'NI': 'int', 'NI8': 'int8', 'NI16': 'int16', 'NI32': 'int32', 'NI64': 'int64',
'NU': 'uint', 'NU8': 'uint8','NU16': 'uint16', 'NU32': 'uint32', 'NU64': 'uint64',
    'NF': 'float', 'NF32': 'float32', 'NF64': 'float64',
'NIM_BOOL': 'bool', 'NIM_CHAR': 'char', 'NCSTRING': 'cstring',
'NimStringDesc': 'string'
}
def __init__ (self):
super (NimTypePrinter, self).__init__ ("NimTypePrinter")
@staticmethod
def rti(type_name):
"Get static const TNimType variable, should be available for every non trivial Nim type"
m = NimTypePrinter.type_hash_regex.match(type_name)
if m is not None:
try:
return gdb.parse_and_eval("NTI_" + m.group(2) + "_")
except:
return None
def instantiate(self):
return self._recognizer()
class _recognizer(object):
def recognize(self, type_obj):
tname = ""
if type_obj.tag is not None:
tname = type_obj.tag
elif type_obj.name is not None:
tname = type_obj.name
else:
return None
result = NimTypePrinter.type_map_static.get(tname, None)
if result is not None:
return result
rti = NimTypePrinter.rti(tname)
if rti is not None:
return str(rti['name'])
return None
nimobjfile = gdb.current_objfile() or gdb.objfiles()[0]
nimobjfile.type_printers = []
nimobjfile.type_printers = [NimTypePrinter()]
#################################################################################################
##### GDB Function, equivalent of Nim's $ operator
#################################################################################################
class DollarPrintFunction (gdb.Function):
"Nim's equivalent of $ operator as a gdb function, available in expressions `print $dollar(myvalue)"
_gdb_dollar_functions = gdb.execute("info functions dollar__", True, True)
dollar_functions = re.findall('NimStringDesc \*(dollar__[A-z0-9_]+?)\(([^,)]*)\);', _gdb_dollar_functions)
def __init__ (self):
super (DollarPrintFunction, self).__init__("dollar")
@staticmethod
def invoke_static(arg):
for func, arg_typ in DollarPrintFunction.dollar_functions:
if arg.type.name == arg_typ:
func_value = gdb.lookup_global_symbol(func, gdb.SYMBOL_FUNCTIONS_DOMAIN).value()
return func_value(arg)
if arg.type.name + " *" == arg_typ:
func_value = gdb.lookup_global_symbol(func, gdb.SYMBOL_FUNCTIONS_DOMAIN).value()
return func_value(arg.address)
raise ValueError("No suitable Nim $ operator found for type: " + arg.type.name)
def invoke(self, arg):
return self.invoke_static(arg)
DollarPrintFunction()
#################################################################################################
##### GDB Command, equivalent of Nim's $ operator
#################################################################################################
class DollarPrintCmd (gdb.Command):
"""Dollar print command for Nim, `$ expr` will invoke Nim's $ operator"""
def __init__ (self):
super (DollarPrintCmd, self).__init__ ("$", gdb.COMMAND_DATA, gdb.COMPLETE_EXPRESSION)
def invoke (self, arg, from_tty):
param = gdb.parse_and_eval(arg)
gdb.write(str(DollarPrintFunction.invoke_static(param)) + "\n", gdb.STDOUT)
DollarPrintCmd()
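# Example gdb session using the helpers above (illustrative; assumes a Nim
# program compiled with --debugger:native and this script sourced into gdb):
#   (gdb) source nim.py
#   (gdb) print $dollar(myString)
#   (gdb) $ mySeq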
#################################################################################################
##### Value pretty printers
#################################################################################################
class NimBoolPrinter:
pattern = re.compile(r'^NIM_BOOL$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'bool'
def to_string(self):
if self.val == 0:
return "false"
else:
return "true"
################################################################
class NimStringPrinter:
pattern = re.compile(r'^NimStringDesc \*$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
def to_string(self):
if self.val:
l = int(self.val['Sup']['len'])
return self.val['data'][0].address.string("utf-8", "ignore", l)
else:
return "<nil>"
################################################################
class NimEnumPrinter:
pattern = re.compile(r'^tyEnum_(.+?)_([A-Za-z0-9]+)$')
reprEnum = gdb.lookup_global_symbol("reprEnum", gdb.SYMBOL_FUNCTIONS_DOMAIN).value()
def __init__(self, val):
self.val = val
if self.reprEnum is None:
raise ValueError("reprEnum function symbol is not found, can't display Nim enum. reprEnum was likely removed by dead code elimination")
def display_hint(self):
return 'enum'
def to_string(self):
try:
m = self.pattern.match(str(self.val.type))
nti = gdb.parse_and_eval("NTI_" + m.group(2) + "_").address
return self.reprEnum(self.val, nti)
except Exception as e:
gdb.write("reprEnum exception: " + str(e) + "\n", gdb.STDERR)
################################################################
class NimSetPrinter:
pattern = re.compile(r'^tySet_(.+?)_([A-Za-z0-9]+)$')
def __init__(self, val):
self.val = val
def to_string(self):
try:
return DollarPrintFunction.invoke_static(self.val)
except:
gdb.write("RTI information not found for set, likely removed by dead code elimination: " + str(self.val.type) + "\n", gdb.STDERR)
################################################################
class NimSeqPrinter:
pattern = re.compile(r'^tySequence_.*$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
return 'seq'
def children(self):
if not self.val:
yield ("seq", "<nil>")
raise StopIteration
len = int(self.val['Sup']['len'])
for i in range(len):
yield ('[{0}]'.format(i), self.val["data"][i])
################################################################
class NimObjectPrinter:
pattern = re.compile(r'^tyObject_.*$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'object'
def to_string(self):
return str(self.val.type)
def children(self):
if not self.val:
yield "object", "<nil>"
raise StopIteration
for (i, field) in enumerate(self.val.type.fields()):
if field.type.code == gdb.TYPE_CODE_UNION:
        yield self._union_field(i, field)
else:
yield (field.name, self.val[field])
def _union_field(self, i, field):
rti = NimTypePrinter.rti(self.val.type.name)
if rti is None:
return (field.name, "UNION field can't be displayed without RTI")
    node = rti['node'].dereference()
    node_sons = node['sons']
    prev_field = self.val.type.fields()[i - 1]
    descriminant_node = None
    for i in range(int(node['len'])):
son = node_sons[i].dereference()
if son['name'].string("utf-8", "ignore") == str(prev_field.name):
descriminant_node = son
break
if descriminant_node is None:
raise ValueError("Can't find union descriminant field in object RTI")
union_node = descriminant_node['sons'][int(self.val[prev_field])].dereference()
union_val = self.val[field]
for f1 in union_val.type.fields():
for f2 in union_val[f1].type.fields():
if str(f2.name) == union_node['name'].string("utf-8", "ignore"):
return (str(f2.name), union_val[f1][f2])
raise ValueError("RTI is absent or incomplete, can't find union definition in RTI")
################################################################
def makematcher(klass):
def matcher(val):
try:
if klass.pattern.match(str(val.type)):
return klass(val)
except Exception, e:
print("Nim matcher exception: ", str(e))
return matcher
nimobjfile.pretty_printers = []
nimobjfile.pretty_printers.extend([makematcher(var) for var in vars().values() if hasattr(var, 'pattern')])
| mit | -7,555,962,532,464,927,000 | 28.275862 | 141 | 0.538869 | false |
shygiants/ChangeGAN | change-gan/change-gan/models/change_gan.py | 1 | 16146 | """ Contains the definition of the ChangeGAN architecture. """
import multiprocessing
import tensorflow as tf
from gan_utils import encoder, decoder, transformer, discriminator, preprocess_image
slim = tf.contrib.slim
default_image_size = 256
def model_fn(inputs_a, inputs_b, learning_rate, num_blocks=9, is_training=True, scope=None, weight_decay=0.0001):
encoder_dims = [32, 64, 128]
deep_encoder_dims = [64, 128, 256]
decoder_dims = [64, 32, 3]
deep_decoder_dims = [128, 64, 3]
with tf.variable_scope(scope, 'ChangeGAN', [inputs_a, inputs_b]):
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.batch_norm],
is_training=True):
def converter_ab(inputs_a, reuse=None):
################
# Encoder part #
################
z_a = encoder(inputs_a, deep_encoder_dims, scope='Encoder_A', reuse=reuse)
# z_a is split into c_b, z_a-b
c_b, z_a_b = tf.split(z_a, num_or_size_splits=2, axis=3)
####################
# Transformer part #
####################
c_b = transformer(c_b, encoder_dims[-1], num_blocks=num_blocks,
scope='Transformer_B', reuse=reuse)
################
# Decoder part #
################
outputs_b = decoder(c_b, decoder_dims, scope='Decoder_B', reuse=reuse)
return outputs_b, z_a_b
def converter_ba(inputs_b, z_a_b, reuse=None):
################
# Encoder part #
################
z_b = encoder(inputs_b, encoder_dims, scope='Encoder_B', reuse=reuse)
# Concat z_b and z_a-b
c_a = tf.concat([z_b, z_a_b], 3)
####################
# Transformer part #
####################
c_a = transformer(c_a, deep_encoder_dims[-1], num_blocks=num_blocks,
scope='Transformer_A', reuse=reuse)
################
# Decoder part #
################
outputs_a = decoder(c_a, deep_decoder_dims, scope='Decoder_A', reuse=reuse)
return outputs_a
bbox_channel_a = _get_bbox(inputs_a)
outputs_ab, z_a_b = converter_ab(inputs_a)
outputs_bbox_ab = tf.concat([outputs_ab, bbox_channel_a], 3)
outputs_ba = converter_ba(inputs_b, z_a_b)
outputs_bbox_ba = tf.concat([outputs_ba, bbox_channel_a], 3)
outputs_bab, _ = converter_ab(outputs_bbox_ba, reuse=True)
outputs_aba = converter_ba(outputs_bbox_ab, z_a_b, reuse=True)
logits_a_real, probs_a_real = discriminator(inputs_a, deep_encoder_dims, scope='Discriminator_A')
logits_a_fake, probs_a_fake = discriminator(outputs_bbox_ba, deep_encoder_dims, scope='Discriminator_A', reuse=True)
logits_b_real, probs_b_real = discriminator(inputs_b, deep_encoder_dims, scope='Discriminator_B')
logits_b_fake, probs_b_fake = discriminator(outputs_bbox_ab, deep_encoder_dims, scope='Discriminator_B', reuse=True)
outputs = [inputs_a, inputs_b, outputs_ba, outputs_ab, outputs_aba, outputs_bab]
outputs = map(lambda image: tf.image.convert_image_dtype(image, dtype=tf.uint8), outputs)
with tf.name_scope('images'):
tf.summary.image('X_A', _remove_bbox(inputs_a))
tf.summary.image('X_B', _remove_bbox(inputs_b))
tf.summary.image('X_BA', outputs_ba)
tf.summary.image('X_AB', outputs_ab)
tf.summary.image('X_ABA', outputs_aba)
tf.summary.image('X_BAB', outputs_bab)
global_step = tf.train.get_or_create_global_step()
if not is_training:
return outputs
t_vars = tf.trainable_variables()
d_a_vars = [var for var in t_vars if 'Discriminator_A' in var.name]
d_b_vars = [var for var in t_vars if 'Discriminator_B' in var.name]
g_vars = [var for var in t_vars if 'coder' in var.name or 'Transformer' in var.name]
##########
# Losses #
##########
# Losses for discriminator
l_d_a_fake = tf.reduce_mean(tf.square(logits_a_fake))
l_d_a_real = tf.reduce_mean(tf.squared_difference(logits_a_real, 1.))
l_d_a = 0.5 * (l_d_a_fake + l_d_a_real)
train_op_d_a = tf.train.AdamOptimizer(
learning_rate=learning_rate,
beta1=0.5,
beta2=0.999
).minimize(l_d_a, global_step=global_step, var_list=d_a_vars)
l_d_b_fake = tf.reduce_mean(tf.square(logits_b_fake))
l_d_b_real = tf.reduce_mean(tf.squared_difference(logits_b_real, 1.))
l_d_b = 0.5 * (l_d_b_fake + l_d_b_real)
train_op_d_b = tf.train.AdamOptimizer(
learning_rate=learning_rate,
beta1=0.5,
beta2=0.999
).minimize(l_d_b, global_step=global_step, var_list=d_b_vars)
l_d = l_d_a + l_d_b
# Losses for generators
l_g_a = tf.reduce_mean(tf.squared_difference(logits_a_fake, 1.))
l_g_b = tf.reduce_mean(tf.squared_difference(logits_b_fake, 1.))
l_const_a = tf.reduce_mean(tf.losses.absolute_difference(_remove_bbox(inputs_a), outputs_aba))
l_const_b = tf.reduce_mean(tf.losses.absolute_difference(_remove_bbox(inputs_b), outputs_bab))
l_g = l_g_a + l_g_b + 10. * (l_const_a + l_const_b)
train_op_g = tf.train.AdamOptimizer(
learning_rate=learning_rate,
beta1=0.5,
beta2=0.999
).minimize(l_g, global_step=global_step, var_list=g_vars)
with tf.name_scope('losses'):
tf.summary.scalar('L_D_A_Fake', l_d_a_fake)
tf.summary.scalar('L_D_A_Real', l_d_a_real)
tf.summary.scalar('L_D_A', l_d_a)
tf.summary.scalar('L_D_B_Fake', l_d_b_fake)
tf.summary.scalar('L_D_B_Real', l_d_b_real)
tf.summary.scalar('L_D_B', l_d_b)
tf.summary.scalar('L_D', l_d)
tf.summary.scalar('L_G_A', l_g_a)
tf.summary.scalar('L_G_B', l_g_b)
tf.summary.scalar('L_Const_A', l_const_a)
tf.summary.scalar('L_Const_B', l_const_b)
tf.summary.scalar('L_G', l_g)
train_op = tf.group(*[train_op_d_a, train_op_d_b, train_op_g])
return train_op, global_step, outputs
def input_fn(dataset_a, dataset_b, batch_size=1, num_readers=4, is_training=True):
provider_a = slim.dataset_data_provider.DatasetDataProvider(
dataset_a,
num_readers=num_readers,
common_queue_capacity=20 * batch_size,
common_queue_min=10 * batch_size)
provider_b = slim.dataset_data_provider.DatasetDataProvider(
dataset_b,
num_readers=num_readers,
common_queue_capacity=20 * batch_size,
common_queue_min=10 * batch_size)
[image_a, bbox_a] = provider_a.get(['image', 'object/bbox'])
[image_b, bbox_b] = provider_b.get(['image', 'object/bbox'])
train_image_size = default_image_size
def add_channel(image, bbox, padding='ZERO'):
ymin = bbox[0]
xmin = bbox[1]
ymax = bbox[2]
xmax = bbox[3]
image_shape = tf.to_float(tf.shape(image))
height = image_shape[0]
width = image_shape[1]
bbox_height = (ymax - ymin) * height
bbox_width = (xmax - xmin) * width
channel = tf.ones(tf.to_int32(tf.stack([bbox_height, bbox_width])))
channel = tf.expand_dims(channel, axis=2)
pad_top = tf.to_int32(ymin * height)
pad_left = tf.to_int32(xmin * width)
height = tf.to_int32(height)
width = tf.to_int32(width)
channel = tf.image.pad_to_bounding_box(channel, pad_top, pad_left, height, width)
# TODO: Decide pad one or zero
if padding == 'ONE':
channel = tf.ones_like(channel) - channel
image = tf.concat([image, channel], axis=2)
return image
image_a = tf.image.convert_image_dtype(image_a, dtype=tf.float32)
image_b = tf.image.convert_image_dtype(image_b, dtype=tf.float32)
# [Num of boxes, 4] => [4]
bbox_a = tf.squeeze(bbox_a, axis=0)
bbox_b = tf.squeeze(bbox_b, axis=0)
# Add bound box as 4th channel
image_a = add_channel(image_a, bbox_a)
image_b = add_channel(image_b, bbox_b)
image_space_a = Image(image_a, bbox_a)
image_space_b = Image(image_b, bbox_b)
# Resize image B
ratio = image_space_a.bbox_height / image_space_b.bbox_height
image_space_b.resize(ratio)
# Shift image B to fit bboxes of two images
pixel_shift = image_space_a.translate2pxl(image_space_a.bbox_center) - \
image_space_b.translate2pxl(image_space_b.bbox_center)
# Calculate ymin and xmin
crop_top = tf.less(pixel_shift[0], 0)
pad_y = tf.cond(crop_top, true_fn=lambda: 0, false_fn=lambda: pixel_shift[0])
crop_ymin = tf.cond(crop_top,
true_fn=lambda: image_space_b.translate2coor(pixel_y=tf.negative(pixel_shift[0])),
false_fn=lambda: 0.)
crop_left = tf.less(pixel_shift[1], 0)
pad_x = tf.cond(crop_left, true_fn=lambda: 0, false_fn=lambda: pixel_shift[1])
crop_xmin = tf.cond(crop_left,
true_fn=lambda: image_space_b.translate2coor(pixel_x=tf.negative(pixel_shift[1])),
false_fn=lambda: 0.)
# Calculate ymax and xmax
over_y = pixel_shift[0] + image_space_b.height - image_space_a.height
crop_bottom = tf.greater(over_y, 0)
crop_ymax = tf.cond(crop_bottom,
true_fn=lambda: 1. - image_space_b.translate2coor(pixel_y=over_y),
false_fn=lambda: 1.)
over_x = pixel_shift[1] + image_space_b.width - image_space_a.width
crop_right = tf.greater(over_x, 0)
crop_xmax = tf.cond(crop_right,
true_fn=lambda: 1. - image_space_b.translate2coor(pixel_x=over_x),
false_fn=lambda: 1.)
# Resize, Crop, Pad
image_b_cropped = image_space_b.crop(crop_ymin, crop_xmin, crop_ymax, crop_xmax)
def pad_to_bounding_box(image):
return tf.image.pad_to_bounding_box(image, pad_y, pad_x, image_space_a.height, image_space_a.width)
# Pad differently depending on type of channel
image_b_cropped, bbox_channel = _split_image_bbox(image_b_cropped)
# One padding for RGB
rgb_padding = pad_to_bounding_box(tf.ones_like(image_b_cropped))
rgb_padding = tf.ones_like(rgb_padding) - rgb_padding
# Sample background color and pad
rgb_padding *= image_b_cropped[0, 0]
# Pad for RGB
image_b = pad_to_bounding_box(image_b_cropped) + rgb_padding
# Zero padding for bbox channel
bbox_channel = pad_to_bounding_box(bbox_channel)
# Concat RGB and bbox channel
image_b = tf.concat([image_b, bbox_channel], axis=2)
# Preprocess images
image_a = _preprocess_image(image_a, train_image_size, train_image_size, is_training=is_training)
image_b = _preprocess_image(image_b, train_image_size, train_image_size, is_training=is_training)
images_a, images_b, bboxes_a, bboxes_b = tf.train.batch(
[image_a, image_b, bbox_a, bbox_b],
batch_size=batch_size,
num_threads=multiprocessing.cpu_count(),
capacity=5 * batch_size)
batch_queue = slim.prefetch_queue.prefetch_queue(
[images_a, images_b, bboxes_a, bboxes_b], capacity=2)
images_a, images_b, bboxes_a, bboxes_b = batch_queue.dequeue()
with tf.name_scope('inputs'):
tf.summary.image('X_A', _remove_bbox(images_a))
tf.summary.image('X_A_BBox', images_a)
tf.summary.image('X_B', _remove_bbox(images_b))
tf.summary.image('X_B_BBox', images_b)
return images_a, images_b, bboxes_a, bboxes_b
def _preprocess_image(image, height, width, is_training=True):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Central square crop and resize
shape = tf.to_float(tf.shape(image))
original_height = shape[0]
original_width = shape[1]
rest = (1. - original_width / original_height) / 2.
image = tf.expand_dims(image, 0)
images = tf.image.crop_and_resize(image,
[[rest, 0., 1. - rest, 1.]], [0],
[height, width])
image = tf.squeeze(images, [0])
# image = tf.image.resize_images(image, [height, width])
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def _split_image_bbox(image_bbox):
image, bbox = tf.split(image_bbox, [3, 1], axis=image_bbox.shape.ndims - 1)
return image, bbox
def _remove_bbox(image_bbox):
image, bbox = _split_image_bbox(image_bbox)
return image
def _get_bbox(image_bbox):
image, bbox = _split_image_bbox(image_bbox)
return bbox
class Image:
def __init__(self, image, bbox):
self._image = image
self._image_shape = tf.to_float(tf.shape(image))
self._height = self._image_shape[0]
self._width = self._image_shape[1]
self._ratio = None
self._bbox = bbox
self._ymin = bbox[0]
self._xmin = bbox[1]
self._ymax = bbox[2]
self._xmax = bbox[3]
self._bbox_height = (self._ymax - self._ymin) * self._height
self._bbox_width = (self._xmax - self._xmin) * self._width
self._center_y = (self._ymin + self._ymax) / 2.
self._center_x = (self._xmin + self._xmax) / 2.
@property
def image(self):
return self._image
@property
def height(self):
height = self._height
if self._ratio is not None:
height *= self._ratio
return tf.to_int32(height)
@property
def width(self):
width = self._width
if self._ratio is not None:
width *= self._ratio
return tf.to_int32(width)
@property
def bbox_height(self):
return self._bbox_height
@property
def bbox_center(self):
return tf.stack([self._center_y, self._center_x])
def resize(self, ratio):
self._ratio = ratio
def translate2pxl(self, coor):
if coor.dtype != tf.float32:
coor = tf.to_float(coor)
pixel = coor * self._image_shape[:2]
if self._ratio is not None:
pixel *= self._ratio
return tf.to_int32(pixel)
def translate2coor(self, pixel_y=None, pixel_x=None):
if pixel_y is None and pixel_x is None:
raise ValueError
if pixel_y is not None and pixel_x is not None:
raise ValueError
divisor = self._image_shape[0 if pixel_y is not None else 1]
pixel = pixel_y if pixel_y is not None else pixel_x
if pixel.dtype != tf.float32:
pixel = tf.to_float(pixel)
if self._ratio is not None:
divisor *= self._ratio
coor = pixel / divisor
return coor
def crop(self, ymin, xmin, ymax, xmax):
image = self._image
if self._ratio is not None:
target_shape = tf.to_int32(self._image_shape[:2] * self._ratio)
image = tf.image.resize_images(image, target_shape)
shape = tf.to_float(tf.shape(image))
height = shape[0]
width = shape[1]
offset_height = tf.to_int32(ymin * height)
offset_width = tf.to_int32(xmin * width)
target_height = tf.to_int32((ymax - ymin) * height)
target_width = tf.to_int32((xmax - xmin) * width)
image = tf.image.crop_to_bounding_box(image,
offset_height,
offset_width,
target_height,
target_width)
return image
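# -----------------------------------------------------------------------------
# Illustrative wiring sketch (not from the original training script). Assumes
# 4-channel inputs (RGB + bounding-box mask), as produced by input_fn:
#   inputs_a = tf.placeholder(tf.float32, [1, default_image_size, default_image_size, 4])
#   inputs_b = tf.placeholder(tf.float32, [1, default_image_size, default_image_size, 4])
#   train_op, global_step, outputs = model_fn(inputs_a, inputs_b, learning_rate=2e-4)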
| mit | 2,200,847,747,076,158,000 | 36.812646 | 132 | 0.565775 | false |
wangzheng0822/algo | python/07_linkedlist/linked_list_algo.py | 1 | 2395 | """
1) Reverse singly-linked list
2) Detect cycle in a list
3) Merge two sorted lists
4) Remove nth node from the end
5) Find middle node
Author: Wenru
"""
from typing import Optional
class Node:
def __init__(self, data: int, next=None):
self.data = data
self._next = next
# Reverse a singly-linked list
# Note that the input is assumed to be a Node, not a linked list.
def reverse(head: Node) -> Optional[Node]:
reversed_head = None
current = head
while current:
reversed_head, reversed_head._next, current = current, reversed_head, current._next
return reversed_head
# Detect a cycle in a list
def has_cycle(head: Node) -> bool:
slow, fast = head, head
while fast and fast._next:
slow = slow._next
fast = fast._next._next
if slow == fast:
return True
return False
# Merge two sorted linked lists
def merge_sorted_list(l1: Node, l2: Node) -> Optional[Node]:
if l1 and l2:
p1, p2 = l1, l2
fake_head = Node(None)
current = fake_head
while p1 and p2:
if p1.data <= p2.data:
current._next = p1
p1 = p1._next
else:
current._next = p2
p2 = p2._next
current = current._next
current._next = p1 if p1 else p2
return fake_head._next
return l1 or l2
# Remove the nth node from the end (assumes n > 0)
def remove_nth_from_end(head: Node, n: int) -> Optional[Node]:
fast = head
count = 0
while fast and count < n:
fast = fast._next
count += 1
if not fast and count < n: # not that many nodes
return head
if not fast and count == n:
return head._next
slow = head
while fast._next:
fast, slow = fast._next, slow._next
slow._next = slow._next._next
return head
def find_middle_node(head: Node) -> Optional[Node]:
slow, fast = head, head
fast = fast._next if fast else None
while fast and fast._next:
slow, fast = slow._next, fast._next._next
return slow
def print_all(head: Node):
nums = []
current = head
while current:
nums.append(current.data)
current = current._next
print("->".join(str(num) for num in nums))
| apache-2.0 | -448,324,623,278,686,500 | 24.445652 | 91 | 0.579667 | false |
etale-cohomology/unicorns | activation_functions.py | 1 | 7428 | # Copyright (c) 2016, Diego Alonso Cortez, [email protected]
# Code released under the BSD 2-Clause License
# Inspired by: Michael Nielsen, Mocha
# http://neuralnetworksanddeeplearning.com
# https://github.com/pluskid/Mocha.jl
import numpy as np
import abc # Define "skeleton" classes that do nothing but provide a blueprint to create classes
from mathisart import abstractstaticmethod
"""The Unicors module with all the activations functions! Each activation function is implemented
as a class with two static methods: the function itself and its derivative.
"""
class ActivationFunction(metaclass=abc.ABCMeta):
"""A blueprint for the *Activation Function* classes! These interface with the Layer classes.
An activation function need only be differentiable (or so it was thought!), because
differentiability will be our basis for learning.
Regardless of the type of layer, activation functions are be applied (elementwise) on the
so-called *weighted inputs* (denoted *z*). On a given layer *l*, the image of the activation
function (on a given *z_l*) is called the *activation* and is denoted *a_l*.
The activation of a layer will be what we consider its "output".
"""
@abstractstaticmethod
def f(z):
"""The activation function itself!"""
@abstractstaticmethod
def D(z):
"""The derivative of the activation function!"""
class Sigmoid(ActivationFunction):
"""The good 'ol sigmoid (aka. logistic) activation function. It maps the real line to the
(open) unit interval bijectively, and it's symmetric across x=0 over y=1/2. It's continuous
everywhere and differentiable everywhere. It satisfies a first-order nonlinear differential
equation (f' = f - f**2), leading to a very simple derivative. It saturates very easily,
slowing down learning to a virtual halt. "Saturation" refers to places where its derivative is
near 0.
The sigmoid function is homotopically equivalent to the tanh function.
Initially loved, the sigmoid has been superseeded by the more computationally efficient and
biologically plausible ReLU.
"""
def f(z):
return (1 + np.exp(z)**-1)**-1
def D(z):
return Sigmoid.f(z) * (1 - Sigmoid.f(z))
class TanH(ActivationFunction):
"""The hyperbolic tangent activation function. It maps the real line to the interval (-1; 1)
bijectively, and it's symmetric across x=0 over y=0. It's continuous everywhere and
differentiable everywhere. It satisfies a first-order nonlinear differential
equation (f' = 1 - f**2), leading to a very simple derivative. It saturates very easily,
slowing down learning to a virtual halt. "Saturation" refers to places where its derivative is
near 0.
The tanh function is homotopically equivalent to the sigmoid function.
"""
def f(z):
return np.tanh(z)
def D(z):
return 1 - TanH.f(z)**2
class ReLU(ActivationFunction):
"""The AWESOME Rectified Linear Unit. Efficient, biological. What more could you want? It maps
    the real line onto the non-negative reals. It's continuous everywhere, but nondifferentiable at
x=0.
It has been shown that piecewise linear units, such as ReLU, can compute highly complex and
structured functions (Montufar et al., 2014).
"""
def f(z):
return z * (z > 0)
# return np.maximum(0, z)
def D(z):
return z > 0
return np.array(z > 0, dtype=np.float32)
# return 1 * (z > 0)
# return 1.0 * (z > 0)
# return (np.sign(z) + 1) / 2
class SoftPlus(ActivationFunction):
"""The everywhere-differentiable version of the ReLU. Its derivative is, surprisingly, the
sigmoid function.
"""
def f(z):
return np.log(1 + np.exp(z))
def D(z):
return Sigmoid.f(z)
class NoisyReLU(ActivationFunction):
"""A ReLU with Gaussian noise!
"""
def f(z):
return np.maximum(0, z + np.random.randn(*z.shape)) # Some activations below zero!
return np.maximum(0, z + np.random.randn(*z.shape) * (z > 0)) # No activations below zero!
def D(z):
return ReLU.D(z)
class LReLU(ActivationFunction):
"""Leaky ReLU! A generalization of ReLUs with a small nonzero gradient when the unit is no
active.
Leaky ReLU is defined as:
a = max(0, y) + alpha * min(0, y)
When alpha = 0, we get ReLU. When alpha = 1, we get the Identity activation.
"""
alpha = 0.1 # Leakage parameter!
def f(z):
lrelu = LReLU.alpha * z * (z < 0)
lrelu += ReLU.f(z)
# leakyrelu = ReLU.f(z)
# leakyrelu[np.where(leakyrelu == 0)] = LeakyReLU.alpha * z
return lrelu
def D(z):
lrelu = LReLU.alpha * (z <= 0)
lrelu += z > 0
# leakyrelu = ReLU.D(z)
# leakyrelu[np.where(leakyrelu == 0)] = LeakyReLU.alpha
return lrelu
class PReLU(ActivationFunction): # TODO
"""Parametric ReLU! A generalization of leaky ReLUs where the leakage parameter is *learned*
and can vary across channels! This last remark means the leakage parameter is a vector
corresponding to each input of the activation function!
PReLU can be trained by backpropagation and optimized simultaneously with the other layers.
Source:
http://arxiv.org/pdf/1502.01852v1.pdf
"""
def f(z, alpha):
prelu = alpha * z * (z < 0) # z and alpha have the same length!
prelu += ReLU.f(z)
return prelu
def D(z, alpha):
prelu = alpha * (z <= 0) # z and alpha have the same length!
prelu += z > 0
return prelu
class Identity(ActivationFunction):
"""The identity activation function! Even more computationally efficient than the ReLU, but
maybe less biologically plausible?
"Why would one want to do use an identity activation function? After all, a multi-layered
network with linear activations at each layer can be equally-formulated as a single-layered
linear network. It turns out that the identity activation function is surprisingly useful. For
example, a multi-layer network that has nonlinear activation functions amongst the hidden
units and an output layer that uses the identity activation function implements a powerful
form of nonlinear regression. Specifically, the network can predict continuous target values
using a linear combination of signals that arise from one or more layers of nonlinear
transformations of the input."
"""
def f(z):
return z
def D(z):
return 1
class Kumaraswamy(ActivationFunction):
"""The Kumaraswamy unit (Tomczak), as seen on TV (well, the Arxiv):
http://arxiv.org/pdf/1505.02581.pdf
The Kumaraswamy unit follows from modeling a bunch of copies of the same neuron using the
generalized Kumaraswamy distribution, and it's closely related to the ReLU.
When a = b = 1, we recover the sigmoid function!
"""
a, b = 8, 30 # Or 5, 6
def f(z):
return 1 - (1 - Sigmoid.f(z)**Kumaraswamy.a)**Kumaraswamy.b
def D(z):
return # TODO
# return -Kumaraswamy.b * (1 - Sigmoid.f(z)**Kumaraswamy.a)**(Kumaraswamy.b - 1)
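        # Hedged note, not taken from the paper: by the chain rule one closed form is
        #   a * b * s**(a - 1) * (1 - s**a)**(b - 1) * s * (1 - s)
        # with s = Sigmoid.f(z), a = Kumaraswamy.a, b = Kumaraswamy.b.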
| bsd-2-clause | -3,485,144,952,076,071,400 | 34.058252 | 99 | 0.655627 | false |
TheDeverEric/noesis-importers | Eric Van Hoven/tex_AmericanChopper_csi.py | 1 | 1790 | #-------------------------------------------------------------------------------
# Name: American Chopper CSI Image (Tested with XBOX Only)
# Purpose: Import Texture
#
# Author: Eric Van Hoven
#
# Created: 07/07/2018
# Copyright: (c) Eric Van Hoven 2018
# Licence: <MIT License>
#-------------------------------------------------------------------------------
from inc_noesis import *
import noesis
def registerNoesisTypes():
handle = noesis.register("American Chopper CSI Image", ".csi")
noesis.setHandlerTypeCheck(handle, noepyCheckType)
noesis.setHandlerLoadRGBA(handle, noepyLoadRGBA)
return 1
def noepyCheckType(data):
bs = NoeBitStream(data)
id = bs.readBytes(4).decode("ASCII")
if id != "MISC":
return 0
return 1
def noepyLoadRGBA(data, texList):
bs = NoeBitStream(data)
bs.seek(0x8, 0)
headln = bs.readUInt()
dataStart = bs.readUInt()
width = bs.readUInt()
height = bs.readUInt()
bs.seek(0x8, 1) #null uint32's
codecidsz = bs.readUInt()
codec = bs.readBytes(codecidsz).decode("ASCII")
dataSize = (width * height) * codecidsz
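	# Assumption inferred from the reads above (not documented in the format):
	# bytes per pixel is taken to be len(codec), so "RGB" means 3 bytes/pixel
	# and a four-character codec string means 4 bytes/pixel.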
bs.seek(dataStart, 0)
data = bs.readBytes(dataSize)
if codec == "RGB":
data = rapi.imageDecodeRaw(data, width, height, "r8 g8 b8")
texFmt = noesis.NOESISTEX_RGB24
elif codec == "BGR":
data = rapi.imageDecodeRaw(data, width, height, "b8 g8 r8")
texFmt = noesis.NOESISTEX_RGB24
else:
data = rapi.imageDecodeRaw(data, width, height, "b8 g8 r8 a8")
texFmt = noesis.NOESISTEX_RGBA32
texList.append(NoeTexture(rapi.getInputName(), width, height, data, texFmt))
return 1
| mit | -7,782,279,928,896,052,000 | 27.344262 | 80 | 0.567039 | false |
weapp/miner | shared/objectid.py | 1 | 9362 | # Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for working with MongoDB `ObjectIds
<http://dochub.mongodb.org/core/objectids>`_.
"""
import binascii
import calendar
import datetime
try:
import hashlib
_md5func = hashlib.md5
except ImportError: # for Python < 2.5
import md5
_md5func = md5.new
import os
import random
import socket
import struct
import threading
import time
from bson.errors import InvalidId
from bson.py3compat import (PY3, b, binary_type, text_type,
bytes_from_hex, string_types)
from bson.tz_util import utc
EMPTY = b("")
ZERO = b("\x00")
def _machine_bytes():
"""Get the machine portion of an ObjectId.
"""
machine_hash = _md5func()
if PY3:
# gethostname() returns a unicode string in python 3.x
# while update() requires a byte string.
machine_hash.update(socket.gethostname().encode())
else:
# Calling encode() here will fail with non-ascii hostnames
machine_hash.update(socket.gethostname())
return machine_hash.digest()[0:3]
class ObjectId(object):
"""A MongoDB ObjectId.
"""
_inc = random.randint(0, 0xFFFFFF)
_inc_lock = threading.Lock()
_machine_bytes = _machine_bytes()
__slots__ = ('_id')
_type_marker = 7
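    # Layout of a freshly generated id (restating the packing in _generate below):
    # 4-byte big-endian timestamp + 3-byte machine hash + 2-byte pid + 3-byte
    # counter = 12 bytes, i.e. 24 hex characters from str(ObjectId()).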
def __init__(self, oid=None):
"""Initialize a new ObjectId.
If `oid` is ``None``, create a new (unique) ObjectId. If `oid`
is an instance of (:class:`basestring` (:class:`str` or :class:`bytes`
in python 3), :class:`ObjectId`) validate it and use that. Otherwise,
a :class:`TypeError` is raised. If `oid` is invalid,
:class:`~bson.errors.InvalidId` is raised.
:Parameters:
- `oid` (optional): a valid ObjectId (12 byte binary or 24 character
hex string)
.. versionadded:: 1.2.1
The `oid` parameter can be a ``unicode`` instance (that contains
only hexadecimal digits).
.. mongodoc:: objectids
"""
if oid is None:
self._generate()
else:
self.__validate(oid)
@classmethod
def from_datetime(cls, generation_time):
"""Create a dummy ObjectId instance with a specific generation time.
This method is useful for doing range queries on a field
containing :class:`ObjectId` instances.
.. warning::
It is not safe to insert a document containing an ObjectId
generated using this method. This method deliberately
eliminates the uniqueness guarantee that ObjectIds
generally provide. ObjectIds generated with this method
should be used exclusively in queries.
`generation_time` will be converted to UTC. Naive datetime
instances will be treated as though they already contain UTC.
An example using this helper to get documents where ``"_id"``
was generated before January 1, 2010 would be:
>>> gen_time = datetime.datetime(2010, 1, 1)
>>> dummy_id = ObjectId.from_datetime(gen_time)
>>> result = collection.find({"_id": {"$lt": dummy_id}})
:Parameters:
- `generation_time`: :class:`~datetime.datetime` to be used
as the generation time for the resulting ObjectId.
.. versionchanged:: 1.8
Properly handle timezone aware values for
`generation_time`.
.. versionadded:: 1.6
"""
if generation_time.utcoffset() is not None:
generation_time = generation_time - generation_time.utcoffset()
ts = calendar.timegm(generation_time.timetuple())
oid = struct.pack(">i", int(ts)) + ZERO * 8
return cls(oid)
@classmethod
def is_valid(cls, oid):
"""Checks if a `oid` string is valid or not.
:Parameters:
- `oid`: the object id to validate
.. versionadded:: 2.3
"""
try:
ObjectId(oid)
return True
except (InvalidId, TypeError):
return False
    def _generate(self, generation_time=None):
        """Generate a new value for this ObjectId.
        """
        if generation_time:
            if generation_time.utcoffset() is not None:
                generation_time = generation_time - generation_time.utcoffset()
            ts = calendar.timegm(generation_time.timetuple())
            generation_time = int(ts)
        oid = EMPTY
        # 4 bytes current time
        oid += struct.pack(">i", int(generation_time or time.time()))
# 3 bytes machine
oid += ObjectId._machine_bytes
# 2 bytes pid
oid += struct.pack(">H", os.getpid() % 0xFFFF)
# 3 bytes inc
ObjectId._inc_lock.acquire()
oid += struct.pack(">i", ObjectId._inc)[1:4]
ObjectId._inc = (ObjectId._inc + 1) % 0xFFFFFF
ObjectId._inc_lock.release()
self._id = oid
return self
def __validate(self, oid):
"""Validate and use the given id for this ObjectId.
Raises TypeError if id is not an instance of
(:class:`basestring` (:class:`str` or :class:`bytes`
in python 3), ObjectId) and InvalidId if it is not a
valid ObjectId.
:Parameters:
- `oid`: a valid ObjectId
"""
if isinstance(oid, ObjectId):
self._id = oid._id
elif isinstance(oid, string_types):
if len(oid) == 12:
if isinstance(oid, binary_type):
self._id = oid
else:
raise InvalidId("%s is not a valid ObjectId" % oid)
elif len(oid) == 24:
try:
self._id = bytes_from_hex(oid)
except (TypeError, ValueError):
raise InvalidId("%s is not a valid ObjectId" % oid)
else:
raise InvalidId("%s is not a valid ObjectId" % oid)
else:
raise TypeError("id must be an instance of (%s, %s, ObjectId), "
"not %s" % (binary_type.__name__,
text_type.__name__, type(oid)))
@property
def binary(self):
"""12-byte binary representation of this ObjectId.
"""
return self._id
@property
def generation_time(self):
"""A :class:`datetime.datetime` instance representing the time of
generation for this :class:`ObjectId`.
The :class:`datetime.datetime` is timezone aware, and
represents the generation time in UTC. It is precise to the
second.
.. versionchanged:: 1.8
Now return an aware datetime instead of a naive one.
.. versionadded:: 1.2
"""
t = struct.unpack(">i", self._id[0:4])[0]
return datetime.datetime.fromtimestamp(t, utc)
def __getstate__(self):
"""return value of object for pickling.
needed explicitly because __slots__() defined.
"""
return self._id
def __setstate__(self, value):
"""explicit state set from pickling
"""
        # Provide backwards compatibility with OIDs
# pickled with pymongo-1.9 or older.
if isinstance(value, dict):
oid = value["_ObjectId__id"]
else:
oid = value
# ObjectIds pickled in python 2.x used `str` for __id.
# In python 3.x this has to be converted to `bytes`
# by encoding latin-1.
if PY3 and isinstance(oid, text_type):
self._id = oid.encode('latin-1')
else:
self._id = oid
def __str__(self):
if PY3:
return binascii.hexlify(self._id).decode()
return binascii.hexlify(self._id)
def __repr__(self):
return "ObjectId('%s')" % (str(self),)
def __eq__(self, other):
if isinstance(other, ObjectId):
return self._id == other._id
return NotImplemented
def __ne__(self, other):
if isinstance(other, ObjectId):
return self._id != other._id
return NotImplemented
def __lt__(self, other):
if isinstance(other, ObjectId):
return self._id < other._id
return NotImplemented
def __le__(self, other):
if isinstance(other, ObjectId):
return self._id <= other._id
return NotImplemented
def __gt__(self, other):
if isinstance(other, ObjectId):
return self._id > other._id
return NotImplemented
def __ge__(self, other):
if isinstance(other, ObjectId):
return self._id >= other._id
return NotImplemented
def __hash__(self):
"""Get a hash value for this :class:`ObjectId`.
.. versionadded:: 1.1
"""
return hash(self._id)
| mit | -345,590,328,993,939,400 | 30.416107 | 80 | 0.579897 | false |
wateraccounting/wa | Collect/RFE/monthly.py | 1 | 3465 | # -*- coding: utf-8 -*-
import sys
import pandas as pd
import calendar
import os
from DataAccess import DownloadData
import wa.General.data_conversions as DC
import wa.General.raster_conversions as RC
def main(Dir, Startdate='', Enddate='',
latlim=[-50, 50], lonlim=[-180, 180], cores=False, Waitbar = 1):
"""
This function downloads RFE V2.0 (monthly) data
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax] (values must be between -50 and 50)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
cores -- The number of cores used to run the routine.
It can be 'False' to avoid using parallel computing
routines.
Waitbar -- 1 (Default) will print a waitbar
"""
# Download data
print '\nDownload monthly RFE precipitation data for period %s till %s' %(Startdate, Enddate)
# Check variables
if not Startdate:
Startdate = pd.Timestamp('2001-01-01')
if not Enddate:
Enddate = pd.Timestamp('Now')
Dates = pd.date_range(Startdate, Enddate, freq='MS')
# Make directory
output_folder = os.path.join(Dir, 'Precipitation', 'RFE', 'Monthly/')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# Create Waitbar
if Waitbar == 1:
import wa.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount = len(Dates)
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
for Date in Dates:
month = Date.month
year = Date.year
end_day = calendar.monthrange(year, month)[1]
        Startdate_one_month = '%s-%02d-01' %(year, month)
        Enddate_one_month = '%s-%02d-%02d' %(year, month, end_day)
DownloadData(Dir, Startdate_one_month, Enddate_one_month, latlim, lonlim, 0, cores)
Dates_daily = pd.date_range(Startdate_one_month, Enddate_one_month, freq='D')
# Make directory
input_folder_daily = os.path.join(Dir, 'Precipitation', 'RFE', 'Daily/')
i = 0
for Date_daily in Dates_daily:
file_name = 'P_RFE.v2.0_mm-day-1_daily_%s.%02s.%02s.tif' %(Date_daily.strftime('%Y'), Date_daily.strftime('%m'), Date_daily.strftime('%d'))
file_name_daily_path = os.path.join(input_folder_daily, file_name)
if os.path.exists(file_name_daily_path):
if Date_daily == Dates_daily[i]:
Raster_monthly = RC.Open_tiff_array(file_name_daily_path)
else:
Raster_monthly += RC.Open_tiff_array(file_name_daily_path)
else:
if Date_daily == Dates_daily[i]:
i += 1
geo_out, proj, size_X, size_Y = RC.Open_array_info(file_name_daily_path)
file_name = 'P_RFE.v2.0_mm-month-1_monthly_%s.%02s.01.tif' %(Date.strftime('%Y'), Date.strftime('%m'))
file_name_output = os.path.join(output_folder, file_name)
DC.Save_as_tiff(file_name_output, Raster_monthly, geo_out, projection="WGS84")
if Waitbar == 1:
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 | -4,120,924,246,631,314,000 | 38.827586 | 157 | 0.588745 | false |
Zlopez/chess | chess/figures/queen.py | 1 | 4620 | """
Implementation of Queen figure in chess command line client.
"""
import logging
import math
from . import figure
from ..board import Board
class Queen(figure.Figure):
"""
Queen figure for chess implementation.
"""
def _is_move_correct(self, x_index, y_index):
# Move x_index or y_index
return (((x_index < self._x_index or x_index > self._x_index) and
y_index == self._y_index or
(y_index < self._y_index or y_index > self._y_index) and
x_index == self._x_index) or
# Move diagonally
(math.fabs(x_index - self._x_index) == math.fabs(y_index - self._y_index)))
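    # Worked example with hypothetical coordinates: for a queen standing at (3, 3),
    # _is_move_correct(3, 7) and _is_move_correct(6, 6) hold (straight and diagonal
    # moves), while _is_move_correct(5, 4) does not.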
def _test_move(self, x_index, y_index):
result = None
# Check if move is correct
if self._is_move_correct(x_index, y_index):
# Check if figure is moving
if not self._is_moving(x_index, y_index):
result = False
# Check if the move is inside board
if not self._is_move_inside_board(
x_index, y_index):
result = False
# Check if king is in target position
if self._is_king_on_position(x_index, y_index):
result = False
# Check if another figure is on target destination
if self._is_figure_on_target_position(x_index, y_index):
result = False
# check if path is free
if not self._check_vector(x_index, y_index):
result = False
if result is None:
target_figure = self._board.get_figure(x_index, y_index)
# Attack
if target_figure and target_figure.get_owner() != self._owner:
logging.info("Attacking %s on position %s:%s",
target_figure.get_type(), x_index, y_index)
result = True
else:
logging.info(
"Queen moved from %s:%s to %s:%s",
self._x_index,
self._y_index,
x_index,
y_index)
result = True
else:
# Move is illegal
logging.info(
"Invalid move for queen from %s:%s to %s:%s",
self._x_index,
self._y_index,
x_index,
y_index)
result = False
return result
# if __name__ == "__main__":
# # Start logging
# logging.basicConfig(
# format='[%(asctime)s] ' +
# '{%(pathname)s:%(lineno)d} %(levelname)s - %(message)s',
# level=logging.DEBUG)
#
# # Test invalid move
# print("Test invalid move:")
# queen = Queen(0, 0, figure.queen, "black")
# state = {(0, 0): (figure.queen, queen._owner)}
# queen.moveTo(-2, -32, state, 8, 8)
# print("New state " + str(state))
#
# # Test correct move in axis
# print("Test correct move in axis:")
# queen = Queen(0, 0, figure.queen, "black")
# state = {(0, 0): (figure.queen, queen._owner)}
# queen.moveTo(2, 0, state, 8, 8)
# print("New state " + str(state))
#
# # Test correct move diagonally
# print("Test correct move in axis:")
# queen = Queen(0, 0, figure.queen, "black")
# state = {(0, 0): (figure.queen, queen._owner)}
# queen.moveTo(2, 2, state, 8, 8)
# print("New state " + str(state))
#
# # Test attack
# print("Test attack move:")
# queen = Queen(0, 0, figure.queen, "white")
# state = {(0, 0): (figure.queen, queen._owner),
# (2, 2): (figure.queen, "black")}
# queen.moveTo(2, 2, state, 8, 8)
# print("New state " + str(state))
#
# # Test move on target destination
# print("Test move on target destination:")
# queen = Queen(0, 0, figure.queen, "white")
# state = {(0, 0): (figure.queen, queen._owner),
# (2, 2): (figure.queen, "white")}
# queen.moveTo(2, 2, state, 8, 8)
# print("New state " + str(state))
#
# # Test generation
# print("Test moves generation:")
# queen = Queen(4, 4, figure.queen, "white")
# state = {(4, 4): (figure.queen, queen._owner)}
# states = queen.generateMoves(state, 8, 8)
# print("Generated states " + str(states))
#
# # Test king capture
# print("Test king capture:")
# queen = Queen(4, 4, figure.queen, "white")
# state = {(4, 4): (figure.queen, queen._owner),
# (6, 6): (figure.king, figure.black)}
# queen.moveTo(6, 6, state, 8, 8)
# print("New state " + str(state))
| gpl-3.0 | 5,723,919,041,787,688,000 | 33.222222 | 91 | 0.514069 | false |
ad-m/django-teryt-tree | teryt_tree/dal_ext/views.py | 1 | 1263 | from dal import autocomplete
from teryt_tree.models import JednostkaAdministracyjna
class VoivodeshipAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
qs = JednostkaAdministracyjna.objects.voivodeship().all()
if self.q:
qs = qs.filter(name__istartswith=self.q)
return qs
class CountyAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
qs = JednostkaAdministracyjna.objects.county().all()
if self.q:
qs = qs.filter(name__istartswith=self.q)
voivodeship = self.forwarded.get("voivodeship", None)
if voivodeship:
return qs.filter(parent=voivodeship)
return qs
class CommunityAutocomplete(autocomplete.Select2QuerySetView):
def get_result_label(self, result):
return "{} ({})".format(str(result), str(result.category))
def get_queryset(self):
qs = (
JednostkaAdministracyjna.objects.community()
.select_related("category")
.all()
)
if self.q:
qs = qs.filter(name__istartswith=self.q)
county = self.forwarded.get("county", None)
if county:
return qs.filter(parent=county)
return qs
| bsd-3-clause | -9,305,648,048,797,586 | 27.704545 | 66 | 0.637371 | false |
pyfarm/pyfarm-agent | setup.py | 1 | 5292 | # No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
from os import walk
from os.path import isfile, join
from setuptools import setup
assert sys.version_info[0:2] >= (2, 6), "Python 2.6 or higher is required"
install_requires = [
"pyfarm.core>=0.9.3",
# "PyOpenSSL", "service_identity", # required for full SSL support
"netaddr", "twisted", "ntplib", "requests!=2.4.0", "treq",
"voluptuous", "jinja2", "psutil>=2.1.0",
"netifaces>=0.10.2", "pyasn1"]
if "READTHEDOCS" in os.environ:
install_requires += ["sphinxcontrib-httpdomain", "sphinx"]
# Windows is a little special because we have to have pywin32
# installed. It's a requirement of Twisted, mainly because of spawnProcess,
# however other parts of Twisted use it as well. Twisted's setup.py itself
# does not declare this dependency, likely because of the difficulties
# associated with installing the package. Eventually Twisted will move
# away from this requirement once https://twistedmatrix.com/trac/ticket/7477
# is closed. In the mean time however we'll use pypiwin32 which is built
# by some of Twisted's maintainers:
# http://sourceforge.net/p/pywin32/feature-requests/110/
if sys.platform.startswith("win"):
install_requires += ["wmi"]
try:
import win32api
except ImportError:
install_requires += ["pypiwin32"]
# The wheel package understands and can handle the wheel
# format (which we need in order to handle pypiwin32).
try:
import wheel
except ImportError:
raise ImportError(
"Please run `pip install wheel`. This step is required in "
"order to download and install one of the dependencies, "
"pypiwin32."
)
if sys.version_info[0:2] == (2, 6):
install_requires += ["importlib", "ordereddict", "argparse"]
if isfile("README.rst"):
with open("README.rst", "r") as readme:
long_description = readme.read()
else:
long_description = ""
def get_package_data(parent, roots):
output = []
for top in roots:
if top.startswith("/"):
raise ValueError("get_package_data was given an absolute path or "
"the filesystem root to traverse, refusing.")
for root, dirs, files in walk(top):
for filename in files:
output.append(join(root, filename).split(parent)[-1][1:])
return output
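# Example of the helper above with a hypothetical tree: for parent "pyfarm/agent"
# and a file "pyfarm/agent/etc/agent.yml" found under one of the roots,
# get_package_data() records the package-relative path "etc/agent.yml".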
agent_root = join("pyfarm", "agent")
agent_package_data_roots = (
join(agent_root, "etc"),
join(agent_root, "http", "static"),
join(agent_root, "http", "templates"))
jobtype_root = join("pyfarm", "jobtypes")
jobtype_root_package_data_roots = (
join(jobtype_root, "etc"), )
setup(
name="pyfarm.agent",
version="0.8.7",
packages=[
"pyfarm",
"pyfarm.agent",
"pyfarm.agent.entrypoints",
"pyfarm.agent.http",
"pyfarm.agent.http.api",
"pyfarm.agent.http.core",
"pyfarm.agent.logger",
"pyfarm.agent.sysinfo",
"pyfarm.jobtypes",
"pyfarm.jobtypes.core"],
data_files=[
("etc/pyfarm", [
"pyfarm/jobtypes/etc/jobtypes.yml",
"pyfarm/agent/etc/agent.yml"])],
package_data={
"pyfarm.agent": get_package_data(
agent_root, agent_package_data_roots),
"pyfarm.jobtypes": get_package_data(
jobtype_root, jobtype_root_package_data_roots)},
namespace_packages=["pyfarm"],
entry_points={
"console_scripts": [
"pyfarm-agent = pyfarm.agent.entrypoints.main:agent",
"pyfarm-supervisor = "
" pyfarm.agent.entrypoints.supervisor:supervisor",
"pyfarm-dev-fakerender = "
" pyfarm.agent.entrypoints.development:fake_render",
"pyfarm-dev-fakework = "
" pyfarm.agent.entrypoints.development:fake_work"]},
include_package_data=True,
install_requires=install_requires,
url="https://github.com/pyfarm/pyfarm-agent",
license="Apache v2.0",
author="Oliver Palmer",
author_email="[email protected]",
description="Core module containing code to run PyFarm's agent.",
long_description=long_description,
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Environment :: No Input/Output (Daemon)",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: System :: Distributed Computing"])
| apache-2.0 | 7,502,455,223,679,567,000 | 35.75 | 78 | 0.641723 | false |
goodwillcoding/RIDE | src/robotide/editor/editors.py | 1 | 12638 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from robotide import robotapi, context
from robotide.controller.settingcontrollers import DocumentationController,\
VariableController, TagsController
from robotide.usages.UsageRunner import ResourceFileUsages
from robotide.publish import RideItemSettingsChanged, RideInitFileRemoved,\
RideFileNameChanged
from robotide.widgets import ButtonWithHandler, Label, HeaderLabel,\
HorizontalSizer, HtmlWindow
from .settingeditors import DocumentationEditor, SettingEditor, TagsEditor,\
ImportSettingListEditor, VariablesListEditor, MetadataListEditor
class WelcomePage(HtmlWindow):
undo = cut = copy = paste = delete = comment = uncomment = save \
= show_content_assist = tree_item_selected = lambda *args: None
def __init__(self, parent):
HtmlWindow.__init__(self, parent, text=context.ABOUT_RIDE)
def close(self):
self.Show(False)
def destroy(self):
self.close()
self.Destroy()
class EditorPanel(wx.Panel):
"""Base class for all editor panels"""
# TODO: Move outside default editor package, document
name = doc = ''
title = None
undo = cut = copy = paste = delete = comment = uncomment = save \
= show_content_assist = lambda self: None
def __init__(self, plugin, parent, controller, tree):
wx.Panel.__init__(self, parent)
self.plugin = plugin
self.controller = controller
self._tree = tree
def tree_item_selected(self, item):
pass
class _RobotTableEditor(EditorPanel):
name = 'table'
doc = 'table editor'
_settings_open_id = 'robot table settings open'
def __init__(self, plugin, parent, controller, tree):
EditorPanel.__init__(self, plugin, parent, controller, tree)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.Bind(wx.EVT_IDLE, self.OnIdle)
self.SetSizer(self.sizer)
if self.title:
self.sizer.Add(self._create_header(self.title),
0, wx.EXPAND | wx.ALL, 5)
self.sizer.Add((0, 10))
self._editors = []
self._reset_last_show_tooltip()
self._populate()
self.plugin.subscribe(self._settings_changed, RideItemSettingsChanged)
def _should_settings_be_open(self):
if self._settings_open_id not in self.plugin.global_settings:
return False
return self.plugin.global_settings[self._settings_open_id]
def _store_settings_open_status(self):
self.plugin.global_settings[self._settings_open_id] = \
self._settings.IsExpanded()
def _settings_changed(self, data):
if data.item == self.controller:
for editor in self._editors:
editor.update_value()
def OnIdle(self, event):
if self._last_shown_tooltip and self._mouse_outside_tooltip():
self._last_shown_tooltip.hide()
self._reset_last_show_tooltip()
def _mouse_outside_tooltip(self):
mx, my = wx.GetMousePosition()
tx, ty = self._last_shown_tooltip.screen_position
dx, dy = self._last_shown_tooltip.size
return (mx < tx or mx > tx+dx) or (my < ty or my > ty+dy)
def tooltip_allowed(self, tooltip):
if wx.GetMouseState().ControlDown() or \
self._last_shown_tooltip is tooltip:
return False
self._last_shown_tooltip = tooltip
return True
def _reset_last_show_tooltip(self):
self._last_shown_tooltip = None
def close(self):
self.plugin.unsubscribe(
self._settings_changed, RideItemSettingsChanged)
self.Unbind(wx.EVT_MOTION)
self.Show(False)
def destroy(self):
self.close()
self.Destroy()
def _create_header(self, text, readonly=False):
if readonly:
text += ' (READ ONLY)'
self._title_display = HeaderLabel(self, text)
return self._title_display
def _add_settings(self):
self._settings = self._create_settings()
self._restore_settings_open_status()
self._editors.append(self._settings)
self.sizer.Add(self._settings, 0, wx.ALL |wx.EXPAND, 2)
def _create_settings(self):
settings = Settings(self)
settings.Bind(
wx.EVT_COLLAPSIBLEPANE_CHANGED, self._collabsible_changed)
settings.build(self.controller.settings, self.plugin, self._tree)
return settings
def _restore_settings_open_status(self):
if self._should_settings_be_open():
self._settings.Expand()
wx.CallLater(200, self._collabsible_changed)
else:
self._settings.Collapse()
def _collabsible_changed(self, event=None):
self._store_settings_open_status()
self.GetSizer().Layout()
self.Refresh()
if event:
event.Skip()
def highlight_cell(self, obj, row, column):
'''Highlight the given object at the given row and column'''
if isinstance(obj, robotapi.Setting):
setting_editor = self._get_settings_editor(obj)
if setting_editor and hasattr(setting_editor, "highlight"):
setting_editor.highlight(column)
elif row >= 0 and column >= 0:
self.kweditor.select(row, column)
def _get_settings_editor(self, setting):
'''Return the settings editor for the given setting object'''
for child in self.GetChildren():
if isinstance(child, SettingEditor):
if child._item == setting:
return child
return None
def highlight(self, text, expand=True):
for editor in self._editors:
editor.highlight(text, expand=expand)
class Settings(wx.CollapsiblePane):
BORDER = 2
def __init__(self, parent):
wx.CollapsiblePane.__init__(
self, parent, wx.ID_ANY, 'Settings',
style=wx.CP_DEFAULT_STYLE | wx.CP_NO_TLW_RESIZE)
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._editors = []
self.Bind(wx.EVT_SIZE, self._recalc_size)
def Expand(self):
wx.CollapsiblePane.Expand(self)
def GetPane(self):
pane = wx.CollapsiblePane.GetPane(self)
pane.tooltip_allowed = self.GetParent().tooltip_allowed
return pane
def close(self):
for editor in self._editors:
editor.close()
def update_value(self):
for editor in self._editors:
editor.update_value()
def create_editor_for(self, controller, plugin, tree):
editor_cls = self._get_editor_class(controller)
return editor_cls(self.GetPane(), controller, plugin, tree)
def _get_editor_class(self, controller):
if isinstance(controller, DocumentationController):
return DocumentationEditor
if isinstance(controller, TagsController):
return TagsEditor
return SettingEditor
def build(self, settings, plugin, tree):
for setting in settings:
editor = self.create_editor_for(setting, plugin, tree)
self._sizer.Add(editor, 0, wx.ALL | wx.EXPAND, self.BORDER)
self._editors.append(editor)
self.GetPane().SetSizer(self._sizer)
self._sizer.SetSizeHints(self.GetPane())
def _recalc_size(self, event=None):
if self.IsExpanded():
expand_button_height = 32 # good guess...
height = sum(editor.Size[1] + 2 * self.BORDER
for editor in self._editors)
self.SetSizeHints(-1, height + expand_button_height)
if event:
event.Skip()
def highlight(self, text, expand=True):
match = False
for editor in self._editors:
if editor.contains(text):
editor.highlight(text)
match = True
else:
editor.clear_highlight()
if match and expand:
self.Expand()
self.Parent.GetSizer().Layout()
class _FileEditor(_RobotTableEditor):
def __init__(self, *args):
_RobotTableEditor.__init__(self, *args)
self.plugin.subscribe(
self._update_source_and_name, RideFileNameChanged)
def _update_source(self, message=None):
self._source.SetValue(self.controller.data.source)
def _update_source_and_name(self, message):
self._title_display.SetLabel(self.controller.name)
self._update_source()
def tree_item_selected(self, item):
if isinstance(item, VariableController):
self._var_editor.select(item.name)
def _populate(self):
datafile = self.controller.data
header = self._create_header(
datafile.name, not self.controller.is_modifiable())
self.sizer.Add(header, 0, wx.EXPAND | wx.ALL, 5)
self.sizer.Add(
self._create_source_label(datafile.source), 0, wx.EXPAND | wx.ALL, 1)
self.sizer.Add((0, 10))
self._add_settings()
self._add_import_settings()
self._add_variable_table()
def _create_source_label(self, source):
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add((5,0))
sizer.Add(Label(self, label='Source',
size=(context.SETTING_LABEL_WIDTH,
context.SETTING_ROW_HEIGTH)))
self._source = wx.TextCtrl(self, style=wx.TE_READONLY|wx.NO_BORDER)
self._source.SetBackgroundColour(self.BackgroundColour)
self._source.SetValue(source)
self._source.SetMaxSize(wx.Size(-1, context.SETTING_ROW_HEIGTH))
sizer.Add(self._source, 1, wx.EXPAND)
return sizer
def _add_import_settings(self):
import_editor = ImportSettingListEditor(self, self._tree, self.controller.imports)
self.sizer.Add(import_editor, 1, wx.EXPAND)
self._editors.append(import_editor)
def _add_variable_table(self):
self._var_editor = VariablesListEditor(self, self._tree, self.controller.variables)
self.sizer.Add(self._var_editor, 1, wx.EXPAND)
self._editors.append(self._var_editor)
def close(self):
self.plugin.unsubscribe(self._update_source_and_name, RideFileNameChanged)
for editor in self._editors:
editor.close()
self._editors = []
_RobotTableEditor.close(self)
delete_rows = insert_rows = lambda s:0 #Stubs so that ctrl+d ctrl+i don't throw exceptions
class FindUsagesHeader(HorizontalSizer):
def __init__(self, parent, header, usages_callback):
HorizontalSizer.__init__(self)
self._header = HeaderLabel(parent, header)
self.add_expanding(self._header)
self.add(ButtonWithHandler(parent, 'Find Usages', usages_callback))
def SetLabel(self, label):
self._header.SetLabel(label)
class ResourceFileEditor(_FileEditor):
_settings_open_id = 'resource file settings open'
def _create_header(self, text, readonly=False):
if readonly:
text += ' (READ ONLY)'
def cb(event):
ResourceFileUsages(self.controller, self._tree.highlight).show()
self._title_display = FindUsagesHeader(self, text, cb)
return self._title_display
class TestCaseFileEditor(_FileEditor):
_settings_open_id = 'test case file settings open'
def _populate(self):
_FileEditor._populate(self)
self.sizer.Add((0, 10))
self._add_metadata()
def _add_metadata(self):
metadata_editor = MetadataListEditor(self, self._tree, self.controller.metadata)
self.sizer.Add(metadata_editor, 1, wx.EXPAND)
self._editors.append(metadata_editor)
class InitFileEditor(TestCaseFileEditor):
_settings_open_id = 'init file settings open'
def _populate(self):
TestCaseFileEditor._populate(self)
self.plugin.subscribe(self._init_file_removed, RideInitFileRemoved)
def _init_file_removed(self, message):
for setting, editor in zip(self.controller.settings, self._editors):
editor.refresh(setting)
| apache-2.0 | 8,504,961,411,227,969,000 | 34.105556 | 94 | 0.630954 | false |
ingenieroariel/inasafe | safe/engine/impact_functions_for_testing/BNPB_earthquake_guidelines.py | 1 | 5972 | """Impact function based on Padang 2009 post earthquake survey
This impact function estimates percentage damage to buildings as a
function of ground shaking measured in MMI.
Buildings are currently assumed to be represented in OpenStreetMap with
attributes collected as during the July 2011 Indonesian mapping competition.
This impact function maps the OSM buildings into 2 classes:
Unreinforced masonry (URM) and reinforced masonry (RM) according to
the guidelines.
"""
import numpy
from safe.impact_functions.core import FunctionProvider
from safe.impact_functions.core import get_hazard_layer, get_exposure_layer
from safe.storage.vector import Vector
from safe.common.utilities import ugettext as _
from safe.impact_functions.mappings import osm2bnpb
from safe.impact_functions.mappings import unspecific2bnpb
from safe.impact_functions.mappings import sigab2bnpb
from safe.engine.interpolation import assign_hazard_values_to_exposure_data
# Damage 'curves' for the two vulnerability classes
damage_parameters = {'URM': [6, 7],
'RM': [6, 8]}
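# Reading of the thresholds above (restated from the run() logic below): an URM
# building counts as low damage below MMI 6, medium damage from MMI 6 up to 7
# and high damage at MMI 7 or above; RM uses 6 and 8 as the corresponding bounds.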
class EarthquakeGuidelinesFunction(FunctionProvider):
"""Risk plugin for BNPB guidelines for earthquake damage to buildings
:param requires category=='hazard' and \
subcategory.startswith('earthquake') and \
layertype=='raster' and \
unit=='MMI'
:param requires category=='exposure' and \
subcategory.startswith('building') and \
layertype=='vector'
"""
vclass_tag = 'VCLASS'
target_field = 'DMGLEVEL'
def run(self, layers):
"""Risk plugin for earthquake school damage
"""
# Extract data
H = get_hazard_layer(layers) # Ground shaking
E = get_exposure_layer(layers) # Building locations
keywords = E.get_keywords()
if 'datatype' in keywords:
datatype = keywords['datatype']
if datatype.lower() == 'osm':
# Map from OSM attributes to the guideline classes (URM and RM)
E = osm2bnpb(E, target_attribute=self.vclass_tag)
elif datatype.lower() == 'sigab':
# Map from SIGAB attributes to the guideline classes
# (URM and RM)
E = sigab2bnpb(E)
else:
E = unspecific2bnpb(E, target_attribute=self.vclass_tag)
else:
E = unspecific2bnpb(E, target_attribute=self.vclass_tag)
# Interpolate hazard level to building locations
H = assign_hazard_values_to_exposure_data(H, E,
attribute_name='MMI')
# Extract relevant numerical data
coordinates = E.get_geometry()
shaking = H.get_data()
N = len(shaking)
# List attributes to carry forward to result layer
attributes = E.get_attribute_names()
# Calculate building damage
count3 = 0
count2 = 0
count1 = 0
count_unknown = 0
building_damage = []
for i in range(N):
mmi = float(shaking[i]['MMI'])
building_class = E.get_data(self.vclass_tag, i)
lo, hi = damage_parameters[building_class]
if numpy.isnan(mmi):
# If we don't know the shake level assign Not-a-Number
damage = numpy.nan
count_unknown += 1
elif mmi < lo:
damage = 1 # Low
count1 += 1
elif lo <= mmi < hi:
damage = 2 # Medium
count2 += 1
elif mmi >= hi:
damage = 3 # High
count3 += 1
else:
msg = 'Undefined shakelevel %s' % str(mmi)
raise Exception(msg)
# Collect shake level and calculated damage
result_dict = {self.target_field: damage,
'MMI': mmi}
            # Carry all original attributes forward
for key in attributes:
result_dict[key] = E.get_data(key, i)
# Record result for this feature
building_damage.append(result_dict)
# Create report
impact_summary = ('<table border="0" width="320px">'
                          ' <tr><th><b>%s</b></th><th><b>%s</b></th></tr>'
' <tr></tr>'
' <tr><td>%s:</td><td>%i</td></tr>'
' <tr><td>%s (10-25%%):</td><td>%i</td></tr>'
' <tr><td>%s (25-50%%):</td><td>%i</td></tr>'
' <tr><td>%s (50-100%%):</td><td>%i</td></tr>'
% (_('Buildings'), _('Total'),
_('All'), N,
_('Low damage'), count1,
_('Medium damage'), count2,
_('High damage'), count3))
impact_summary += (' <tr><td>%s (NaN):</td><td>%i</td></tr>'
% ('Unknown', count_unknown))
impact_summary += '</table>'
# Create style
style_classes = [dict(label=_('Low damage'), min=0.5, max=1.5,
colour='#fecc5c', transparency=1),
dict(label=_('Medium damage'), min=1.5, max=2.5,
colour='#fd8d3c', transparency=1),
dict(label=_('High damage'), min=2.5, max=3.5,
colour='#f31a1c', transparency=1)]
style_info = dict(target_field=self.target_field,
style_classes=style_classes)
# Create vector layer and return
V = Vector(data=building_damage,
projection=E.get_projection(),
geometry=coordinates,
name='Estimated damage level',
keywords={'impact_summary': impact_summary},
style_info=style_info)
return V
| gpl-3.0 | -3,609,612,572,815,342,000 | 37.282051 | 79 | 0.534494 | false |
TUM-LIS/faultify | analysis/optimization/matlab/viterbi/plotData.py | 1 | 17455 | import scipy.io
p1_1_0 = scipy.io.loadmat('manualOpt_fine_part1_snr0.mat')
p2_1_0 = scipy.io.loadmat('manualOpt_fine_snr0.mat')
p3_1_0 = scipy.io.loadmat('manualOpt_fine_part3_snr0.mat')
p1_2_0 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr0.mat')
p2_2_0 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr0.mat')
p3_2_0 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr0.mat')
p4_2_0 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr0.mat')
db0 = sum(p1_1_0['tt'][0]) +sum(p2_1_0['tt'][0])+ sum(p3_1_0['tt'][0]) + sum(p1_2_0['tt'][0])+ sum(p2_2_0['tt'][0])+sum(p3_2_0['tt'][0])+sum(p4_2_0['tt'][0])
p1_1_2 = scipy.io.loadmat('manualOpt_fine_part1_snr2.mat')
p2_1_2 = scipy.io.loadmat('manualOpt_fine_snr2.mat')
p3_1_2 = scipy.io.loadmat('manualOpt_fine_part3_snr2.mat')
p1_2_2 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr2.mat')
p2_2_2 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr2.mat')
p3_2_2 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr2.mat')
p4_2_2 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr2.mat')
db2 = sum(p1_1_2['tt'][0]) +sum(p2_1_2['tt'][0])+ sum(p3_1_2['tt'][0]) + sum(p1_2_2['tt'][0])+ sum(p2_2_2['tt'][0])+sum(p3_2_2['tt'][0])+sum(p4_2_2['tt'][0])
p1_1_4 = scipy.io.loadmat('manualOpt_fine_part1_snr4.mat')
p2_1_4 = scipy.io.loadmat('manualOpt_fine_snr4.mat')
p3_1_4 = scipy.io.loadmat('manualOpt_fine_part3_snr4.mat')
p1_2_4 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr4.mat')
p2_2_4 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr4.mat')
p3_2_4 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr4.mat')
p4_2_4 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr4.mat')
db4 = sum(p1_1_4['tt'][0]) +sum(p2_1_4['tt'][0])+ sum(p3_1_4['tt'][0]) + sum(p1_2_4['tt'][0])+ sum(p2_2_4['tt'][0])+sum(p3_2_4['tt'][0])+sum(p4_2_4['tt'][0])
p1_1_6 = scipy.io.loadmat('manualOpt_fine_part1_snr6.mat')
p2_1_6 = scipy.io.loadmat('manualOpt_fine_snr6.mat')
p3_1_6 = scipy.io.loadmat('manualOpt_fine_part3_snr6.mat')
p1_2_6 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr6.mat')
p2_2_6 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr6.mat')
p3_2_6 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr6.mat')
p4_2_6 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr6.mat')
db6 = sum(p1_1_6['tt'][0]) +sum(p2_1_6['tt'][0])+ sum(p3_1_6['tt'][0]) + sum(p1_2_6['tt'][0])+ sum(p2_2_6['tt'][0]) +sum(p3_2_6['tt'][0])+ sum(p4_2_6['tt'][0])
p1_1_8 = scipy.io.loadmat('manualOpt_fine_part1_snr8.mat')
p2_1_8 = scipy.io.loadmat('manualOpt_fine_snr8.mat')
p3_1_8 = scipy.io.loadmat('manualOpt_fine_part3_snr8.mat')
p1_2_8 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr8.mat')
p2_2_8 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr8.mat')
p3_2_8 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr8.mat')
p4_2_8 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr8.mat')
db8 = sum(p1_1_8['tt'][0]) +sum(p2_1_8['tt'][0])+ sum(p3_1_8['tt'][0]) + sum(p1_2_8['tt'][0])+ sum(p2_2_8['tt'][0]) +sum(p3_2_8['tt'][0])+ sum(p4_2_8['tt'][0])
p1_1_10 = scipy.io.loadmat('manualOpt_fine_part1_snr10.mat')
p2_1_10 = scipy.io.loadmat('manualOpt_fine_snr10.mat')
p3_1_10 = scipy.io.loadmat('manualOpt_fine_part3_snr10.mat')
p1_2_10 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr10.mat')
p2_2_10 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr10.mat')
p3_2_10 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr10.mat')
p4_2_10 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr10.mat')
db10 = sum(p1_1_10['tt'][0]) +sum(p2_1_10['tt'][0])+ sum(p3_1_10['tt'][0]) + sum(p1_2_10['tt'][0])+ sum(p2_2_10['tt'][0]) +sum(p3_2_10['tt'][0])+ sum(p4_2_10['tt'][0])
p1_1_12 = scipy.io.loadmat('manualOpt_fine_part1_snr12.mat')
p2_1_12 = scipy.io.loadmat('manualOpt_fine_snr12.mat')
p3_1_12 = scipy.io.loadmat('manualOpt_fine_part3_snr12.mat')
p1_2_12 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr12.mat')
p2_2_12 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr12.mat')
p3_2_12 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr12.mat')
p4_2_12 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr12.mat')
db12 = sum(p1_1_12['tt'][0]) +sum(p2_1_12['tt'][0])+ sum(p3_1_12['tt'][0]) + sum(p1_2_12['tt'][0])+ sum(p2_2_12['tt'][0]) +sum(p3_2_12['tt'][0])+ sum(p4_2_12['tt'][0])
p1_1_14 = scipy.io.loadmat('manualOpt_fine_part1_snr14.mat')
p2_1_14 = scipy.io.loadmat('manualOpt_fine_snr14.mat')
p3_1_14 = scipy.io.loadmat('manualOpt_fine_part3_snr14.mat')
p1_2_14 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr14.mat')
p2_2_14 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr14.mat')
p3_2_14 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr14.mat')
p4_2_14 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr14.mat')
db14 = sum(p1_1_14['tt'][0]) +sum(p2_1_14['tt'][0])+ sum(p3_1_14['tt'][0]) + sum(p1_2_14['tt'][0])+ sum(p2_2_14['tt'][0]) +sum(p3_2_14['tt'][0])+ sum(p4_2_14['tt'][0])
p1_1_16 = scipy.io.loadmat('manualOpt_fine_part1_snr16.mat')
p2_1_16 = scipy.io.loadmat('manualOpt_fine_snr16.mat')
p3_1_16 = scipy.io.loadmat('manualOpt_fine_part3_snr16.mat')
p1_2_16 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr16.mat')
p2_2_16 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr16.mat')
p3_2_16 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr16.mat')
p4_2_16 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr16.mat')
db16 = sum(p1_1_16['tt'][0]) +sum(p2_1_16['tt'][0])+ sum(p3_1_16['tt'][0]) + sum(p1_2_16['tt'][0])+ sum(p2_2_16['tt'][0]) +sum(p3_2_16['tt'][0])+ sum(p4_2_16['tt'][0])
p1_1_18 = scipy.io.loadmat('manualOpt_fine_part1_snr18.mat')
p2_1_18 = scipy.io.loadmat('manualOpt_fine_snr18.mat')
p3_1_18 = scipy.io.loadmat('manualOpt_fine_part3_snr18.mat')
p1_2_18 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr18.mat')
p2_2_18 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr18.mat')
p3_2_18 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr18.mat')
p4_2_18 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr18.mat')
db18 = sum(p1_1_18['tt'][0]) +sum(p2_1_18['tt'][0])+ sum(p3_1_18['tt'][0]) + sum(p1_2_18['tt'][0])+ sum(p2_2_18['tt'][0]) +sum(p3_2_18['tt'][0])+ sum(p4_2_18['tt'][0])
p1_1_20 = scipy.io.loadmat('manualOpt_fine_part1_snr20.mat')
p2_1_20 = scipy.io.loadmat('manualOpt_fine_snr20.mat')
p3_1_20 = scipy.io.loadmat('manualOpt_fine_part3_snr20.mat')
p1_2_20 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr20.mat')
p2_2_20 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr20.mat')
p3_2_20 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr20.mat')
p4_2_20 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr20.mat')
db20 = sum(p1_1_20['tt'][0]) +sum(p2_1_20['tt'][0])+ sum(p3_1_20['tt'][0]) + sum(p1_2_20['tt'][0])+ sum(p2_2_20['tt'][0]) +sum(p3_2_20['tt'][0])+ sum(p4_2_20['tt'][0])
p1_1_22 = scipy.io.loadmat('manualOpt_fine_part1_snr22.mat')
p2_1_22 = scipy.io.loadmat('manualOpt_fine_snr22.mat')
p3_1_22 = scipy.io.loadmat('manualOpt_fine_part3_snr22.mat')
p1_2_22 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr22.mat')
p2_2_22 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr22.mat')
p3_2_22 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr22.mat')
p4_2_22 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr22.mat')
db22 = sum(p1_1_22['tt'][0]) +sum(p2_1_22['tt'][0])+ sum(p3_1_22['tt'][0]) + sum(p1_2_22['tt'][0])+ sum(p2_2_22['tt'][0]) +sum(p3_2_22['tt'][0])+ sum(p4_2_22['tt'][0])
p1_1_24 = scipy.io.loadmat('manualOpt_fine_part1_snr24.mat')
p2_1_24 = scipy.io.loadmat('manualOpt_fine_snr24.mat')
p3_1_24 = scipy.io.loadmat('manualOpt_fine_part3_snr24.mat')
p1_2_24 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr24.mat')
p2_2_24 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr24.mat')
p3_2_24 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr24.mat')
p4_2_24 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr24.mat')
db24 = sum(p1_1_24['tt'][0]) +sum(p2_1_24['tt'][0])+ sum(p3_1_24['tt'][0]) + sum(p1_2_24['tt'][0])+ sum(p2_2_24['tt'][0]) +sum(p3_2_24['tt'][0])+ sum(p4_2_24['tt'][0])
p1_1_26 = scipy.io.loadmat('manualOpt_fine_part1_snr26.mat')
p2_1_26 = scipy.io.loadmat('manualOpt_fine_snr26.mat')
p3_1_26 = scipy.io.loadmat('manualOpt_fine_part3_snr26.mat')
p1_2_26 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr26.mat')
p2_2_26 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr26.mat')
p3_2_26 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr26.mat')
p4_2_26 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr26.mat')
db26 = sum(p1_1_26['tt'][0]) +sum(p2_1_26['tt'][0])+ sum(p3_1_26['tt'][0]) + sum(p1_2_26['tt'][0])+ sum(p2_2_26['tt'][0]) +sum(p3_2_26['tt'][0])+ sum(p4_2_26['tt'][0])
p1_1_28 = scipy.io.loadmat('manualOpt_fine_part1_snr28.mat')
p2_1_28 = scipy.io.loadmat('manualOpt_fine_snr28.mat')
p3_1_28 = scipy.io.loadmat('manualOpt_fine_part3_snr28.mat')
p1_2_28 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr28.mat')
p2_2_28 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr28.mat')
p3_2_28 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr28.mat')
p4_2_28 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr28.mat')
db28 = sum(p1_1_28['tt'][0]) +sum(p2_1_28['tt'][0])+ sum(p3_1_28['tt'][0]) + sum(p1_2_28['tt'][0])+ sum(p2_2_28['tt'][0]) +sum(p3_2_28['tt'][0])+ sum(p4_2_28['tt'][0])
p1_1_30 = scipy.io.loadmat('manualOpt_fine_part1_snr30.mat')
p2_1_30 = scipy.io.loadmat('manualOpt_fine_snr30.mat')
p3_1_30 = scipy.io.loadmat('manualOpt_fine_part3_snr30.mat')
p1_2_30 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr30.mat')
p2_2_30 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr30.mat')
p3_2_30 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr30.mat')
p4_2_30 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr30.mat')
db30 = sum(p1_1_30['tt'][0]) +sum(p2_1_30['tt'][0])+ sum(p3_1_30['tt'][0]) + sum(p1_2_30['tt'][0])+ sum(p2_2_30['tt'][0]) +sum(p3_2_30['tt'][0])+ sum(p4_2_30['tt'][0])
p1_1_32 = scipy.io.loadmat('manualOpt_fine_part1_snr32.mat')
p2_1_32 = scipy.io.loadmat('manualOpt_fine_snr32.mat')
p3_1_32 = scipy.io.loadmat('manualOpt_fine_part3_snr32.mat')
p1_2_32 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr32.mat')
p2_2_32 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr32.mat')
p3_2_32 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr32.mat')
p4_2_32 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr32.mat')
db32 = sum(p1_1_32['tt'][0]) +sum(p2_1_32['tt'][0])+ sum(p3_1_32['tt'][0]) + sum(p1_2_32['tt'][0])+ sum(p2_2_32['tt'][0]) +sum(p3_2_32['tt'][0])+ sum(p4_2_32['tt'][0])
p1_1_34 = scipy.io.loadmat('manualOpt_fine_part1_snr34.mat')
p2_1_34 = scipy.io.loadmat('manualOpt_fine_snr34.mat')
p3_1_34 = scipy.io.loadmat('manualOpt_fine_part3_snr34.mat')
p1_2_34 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr34.mat')
p2_2_34 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr34.mat')
p3_2_34 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr34.mat')
p4_2_34 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr34.mat')
db34 = sum(p1_1_34['tt'][0]) +sum(p2_1_34['tt'][0])+ sum(p3_1_34['tt'][0]) + sum(p1_2_34['tt'][0])+ sum(p2_2_34['tt'][0]) +sum(p3_2_34['tt'][0])+ sum(p4_2_34['tt'][0])
p1_1_36 = scipy.io.loadmat('manualOpt_fine_part1_snr36.mat')
p2_1_36 = scipy.io.loadmat('manualOpt_fine_snr36.mat')
p3_1_36 = scipy.io.loadmat('manualOpt_fine_part3_snr36.mat')
p1_2_36 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr36.mat')
p2_2_36 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr36.mat')
p3_2_36 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr36.mat')
p4_2_36 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr36.mat')
db36 = sum(p1_1_36['tt'][0]) +sum(p2_1_36['tt'][0])+ sum(p3_1_36['tt'][0]) + sum(p1_2_36['tt'][0])+ sum(p2_2_36['tt'][0]) +sum(p3_2_36['tt'][0])+ sum(p4_2_36['tt'][0])
p1_1_38 = scipy.io.loadmat('manualOpt_fine_part1_snr38.mat')
p2_1_38 = scipy.io.loadmat('manualOpt_fine_snr38.mat')
p3_1_38 = scipy.io.loadmat('manualOpt_fine_part3_snr38.mat')
p1_2_38 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr38.mat')
p2_2_38 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr38.mat')
p3_2_38 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr38.mat')
p4_2_38 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr38.mat')
db38 = sum(p1_1_38['tt'][0]) +sum(p2_1_38['tt'][0])+ sum(p3_1_38['tt'][0]) + sum(p1_2_38['tt'][0])+ sum(p2_2_38['tt'][0]) +sum(p3_2_38['tt'][0])+ sum(p4_2_38['tt'][0])
p1_1_40 = scipy.io.loadmat('manualOpt_fine_part1_snr40.mat')
p2_1_40 = scipy.io.loadmat('manualOpt_fine_snr40.mat')
p3_1_40 = scipy.io.loadmat('manualOpt_fine_part3_snr40.mat')
p1_2_40 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr40.mat')
p2_2_40 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr40.mat')
p3_2_40 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr40.mat')
p4_2_40 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr40.mat')
db40 = sum(p1_1_40['tt'][0]) +sum(p2_1_40['tt'][0])+ sum(p3_1_40['tt'][0]) + sum(p1_2_40['tt'][0])+ sum(p2_2_40['tt'][0]) +sum(p3_2_40['tt'][0])+ sum(p4_2_40['tt'][0])
p1_1_42 = scipy.io.loadmat('manualOpt_fine_part1_snr42.mat')
p2_1_42 = scipy.io.loadmat('manualOpt_fine_snr42.mat')
p3_1_42 = scipy.io.loadmat('manualOpt_fine_part3_snr42.mat')
p1_2_42 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr42.mat')
p2_2_42 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr42.mat')
p3_2_42 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr42.mat')
p4_2_42 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr42.mat')
db42 = sum(p1_1_42['tt'][0]) +sum(p2_1_42['tt'][0])+ sum(p3_1_42['tt'][0]) + sum(p1_2_42['tt'][0])+ sum(p2_2_42['tt'][0]) +sum(p3_2_42['tt'][0])+ sum(p4_2_42['tt'][0])
p1_1_44 = scipy.io.loadmat('manualOpt_fine_part1_snr44.mat')
p2_1_44 = scipy.io.loadmat('manualOpt_fine_snr44.mat')
p3_1_44 = scipy.io.loadmat('manualOpt_fine_part3_snr44.mat')
p1_2_44 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr44.mat')
p2_2_44 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr44.mat')
p3_2_44 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr44.mat')
p4_2_44 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr44.mat')
db44 = sum(p1_1_44['tt'][0]) +sum(p2_1_44['tt'][0])+ sum(p3_1_44['tt'][0]) + sum(p1_2_44['tt'][0])+ sum(p2_2_44['tt'][0]) +sum(p3_2_44['tt'][0])+ sum(p4_2_44['tt'][0])
p1_1_46 = scipy.io.loadmat('manualOpt_fine_part1_snr46.mat')
p2_1_46 = scipy.io.loadmat('manualOpt_fine_snr46.mat')
p3_1_46 = scipy.io.loadmat('manualOpt_fine_part3_snr46.mat')
p1_2_46 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr46.mat')
p2_2_46 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr46.mat')
p3_2_46 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr46.mat')
p4_2_46 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr46.mat')
db46 = sum(p1_1_46['tt'][0]) +sum(p2_1_46['tt'][0])+ sum(p3_1_46['tt'][0]) + sum(p1_2_46['tt'][0])+ sum(p2_2_46['tt'][0]) +sum(p3_2_46['tt'][0])+ sum(p4_2_46['tt'][0])
p1_1_48 = scipy.io.loadmat('manualOpt_fine_part1_snr48.mat')
p2_1_48 = scipy.io.loadmat('manualOpt_fine_snr48.mat')
p3_1_48 = scipy.io.loadmat('manualOpt_fine_part3_snr48.mat')
p1_2_48 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr48.mat')
p2_2_48 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr48.mat')
p3_2_48 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr48.mat')
p4_2_48 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr48.mat')
db48 = sum(p1_1_48['tt'][0]) +sum(p2_1_48['tt'][0])+ sum(p3_1_48['tt'][0]) + sum(p1_2_48['tt'][0])+ sum(p2_2_48['tt'][0]) +sum(p3_2_48['tt'][0])+ sum(p4_2_48['tt'][0])
p1_1_50 = scipy.io.loadmat('manualOpt_fine_part1_snr50.mat')
p2_1_50 = scipy.io.loadmat('manualOpt_fine_snr50.mat')
p3_1_50 = scipy.io.loadmat('manualOpt_fine_part3_snr50.mat')
p1_2_50 = scipy.io.loadmat('manualOpt_fine_part1_FDRE_snr50.mat')
p2_2_50 = scipy.io.loadmat('manualOpt_fine_part2_FDRE_snr50.mat')
p3_2_50 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr50.mat')
p4_2_50 = scipy.io.loadmat('manualOpt_fine_part3_FDRE_snr50.mat')
db50 = sum(p1_1_50['tt'][0]) +sum(p2_1_50['tt'][0])+ sum(p3_1_50['tt'][0]) + sum(p1_2_50['tt'][0])+ sum(p2_2_50['tt'][0])+sum(p3_2_50['tt'][0])+sum(p4_2_50['tt'][0])
approx = [db0, db2, db4, db6, db8, db10, db12, db14, db16, db18, db20, db22, db24, db26, db28, db30, db32, db34, db36, db38, db40, db42, db44, db46, db48, db50]
#print approx
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
def fitFunc(t, a, b, c):
return a*np.exp(-b*t) + c
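# The model a*exp(-b*t) + c is fitted by nonlinear least squares: curve_fit picks
# (a, b, c) so the curve tracks the 26 simulated sums in `approx` over the SNR
# grid t = 0, 2, ..., 50 dB.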
t = np.arange(0, 52, 2)
#print t
fitParams, fitCovariances = curve_fit(fitFunc, t, approx)
print ' fit coefficients:\n', fitParams
print ' Covariance matrix:\n', fitCovariances
import pylab
from pylab import arange,pi,sin,cos,sqrt
fig_width_pt = 253.04987 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0/72.27 # Convert pt to inches
golden_mean = (sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt # width in inches
fig_height =fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
params = {'backend': 'ps',
'axes.labelsize': 8,
'text.fontsize': 8,
'legend.fontsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'text.usetex': True,
'figure.figsize': fig_size}
pylab.rcParams.update(params)
t = np.arange(0, 52, 2)
#print t
pylab.figure(1)
pylab.clf()
pylab.axes([0.18,0.17,0.95-0.17,0.95-0.22])
pylab.plot(t,approx,'b.-',label='simulated')
#plt.errorbar(t,approx,fmt='ro',yerr = 0.2)
pylab.plot(t, fitFunc(t, fitParams[0], fitParams[1], fitParams[2]),'r--',label='curve fitting')
pylab.xlabel('Signal-to-Noise Ratio [dB]')
pylab.ylabel(r'Tolerated sum of $p_e$')
pylab.legend(loc=2)
pylab.savefig('optimization_viterbi.pdf')
#pylab.show()
| gpl-2.0 | 6,216,856,939,200,127,000 | 49.302594 | 167 | 0.662446 | false |
cloudera/Impala | tests/metadata/test_partition_metadata.py | 1 | 10955 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfLocal
from tests.common.test_dimensions import (create_single_exec_option_dimension,
create_uncompressed_text_dimension)
from tests.util.filesystem_utils import get_fs_path, WAREHOUSE, FILESYSTEM_PREFIX
# Map from the test dimension file_format string to the SQL "STORED AS"
# argument.
STORED_AS_ARGS = { 'text': 'textfile', 'parquet': 'parquet', 'avro': 'avro',
'seq': 'sequencefile' }
# Tests specific to partition metadata.
# TODO: Split up the DDL tests and move some of the partition-specific tests
# here.
class TestPartitionMetadata(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestPartitionMetadata, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
# Run one variation of the test with each file formats that we support writing.
# The compression shouldn't affect the partition handling so restrict to the core
# compression codecs.
cls.ImpalaTestMatrix.add_constraint(lambda v:
(v.get_value('table_format').file_format in ('text', 'parquet') and
v.get_value('table_format').compression_codec == 'none'))
@SkipIfLocal.hdfs_client # TODO: this dependency might not exist anymore
def test_multiple_partitions_same_location(self, vector, unique_database):
"""Regression test for IMPALA-597. Verifies Impala is able to properly read
tables that have multiple partitions pointing to the same location.
"""
TBL_NAME = "same_loc_test"
FQ_TBL_NAME = unique_database + "." + TBL_NAME
TBL_LOCATION = '%s/%s.db/%s' % (WAREHOUSE, unique_database, TBL_NAME)
file_format = vector.get_value('table_format').file_format
# Create the table
self.client.execute(
"create table %s (i int) partitioned by(j int) stored as %s location '%s'"
% (FQ_TBL_NAME, STORED_AS_ARGS[file_format], TBL_LOCATION))
# Point both partitions to the same location.
self.client.execute("alter table %s add partition (j=1) location '%s/p'"
% (FQ_TBL_NAME, TBL_LOCATION))
self.client.execute("alter table %s add partition (j=2) location '%s/p'"
% (FQ_TBL_NAME, TBL_LOCATION))
# Insert some data. This will only update partition j=1 (IMPALA-1480).
self.client.execute("insert into table %s partition(j=1) select 1" % FQ_TBL_NAME)
# Refresh to update file metadata of both partitions
self.client.execute("refresh %s" % FQ_TBL_NAME)
# The data will be read twice because each partition points to the same location.
data = self.execute_scalar("select sum(i), sum(j) from %s" % FQ_TBL_NAME)
assert data.split('\t') == ['2', '3']
self.client.execute("insert into %s partition(j) select 1, 1" % FQ_TBL_NAME)
self.client.execute("insert into %s partition(j) select 1, 2" % FQ_TBL_NAME)
self.client.execute("refresh %s" % FQ_TBL_NAME)
data = self.execute_scalar("select sum(i), sum(j) from %s" % FQ_TBL_NAME)
assert data.split('\t') == ['6', '9']
# Force all scan ranges to be on the same node. It should produce the same
# result as above. See IMPALA-5412.
self.client.execute("set num_nodes=1")
data = self.execute_scalar("select sum(i), sum(j) from %s" % FQ_TBL_NAME)
assert data.split('\t') == ['6', '9']
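  # Worked example of why the assertions above hold (comment added for clarity,
  # not part of the original test): partitions j=1 and j=2 point at the same
  # directory, so every row in it is scanned once per partition. After the first
  # insert there is one row (i=1): sum(i) = 1 + 1 = 2 and sum(j) = 1 + 2 = 3.
  # After the two extra inserts the shared directory holds three i=1 rows, so
  # sum(i) = 3 * 1 * 2 = 6 and sum(j) = 3*1 + 3*2 = 9.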
@SkipIfS3.hive
@SkipIfABFS.hive
@SkipIfADLS.hive
@SkipIfIsilon.hive
@SkipIfLocal.hive
def test_partition_metadata_compatibility(self, vector, unique_database):
"""Regression test for IMPALA-2048. For partitioned tables, test that when Impala
updates the partition metadata (e.g. by doing a compute stats), the tables are
accessible in Hive."""
FQ_TBL_HIVE = unique_database + ".part_parquet_tbl_hive"
FQ_TBL_IMP = unique_database + ".part_parquet_tbl_impala"
# First case, the table is created in HIVE.
self.run_stmt_in_hive("create table %s (a int) partitioned by (x int) "\
"stored as parquet" % FQ_TBL_HIVE)
self.run_stmt_in_hive("set hive.exec.dynamic.partition.mode=nostrict;"\
"insert into %s partition (x) values(1,1)" % FQ_TBL_HIVE)
self.run_stmt_in_hive("select * from %s" % FQ_TBL_HIVE)
# Load the table in Impala and modify its partition metadata by computing table
# statistics.
self.client.execute("invalidate metadata %s" % FQ_TBL_HIVE)
self.client.execute("compute stats %s" % FQ_TBL_HIVE)
self.client.execute("select * from %s" % FQ_TBL_HIVE)
# Make sure the table is accessible in Hive
self.run_stmt_in_hive("select * from %s" % FQ_TBL_HIVE)
# Second case, the table is created in Impala
self.client.execute("create table %s (a int) partitioned by (x int) "\
"stored as parquet" % FQ_TBL_IMP)
self.client.execute("insert into %s partition(x) values(1,1)" % FQ_TBL_IMP)
# Make sure the table is accessible in HIVE
self.run_stmt_in_hive("select * from %s" % FQ_TBL_IMP)
# Compute table statistics
self.client.execute("compute stats %s" % FQ_TBL_IMP)
self.client.execute("select * from %s" % FQ_TBL_IMP)
# Make sure the table remains accessible in HIVE
self.run_stmt_in_hive("select * from %s" % FQ_TBL_IMP)
class TestMixedPartitions(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestMixedPartitions, cls).add_test_dimensions()
# This test only needs to be run once.
cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
@pytest.mark.parametrize('main_table_format', ['parquetfile', 'textfile'])
def test_incompatible_avro_partition_in_non_avro_table(
self, vector, unique_database, main_table_format):
if main_table_format == 'parquetfile' and \
not pytest.config.option.use_local_catalog:
pytest.xfail("IMPALA-7309: adding an avro partition to a parquet table "
"changes its schema")
self.run_test_case("QueryTest/incompatible_avro_partition", vector,
unique_database,
test_file_vars={'$MAIN_TABLE_FORMAT': main_table_format})
class TestPartitionMetadataUncompressedTextOnly(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestPartitionMetadataUncompressedTextOnly, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
@SkipIfLocal.hdfs_client
def test_unsupported_text_compression(self, vector, unique_database):
"""Test querying tables with a mix of supported and unsupported compression codecs.
Should be able to query partitions with supported codecs."""
if FILESYSTEM_PREFIX:
pytest.xfail("IMPALA-7099: this test's filesystem prefix handling is broken")
TBL_NAME = "multi_text_compression"
FQ_TBL_NAME = unique_database + "." + TBL_NAME
TBL_LOCATION = '%s/%s.db/%s' % (WAREHOUSE, unique_database, TBL_NAME)
file_format = vector.get_value('table_format').file_format
# Clean up any existing data in the table directory.
self.filesystem_client.delete_file_dir(TBL_NAME, recursive=True)
# Create the table
self.client.execute(
"create external table {0} like functional.alltypes location '{1}'".format(
FQ_TBL_NAME, TBL_LOCATION))
self.__add_alltypes_partition(vector, FQ_TBL_NAME, "functional", 2009, 1)
self.__add_alltypes_partition(vector, FQ_TBL_NAME, "functional_text_lzo", 2009, 2)
# Create a new partition with a bogus file with the unsupported LZ4 suffix.
lz4_year = 2009
lz4_month = 3
lz4_ym_partition_loc = self.__make_ym_partition_dir(TBL_LOCATION, lz4_year, lz4_month)
self.filesystem_client.create_file("{0}/fake.lz4".format(lz4_ym_partition_loc)[1:],
"some test data")
self.client.execute(
"alter table {0} add partition (year={1}, month={2}) location '{3}'".format(
FQ_TBL_NAME, lz4_year, lz4_month, lz4_ym_partition_loc))
# Create a new partition with a bogus compression codec.
fake_comp_year = 2009
fake_comp_month = 4
fake_comp_ym_partition_loc = self.__make_ym_partition_dir(
TBL_LOCATION, fake_comp_year, fake_comp_month)
self.filesystem_client.create_file(
"{0}/fake.fake_comp".format(fake_comp_ym_partition_loc)[1:], "fake compression")
self.client.execute(
"alter table {0} add partition (year={1}, month={2}) location '{3}'".format(
FQ_TBL_NAME, fake_comp_year, fake_comp_month, fake_comp_ym_partition_loc))
show_files_result = self.client.execute("show files in {0}".format(FQ_TBL_NAME))
assert len(show_files_result.data) == 4, "Expected one file per partition dir"
self.run_test_case('QueryTest/unsupported-compression-partitions', vector,
unique_database)
def __add_alltypes_partition(self, vector, dst_tbl, src_db, year, month):
"""Add the (year, month) partition from ${db_name}.alltypes to dst_tbl."""
tbl_location = self._get_table_location("{0}.alltypes".format(src_db), vector)
part_location = "{0}/year={1}/month={2}".format(tbl_location, year, month)
self.client.execute(
"alter table {0} add partition (year={1}, month={2}) location '{3}'".format(
dst_tbl, year, month, part_location))
def __make_ym_partition_dir(self, tbl_location, year, month):
"""Create the year/month partition directory and return the path."""
y_partition_loc = "{0}/year={1}".format(tbl_location, year)
ym_partition_loc = "{0}/month={1}".format(y_partition_loc, month)
self.filesystem_client.delete_file_dir(tbl_location[1:], recursive=True)
self.filesystem_client.make_dir(tbl_location[1:])
self.filesystem_client.make_dir(y_partition_loc[1:])
self.filesystem_client.make_dir(ym_partition_loc[1:])
return ym_partition_loc
| apache-2.0 | -78,445,492,894,843,780 | 46.838428 | 90 | 0.694751 | false |
caedesvvv/lintgtk | lintgtk/__init__.py | 1 | 1090 | # This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from kiwi.environ import Library, environ
import os
lib = Library("lintgtk")
if lib.uninstalled:
if os.path.exists("/usr/share/lintgtk"):
lib.add_global_resource('glade', '/usr/share/lintgtk/glade')
lib.add_global_resource('images', '/usr/share/lintgtk/images')
else:
environ.add_resource('glade', 'glade')
environ.add_resource('images', 'images')
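# Hedged usage sketch (not part of the original module): with the resource paths
# registered above, other lintgtk modules would typically look up a glade file
# through kiwi's environ helper. The file name 'lintgtk.glade' is only an
# illustrative assumption, and find_resource is assumed to be available in the
# kiwi version in use:
# glade_file = environ.find_resource('glade', 'lintgtk.glade')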
| gpl-2.0 | -5,886,326,020,855,658,000 | 40.923077 | 79 | 0.731193 | false |
z-Wind/Python_Challenge | Level14_spiralImg.py | 1 | 2358 | # http://www.pythonchallenge.com/pc/return/italy.html
__author__ = 'chihchieh.sun'
from PIL import Image
import urllib.request
from io import BytesIO
def getImg(url):
# User Name & Password
password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
top_level_url = 'http://www.pythonchallenge.com/pc/return/'
password_mgr.add_password(None, top_level_url, 'huge', 'file')
handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
# Proxy setting
proxy = urllib.request.getproxies()
proxy_support = urllib.request.ProxyHandler({'sock5': proxy.get('http')})
# opener setting
opener = urllib.request.build_opener(proxy_support,handler)
imUrl = opener.open(url).read()
return Image.open(BytesIO(imUrl)) # Image.open requires a file-like object
def spiralImg(source):
target = Image.new(source.mode, (100, 100))
left, top, right, bottom = (0, 0, 99, 99)
x, y = 0, 0
dirx, diry = 1, 0
h, v = source.size
for i in range(h * v):
target.putpixel((x, y), source.getpixel((i, 0)))
if dirx == 1 and x == right:
dirx, diry = 0, 1
top += 1
elif dirx == -1 and x == left:
dirx, diry = 0, -1
bottom -= 1
elif diry == 1 and y == bottom:
dirx, diry = -1, 0
right -= 1
elif diry == -1 and y == top:
dirx, diry = 1, 0
left += 1
x += dirx
y += diry
return target
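# Worked example of the traversal implemented above (added comment, not part of
# the original solution): with the same boundary-update logic applied to a
# hypothetical 3x3 target, the strip pixels would be written in this clockwise
# inward spiral of (x, y) coordinates:
# (0,0) (1,0) (2,0) (2,1) (2,2) (1,2) (0,2) (0,1) (1,1)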
strip = getImg('http://www.pythonchallenge.com/pc/return/wire.png')
spiral = spiralImg(strip)
# written by me
# spiral = Image.new(strip.mode, (100,100), 0)
# move = [100, 99, 99, 98]
# steps = [1, 1, -1, -1]
# original = [-1, 0]
#
# i = 0
# data = strip.getdata()
# while i < len(data):
# for direction in range(4):
# if direction % 2 == 0:
# for offset in range(move[direction]):
# original[0] += steps[direction]
# spiral.putpixel(tuple(original), data[i])
# i += 1
#
# else:
# for offset in range(move[direction]):
# original[1] += steps[direction]
# spiral.putpixel(tuple(original), data[i])
# i += 1
#
# move[direction] -= 2
spiral.show()
| mit | -8,320,253,825,786,456,000 | 28.623377 | 79 | 0.542409 | false |
CCLab/sezam | apps/authority/urls.py | 1 | 2016 | """ authority urls
"""
#from django.conf import settings
from django.conf.urls import patterns, url
from haystack.query import SearchQuerySet
from haystack.views import SearchView
from apps.browser.forms import ModelSearchForm
urlpatterns = patterns('apps.authority.views',
# Both `display_authorities` and `search_authority` launch the same process.
# `display_authorities` is for the Authorities page with the tree and list.
url(r'^$', 'display_authority', {'template': 'authorities.html',
'search_only': False}, name='display_authorities'),
# `search_authority` is for the empty Authorities page
# with the search form only.
url(r'^find/$', 'display_authority', {'template': 'authorities.html',
'search_only': True}, name='search_authority_blank'),
url(r'^(?P<slug>[-\w]+)/follow/$', 'follow_authority',
name='follow_authority'),
url(r'^(?P<slug>[-\w]+)/unfollow/$', 'unfollow_authority',
name='unfollow_authority'),
url(r'^search/$', SearchView(template='authorities.html',
searchqueryset=SearchQuerySet().all(), form_class=ModelSearchForm),
name='search_authority'),
url(r'^search/autocomplete/$', 'autocomplete', name='autocomplete'),
url(r'^tree/$', 'get_authority_tree'),
url(r'^add/$', 'add_authority', {'template': 'add_record.html'},
name='add_authority'),
url(r'^download/(?P<ext>[-\w]+)/$', 'download_authority_list',
name='download_authority_list'),
url(r'^list/$', 'get_authority_list',
{'template': 'includes/authority_list.html'},
name='get_authority_list'),
url(r'^list/(?P<a_id>\d+)/$', 'get_authority_list',
{'template': 'includes/authority_list.html'},
name='get_authority_list_id'),
url(r'^(?P<slug>[-\w]+)/$', 'get_authority_info',
{'template': 'authority.html'}, name='get_authority_info'),
url(r'^(?P<id>\d+)/$', 'display_authority',
{'template': 'authorities.html'}, name='display_authority'),
)
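# Hedged usage sketch (not part of the original urlconf): because the patterns
# above are named, views and templates can build links with Django's reverse().
# The final URL also depends on where this module is included in the root
# URLconf, and the slug value below is only an illustrative assumption.
# from django.core.urlresolvers import reverse
# reverse('get_authority_info', kwargs={'slug': 'ministry-of-finance'})
# reverse('follow_authority', kwargs={'slug': 'ministry-of-finance'})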
| bsd-3-clause | -4,936,085,818,441,163,000 | 36.333333 | 80 | 0.636409 | false |
kennedyshead/home-assistant | homeassistant/components/plum_lightpad/__init__.py | 1 | 2434 | """Support for Plum Lightpad devices."""
import logging
from aiohttp import ContentTypeError
from requests.exceptions import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
from .utils import load_plum
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
PLATFORMS = ["light"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Plum Lightpad Platform initialization."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
_LOGGER.info("Found Plum Lightpad configuration in config, importing")
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Plum Lightpad from a config entry."""
_LOGGER.debug("Setting up config entry with ID = %s", entry.unique_id)
username = entry.data.get(CONF_USERNAME)
password = entry.data.get(CONF_PASSWORD)
try:
plum = await load_plum(username, password, hass)
except ContentTypeError as ex:
_LOGGER.error("Unable to authenticate to Plum cloud: %s", ex)
return False
except (ConnectTimeout, HTTPError) as ex:
_LOGGER.error("Unable to connect to Plum cloud: %s", ex)
raise ConfigEntryNotReady from ex
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = plum
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
def cleanup(event):
"""Clean up resources."""
plum.cleanup()
entry.async_on_unload(hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup))
return True
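# Hedged configuration sketch (not part of the original integration file): the
# deprecated YAML path accepted by CONFIG_SCHEMA above would look roughly like
# this in configuration.yaml before being imported into a config entry
# (credential values are placeholders, not real settings):
#
# plum_lightpad:
#   username: [email protected]
#   password: YOUR_PLUM_PASSWORD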
| apache-2.0 | -6,322,286,563,704,237,000 | 28.325301 | 88 | 0.666393 | false |
hunse/pytest | _pytest/pytester.py | 1 | 37858 | """ (disabled by default) support for testing pytest and pytest plugins. """
import gc
import sys
import traceback
import os
import codecs
import re
import time
import platform
from fnmatch import fnmatch
import subprocess
import py
import pytest
from py.builtin import print_
from _pytest.main import Session, EXIT_OK
def pytest_addoption(parser):
# group = parser.getgroup("pytester", "pytester (self-tests) options")
parser.addoption('--lsof',
action="store_true", dest="lsof", default=False,
help=("run FD checks if lsof is available"))
parser.addoption('--runpytest', default="inprocess", dest="runpytest",
choices=("inprocess", "subprocess", ),
help=("run pytest sub runs in tests using an 'inprocess' "
"or 'subprocess' (python -m main) method"))
def pytest_configure(config):
# This might be called multiple times. Only take the first.
global _pytest_fullpath
try:
_pytest_fullpath
except NameError:
_pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
_pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
if config.getvalue("lsof"):
checker = LsofFdLeakChecker()
if checker.matching_platform():
config.pluginmanager.register(checker)
class LsofFdLeakChecker(object):
def get_open_files(self):
out = self._exec_lsof()
open_files = self._parse_lsof_output(out)
return open_files
def _exec_lsof(self):
pid = os.getpid()
return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
def _parse_lsof_output(self, out):
def isopen(line):
return line.startswith('f') and ("deleted" not in line and
'mem' not in line and "txt" not in line and 'cwd' not in line)
open_files = []
for line in out.split("\n"):
if isopen(line):
fields = line.split('\0')
fd = fields[0][1:]
filename = fields[1][1:]
if filename.startswith('/'):
open_files.append((fd, filename))
return open_files
def matching_platform(self):
try:
py.process.cmdexec("lsof -v")
except py.process.cmdexec.Error:
return False
else:
return True
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_item(self, item):
lines1 = self.get_open_files()
yield
if hasattr(sys, "pypy_version_info"):
gc.collect()
lines2 = self.get_open_files()
new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
leaked_files = [t for t in lines2 if t[0] in new_fds]
if leaked_files:
error = []
error.append("***** %s FD leakage detected" % len(leaked_files))
error.extend([str(f) for f in leaked_files])
error.append("*** Before:")
error.extend([str(f) for f in lines1])
error.append("*** After:")
error.extend([str(f) for f in lines2])
error.append(error[0])
error.append("*** function %s:%s: %s " % item.location)
pytest.fail("\n".join(error), pytrace=False)
# XXX copied from execnet's conftest.py - needs to be merged
winpymap = {
'python2.7': r'C:\Python27\python.exe',
'python2.6': r'C:\Python26\python.exe',
'python3.1': r'C:\Python31\python.exe',
'python3.2': r'C:\Python32\python.exe',
'python3.3': r'C:\Python33\python.exe',
'python3.4': r'C:\Python34\python.exe',
'python3.5': r'C:\Python35\python.exe',
}
def getexecutable(name, cache={}):
try:
return cache[name]
except KeyError:
executable = py.path.local.sysfind(name)
if executable:
if name == "jython":
import subprocess
popen = subprocess.Popen([str(executable), "--version"],
universal_newlines=True, stderr=subprocess.PIPE)
out, err = popen.communicate()
if not err or "2.5" not in err:
executable = None
if "2.5.2" in err:
executable = None # http://bugs.jython.org/issue1790
cache[name] = executable
return executable
@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
'pypy', 'pypy3'])
def anypython(request):
name = request.param
executable = getexecutable(name)
if executable is None:
if sys.platform == "win32":
executable = winpymap.get(name, None)
if executable:
executable = py.path.local(executable)
if executable.check():
return executable
pytest.skip("no suitable %s found" % (name,))
return executable
# used at least by pytest-xdist plugin
@pytest.fixture
def _pytest(request):
""" Return a helper which offers a gethookrecorder(hook)
method which returns a HookRecorder instance which helps
to make assertions about called hooks.
"""
return PytestArg(request)
class PytestArg:
def __init__(self, request):
self.request = request
def gethookrecorder(self, hook):
hookrecorder = HookRecorder(hook._pm)
self.request.addfinalizer(hookrecorder.finish_recording)
return hookrecorder
def get_public_names(l):
"""Only return names from iterator l without a leading underscore."""
return [x for x in l if x[0] != "_"]
class ParsedCall:
def __init__(self, name, kwargs):
self.__dict__.update(kwargs)
self._name = name
def __repr__(self):
d = self.__dict__.copy()
del d['_name']
return "<ParsedCall %r(**%r)>" %(self._name, d)
class HookRecorder:
"""Record all hooks called in a plugin manager.
This wraps all the hook calls in the plugin manager, recording
each call before propagating the normal calls.
"""
def __init__(self, pluginmanager):
self._pluginmanager = pluginmanager
self.calls = []
def before(hook_name, hook_impls, kwargs):
self.calls.append(ParsedCall(hook_name, kwargs))
def after(outcome, hook_name, hook_impls, kwargs):
pass
self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
def finish_recording(self):
self._undo_wrapping()
def getcalls(self, names):
if isinstance(names, str):
names = names.split()
return [call for call in self.calls if call._name in names]
def assert_contains(self, entries):
__tracebackhide__ = True
i = 0
entries = list(entries)
backlocals = sys._getframe(1).f_locals
while entries:
name, check = entries.pop(0)
for ind, call in enumerate(self.calls[i:]):
if call._name == name:
print_("NAMEMATCH", name, call)
if eval(check, backlocals, call.__dict__):
print_("CHECKERMATCH", repr(check), "->", call)
else:
print_("NOCHECKERMATCH", repr(check), "-", call)
continue
i += ind + 1
break
print_("NONAMEMATCH", name, "with", call)
else:
pytest.fail("could not find %r check %r" % (name, check))
def popcall(self, name):
__tracebackhide__ = True
for i, call in enumerate(self.calls):
if call._name == name:
del self.calls[i]
return call
lines = ["could not find call %r, in:" % (name,)]
lines.extend([" %s" % str(x) for x in self.calls])
pytest.fail("\n".join(lines))
def getcall(self, name):
l = self.getcalls(name)
assert len(l) == 1, (name, l)
return l[0]
# functionality for test reports
def getreports(self,
names="pytest_runtest_logreport pytest_collectreport"):
return [x.report for x in self.getcalls(names)]
def matchreport(self, inamepart="",
names="pytest_runtest_logreport pytest_collectreport", when=None):
""" return a testreport whose dotted import path matches """
l = []
for rep in self.getreports(names=names):
try:
if not when and rep.when != "call" and rep.passed:
# setup/teardown passing reports - let's ignore those
continue
except AttributeError:
pass
if when and getattr(rep, 'when', None) != when:
continue
if not inamepart or inamepart in rep.nodeid.split("::"):
l.append(rep)
if not l:
raise ValueError("could not find test report matching %r: "
"no test reports at all!" % (inamepart,))
if len(l) > 1:
raise ValueError(
"found 2 or more testreports matching %r: %s" %(inamepart, l))
return l[0]
def getfailures(self,
names='pytest_runtest_logreport pytest_collectreport'):
return [rep for rep in self.getreports(names) if rep.failed]
def getfailedcollections(self):
return self.getfailures('pytest_collectreport')
def listoutcomes(self):
passed = []
skipped = []
failed = []
for rep in self.getreports(
"pytest_collectreport pytest_runtest_logreport"):
if rep.passed:
if getattr(rep, "when", None) == "call":
passed.append(rep)
elif rep.skipped:
skipped.append(rep)
elif rep.failed:
failed.append(rep)
return passed, skipped, failed
def countoutcomes(self):
return [len(x) for x in self.listoutcomes()]
def assertoutcome(self, passed=0, skipped=0, failed=0):
realpassed, realskipped, realfailed = self.listoutcomes()
assert passed == len(realpassed)
assert skipped == len(realskipped)
assert failed == len(realfailed)
def clear(self):
self.calls[:] = []
@pytest.fixture
def linecomp(request):
return LineComp()
def pytest_funcarg__LineMatcher(request):
return LineMatcher
@pytest.fixture
def testdir(request, tmpdir_factory):
return Testdir(request, tmpdir_factory)
rex_outcome = re.compile("(\d+) (\w+)")
class RunResult:
"""The result of running a command.
Attributes:
:ret: The return value.
:outlines: List of lines captured from stdout.
    :errlines: List of lines captured from stderr.
:stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
reconstruct stdout or the commonly used
``stdout.fnmatch_lines()`` method.
    :stderr: :py:class:`LineMatcher` of stderr.
:duration: Duration in seconds.
"""
def __init__(self, ret, outlines, errlines, duration):
self.ret = ret
self.outlines = outlines
self.errlines = errlines
self.stdout = LineMatcher(outlines)
self.stderr = LineMatcher(errlines)
self.duration = duration
def parseoutcomes(self):
""" Return a dictionary of outcomestring->num from parsing
the terminal output that the test process produced."""
for line in reversed(self.outlines):
if 'seconds' in line:
outcomes = rex_outcome.findall(line)
if outcomes:
d = {}
for num, cat in outcomes:
d[cat] = int(num)
return d
def assert_outcomes(self, passed=0, skipped=0, failed=0):
""" assert that the specified outcomes appear with the respective
numbers (0 means it didn't occur) in the text output from a test run."""
d = self.parseoutcomes()
assert passed == d.get("passed", 0)
assert skipped == d.get("skipped", 0)
assert failed == d.get("failed", 0)
class Testdir:
"""Temporary test directory with tools to test/run py.test itself.
This is based on the ``tmpdir`` fixture but provides a number of
methods which aid with testing py.test itself. Unless
:py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as
current working directory.
Attributes:
:tmpdir: The :py:class:`py.path.local` instance of the temporary
directory.
:plugins: A list of plugins to use with :py:meth:`parseconfig` and
:py:meth:`runpytest`. Initially this is an empty list but
plugins can be added to the list. The type of items to add to
            the list depends on the method which uses them, so refer to them
for details.
"""
def __init__(self, request, tmpdir_factory):
self.request = request
# XXX remove duplication with tmpdir plugin
basetmp = tmpdir_factory.ensuretemp("testdir")
name = request.function.__name__
for i in range(100):
try:
tmpdir = basetmp.mkdir(name + str(i))
except py.error.EEXIST:
continue
break
self.tmpdir = tmpdir
self.plugins = []
self._savesyspath = (list(sys.path), list(sys.meta_path))
self._savemodulekeys = set(sys.modules)
self.chdir() # always chdir
self.request.addfinalizer(self.finalize)
method = self.request.config.getoption("--runpytest")
if method == "inprocess":
self._runpytest_method = self.runpytest_inprocess
elif method == "subprocess":
self._runpytest_method = self.runpytest_subprocess
def __repr__(self):
return "<Testdir %r>" % (self.tmpdir,)
def finalize(self):
"""Clean up global state artifacts.
Some methods modify the global interpreter state and this
tries to clean this up. It does not remove the temporary
directory however so it can be looked at after the test run
has finished.
"""
sys.path[:], sys.meta_path[:] = self._savesyspath
if hasattr(self, '_olddir'):
self._olddir.chdir()
self.delete_loaded_modules()
def delete_loaded_modules(self):
"""Delete modules that have been loaded during a test.
This allows the interpreter to catch module changes in case
the module is re-imported.
"""
for name in set(sys.modules).difference(self._savemodulekeys):
# it seems zope.interfaces is keeping some state
# (used by twisted related tests)
if name != "zope.interface":
del sys.modules[name]
def make_hook_recorder(self, pluginmanager):
"""Create a new :py:class:`HookRecorder` for a PluginManager."""
assert not hasattr(pluginmanager, "reprec")
pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
self.request.addfinalizer(reprec.finish_recording)
return reprec
def chdir(self):
"""Cd into the temporary directory.
This is done automatically upon instantiation.
"""
old = self.tmpdir.chdir()
if not hasattr(self, '_olddir'):
self._olddir = old
def _makefile(self, ext, args, kwargs):
items = list(kwargs.items())
if args:
source = py.builtin._totext("\n").join(
map(py.builtin._totext, args)) + py.builtin._totext("\n")
basename = self.request.function.__name__
items.insert(0, (basename, source))
ret = None
for name, value in items:
p = self.tmpdir.join(name).new(ext=ext)
source = py.code.Source(value)
def my_totext(s, encoding="utf-8"):
if py.builtin._isbytes(s):
s = py.builtin._totext(s, encoding=encoding)
return s
source_unicode = "\n".join([my_totext(line) for line in source.lines])
source = py.builtin._totext(source_unicode)
content = source.strip().encode("utf-8") # + "\n"
#content = content.rstrip() + "\n"
p.write(content, "wb")
if ret is None:
ret = p
return ret
def makefile(self, ext, *args, **kwargs):
"""Create a new file in the testdir.
ext: The extension the file should use, including the dot.
E.g. ".py".
args: All args will be treated as strings and joined using
newlines. The result will be written as contents to the
file. The name of the file will be based on the test
function requesting this fixture.
E.g. "testdir.makefile('.txt', 'line1', 'line2')"
kwargs: Each keyword is the name of a file, while the value of
it will be written as contents of the file.
E.g. "testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')"
"""
return self._makefile(ext, args, kwargs)
    def makeconftest(self, source):
        """Write a conftest.py file with 'source' as contents."""
return self.makepyfile(conftest=source)
def makeini(self, source):
"""Write a tox.ini file with 'source' as contents."""
return self.makefile('.ini', tox=source)
def getinicfg(self, source):
"""Return the pytest section from the tox.ini config file."""
p = self.makeini(source)
return py.iniconfig.IniConfig(p)['pytest']
def makepyfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .py extension."""
return self._makefile('.py', args, kwargs)
def maketxtfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .txt extension."""
return self._makefile('.txt', args, kwargs)
def syspathinsert(self, path=None):
"""Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
This is undone automatically after the test.
"""
if path is None:
path = self.tmpdir
sys.path.insert(0, str(path))
def mkdir(self, name):
"""Create a new (sub)directory."""
return self.tmpdir.mkdir(name)
def mkpydir(self, name):
"""Create a new python package.
        This creates a (sub)directory with an empty ``__init__.py``
        file so that it is recognised as a python package.
"""
p = self.mkdir(name)
p.ensure("__init__.py")
return p
Session = Session
def getnode(self, config, arg):
"""Return the collection node of a file.
:param config: :py:class:`_pytest.config.Config` instance, see
:py:meth:`parseconfig` and :py:meth:`parseconfigure` to
create the configuration.
:param arg: A :py:class:`py.path.local` instance of the file.
"""
session = Session(config)
assert '::' not in str(arg)
p = py.path.local(arg)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([str(p)], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def getpathnode(self, path):
"""Return the collection node of a file.
This is like :py:meth:`getnode` but uses
:py:meth:`parseconfigure` to create the (configured) py.test
Config instance.
:param path: A :py:class:`py.path.local` instance of the file.
"""
config = self.parseconfigure(path)
session = Session(config)
x = session.fspath.bestrelpath(path)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def genitems(self, colitems):
"""Generate all test items from a collection node.
This recurses into the collection node and returns a list of
all the test items contained within.
"""
session = colitems[0].session
result = []
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
def runitem(self, source):
"""Run the "test_func" Item.
The calling test instance (the class which contains the test
method) must provide a ``.getrunner()`` method which should
return a runner which can run the test protocol for a single
item, like e.g. :py:func:`_pytest.runner.runtestprotocol`.
"""
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.instance
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs):
"""Run a test module in process using ``pytest.main()``.
This run writes "source" into a temporary file and runs
``pytest.main()`` on it, returning a :py:class:`HookRecorder`
instance for the result.
:param source: The source code of the test module.
:param cmdlineargs: Any extra command line arguments to use.
:return: :py:class:`HookRecorder` instance of the result.
"""
p = self.makepyfile(source)
l = list(cmdlineargs) + [p]
return self.inline_run(*l)
def inline_genitems(self, *args):
"""Run ``pytest.main(['--collectonly'])`` in-process.
        Returns a tuple of the collected items and a
:py:class:`HookRecorder` instance.
This runs the :py:func:`pytest.main` function to run all of
py.test inside the test process itself like
:py:meth:`inline_run`. However the return value is a tuple of
the collection items and a :py:class:`HookRecorder` instance.
"""
rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
return items, rec
def inline_run(self, *args, **kwargs):
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
This runs the :py:func:`pytest.main` function to run all of
py.test inside the test process itself. This means it can
return a :py:class:`HookRecorder` instance which gives more
        detailed results from the run than can be done by matching
stdout/stderr from :py:meth:`runpytest`.
:param args: Any command line arguments to pass to
:py:func:`pytest.main`.
:param plugin: (keyword-only) Extra plugin instances the
``pytest.main()`` instance should use.
:return: A :py:class:`HookRecorder` instance.
"""
rec = []
class Collect:
def pytest_configure(x, config):
rec.append(self.make_hook_recorder(config.pluginmanager))
plugins = kwargs.get("plugins") or []
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
self.delete_loaded_modules()
if len(rec) == 1:
reprec = rec.pop()
else:
class reprec:
pass
reprec.ret = ret
# typically we reraise keyboard interrupts from the child run
# because it's our user requesting interruption of the testing
if ret == 2 and not kwargs.get("no_reraise_ctrlc"):
calls = reprec.getcalls("pytest_keyboard_interrupt")
if calls and calls[-1].excinfo.type == KeyboardInterrupt:
raise KeyboardInterrupt()
return reprec
def runpytest_inprocess(self, *args, **kwargs):
""" Return result of running pytest in-process, providing a similar
interface to what self.runpytest() provides. """
if kwargs.get("syspathinsert"):
self.syspathinsert()
now = time.time()
capture = py.io.StdCapture()
try:
try:
reprec = self.inline_run(*args, **kwargs)
except SystemExit as e:
class reprec:
ret = e.args[0]
except Exception:
traceback.print_exc()
class reprec:
ret = 3
finally:
out, err = capture.reset()
sys.stdout.write(out)
sys.stderr.write(err)
res = RunResult(reprec.ret,
out.split("\n"), err.split("\n"),
time.time()-now)
res.reprec = reprec
return res
def runpytest(self, *args, **kwargs):
""" Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a :py:class:`RunResult`.
"""
args = self._ensure_basetemp(args)
return self._runpytest_method(*args, **kwargs)
def _ensure_basetemp(self, args):
args = [str(x) for x in args]
for x in args:
if str(x).startswith('--basetemp'):
#print ("basedtemp exists: %s" %(args,))
break
else:
args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
#print ("added basetemp: %s" %(args,))
return args
def parseconfig(self, *args):
"""Return a new py.test Config instance from given commandline args.
This invokes the py.test bootstrapping code in _pytest.config
to create a new :py:class:`_pytest.core.PluginManager` and
call the pytest_cmdline_parse hook to create new
:py:class:`_pytest.config.Config` instance.
If :py:attr:`plugins` has been populated they should be plugin
modules which will be registered with the PluginManager.
"""
args = self._ensure_basetemp(args)
import _pytest.config
config = _pytest.config._prepareconfig(args, self.plugins)
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
self.request.addfinalizer(config._ensure_unconfigure)
return config
def parseconfigure(self, *args):
"""Return a new py.test configured Config instance.
This returns a new :py:class:`_pytest.config.Config` instance
like :py:meth:`parseconfig`, but also calls the
pytest_configure hook.
"""
config = self.parseconfig(*args)
config._do_configure()
self.request.addfinalizer(config._ensure_unconfigure)
return config
def getitem(self, source, funcname="test_func"):
"""Return the test item for a test function.
This writes the source to a python file and runs py.test's
collection on the resulting module, returning the test item
for the requested function name.
:param source: The module source.
:param funcname: The name of the test function for which the
Item must be returned.
"""
items = self.getitems(source)
for item in items:
if item.name == funcname:
return item
assert 0, "%r item not found in module:\n%s\nitems: %s" %(
funcname, source, items)
def getitems(self, source):
"""Return all test items collected from the module.
This writes the source to a python file and runs py.test's
collection on the resulting module, returning all test items
contained within.
"""
modcol = self.getmodulecol(source)
return self.genitems([modcol])
def getmodulecol(self, source, configargs=(), withinit=False):
"""Return the module collection node for ``source``.
This writes ``source`` to a file using :py:meth:`makepyfile`
and then runs the py.test collection on it, returning the
collection node for the test module.
:param source: The source code of the module to collect.
:param configargs: Any extra arguments to pass to
:py:meth:`parseconfigure`.
:param withinit: Whether to also write a ``__init__.py`` file
            to the temporary directory to ensure it is a package.
"""
kw = {self.request.function.__name__: py.code.Source(source).strip()}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__ = "#")
self.config = config = self.parseconfigure(path, *configargs)
node = self.getnode(config, path)
return node
def collect_by_name(self, modcol, name):
"""Return the collection node for name from the module collection.
This will search a module collection node for a collection
node matching the given name.
:param modcol: A module collection node, see
:py:meth:`getmodulecol`.
:param name: The name of the node to return.
"""
for colitem in modcol._memocollect():
if colitem.name == name:
return colitem
def popen(self, cmdargs, stdout, stderr, **kw):
"""Invoke subprocess.Popen.
This calls subprocess.Popen making sure the current working
directory is the PYTHONPATH.
You probably want to use :py:meth:`run` instead.
"""
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(filter(None, [
str(os.getcwd()), env.get('PYTHONPATH', '')]))
kw['env'] = env
return subprocess.Popen(cmdargs,
stdout=stdout, stderr=stderr, **kw)
def run(self, *cmdargs):
"""Run a command with arguments.
Run a process using subprocess.Popen saving the stdout and
stderr.
Returns a :py:class:`RunResult`.
"""
return self._run(*cmdargs)
def _run(self, *cmdargs):
cmdargs = [str(x) for x in cmdargs]
p1 = self.tmpdir.join("stdout")
p2 = self.tmpdir.join("stderr")
print_("running:", ' '.join(cmdargs))
print_(" in:", str(py.path.local()))
f1 = codecs.open(str(p1), "w", encoding="utf8")
f2 = codecs.open(str(p2), "w", encoding="utf8")
try:
now = time.time()
popen = self.popen(cmdargs, stdout=f1, stderr=f2,
close_fds=(sys.platform != "win32"))
ret = popen.wait()
finally:
f1.close()
f2.close()
f1 = codecs.open(str(p1), "r", encoding="utf8")
f2 = codecs.open(str(p2), "r", encoding="utf8")
try:
out = f1.read().splitlines()
err = f2.read().splitlines()
finally:
f1.close()
f2.close()
self._dump_lines(out, sys.stdout)
self._dump_lines(err, sys.stderr)
return RunResult(ret, out, err, time.time()-now)
def _dump_lines(self, lines, fp):
try:
for line in lines:
py.builtin.print_(line, file=fp)
except UnicodeEncodeError:
print("couldn't print to %s because of encoding" % (fp,))
def _getpytestargs(self):
# we cannot use "(sys.executable,script)"
# because on windows the script is e.g. a py.test.exe
return (sys.executable, _pytest_fullpath,) # noqa
def runpython(self, script):
"""Run a python script using sys.executable as interpreter.
Returns a :py:class:`RunResult`.
"""
return self.run(sys.executable, script)
def runpython_c(self, command):
"""Run python -c "command", return a :py:class:`RunResult`."""
return self.run(sys.executable, "-c", command)
def runpytest_subprocess(self, *args, **kwargs):
"""Run py.test as a subprocess with given arguments.
        Any plugins added to the :py:attr:`plugins` list will be added
        using the ``-p`` command line option. Additionally
        ``--basetemp`` is used to put any temporary files and directories
        in a numbered directory prefixed with "runpytest-" so they do
        not conflict with the normal numbered pytest location for
temporary files and directories.
Returns a :py:class:`RunResult`.
"""
p = py.path.local.make_numbered_dir(prefix="runpytest-",
keep=None, rootdir=self.tmpdir)
args = ('--basetemp=%s' % p, ) + args
#for x in args:
# if '--confcutdir' in str(x):
# break
#else:
# pass
# args = ('--confcutdir=.',) + args
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
args = ('-p', plugins[0]) + args
args = self._getpytestargs() + args
return self.run(*args)
def spawn_pytest(self, string, expect_timeout=10.0):
"""Run py.test using pexpect.
This makes sure to use the right py.test and sets up the
temporary directory locations.
The pexpect child is returned.
"""
basetemp = self.tmpdir.mkdir("pexpect")
invoke = " ".join(map(str, self._getpytestargs()))
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
return self.spawn(cmd, expect_timeout=expect_timeout)
def spawn(self, cmd, expect_timeout=10.0):
"""Run a command using pexpect.
The pexpect child is returned.
"""
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
pytest.skip("pypy-64 bit not supported")
if sys.platform == "darwin":
pytest.xfail("pexpect does not work reliably on darwin?!")
if sys.platform.startswith("freebsd"):
pytest.xfail("pexpect does not work reliably on freebsd")
logfile = self.tmpdir.join("spawn.out").open("wb")
child = pexpect.spawn(cmd, logfile=logfile)
self.request.addfinalizer(logfile.close)
child.timeout = expect_timeout
return child
def getdecoded(out):
try:
return out.decode("utf-8")
except UnicodeDecodeError:
return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
py.io.saferepr(out),)
class LineComp:
def __init__(self):
self.stringio = py.io.TextIO()
def assert_contains_lines(self, lines2):
""" assert that lines2 are contained (linearly) in lines1.
return a list of extralines found.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0)
self.stringio.seek(0)
lines1 = val.split("\n")
return LineMatcher(lines1).fnmatch_lines(lines2)
class LineMatcher:
"""Flexible matching of text.
This is a convenience class to test large texts like the output of
commands.
The constructor takes a list of lines without their trailing
newlines, i.e. ``text.splitlines()``.
"""
def __init__(self, lines):
self.lines = lines
def str(self):
"""Return the entire original text."""
return "\n".join(self.lines)
def _getlines(self, lines2):
if isinstance(lines2, str):
lines2 = py.code.Source(lines2)
if isinstance(lines2, py.code.Source):
lines2 = lines2.strip().lines
return lines2
def fnmatch_lines_random(self, lines2):
"""Check lines exist in the output.
The argument is a list of lines which have to occur in the
        output, in any order. Each line can contain glob wildcards.
"""
lines2 = self._getlines(lines2)
for line in lines2:
for x in self.lines:
if line == x or fnmatch(x, line):
print_("matched: ", repr(line))
break
else:
raise ValueError("line %r not found in output" % line)
def get_lines_after(self, fnline):
"""Return all lines following the given line in the text.
The given line can contain glob wildcards.
"""
for i, line in enumerate(self.lines):
if fnline == line or fnmatch(line, fnline):
return self.lines[i+1:]
raise ValueError("line %r not found in output" % fnline)
def fnmatch_lines(self, lines2):
"""Search the text for matching lines.
The argument is a list of lines which have to match and can
        use glob wildcards. If they do not match a pytest.fail() is
called. The matches and non-matches are also printed on
stdout.
"""
def show(arg1, arg2):
py.builtin.print_(arg1, arg2, file=sys.stderr)
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
nextline = None
extralines = []
__tracebackhide__ = True
for line in lines2:
nomatchprinted = False
while lines1:
nextline = lines1.pop(0)
if line == nextline:
show("exact match:", repr(line))
break
elif fnmatch(nextline, line):
show("fnmatch:", repr(line))
show(" with:", repr(nextline))
break
else:
if not nomatchprinted:
show("nomatch:", repr(line))
nomatchprinted = True
show(" and:", repr(nextline))
extralines.append(nextline)
else:
pytest.fail("remains unmatched: %r, see stderr" % (line,))
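# Hedged usage sketch (not part of the original module): a plugin's own test
# suite would typically exercise the machinery above through the `testdir`
# fixture defined here, for example:
#
# def test_sample(testdir):
#     testdir.makepyfile("""
#         def test_ok():
#             assert 1 + 1 == 2
#     """)
#     result = testdir.runpytest()
#     result.assert_outcomes(passed=1)
#     result.stdout.fnmatch_lines(["*1 passed*"])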
| mit | 108,018,001,310,943,410 | 33.668498 | 82 | 0.580828 | false |
Orhideous/nidhogg | tests/test_legacy.py | 1 | 5496 | from nidhogg.common.models import User
from nidhogg.common.utils import generate_token
from nidhogg.protocol.legacy import request as req
from nidhogg.protocol.legacy import exceptions as exc
from tests import BaseTestCase
class LegacyRequestTest(BaseTestCase):
def test_wrong_argument(self):
with self.assertRaises(exc.BadPayload):
req.LegacyRequest([])
with self.assertRaises(exc.BadPayload):
req.LegacyRequest(b'123')
def test_unimplemented_process(self):
with self.assertRaises(NotImplementedError):
req.LegacyRequest({"first_key": "first_value"}).process()
def test_wrong_key_value_types(self):
with self.assertRaises(exc.BadPayload):
req.LegacyRequest({1: "2"})
with self.assertRaises(exc.BadPayload):
req.LegacyRequest({"3": 4})
def test_empty_payload(self):
with self.assertRaises(exc.EmptyPayload):
req.LegacyRequest({})
def test_save_payload(self):
payload = {"first_key": "first_value", "second_arg": "second_value"}
r = req.LegacyRequest(payload)
self.assertEqual(payload, r.payload)
def test_token_method(self):
token = generate_token()
self.assertIsInstance(token, str)
self.assertEqual(len(token), 32)
self.assertIsInstance(int(token, 16), int)
def test_result_tuple(self):
payload = {"first_key": "first_value", "second_arg": "second_value"}
r = req.LegacyRequest(payload)
result = ('first', "second")
r._result = result
self.assertEqual(r.result, "first:second")
def test_result_str(self):
payload = {"first_key": "first_value", "second_arg": "second_value"}
r = req.LegacyRequest(payload)
result = "OK"
r._result = result
self.assertEqual(r.result, "OK")
class AuthenticateTest(BaseTestCase):
def test_empty_credentials(self):
with self.assertRaises(exc.EmptyCredentials):
payload = {"key": "strange"}
req.Authenticate(payload)
with self.assertRaises(exc.EmptyCredentials):
payload = {"user": "Twilight"}
req.Authenticate(payload)
with self.assertRaises(exc.EmptyCredentials):
payload = {"password": "12345"}
req.Authenticate(payload)
def test_no_such_user(self):
payload = {
"user": "[email protected]",
"password": "12345",
}
with self.assertRaises(exc.InvalidCredentials):
request = req.Authenticate(payload)
request.process()
def test_wrong_password(self):
payload = {
"user": "[email protected]",
"password": "123456",
}
with self.assertRaises(exc.InvalidCredentials):
request = req.Authenticate(payload)
request.process()
def test_success_simple(self):
payload = {
"user": "[email protected]",
"password": "12345",
}
request = req.Authenticate(payload)
request.process()
result = request.result.split(":")
user = User.query.filter(User.email == payload["user"]).one()
self.assertEqual(result[3], user.token.access)
self.assertEqual(result[4], user.token.client)
class ValidateTest(BaseTestCase):
def test_invalid_payload(self):
with self.assertRaises(exc.EmptyCredentials):
payload = {"key": "strange"}
req.Authenticate(payload)
with self.assertRaises(exc.EmptyCredentials):
payload = {"user": "Twilight"}
req.Authenticate(payload)
with self.assertRaises(exc.EmptyCredentials):
payload = {"sessionId": "12345"}
req.Authenticate(payload)
def test_invalid_token(self):
payload = {
"user": "[email protected]",
"sessionId": "nothing"
}
with self.assertRaises(exc.BadPayload):
request = req._Validate(payload)
request.process()
def test_successful_validate(self):
payload = {
"user": "[email protected]",
"password": "12345"
}
request = req.Authenticate(payload)
request.process()
payload = {
"user": payload["user"],
"sessionId": request.result.split(":")[3]
}
request = req._Validate(payload)
request.process()
class CheckTest(BaseTestCase):
def test_ok_check(self):
payload = {
"user": "[email protected]",
"password": "12345"
}
request = req.Authenticate(payload)
request.process()
payload = {
"user": payload["user"],
"sessionId": request.result.split(":")[3]
}
request = req.Check(payload)
request.process()
self.assertEqual(request.result, "YES")
class JoinTest(BaseTestCase):
def test_ok_check(self):
payload = {
"user": "[email protected]",
"password": "12345"
}
request = req.Authenticate(payload)
request.process()
payload = {
"user": payload["user"],
"sessionId": request.result.split(":")[3]
}
request = req.Join(payload)
request.process()
self.assertEqual(request.result, "OK")
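# Note (added comment, not part of the original tests): Authenticate.result is a
# colon-joined string in which element [3] is the access token and element [4]
# the client token, which is why the tests above split on ':' and pass
# result.split(":")[3] as the sessionId for Validate/Check/Join requests.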
| gpl-3.0 | 737,082,699,798,673,200 | 30.050847 | 76 | 0.586426 | false |
jamesward-demo/air-quick-fix | AIRQuickFixServer/pyamf/remoting/gateway/django.py | 1 | 3714 | # Copyright (c) 2007-2008 The PyAMF Project.
# See LICENSE for details.
"""
Gateway for the Django framework.
This gateway allows you to expose functions in Django to AMF clients and
servers.
@see: U{Django homepage (external)<http://djangoproject.com>}
@author: U{Arnar Birgisson<mailto:[email protected]>}
@since: 0.1.0
"""
django = __import__('django')
http = django.http
import pyamf
from pyamf import remoting
from pyamf.remoting import gateway
__all__ = ['DjangoGateway']
class DjangoGateway(gateway.BaseGateway):
"""
An instance of this class is suitable as a Django view.
An example usage would be through C{urlconf}::
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^gateway/', 'yourproject.yourapp.gateway.gw_instance'),
)
where C{yourproject.yourapp.gateway.gw_instance} refers to an instance of
this class.
@ivar expose_request: The standard Django view always has the request
object as the first parameter. To disable this functionality, set this
to C{False}.
@type expose_request: C{bool}
"""
def __init__(self, *args, **kwargs):
kwargs['expose_request'] = kwargs.get('expose_request', True)
gateway.BaseGateway.__init__(self, *args, **kwargs)
def getResponse(self, http_request, request):
"""
Processes the AMF request, returning an AMF response.
@param http_request: The underlying HTTP Request.
@type http_request: C{HTTPRequest<django.core.http.HTTPRequest>}
@param request: The AMF Request.
@type request: L{Envelope<pyamf.remoting.Envelope>}
@rtype: L{Envelope<pyamf.remoting.Envelope>}
@return: The AMF Response.
"""
response = remoting.Envelope(request.amfVersion, request.clientType)
for name, message in request:
processor = self.getProcessor(message)
response[name] = processor(message, http_request=http_request)
return response
def __call__(self, http_request):
"""
Processes and dispatches the request.
@param http_request: The C{HTTPRequest} object.
@type http_request: C{HTTPRequest}
@return: The response to the request.
@rtype: C{HTTPResponse}
"""
if http_request.method != 'POST':
return http.HttpResponseNotAllowed(['POST'])
context = pyamf.get_context(pyamf.AMF0)
stream = None
http_response = http.HttpResponse()
# Decode the request
try:
request = remoting.decode(http_request.raw_post_data, context)
except pyamf.DecodeError:
self.logger.debug(gateway.format_exception())
http_response.status_code = 400
return http_response
self.logger.debug("AMF Request: %r" % request)
# Process the request
try:
response = self.getResponse(http_request, request)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.logger.debug(gateway.format_exception())
return http.HttpResponseServerError()
self.logger.debug("AMF Response: %r" % response)
# Encode the response
try:
stream = remoting.encode(response, context)
except pyamf.EncodeError:
self.logger.debug(gateway.format_exception())
return http.HttpResponseServerError('Unable to encode the response')
buf = stream.getvalue()
http_response['Content-Type'] = remoting.CONTENT_TYPE
http_response['Content-Length'] = str(len(buf))
http_response.write(buf)
return http_response
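# Hedged usage sketch (not part of the original module): the instance referenced
# from urlconf (see the class docstring) would usually be built from a mapping
# of service names to callables; the service name and echo function below are
# illustrative assumptions only. With expose_request=True the HTTP request is
# passed as the first argument.
#
# def echo(http_request, data):
#     return data
#
# gw_instance = DjangoGateway({'myservice.echo': echo})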
| apache-2.0 | 8,034,444,590,400,092,000 | 29.195122 | 80 | 0.636241 | false |
ColdrickSotK/storyboard | storyboard/tests/common/test_hook_priority.py | 1 | 1522 | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
import storyboard.common.hook_priorities as priority
import storyboard.tests.base as base
CONF = cfg.CONF
class TestHookPriority(base.TestCase):
def test_hook_order(self):
"""Assert that the hook priorities are ordered properly."""
self.assertLess(priority.PRE_AUTH, priority.AUTH)
self.assertLess(priority.PRE_AUTH, priority.VALIDATION)
self.assertLess(priority.PRE_AUTH, priority.POST_VALIDATION)
self.assertLess(priority.PRE_AUTH, priority.DEFAULT)
self.assertLess(priority.AUTH, priority.VALIDATION)
self.assertLess(priority.AUTH, priority.POST_VALIDATION)
self.assertLess(priority.AUTH, priority.DEFAULT)
self.assertLess(priority.VALIDATION, priority.POST_VALIDATION)
self.assertLess(priority.VALIDATION, priority.DEFAULT)
self.assertLess(priority.POST_VALIDATION, priority.DEFAULT)
| apache-2.0 | 7,812,888,462,680,532,000 | 38.025641 | 78 | 0.749671 | false |
isaacyeaton/global-dyn-non-equil-gliding | Code/squirrel.py | 1 | 13697 | from __future__ import division
import numpy as np
from scipy import interpolate
import pandas as pd
def load_run(run_num, df):
"""Load in trial data.
Parameters
----------
run_num : int
Which run to load.
df : DataFrame
The DataFrame loaded from the original excel file.
Returns
-------
pos : array
(x, z) positions
tvec : array
Time vector for the run.
dt : float
Sampling interval between data points.
"""
# sampling rate
# http://rsif.royalsocietypublishing.org/content/10/80/20120794/suppl/DC1
if run_num <= 7:
dt = 1 / 60.
else:
dt = 1 / 125.
xkey = "'Caribou_Trial_{0:02d}_Xvalues'".format(run_num)
zkey = "'Caribou_Trial_{0:02d}_Zvalues'".format(run_num)
d = df[[xkey, zkey]]
d = np.array(d)
# get rid of nans and a bunch of junky zeros starting at row 301
start_bad = np.where(np.isnan(d))[0]
if len(start_bad) > 0:
start_bad = start_bad[0]
d = d[:start_bad]
# get rid of zeros (if we get past rows 301...)
start_bad = np.where(d == 0.)[0]
if len(d) > 300 and len(start_bad) > 0:
start_bad = start_bad[0]
d = d[:start_bad]
tvec = np.arange(0, len(d)) * dt
return d, tvec, dt
def calc_vel(pos_data, dt):
"""Velocity in the x and z directions.
Parameters
----------
pos_data : array
(x, z) position information
dt : float
Sampling rate
Returns
-------
vel : array
(vx, vz)
"""
vx = np.gradient(pos_data[:, 0], dt)
vy = np.gradient(pos_data[:, 1], dt)
return np.c_[vx, vy]
def calc_accel(vel_data, dt):
"""Acceleration in the x and z directions.
Parameters
----------
vel_data : array
(vx, vz) velocity data
dt : float
Sampling rate
Returns
-------
accel : array
(ax, az)
"""
ax = np.gradient(vel_data[:, 0], dt)
ay = np.gradient(vel_data[:, 1], dt)
return np.c_[ax, ay]
def calc_vel_mag(vel_data):
"""Velocity magnitude.
Parameters
----------
vel_data : array
(vx, vz) velocity data
Returns
-------
vel_mag : array
np.sqrt(vx**2 + vz**2)
"""
return np.sqrt(vel_data[:, 0]**2 + vel_data[:, 1]**2)
def calc_gamma(vel_data):
"""Glide angle.
Parameters
----------
vel_data : array
(vx, vz)
Returns
-------
gamma : array
Glide angle in rad
"""
return -np.arctan2(vel_data[:, 1], vel_data[:, 0])
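# Hedged note (not part of the original module): with this sign convention a
# descending glide (vz < 0) gives a positive glide angle, e.g.
# >>> calc_gamma(np.array([[1.0, -1.0]]))   # vx = 1, vz = -1
# array([ 0.78539816])                      # ~45 deg, expressed in radians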
def splfit_all(data, tvec, k=5, s=.5):
"""Fit a spline to the data.
"""
posx = interpolate.UnivariateSpline(tvec, data[:, 0], k=k, s=s)
posz = interpolate.UnivariateSpline(tvec, data[:, 1], k=k, s=s)
velx = posx.derivative(1)
velz = posz.derivative(1)
accx = posx.derivative(2)
accz = posz.derivative(2)
pos = np.c_[posx(tvec), posz(tvec)]
vel = np.c_[velx(tvec), velz(tvec)]
acc = np.c_[accx(tvec), accz(tvec)]
return pos, vel, acc
def polyfit(data, tvec, intfun):
"""Fit a spline to the data.
"""
posx = intfun(tvec, data[:, 0])
posz = intfun(tvec, data[:, 1])
velx = posx.derivative(1)
velz = posz.derivative(1)
accx = posx.derivative(2)
accz = posz.derivative(2)
pos = np.c_[posx(tvec), posz(tvec)]
vel = np.c_[velx(tvec), velz(tvec)]
acc = np.c_[accx(tvec), accz(tvec)]
return pos, vel, acc
def polyfit_all(data, tvec, deg, wn=0):
"""Fit a spline to the data.
TODO: this does not to the mirroring correctly!
"""
start = data[:wn][::-1]
stop = data[-wn:][::-1]
datanew = np.r_[start, data, stop]
tvecnew = np.r_[tvec[:wn][::-1], tvec, tvec[-wn:][::-1]]
posx = np.polyfit(tvecnew, datanew[:, 0], deg)
posz = np.polyfit(tvecnew, datanew[:, 1], deg)
velx = np.polyder(posx, 1)
velz = np.polyder(posz, 1)
accx = np.polyder(posx, 2)
accz = np.polyder(posz, 2)
pos = np.c_[np.polyval(posx, tvec), np.polyval(posz, tvec)]
vel = np.c_[np.polyval(velx, tvec), np.polyval(velz, tvec)]
acc = np.c_[np.polyval(accx, tvec), np.polyval(accz, tvec)]
return pos, vel, acc
def fill_df(pos, vel, acc, gamma, velmag, tvec, i):
"""Put one trial's data into a DataFrame.
Parameters
----------
pos : (n x 2) array
x and z position data
vel : (n x 2) array
x and z velocity data
acc : (n x 2) array
x and z acceleration data
gamma : (n x 1) array
Glide angles in deg
velmag : (n x 1) array
Velocity magnitude
tvec : (n x 1) array
Time points
i : int
Trial number that becomes the column name
Returns
-------
posx, posz, velx, velz, accx, accz, gamm, vmag : DataFrame
Data in a DataFrame
"""
posx = pd.DataFrame(pos[:, 0], index=tvec, columns=[str(i)])
posz = pd.DataFrame(pos[:, 1], index=tvec, columns=[str(i)])
velx = pd.DataFrame(vel[:, 0], index=tvec, columns=[str(i)])
velz = pd.DataFrame(vel[:, 1], index=tvec, columns=[str(i)])
accx = pd.DataFrame(acc[:, 0], index=tvec, columns=[str(i)])
accz = pd.DataFrame(acc[:, 1], index=tvec, columns=[str(i)])
gamm = pd.DataFrame(gamma, index=tvec, columns=[str(i)])
vmag = pd.DataFrame(velmag, index=tvec, columns=[str(i)])
return posx, posz, velx, velz, accx, accz, gamm, vmag
def window_bounds(i, n, wn):
"""Start and stop indices for a moving window.
Parameters
----------
i : int
Current index
n : int
Total number of points
wn : int, odd
Total window size
Returns
-------
start : int
Start index
stop : int
Stop index
at_end : bool
Whether we are truncating the window
"""
at_end = False
hw = wn // 2
start = i - hw
stop = i + hw + 1
if start < 0:
at_end = True
start = 0
elif stop > n:
at_end = True
stop = n
return start, stop, at_end
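# Hedged usage sketch (values chosen for illustration, not taken from the trial data):
# the window is truncated, not padded, near the ends of the series.
# >>> window_bounds(0, 10, 5)   # first point -> left half of window clipped
# (0, 3, True)
# >>> window_bounds(5, 10, 5)   # interior point -> full +/- 2 window
# (3, 8, False)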
def moving_window_pts(data, tvec, wn, deg=2, drop_deg=False):
"""Perform moving window smoothing.
Parameters
----------
data : (n x 2) array
Data to smooth and take derivatives of
tvec : (n x 1) array
Time vector
wn : int, odd
Total window size
deg : int, default=2
Polynomial degree to fit to data
drop_deg : bool, default=False
        Whether to drop the degree of the interpolating polynomial at the
ends of the time series, since the truncated window can
negatively affect things.
Returns
-------
spos : (n x 2) array
x and z smoothed data
svel : (n x 2) array
First derivatives of smoothed data (velocity)
sacc : (n x 2) array
Second derivatives of smoothed data (acceleration)
"""
deg_orig = deg
posx, posz = data.T
npts = len(posx)
spos = np.zeros((npts, 2))
svel = np.zeros((npts, 2))
sacc = np.zeros((npts, 2))
for i in range(npts):
start, stop, at_end = window_bounds(i, npts, wn)
if at_end and drop_deg:
deg = deg_orig - 1
else:
deg = deg_orig
t = tvec[start:stop]
x = posx[start:stop]
z = posz[start:stop]
pfpx = np.polyfit(t, x, deg)
pfpz = np.polyfit(t, z, deg)
pfvx = np.polyder(pfpx, m=1)
pfvz = np.polyder(pfpz, m=1)
pfax = np.polyder(pfpx, m=2)
pfaz = np.polyder(pfpz, m=2)
tval = tvec[i]
spos[i] = np.polyval(pfpx, tval), np.polyval(pfpz, tval)
svel[i] = np.polyval(pfvx, tval), np.polyval(pfvz, tval)
sacc[i] = np.polyval(pfax, tval), np.polyval(pfaz, tval)
return spos, svel, sacc
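# Hedged usage sketch (synthetic trajectory; window size and degree are assumptions):
# >>> t = np.linspace(0., 1., 101)
# >>> traj = np.c_[2. * t, -4.9 * t**2]   # idealized x(t), z(t)
# >>> spos, svel, sacc = moving_window_pts(traj, t, wn=11, deg=2)
# For this exactly-quadratic input the local fits recover vx ~ 2 and az ~ -9.8.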
def moving_window_pos(data, tvec, wn, deg=2):
"""Do a moving window of +/- wn, where wn is position.
"""
xwn = wn
hxwn = xwn / 2
posx, posz = data.T
npts = len(posx)
spos = np.zeros((npts, 2))
svel = np.zeros((npts, 2))
sacc = np.zeros((npts, 2))
for i in range(npts):
ind = np.where((posx >= posx[i] - hxwn) & (posx <= posx[i] + hxwn))[0]
t = tvec[ind]
x = posx[ind]
z = posz[ind]
pfpx = np.polyfit(t, x, deg)
pfpz = np.polyfit(t, z, deg)
pfvx = np.polyder(pfpx, m=1)
pfvz = np.polyder(pfpz, m=1)
pfax = np.polyder(pfpx, m=2)
pfaz = np.polyder(pfpz, m=2)
tval = tvec[i]
spos[i] = np.polyval(pfpx, tval), np.polyval(pfpz, tval)
svel[i] = np.polyval(pfvx, tval), np.polyval(pfvz, tval)
sacc[i] = np.polyval(pfax, tval), np.polyval(pfaz, tval)
return spos, svel, sacc
def moving_window_spl(data, tvec, wn, s=.5):
"""Do a moving window of +/- wn on the data and
    take derivatives.
"""
posx, posz = data.T
npts = len(posx)
spos = np.zeros((npts, 2))
svel = np.zeros((npts, 2))
sacc = np.zeros((npts, 2))
for i in range(npts):
start, stop, at_end = window_bounds(i, npts, wn)
t = tvec[start:stop]
x = posx[start:stop]
z = posz[start:stop]
px = interpolate.UnivariateSpline(t, x, k=5, s=s)
pz = interpolate.UnivariateSpline(t, z, k=5, s=s)
vx = px.derivative(1)
vz = pz.derivative(1)
ax = px.derivative(2)
az = pz.derivative(2)
tval = tvec[i]
spos[i] = px(tval), pz(tval)
svel[i] = vx(tval), vz(tval)
sacc[i] = ax(tval), az(tval)
return spos, svel, sacc
def svfilter(tvec, data, wn, order, mode='interp'):
"""Use a Savitzky-Golay to smooth position data and to
calculate the derivatives.
This blog post has a modification of this, which might have better
high frequency filtering: http://bit.ly/1wjZKvk
"""
from scipy.signal import savgol_filter
x, z = data.T
dt = np.diff(tvec).mean()
px = savgol_filter(x, wn, order, mode=mode)
pz = savgol_filter(z, wn, order, mode=mode)
vx = savgol_filter(x, wn, order, mode=mode, deriv=1, delta=dt)
vz = savgol_filter(z, wn, order, mode=mode, deriv=1, delta=dt)
ax = savgol_filter(x, wn, order, mode=mode, deriv=2, delta=dt)
az = savgol_filter(z, wn, order, mode=mode, deriv=2, delta=dt)
return np.c_[px, pz], np.c_[vx, vz], np.c_[ax, az]
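# Hedged usage sketch (window length and polynomial order are assumptions):
# >>> pos, vel, acc = svfilter(tvec, data, wn=11, order=3)
# scipy's savgol_filter requires wn to be odd and larger than order.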
def clcd_binning(gl_bins, gl_rad, Cl, Cd):
"""Bin the lift and drag coefficient curves against glide angle
to get average across all trajectories
Parameters
----------
gl_bins : array
The different bins [left, right)
gl_rad : DataFrame
Glide angle data in radians
Cl : DataFrame
Lift coefficients
Cd : DataFrame
Drag coefficients
Returns
-------
clcd_means : array, (n x 3)
lift-to-drag ratio mean, std, stderror
cl_means : array, (n x 3)
same for lift coefficient
cd_means : array, (n x 3)
same for drag coefficient
gl_means : array, (n x 3)
same for glide angle
Notes
-----
This uses a Taylor expansion for the Cl/Cd ratio statistics,
    because a simple ratio of the means is a biased estimator.
"""
nbins = len(gl_bins)
gl_flattened = gl_rad.values.flatten()
cl_flattened = Cl.values.flatten()
cd_flattened = Cd.values.flatten()
bins = np.digitize(gl_flattened, gl_bins)
all_indices = []
no_data = []
cl_means = np.zeros((nbins, 3))
cd_means = np.zeros((nbins, 3))
clcd_means = np.zeros((nbins, 3))
gl_means = np.zeros((nbins, 3))
for idx in np.arange(nbins):
        # find relevant indices
all_indices.append(np.where(bins == idx)[0])
indices = np.where(bins == idx)[0]
if len(indices) == 0:
no_data.append(idx)
continue
# get out our data
glsnip = gl_flattened[indices]
clsnip = cl_flattened[indices]
cdsnip = cd_flattened[indices]
clcd_means[idx] = taylor_moments(clsnip, cdsnip)
cl_means[idx] = simple_moments(clsnip)
cd_means[idx] = simple_moments(cdsnip)
gl_means[idx] = simple_moments(glsnip)
# remove where we have no interpolation
# clcd_means[no_data] = np.nan
# cl_means[no_data] = np.nan
# cd_means[no_data] = np.nan
# gl_means[no_data] = np.nan
return clcd_means[1:], cl_means[1:], cd_means[1:], gl_means[1:]
def taylor_moments(x, y):
"""Taylor series approximations to the moments of a ratio.
See http://bit.ly/1uy8qND and http://bit.ly/VHPX4u
and http://en.wikipedia.org/wiki/Ratio_estimator
Parameters
----------
x : 1D array
Numerator of the ratio
y : 1D array
        Denominator of the ratio
Returns
-------
tmean : float
Mean of the ratio
tstd : float
STD of the ratio
tserr : float
Standard error of the ratio
"""
n = len(x)
ex = x.mean()
ey = y.mean()
varx = x.var()
vary = y.var()
cov = np.cov(x, y)[0, 1]
tmean = ex / ey - cov / ey**2 + ex / ey**3 * vary
tvar = varx / ey**2 - 2 * ex / ey**3 * cov + ex**2 / ey**4 * vary
tstd = np.sqrt(tvar)
return tmean, tstd, tstd / np.sqrt(n)
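# Hedged usage sketch (synthetic inputs, not data from the glide trials):
# >>> rng = np.random.RandomState(0)
# >>> cl = rng.uniform(0.5, 1.5, size=200)
# >>> cd = rng.uniform(0.2, 0.8, size=200)
# >>> tmean, tstd, tserr = taylor_moments(cl, cd)
# tmean is the mean of cl/cd with a first-order bias correction; compare it
# with the naive estimate (cl / cd).mean().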
def simple_moments(x):
"""Moments for Cl and Cd curves.
Parameters
----------
x : 1D numpy array
Returns
-------
mean, std, sterr
"""
mean = x.mean()
std = x.std()
sterr = std / np.sqrt(len(x))
return mean, std, sterr
def interpspl(data, npts, k=3, s=3):
"""Interpolate using splines.
"""
tck, u = interpolate.splprep(data.T, k=k, s=s, nest=-1)
datanew = interpolate.splev(np.linspace(0, 1, npts), tck)
return np.array(datanew).T
| mit | -4,676,687,735,099,849,000 | 23.458929 | 78 | 0.557494 | false |
tensorflow/probability | tensorflow_probability/python/bijectors/bijector_test.py | 1 | 35232 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import mock
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.internal import cache_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import test_util
JAX_MODE = False
@test_util.test_all_tf_execution_regimes
class BaseBijectorTest(test_util.TestCase):
"""Tests properties of the Bijector base-class."""
def testIsAbstract(self):
with self.assertRaisesRegexp(TypeError,
('Can\'t instantiate abstract class Bijector '
'with abstract methods __init__')):
tfb.Bijector() # pylint: disable=abstract-class-instantiated
def testDefaults(self):
class _BareBonesBijector(tfb.Bijector):
"""Minimal specification of a `Bijector`."""
def __init__(self):
parameters = dict(locals())
super(_BareBonesBijector, self).__init__(
forward_min_event_ndims=0,
parameters=parameters)
bij = _BareBonesBijector()
self.assertFalse(bij.is_constant_jacobian)
self.assertFalse(bij.validate_args)
self.assertIsNone(bij.dtype)
self.assertStartsWith(bij.name, 'bare_bones_bijector')
for shape in [[], [1, 2], [1, 2, 3]]:
forward_event_shape_ = self.evaluate(
bij.inverse_event_shape_tensor(shape))
inverse_event_shape_ = self.evaluate(
bij.forward_event_shape_tensor(shape))
self.assertAllEqual(shape, forward_event_shape_)
self.assertAllEqual(shape, bij.forward_event_shape(shape))
self.assertAllEqual(shape, inverse_event_shape_)
self.assertAllEqual(shape, bij.inverse_event_shape(shape))
with self.assertRaisesRegexp(NotImplementedError,
'inverse not implemented'):
bij.inverse(0)
with self.assertRaisesRegexp(NotImplementedError,
'forward not implemented'):
bij.forward(0)
with self.assertRaisesRegexp(
NotImplementedError,
'Cannot derive `inverse_log_det_jacobian`'):
bij.inverse_log_det_jacobian(0, event_ndims=0)
with self.assertRaisesRegexp(
NotImplementedError,
'Cannot derive `forward_log_det_jacobian`'):
bij.forward_log_det_jacobian(0, event_ndims=0)
def testVariableEq(self):
# Testing for b/186021261, bijector equality in the presence of TF
# Variables.
v1 = tf.Variable(3, dtype=tf.float32)
v2 = tf.Variable(4, dtype=tf.float32)
self.assertNotEqual(tfb.SinhArcsinh(v1), tfb.SinhArcsinh(v2))
@test_util.disable_test_for_backend(
disable_numpy=True,
reason='`convert_to_tensor` casts instead of raising')
def testChecksDType(self):
class _TypedIdentity(tfb.Bijector):
"""Bijector with an explicit dtype."""
def __init__(self, dtype):
parameters = dict(locals())
super(_TypedIdentity, self).__init__(
forward_min_event_ndims=0,
dtype=dtype,
parameters=parameters)
def _forward(self, x):
return x
x32 = tf.constant(0, dtype=tf.float32)
x64 = tf.constant(0, dtype=tf.float64)
error_clazz = TypeError if JAX_MODE else ValueError
b32 = _TypedIdentity(tf.float32)
self.assertEqual(tf.float32, b32(0).dtype)
self.assertEqual(tf.float32, b32(x32).dtype)
with self.assertRaisesRegexp(
error_clazz, 'Tensor conversion requested dtype'):
b32.forward(x64)
b64 = _TypedIdentity(tf.float64)
self.assertEqual(tf.float64, b64(0).dtype)
self.assertEqual(tf.float64, b64(x64).dtype)
with self.assertRaisesRegexp(
error_clazz, 'Tensor conversion requested dtype'):
b64.forward(x32)
@parameterized.named_parameters(
('no_batch_shape', 1.4),
('with_batch_shape', [[[2., 3.], [5., 7.]]]))
@test_util.numpy_disable_gradient_test
def testAutodiffLogDetJacobian(self, bijector_scale):
class NoJacobianBijector(tfb.Bijector):
"""Bijector with no log det jacobian methods."""
def __init__(self, scale=2.):
parameters = dict(locals())
self._scale = tensor_util.convert_nonref_to_tensor(scale)
super(NoJacobianBijector, self).__init__(
validate_args=True,
forward_min_event_ndims=0,
parameters=parameters)
def _forward(self, x):
return tf.exp(self._scale * x)
def _inverse(self, y):
return tf.math.log(y) / self._scale
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
scale=parameter_properties.ParameterProperties(event_ndims=0))
b = NoJacobianBijector(scale=bijector_scale)
x = tf.convert_to_tensor([2., -3.])
[
fldj,
true_fldj,
ildj
] = self.evaluate([
b.forward_log_det_jacobian(x, event_ndims=0),
tf.math.log(b._scale) + b._scale * x,
b.inverse_log_det_jacobian(b.forward(x), event_ndims=0)
])
self.assertAllClose(fldj, true_fldj)
self.assertAllClose(fldj, -ildj)
y = tf.convert_to_tensor([27., 5.])
[
ildj,
true_ildj,
fldj
] = self.evaluate([
b.inverse_log_det_jacobian(y, event_ndims=0),
-tf.math.log(tf.abs(y * b._scale)),
b.forward_log_det_jacobian(b.inverse(y), event_ndims=0)
])
self.assertAllClose(ildj, true_ildj)
self.assertAllClose(ildj, -fldj)
def testCopyExtraArgs(self):
# Note: we cannot easily test all bijectors since each requires
# different initialization arguments. We therefore spot test a few.
sigmoid = tfb.Sigmoid(low=-1., high=2., validate_args=True)
self.assertEqual(sigmoid.parameters, sigmoid.copy().parameters)
chain = tfb.Chain(
[
tfb.Softplus(hinge_softness=[1., 2.], validate_args=True),
tfb.MatrixInverseTriL(validate_args=True)
], validate_args=True)
self.assertEqual(chain.parameters, chain.copy().parameters)
def testCopyOverride(self):
sigmoid = tfb.Sigmoid(low=-1., high=2., validate_args=True)
self.assertEqual(sigmoid.parameters, sigmoid.copy().parameters)
unused_sigmoid_copy = sigmoid.copy(validate_args=False)
base_params = sigmoid.parameters.copy()
copy_params = sigmoid.copy(validate_args=False).parameters.copy()
self.assertNotEqual(
base_params.pop('validate_args'), copy_params.pop('validate_args'))
self.assertEqual(base_params, copy_params)
class IntentionallyMissingError(Exception):
pass
class ForwardOnlyBijector(tfb.Bijector):
"""Bijector with no inverse methods at all."""
def __init__(self, scale=2., validate_args=False, name=None):
parameters = dict(locals())
with tf.name_scope(name or 'forward_only') as name:
self._scale = tensor_util.convert_nonref_to_tensor(
scale,
dtype_hint=tf.float32)
super(ForwardOnlyBijector, self).__init__(
validate_args=validate_args,
forward_min_event_ndims=0,
parameters=parameters,
name=name)
def _forward(self, x):
return self._scale * x
def _forward_log_det_jacobian(self, _):
return tf.math.log(self._scale)
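# Hedged usage sketch (not part of the original tests): a forward-only bijector
# can evaluate forward() and forward_log_det_jacobian(); inverse() raises
# NotImplementedError unless the argument was produced by forward() and can be
# recovered from the bijector cache, as the caching tests below exercise.
# >>> b = ForwardOnlyBijector(scale=2.)
# >>> y = b.forward(tf.constant(3.))   # 6.0
# >>> b.inverse(y)                     # cache hit -> returns the original 3.0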
class InverseOnlyBijector(tfb.Bijector):
"""Bijector with no forward methods at all."""
def __init__(self, scale=2., validate_args=False, name=None):
parameters = dict(locals())
with tf.name_scope(name or 'inverse_only') as name:
self._scale = tensor_util.convert_nonref_to_tensor(
scale,
dtype_hint=tf.float32)
super(InverseOnlyBijector, self).__init__(
validate_args=validate_args,
forward_min_event_ndims=0,
parameters=parameters,
name=name)
def _inverse(self, y):
return y / self._scale
def _inverse_log_det_jacobian(self, _):
return -tf.math.log(self._scale)
class ExpOnlyJacobian(tfb.Bijector):
"""Only used for jacobian calculations."""
def __init__(self, validate_args=False, forward_min_event_ndims=0):
parameters = dict(locals())
super(ExpOnlyJacobian, self).__init__(
validate_args=validate_args,
is_constant_jacobian=False,
forward_min_event_ndims=forward_min_event_ndims,
parameters=parameters,
name='exp')
def _inverse_log_det_jacobian(self, y):
return -tf.math.log(y)
def _forward_log_det_jacobian(self, x):
return tf.math.log(x)
class VectorExpOnlyJacobian(tfb.Bijector):
"""An Exp bijector that operates only on vector (or higher-order) events."""
def __init__(self):
parameters = dict(locals())
super(VectorExpOnlyJacobian, self).__init__(
validate_args=False,
is_constant_jacobian=False,
forward_min_event_ndims=1,
parameters=parameters,
name='vector_exp')
def _inverse_log_det_jacobian(self, y):
return -tf.reduce_sum(tf.math.log(y), axis=-1)
def _forward_log_det_jacobian(self, x):
return tf.reduce_sum(tf.math.log(x), axis=-1)
class ConstantJacobian(tfb.Bijector):
"""Only used for jacobian calculations."""
def __init__(self, forward_min_event_ndims=0):
parameters = dict(locals())
super(ConstantJacobian, self).__init__(
validate_args=False,
is_constant_jacobian=True,
forward_min_event_ndims=forward_min_event_ndims,
parameters=parameters,
name='c')
def _inverse_log_det_jacobian(self, y):
return tf.constant(2., y.dtype)
def _forward_log_det_jacobian(self, x):
return tf.constant(-2., x.dtype)
class UniqueCacheKey(tfb.Bijector):
"""Used to test instance-level caching."""
def __init__(self, forward_min_event_ndims=0):
parameters = dict(locals())
super(UniqueCacheKey, self).__init__(
validate_args=False,
is_constant_jacobian=True,
forward_min_event_ndims=forward_min_event_ndims,
parameters=parameters,
name='instance_cache')
def _forward(self, x):
return x + tf.constant(1., x.dtype)
def _forward_log_det_jacobian(self, x):
return tf.constant(0., x.dtype)
def _get_parameterization(self):
return id(self)
class UnspecifiedParameters(tfb.Bijector):
"""A bijector that fails to pass `parameters` to the base class."""
def __init__(self, loc):
self._loc = loc
super(UnspecifiedParameters, self).__init__(
validate_args=False,
is_constant_jacobian=True,
forward_min_event_ndims=0,
name='unspecified_parameters')
def _forward(self, x):
return x + self._loc
def _forward_log_det_jacobian(self, x):
return tf.constant(0., x.dtype)
@test_util.test_all_tf_execution_regimes
class BijectorTestEventNdims(test_util.TestCase):
def assertRaisesError(self, msg):
return self.assertRaisesRegexp(Exception, msg)
def testBijectorNonIntegerEventNdims(self):
bij = ExpOnlyJacobian()
with self.assertRaisesRegexp(ValueError, 'Expected integer'):
bij.forward_log_det_jacobian(1., event_ndims=1.5)
with self.assertRaisesRegexp(ValueError, 'Expected integer'):
bij.inverse_log_det_jacobian(1., event_ndims=1.5)
def testBijectorArrayEventNdims(self):
bij = ExpOnlyJacobian()
with self.assertRaisesRegexp(ValueError, 'Expected scalar'):
bij.forward_log_det_jacobian(1., event_ndims=(1, 2))
with self.assertRaisesRegexp(ValueError, 'Expected scalar'):
bij.inverse_log_det_jacobian(1., event_ndims=(1, 2))
def testBijectorDynamicEventNdims(self):
with self.assertRaisesError('Expected scalar'):
bij = ExpOnlyJacobian(validate_args=True)
event_ndims = tf1.placeholder_with_default((1, 2), shape=None)
self.evaluate(
bij.forward_log_det_jacobian(1., event_ndims=event_ndims))
with self.assertRaisesError('Expected scalar'):
bij = ExpOnlyJacobian(validate_args=True)
event_ndims = tf1.placeholder_with_default((1, 2), shape=None)
self.evaluate(
bij.inverse_log_det_jacobian(1., event_ndims=event_ndims))
@test_util.test_all_tf_execution_regimes
class BijectorBatchShapesTest(test_util.TestCase):
@parameterized.named_parameters(
('exp', tfb.Exp, None),
('scale',
lambda: tfb.Scale(tf.ones([4, 2])), None),
('sigmoid',
lambda: tfb.Sigmoid(low=tf.zeros([3]), high=tf.ones([4, 1])), None),
('scale_matvec',
lambda: tfb.ScaleMatvecDiag([[0.], [3.]]), None),
('invert',
lambda: tfb.Invert(tfb.ScaleMatvecDiag(tf.ones([2, 1]))), None),
('reshape',
lambda: tfb.Reshape([1], event_shape_in=[1, 1]), None),
('chain',
lambda: tfb.Chain([tfb.Power(power=[[2.], [3.]]), # pylint: disable=g-long-lambda
tfb.Invert(tfb.Split(2))]),
None),
('jointmap_01',
lambda: tfb.JointMap([tfb.Scale([5, 3]), tfb.Scale([1, 4])]), [0, 1]),
('jointmap_11',
lambda: tfb.JointMap([tfb.Scale([5, 3]), tfb.Scale([1, 4])]), [1, 1]),
('jointmap_20',
lambda: tfb.JointMap([tfb.Scale([5, 3]), tfb.Scale([1, 4])]), [2, 0]),
('jointmap_22',
lambda: tfb.JointMap([tfb.Scale([5, 3]), tfb.Scale([1, 4])]), [2, 2]),
('restructure_with_ragged_event_ndims',
lambda: tfb.Restructure(input_structure=[0, 1], # pylint: disable=g-long-lambda
output_structure={'a': 0, 'b': 1}),
[0, 1]))
def test_batch_shape_matches_output_shapes(self,
bijector_fn,
override_x_event_ndims=None):
bijector = bijector_fn()
if override_x_event_ndims is None:
x_event_ndims = bijector.forward_min_event_ndims
y_event_ndims = bijector.inverse_min_event_ndims
else:
x_event_ndims = override_x_event_ndims
y_event_ndims = bijector.forward_event_ndims(x_event_ndims)
# All ways of calculating the batch shape should yield the same result.
batch_shape_x = bijector.experimental_batch_shape(
x_event_ndims=x_event_ndims)
batch_shape_y = bijector.experimental_batch_shape(
y_event_ndims=y_event_ndims)
self.assertEqual(batch_shape_x, batch_shape_y)
batch_shape_tensor_x = bijector.experimental_batch_shape_tensor(
x_event_ndims=x_event_ndims)
batch_shape_tensor_y = bijector.experimental_batch_shape_tensor(
y_event_ndims=y_event_ndims)
self.assertAllEqual(batch_shape_tensor_x, batch_shape_tensor_y)
self.assertAllEqual(batch_shape_tensor_x, batch_shape_x)
# Check that we're robust to integer type.
batch_shape_tensor_x64 = bijector.experimental_batch_shape_tensor(
x_event_ndims=tf.nest.map_structure(np.int64, x_event_ndims))
batch_shape_tensor_y64 = bijector.experimental_batch_shape_tensor(
y_event_ndims=tf.nest.map_structure(np.int64, y_event_ndims))
self.assertAllEqual(batch_shape_tensor_x64, batch_shape_tensor_y64)
self.assertAllEqual(batch_shape_tensor_x64, batch_shape_x)
# Pushing a value through the bijector should return a Tensor(s) with
# the expected batch shape...
xs = tf.nest.map_structure(lambda nd: tf.ones([1] * nd), x_event_ndims)
ys = bijector.forward(xs)
for y_part, nd in zip(tf.nest.flatten(ys), tf.nest.flatten(y_event_ndims)):
part_batch_shape = ps.shape(y_part)[:ps.rank(y_part) - nd]
self.assertAllEqual(batch_shape_y,
ps.broadcast_shape(batch_shape_y, part_batch_shape))
# ... which should also be the shape of the fldj.
fldj = bijector.forward_log_det_jacobian(xs, event_ndims=x_event_ndims)
self.assertAllEqual(batch_shape_y, ps.shape(fldj))
# Also check the inverse case.
xs = bijector.inverse(tf.nest.map_structure(tf.identity, ys))
for x_part, nd in zip(tf.nest.flatten(xs), tf.nest.flatten(x_event_ndims)):
part_batch_shape = ps.shape(x_part)[:ps.rank(x_part) - nd]
self.assertAllEqual(batch_shape_x,
ps.broadcast_shape(batch_shape_x, part_batch_shape))
ildj = bijector.inverse_log_det_jacobian(ys, event_ndims=y_event_ndims)
self.assertAllEqual(batch_shape_x, ps.shape(ildj))
@parameterized.named_parameters(
('scale', lambda: tfb.Scale([3.14159])),
('chain', lambda: tfb.Exp()(tfb.Scale([3.14159]))))
def test_ndims_specification(self, bijector_fn):
bijector = bijector_fn()
# If no `event_ndims` is passed, should assume min_event_ndims.
self.assertAllEqual(bijector.experimental_batch_shape(), [1])
self.assertAllEqual(bijector.experimental_batch_shape_tensor(), [1])
with self.assertRaisesRegex(
ValueError, 'Only one of `x_event_ndims` and `y_event_ndims`'):
bijector.experimental_batch_shape(x_event_ndims=0, y_event_ndims=0)
with self.assertRaisesRegex(
ValueError, 'Only one of `x_event_ndims` and `y_event_ndims`'):
bijector.experimental_batch_shape_tensor(x_event_ndims=0, y_event_ndims=0)
@test_util.test_all_tf_execution_regimes
class BijectorCachingTest(test_util.TestCase):
def testCachingOfForwardResults(self):
forward_only_bijector = ForwardOnlyBijector()
x = tf.constant(1.1)
y = tf.constant(2.2)
with self.assertRaises(NotImplementedError):
forward_only_bijector.inverse(y)
with self.assertRaises(NotImplementedError):
forward_only_bijector.inverse_log_det_jacobian(y, event_ndims=0)
# Call forward and forward_log_det_jacobian one-by-one (not together).
y = forward_only_bijector.forward(x)
_ = forward_only_bijector.forward_log_det_jacobian(x, event_ndims=0)
self.assertIs(y, forward_only_bijector.forward(x))
# Now, everything should be cached if the argument `is y`, so these are ok.
forward_only_bijector.inverse(y)
forward_only_bijector.inverse_log_det_jacobian(y, event_ndims=0)
def testCachingOfInverseResults(self):
inverse_only_bijector = InverseOnlyBijector()
x = tf.constant(1.1)
y = tf.constant(2.2)
with self.assertRaises(NotImplementedError):
inverse_only_bijector.forward(x)
with self.assertRaises(NotImplementedError):
inverse_only_bijector.forward_log_det_jacobian(x, event_ndims=0)
# Call inverse and inverse_log_det_jacobian one-by-one (not together).
x = inverse_only_bijector.inverse(y)
_ = inverse_only_bijector.inverse_log_det_jacobian(y, event_ndims=0)
self.assertIs(x, inverse_only_bijector.inverse(y))
# Now, everything should be cached if the argument `is x`.
inverse_only_bijector.forward(x)
inverse_only_bijector.forward_log_det_jacobian(x, event_ndims=0)
def testCachingGarbageCollection(self):
bijector = ForwardOnlyBijector()
bijector._cache.clear()
niters = 6
for i in range(niters):
x = tf.constant(i, dtype=tf.float32)
y = bijector.forward(x) # pylint: disable=unused-variable
# We tolerate leaking tensor references in graph mode only.
expected_live = 1 if tf.executing_eagerly() else niters
self.assertEqual(
expected_live, len(bijector._cache.weak_keys(direction='forward')))
def testSharedCacheForward(self):
# Test that shared caching behaves as expected when bijectors are
# parameterized by Python floats, Tensors, and np arrays.
f = lambda x: x
g = lambda x: tf.constant(x, dtype=tf.float32)
h = lambda x: np.array(x).astype(np.float32)
scale_1 = 2.
scale_2 = 3.
x = tf.constant(3., dtype=tf.float32)
for fn in [f, g, h]:
s_1 = fn(scale_1)
s_2 = fn(scale_2)
bijector_1a = ForwardOnlyBijector(scale=s_1)
bijector_1b = ForwardOnlyBijector(scale=s_1)
bijector_2 = ForwardOnlyBijector(scale=s_2)
y = bijector_1a.forward(x)
# Different bijector instances with the same type/parameterization
# => cache hit.
self.assertIs(y, bijector_1b.forward(x))
# Bijectors with different parameterizations => cache miss.
self.assertIsNot(y, bijector_2.forward(x))
def testSharedCacheInverse(self):
# Test that shared caching behaves as expected when bijectors are
# parameterized by Python floats, Tensors, and np arrays.
f = lambda x: x
g = lambda x: tf.constant(x, dtype=tf.float32)
h = lambda x: np.array(x).astype(np.float32)
scale_1 = 2.
scale_2 = 3.
y = tf.constant(3., dtype=tf.float32)
for fn in [f, g, h]:
s_1 = fn(scale_1)
s_2 = fn(scale_2)
InverseOnlyBijector._cache.clear()
bijector_1a = InverseOnlyBijector(scale=s_1)
bijector_1b = InverseOnlyBijector(scale=s_1)
bijector_2 = InverseOnlyBijector(scale=s_2)
x = bijector_1a.inverse(y)
# Different bijector instances with the same type/parameterization
# => cache hit.
self.assertIs(x, bijector_1b.inverse(y))
# Bijectors with different parameterizations => cache miss.
self.assertIsNot(x, bijector_2.inverse(y))
# There is only one entry in the cache corresponding to each fn call
self.assertLen(bijector_1a._cache.weak_keys(direction='forward'), 1)
self.assertLen(bijector_2._cache.weak_keys(direction='inverse'), 1)
def testUniqueCacheKey(self):
bijector_1 = UniqueCacheKey()
bijector_2 = UniqueCacheKey()
x = tf.constant(3., dtype=tf.float32)
y_1 = bijector_1.forward(x)
y_2 = bijector_2.forward(x)
self.assertIsNot(y_1, y_2)
self.assertLen(bijector_1._cache.weak_keys(direction='forward'), 1)
self.assertLen(bijector_2._cache.weak_keys(direction='forward'), 1)
def testBijectorsWithUnspecifiedParametersDoNotShareCache(self):
bijector_1 = UnspecifiedParameters(loc=tf.constant(1., dtype=tf.float32))
bijector_2 = UnspecifiedParameters(loc=tf.constant(2., dtype=tf.float32))
x = tf.constant(3., dtype=tf.float32)
y_1 = bijector_1.forward(x)
y_2 = bijector_2.forward(x)
self.assertIsNot(y_1, y_2)
self.assertLen(bijector_1._cache.weak_keys(direction='forward'), 1)
self.assertLen(bijector_2._cache.weak_keys(direction='forward'), 1)
def testInstanceCache(self):
instance_cache_bijector = tfb.Exp()
instance_cache_bijector._cache = cache_util.BijectorCache(
bijector=instance_cache_bijector)
global_cache_bijector = tfb.Exp()
# Ensure the global cache does not persist between tests in different
# execution regimes.
tfb.Exp._cache.clear()
x = tf.constant(0., dtype=tf.float32)
y = global_cache_bijector.forward(x)
# Instance-level cache doesn't store values from calls to an identical but
# globally-cached bijector.
self.assertLen(
global_cache_bijector._cache.weak_keys(direction='forward'), 1)
self.assertLen(
instance_cache_bijector._cache.weak_keys(direction='forward'), 0)
# Bijector with instance-level cache performs a globally-cached
# transformation => cache miss. (Implying global cache did not pick it up.)
z = instance_cache_bijector.forward(x)
self.assertIsNot(y, z)
@test_util.test_all_tf_execution_regimes
class BijectorReduceEventDimsTest(test_util.TestCase):
"""Test reducing of event dims."""
def testReduceEventNdimsForward(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian()
self.assertAllClose(
np.log(x), self.evaluate(
bij.forward_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
np.sum(np.log(x), axis=-1),
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
np.sum(np.log(x), axis=(-1, -2)),
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=2)))
def testNoReductionWhenEventNdimsIsOmitted(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian()
self.assertAllClose(
np.log(x),
self.evaluate(bij.forward_log_det_jacobian(x)))
self.assertAllClose(
-np.log(x),
self.evaluate(bij.inverse_log_det_jacobian(x)))
bij = VectorExpOnlyJacobian()
self.assertAllClose(
np.sum(np.log(x), axis=-1),
self.evaluate(bij.forward_log_det_jacobian(x)))
self.assertAllClose(
np.sum(-np.log(x), axis=-1),
self.evaluate(bij.inverse_log_det_jacobian(x)))
def testInverseWithEventDimsOmitted(self):
bij = tfb.Split(2)
self.assertAllEqual(
0.0,
self.evaluate(bij.inverse_log_det_jacobian(
[tf.ones((3, 4, 5)), tf.ones((3, 4, 5))])))
def testReduceEventNdimsForwardRaiseError(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
with self.assertRaisesRegexp(ValueError, 'must be at least'):
bij.forward_log_det_jacobian(x, event_ndims=0)
with self.assertRaisesRegexp(ValueError, 'Input must have rank at least'):
bij.forward_log_det_jacobian(x, event_ndims=4)
def testReduceEventNdimsInverse(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian()
self.assertAllClose(
-np.log(x), self.evaluate(
bij.inverse_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
np.sum(-np.log(x), axis=-1),
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
np.sum(-np.log(x), axis=(-1, -2)),
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=2)))
def testReduceEventNdimsInverseRaiseError(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
with self.assertRaisesRegexp(ValueError, 'must be at least'):
bij.inverse_log_det_jacobian(x, event_ndims=0)
with self.assertRaisesRegexp(ValueError, 'Input must have rank at least'):
bij.inverse_log_det_jacobian(x, event_ndims=4)
def testReduceEventNdimsForwardConstJacobian(self):
x = [[[1., 2.], [3., 4.]]]
bij = ConstantJacobian()
self.assertAllClose(
-2., self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
-4., self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
-8., self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=2)))
def testReduceEventNdimsInverseConstJacobian(self):
x = [[[1., 2.], [3., 4.]]]
bij = ConstantJacobian()
self.assertAllClose(
2., self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
4., self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
8., self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=2)))
def testHandlesNonStaticEventNdims(self):
x_ = [[[1., 2.], [3., 4.]]]
x = tf1.placeholder_with_default(x_, shape=None)
event_ndims = tf1.placeholder_with_default(1, shape=None)
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
bij.inverse_log_det_jacobian(x, event_ndims=event_ndims)
ildj = self.evaluate(
bij.inverse_log_det_jacobian(x, event_ndims=event_ndims))
self.assertAllClose(-np.log(x_), ildj)
class BijectorLDJCachingTest(test_util.TestCase):
def testShapeCachingIssue(self):
if tf.executing_eagerly(): return
# Exercise the scenario outlined in
# https://github.com/tensorflow/probability/issues/253 (originally reported
# internally as b/119756336).
x1 = tf1.placeholder(tf.float32, shape=[None, 2], name='x1')
x2 = tf1.placeholder(tf.float32, shape=[None, 2], name='x2')
bij = ConstantJacobian()
bij.forward_log_det_jacobian(x2, event_ndims=1)
a = bij.forward_log_det_jacobian(x1, event_ndims=1, name='a_fldj')
x1_value = np.random.uniform(size=[10, 2])
with self.test_session() as sess:
sess.run(a, feed_dict={x1: x1_value})
@test_util.test_all_tf_execution_regimes
class NumpyArrayCaching(test_util.TestCase):
def test_caches(self):
# We need to call convert_to_tensor on outputs to make sure scalar
# outputs from the numpy backend are wrapped correctly. We could just
# directly wrap numpy scalars with np.array, but it would look pretty
# out of place, considering that the numpy backend is still private.
if mock is None:
return
x_ = np.array([[-0.1, 0.2], [0.3, -0.4]], np.float32)
y_ = np.exp(x_)
b = tfb.Exp()
# Ensure the global cache does not persist between tests in different
# execution regimes.
tfb.Exp._cache.clear()
# We will intercept calls to TF to ensure np.array objects don't get
# converted to tf.Tensor objects.
with mock.patch.object(tf, 'convert_to_tensor', return_value=x_):
with mock.patch.object(tf, 'exp', return_value=y_):
y = b.forward(x_)
self.assertIsInstance(y, np.ndarray)
self.assertAllEqual(
[x_], [k() for k in b._cache.weak_keys(direction='forward')])
with mock.patch.object(tf, 'convert_to_tensor', return_value=y_):
with mock.patch.object(tf.math, 'log', return_value=x_):
x = b.inverse(y_)
self.assertIsInstance(x, np.ndarray)
self.assertIs(x, b.inverse(y))
self.assertAllEqual(
[y_], [k() for k in b._cache.weak_keys(direction='inverse')])
yt_ = y_.T
xt_ = x_.T
with mock.patch.object(tf, 'convert_to_tensor', return_value=yt_):
with mock.patch.object(tf.math, 'log', return_value=xt_):
xt = b.inverse(yt_)
self.assertIsNot(x, xt)
self.assertIs(xt_, xt)
@test_util.test_all_tf_execution_regimes
class TfModuleTest(test_util.TestCase):
@test_util.jax_disable_variable_test
def test_variable_tracking(self):
x = tf.Variable(1.)
b = ForwardOnlyBijector(scale=x, validate_args=True)
self.assertIsInstance(b, tf.Module)
self.assertEqual((x,), b.trainable_variables)
@test_util.jax_disable_variable_test
def test_gradient(self):
x = tf.Variable(1.)
b = InverseOnlyBijector(scale=x, validate_args=True)
with tf.GradientTape() as tape:
loss = b.inverse(1.)
g = tape.gradient(loss, b.trainable_variables)
self.evaluate(tf1.global_variables_initializer())
self.assertEqual((-1.,), self.evaluate(g))
class _ConditionalBijector(tfb.Bijector):
def __init__(self):
parameters = dict(locals())
super(_ConditionalBijector, self).__init__(
forward_min_event_ndims=0,
is_constant_jacobian=True,
validate_args=False,
dtype=tf.float32,
parameters=parameters,
name='test_bijector')
# These are not implemented in the base class, but we need to write a stub in
# order to mock them out.
def _inverse_log_det_jacobian(self, _, arg1, arg2):
pass
def _forward_log_det_jacobian(self, _, arg1, arg2):
pass
# Test that ensures kwargs from public methods are passed in to
# private methods.
@test_util.test_all_tf_execution_regimes
class ConditionalBijectorTest(test_util.TestCase):
def testConditionalBijector(self):
b = _ConditionalBijector()
arg1 = 'b1'
arg2 = 'b2'
retval = tf.constant(1.)
for name in ['forward', 'inverse']:
method = getattr(b, name)
with mock.patch.object(b, '_' + name, return_value=retval) as mock_method:
method(1., arg1=arg1, arg2=arg2)
mock_method.assert_called_once_with(mock.ANY, arg1=arg1, arg2=arg2)
for name in ['inverse_log_det_jacobian', 'forward_log_det_jacobian']:
method = getattr(b, name)
with mock.patch.object(b, '_' + name, return_value=retval) as mock_method:
method(1., event_ndims=0, arg1=arg1, arg2=arg2)
mock_method.assert_called_once_with(mock.ANY, arg1=arg1, arg2=arg2)
def testNestedCondition(self):
b = _ConditionalBijector()
arg1 = {'b1': 'c1'}
arg2 = {'b2': 'c2'}
retval = tf.constant(1.)
for name in ['forward', 'inverse']:
method = getattr(b, name)
with mock.patch.object(b, '_' + name, return_value=retval) as mock_method:
method(1., arg1=arg1, arg2=arg2)
mock_method.assert_called_once_with(mock.ANY, arg1=arg1, arg2=arg2)
for name in ['inverse_log_det_jacobian', 'forward_log_det_jacobian']:
method = getattr(b, name)
with mock.patch.object(b, '_' + name, return_value=retval) as mock_method:
method(1., event_ndims=0, arg1=arg1, arg2=arg2)
mock_method.assert_called_once_with(mock.ANY, arg1=arg1, arg2=arg2)
class CompositeForwardBijector(tfb.AutoCompositeTensorBijector):
def __init__(self, scale=2., validate_args=False, parameters=None, name=None):
parameters = dict(locals()) if parameters is None else parameters
with tf.name_scope(name or 'forward_only') as name:
self._scale = tensor_util.convert_nonref_to_tensor(
scale,
dtype_hint=tf.float32)
super(CompositeForwardBijector, self).__init__(
validate_args=validate_args,
forward_min_event_ndims=0,
parameters=parameters,
name=name)
def _forward(self, x):
return self._scale * x
def _forward_log_det_jacobian(self, _):
return tf.math.log(self._scale)
class CompositeForwardScaleThree(CompositeForwardBijector):
def __init__(self, name='scale_three'):
parameters = dict(locals())
super(CompositeForwardScaleThree, self).__init__(
scale=3., parameters=parameters, name=name)
@test_util.test_all_tf_execution_regimes
class AutoCompositeTensorBijectorTest(test_util.TestCase):
def test_disable_ct_bijector(self):
ct_bijector = CompositeForwardBijector()
self.assertIsInstance(ct_bijector, tf.__internal__.CompositeTensor)
non_ct_bijector = ForwardOnlyBijector()
self.assertNotIsInstance(non_ct_bijector, tf.__internal__.CompositeTensor)
flat = tf.nest.flatten(ct_bijector, expand_composites=True)
unflat = tf.nest.pack_sequence_as(
ct_bijector, flat, expand_composites=True)
x = tf.constant([2., 3.])
self.assertAllClose(
non_ct_bijector.forward(x),
tf.function(lambda b: b.forward(x))(unflat))
def test_composite_tensor_subclass(self):
bij = CompositeForwardScaleThree()
self.assertIs(bij._type_spec.value_type, type(bij))
flat = tf.nest.flatten(bij, expand_composites=True)
unflat = tf.nest.pack_sequence_as(bij, flat, expand_composites=True)
self.assertIsInstance(unflat, CompositeForwardScaleThree)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -8,731,417,941,040,100,000 | 35.098361 | 89 | 0.661387 | false |
NLeSC/embodied-emotions-scripts | embem/debates/debates2csv.py | 1 | 4854 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to extract counts for words in word field.
Usage: debates2csv.py <xml-file or directory containing xml files>
2014-11-18 [email protected]
"""
import argparse
import xml.etree.ElementTree as ET
import re
from collections import Counter
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('xml', help='the name of the xml file or directory of '
                        'xml files the word field counts should be extracted for')
args = parser.parse_args()
# file or directory?
if os.path.isfile(args.xml):
files = [args.xml]
else:
files = []
for fn in os.listdir(args.xml):
file_name = '{}{}{}'.format(args.xml, os.sep, fn)
if os.path.isfile(file_name):
files.append(file_name)
# a list of the LIWC anger words
# abuse.* means that any word starting with 'abuse' (e.g., 'abuser' or
# 'abuses') is counted.
word_field = ['abuse.*', 'abusi.*', 'aggravat.*', 'aggress.*', 'agitat.*',
'anger.*', 'angr.*', 'annoy.*', 'antagoni.*', 'argh.*',
'argu.*', 'arrogan.*', 'assault.*', 'asshole.*', 'attack.*',
'bastard.*', 'battl.*', 'beaten', 'bitch.*', 'bitter.*',
'blam.*', 'bother.*', 'brutal.*', 'cheat.*', 'confront.*',
'contempt.*', 'contradic.*', 'crap', 'crappy', 'critical',
'critici.*', 'crude.*', 'cruel.*', 'cunt.*', 'cut', 'cynic',
'damn.*', 'danger.*', 'defenc.*', 'defens.*', 'despis.*',
'destroy.*', 'destruct.*', 'disgust.*', 'distrust.*',
'domina.*', 'dumb.*', 'dump.*', 'enemie.*', 'enemy.*',
'enrag.*', 'envie.*', 'envious', 'envy.*', 'evil.*',
'feroc.*', 'feud.*', 'fiery', 'fight.*', 'foe.*', 'fought',
'frustrat.*', 'fuck', 'fucked.*', 'fucker.*', 'fuckin.*',
'fucks', 'fume.*', 'fuming', 'furious.*', 'fury', 'goddam.*',
'greed.*', 'grouch.*', 'grr.*', 'harass.*', 'hate', 'hated',
'hateful.*', 'hater.*', 'hates', 'hating', 'hatred',
'heartless.*', 'hell', 'hellish', 'hit', 'hostil.*',
'humiliat.*', 'idiot.*', 'insult.*', 'interrup.*',
'intimidat.*', 'jealous.*', 'jerk', 'jerked', 'jerks',
'kill.*', 'liar.*', 'lied', 'lies', 'lous.*', 'ludicrous.*',
'lying', 'mad', 'maddening', 'madder', 'maddest', 'maniac.*',
'mock', 'mocked', 'mocker.*', 'mocking', 'mocks', 'molest.*',
'moron.*', 'murder.*', 'nag.*', 'nast.*', 'obnoxious.*',
'offence.*', 'offend.*', 'offens.*', 'outrag.*', 'paranoi.*',
'pettie.*', 'petty.*', 'piss.*', 'poison.*', 'prejudic.*',
'prick.*', 'protest', 'protested', 'protesting', 'punish.*',
'rage.*', 'raging', 'rape.*', 'raping', 'rapist.*',
'rebel.*', 'resent.*', 'revenge.*', 'ridicul.*', 'rude.*',
'sarcas.*', 'savage.*', 'sceptic.*', 'screw.*', 'shit.*',
'sinister', 'skeptic.*', 'smother.*', 'snob.*', 'spite.*',
'stubborn.*', 'stupid.*', 'suck', 'sucked', 'sucker.*',
'sucks', 'sucky', 'tantrum.*', 'teas.*', 'temper', 'tempers',
'terrify', 'threat.*', 'ticked', 'tortur.*', 'trick.*',
'ugl.*', 'vicious.*', 'victim.*', 'vile', 'villain.*',
'violat.*', 'violent.*', 'war', 'warfare.*', 'warred',
'warring', 'wars', 'weapon.*', 'wicked.*']
num_words = 0
all_words = Counter()
wf_words = Counter()
for input_file in files:
# read xml file
tree = ET.parse(input_file)
root = tree.getroot()
for speech in tree.getiterator('speech'):
speaker = speech.attrib.get('speaker')
text = ET.tostring(speech)
# remove xml tags
text = re.sub('<[^>]*>', '', text)
# remove html entities (e.g., ɣ)
text = re.sub('&#\d+;', '', text)
# convert to lower case
text = text.lower()
# extract a list of words
words = re.findall('\w+', text)
# count words
num_words += len(words)
all_words.update(words)
regex = re.compile('^{}$'.format('$|^'.join(word_field)))
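    # Hedged illustration (not part of the original script): each LIWC entry is
    # anchored, so 'abuse.*' expands to '^abuse.*$' and the compiled pattern
    # matches 'abuser' or 'abuses' but not 'disabuse'.
    # >>> bool(re.match('^abuse.*$', 'abuser')), bool(re.match('^abuse.*$', 'disabuse'))
    # (True, False)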
# count word field words
for word in all_words:
if regex.match(word):
wf_words[word] += all_words[word]
# print output
print 'Word\tFrequency'
print 'TOTAL\t{}'.format(num_words)
for (word, freq) in wf_words.most_common():
print '{}\t{}'.format(word, freq)
| apache-2.0 | 807,590,557,542,389,000 | 43.127273 | 79 | 0.468892 | false |
resync/resync | resync/hashes.py | 1 | 4083 | """util.py: A collection of utility functions for source and/or client."""
import base64
import hashlib
class Hashes(object):
"""Compute hash digests for ResourceSync.
These are all base64 encoded according to the rules of
http://www.ietf.org/rfc/rfc4648.txt
MD5
    In ResourceSync this is defined to be the same as for Content-MD5 in HTTP,
http://www.ietf.org/rfc/rfc2616.txt which, in turn, defined the
digest string as the "base64 of 128 bit MD5 digest as per RFC 1864"
http://www.ietf.org/rfc/rfc1864.txt
    Unfortunately, RFC1864 is rather vague and contains only an example
which doesn't use encoding characters for 62 or 63. It points to
RFC1521 to describe base64 which is explicit that the encoding alphabet
is [A-Za-z0-9+/] with = to pad.
The above corresponds with the alphabet of "3. Base 64 Encoding" in RFC3548
http://www.ietf.org/rfc/rfc3548.txt
and not the url safe version, "Base 64 Encoding with URL and Filename Safe
Alphabet" which replaces + and / with - and _ respectively.
This is the same as the alphabet of "4. Base 64 Encoding" in RFC4648
http://www.ietf.org/rfc/rfc4648.txt.
This algorithm is implemented by base64.standard_b64encode() or
base64.b64encode() with no altchars specified. Available in python2.4 and
up [http://docs.python.org/library/base64.html]
"""
NAME_TO_ATTRIBUTE = {'md5': 'md5', 'sha-1': 'sha1', 'sha-256': 'sha256'}
def __init__(self, hashes=None, file=None):
"""Initialize Hashes object with types of hash to caluclate.
If file is supplied then compute for that file.
"""
self.hashes = set()
        for hash in (hashes or []):  # tolerate the default of hashes=None
if (hash not in self.NAME_TO_ATTRIBUTE.keys()):
raise Exception("Hash type %s not supported" % (hash))
self.hashes.add(hash)
#
self.md5_calc = None
self.sha1_calc = None
self.sha256_calc = None
#
if (file is not None):
self.compute_for_file(file)
def initialize_hashes(self):
"""Create new hashlib objects for each hash we are going to calculate."""
if ('md5' in self.hashes):
self.md5_calc = hashlib.md5()
if ('sha-1' in self.hashes):
self.sha1_calc = hashlib.sha1()
if ('sha-256' in self.hashes):
self.sha256_calc = hashlib.sha256()
def compute_for_file(self, file, block_size=2**14):
"""Compute hash digests for a file.
Calculate the hashes based on one read through the file.
Optional block_size parameter controls memory used to do
calculations. This should be a multiple of 128 bytes.
"""
self.initialize_hashes()
f = open(file, 'rb')
while True:
data = f.read(block_size)
if not data:
break
if self.md5_calc is not None:
self.md5_calc.update(data)
if self.sha1_calc is not None:
self.sha1_calc.update(data)
if self.sha256_calc is not None:
self.sha256_calc.update(data)
f.close()
def set(self, resource):
"""Set hash values for resource from current file.
Assumes that resource has appropriate attributes or setters
with names md5, sha1, etc. and that hashes have been calculated.
"""
for hash in self.hashes:
att = self.NAME_TO_ATTRIBUTE[hash]
setattr(resource, att, getattr(self, att))
@property
def md5(self):
"""Return MD5 hash calculated."""
if (self.md5_calc is None):
return None
return self.md5_calc.hexdigest()
@property
def sha1(self):
"""Return SHA-1 hash calculated."""
if (self.sha1_calc is None):
return None
return self.sha1_calc.hexdigest()
@property
def sha256(self):
"""Return SHA-256 hash calculated."""
if (self.sha256_calc is None):
return None
return self.sha256_calc.hexdigest()
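# Hedged usage sketch (the file path below is an assumption):
# >>> h = Hashes(hashes=['md5', 'sha-256'], file='/tmp/example.dat')
# >>> h.md5, h.sha256   # both digests computed in a single pass over the file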
| apache-2.0 | -3,536,747,152,276,613,000 | 34.198276 | 81 | 0.613764 | false |
totcoindev/totcoin | qa/rpc-tests/multi_rpc.py | 1 | 4587 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
#Append rpcauth to sos.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "sos.conf"), 'a') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urlparse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| mit | 5,949,845,305,899,907,000 | 36.909091 | 129 | 0.641596 | false |
erigones/api_openvpn | exceptions.py | 1 | 2259 | from rest_framework.exceptions import APIException
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
def validate_only_one_instance(obj):
model = obj.__class__
if (model.objects.count() > 0 and
obj.id != model.objects.get().id):
raise ValidationError("Can only create 1 %s instance" % model.__name__)
class FatalKeyException(APIException):
status_code = 404
default_detail = "You must identify user by his username"
class TypeOfKeyDoesNotExist(APIException):
status_code = 404
default_detail = "This type of key is not supported"
class UserNotFoundException(APIException):
status_code = 404
default_detail = "User has not been found"
class InstanceHaveNoVarsAttributesException(APIException):
status_code = 500
default_detail = "Object can't be parsed into dictionary, because it hasn't" \
"vars attributes"
class NoContentException(APIException):
status_code = 204
default_detail = "No content"
class ServerIsNotCreatedException(APIException):
status_code = 404
default_detail = "Server keys hasn't been generated yet"
class InvalidSectionException(APIException):
status_code = 404
default_detail = 'Section "{section}" is invalid.'
def __init__(self, section, detail=None):
if detail is not None:
self.detail = force_text(detail)
else:
self.detail = force_text(self.default_detail).format(section=section)
class EmptyValueException(APIException):
status_code = 404
default_detail = "Configuration '{name}' can't be blank."
def __init__(self, name, detail=None):
if detail is not None:
self.detail = force_text(detail)
else:
self.detail = force_text(self.default_detail).format(name=name)
class InvalidValueException(APIException):
status_code = 404
default_detail = "Value '{value}' in '{name}' is invalid. Try '{correct}'."
def __init__(self, name, value, correct, detail=None):
if detail is not None:
self.detail = force_text(detail)
else:
self.detail = force_text(self.default_detail).format(name=name, value=value, correct=correct) | bsd-3-clause | 8,523,195,803,407,811,000 | 29.540541 | 105 | 0.677291 | false |
olegnev/dx-toolkit | src/python/dxpy/scripts/dx_build_app.py | 1 | 48240 | #!/usr/bin/env python
#
# Copyright (C) 2013-2015 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function, unicode_literals
import logging
logging.basicConfig(level=logging.WARNING)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.ERROR)
import os, sys, json, subprocess, argparse
import py_compile
import re
import shutil
import tempfile
import time
from datetime import datetime
import dxpy, dxpy.app_builder
from dxpy import logger
from dxpy.utils import json_load_raise_on_duplicates
from dxpy.utils.resolver import resolve_path, is_container_id
from dxpy.utils.completer import LocalCompleter
from dxpy.app_categories import APP_CATEGORIES
from dxpy.exceptions import err_exit, DXError
from dxpy.utils.printing import BOLD
from dxpy.compat import open, USING_PYTHON2, decode_command_line_args
decode_command_line_args()
parser = argparse.ArgumentParser(description="Uploads a DNAnexus App.")
APP_VERSION_RE = re.compile("^([1-9][0-9]*|0)\.([1-9][0-9]*|0)\.([1-9][0-9]*|0)(-[-0-9A-Za-z]+(\.[-0-9A-Za-z]+)*)?(\+[-0-9A-Za-z]+(\.[-0-9A-Za-z]+)*)?$")
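# Hedged examples (not part of the original source): this semver-style pattern
# accepts versions such as "1.0.0", "0.2.1-beta.1", or "1.2.3+git.ab1b1c1d" and
# rejects leading zeros such as "01.0.0".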
app_options = parser.add_argument_group('options for creating apps', '(Only valid when --app/--create-app is specified)')
applet_options = parser.add_argument_group('options for creating applets', '(Only valid when --app/--create-app is NOT specified)')
# COMMON OPTIONS
parser.add_argument("--ensure-upload", help="If specified, will bypass computing checksum of " +
"resources directory and upload it unconditionally; " +
"by default, will compute checksum and upload only if " +
"it differs from a previously uploaded resources bundle.",
action="store_true")
src_dir_action = parser.add_argument("src_dir", help="App or applet source directory (default: current directory)", nargs='?')
src_dir_action.completer = LocalCompleter()
parser.set_defaults(mode="app")
parser.add_argument("--app", "--create-app", help="Create an app (otherwise, creates an applet)", action="store_const",
dest="mode", const="app")
parser.add_argument("--create-applet", help=argparse.SUPPRESS, action="store_const", dest="mode", const="applet")
applet_options.add_argument("-d", "--destination", help="Specifies the destination project, destination folder, and/or name for the applet, in the form [PROJECT_NAME_OR_ID:][/[FOLDER/][NAME]]. Overrides the project, folder, and name fields of the dxapp.json, if they were supplied.", default='.')
# --[no-]dry-run
#
# The --dry-run flag can be used to see the applet spec that would be
# provided to /applet/new, for debugging purposes. However, the output
# would deviate from that of a real run in the following ways:
#
# * Any bundled resources are NOT uploaded and are not reflected in the
# app(let) spec.
# * No temporary project is created (if building an app) and the
# "project" field is not set in the app spec.
parser.set_defaults(dry_run=False)
parser.add_argument("--dry-run", "-n", help="Do not create an app(let): only perform local checks and compilation steps, and show the spec of the app(let) that would have been created.", action="store_true", dest="dry_run")
parser.add_argument("--no-dry-run", help=argparse.SUPPRESS, action="store_false", dest="dry_run")
# --[no-]publish
app_options.set_defaults(publish=False)
app_options.add_argument("--publish", help="Publish the resulting app and make it the default.", action="store_true",
dest="publish")
app_options.add_argument("--no-publish", help=argparse.SUPPRESS, action="store_false", dest="publish")
# --[no-]remote
parser.set_defaults(remote=False)
parser.add_argument("--remote", help="Build the app remotely by uploading the source directory to the DNAnexus Platform and building it there. This option is useful if you would otherwise need to cross-compile the app(let) to target the Execution Environment.", action="store_true", dest="remote")
parser.add_argument("--no-remote", help=argparse.SUPPRESS, action="store_false", dest="remote")
applet_options.add_argument("-f", "--overwrite", help="Remove existing applet(s) of the same name in the destination folder.",
action="store_true", default=False)
applet_options.add_argument("-a", "--archive", help="Archive existing applet(s) of the same name in the destination folder.",
action="store_true", default=False)
parser.add_argument("-v", "--version", help="Override the version number supplied in the manifest.", default=None,
dest="version_override", metavar='VERSION')
app_options.add_argument("-b", "--bill-to", help="Entity (of the form user-NAME or org-ORGNAME) to bill for the app.",
default=None, dest="bill_to", metavar='USER_OR_ORG')
# --[no-]check-syntax
parser.set_defaults(check_syntax=True)
parser.add_argument("--check-syntax", help=argparse.SUPPRESS, action="store_true", dest="check_syntax")
parser.add_argument("--no-check-syntax", help="Warn but do not fail when syntax problems are found (default is to fail on such errors)", action="store_false", dest="check_syntax")
# --[no-]version-autonumbering
app_options.set_defaults(version_autonumbering=True)
app_options.add_argument("--version-autonumbering", help=argparse.SUPPRESS, action="store_true", dest="version_autonumbering")
app_options.add_argument("--no-version-autonumbering", help="Only attempt to create the version number supplied in the manifest (that is, do not try to create an autonumbered version such as 1.2.3+git.ab1b1c1d if 1.2.3 already exists and is published).", action="store_false", dest="version_autonumbering")
# --[no-]update
app_options.set_defaults(update=True)
app_options.add_argument("--update", help=argparse.SUPPRESS, action="store_true", dest="update")
app_options.add_argument("--no-update", help="Never update an existing unpublished app in place.", action="store_false", dest="update")
# --[no-]dx-toolkit-autodep
parser.set_defaults(dx_toolkit_autodep="stable")
parser.add_argument("--dx-toolkit-legacy-git-autodep", help=argparse.SUPPRESS, action="store_const", dest="dx_toolkit_autodep", const="git")
parser.add_argument("--dx-toolkit-stable-autodep", help=argparse.SUPPRESS, action="store_const", dest="dx_toolkit_autodep", const="stable")
parser.add_argument("--dx-toolkit-beta-autodep", help=argparse.SUPPRESS, action="store_const", dest="dx_toolkit_autodep", const="beta") # deprecated
parser.add_argument("--dx-toolkit-unstable-autodep", help=argparse.SUPPRESS, action="store_const", dest="dx_toolkit_autodep", const="unstable") # deprecated
parser.add_argument("--dx-toolkit-autodep", help=argparse.SUPPRESS, action="store_const", dest="dx_toolkit_autodep", const="stable")
parser.add_argument("--no-dx-toolkit-autodep", help="Do not auto-insert the dx-toolkit dependency (default is to add it if it would otherwise be absent from the runSpec)", action="store_false", dest="dx_toolkit_autodep")
# --[no-]parallel-build
parser.set_defaults(parallel_build=True)
parser.add_argument("--parallel-build", help=argparse.SUPPRESS, action="store_true", dest="parallel_build")
parser.add_argument("--no-parallel-build", help="Build with " + BOLD("make") + " instead of " + BOLD("make -jN") + ".", action="store_false",
dest="parallel_build")
app_options.set_defaults(use_temp_build_project=True)
app_options.add_argument("--no-temp-build-project", help="When building an app, build its applet in the current project instead of a temporary project", action="store_false", dest="use_temp_build_project")
# --yes
app_options.add_argument('-y', '--yes', dest='confirm', help='Do not ask for confirmation for potentially dangerous operations', action='store_false')
# --[no-]json (undocumented): dumps the JSON describe of the app or
# applet that was created. Useful for tests.
parser.set_defaults(json=False)
parser.add_argument("--json", help=argparse.SUPPRESS, action="store_true", dest="json")
parser.add_argument("--no-json", help=argparse.SUPPRESS, action="store_false", dest="json")
parser.add_argument("--extra-args", help="Arguments (in JSON format) to pass to the /applet/new API method, overriding all other settings")
parser.add_argument("--run", help="Run the app or applet after building it (options following this are passed to "+BOLD("dx run")+"; run at high priority by default)", nargs=argparse.REMAINDER)
class DXSyntaxError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
def _get_timestamp_version_suffix(version):
if "+" in version:
return ".build." + datetime.today().strftime('%Y%m%d.%H%M')
else:
return "+build." + datetime.today().strftime('%Y%m%d.%H%M')
def _get_version_suffix(src_dir, version):
# If anything goes wrong, fall back to the date-based suffix.
try:
if os.path.exists(os.path.join(src_dir, ".git")):
abbrev_sha1 = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=src_dir).strip()[:7]
# We ensure that if VERSION is semver-compliant, then
# VERSION + SUFFIX will be too. In particular that means
# (here and in _get_timestamp_version_suffix above) we add
# what semver refers to as a "build metadata" section
# (delimited by "+"), unless one already exists, in which
# case we append to the existing one.
if "+" in version:
return ".git." + abbrev_sha1
else:
return "+git." + abbrev_sha1
except:
pass
return _get_timestamp_version_suffix(version)
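# Rough sketch of the suffixes produced above (the sha1 and timestamp values
# are made up for illustration):
#   _get_version_suffix(".", "1.2.3")        -> "+git.ab1b1c1"  (inside a git checkout)
#   _get_version_suffix(".", "1.2.3+hotfix") -> ".git.ab1b1c1"  (appended to existing build metadata)
#   outside a git checkout the fallback is a timestamp, e.g. "+build.20150102.1415"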
def parse_destination(dest_str):
"""
Parses dest_str, which is (roughly) of the form
PROJECT:/FOLDER/NAME, and returns a tuple (project, folder, name)
"""
# Interpret strings of form "project-XXXX" (no colon) as project. If
# we pass these through to resolve_path they would get interpreted
# as folder names...
if is_container_id(dest_str):
return (dest_str, None, None)
# ...otherwise, defer to resolver.resolve_path. This handles the
# following forms:
#
# /FOLDER/
# /ENTITYNAME
# /FOLDER/ENTITYNAME
# [PROJECT]:
# [PROJECT]:/FOLDER/
# [PROJECT]:/ENTITYNAME
# [PROJECT]:/FOLDER/ENTITYNAME
return resolve_path(dest_str)
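# Informal examples of the tuple returned above (the IDs are placeholders):
#   parse_destination("project-0000000000000000000000GG")
#       -> ("project-0000000000000000000000GG", None, None)
#   parse_destination("my-project:/apps/my_applet")
#       -> (<ID of my-project>, <folder part>, "my_applet"), as resolved by resolve_path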
def _lint(dxapp_json_filename, mode):
"""
Examines the specified dxapp.json file and warns about any
violations of app guidelines.
Precondition: the dxapp.json file exists and can be parsed.
"""
def _find_readme(dirname):
for basename in ['README.md', 'Readme.md', 'readme.md']:
if os.path.exists(os.path.join(dirname, basename)):
return os.path.join(dirname, basename)
return None
app_spec = json.load(open(dxapp_json_filename))
dirname = os.path.basename(os.path.dirname(os.path.abspath(dxapp_json_filename)))
if mode == "app":
if 'title' not in app_spec:
logger.warn('app is missing a title, please add one in the "title" field of dxapp.json')
if 'summary' in app_spec:
if app_spec['summary'].endswith('.'):
logger.warn('summary "%s" should be a short phrase not ending in a period' % (app_spec['summary'],))
else:
logger.warn('app is missing a summary, please add one in the "summary" field of dxapp.json')
readme_filename = _find_readme(os.path.dirname(dxapp_json_filename))
if 'description' in app_spec:
if readme_filename:
logger.warn('"description" field shadows file ' + readme_filename)
if not app_spec['description'].strip().endswith('.'):
logger.warn('"description" field should be written in complete sentences and end with a period')
else:
if readme_filename is None:
logger.warn("app is missing a description, please supply one in README.md")
if 'categories' in app_spec:
for category in app_spec['categories']:
if category not in APP_CATEGORIES:
logger.warn('app has unrecognized category "%s"' % (category,))
if category == 'Import':
if 'title' in app_spec and not app_spec['title'].endswith('Importer'):
logger.warn('title "%s" should end in "Importer"' % (app_spec['title'],))
if category == 'Export':
if 'title' in app_spec and not app_spec['title'].endswith('Exporter'):
logger.warn('title "%s" should end in "Exporter"' % (app_spec['title'],))
if 'name' in app_spec:
if app_spec['name'] != app_spec['name'].lower():
logger.warn('name "%s" should be all lowercase' % (app_spec['name'],))
if dirname != app_spec['name']:
logger.warn('app name "%s" does not match containing directory "%s"' % (app_spec['name'], dirname))
else:
logger.warn('app is missing a name, please add one in the "name" field of dxapp.json')
if 'version' in app_spec:
if not APP_VERSION_RE.match(app_spec['version']):
logger.warn('"version" %s should be semver compliant (e.g. of the form X.Y.Z)' % (app_spec['version'],))
# Note that identical checks are performed on the server side (and
# will cause the app build to fail), but the checks here are printed
# sooner and multiple naming problems can be detected in a single
# pass.
if 'inputSpec' in app_spec:
for i, input_field in enumerate(app_spec['inputSpec']):
if not re.match("^[a-zA-Z_][0-9a-zA-Z_]*$", input_field['name']):
logger.error('input %d has illegal name "%s" (must match ^[a-zA-Z_][0-9a-zA-Z_]*$)' % (i, input_field['name']))
else:
logger.warn("dxapp.json contains no input specification (inputSpec). Your applet will not be usable as an " +
"app, runnable from the GUI, or composable using workflows.")
if 'outputSpec' in app_spec:
for i, output_field in enumerate(app_spec['outputSpec']):
if not re.match("^[a-zA-Z_][0-9a-zA-Z_]*$", output_field['name']):
logger.error('output %d has illegal name "%s" (must match ^[a-zA-Z_][0-9a-zA-Z_]*$)' % (i, output_field['name']))
else:
logger.warn("dxapp.json contains no output specification (outputSpec). Your applet will not be usable as an " +
"app, runnable from the GUI, or composable using workflows.")
def _check_syntax(code, lang, temp_dir, enforce=True):
"""
Checks that the code whose text is in CODE parses as LANG.
Raises DXSyntaxError if there is a problem and "enforce" is True.
"""
# This function needs the language to be explicitly set, so we can
# generate an appropriate temp filename.
if lang == 'python2.7':
temp_basename = 'inlined_code_from_dxapp_json.py'
elif lang == 'bash':
temp_basename = 'inlined_code_from_dxapp_json.sh'
else:
raise ValueError('lang must be one of "python2.7" or "bash"')
# Dump the contents out to a temporary file, then call _check_file_syntax.
with open(os.path.join(temp_dir, temp_basename), 'w') as ofile:
ofile.write(code)
_check_file_syntax(os.path.join(temp_dir, temp_basename), temp_dir, override_lang=lang, enforce=enforce)
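# For example (illustrative only), given a scratch directory tmp_dir:
#   _check_syntax("echo hello", "bash", tmp_dir)     # returns quietly
#   _check_syntax("if [ ; then", "bash", tmp_dir)    # prints the bash error and raises DXSyntaxError
# With enforce=False the interpreter output is still printed but nothing is raised.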
def _check_file_syntax(filename, temp_dir, override_lang=None, enforce=True):
"""
Checks that the code in FILENAME parses, attempting to autodetect
the language if necessary.
Raises IOError if the file cannot be read.
Raises DXSyntaxError if there is a problem and "enforce" is True.
"""
def check_python(filename):
# Generate a semi-recognizable name to write the pyc to. Of
# course it's possible that different files being scanned could
# have the same basename, so this path won't be unique, but the
# checks don't run concurrently so this shouldn't cause any
# problems.
pyc_path = os.path.join(temp_dir, os.path.basename(filename) + ".pyc")
try:
if USING_PYTHON2:
filename = filename.encode(sys.getfilesystemencoding())
py_compile.compile(filename, cfile=pyc_path, doraise=True)
finally:
try:
os.unlink(pyc_path)
except OSError:
pass
def check_bash(filename):
subprocess.check_output(["/bin/bash", "-n", filename], stderr=subprocess.STDOUT)
if override_lang == 'python2.7':
checker_fn = check_python
elif override_lang == 'bash':
checker_fn = check_bash
elif filename.endswith('.py'):
checker_fn = check_python
elif filename.endswith('.sh'):
checker_fn = check_bash
else:
# Ignore other kinds of files.
return
# Do a test read of the file to catch errors like the file not
# existing or not being readable.
open(filename)
try:
checker_fn(filename)
except subprocess.CalledProcessError as e:
print(filename + " has a syntax error! Interpreter output:", file=sys.stderr)
for line in e.output.strip("\n").split("\n"):
print(" " + line.rstrip("\n"), file=sys.stderr)
if enforce:
raise DXSyntaxError(filename + " has a syntax error")
except py_compile.PyCompileError as e:
print(filename + " has a syntax error! Interpreter output:", file=sys.stderr)
print(" " + e.msg.strip(), file=sys.stderr)
if enforce:
raise DXSyntaxError(e.msg.strip())
def _verify_app_source_dir_impl(src_dir, temp_dir, mode, enforce=True):
"""Performs syntax and lint checks on the app source.
Precondition: the dxapp.json file exists and can be parsed.
"""
_lint(os.path.join(src_dir, "dxapp.json"), mode)
# Check that the entry point file parses as the type it is going to
# be interpreted as. The extension is irrelevant.
manifest = json.load(open(os.path.join(src_dir, "dxapp.json")))
if "runSpec" in manifest:
if "interpreter" not in manifest['runSpec']:
raise dxpy.app_builder.AppBuilderException('runSpec.interpreter field was not present')
if manifest['runSpec']['interpreter'] in ["python2.7", "bash"]:
if "file" in manifest['runSpec']:
entry_point_file = os.path.abspath(os.path.join(src_dir, manifest['runSpec']['file']))
try:
_check_file_syntax(entry_point_file, temp_dir, override_lang=manifest['runSpec']['interpreter'], enforce=enforce)
except IOError as e:
raise dxpy.app_builder.AppBuilderException(
'Could not open runSpec.file=%r. The problem was: %s' % (entry_point_file, e))
except DXSyntaxError:
raise dxpy.app_builder.AppBuilderException('Entry point file %s has syntax errors, see above for details. Rerun with --no-check-syntax to proceed anyway.' % (entry_point_file,))
elif "code" in manifest['runSpec']:
try:
_check_syntax(manifest['runSpec']['code'], manifest['runSpec']['interpreter'], temp_dir, enforce=enforce)
except DXSyntaxError:
raise dxpy.app_builder.AppBuilderException('Code in runSpec.code has syntax errors, see above for details. Rerun with --no-check-syntax to proceed anyway.')
if 'execDepends' in manifest['runSpec']:
if not isinstance(manifest['runSpec']['execDepends'], list):
raise dxpy.app_builder.AppBuilderException('Expected runSpec.execDepends to be an array. Rerun with --no-check-syntax to proceed anyway.')
if not all(isinstance(dep, dict) for dep in manifest['runSpec']['execDepends']):
raise dxpy.app_builder.AppBuilderException('Expected runSpec.execDepends to be an array of hashes. Rerun with --no-check-syntax to proceed anyway.')
if any(dep.get('package_manager', 'apt') != 'apt' for dep in manifest['runSpec']['execDepends']):
if not isinstance(manifest.get('access'), dict) or 'network' not in manifest['access']:
msg = '\n'.join(['runSpec.execDepends specifies non-APT dependencies, but no network access spec is given.',
'Add {"access": {"network": ["*"]}} to allow dependencies to install.',
'See https://wiki.dnanexus.com/Developer-Tutorials/Request-Additional-App-Resources#Network-Access.',
'Rerun with --no-check-syntax to proceed anyway.'])
raise dxpy.app_builder.AppBuilderException(msg)
if 'authorizedUsers' in manifest:
if not isinstance(manifest['authorizedUsers'], list) or isinstance(manifest['authorizedUsers'], basestring):
raise dxpy.app_builder.AppBuilderException('Expected authorizedUsers to be a list of strings')
for thing in manifest['authorizedUsers']:
if thing != 'PUBLIC' and (not isinstance(thing, basestring) or not re.match("^(org-|user-)", thing)):
raise dxpy.app_builder.AppBuilderException('authorizedUsers field contains an entry which is not either the string "PUBLIC" or a user or org ID')
# Check all other files that are going to be in the resources tree.
# For these we detect the language based on the filename extension.
# Obviously this check can have false positives, since the app can
# execute (or not execute!) all these files in whatever way it
# wishes, e.g. it could use Python != 2.7 or some non-bash shell.
# Consequently errors here are non-fatal.
files_with_problems = []
for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(src_dir, "resources"))):
for filename in filenames:
# On Mac OS, the resource fork for "FILE.EXT" gets tarred up
# as a file named "._FILE.EXT". To a naive check this
# appears to be a file of the same extension. Therefore, we
# exclude these from syntax checking since they are likely
# to not parse as whatever language they appear to be.
if not filename.startswith("._"):
try:
_check_file_syntax(os.path.join(dirpath, filename), temp_dir, enforce=True)
except IOError as e:
raise dxpy.app_builder.AppBuilderException(
'Could not open file in resources directory %r. The problem was: %s' %
(os.path.join(dirpath, filename), e)
)
except DXSyntaxError:
# Suppresses errors from _check_file_syntax so we
# only print a nice error message
files_with_problems.append(os.path.join(dirpath, filename))
if files_with_problems:
# Make a message of the form:
# "/path/to/my/app.py"
# OR "/path/to/my/app.py and 3 other files"
files_str = files_with_problems[0] if len(files_with_problems) == 1 else (files_with_problems[0] + " and " + str(len(files_with_problems) - 1) + " other file" + ("s" if len(files_with_problems) > 2 else ""))
logging.warn('%s contained syntax errors, see above for details' % (files_str,))
def _verify_app_source_dir(src_dir, mode, enforce=True):
"""Performs syntax and lint checks on the app source.
Precondition: the dxapp.json file exists and can be parsed.
"""
temp_dir = tempfile.mkdtemp(prefix='dx-build_tmp')
try:
_verify_app_source_dir_impl(src_dir, temp_dir, mode, enforce=enforce)
finally:
shutil.rmtree(temp_dir)
def _verify_app_writable(app_name):
app_name_already_exists = True
try:
is_developer = dxpy.api.app_describe('app-' + app_name)['isDeveloperFor']
except dxpy.exceptions.DXAPIError as e:
if e.name == 'ResourceNotFound':
app_name_already_exists = False
else:
raise e
if not app_name_already_exists:
# This app doesn't exist yet so its creation will succeed
# (or at least, not fail on the basis of the ACL).
return
if not is_developer:
raise dxpy.app_builder.AppBuilderException('You are not a developer for app {app}'.format(app=app_name))
def _parse_app_spec(src_dir):
"""Returns the parsed contents of dxapp.json.
Raises either AppBuilderException or a parser error (exit codes 3 or
2 respectively) if this cannot be done.
"""
if not os.path.isdir(src_dir):
parser.error("%s is not a directory" % src_dir)
if not os.path.exists(os.path.join(src_dir, "dxapp.json")):
raise dxpy.app_builder.AppBuilderException("Directory %s does not contain dxapp.json: not a valid DNAnexus app source directory" % src_dir)
with open(os.path.join(src_dir, "dxapp.json")) as app_desc:
try:
return json_load_raise_on_duplicates(app_desc)
except Exception as e:
raise dxpy.app_builder.AppBuilderException("Could not parse dxapp.json file as JSON: " + e.message)
def _build_app_remote(mode, src_dir, publish=False, destination_override=None,
version_override=None, bill_to_override=None, dx_toolkit_autodep="stable",
do_version_autonumbering=True, do_try_update=True, do_parallel_build=True,
do_check_syntax=True):
if mode == 'app':
builder_app = 'app-tarball_app_builder'
else:
builder_app = 'app-tarball_applet_builder'
temp_dir = tempfile.mkdtemp()
# TODO: this is vestigial, the "auto" setting should be removed.
if dx_toolkit_autodep == "auto":
dx_toolkit_autodep = "stable"
build_options = {'dx_toolkit_autodep': dx_toolkit_autodep}
if version_override:
build_options['version_override'] = version_override
elif do_version_autonumbering:
# If autonumbering is DISABLED, the interior run of dx-build-app
# will detect the correct version to use without our help. If it
# is ENABLED, the version suffix might depend on the state of
# the git repository. Since we'll remove the .git directory
# before uploading, we need to determine the correct version to
# use here and pass it in to the interior run of dx-build-app.
if do_version_autonumbering:
app_spec = _parse_app_spec(src_dir)
original_version = app_spec['version']
app_describe = None
try:
app_describe = dxpy.api.app_describe("app-" + app_spec["name"], alias=original_version, always_retry=False)
except dxpy.exceptions.DXAPIError as e:
if e.name == 'ResourceNotFound' or (mode == 'applet' and e.name == 'PermissionDenied'):
pass
else:
raise e
if app_describe is not None:
if app_describe.has_key('published') or not do_try_update:
# The version we wanted was taken; fall back to the
# autogenerated version number.
build_options['version_override'] = original_version + _get_version_suffix(src_dir, original_version)
# The following flags are basically passed through verbatim.
if bill_to_override:
build_options['bill_to_override'] = bill_to_override
if not do_version_autonumbering:
build_options['do_version_autonumbering'] = False
if not do_try_update:
build_options['do_try_update'] = False
if not do_parallel_build:
build_options['do_parallel_build'] = False
if not do_check_syntax:
build_options['do_check_syntax'] = False
using_temp_project_for_remote_build = False
# If building an applet, run the builder app in the destination
# project. If building an app, run the builder app in a temporary
# project.
dest_folder = None
dest_applet_name = None
if mode == "applet":
# Translate the --destination flag as follows. If --destination
# is PROJ:FOLDER/NAME,
#
# 1. Run the builder app in PROJ
# 2. Make the output folder FOLDER
# 3. Supply --destination=NAME to the interior call of dx-build-applet.
build_project_id = dxpy.WORKSPACE_ID
if destination_override:
build_project_id, dest_folder, dest_applet_name = parse_destination(destination_override)
if build_project_id is None:
parser.error("Can't create an applet without specifying a destination project; please use the -d/--destination flag to explicitly specify a project")
if dest_applet_name:
build_options['destination_override'] = '/' + dest_applet_name
elif mode == "app":
using_temp_project_for_remote_build = True
build_project_id = dxpy.api.project_new({"name": "dx-build-app --remote temporary project"})["id"]
try:
# Resolve relative paths and symlinks here so we have something
# reasonable to write in the job name below.
src_dir = os.path.realpath(src_dir)
# Show the user some progress as the tarball is being generated.
# Hopefully this will help them to understand when their tarball
# is huge (e.g. the target directory already has a whole bunch
# of binaries in it) and interrupt before uploading begins.
app_tarball_file = os.path.join(temp_dir, "app_tarball.tar.gz")
tar_subprocess = subprocess.Popen(["tar", "-czf", "-", "--exclude", "./.git", "."], cwd=src_dir, stdout=subprocess.PIPE)
with open(app_tarball_file, 'wb') as tar_output_file:
total_num_bytes = 0
last_console_update = 0
start_time = time.time()
printed_static_message = False
# Pipe the output of tar into the output file
while True:
tar_exitcode = tar_subprocess.poll()
data = tar_subprocess.stdout.read(4 * 1024 * 1024)
if tar_exitcode is not None and len(data) == 0:
break
tar_output_file.write(data)
total_num_bytes += len(data)
current_time = time.time()
# Don't show status messages at all for very short tar
# operations (< 1.0 sec)
if current_time - last_console_update > 0.25 and current_time - start_time > 1.0:
if sys.stderr.isatty():
if last_console_update > 0:
sys.stderr.write("\r")
sys.stderr.write("Compressing target directory {dir}... ({kb_so_far:,} kb)".format(dir=src_dir, kb_so_far=total_num_bytes / 1024))
sys.stderr.flush()
last_console_update = current_time
elif not printed_static_message:
# Print a message (once only) when stderr is not
# going to a live console
sys.stderr.write("Compressing target directory %s..." % (src_dir,))
printed_static_message = True
if last_console_update > 0:
sys.stderr.write("\n")
if tar_exitcode != 0:
raise Exception("tar exited with non-zero exit code " + str(tar_exitcode))
dxpy.set_workspace_id(build_project_id)
remote_file = dxpy.upload_local_file(app_tarball_file, media_type="application/gzip",
wait_on_close=True, show_progress=True)
try:
input_hash = {
"input_file": dxpy.dxlink(remote_file),
"build_options": build_options
}
if mode == 'app':
input_hash["publish"] = publish
api_options = {
"name": "Remote build of %s" % (os.path.basename(src_dir),),
"input": input_hash,
"project": build_project_id,
}
if dest_folder:
api_options["folder"] = dest_folder
app_run_result = dxpy.api.app_run(builder_app, input_params=api_options)
job_id = app_run_result["id"]
print("Started builder job %s" % (job_id,))
try:
subprocess.check_call(["dx", "watch", job_id])
except subprocess.CalledProcessError as e:
if e.returncode == 3:
# Some kind of failure to build the app. The reason
# for the failure is probably self-evident from the
# job log (and if it's not, the CalledProcessError
# is not informative anyway), so just propagate the
# return code without additional remarks.
sys.exit(3)
else:
raise e
dxpy.DXJob(job_id).wait_on_done(interval=1)
if mode == 'applet':
applet_id, _ = dxpy.get_dxlink_ids(dxpy.api.job_describe(job_id)['output']['output_applet'])
return applet_id
else:
# TODO: determine and return the app ID, to allow
# running the app if args.run is specified
return None
finally:
if not using_temp_project_for_remote_build:
dxpy.DXProject(build_project_id).remove_objects([remote_file.get_id()])
finally:
if using_temp_project_for_remote_build:
dxpy.api.project_destroy(build_project_id, {"terminateJobs": True})
shutil.rmtree(temp_dir)
def build_and_upload_locally(src_dir, mode, overwrite=False, archive=False, publish=False, destination_override=None,
version_override=None, bill_to_override=None, use_temp_build_project=True,
do_parallel_build=True, do_version_autonumbering=True, do_try_update=True,
dx_toolkit_autodep="stable", do_check_syntax=True, dry_run=False,
return_object_dump=False, confirm=True, ensure_upload=False, **kwargs):
app_json = _parse_app_spec(src_dir)
_verify_app_source_dir(src_dir, mode, enforce=do_check_syntax)
if mode == "app" and not dry_run:
_verify_app_writable(app_json['name'])
working_project = None
using_temp_project = False
override_folder = None
override_applet_name = None
if mode == "applet" and destination_override:
working_project, override_folder, override_applet_name = parse_destination(destination_override)
elif mode == "app" and use_temp_build_project and not dry_run:
# Create a temp project
working_project = dxpy.api.project_new({"name": "Temporary build project for dx-build-app"})["id"]
logger.debug("Created temporary project %s to build in" % (working_project,))
using_temp_project = True
try:
if mode == "applet" and working_project is None and dxpy.WORKSPACE_ID is None:
parser.error("Can't create an applet without specifying a destination project; please use the -d/--destination flag to explicitly specify a project")
if "buildOptions" in app_json:
if app_json["buildOptions"].get("dx_toolkit_autodep") == False:
dx_toolkit_autodep = False
# Perform check for existence of applet with same name in
# destination for case in which neither "-f" nor "-a" is
# given BEFORE uploading resources.
if mode == "applet" and not overwrite and not archive:
try:
dest_name = override_applet_name or app_json.get('name') or os.path.basename(os.path.abspath(src_dir))
except:
raise dxpy.app_builder.AppBuilderException("Could not determine applet name from specification + "
"(dxapp.json) or from working directory (%r)" % (src_dir,))
dest_folder = override_folder or app_json.get('folder') or '/'
if not dest_folder.endswith('/'):
dest_folder = dest_folder + '/'
dest_project = working_project if working_project else dxpy.WORKSPACE_ID
for result in dxpy.find_data_objects(classname="applet", name=dest_name, folder=dest_folder,
project=dest_project, recurse=False):
dest_path = dest_folder + dest_name
msg = "An applet already exists at {} (id {}) and neither".format(dest_path, result["id"])
msg += " -f/--overwrite nor -a/--archive were given."
raise dxpy.app_builder.AppBuilderException(msg)
dxpy.app_builder.build(src_dir, parallel_build=do_parallel_build)
bundled_resources = dxpy.app_builder.upload_resources(src_dir,
project=working_project,
folder=override_folder,
ensure_upload=ensure_upload) if not dry_run else []
try:
# TODO: the "auto" setting is vestigial and should be removed.
if dx_toolkit_autodep == "auto":
dx_toolkit_autodep = "stable"
applet_id, applet_spec = dxpy.app_builder.upload_applet(
src_dir,
bundled_resources,
check_name_collisions=(mode == "applet"),
overwrite=overwrite and mode == "applet",
archive=archive and mode == "applet",
project=working_project,
override_folder=override_folder,
override_name=override_applet_name,
dx_toolkit_autodep=dx_toolkit_autodep,
dry_run=dry_run,
**kwargs)
except:
# Avoid leaking any bundled_resources files we may have
# created, if applet creation fails. Note that if
# using_temp_project, the entire project gets destroyed at
# the end, so we don't bother.
if not using_temp_project:
objects_to_delete = [dxpy.get_dxlink_ids(bundled_resource_obj['id'])[0] for bundled_resource_obj in bundled_resources]
if objects_to_delete:
dxpy.api.project_remove_objects(dxpy.app_builder.get_destination_project(src_dir, project=working_project),
input_params={"objects": objects_to_delete})
raise
if dry_run:
return
applet_name = applet_spec['name']
logger.debug("Created applet " + applet_id + " successfully")
if mode == "app":
if 'version' not in app_json:
parser.error("dxapp.json contains no \"version\" field, but it is required to build an app")
version = app_json['version']
try_versions = [version_override or version]
if not version_override and do_version_autonumbering:
try_versions.append(version + _get_version_suffix(src_dir, version))
app_id = dxpy.app_builder.create_app(applet_id,
applet_name,
src_dir,
publish=publish,
set_default=publish,
billTo=bill_to_override,
try_versions=try_versions,
try_update=do_try_update,
confirm=confirm)
app_describe = dxpy.api.app_describe(app_id)
if publish:
print("Uploaded and published app %s/%s (%s) successfully" % (app_describe["name"], app_describe["version"], app_id), file=sys.stderr)
else:
print("Uploaded app %s/%s (%s) successfully" % (app_describe["name"], app_describe["version"], app_id), file=sys.stderr)
print("You can publish this app with:", file=sys.stderr)
print(" dx api app-%s/%s publish \"{\\\"makeDefault\\\": true}\"" % (app_describe["name"], app_describe["version"]), file=sys.stderr)
return app_describe if return_object_dump else {"id": app_id}
elif mode == "applet":
return dxpy.api.applet_describe(applet_id) if return_object_dump else {"id": applet_id}
else:
raise dxpy.app_builder.AppBuilderException("Unrecognized mode %r" % (mode,))
finally:
# Clean up after ourselves.
if using_temp_project:
dxpy.api.project_destroy(working_project)
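# A minimal programmatic sketch of the function above (assumes you are logged
# in and that ./my_applet contains a valid dxapp.json; the names are
# illustrative, not part of the original source):
#
#     from dxpy.scripts import dx_build_app
#     desc = dx_build_app.build_and_upload_locally(
#         "./my_applet", "applet", overwrite=True, return_object_dump=True)
#     print(desc["id"])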
def _build_app(args, extra_args):
"""Builds an app or applet and returns the resulting executable ID
(unless it was a dry-run, in which case None is returned).
TODO: remote app builds still return None, but we should fix this.
"""
if not args.remote:
# LOCAL BUILD
try:
output = build_and_upload_locally(
args.src_dir,
args.mode,
overwrite=args.overwrite,
archive=args.archive,
publish=args.publish,
destination_override=args.destination,
version_override=args.version_override,
bill_to_override=args.bill_to,
use_temp_build_project=args.use_temp_build_project,
do_parallel_build=args.parallel_build,
do_version_autonumbering=args.version_autonumbering,
do_try_update=args.update,
dx_toolkit_autodep=args.dx_toolkit_autodep,
do_check_syntax=args.check_syntax,
ensure_upload=args.ensure_upload,
dry_run=args.dry_run,
confirm=args.confirm,
return_object_dump=args.json,
**extra_args
)
if output is not None and args.run is None:
print(json.dumps(output))
except dxpy.app_builder.AppBuilderException as e:
# AppBuilderException represents errors during app or applet building
# that could reasonably have been anticipated by the user.
print("Error: %s" % (e.message,), file=sys.stderr)
sys.exit(3)
except dxpy.exceptions.DXAPIError as e:
print("Error: %s" % (e,), file=sys.stderr)
sys.exit(3)
if args.dry_run:
return None
return output['id']
else:
# REMOTE BUILD
try:
app_json = _parse_app_spec(args.src_dir)
_verify_app_source_dir(args.src_dir, args.mode)
if args.mode == "app" and not args.dry_run:
_verify_app_writable(app_json['name'])
except dxpy.app_builder.AppBuilderException as e:
print("Error: %s" % (e.message,), file=sys.stderr)
sys.exit(3)
# The following flags might be useful in conjunction with
# --remote. To enable these, we need to learn how to pass these
# options through to the interior call of dx_build_app(let).
if args.dry_run:
parser.error('--remote cannot be combined with --dry-run')
if args.overwrite:
parser.error('--remote cannot be combined with --overwrite/-f')
if args.archive:
parser.error('--remote cannot be combined with --archive/-a')
# The following flags are probably not useful in conjunction
# with --remote.
if args.json:
parser.error('--remote cannot be combined with --json')
if not args.use_temp_build_project:
parser.error('--remote cannot be combined with --no-temp-build-project')
more_kwargs = {}
if args.version_override:
more_kwargs['version_override'] = args.version_override
if args.bill_to:
more_kwargs['bill_to_override'] = args.bill_to
if not args.version_autonumbering:
more_kwargs['do_version_autonumbering'] = False
if not args.update:
more_kwargs['do_try_update'] = False
if not args.parallel_build:
more_kwargs['do_parallel_build'] = False
if not args.check_syntax:
more_kwargs['do_check_syntax'] = False
return _build_app_remote(args.mode, args.src_dir, destination_override=args.destination, publish=args.publish, dx_toolkit_autodep=args.dx_toolkit_autodep, **more_kwargs)
def main(**kwargs):
"""
Entry point for dx-build-app(let).
Don't call this function as a subroutine in your program! It is liable to
sys.exit your program when it detects certain error conditions, so you
can't recover from those as you could if it raised exceptions. Instead,
call dx_build_app.build_and_upload_locally which provides the real
implementation for dx-build-app(let) but is easier to use in your program.
"""
if len(sys.argv) > 0:
if sys.argv[0].endswith('dx-build-app'):
logging.warn('Warning: dx-build-app has been replaced with "dx build --create-app". Please update your scripts.')
elif sys.argv[0].endswith('dx-build-applet'):
logging.warn('Warning: dx-build-applet has been replaced with "dx build". Please update your scripts.')
if len(kwargs) == 0:
args = parser.parse_args()
else:
args = parser.parse_args(**kwargs)
if dxpy.AUTH_HELPER is None and not args.dry_run:
parser.error('Authentication required to build an executable on the platform; please run "dx login" first')
if args.src_dir is None:
args.src_dir = os.getcwd()
if USING_PYTHON2:
args.src_dir = args.src_dir.decode(sys.getfilesystemencoding())
if args.mode == "app" and args.destination != '.':
parser.error("--destination cannot be used when creating an app (only an applet)")
if args.dx_toolkit_autodep in ['beta', 'unstable']:
logging.warn('The --dx-toolkit-beta-autodep and --dx-toolkit-unstable-autodep flags have no effect and will be removed at some date in the future.')
if args.overwrite and args.archive:
parser.error("Options -f/--overwrite and -a/--archive cannot be specified together")
if args.run is not None and args.dry_run:
parser.error("Options --dry-run and --run cannot be specified together")
if args.run and args.remote and args.mode == 'app':
parser.error("Options --remote, --app, and --run cannot all be specified together. Try removing --run and then separately invoking dx run.")
executable_id = _build_app(args,
json.loads(args.extra_args) if args.extra_args else {})
if args.run is not None:
if executable_id is None:
raise AssertionError('Expected executable_id to be set here')
try:
subprocess.check_call(['dx', 'run', executable_id, '--priority', 'high'] + args.run)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
except:
err_exit()
return
if __name__ == '__main__':
main()
| apache-2.0 | -4,026,534,068,323,847,000 | 49.197711 | 306 | 0.617081 | false |
has2k1/plydata | plydata/eval.py | 1 | 7150 | # Credit: https://github.com/pydata/patsy
import __future__
import sys
import numbers
import inspect
from functools import lru_cache
__all__ = ["EvalEnvironment"]
def _all_future_flags():
flags = 0
for feature_name in __future__.all_feature_names:
feature = getattr(__future__, feature_name)
if feature.getMandatoryRelease() > sys.version_info:
flags |= feature.compiler_flag
return flags
_ALL_FUTURE_FLAGS = _all_future_flags()
@lru_cache(maxsize=256)
def _compile(source, filename, mode, flags=0, dont_inherit=False, optimize=-1):
"""
Cached compile
"""
return compile(source, filename, mode, flags, dont_inherit)
# This is just a minimal dict-like object that does lookup in a 'stack' of
# dicts -- first it checks the first, then the second, etc. Assignments go
# into an internal, zeroth dict.
class VarLookupDict(object):
def __init__(self, dicts):
self._dicts = [{}] + list(dicts)
def __getitem__(self, key):
for d in self._dicts:
try:
return d[key]
except KeyError:
pass
raise KeyError(key)
def __setitem__(self, key, value):
self._dicts[0][key] = value
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._dicts)
def __getstate__(*args, **kwargs):
raise NotImplementedError("Sorry, pickling not supported")
def copy(self):
return self
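# A small illustration of the lookup/assignment behaviour (not executed here;
# the values are arbitrary):
#   d = VarLookupDict([{'x': 1}, {'x': 2, 'y': 3}])
#   d['x']      -> 1     (the first dict in the stack wins)
#   d['y']      -> 3     (lookup falls through to later dicts)
#   d['x'] = 9           (writes go to the internal zeroth dict)
#   d['x']      -> 9     (and now shadow the originals)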
class EvalEnvironment(object):
"""Represents a Python execution environment.
    Encapsulates a namespace for variable lookup and a set of __future__
    flags."""
def __init__(self, namespaces, flags=0):
assert not flags & ~_ALL_FUTURE_FLAGS
self._namespaces = list(namespaces)
self.flags = flags
@property
def namespace(self):
"""A dict-like object that can be used to look up variables accessible
from the encapsulated environment."""
return VarLookupDict(self._namespaces)
def with_outer_namespace(self, outer_namespace):
"""Return a new EvalEnvironment with an extra namespace added.
This namespace will be used only for variables that are not found in
any existing namespace, i.e., it is "outside" them all."""
return self.__class__(self._namespaces + [outer_namespace],
self.flags)
def eval(self, expr, source_name="<string>", inner_namespace={}):
"""Evaluate some Python code in the encapsulated environment.
:arg expr: A string containing a Python expression.
:arg source_name: A name for this string, for use in tracebacks.
:arg inner_namespace: A dict-like object that will be checked first
when `expr` attempts to access any variables.
:returns: The value of `expr`.
"""
code = _compile(expr, source_name, 'eval', self.flags)
return eval(code, {}, VarLookupDict([inner_namespace]
+ self._namespaces))
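    # Informal usage sketch (illustrative, not executed):
    #   x = 1
    #   env = EvalEnvironment.capture()
    #   env.eval("x + 1")                              -> 2
    #   env.eval("x + y", inner_namespace={"y": 10})   -> 11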
@classmethod
def capture(cls, eval_env=0, reference=0):
"""Capture an execution environment from the stack.
If `eval_env` is already an :class:`EvalEnvironment`, it is returned
unchanged. Otherwise, we walk up the stack by ``eval_env + reference``
steps and capture that function's evaluation environment.
For ``eval_env=0`` and ``reference=0``, the default, this captures the
stack frame of the function that calls :meth:`capture`. If ``eval_env
+ reference`` is 1, then we capture that function's caller, etc.
This somewhat complicated calling convention is designed to be
convenient for functions which want to capture their caller's
environment by default, but also allow explicit environments to be
specified. See the second example.
Example::
x = 1
this_env = EvalEnvironment.capture()
assert this_env.namespace["x"] == 1
def child_func():
return EvalEnvironment.capture(1)
this_env_from_child = child_func()
assert this_env_from_child.namespace["x"] == 1
Example::
# This function can be used like:
# my_model(formula_like, data)
# -> evaluates formula_like in caller's environment
# my_model(formula_like, data, eval_env=1)
# -> evaluates formula_like in caller's caller's environment
# my_model(formula_like, data, eval_env=my_env)
# -> evaluates formula_like in environment 'my_env'
def my_model(formula_like, data, eval_env=0):
eval_env = EvalEnvironment.capture(eval_env, reference=1)
return model_setup_helper(formula_like, data, eval_env)
This is how :func:`dmatrix` works.
.. versionadded: 0.2.0
The ``reference`` argument.
"""
if isinstance(eval_env, cls):
return eval_env
elif isinstance(eval_env, numbers.Integral):
depth = eval_env + reference
else:
raise TypeError("Parameter 'eval_env' must be either an integer "
"or an instance of patsy.EvalEnvironment.")
frame = inspect.currentframe()
try:
for i in range(depth + 1):
if frame is None:
raise ValueError("call-stack is not that deep!")
frame = frame.f_back
return cls([frame.f_locals, frame.f_globals],
frame.f_code.co_flags & _ALL_FUTURE_FLAGS)
# The try/finally is important to avoid a potential reference cycle --
# any exception traceback will carry a reference to *our* frame, which
# contains a reference to our local variables, which would otherwise
# carry a reference to some parent frame, where the exception was
# caught...:
finally:
del frame
def subset(self, names):
"""Creates a new, flat EvalEnvironment that contains only
the variables specified."""
vld = VarLookupDict(self._namespaces)
new_ns = dict((name, vld[name]) for name in names)
return EvalEnvironment([new_ns], self.flags)
def _namespace_ids(self):
return [id(n) for n in self._namespaces]
def __eq__(self, other):
return (isinstance(other, EvalEnvironment)
and self.flags == other.flags
and self._namespace_ids() == other._namespace_ids())
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((EvalEnvironment,
self.flags,
tuple(self._namespace_ids())))
def __getstate__(*args, **kwargs):
raise NotImplementedError("Sorry, pickling not supported")
| bsd-3-clause | 3,623,312,421,378,813,000 | 36.239583 | 79 | 0.599021 | false |
jmwright/cadquery-freecad-module | Libs/pint/testsuite/test_quantity.py | 1 | 48047 | # -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, print_function, absolute_import
import copy
import math
import operator as op
from pint import DimensionalityError, OffsetUnitCalculusError, UnitRegistry
from pint.unit import UnitsContainer
from pint.compat import string_types, PYTHON3, np, unittest
from pint.testsuite import QuantityTestCase, helpers
from pint.testsuite.parameterized import ParameterizedTestCase
class TestQuantity(QuantityTestCase):
FORCE_NDARRAY = False
def test_quantity_creation(self):
for args in ((4.2, 'meter'),
(4.2, UnitsContainer(meter=1)),
(4.2, self.ureg.meter),
('4.2*meter', ),
('4.2/meter**(-1)', ),
(self.Q_(4.2, 'meter'),)):
x = self.Q_(*args)
self.assertEqual(x.magnitude, 4.2)
self.assertEqual(x.units, UnitsContainer(meter=1))
x = self.Q_(4.2, UnitsContainer(length=1))
y = self.Q_(x)
self.assertEqual(x.magnitude, y.magnitude)
self.assertEqual(x.units, y.units)
self.assertIsNot(x, y)
x = self.Q_(4.2, None)
self.assertEqual(x.magnitude, 4.2)
self.assertEqual(x.units, UnitsContainer())
with self.capture_log() as buffer:
self.assertEqual(4.2 * self.ureg.meter, self.Q_(4.2, 2 * self.ureg.meter))
self.assertEqual(len(buffer), 1)
def test_quantity_bool(self):
self.assertTrue(self.Q_(1, None))
self.assertTrue(self.Q_(1, 'meter'))
self.assertFalse(self.Q_(0, None))
self.assertFalse(self.Q_(0, 'meter'))
def test_quantity_comparison(self):
x = self.Q_(4.2, 'meter')
y = self.Q_(4.2, 'meter')
z = self.Q_(5, 'meter')
j = self.Q_(5, 'meter*meter')
# identity for single object
self.assertTrue(x == x)
self.assertFalse(x != x)
# identity for multiple objects with same value
self.assertTrue(x == y)
self.assertFalse(x != y)
self.assertTrue(x <= y)
self.assertTrue(x >= y)
self.assertFalse(x < y)
self.assertFalse(x > y)
self.assertFalse(x == z)
self.assertTrue(x != z)
self.assertTrue(x < z)
self.assertTrue(z != j)
self.assertNotEqual(z, j)
self.assertEqual(self.Q_(0, 'meter'), self.Q_(0, 'centimeter'))
self.assertNotEqual(self.Q_(0, 'meter'), self.Q_(0, 'second'))
self.assertLess(self.Q_(10, 'meter'), self.Q_(5, 'kilometer'))
def test_quantity_comparison_convert(self):
self.assertEqual(self.Q_(1000, 'millimeter'), self.Q_(1, 'meter'))
self.assertEqual(self.Q_(1000, 'millimeter/min'), self.Q_(1000/60, 'millimeter/s'))
def test_quantity_repr(self):
x = self.Q_(4.2, UnitsContainer(meter=1))
self.assertEqual(str(x), '4.2 meter')
self.assertEqual(repr(x), "<Quantity(4.2, 'meter')>")
def test_quantity_format(self):
x = self.Q_(4.12345678, UnitsContainer(meter=2, kilogram=1, second=-1))
for spec, result in (('{0}', str(x)), ('{0!s}', str(x)), ('{0!r}', repr(x)),
('{0.magnitude}', str(x.magnitude)), ('{0.units}', str(x.units)),
('{0.magnitude!s}', str(x.magnitude)), ('{0.units!s}', str(x.units)),
('{0.magnitude!r}', repr(x.magnitude)), ('{0.units!r}', repr(x.units)),
('{0:.4f}', '{0:.4f} {1!s}'.format(x.magnitude, x.units)),
('{0:L}', r'4.12345678 \frac{kilogram \cdot meter^{2}}{second}'),
('{0:P}', '4.12345678 kilogram·meter²/second'),
('{0:H}', '4.12345678 kilogram meter<sup>2</sup>/second'),
('{0:C}', '4.12345678 kilogram*meter**2/second'),
('{0:~}', '4.12345678 kg * m ** 2 / s'),
('{0:L~}', r'4.12345678 \frac{kg \cdot m^{2}}{s}'),
('{0:P~}', '4.12345678 kg·m²/s'),
('{0:H~}', '4.12345678 kg m<sup>2</sup>/s'),
('{0:C~}', '4.12345678 kg*m**2/s'),
):
self.assertEqual(spec.format(x), result)
def test_default_formatting(self):
ureg = UnitRegistry()
x = ureg.Quantity(4.12345678, UnitsContainer(meter=2, kilogram=1, second=-1))
for spec, result in (('L', r'4.12345678 \frac{kilogram \cdot meter^{2}}{second}'),
('P', '4.12345678 kilogram·meter²/second'),
('H', '4.12345678 kilogram meter<sup>2</sup>/second'),
('C', '4.12345678 kilogram*meter**2/second'),
('~', '4.12345678 kg * m ** 2 / s'),
('L~', r'4.12345678 \frac{kg \cdot m^{2}}{s}'),
('P~', '4.12345678 kg·m²/s'),
('H~', '4.12345678 kg m<sup>2</sup>/s'),
('C~', '4.12345678 kg*m**2/s'),
):
ureg.default_format = spec
self.assertEqual('{0}'.format(x), result)
def test_to_base_units(self):
x = self.Q_('1*inch')
self.assertQuantityAlmostEqual(x.to_base_units(), self.Q_(0.0254, 'meter'))
x = self.Q_('1*inch*inch')
self.assertQuantityAlmostEqual(x.to_base_units(), self.Q_(0.0254 ** 2.0, 'meter*meter'))
x = self.Q_('1*inch/minute')
self.assertQuantityAlmostEqual(x.to_base_units(), self.Q_(0.0254 / 60., 'meter/second'))
def test_convert(self):
x = self.Q_('2*inch')
self.assertQuantityAlmostEqual(x.to('meter'), self.Q_(2. * 0.0254, 'meter'))
x = self.Q_('2*meter')
self.assertQuantityAlmostEqual(x.to('inch'), self.Q_(2. / 0.0254, 'inch'))
x = self.Q_('2*sidereal_second')
self.assertQuantityAlmostEqual(x.to('second'), self.Q_(1.994539133 , 'second'))
x = self.Q_('2.54*centimeter/second')
self.assertQuantityAlmostEqual(x.to('inch/second'), self.Q_(1, 'inch/second'))
x = self.Q_('2.54*centimeter')
self.assertQuantityAlmostEqual(x.to('inch').magnitude, 1)
self.assertQuantityAlmostEqual(self.Q_(2, 'second').to('millisecond').magnitude, 2000)
@helpers.requires_numpy()
    def test_convert_numpy(self):
# Conversions with single units take a different codepath than
# Conversions with more than one unit.
src_dst1 = UnitsContainer(meter=1), UnitsContainer(inch=1)
src_dst2 = UnitsContainer(meter=1, second=-1), UnitsContainer(inch=1, minute=-1)
for src, dst in (src_dst1, src_dst2):
a = np.ones((3, 1))
ac = np.ones((3, 1))
q = self.Q_(a, src)
qac = self.Q_(ac, src).to(dst)
r = q.to(dst)
self.assertQuantityAlmostEqual(qac, r)
self.assertIsNot(r, q)
self.assertIsNot(r._magnitude, a)
def test_context_attr(self):
self.assertEqual(self.ureg.meter, self.Q_(1, 'meter'))
def test_both_symbol(self):
self.assertEqual(self.Q_(2, 'ms'), self.Q_(2, 'millisecond'))
self.assertEqual(self.Q_(2, 'cm'), self.Q_(2, 'centimeter'))
def test_dimensionless_units(self):
self.assertAlmostEqual(self.Q_(360, 'degree').to('radian').magnitude, 2 * math.pi)
self.assertAlmostEqual(self.Q_(2 * math.pi, 'radian'), self.Q_(360, 'degree'))
self.assertEqual(self.Q_(1, 'radian').dimensionality, UnitsContainer())
self.assertTrue(self.Q_(1, 'radian').dimensionless)
self.assertFalse(self.Q_(1, 'radian').unitless)
self.assertEqual(self.Q_(1, 'meter')/self.Q_(1, 'meter'), 1)
self.assertEqual((self.Q_(1, 'meter')/self.Q_(1, 'mm')).to(''), 1000)
def test_offset(self):
self.assertQuantityAlmostEqual(self.Q_(0, 'kelvin').to('kelvin'), self.Q_(0, 'kelvin'))
self.assertQuantityAlmostEqual(self.Q_(0, 'degC').to('kelvin'), self.Q_(273.15, 'kelvin'))
self.assertQuantityAlmostEqual(self.Q_(0, 'degF').to('kelvin'), self.Q_(255.372222, 'kelvin'), rtol=0.01)
self.assertQuantityAlmostEqual(self.Q_(100, 'kelvin').to('kelvin'), self.Q_(100, 'kelvin'))
self.assertQuantityAlmostEqual(self.Q_(100, 'degC').to('kelvin'), self.Q_(373.15, 'kelvin'))
self.assertQuantityAlmostEqual(self.Q_(100, 'degF').to('kelvin'), self.Q_(310.92777777, 'kelvin'), rtol=0.01)
self.assertQuantityAlmostEqual(self.Q_(0, 'kelvin').to('degC'), self.Q_(-273.15, 'degC'))
self.assertQuantityAlmostEqual(self.Q_(100, 'kelvin').to('degC'), self.Q_(-173.15, 'degC'))
self.assertQuantityAlmostEqual(self.Q_(0, 'kelvin').to('degF'), self.Q_(-459.67, 'degF'), rtol=0.01)
self.assertQuantityAlmostEqual(self.Q_(100, 'kelvin').to('degF'), self.Q_(-279.67, 'degF'), rtol=0.01)
self.assertQuantityAlmostEqual(self.Q_(32, 'degF').to('degC'), self.Q_(0, 'degC'), atol=0.01)
self.assertQuantityAlmostEqual(self.Q_(100, 'degC').to('degF'), self.Q_(212, 'degF'), atol=0.01)
self.assertQuantityAlmostEqual(self.Q_(54, 'degF').to('degC'), self.Q_(12.2222, 'degC'), atol=0.01)
self.assertQuantityAlmostEqual(self.Q_(12, 'degC').to('degF'), self.Q_(53.6, 'degF'), atol=0.01)
self.assertQuantityAlmostEqual(self.Q_(12, 'kelvin').to('degC'), self.Q_(-261.15, 'degC'), atol=0.01)
self.assertQuantityAlmostEqual(self.Q_(12, 'degC').to('kelvin'), self.Q_(285.15, 'kelvin'), atol=0.01)
self.assertQuantityAlmostEqual(self.Q_(12, 'kelvin').to('degR'), self.Q_(21.6, 'degR'), atol=0.01)
self.assertQuantityAlmostEqual(self.Q_(12, 'degR').to('kelvin'), self.Q_(6.66666667, 'kelvin'), atol=0.01)
self.assertQuantityAlmostEqual(self.Q_(12, 'degC').to('degR'), self.Q_(513.27, 'degR'), atol=0.01)
self.assertQuantityAlmostEqual(self.Q_(12, 'degR').to('degC'), self.Q_(-266.483333, 'degC'), atol=0.01)
def test_offset_delta(self):
self.assertQuantityAlmostEqual(self.Q_(0, 'delta_degC').to('kelvin'), self.Q_(0, 'kelvin'))
self.assertQuantityAlmostEqual(self.Q_(0, 'delta_degF').to('kelvin'), self.Q_(0, 'kelvin'), rtol=0.01)
self.assertQuantityAlmostEqual(self.Q_(100, 'kelvin').to('delta_degC'), self.Q_(100, 'delta_degC'))
self.assertQuantityAlmostEqual(self.Q_(100, 'kelvin').to('delta_degF'), self.Q_(180, 'delta_degF'), rtol=0.01)
self.assertQuantityAlmostEqual(self.Q_(100, 'delta_degF').to('kelvin'), self.Q_(55.55555556, 'kelvin'), rtol=0.01)
self.assertQuantityAlmostEqual(self.Q_(100, 'delta_degC').to('delta_degF'), self.Q_(180, 'delta_degF'), rtol=0.01)
self.assertQuantityAlmostEqual(self.Q_(100, 'delta_degF').to('delta_degC'), self.Q_(55.55555556, 'delta_degC'), rtol=0.01)
self.assertQuantityAlmostEqual(self.Q_(12.3, 'delta_degC').to('delta_degF'), self.Q_(22.14, 'delta_degF'), rtol=0.01)
def test_pickle(self):
import pickle
def pickle_test(q):
self.assertEqual(q, pickle.loads(pickle.dumps(q)))
pickle_test(self.Q_(32, ''))
pickle_test(self.Q_(2.4, ''))
pickle_test(self.Q_(32, 'm/s'))
pickle_test(self.Q_(2.4, 'm/s'))
class TestQuantityBasicMath(QuantityTestCase):
FORCE_NDARRAY = False
def _test_inplace(self, operator, value1, value2, expected_result, unit=None):
if isinstance(value1, string_types):
value1 = self.Q_(value1)
if isinstance(value2, string_types):
value2 = self.Q_(value2)
if isinstance(expected_result, string_types):
expected_result = self.Q_(expected_result)
if not unit is None:
value1 = value1 * unit
value2 = value2 * unit
expected_result = expected_result * unit
value1 = copy.copy(value1)
value2 = copy.copy(value2)
id1 = id(value1)
id2 = id(value2)
value1 = operator(value1, value2)
value2_cpy = copy.copy(value2)
self.assertQuantityAlmostEqual(value1, expected_result)
self.assertEqual(id1, id(value1))
self.assertQuantityAlmostEqual(value2, value2_cpy)
self.assertEqual(id2, id(value2))
def _test_not_inplace(self, operator, value1, value2, expected_result, unit=None):
if isinstance(value1, string_types):
value1 = self.Q_(value1)
if isinstance(value2, string_types):
value2 = self.Q_(value2)
if isinstance(expected_result, string_types):
expected_result = self.Q_(expected_result)
if not unit is None:
value1 = value1 * unit
value2 = value2 * unit
expected_result = expected_result * unit
id1 = id(value1)
id2 = id(value2)
value1_cpy = copy.copy(value1)
value2_cpy = copy.copy(value2)
result = operator(value1, value2)
self.assertQuantityAlmostEqual(expected_result, result)
self.assertQuantityAlmostEqual(value1, value1_cpy)
self.assertQuantityAlmostEqual(value2, value2_cpy)
self.assertNotEqual(id(result), id1)
self.assertNotEqual(id(result), id2)
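    # Both helpers above are driven the same way, e.g. (schematically):
    #   self._test_not_inplace(op.add, '3*meter', '2*meter', '5*meter')
    # The in-place variant additionally checks that the left operand keeps its
    # original id(), i.e. that it really was mutated in place.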
def _test_quantity_add_sub(self, unit, func):
x = self.Q_(unit, 'centimeter')
y = self.Q_(unit, 'inch')
z = self.Q_(unit, 'second')
a = self.Q_(unit, None)
func(op.add, x, x, self.Q_(unit + unit, 'centimeter'))
func(op.add, x, y, self.Q_(unit + 2.54 * unit, 'centimeter'))
func(op.add, y, x, self.Q_(unit + unit / (2.54 * unit), 'inch'))
func(op.add, a, unit, self.Q_(unit + unit, None))
self.assertRaises(DimensionalityError, op.add, 10, x)
self.assertRaises(DimensionalityError, op.add, x, 10)
self.assertRaises(DimensionalityError, op.add, x, z)
func(op.sub, x, x, self.Q_(unit - unit, 'centimeter'))
func(op.sub, x, y, self.Q_(unit - 2.54 * unit, 'centimeter'))
func(op.sub, y, x, self.Q_(unit - unit / (2.54 * unit), 'inch'))
func(op.sub, a, unit, self.Q_(unit - unit, None))
self.assertRaises(DimensionalityError, op.sub, 10, x)
self.assertRaises(DimensionalityError, op.sub, x, 10)
self.assertRaises(DimensionalityError, op.sub, x, z)
def _test_quantity_iadd_isub(self, unit, func):
x = self.Q_(unit, 'centimeter')
y = self.Q_(unit, 'inch')
z = self.Q_(unit, 'second')
a = self.Q_(unit, None)
func(op.iadd, x, x, self.Q_(unit + unit, 'centimeter'))
func(op.iadd, x, y, self.Q_(unit + 2.54 * unit, 'centimeter'))
func(op.iadd, y, x, self.Q_(unit + unit / 2.54, 'inch'))
func(op.iadd, a, unit, self.Q_(unit + unit, None))
self.assertRaises(DimensionalityError, op.iadd, 10, x)
self.assertRaises(DimensionalityError, op.iadd, x, 10)
self.assertRaises(DimensionalityError, op.iadd, x, z)
func(op.isub, x, x, self.Q_(unit - unit, 'centimeter'))
func(op.isub, x, y, self.Q_(unit - 2.54, 'centimeter'))
func(op.isub, y, x, self.Q_(unit - unit / 2.54, 'inch'))
func(op.isub, a, unit, self.Q_(unit - unit, None))
self.assertRaises(DimensionalityError, op.sub, 10, x)
self.assertRaises(DimensionalityError, op.sub, x, 10)
self.assertRaises(DimensionalityError, op.sub, x, z)
def _test_quantity_mul_div(self, unit, func):
func(op.mul, unit * 10.0, '4.2*meter', '42*meter', unit)
func(op.mul, '4.2*meter', unit * 10.0, '42*meter', unit)
func(op.mul, '4.2*meter', '10*inch', '42*meter*inch', unit)
func(op.truediv, unit * 42, '4.2*meter', '10/meter', unit)
func(op.truediv, '4.2*meter', unit * 10.0, '0.42*meter', unit)
func(op.truediv, '4.2*meter', '10*inch', '0.42*meter/inch', unit)
def _test_quantity_imul_idiv(self, unit, func):
#func(op.imul, 10.0, '4.2*meter', '42*meter')
func(op.imul, '4.2*meter', 10.0, '42*meter', unit)
func(op.imul, '4.2*meter', '10*inch', '42*meter*inch', unit)
#func(op.truediv, 42, '4.2*meter', '10/meter')
func(op.itruediv, '4.2*meter', unit * 10.0, '0.42*meter', unit)
func(op.itruediv, '4.2*meter', '10*inch', '0.42*meter/inch', unit)
def _test_quantity_floordiv(self, unit, func):
func(op.floordiv, unit * 10.0, '4.2*meter', '2/meter', unit)
func(op.floordiv, '24*meter', unit * 10.0, '2*meter', unit)
func(op.floordiv, '10*meter', '4.2*inch', '2*meter/inch', unit)
def _test_quantity_ifloordiv(self, unit, func):
func(op.ifloordiv, 10.0, '4.2*meter', '2/meter', unit)
func(op.ifloordiv, '24*meter', 10.0, '2*meter', unit)
func(op.ifloordiv, '10*meter', '4.2*inch', '2*meter/inch', unit)
def _test_numeric(self, unit, ifunc):
self._test_quantity_add_sub(unit, self._test_not_inplace)
self._test_quantity_iadd_isub(unit, ifunc)
self._test_quantity_mul_div(unit, self._test_not_inplace)
self._test_quantity_imul_idiv(unit, ifunc)
self._test_quantity_floordiv(unit, self._test_not_inplace)
#self._test_quantity_ifloordiv(unit, ifunc)
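    # _test_numeric drives the add/sub, mul/div and floordiv checks with the
    # non-inplace helper and routes the in-place variants through `ifunc`, so
    # scalar inputs (float, Fraction) reuse _test_not_inplace while ndarray
    # inputs use _test_inplace; the ifloordiv checks are currently disabled.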
def test_float(self):
self._test_numeric(1., self._test_not_inplace)
def test_fraction(self):
import fractions
self._test_numeric(fractions.Fraction(1, 1), self._test_not_inplace)
@helpers.requires_numpy()
def test_nparray(self):
self._test_numeric(np.ones((1, 3)), self._test_inplace)
def test_quantity_abs_round(self):
x = self.Q_(-4.2, 'meter')
y = self.Q_(4.2, 'meter')
# In Python 3+ round of x is delegated to x.__round__, instead of round(x.__float__)
# and therefore it can be properly implemented by Pint
        for fun in (abs, op.pos, op.neg) + ((round, ) if PYTHON3 else ()):
zx = self.Q_(fun(x.magnitude), 'meter')
zy = self.Q_(fun(y.magnitude), 'meter')
rx = fun(x)
ry = fun(y)
self.assertEqual(rx, zx, 'while testing {0}'.format(fun))
self.assertEqual(ry, zy, 'while testing {0}'.format(fun))
self.assertIsNot(rx, zx, 'while testing {0}'.format(fun))
self.assertIsNot(ry, zy, 'while testing {0}'.format(fun))
def test_quantity_float_complex(self):
x = self.Q_(-4.2, None)
y = self.Q_(4.2, None)
z = self.Q_(1, 'meter')
for fun in (float, complex):
self.assertEqual(fun(x), fun(x.magnitude))
self.assertEqual(fun(y), fun(y.magnitude))
self.assertRaises(DimensionalityError, fun, z)
class TestDimensions(QuantityTestCase):
FORCE_NDARRAY = False
def test_get_dimensionality(self):
get = self.ureg.get_dimensionality
self.assertEqual(get('[time]'), UnitsContainer({'[time]': 1}))
self.assertEqual(get(UnitsContainer({'[time]': 1})), UnitsContainer({'[time]': 1}))
self.assertEqual(get('seconds'), UnitsContainer({'[time]': 1}))
self.assertEqual(get(UnitsContainer({'seconds': 1})), UnitsContainer({'[time]': 1}))
self.assertEqual(get('[speed]'), UnitsContainer({'[length]': 1, '[time]': -1}))
self.assertEqual(get('[acceleration]'), UnitsContainer({'[length]': 1, '[time]': -2}))
def test_dimensionality(self):
x = self.Q_(42, 'centimeter')
x.to_base_units()
x = self.Q_(42, 'meter*second')
self.assertEqual(x.dimensionality, UnitsContainer({'[length]': 1., '[time]': 1.}))
x = self.Q_(42, 'meter*second*second')
self.assertEqual(x.dimensionality, UnitsContainer({'[length]': 1., '[time]': 2.}))
x = self.Q_(42, 'inch*second*second')
self.assertEqual(x.dimensionality, UnitsContainer({'[length]': 1., '[time]': 2.}))
self.assertTrue(self.Q_(42, None).dimensionless)
self.assertFalse(self.Q_(42, 'meter').dimensionless)
self.assertTrue((self.Q_(42, 'meter') / self.Q_(1, 'meter')).dimensionless)
self.assertFalse((self.Q_(42, 'meter') / self.Q_(1, 'second')).dimensionless)
self.assertTrue((self.Q_(42, 'meter') / self.Q_(1, 'inch')).dimensionless)
class TestQuantityWithDefaultRegistry(TestDimensions):
@classmethod
def setUpClass(cls):
from pint import _DEFAULT_REGISTRY
cls.ureg = _DEFAULT_REGISTRY
cls.Q_ = cls.ureg.Quantity
class TestDimensionsWithDefaultRegistry(TestDimensions):
@classmethod
def setUpClass(cls):
from pint import _DEFAULT_REGISTRY
cls.ureg = _DEFAULT_REGISTRY
cls.Q_ = cls.ureg.Quantity
class TestOffsetUnitMath(QuantityTestCase, ParameterizedTestCase):
def setup(self):
self.ureg.autoconvert_offset_to_baseunit = False
self.ureg.default_as_delta = True
additions = [
# --- input tuple -------------------- | -- expected result --
(((100, 'kelvin'), (10, 'kelvin')), (110, 'kelvin')),
(((100, 'kelvin'), (10, 'degC')), 'error'),
(((100, 'kelvin'), (10, 'degF')), 'error'),
(((100, 'kelvin'), (10, 'degR')), (105.56, 'kelvin')),
(((100, 'kelvin'), (10, 'delta_degC')), (110, 'kelvin')),
(((100, 'kelvin'), (10, 'delta_degF')), (105.56, 'kelvin')),
(((100, 'degC'), (10, 'kelvin')), 'error'),
(((100, 'degC'), (10, 'degC')), 'error'),
(((100, 'degC'), (10, 'degF')), 'error'),
(((100, 'degC'), (10, 'degR')), 'error'),
(((100, 'degC'), (10, 'delta_degC')), (110, 'degC')),
(((100, 'degC'), (10, 'delta_degF')), (105.56, 'degC')),
(((100, 'degF'), (10, 'kelvin')), 'error'),
(((100, 'degF'), (10, 'degC')), 'error'),
(((100, 'degF'), (10, 'degF')), 'error'),
(((100, 'degF'), (10, 'degR')), 'error'),
(((100, 'degF'), (10, 'delta_degC')), (118, 'degF')),
(((100, 'degF'), (10, 'delta_degF')), (110, 'degF')),
(((100, 'degR'), (10, 'kelvin')), (118, 'degR')),
(((100, 'degR'), (10, 'degC')), 'error'),
(((100, 'degR'), (10, 'degF')), 'error'),
(((100, 'degR'), (10, 'degR')), (110, 'degR')),
(((100, 'degR'), (10, 'delta_degC')), (118, 'degR')),
(((100, 'degR'), (10, 'delta_degF')), (110, 'degR')),
(((100, 'delta_degC'), (10, 'kelvin')), (110, 'kelvin')),
(((100, 'delta_degC'), (10, 'degC')), (110, 'degC')),
(((100, 'delta_degC'), (10, 'degF')), (190, 'degF')),
(((100, 'delta_degC'), (10, 'degR')), (190, 'degR')),
(((100, 'delta_degC'), (10, 'delta_degC')), (110, 'delta_degC')),
(((100, 'delta_degC'), (10, 'delta_degF')), (105.56, 'delta_degC')),
(((100, 'delta_degF'), (10, 'kelvin')), (65.56, 'kelvin')),
(((100, 'delta_degF'), (10, 'degC')), (65.56, 'degC')),
(((100, 'delta_degF'), (10, 'degF')), (110, 'degF')),
(((100, 'delta_degF'), (10, 'degR')), (110, 'degR')),
(((100, 'delta_degF'), (10, 'delta_degC')), (118, 'delta_degF')),
(((100, 'delta_degF'), (10, 'delta_degF')), (110, 'delta_degF')),
]
@ParameterizedTestCase.parameterize(("input", "expected_output"),
additions)
def test_addition(self, input_tuple, expected):
self.ureg.autoconvert_offset_to_baseunit = False
qin1, qin2 = input_tuple
q1, q2 = self.Q_(*qin1), self.Q_(*qin2)
# update input tuple with new values to have correct values on failure
input_tuple = q1, q2
if expected == 'error':
self.assertRaises(OffsetUnitCalculusError, op.add, q1, q2)
else:
expected = self.Q_(*expected)
self.assertEqual(op.add(q1, q2).units, expected.units)
self.assertQuantityAlmostEqual(op.add(q1, q2), expected,
atol=0.01)
@helpers.requires_numpy()
@ParameterizedTestCase.parameterize(("input", "expected_output"),
additions)
def test_inplace_addition(self, input_tuple, expected):
self.ureg.autoconvert_offset_to_baseunit = False
(q1v, q1u), (q2v, q2u) = input_tuple
# update input tuple with new values to have correct values on failure
input_tuple = ((np.array([q1v]*2, dtype=np.float), q1u),
(np.array([q2v]*2, dtype=np.float), q2u))
Q_ = self.Q_
qin1, qin2 = input_tuple
q1, q2 = Q_(*qin1), Q_(*qin2)
q1_cp = copy.copy(q1)
if expected == 'error':
self.assertRaises(OffsetUnitCalculusError, op.iadd, q1_cp, q2)
else:
expected = np.array([expected[0]]*2, dtype=np.float), expected[1]
self.assertEqual(op.iadd(q1_cp, q2).units, Q_(*expected).units)
q1_cp = copy.copy(q1)
self.assertQuantityAlmostEqual(op.iadd(q1_cp, q2), Q_(*expected),
atol=0.01)
subtractions = [
(((100, 'kelvin'), (10, 'kelvin')), (90, 'kelvin')),
(((100, 'kelvin'), (10, 'degC')), (-183.15, 'kelvin')),
(((100, 'kelvin'), (10, 'degF')), (-160.93, 'kelvin')),
(((100, 'kelvin'), (10, 'degR')), (94.44, 'kelvin')),
(((100, 'kelvin'), (10, 'delta_degC')), (90, 'kelvin')),
(((100, 'kelvin'), (10, 'delta_degF')), (94.44, 'kelvin')),
(((100, 'degC'), (10, 'kelvin')), (363.15, 'delta_degC')),
(((100, 'degC'), (10, 'degC')), (90, 'delta_degC')),
(((100, 'degC'), (10, 'degF')), (112.22, 'delta_degC')),
(((100, 'degC'), (10, 'degR')), (367.59, 'delta_degC')),
(((100, 'degC'), (10, 'delta_degC')), (90, 'degC')),
(((100, 'degC'), (10, 'delta_degF')), (94.44, 'degC')),
(((100, 'degF'), (10, 'kelvin')), (541.67, 'delta_degF')),
(((100, 'degF'), (10, 'degC')), (50, 'delta_degF')),
(((100, 'degF'), (10, 'degF')), (90, 'delta_degF')),
(((100, 'degF'), (10, 'degR')), (549.67, 'delta_degF')),
(((100, 'degF'), (10, 'delta_degC')), (82, 'degF')),
(((100, 'degF'), (10, 'delta_degF')), (90, 'degF')),
(((100, 'degR'), (10, 'kelvin')), (82, 'degR')),
(((100, 'degR'), (10, 'degC')), (-409.67, 'degR')),
(((100, 'degR'), (10, 'degF')), (-369.67, 'degR')),
(((100, 'degR'), (10, 'degR')), (90, 'degR')),
(((100, 'degR'), (10, 'delta_degC')), (82, 'degR')),
(((100, 'degR'), (10, 'delta_degF')), (90, 'degR')),
(((100, 'delta_degC'), (10, 'kelvin')), (90, 'kelvin')),
(((100, 'delta_degC'), (10, 'degC')), (90, 'degC')),
(((100, 'delta_degC'), (10, 'degF')), (170, 'degF')),
(((100, 'delta_degC'), (10, 'degR')), (170, 'degR')),
(((100, 'delta_degC'), (10, 'delta_degC')), (90, 'delta_degC')),
(((100, 'delta_degC'), (10, 'delta_degF')), (94.44, 'delta_degC')),
(((100, 'delta_degF'), (10, 'kelvin')), (45.56, 'kelvin')),
(((100, 'delta_degF'), (10, 'degC')), (45.56, 'degC')),
(((100, 'delta_degF'), (10, 'degF')), (90, 'degF')),
(((100, 'delta_degF'), (10, 'degR')), (90, 'degR')),
(((100, 'delta_degF'), (10, 'delta_degC')), (82, 'delta_degF')),
(((100, 'delta_degF'), (10, 'delta_degF')), (90, 'delta_degF')),
]
@ParameterizedTestCase.parameterize(("input", "expected_output"),
subtractions)
def test_subtraction(self, input_tuple, expected):
self.ureg.autoconvert_offset_to_baseunit = False
qin1, qin2 = input_tuple
q1, q2 = self.Q_(*qin1), self.Q_(*qin2)
input_tuple = q1, q2
if expected == 'error':
self.assertRaises(OffsetUnitCalculusError, op.sub, q1, q2)
else:
expected = self.Q_(*expected)
self.assertEqual(op.sub(q1, q2).units, expected.units)
self.assertQuantityAlmostEqual(op.sub(q1, q2), expected,
atol=0.01)
# @unittest.expectedFailure
@helpers.requires_numpy()
@ParameterizedTestCase.parameterize(("input", "expected_output"),
subtractions)
def test_inplace_subtraction(self, input_tuple, expected):
self.ureg.autoconvert_offset_to_baseunit = False
(q1v, q1u), (q2v, q2u) = input_tuple
# update input tuple with new values to have correct values on failure
input_tuple = ((np.array([q1v]*2, dtype=np.float), q1u),
(np.array([q2v]*2, dtype=np.float), q2u))
Q_ = self.Q_
qin1, qin2 = input_tuple
q1, q2 = Q_(*qin1), Q_(*qin2)
q1_cp = copy.copy(q1)
if expected == 'error':
self.assertRaises(OffsetUnitCalculusError, op.isub, q1_cp, q2)
else:
expected = np.array([expected[0]]*2, dtype=np.float), expected[1]
self.assertEqual(op.isub(q1_cp, q2).units, Q_(*expected).units)
q1_cp = copy.copy(q1)
self.assertQuantityAlmostEqual(op.isub(q1_cp, q2), Q_(*expected),
atol=0.01)
multiplications = [
(((100, 'kelvin'), (10, 'kelvin')), (1000, 'kelvin**2')),
(((100, 'kelvin'), (10, 'degC')), 'error'),
(((100, 'kelvin'), (10, 'degF')), 'error'),
(((100, 'kelvin'), (10, 'degR')), (1000, 'kelvin*degR')),
(((100, 'kelvin'), (10, 'delta_degC')), (1000, 'kelvin*delta_degC')),
(((100, 'kelvin'), (10, 'delta_degF')), (1000, 'kelvin*delta_degF')),
(((100, 'degC'), (10, 'kelvin')), 'error'),
(((100, 'degC'), (10, 'degC')), 'error'),
(((100, 'degC'), (10, 'degF')), 'error'),
(((100, 'degC'), (10, 'degR')), 'error'),
(((100, 'degC'), (10, 'delta_degC')), 'error'),
(((100, 'degC'), (10, 'delta_degF')), 'error'),
(((100, 'degF'), (10, 'kelvin')), 'error'),
(((100, 'degF'), (10, 'degC')), 'error'),
(((100, 'degF'), (10, 'degF')), 'error'),
(((100, 'degF'), (10, 'degR')), 'error'),
(((100, 'degF'), (10, 'delta_degC')), 'error'),
(((100, 'degF'), (10, 'delta_degF')), 'error'),
(((100, 'degR'), (10, 'kelvin')), (1000, 'degR*kelvin')),
(((100, 'degR'), (10, 'degC')), 'error'),
(((100, 'degR'), (10, 'degF')), 'error'),
(((100, 'degR'), (10, 'degR')), (1000, 'degR**2')),
(((100, 'degR'), (10, 'delta_degC')), (1000, 'degR*delta_degC')),
(((100, 'degR'), (10, 'delta_degF')), (1000, 'degR*delta_degF')),
(((100, 'delta_degC'), (10, 'kelvin')), (1000, 'delta_degC*kelvin')),
(((100, 'delta_degC'), (10, 'degC')), 'error'),
(((100, 'delta_degC'), (10, 'degF')), 'error'),
(((100, 'delta_degC'), (10, 'degR')), (1000, 'delta_degC*degR')),
(((100, 'delta_degC'), (10, 'delta_degC')), (1000, 'delta_degC**2')),
(((100, 'delta_degC'), (10, 'delta_degF')), (1000, 'delta_degC*delta_degF')),
(((100, 'delta_degF'), (10, 'kelvin')), (1000, 'delta_degF*kelvin')),
(((100, 'delta_degF'), (10, 'degC')), 'error'),
(((100, 'delta_degF'), (10, 'degF')), 'error'),
(((100, 'delta_degF'), (10, 'degR')), (1000, 'delta_degF*degR')),
(((100, 'delta_degF'), (10, 'delta_degC')), (1000, 'delta_degF*delta_degC')),
(((100, 'delta_degF'), (10, 'delta_degF')), (1000, 'delta_degF**2')),
]
@ParameterizedTestCase.parameterize(("input", "expected_output"),
multiplications)
def test_multiplication(self, input_tuple, expected):
self.ureg.autoconvert_offset_to_baseunit = False
qin1, qin2 = input_tuple
q1, q2 = self.Q_(*qin1), self.Q_(*qin2)
input_tuple = q1, q2
if expected == 'error':
self.assertRaises(OffsetUnitCalculusError, op.mul, q1, q2)
else:
expected = self.Q_(*expected)
self.assertEqual(op.mul(q1, q2).units, expected.units)
self.assertQuantityAlmostEqual(op.mul(q1, q2), expected,
atol=0.01)
@helpers.requires_numpy()
@ParameterizedTestCase.parameterize(("input", "expected_output"),
multiplications)
def test_inplace_multiplication(self, input_tuple, expected):
self.ureg.autoconvert_offset_to_baseunit = False
(q1v, q1u), (q2v, q2u) = input_tuple
# update input tuple with new values to have correct values on failure
input_tuple = ((np.array([q1v]*2, dtype=np.float), q1u),
(np.array([q2v]*2, dtype=np.float), q2u))
Q_ = self.Q_
qin1, qin2 = input_tuple
q1, q2 = Q_(*qin1), Q_(*qin2)
q1_cp = copy.copy(q1)
if expected == 'error':
self.assertRaises(OffsetUnitCalculusError, op.imul, q1_cp, q2)
else:
expected = np.array([expected[0]]*2, dtype=np.float), expected[1]
self.assertEqual(op.imul(q1_cp, q2).units, Q_(*expected).units)
q1_cp = copy.copy(q1)
self.assertQuantityAlmostEqual(op.imul(q1_cp, q2), Q_(*expected),
atol=0.01)
divisions = [
(((100, 'kelvin'), (10, 'kelvin')), (10, '')),
(((100, 'kelvin'), (10, 'degC')), 'error'),
(((100, 'kelvin'), (10, 'degF')), 'error'),
(((100, 'kelvin'), (10, 'degR')), (10, 'kelvin/degR')),
(((100, 'kelvin'), (10, 'delta_degC')), (10, 'kelvin/delta_degC')),
(((100, 'kelvin'), (10, 'delta_degF')), (10, 'kelvin/delta_degF')),
(((100, 'degC'), (10, 'kelvin')), 'error'),
(((100, 'degC'), (10, 'degC')), 'error'),
(((100, 'degC'), (10, 'degF')), 'error'),
(((100, 'degC'), (10, 'degR')), 'error'),
(((100, 'degC'), (10, 'delta_degC')), 'error'),
(((100, 'degC'), (10, 'delta_degF')), 'error'),
(((100, 'degF'), (10, 'kelvin')), 'error'),
(((100, 'degF'), (10, 'degC')), 'error'),
(((100, 'degF'), (10, 'degF')), 'error'),
(((100, 'degF'), (10, 'degR')), 'error'),
(((100, 'degF'), (10, 'delta_degC')), 'error'),
(((100, 'degF'), (10, 'delta_degF')), 'error'),
(((100, 'degR'), (10, 'kelvin')), (10, 'degR/kelvin')),
(((100, 'degR'), (10, 'degC')), 'error'),
(((100, 'degR'), (10, 'degF')), 'error'),
(((100, 'degR'), (10, 'degR')), (10, '')),
(((100, 'degR'), (10, 'delta_degC')), (10, 'degR/delta_degC')),
(((100, 'degR'), (10, 'delta_degF')), (10, 'degR/delta_degF')),
(((100, 'delta_degC'), (10, 'kelvin')), (10, 'delta_degC/kelvin')),
(((100, 'delta_degC'), (10, 'degC')), 'error'),
(((100, 'delta_degC'), (10, 'degF')), 'error'),
(((100, 'delta_degC'), (10, 'degR')), (10, 'delta_degC/degR')),
(((100, 'delta_degC'), (10, 'delta_degC')), (10, '')),
(((100, 'delta_degC'), (10, 'delta_degF')), (10, 'delta_degC/delta_degF')),
(((100, 'delta_degF'), (10, 'kelvin')), (10, 'delta_degF/kelvin')),
(((100, 'delta_degF'), (10, 'degC')), 'error'),
(((100, 'delta_degF'), (10, 'degF')), 'error'),
(((100, 'delta_degF'), (10, 'degR')), (10, 'delta_degF/degR')),
(((100, 'delta_degF'), (10, 'delta_degC')), (10, 'delta_degF/delta_degC')),
(((100, 'delta_degF'), (10, 'delta_degF')), (10, '')),
]
@ParameterizedTestCase.parameterize(("input", "expected_output"),
divisions)
def test_truedivision(self, input_tuple, expected):
self.ureg.autoconvert_offset_to_baseunit = False
qin1, qin2 = input_tuple
q1, q2 = self.Q_(*qin1), self.Q_(*qin2)
input_tuple = q1, q2
if expected == 'error':
self.assertRaises(OffsetUnitCalculusError, op.truediv, q1, q2)
else:
expected = self.Q_(*expected)
self.assertEqual(op.truediv(q1, q2).units, expected.units)
self.assertQuantityAlmostEqual(op.truediv(q1, q2), expected,
atol=0.01)
@helpers.requires_numpy()
@ParameterizedTestCase.parameterize(("input", "expected_output"),
divisions)
def test_inplace_truedivision(self, input_tuple, expected):
self.ureg.autoconvert_offset_to_baseunit = False
(q1v, q1u), (q2v, q2u) = input_tuple
# update input tuple with new values to have correct values on failure
input_tuple = ((np.array([q1v]*2, dtype=np.float), q1u),
(np.array([q2v]*2, dtype=np.float), q2u))
Q_ = self.Q_
qin1, qin2 = input_tuple
q1, q2 = Q_(*qin1), Q_(*qin2)
q1_cp = copy.copy(q1)
if expected == 'error':
self.assertRaises(OffsetUnitCalculusError, op.itruediv, q1_cp, q2)
else:
expected = np.array([expected[0]]*2, dtype=np.float), expected[1]
self.assertEqual(op.itruediv(q1_cp, q2).units, Q_(*expected).units)
q1_cp = copy.copy(q1)
self.assertQuantityAlmostEqual(op.itruediv(q1_cp, q2),
Q_(*expected), atol=0.01)
multiplications_with_autoconvert_to_baseunit = [
(((100, 'kelvin'), (10, 'degC')), (28315., 'kelvin**2')),
(((100, 'kelvin'), (10, 'degF')), (26092.78, 'kelvin**2')),
(((100, 'degC'), (10, 'kelvin')), (3731.5, 'kelvin**2')),
(((100, 'degC'), (10, 'degC')), (105657.42, 'kelvin**2')),
(((100, 'degC'), (10, 'degF')), (97365.20, 'kelvin**2')),
(((100, 'degC'), (10, 'degR')), (3731.5, 'kelvin*degR')),
(((100, 'degC'), (10, 'delta_degC')), (3731.5, 'kelvin*delta_degC')),
(((100, 'degC'), (10, 'delta_degF')), (3731.5, 'kelvin*delta_degF')),
(((100, 'degF'), (10, 'kelvin')), (3109.28, 'kelvin**2')),
(((100, 'degF'), (10, 'degC')), (88039.20, 'kelvin**2')),
(((100, 'degF'), (10, 'degF')), (81129.69, 'kelvin**2')),
(((100, 'degF'), (10, 'degR')), (3109.28, 'kelvin*degR')),
(((100, 'degF'), (10, 'delta_degC')), (3109.28, 'kelvin*delta_degC')),
(((100, 'degF'), (10, 'delta_degF')), (3109.28, 'kelvin*delta_degF')),
(((100, 'degR'), (10, 'degC')), (28315., 'degR*kelvin')),
(((100, 'degR'), (10, 'degF')), (26092.78, 'degR*kelvin')),
(((100, 'delta_degC'), (10, 'degC')), (28315., 'delta_degC*kelvin')),
(((100, 'delta_degC'), (10, 'degF')), (26092.78, 'delta_degC*kelvin')),
(((100, 'delta_degF'), (10, 'degC')), (28315., 'delta_degF*kelvin')),
(((100, 'delta_degF'), (10, 'degF')), (26092.78, 'delta_degF*kelvin')),
]
@ParameterizedTestCase.parameterize(
("input", "expected_output"),
multiplications_with_autoconvert_to_baseunit)
def test_multiplication_with_autoconvert(self, input_tuple, expected):
self.ureg.autoconvert_offset_to_baseunit = True
qin1, qin2 = input_tuple
q1, q2 = self.Q_(*qin1), self.Q_(*qin2)
input_tuple = q1, q2
if expected == 'error':
self.assertRaises(OffsetUnitCalculusError, op.mul, q1, q2)
else:
expected = self.Q_(*expected)
self.assertEqual(op.mul(q1, q2).units, expected.units)
self.assertQuantityAlmostEqual(op.mul(q1, q2), expected,
atol=0.01)
@helpers.requires_numpy()
@ParameterizedTestCase.parameterize(
("input", "expected_output"),
multiplications_with_autoconvert_to_baseunit)
def test_inplace_multiplication_with_autoconvert(self, input_tuple, expected):
self.ureg.autoconvert_offset_to_baseunit = True
(q1v, q1u), (q2v, q2u) = input_tuple
# update input tuple with new values to have correct values on failure
input_tuple = ((np.array([q1v]*2, dtype=np.float), q1u),
(np.array([q2v]*2, dtype=np.float), q2u))
Q_ = self.Q_
qin1, qin2 = input_tuple
q1, q2 = Q_(*qin1), Q_(*qin2)
q1_cp = copy.copy(q1)
if expected == 'error':
self.assertRaises(OffsetUnitCalculusError, op.imul, q1_cp, q2)
else:
expected = np.array([expected[0]]*2, dtype=np.float), expected[1]
self.assertEqual(op.imul(q1_cp, q2).units, Q_(*expected).units)
q1_cp = copy.copy(q1)
self.assertQuantityAlmostEqual(op.imul(q1_cp, q2), Q_(*expected),
atol=0.01)
multiplications_with_scalar = [
(((10, 'kelvin'), 2), (20., 'kelvin')),
(((10, 'kelvin**2'), 2), (20., 'kelvin**2')),
(((10, 'degC'), 2), (20., 'degC')),
(((10, '1/degC'), 2), 'error'),
(((10, 'degC**0.5'), 2), 'error'),
(((10, 'degC**2'), 2), 'error'),
(((10, 'degC**-2'), 2), 'error'),
]
@ParameterizedTestCase.parameterize(
("input", "expected_output"), multiplications_with_scalar)
def test_multiplication_with_scalar(self, input_tuple, expected):
self.ureg.default_as_delta = False
in1, in2 = input_tuple
if type(in1) is tuple:
in1, in2 = self.Q_(*in1), in2
else:
in1, in2 = in1, self.Q_(*in2)
input_tuple = in1, in2 # update input_tuple for better tracebacks
if expected == 'error':
self.assertRaises(OffsetUnitCalculusError, op.mul, in1, in2)
else:
expected = self.Q_(*expected)
self.assertEqual(op.mul(in1, in2).units, expected.units)
self.assertQuantityAlmostEqual(op.mul(in1, in2), expected,
atol=0.01)
divisions_with_scalar = [ # without / with autoconvert to base unit
(((10, 'kelvin'), 2), [(5., 'kelvin'), (5., 'kelvin')]),
(((10, 'kelvin**2'), 2), [(5., 'kelvin**2'), (5., 'kelvin**2')]),
(((10, 'degC'), 2), ['error', 'error']),
(((10, 'degC**2'), 2), ['error', 'error']),
(((10, 'degC**-2'), 2), ['error', 'error']),
((2, (10, 'kelvin')), [(0.2, '1/kelvin'), (0.2, '1/kelvin')]),
((2, (10, 'degC')), ['error', (2/283.15, '1/kelvin')]),
((2, (10, 'degC**2')), ['error', 'error']),
((2, (10, 'degC**-2')), ['error', 'error']),
]
@ParameterizedTestCase.parameterize(
("input", "expected_output"), divisions_with_scalar)
def test_division_with_scalar(self, input_tuple, expected):
self.ureg.default_as_delta = False
in1, in2 = input_tuple
if type(in1) is tuple:
in1, in2 = self.Q_(*in1), in2
else:
in1, in2 = in1, self.Q_(*in2)
input_tuple = in1, in2 # update input_tuple for better tracebacks
expected_copy = expected[:]
for i, mode in enumerate([False, True]):
self.ureg.autoconvert_offset_to_baseunit = mode
if expected_copy[i] == 'error':
self.assertRaises(OffsetUnitCalculusError, op.truediv, in1, in2)
else:
expected = self.Q_(*expected_copy[i])
self.assertEqual(op.truediv(in1, in2).units, expected.units)
self.assertQuantityAlmostEqual(op.truediv(in1, in2), expected)
    exponentiation = [  # results without / with autoconvert
(((10, 'degC'), 1), [(10, 'degC'), (10, 'degC')]),
(((10, 'degC'), 0.5), ['error', (283.15**0.5, 'kelvin**0.5')]),
(((10, 'degC'), 0), [(1., ''), (1., '')]),
(((10, 'degC'), -1), ['error', (1/(10+273.15), 'kelvin**-1')]),
(((10, 'degC'), -2), ['error', (1/(10+273.15)**2., 'kelvin**-2')]),
((( 0, 'degC'), -2), ['error', (1/(273.15)**2, 'kelvin**-2')]),
(((10, 'degC'), (2, '')), ['error', ((283.15)**2, 'kelvin**2')]),
(((10, 'degC'), (10, 'degK')), ['error', 'error']),
(((10, 'kelvin'), (2, '')), [(100., 'kelvin**2'), (100., 'kelvin**2')]),
(( 2, (2, 'kelvin')), ['error', 'error']),
(( 2, (500., 'millikelvin/kelvin')), [2**0.5, 2**0.5]),
(( 2, (0.5, 'kelvin/kelvin')), [2**0.5, 2**0.5]),
(((10, 'degC'), (500., 'millikelvin/kelvin')),
['error', (283.15**0.5, 'kelvin**0.5')]),
]
@ParameterizedTestCase.parameterize(
("input", "expected_output"), exponentiation)
def test_exponentiation(self, input_tuple, expected):
self.ureg.default_as_delta = False
in1, in2 = input_tuple
if type(in1) is tuple and type(in2) is tuple:
in1, in2 = self.Q_(*in1), self.Q_(*in2)
elif not type(in1) is tuple and type(in2) is tuple:
in2 = self.Q_(*in2)
else:
in1 = self.Q_(*in1)
input_tuple = in1, in2
expected_copy = expected[:]
for i, mode in enumerate([False, True]):
self.ureg.autoconvert_offset_to_baseunit = mode
if expected_copy[i] == 'error':
self.assertRaises((OffsetUnitCalculusError,
DimensionalityError), op.pow, in1, in2)
else:
if type(expected_copy[i]) is tuple:
expected = self.Q_(*expected_copy[i])
self.assertEqual(op.pow(in1, in2).units, expected.units)
else:
expected = expected_copy[i]
self.assertQuantityAlmostEqual(op.pow(in1, in2), expected)
@helpers.requires_numpy()
@ParameterizedTestCase.parameterize(
("input", "expected_output"), exponentiation)
def test_inplace_exponentiation(self, input_tuple, expected):
self.ureg.default_as_delta = False
in1, in2 = input_tuple
if type(in1) is tuple and type(in2) is tuple:
(q1v, q1u), (q2v, q2u) = in1, in2
in1 = self.Q_(*(np.array([q1v]*2, dtype=np.float), q1u))
in2 = self.Q_(q2v, q2u)
elif not type(in1) is tuple and type(in2) is tuple:
in2 = self.Q_(*in2)
else:
in1 = self.Q_(*in1)
input_tuple = in1, in2
expected_copy = expected[:]
for i, mode in enumerate([False, True]):
self.ureg.autoconvert_offset_to_baseunit = mode
in1_cp = copy.copy(in1)
if expected_copy[i] == 'error':
self.assertRaises((OffsetUnitCalculusError,
DimensionalityError), op.ipow, in1_cp, in2)
else:
if type(expected_copy[i]) is tuple:
expected = self.Q_(np.array([expected_copy[i][0]]*2,
dtype=np.float),
expected_copy[i][1])
self.assertEqual(op.ipow(in1_cp, in2).units, expected.units)
else:
expected = np.array([expected_copy[i]]*2, dtype=np.float)
in1_cp = copy.copy(in1)
self.assertQuantityAlmostEqual(op.ipow(in1_cp, in2), expected)
| lgpl-3.0 | -3,559,116,201,255,252,500 | 47.280402 | 130 | 0.517559 | false |
kohout/djangocms-getaweb-products | djangocms_product/models.py | 1 | 10145 | # -*- coding: utf-8 -*-
from cms.models.pluginmodel import CMSPlugin
from cms.models.pagemodel import Page
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext as _
from django.utils.timezone import utc
from easy_thumbnails.fields import ThumbnailerImageField
from easy_thumbnails.files import get_thumbnailer
from easy_thumbnails.exceptions import InvalidImageFormatError
from tinymce.models import HTMLField
from decimal import Decimal
import datetime
import settings
DJANGOCMS_PRODUCT_DEFAULT_COUNTRY = getattr(settings,
'DJANGOCMS_PRODUCT_DEFAULT_COUNTRY', u'')
DJANGOCMS_PRODUCT_COUNTRIES = getattr(settings,
'DJANGOCMS_PRODUCT_COUNTRIES', [])
class ProductCategory(models.Model):
title = models.CharField(
max_length=150,
verbose_name=_(u'Title'))
order = models.PositiveIntegerField(
default=0,
verbose_name=_(u'Order'))
section = models.CharField(
max_length=50,
blank=True, default=u'',
verbose_name=_(u'Section'))
slug = models.SlugField(
max_length=255,
db_index=True,
unique=True,
verbose_name=_("slug"))
free_shipping = models.BooleanField(
default=False,
help_text=u'Für Produkte dieser Kategorie werden keine ' \
u'Versandkosten berechnet',
verbose_name=u'Versandkostenfrei')
def active_productitems(self):
return self.productitem_set.filter(active=True)
def productitems_count(self):
return self.active_productitems().count()
productitems_count.short_description = _(u'Count of active product items')
def get_absolute_url(self):
view_name = '%s-product:product-index' % (
settings.SITE_PREFIX, )
return "%s?category=%s" % (reverse(view_name), self.pk)
def __unicode__(self):
return self.title
class Meta:
verbose_name = _(u'Product Category')
verbose_name_plural = _(u'Product Categories')
class ProductItem(models.Model):
created_at = models.DateTimeField(
auto_now_add=True,
verbose_name=_(u'Created at'))
changed_at = models.DateTimeField(
auto_now=True,
        verbose_name=_(u'Changed at'))
active = models.BooleanField(
default=False,
verbose_name=_(u'Active'))
title = models.CharField(
max_length=150,
verbose_name=_(u'Headline of the product article'))
slug = models.SlugField(
max_length=255,
db_index=True,
unique=True,
verbose_name=_("slug"))
content = HTMLField(
blank=True,
verbose_name=_(u'Content'))
price = models.DecimalField(
max_digits=20,
decimal_places=2,
default=Decimal('0.00'),
verbose_name=_(u'Price'))
special_offer = models.CharField(
max_length=255,
blank=True, null=True,
verbose_name=_(u'Special offer'))
product_categories = models.ManyToManyField(
ProductCategory,
blank=True, null=True,
verbose_name=_(u'Selected product categories'))
target_page = models.ManyToManyField(
Page,
blank=True, null=True,
verbose_name=_(u'Target Page'))
document = models.FileField(
upload_to='cms_products',
blank=True, null=True,
verbose_name=_(u'Document (e.g. product catalogue, ...)'))
link = models.URLField(
blank=True, null=True,
help_text=_(u'Link to more detailed page'),
verbose_name=_(u'URL'))
order = models.PositiveIntegerField(
default=0,
verbose_name=_(u'Order'))
out_of_stock = models.BooleanField(
default=False,
verbose_name=u'ausverkauft')
def get_first_image(self):
images = self.productimage_set.all()
if images.count() == 0:
return None
first_image = images[0]
return first_image
def get_more_images(self):
return self.productimage_set.all()[1:]
def has_multiple_images(self):
return self.productimage_set.count() > 1
def get_absolute_url(self):
view_name = 'cms-product:product-detail'
return reverse(view_name, kwargs={'slug': self.slug})
def __unicode__(self):
return self.title
class Meta:
ordering = ('-changed_at', )
verbose_name = _(u'Product Item')
verbose_name_plural = _(u'Product Items')
class ProductTeaser(CMSPlugin):
product_ORDERING_FUTURE_ASC = 'future_asc'
product_ORDERING_PAST_DESC = 'past_desc'
product_ORDERING_CHOICES = (
(product_ORDERING_FUTURE_ASC, _(u'from now to future (ascending)')),
(product_ORDERING_PAST_DESC, _(u'from now to past (descending)')),
)
title = models.CharField(
max_length=150,
verbose_name=_(u'Headline of the product list'))
product_categories = models.ManyToManyField(
ProductCategory,
verbose_name=_(u'Selected product categories'))
ordering = models.CharField(
max_length=20,
choices=product_ORDERING_CHOICES,
default=product_ORDERING_PAST_DESC,
verbose_name=_(u'Ordering/Selection of Articles'))
target_page = models.ForeignKey(Page,
verbose_name=_(u'Target Page'))
def get_items(self):
items = ProductItem.objects.filter(active=True)
now = datetime.datetime.utcnow().replace(tzinfo=utc)
if self.ordering == self.product_ORDERING_PAST_DESC:
items = items.filter(changed_at__lte=now).order_by('-changed_at')
else:
items = items.filter(changed_at__gte=now).order_by('changed_at')
return items
def __unicode__(self):
return self.title
class ProductImage(models.Model):
image = ThumbnailerImageField(
upload_to='cms_product/',
verbose_name=_(u'Image'))
image_width = models.PositiveSmallIntegerField(
default=0,
null=True,
verbose_name=_(u'Original Image Width'))
image_height = models.PositiveSmallIntegerField(
default=0,
null=True,
verbose_name=_(u'Original Image Height'))
title = models.CharField(
blank=True,
default='',
max_length=150,
verbose_name=_(u'Image Title'))
alt = models.CharField(
blank=True,
default='',
max_length=150,
verbose_name=_(u'Alternative Image Text'))
ordering = models.PositiveIntegerField(
verbose_name=_(u'Ordering'))
product_item = models.ForeignKey(ProductItem,
verbose_name=_(u'Product Item'))
def get_title(self):
if self.title:
return self.title
return self.product_item.title
def get_alt(self):
if self.alt:
return self.alt
return u'Bild %s' % (self.ordering + 1)
def save(self, *args, **kwargs):
if self.ordering is None:
self.ordering = self.product_item.productimage_set.count()
super(ProductImage, self).save(*args, **kwargs)
def _get_image(self, image_format):
_image_format = settings.THUMBNAIL_ALIASES[''][image_format]
_img = self.image
try:
img = get_thumbnailer(_img).get_thumbnail(_image_format)
return {
'url': img.url,
'width': img.width,
'height': img.height,
'alt': self.alt,
'title': self.title,
}
except (UnicodeEncodeError, InvalidImageFormatError):
return None
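    # The helpers below assume settings.THUMBNAIL_ALIASES[''] defines aliases
    # named 'preview', 'teaser', 'normal', 'main' and 'fullsize'; _get_image
    # returns None (rather than raising) when thumbnail generation fails with
    # one of the handled errors.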
def get_preview(self):
return self._get_image('preview')
def get_teaser(self):
return self._get_image('teaser')
def get_normal(self):
return self._get_image('normal')
def get_main(self):
return self._get_image('main')
def get_fullsize(self):
return self._get_image('fullsize')
def __unicode__(self):
if self.title:
return self.title
if self.alt:
return self.alt
return _(u'Image #%s') % self.ordering
class Meta:
ordering = ['ordering']
verbose_name = _(u'Product Image')
verbose_name_plural = _(u'Product Images')
class Order(models.Model):
created_at = models.DateTimeField(auto_now_add=True,
verbose_name=_(u'Erstellt am'))
first_name = models.CharField(max_length=50,
verbose_name=_(u'Vorname'))
last_name = models.CharField(max_length=50,
verbose_name=_(u'Nachname'))
address = models.CharField(max_length=150,
verbose_name=_(u'Adresse'))
zipcode = models.CharField(max_length=5,
verbose_name=_(u'PLZ'))
city = models.CharField(max_length=50,
verbose_name=_(u'Ort'))
country = models.CharField(max_length=100,
blank=True,
default=DJANGOCMS_PRODUCT_DEFAULT_COUNTRY,
choices=DJANGOCMS_PRODUCT_COUNTRIES,
verbose_name=_(u'Land'))
telephone = models.CharField(max_length=50,
verbose_name=_(u'Telefon'))
email = models.EmailField(max_length=150,
verbose_name=_(u'Email'))
total_amount = models.DecimalField(
max_digits=20,
decimal_places=2,
default=Decimal('0.00'),
verbose_name=_(u'Gesamtbetrag'))
shipping_amount = models.DecimalField(
max_digits=20,
decimal_places=2,
default=Decimal('0.00'),
verbose_name=_(u'Versandkosten'))
shipping_label = models.CharField(max_length=150,
default=u'', blank=True,
verbose_name=_(u'Versand-Label'))
@property
def amount_with_shipping(self):
return self.total_amount + self.shipping_amount
class Meta:
verbose_name = _(u'Bestellung')
verbose_name_plural = _(u'Bestellungen')
class OrderedItem(models.Model):
order = models.ForeignKey(Order,
verbose_name=_(u'Bestellung'))
product_item = models.ForeignKey(ProductItem,
verbose_name=_(u'Bestelltes Produkt'))
amount = models.PositiveIntegerField(default=0,
verbose_name=_(u'Menge'))
| unlicense | -9,202,939,927,887,959,000 | 28.835294 | 78 | 0.616719 | false |
totalgood/twote | twote/pw_model.py | 1 | 6558 | import datetime
from collections import Mapping
import peewee as pw
from playhouse.shortcuts import model_to_dict, dict_to_model
from playhouse.csv_utils import dump_csv
import pandas as pd
import gzip
from secrets import DB_NAME, DB_PASSWORD, DB_USER
import models
db = pw.SqliteDatabase('tweets.db')
psql_db = pw.PostgresqlDatabase(DB_NAME, user=DB_USER, password=DB_PASSWORD)
class BaseModel(pw.Model):
class Meta:
database = db
class Place(BaseModel):
"""Twitter API json "place" key"""
id_str = pw.CharField()
place_type = pw.CharField(null=True)
country_code = pw.CharField(null=True)
country = pw.CharField(null=True)
name = pw.CharField(null=True)
full_name = pw.CharField(null=True)
url = pw.CharField(null=True) # URL to json polygon of place boundary
bounding_box_coordinates = pw.CharField(null=True) # json list of 4 [lat, lon] pairs
class User(BaseModel):
id_str = pw.CharField(null=True) # v4
screen_name = pw.CharField(null=True)
verified = pw.BooleanField(null=True) # v4
time_zone = pw.CharField(null=True) # v4
utc_offset = pw.IntegerField(null=True) # -28800 (v4)
protected = pw.BooleanField(null=True) # v4
location = pw.CharField(null=True) # Houston, TX (v4)
lang = pw.CharField(null=True) # en (v4)
followers_count = pw.IntegerField(null=True)
created_date = pw.DateTimeField(default=datetime.datetime.now)
statuses_count = pw.IntegerField(null=True)
friends_count = pw.IntegerField(null=True)
favourites_count = pw.IntegerField(default=0)
class Tweet(BaseModel):
id_str = pw.CharField(null=True)
in_reply_to_id_str = pw.CharField(null=True, default=None)
in_reply_to = pw.ForeignKeyField('self', null=True, related_name='replies')
user = pw.ForeignKeyField(User, null=True, related_name='tweets')
source = pw.CharField(null=True) # e.g. "Twitter for iPhone"
text = pw.CharField(null=True)
tags = pw.CharField(null=True) # e.g. "#sarcasm #angry #trumped"
created_date = pw.DateTimeField(default=datetime.datetime.now)
location = pw.CharField(null=True)
place = pw.ForeignKeyField(Place, null=True)
favorite_count = pw.IntegerField(default=0)
def tweets_to_df():
tweets = []
for t in Tweet.select():
try:
tweets += [(t.user.screen_name, t.text, t.tags, t.favorite_count, t.user.followers_count, t.user.friends_count, t.user.statuses_count)]
except:
tweets += [(None, t.text, t.tags, t.favorite_count, None, None, None)]
return pd.DataFrame(tweets)
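# Illustrative note: the DataFrame built above has seven integer-labelled
# columns in this order: screen_name, text, tags, favorite_count,
# followers_count, friends_count, statuses_count; rows whose user lookup
# fails carry None in the user-derived columns.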
def dump_tweets(name='twitterbot'):
with gzip.open(name + '-tweets-2016-12-11.csv.gz', 'w') as fout:
query = Tweet.select()
dump_csv(query, fout)
with gzip.open(name + '-tweets-2016-12-11.csv.gz', 'w') as fout:
query = User.select()
dump_csv(query, fout)
def create_tables():
db.connect()
db.create_tables([Place, User, Tweet])
def pw2dj(tables=((User, models.User), (Place, models.Place), (Tweet, models.Tweet)), delete_first=True, batch_size=10000):
"""Copies all records from peewee sqlite database to Django postgresql database, ignoring ForeignKeys
This worked and also migrated foreign keys! (only 217 in_reply_to tweets out of 240k though)
"""
for from_cls, to_cls in tables:
print('=' * 100)
print('Copying {} -> {}'.format(from_cls, to_cls))
if delete_first:
M = to_cls.objects.count()
print('Deleting {} {} records'.format(M, to_cls))
to_cls.objects.all().delete()
assert(to_cls.objects.count() == 0)
query = from_cls.select()
N = query.count()
records = []
for i, obj in enumerate(query):
d = model_to_dict(obj)
if isinstance(obj, models.Tweet):
if d['in_reply_to'] is not None and len(d['in_reply_to']) > 0:
to_cls.in_reply_to = models.Tweet(**d['in_reply_to'])
for k, v in d.iteritems():
# only works for foreign keys to self
if isinstance(v, dict) and not len(v):
d[k] = None
else: # FIXME: come back later and fill in foreign keys: in_reply_to, place, user
d[k] = None
records += [to_cls(**d)]
if not i % batch_size:
assert(from_cls.select().count() == N)
print('Saving {:08d}/{:08d} {}: {}'.format(i, N, round(i * 100. / N, 1), obj))
# this will not work for many2many fields
to_cls.objects.bulk_create(records)
records = []
if len(records):
print('Saving last batch {:08d}/{:08d} {}: {}'.format(i, N, round(i * 100. / N, 1), obj))
# this will not work for many2many fields
to_cls.objects.bulk_create(records)
records = []
class Serializer(object):
"""Callable serializer. An instance of this class can be passed to the `default` arg in json.dump
>>> json.dumps(model.Tweet(), default=Serializer(), indent=2)
{...}
"""
date_format = '%Y-%m-%d'
time_format = '%H:%M:%S'
datetime_format = ' '.join([date_format, time_format])
def convert_value(self, value):
if isinstance(value, datetime.datetime):
return value.strftime(self.datetime_format)
elif isinstance(value, datetime.date):
return value.strftime(self.date_format)
elif isinstance(value, datetime.time):
return value.strftime(self.time_format)
elif isinstance(value, pw.Model):
return value.get_id()
else:
return value
def clean_data(self, data):
# flask doesn't bother with this condition check, why?
if isinstance(data, Mapping):
for key, value in data.items():
if isinstance(value, dict):
self.clean_data(value)
elif isinstance(value, (list, tuple)):
data[key] = map(self.clean_data, value)
else:
data[key] = self.convert_value(value)
return data
def serialize_object(self, obj, **kwargs):
data = model_to_dict(obj, **kwargs)
return self.clean_data(data)
def __call__(self, obj, **kwargs):
return self.serialize_object(obj, **kwargs)
class Deserializer(object):
def deserialize_object(self, model, data, **kwargs):
return dict_to_model(model, data, **kwargs)
| mit | 8,448,318,166,340,821,000 | 36.261364 | 147 | 0.60735 | false |
taedori81/shoop | shoop/admin/modules/products/__init__.py | 1 | 4165 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from collections import Counter
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from shoop.admin.base import AdminModule, MenuEntry, SearchResult
from shoop.admin.utils.search import split_query
from shoop.admin.utils.urls import (
admin_url, derive_model_url, get_edit_and_list_urls, get_model_url,
manipulate_query_string
)
from shoop.core.models import Product
class ProductModule(AdminModule):
name = _("Products")
breadcrumbs_menu_entry = MenuEntry(name, url="shoop_admin:product.list")
def get_urls(self):
return [
admin_url(
"^products/(?P<pk>\d+)/delete/$", "shoop.admin.modules.products.views.ProductDeleteView",
name="product.delete"
),
admin_url(
"^products/(?P<pk>\d+)/media/$", "shoop.admin.modules.products.views.ProductMediaEditView",
name="product.edit_media"
),
admin_url(
"^products/(?P<pk>\d+)/crosssell/$", "shoop.admin.modules.products.views.ProductCrossSellEditView",
name="product.edit_cross_sell"
),
admin_url(
"^products/(?P<pk>\d+)/variation/$", "shoop.admin.modules.products.views.ProductVariationView",
name="product.edit_variation"
),
] + get_edit_and_list_urls(
url_prefix="^products",
view_template="shoop.admin.modules.products.views.Product%sView",
name_template="product.%s"
)
def get_menu_category_icons(self):
return {self.name: "fa fa-cube"}
def get_menu_entries(self, request):
category = _("Products")
return [
MenuEntry(
text=_("Products"),
icon="fa fa-cube",
url="shoop_admin:product.list",
category=category
)
]
def get_search_results(self, request, query):
minimum_query_length = 3
skus_seen = set()
if len(query) >= minimum_query_length:
pk_counter = Counter()
pk_counter.update(Product.objects.filter(sku__startswith=query).values_list("pk", flat=True))
name_q = Q()
for part in split_query(query, minimum_query_length):
name_q &= Q(name__icontains=part)
pk_counter.update(
Product._parler_meta.root_model.objects.filter(name_q).values_list("master_id", flat=True)
)
pks = [pk for (pk, count) in pk_counter.most_common(10)]
for product in Product.objects.filter(pk__in=pks):
relevance = 100 - pk_counter.get(product.pk, 0)
skus_seen.add(product.sku.lower())
yield SearchResult(
text=force_text(product),
url=get_model_url(product),
category=_("Products"),
relevance=relevance
)
if len(query) >= minimum_query_length:
url = reverse("shoop_admin:product.new")
if " " in query:
yield SearchResult(
text=_("Create Product Called \"%s\"") % query,
url=manipulate_query_string(url, name=query),
is_action=True
)
else:
if query.lower() not in skus_seen:
yield SearchResult(
text=_("Create Product with SKU \"%s\"") % query,
url=manipulate_query_string(url, sku=query),
is_action=True
)
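    # Search behaviour summary: product hits get relevance 100 minus their match
    # count (SKU prefix plus name-term matches), limited to the ten most frequent
    # matches; the create-product shortcuts are only offered for queries of at
    # least three characters.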
def get_model_url(self, object, kind):
return derive_model_url(Product, "shoop_admin:product", object, kind)
| agpl-3.0 | 4,579,189,671,847,036,400 | 37.564815 | 115 | 0.561345 | false |
hpk42/numpyson | test_numpyson.py | 1 | 7235 | import datetime as dt
from functools import partial
import inspect
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_index_equal, assert_series_equal, assert_frame_equal
from numpy.testing import assert_equal
assert_series_equal_strict = partial(assert_series_equal, check_dtype=True, check_index_type=True,
check_series_type=True, check_less_precise=False)
assert_frame_equal_strict = partial(assert_frame_equal, check_dtype=True, check_index_type=True,
check_column_type=True, check_frame_type=True, check_less_precise=False,
check_names=True)
from numpyson import dumps, loads, build_index_handler_for_type
def test_version():
import numpyson
assert numpyson.__version__
@pytest.mark.parametrize('arr_before', [
np.array([1, 2, 3]),
np.array([1., 2., 3.]),
np.array(['foo', 'bar', 'baz']),
np.array([dt.datetime(1970, 1, 1, 12, 57), dt.datetime(1970, 1, 1, 12, 58), dt.datetime(1970, 1, 1, 12, 59)]),
np.array([dt.date(1970, 1, 1), dt.date(1970, 1, 2), dt.date(1970, 1, 3)]),
np.array([True, False, True]),
np.arange(10).T,
np.array([[1, 4, 7], [2, 5, 8], [3, 6, 9]]),
np.array([[[1., 10.], [4., 40.], [7., 70.]], [[2., 20.], [5., 50.], [8., 80.]], [[3., 30.], [6., 60.], [9., 90.]]]),
np.reshape(np.arange(100), (10, 10)),
np.reshape(np.arange(100).T, (10, 10)),
])
def test_numpy_array_handler(arr_before):
buf = dumps(arr_before)
arr_after = loads(buf)
assert_equal(arr_before, arr_after)
def test_nested_array():
data_before = {"1": np.array([1, 2])}
buf = dumps(data_before)
data_after = loads(buf)
assert_equal(data_before["1"], data_after["1"])
@pytest.mark.parametrize('ts_before', [
pd.TimeSeries([1, 2, 3], index=[0, 1, 2]),
pd.TimeSeries([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='S')),
pd.TimeSeries([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='D')),
])
def test_pandas_timeseries_handler(ts_before):
buf = dumps(ts_before)
ts_after = loads(buf)
assert_series_equal_strict(ts_before, ts_after)
@pytest.mark.parametrize('index_before', [
pd.Index([0, 1, 2]),
pd.Index([0., 1., 2.]), # not sure why you would want to index by floating point numbers; here for completeness
pd.Index(['a', 'b', 'c']),
])
def test_pandas_index_handler(index_before):
buf = dumps(index_before)
index_after = loads(buf)
assert_index_equal(index_before, index_after)
@pytest.mark.parametrize('index_before', [
pd.date_range('1970-01-01', periods=3, freq='S'),
pd.date_range('1970-01-01', periods=3, freq='D'),
])
def test_pandas_datetime_index_handler(index_before):
buf = dumps(index_before)
index_after = loads(buf)
assert_index_equal(index_before, index_after)
@pytest.mark.parametrize('data_before', [
{"1": pd.date_range('1970-01-01', periods=3, freq='S')},
{"1": pd.date_range('1970-01-01', periods=3, freq='D')},
])
def test_datetime_index_nested(data_before):
buf = dumps(data_before)
data_after = loads(buf)
assert_index_equal(data_before["1"], data_after["1"])
TEST_DATA_FRAMES = (
pd.DataFrame({0: [1, 2, 3]}, index=[0, 1, 2]),
pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=[0, 1, 2]),
pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=pd.date_range('1970-01-01', periods=3, freq='S')),
pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=pd.date_range('1970-01-01', periods=3, freq='D')),
pd.DataFrame({'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3]}, index=pd.date_range('1970-01-01', periods=3, freq='D')),
pd.DataFrame({
'i': [1, 2, 3],
'f': [1.1, 2.2, 3.3],
's': ['ham', 'spam', 'eggs'],
'b': [True, False, True],
'o': [{'a': 1}, {'b': 2}, {'c': 3}],
},
index=pd.date_range('1970-01-01', periods=3, freq='S')),
pd.DataFrame(np.ones(shape=(10,15)), index=pd.date_range('1970-01-01', periods=10))
)
@pytest.mark.parametrize('df_before', TEST_DATA_FRAMES)
def test_pandas_dataframe_handler(df_before):
buf = dumps(df_before)
df_after = loads(buf)
assert_frame_equal_strict(df_before, df_after)
def test_mixed_python_and_pandas_types():
data_before = TEST_DATA_FRAMES
buf = dumps(data_before)
data_after = loads(buf)
assert isinstance(data_after, tuple)
assert len(data_after) == len(TEST_DATA_FRAMES)
assert len(data_before) == len(data_after)
for df_before, df_after in zip(data_before, data_after):
assert_frame_equal_strict(df_before, df_after)
def test_build_index_handler_for_type():
for index_class in ():
handler_cls = build_index_handler_for_type(index_class)
assert inspect.isclass(handler_cls)
assert hasattr(handler_cls, 'flatten')
assert hasattr(handler_cls, 'restore')
with pytest.raises(TypeError):
build_index_handler_for_type(pd.DatetimeIndex)
with pytest.raises(TypeError):
build_index_handler_for_type(pd.TimeSeries)
@pytest.mark.xfail(reason='failing preserve underlying array state when it is wrapped inside a Pandas object')
def test_preservation_of_specific_array_ordering():
df_c = pd.DataFrame(np.array([[1,2],[3,4], [5,6]], order='C'))
df_c_after = loads(dumps(df_c))
assert_frame_equal_strict(df_c, df_c_after)
assert_equal(df_c.values, df_c_after.values)
assert not df_c.values.flags.fortran
assert not df_c_after.values.flags.fortran
df_f = pd.DataFrame(np.array([[1,2],[3,4], [5,6]], order='F'))
df_f_after = loads(dumps(df_f))
assert_frame_equal_strict(df_f, df_f_after)
assert_equal(df_f.values, df_f_after.values)
assert df_f.values.flags.fortran
assert df_f_after.values.flags.fortran
def test_preservation_of_specific_array_ordering_simple():
arr_c = np.array([[1,2],[3,4], [5,6]], order='C')
arr_f = np.array([[1,2],[3,4], [5,6]], order='F')
assert_equal(arr_c, arr_f)
assert arr_c.strides != arr_f.strides
# C array ordering
arr_c_after = loads(dumps(arr_c))
assert arr_c.strides == arr_c_after.strides
assert not arr_c.flags.fortran
assert not arr_c_after.flags.fortran
assert_equal(arr_c, arr_c_after)
# Fortran array order
arr_f_after = loads(dumps(arr_f))
assert arr_f.strides == arr_f_after.strides
assert arr_f.flags.fortran
assert arr_f_after.flags.fortran
assert_equal(arr_f, arr_f_after)
@pytest.mark.parametrize("val", [np.float64(4.2), np.int64(5)])
def test_number(val):
dumped = dumps(val)
loaded = loads(dumped)
assert loaded == val
assert type(loaded) == type(val)
def test_datetime_identity():
import datetime
date = datetime.datetime(2013, 11, 1, 0, 0)
val = {
'start': date,
'end': date,
'd': {"ttf": pd.TimeSeries([1.],
pd.date_range("1970-1-1", periods=1, freq='S'))
}
}
dumped = dumps(val)
loaded = loads(dumped)
assert loaded["start"] == val["start"], dumped
assert loaded["end"] == val["end"]
assert loaded["end"] == val["end"]
| mit | -594,982,396,743,412,500 | 33.617225 | 120 | 0.609399 | false |
daleroberts/luigi-examples | loadconfig.py | 1 | 1551 | import tempfile
import logging
import luigi
import time
import random
import os
from datetime import datetime
from os.path import join as pjoin, dirname, exists, basename, abspath
CONFIG = luigi.configuration.get_config()
CONFIG.add_config_path(pjoin(dirname(__file__), 'loadconfig.cfg'))
# Set global config
TEMPDIR = tempfile.mkdtemp()
ACQ_MIN = datetime(*map(int, CONFIG.get('data', 'acq_min').split('-')))
ACQ_MAX = datetime(*map(int, CONFIG.get('data', 'acq_max').split('-')))
logging.basicConfig()
log = logging.getLogger()
class DummyTask(luigi.Task):
id = luigi.Parameter()
def run(self):
f = self.output().open('w')
f.close()
def output(self):
return luigi.LocalTarget(os.path.join(TEMPDIR, str(self.id)))
class SleepyTask(luigi.Task):
id = luigi.Parameter()
def run(self):
time.sleep(random.uniform(0,2))
f = self.output().open('w')
f.close()
def output(self):
return luigi.LocalTarget(os.path.join(TEMPDIR, str(self.id)))
class ChainedSleepyTask(SleepyTask):
id = luigi.Parameter()
def requires(self):
if int(self.id) > 0:
return [ChainedSleepyTask(int(self.id)-1)]
else:
return []
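# Note: ChainedSleepyTask(n) depends on ChainedSleepyTask(n - 1), so the
# ChainedSleepyTask(35) scheduled below expands into 36 chained tasks, each
# sleeping up to two seconds before writing its (empty) output file.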
if __name__ == '__main__':
log.warning('temp directory: ' + TEMPDIR)
log.warning('acq_min: ' + str(ACQ_MIN))
tasks = [DummyTask(id) for id in range(20)]
tasks.extend([SleepyTask(id) for id in range(20, 30)])
tasks.extend([ChainedSleepyTask(35)])
luigi.build(tasks, local_scheduler=True)
| unlicense | 2,018,563,003,085,412,400 | 23.234375 | 71 | 0.642166 | false |
leiferikb/bitpop | build/scripts/slave/gtest/json_results_generator.py | 1 | 17419 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility class to generate JSON results from given test results and upload
them to the specified results server.
"""
from __future__ import with_statement
import codecs
import logging
import os
import time
import urllib2
import simplejson
from slave.gtest.test_result import TestResult
from slave.gtest.test_results_uploader import TestResultsUploader
# A JSON results generator for generic tests.
JSON_PREFIX = 'ADD_RESULTS('
JSON_SUFFIX = ');'
def has_json_wrapper(string):
return string.startswith(JSON_PREFIX) and string.endswith(JSON_SUFFIX)
def strip_json_wrapper(json_content):
# FIXME: Kill this code once the server returns json instead of jsonp.
if has_json_wrapper(json_content):
return json_content[len(JSON_PREFIX):-len(JSON_SUFFIX)]
return json_content
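# Example (illustrative): strip_json_wrapper('ADD_RESULTS({"a":1});') returns
# '{"a":1}'; strings without the jsonp wrapper are returned unchanged.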
def convert_trie_to_flat_paths(trie, prefix=None):
"""Converts the directory structure in the given trie to flat paths,
prepending a prefix to each."""
result = {}
for name, data in trie.iteritems():
if prefix:
name = prefix + '/' + name
if len(data) and not 'results' in data:
result.update(convert_trie_to_flat_paths(data, name))
else:
result[name] = data
return result
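# Example (illustrative): {'foo': {'bar': {'baz.html': {'results': 'P'}}}}
# flattens to {'foo/bar/baz.html': {'results': 'P'}}; a node counts as a leaf
# as soon as it carries a 'results' key.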
def add_path_to_trie(path, value, trie):
"""Inserts a single flat directory path and associated value into a directory
trie structure."""
if not '/' in path:
trie[path] = value
return
# we don't use slash
# pylint: disable=W0612
directory, slash, rest = path.partition('/')
if not directory in trie:
trie[directory] = {}
add_path_to_trie(rest, value, trie[directory])
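# Example (illustrative): starting from an empty dict `trie`,
# add_path_to_trie('foo/bar.html', 7, trie) leaves trie == {'foo': {'bar.html': 7}}.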
def generate_test_timings_trie(individual_test_timings):
"""Breaks a test name into chunks by directory and puts the test time as a
value in the lowest part, e.g.
foo/bar/baz.html: 1ms
foo/bar/baz1.html: 3ms
becomes
foo: {
bar: {
baz.html: 1,
baz1.html: 3
}
}
"""
trie = {}
for test_result in individual_test_timings:
test = test_result.test_name
add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)
return trie
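# Example (illustrative): a result named 'foo/bar/baz.html' that ran for
# 0.001s contributes {'foo': {'bar': {'baz.html': 1}}}, i.e. leaf values are
# run times in milliseconds.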
class JSONResultsGenerator(object):
"""A JSON results generator for generic tests."""
MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
# Min time (seconds) that will be added to the JSON.
MIN_TIME = 1
# Note that in non-chromium tests those chars are used to indicate
# test modifiers (FAILS, FLAKY, etc) but not actual test results.
PASS_RESULT = 'P'
SKIP_RESULT = 'X'
FAIL_RESULT = 'F'
FLAKY_RESULT = 'L'
NO_DATA_RESULT = 'N'
MODIFIER_TO_CHAR = {
TestResult.NONE: PASS_RESULT,
TestResult.DISABLED: SKIP_RESULT,
TestResult.FAILS: FAIL_RESULT,
TestResult.FLAKY: FLAKY_RESULT}
VERSION = 4
VERSION_KEY = 'version'
RESULTS = 'results'
TIMES = 'times'
BUILD_NUMBERS = 'buildNumbers'
TIME = 'secondsSinceEpoch'
TESTS = 'tests'
FIXABLE_COUNT = 'fixableCount'
FIXABLE = 'fixableCounts'
ALL_FIXABLE_COUNT = 'allFixableCount'
RESULTS_FILENAME = 'results.json'
TIMES_MS_FILENAME = 'times_ms.json'
INCREMENTAL_RESULTS_FILENAME = 'incremental_results.json'
URL_FOR_TEST_LIST_JSON = \
'http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s&master=%s'
def __init__(self, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_results_map, svn_revisions=None,
test_results_server=None,
test_type='',
master_name='',
file_writer=None):
"""Modifies the results.json file. Grabs it off the archive directory
if it is not found locally.
Args
builder_name: the builder name (e.g. Webkit).
build_name: the build name (e.g. webkit-rel).
build_number: the build number.
results_file_base_path: Absolute path to the directory containing the
results json file.
builder_base_url: the URL where we have the archived test results.
If this is None no archived results will be retrieved.
test_results_map: A dictionary that maps test_name to TestResult.
svn_revisions: A (json_field_name, revision) pair for SVN
repositories that tests rely on. The SVN revision will be
included in the JSON with the given json_field_name.
test_results_server: server that hosts test results json.
test_type: test type string (e.g. 'layout-tests').
master_name: the name of the buildbot master.
      file_writer: if given, this function is used to write JSON data to a file.
        It must take two arguments, 'file_path' and 'data', and write 'data'
        into 'file_path'.
"""
self._builder_name = builder_name
self._build_name = build_name
self._build_number = build_number
self._builder_base_url = builder_base_url
self._results_directory = results_file_base_path
self._test_results_map = test_results_map
self._test_results = test_results_map.values()
self._svn_revisions = svn_revisions
if not self._svn_revisions:
self._svn_revisions = {}
self._test_results_server = test_results_server
self._test_type = test_type
self._master_name = master_name
self._file_writer = file_writer
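  # Illustrative usage sketch (values are hypothetical, not taken from a real
  # build):
  #   generator = JSONResultsGenerator(
  #       builder_name='Linux Tests', build_name='linux-rel', build_number='123',
  #       results_file_base_path='/tmp/results', builder_base_url=None,
  #       test_results_map=results_map,  # dict of test name -> TestResult
  #       test_results_server='test-results.example.com',
  #       test_type='gtest', master_name='chromium.main')
  #   generator.generate_json_output()
  #   generator.generate_times_ms_file()
  #   generator.upload_json_files([generator.INCREMENTAL_RESULTS_FILENAME,
  #                                generator.TIMES_MS_FILENAME])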
def generate_json_output(self):
json = self.get_json()
if json:
file_path = os.path.join(self._results_directory,
self.INCREMENTAL_RESULTS_FILENAME)
self._write_json(json, file_path)
def generate_times_ms_file(self):
times = generate_test_timings_trie(self._test_results_map.values())
file_path = os.path.join(self._results_directory, self.TIMES_MS_FILENAME)
self._write_json(times, file_path)
def get_json(self):
"""Gets the results for the results.json file."""
results_json = {}
if not results_json:
results_json, error = self._get_archived_json_results()
if error:
# If there was an error don't write a results.json
# file at all as it would lose all the information on the
# bot.
logging.error('Archive directory is inaccessible. Not '
'modifying or clobbering the results.json '
'file: ' + str(error))
return None
builder_name = self._builder_name
if results_json and builder_name not in results_json:
logging.debug('Builder name (%s) is not in the results.json file.'
% builder_name)
self._convert_json_to_current_version(results_json)
if builder_name not in results_json:
results_json[builder_name] = (
self._create_results_for_builder_json())
results_for_builder = results_json[builder_name]
self._insert_generic_metadata(results_for_builder)
self._insert_failure_summaries(results_for_builder)
# Update the all failing tests with result type and time.
tests = results_for_builder[self.TESTS]
all_failing_tests = self._get_failed_test_names()
all_failing_tests.update(convert_trie_to_flat_paths(tests))
for test in all_failing_tests:
self._insert_test_time_and_result(test, tests)
return results_json
def upload_json_files(self, json_files):
"""Uploads the given json_files to the test_results_server (if the
test_results_server is given)."""
if not self._test_results_server:
return
if not self._master_name:
logging.error('--test-results-server was set, but --master-name was not. '
'Not uploading JSON files.')
return
print 'Uploading JSON files for builder: %s' % self._builder_name
attrs = [('builder', self._builder_name),
('testtype', self._test_type),
('master', self._master_name)]
files = [(f, os.path.join(self._results_directory, f)) for f in json_files]
uploader = TestResultsUploader(self._test_results_server)
# Set uploading timeout in case appengine server is having problem.
# 120 seconds are more than enough to upload test results.
uploader.upload(attrs, files, 120)
print 'JSON files uploaded.'
def _write_json(self, json_object, file_path):
# Specify separators in order to get compact encoding.
json_data = simplejson.dumps(json_object, separators=(',', ':'))
json_string = json_data
if self._file_writer:
self._file_writer(file_path, json_string)
else:
with codecs.open(file_path, 'w', 'utf8') as f:
f.write(json_string)
def _get_test_timing(self, test_name):
"""Returns test timing data (elapsed time) in second
for the given test_name."""
if test_name in self._test_results_map:
# Floor for now to get time in seconds.
return int(self._test_results_map[test_name].test_run_time)
return 0
def _get_failed_test_names(self):
"""Returns a set of failed test names."""
return set([r.test_name for r in self._test_results if r.failed])
def _get_modifier_char(self, test_name):
"""Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
for the given test_name.
"""
if test_name not in self._test_results_map:
return self.__class__.NO_DATA_RESULT
test_result = self._test_results_map[test_name]
if test_result.modifier in self.MODIFIER_TO_CHAR.keys():
return self.MODIFIER_TO_CHAR[test_result.modifier]
return self.__class__.PASS_RESULT
def _get_result_char(self, test_name):
"""Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
for the given test_name.
"""
if test_name not in self._test_results_map:
return self.__class__.NO_DATA_RESULT
test_result = self._test_results_map[test_name]
if test_result.modifier == TestResult.DISABLED:
return self.__class__.SKIP_RESULT
if test_result.failed:
return self.__class__.FAIL_RESULT
return self.__class__.PASS_RESULT
def _get_archived_json_results(self):
"""Download JSON file that only contains test
name list from test-results server. This is for generating incremental
JSON so the file generated has info for tests that failed before but
pass or are skipped from current run.
Returns (archived_results, error) tuple where error is None if results
were successfully read.
"""
results_json = {}
old_results = None
error = None
if not self._test_results_server:
return {}, None
results_file_url = (self.URL_FOR_TEST_LIST_JSON % (
urllib2.quote(self._test_results_server),
urllib2.quote(self._builder_name),
self.RESULTS_FILENAME,
urllib2.quote(self._test_type),
urllib2.quote(self._master_name)))
try:
results_file = urllib2.urlopen(results_file_url)
old_results = results_file.read()
except urllib2.HTTPError, http_error:
# A non-4xx status code means the bot is hosed for some reason
# and we can't grab the results.json file off of it.
            if http_error.code < 400 or http_error.code >= 500:
error = http_error
except urllib2.URLError, url_error:
error = url_error
if old_results:
# Strip the prefix and suffix so we can get the actual JSON object.
old_results = strip_json_wrapper(old_results)
try:
results_json = simplejson.loads(old_results)
except ValueError:
logging.debug('results.json was not valid JSON. Clobbering.')
# The JSON file is not valid JSON. Just clobber the results.
results_json = {}
else:
logging.debug('Old JSON results do not exist. Starting fresh.')
results_json = {}
return results_json, error
def _insert_failure_summaries(self, results_for_builder):
"""Inserts aggregate pass/failure statistics into the JSON.
This method reads self._test_results and generates
FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries.
Args:
results_for_builder: Dictionary containing the test results for a
single builder.
"""
# Insert the number of tests that failed or skipped.
fixable_count = len([r for r in self._test_results if r.fixable()])
self._insert_item_into_raw_list(
results_for_builder, fixable_count, self.FIXABLE_COUNT)
# Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
entry = {}
for test_name in self._test_results_map.iterkeys():
result_char = self._get_modifier_char(test_name)
entry[result_char] = entry.get(result_char, 0) + 1
# Insert the pass/skip/failure summary dictionary.
self._insert_item_into_raw_list(results_for_builder, entry, self.FIXABLE)
# Insert the number of all the tests that are supposed to pass.
all_test_count = len(self._test_results)
self._insert_item_into_raw_list(results_for_builder,
all_test_count, self.ALL_FIXABLE_COUNT)
def _insert_item_into_raw_list(self, results_for_builder, item, key):
"""Inserts the item into the list with the given key in the results for
this builder. Creates the list if no such list exists.
Args:
results_for_builder: Dictionary containing the test results for a
single builder.
item: Number or string to insert into the list.
key: Key in results_for_builder for the list to insert into.
"""
if key in results_for_builder:
raw_list = results_for_builder[key]
else:
raw_list = []
raw_list.insert(0, item)
raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
results_for_builder[key] = raw_list
def _insert_item_run_length_encoded(self, item, encoded_results):
"""Inserts the item into the run-length encoded results.
Args:
item: String or number to insert.
encoded_results: run-length encoded results. An array of arrays, e.g.
[[3,'A'],[1,'Q']] encodes AAAQ.
"""
if len(encoded_results) and item == encoded_results[0][1]:
num_results = encoded_results[0][0]
if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
encoded_results[0][0] = num_results + 1
else:
# Use a list instead of a class for the run-length encoding since
# we want the serialized form to be concise.
encoded_results.insert(0, [1, item])
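    # Illustrative sketch (not part of the original module): a worked trace of the
    # run-length encoding handled above, assuming MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG
    # is not exceeded. The newest result is kept at the front of the list.
    #
    #   encoded_results = [[3, 'A'], [1, 'Q']]           # encodes 'AAAQ'
    #   after inserting 'A':  [[4, 'A'], [1, 'Q']]       # encodes 'AAAAQ'
    #   then inserting 'Q':   [[1, 'Q'], [4, 'A'], [1, 'Q']]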
def _insert_generic_metadata(self, results_for_builder):
""" Inserts generic metadata (such as version number, current time etc)
into the JSON.
Args:
results_for_builder: Dictionary containing the test results for
a single builder.
"""
self._insert_item_into_raw_list(results_for_builder, self._build_number,
self.BUILD_NUMBERS)
# Include SVN revisions for the given repositories.
for (name, revision) in self._svn_revisions:
self._insert_item_into_raw_list(results_for_builder, revision,
name + 'Revision')
self._insert_item_into_raw_list(results_for_builder,
int(time.time()),
self.TIME)
def _insert_test_time_and_result(self, test_name, tests):
""" Insert a test item with its results to the given tests dictionary.
Args:
tests: Dictionary containing test result entries.
"""
result = self._get_result_char(test_name)
tm = self._get_test_timing(test_name)
this_test = tests
for segment in test_name.split('/'):
if segment not in this_test:
this_test[segment] = {}
this_test = this_test[segment]
if not len(this_test):
self._populate_results_and_times_json(this_test)
if self.RESULTS in this_test:
self._insert_item_run_length_encoded(result, this_test[self.RESULTS])
else:
this_test[self.RESULTS] = [[1, result]]
if self.TIMES in this_test:
self._insert_item_run_length_encoded(tm, this_test[self.TIMES])
else:
this_test[self.TIMES] = [[1, tm]]
def _convert_json_to_current_version(self, results_json):
"""If the JSON does not match the current version, converts it to the
current version and adds in the new version number.
"""
if self.VERSION_KEY in results_json:
archive_version = results_json[self.VERSION_KEY]
if archive_version == self.VERSION:
return
else:
archive_version = 3
# version 3->4
if archive_version == 3:
for results in results_json.itervalues():
self._convert_tests_to_trie(results)
results_json[self.VERSION_KEY] = self.VERSION
def _convert_tests_to_trie(self, results):
if not self.TESTS in results:
return
test_results = results[self.TESTS]
test_results_trie = {}
for test in test_results.iterkeys():
single_test_result = test_results[test]
add_path_to_trie(test, single_test_result, test_results_trie)
results[self.TESTS] = test_results_trie
def _populate_results_and_times_json(self, results_and_times):
results_and_times[self.RESULTS] = []
results_and_times[self.TIMES] = []
return results_and_times
def _create_results_for_builder_json(self):
results_for_builder = {}
results_for_builder[self.TESTS] = {}
return results_for_builder
| gpl-3.0 | 3,123,908,108,397,054,500 | 32.955166 | 80 | 0.660543 | false |
numberoverzero/finder | scripts/scraper_converter.py | 1 | 2574 | '''
Usage:
python scraper_converter.py scraped.db
Processes the cards scraped using the gatherer downloader and adds sane attribute fields for querying
(int pow/toughness, cmc) and saves the output to the app's database location.
Card attributes are saved according to finder.models.Card
'''
import os
import sqlsoup
sides = [u'left', u'right']
# raw field name => models.Card attribute name
# note that these are only the fields we care about (printedname, printedrules etc are omitted)
field_conversion = {
'id': 'multiverse_id',
'name': 'name',
'cost': 'cost',
'color': 'color',
'type': 'type',
'set': 'set',
'rarity': 'rarity',
'power': 'power',
'toughness': 'toughness',
'rules': 'oracle_rules',
'flavor': 'flavor_text',
'watermark': 'watermark',
'cardnum': 'number',
'artist': 'artist',
'rulings': 'rulings'
}
def convert(indb, scale=10):
    '''Convert each entry in indb using various parsers and save it to the app's database'''
from finder import db as dst
src = sqlsoup.SQLSoup('sqlite:///{}'.format(indb))
rows = src.MTGCardInfo.all()
n = len(rows)
notify_count = 10
notify_interval = 100 / notify_count
last = -notify_interval
for i, row in enumerate(rows):
convert_row(row, dst, scale=scale)
if (100 * i / n) / notify_interval > last:
last = (100 * i / n) / notify_interval
print("{}% ({} / {})".format(last * notify_interval, i, n))
print("100% ({} / {})".format(n, n))
print("\nSaving changes...")
dst.session.commit()
print("Saved!\n")
def convert_row(row, dst, scale=10):
'''Convert a src row into one or more dst cards'''
from finder import util
name = util.sanitize(row.name)
attrs = {dkey: getattr(row, skey) for skey, dkey in field_conversion.iteritems()}
# Split card, process both halves
if u'//' in name:
for side in sides:
dst.session.add(to_card(attrs, scale=scale, split=side))
else:
dst.session.add(to_card(attrs, scale=scale))
def to_card(attrs, scale=10, split=''):
'''attrs is a dictionary whose keys are finder.model.Card attributes.'''
from finder import controllers, models
card = models.Card(**attrs)
controllers.process_card(card, scale=scale, split=split)
return card
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('input', help='sqlite db to load from (gatherer downloader format)')
args = parser.parse_args()
convert(args.input, scale=10)
| mit | -6,135,808,375,069,640,000 | 30.012048 | 102 | 0.635198 | false |
chitr/neutron | neutron/agent/linux/pd.py | 1 | 14123 | # Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import functools
import signal
import six
from stevedore import driver
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.linux import utils as linux_utils
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as l3_constants
from neutron.common import ipv6_utils
from neutron.common import utils
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('pd_dhcp_driver',
default='dibbler',
help=_('Service to handle DHCPv6 Prefix delegation.')),
]
cfg.CONF.register_opts(OPTS)
class PrefixDelegation(object):
def __init__(self, context, pmon, intf_driver, notifier, pd_update_cb,
agent_conf):
self.context = context
self.pmon = pmon
self.intf_driver = intf_driver
self.notifier = notifier
self.routers = {}
self.pd_update_cb = pd_update_cb
self.agent_conf = agent_conf
self.pd_dhcp_driver = driver.DriverManager(
namespace='neutron.agent.linux.pd_drivers',
name=agent_conf.prefix_delegation_driver,
).driver
registry.subscribe(add_router,
resources.ROUTER,
events.BEFORE_CREATE)
registry.subscribe(remove_router,
resources.ROUTER,
events.AFTER_DELETE)
self._get_sync_data()
@utils.synchronized("l3-agent-pd")
def enable_subnet(self, router_id, subnet_id, prefix, ri_ifname, mac):
router = self.routers.get(router_id)
if router is None:
return
pd_info = router['subnets'].get(subnet_id)
if not pd_info:
pd_info = PDInfo(ri_ifname=ri_ifname, mac=mac)
router['subnets'][subnet_id] = pd_info
pd_info.bind_lla = self._get_lla(mac)
if pd_info.sync:
pd_info.mac = mac
pd_info.old_prefix = prefix
else:
self._add_lla(router, pd_info.get_bind_lla_with_mask())
def _delete_pd(self, router, pd_info):
self._delete_lla(router, pd_info.get_bind_lla_with_mask())
if pd_info.client_started:
pd_info.driver.disable(self.pmon, router['ns_name'])
@utils.synchronized("l3-agent-pd")
def disable_subnet(self, router_id, subnet_id):
prefix_update = {}
router = self.routers.get(router_id)
if not router:
return
pd_info = router['subnets'].get(subnet_id)
if not pd_info:
return
self._delete_pd(router, pd_info)
prefix_update[subnet_id] = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
del router['subnets'][subnet_id]
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
@utils.synchronized("l3-agent-pd")
def update_subnet(self, router_id, subnet_id, prefix):
router = self.routers.get(router_id)
if router is not None:
pd_info = router['subnets'].get(subnet_id)
if pd_info and pd_info.old_prefix != prefix:
old_prefix = pd_info.old_prefix
pd_info.old_prefix = prefix
return old_prefix
@utils.synchronized("l3-agent-pd")
def add_gw_interface(self, router_id, gw_ifname):
router = self.routers.get(router_id)
prefix_update = {}
if not router:
return
router['gw_interface'] = gw_ifname
for subnet_id, pd_info in six.iteritems(router['subnets']):
# gateway is added after internal router ports.
# If a PD is being synced, and if the prefix is available,
# send update if prefix out of sync; If not available,
# start the PD client
bind_lla_with_mask = pd_info.get_bind_lla_with_mask()
if pd_info.sync:
pd_info.sync = False
if pd_info.client_started:
if pd_info.prefix != pd_info.old_prefix:
                        prefix_update[subnet_id] = pd_info.prefix
else:
self._delete_lla(router, bind_lla_with_mask)
self._add_lla(router, bind_lla_with_mask)
else:
self._add_lla(router, bind_lla_with_mask)
if prefix_update:
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
def delete_router_pd(self, router):
prefix_update = {}
for subnet_id, pd_info in six.iteritems(router['subnets']):
self._delete_lla(router, pd_info.get_bind_lla_with_mask())
if pd_info.client_started:
pd_info.driver.disable(self.pmon, router['ns_name'])
pd_info.prefix = None
pd_info.client_started = False
prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
prefix_update[subnet_id] = prefix
if prefix_update:
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
@utils.synchronized("l3-agent-pd")
def remove_gw_interface(self, router_id):
router = self.routers.get(router_id)
if router is not None:
router['gw_interface'] = None
self.delete_router_pd(router)
@utils.synchronized("l3-agent-pd")
def sync_router(self, router_id):
router = self.routers.get(router_id)
if router is not None and router['gw_interface'] is None:
self.delete_router_pd(router)
@utils.synchronized("l3-agent-pd")
def remove_stale_ri_ifname(self, router_id, stale_ifname):
router = self.routers.get(router_id)
if router is not None:
for subnet_id, pd_info in router['subnets'].items():
if pd_info.ri_ifname == stale_ifname:
self._delete_pd(router, pd_info)
del router['subnets'][subnet_id]
@staticmethod
def _get_lla(mac):
lla = ipv6_utils.get_ipv6_addr_by_EUI64(l3_constants.IPV6_LLA_PREFIX,
mac)
return lla
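    # Illustrative sketch (not part of the original module): the LLA is derived from
    # the MAC address using the modified EUI-64 rules, e.g. (values assumed purely
    # for illustration):
    #
    #   mac = 'fa:16:3e:33:44:55'
    #   -> flip the universal/local bit of the first octet and insert ff:fe
    #   -> fe80::f816:3eff:fe33:4455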
def _get_llas(self, gw_ifname, ns_name):
try:
return self.intf_driver.get_ipv6_llas(gw_ifname, ns_name)
except RuntimeError:
# The error message was printed as part of the driver call
# This could happen if the gw_ifname was removed
# simply return and exit the thread
return
def _add_lla(self, router, lla_with_mask):
if router['gw_interface']:
self.intf_driver.add_ipv6_addr(router['gw_interface'],
lla_with_mask,
router['ns_name'],
'link')
# There is a delay before the LLA becomes active.
# This is because the kernel runs DAD to make sure LLA uniqueness
# Spawn a thread to wait for the interface to be ready
self._spawn_lla_thread(router['gw_interface'],
router['ns_name'],
lla_with_mask)
def _spawn_lla_thread(self, gw_ifname, ns_name, lla_with_mask):
eventlet.spawn_n(self._ensure_lla_task,
gw_ifname,
ns_name,
lla_with_mask)
def _delete_lla(self, router, lla_with_mask):
if lla_with_mask and router['gw_interface']:
try:
self.intf_driver.delete_ipv6_addr(router['gw_interface'],
lla_with_mask,
router['ns_name'])
except RuntimeError:
# Ignore error if the lla doesn't exist
pass
def _ensure_lla_task(self, gw_ifname, ns_name, lla_with_mask):
        # Waiting this long is unreasonable unless the DAD test failed.
        # In that case, the subnet would never be assigned a prefix.
linux_utils.wait_until_true(functools.partial(self._lla_available,
gw_ifname,
ns_name,
lla_with_mask),
timeout=l3_constants.LLA_TASK_TIMEOUT,
sleep=2)
def _lla_available(self, gw_ifname, ns_name, lla_with_mask):
llas = self._get_llas(gw_ifname, ns_name)
if self._is_lla_active(lla_with_mask, llas):
LOG.debug("LLA %s is active now" % lla_with_mask)
self.pd_update_cb()
return True
@staticmethod
def _is_lla_active(lla_with_mask, llas):
for lla in llas:
if lla_with_mask == lla['cidr']:
return not lla['tentative']
return False
@utils.synchronized("l3-agent-pd")
def process_prefix_update(self):
LOG.debug("Processing IPv6 PD Prefix Update")
prefix_update = {}
for router_id, router in six.iteritems(self.routers):
if not router['gw_interface']:
continue
llas = None
for subnet_id, pd_info in six.iteritems(router['subnets']):
if pd_info.client_started:
prefix = pd_info.driver.get_prefix()
if prefix != pd_info.prefix:
pd_info.prefix = prefix
prefix_update[subnet_id] = prefix
else:
if not llas:
llas = self._get_llas(router['gw_interface'],
router['ns_name'])
if self._is_lla_active(pd_info.get_bind_lla_with_mask(),
llas):
if not pd_info.driver:
pd_info.driver = self.pd_dhcp_driver(
router_id, subnet_id, pd_info.ri_ifname)
pd_info.driver.enable(self.pmon, router['ns_name'],
router['gw_interface'],
pd_info.bind_lla)
pd_info.client_started = True
if prefix_update:
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
def after_start(self):
LOG.debug('SIGUSR1 signal handler set')
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
def _handle_sigusr1(self, signum, frame):
"""Update PD on receiving SIGUSR1.
The external DHCPv6 client uses SIGUSR1 to notify agent
of prefix changes.
"""
self.pd_update_cb()
def _get_sync_data(self):
sync_data = self.pd_dhcp_driver.get_sync_data()
for pd_info in sync_data:
router_id = pd_info.router_id
if not self.routers.get(router_id):
self.routers[router_id] = {'gw_interface': None,
'ns_name': None,
'subnets': {}}
new_pd_info = PDInfo(pd_info=pd_info)
subnets = self.routers[router_id]['subnets']
subnets[pd_info.subnet_id] = new_pd_info
@utils.synchronized("l3-agent-pd")
def remove_router(resource, event, l3_agent, **kwargs):
router_id = kwargs['router'].router_id
router = l3_agent.pd.routers.get(router_id)
l3_agent.pd.delete_router_pd(router)
del l3_agent.pd.routers[router_id]['subnets']
del l3_agent.pd.routers[router_id]
def get_router_entry(ns_name):
return {'gw_interface': None,
'ns_name': ns_name,
'subnets': {}}
@utils.synchronized("l3-agent-pd")
def add_router(resource, event, l3_agent, **kwargs):
added_router = kwargs['router']
router = l3_agent.pd.routers.get(added_router.router_id)
if not router:
l3_agent.pd.routers[added_router.router_id] = (
get_router_entry(added_router.ns_name))
else:
# This will happen during l3 agent restart
router['ns_name'] = added_router.ns_name
class PDInfo(object):
"""A class to simplify storing and passing of information relevant to
Prefix Delegation operations for a given subnet.
"""
def __init__(self, pd_info=None, ri_ifname=None, mac=None):
if pd_info is None:
self.prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
self.old_prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
self.ri_ifname = ri_ifname
self.mac = mac
self.bind_lla = None
self.sync = False
self.driver = None
self.client_started = False
else:
self.prefix = pd_info.prefix
self.old_prefix = None
self.ri_ifname = pd_info.ri_ifname
self.mac = None
self.bind_lla = None
self.sync = True
self.driver = pd_info.driver
self.client_started = pd_info.client_started
def get_bind_lla_with_mask(self):
bind_lla_with_mask = '%s/64' % self.bind_lla
return bind_lla_with_mask
| apache-2.0 | 830,436,446,756,060,500 | 38.339833 | 78 | 0.553636 | false |
voostar/WangWangExcavator | py2exe_script/TrapForBB.py | 1 | 6634 | #!
#--* coding=utf-8 *--
import urllib2
from bs4 import BeautifulSoup as bs
import base64
import subprocess
import re
import time
import logging
import os, sys
# Define
FILE_PATH = "./web_links.txt"
URLDICT = {
u"南宁市科技局": "http://www.nnst.gov.cn",
u"南宁市工信委": "http://219.159.80.227/info/infopen.htm",
u"南宁市发改委": "http://fgw.nanning.gov.cn",
u"南宁市农业局": "http://www.nnny.gov.cn",
u"南宁市环保局": "http://www.nnhb.gov.cn",
u"南宁市商务局": "http://sw.nanning.gov.cn",
u"广西工信委": "http://www.gxgxw.gov.cn",
u"广西科技厅": "http://www.gxst.gov.cn",
u"广西发改委": "http://www.gxdrc.gov.cn",
u"广西农业信息网": "http://www.gxny.gov.cn",
u"广西商务厅": "http://www.gxswt.gov.cn",
u"南宁市中小企业信息网": "http://www.smenn.gov.cn",
u"广西中小企业信息网": "http://www.smegx.gov.cn",
}
KEYWORD = [u'申报', u'项目', u'通知', u'验收', u'立项', u'资金', u'课题']
AVOIDWORD = [u'通知公告', u'在线申报', u'招商项目', u'项目申报指南', u'项目申报',
u'农村经济项目', u'招商项目']
# define logger
logger = logging.getLogger(u"Excavator")
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
def readLinks(file_path):
print u"开始读取网站列表".encode('utf-8')
with open(file_path, 'r') as f:
lines = f.readlines()
result_dict = {}
for i in lines:
print i
site_name = i.split("=")[0].strip("\n")
site_url = i.split("=")[1].strip("\n")
result_dict[site_name] = site_url
return result_dict
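# Illustrative sketch (not part of the original script): readLinks() expects one
# "name=url" pair per line in web_links.txt, matching what checkWebLinks() writes
# out below; the file would look roughly like (example entries only):
#
#   南宁市科技局=http://www.nnst.gov.cn
#   广西科技厅=http://www.gxst.gov.cn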
def checkWebLinks(file_path):
"""
    Check if the web links file exists. If not, recreate the default one.
"""
sleep_time = 10
if not os.path.exists(file_path):
print u"找不到网站列表,已经重新生存web_links.txt。".encode("utf-8")
with open('./web_links.txt', 'w') as f:
for k, v in URLDICT.items():
f.write('%s=%s\n' % (k.encode('utf-8'),v))
print u"%d 秒后将读取默认列表..." % sleep_time
time.sleep(sleep_time)
url_dict = readLinks(file_path)
return url_dict
def trapAllLinks(URLDICT):
result_list = {}
for website, url in URLDICT.items():
# logger.debug("Digging %s..." % website)
try:
page_content = urllib2.urlopen(url)
soup = bs(page_content)
links_in_pages = soup.find_all('a')
result_list[website] = links_in_pages
# logger.debug("Dug %s, got %d links." % (website, len(links_in_pages)))
except:
logger.debug("Dug %s Failed" % website)
pass
return result_list
def parseURL(website, a_soup):
result_dict = {}
for key_word in KEYWORD:
for link in a_soup:
# Title less than 8 characters will be drop
if key_word in link.text and len(link.text.strip()) > 8:
result_dict[link.text.strip()] = sillyURLReformat(
website, link['href'])
return result_dict
def sillyURLReformat(website, url):
"""
    The silly websites don't use a standard url format, so the links need to be reassembled.
"""
if website == u"南宁市工信委":
url = "http://219.159.80.227/" + url
if url.startswith("./"):
url = URLDICT[website] + url.replace("./", "/")
elif url.startswith("http://"):
pass
elif not url.startswith("/"):
url = URLDICT[website] + '/' + url
elif 'java' in url:
pass
elif URLDICT[website] in url:
pass
else:
url = URLDICT[website] + url
return url
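# Illustrative sketch (not part of the original script): how sillyURLReformat()
# rewrites the scraped links (the input paths below are examples only):
#
#   sillyURLReformat(u"南宁市科技局", "./info/1.htm")   -> "http://www.nnst.gov.cn/info/1.htm"
#   sillyURLReformat(u"南宁市科技局", "info/1.htm")     -> "http://www.nnst.gov.cn/info/1.htm"
#   sillyURLReformat(u"南宁市科技局", "http://a.cn/1")  -> returned unchanged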
def exportToHTML(result_dict):
table_container = ""
for website, title_and_link in result_dict.items():
website_line = "<tr><td><H2>" + website + "</H2></td></tr>"
for title, link in title_and_link.items():
link_line = "<tr><td>" + \
"<a href=\"%s\" target=_blank>%s</a>" % (link, title) + "</td></tr>"
website_line += link_line
website_line += "<tr><td></td></tr>"
table_container += website_line
html_container = u"""
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>旺旺的挖掘机</title>
</head>
<body>
<table>
%s
</table>
<hr>
I'm a Robot of [email protected]!
</body>
</html>
""" % table_container
file_time = time.localtime()
file_name = "%d-%d-%d %d%d.html" % (
file_time.tm_year,
file_time.tm_mon,
file_time.tm_mday,
file_time.tm_hour,
file_time.tm_min,)
with open('./' + file_name, 'w') as f:
f.write(html_container.encode("utf-8"))
def main():
start = time.time()
print u"检查: 网站列表是否存在".encode('utf-8')
checkWebLinks(FILE_PATH)
print u"任务开始于 ".encode('utf-8') + time.ctime()
result_dict = {}
# get all links in target websites
all_link_in_soup = trapAllLinks(URLDICT)
for website, a_soup in all_link_in_soup.items():
# logger.debug("Parsing %s..." % website)
title_and_link = parseURL(website, a_soup)
result_dict[website] = title_and_link
# logger.debug("Parsed %s, Matched %d links." %
# (website, len(title_and_link)))
# Show result
print '=' * 80
print 'Result'
print '=' * 80
for website, title_and_link in result_dict.items():
print 'Result of %s : %s' % (website.encode("utf-8"), URLDICT[website])
print '-' * 40
for title, link in title_and_link.items():
print '- %s' % title.encode("utf-8")
print '- %s' % link
print ''
print '-' * 40
print '=' * 80
print 'EOF'
print '=' * 80
print '-> I\'m a Robot of longzhiw'
end = time.time()
print "Job finish at " + time.ctime()
print "Cost time %s" % str(end - start)
exportToHTML(result_dict)
if __name__ == '__main__':
main() | apache-2.0 | -5,009,635,365,505,735,000 | 29.636364 | 84 | 0.542478 | false |
geraldspreer/the-maker | makerProjectLanguages.py | 1 | 4952 | #!/usr/bin/python
# -*- coding: utf-8 -*-
possibleLanguages = {
"Afar": "a",
"Abkhazian": "ab",
"Afrikaans": "af",
"Akan": "ak",
"Albanian": "sq",
"Amharic": "am",
"Arabic": "ar",
"Aragonese": "an",
"Armenian": "hy",
"Assamese": "as",
"Avaric": "av",
"Avestan": "ae",
"Aymara": "ay",
"Azerbaijani": "az",
"Bashkir": "ba",
"Bambara": "bm",
"Basque": "eu",
"Belarusian": "be",
"Bengali": "bn",
"Bihari": "bh",
"Bislama": "bi",
"Tibetan": "bo",
"Bosnian": "bs",
"Breton": "br",
"Bulgarian": "bg",
"Burmese": "my",
"Catalan": "ca",
"Valencian": "ca",
"Czech": "cs",
"Chamorro": "ch",
"Chechen": "ce",
"Chinese": "zh",
"Church Slavic": "cu",
"Old Slavonic": "cu",
"Church Slavonic": "cu",
"Old Bulgarian": "cu",
"Old Church Slavonic": "cu",
"Chuvash": "cv",
"Cornish": "kw",
"Corsican": "co",
"Cree": "cr",
"Welsh": "cy",
"Czech": "cs",
"Danish": "da",
"German": "de",
"Divehi": "dv",
"Dhivehi": "dv",
"Maldivian": "dv",
"Dutch": "nl",
"Flemish": "nl",
"Dzongkha": "dz",
"Greek, Modern": "el",
"English": "en",
"Esperanto": "eo",
"Estonian": "et",
"Basque": "eu",
"Ewe": "ee",
"Faroese": "fo",
"Persian": "fa",
"Fijian": "fj",
"Finnish": "fi",
"French": "fr",
"Western Frisian": "fy",
"Fulah": "ff",
"Georgian": "ka",
"German": "de",
"Gaelic": "gd",
"Scottish Gaelic": "gd",
"Irish": "ga",
"Galician": "gl",
"Manx": "gv",
"Guarani": "gn",
"Gujarati": "gu",
"Haitian": "ht",
"Haitian Creole": "ht",
"Hausa": "ha",
"Hebrew": "he",
"Herero": "hz",
"Hindi": "hi",
"Hiri Motu": "ho",
"Croatian": "hr",
"Hungarian": "hu",
"Armenian": "hy",
"Igbo": "ig",
"Icelandic": "is",
"Ido": "io",
"Sichuan Yi": "ii",
"Inuktitut": "iu",
"Interlingue": "ie",
"Interlingua": "ia",
"Indonesian": "id",
"Inupiaq": "ik",
"Icelandic": "is",
"Italian": "it",
"Javanese": "jv",
"Japanese": "ja",
"Kalaallisut": "kl",
"Greenlandic": "kl",
"Kannada": "kn",
"Kashmiri": "ks",
"Georgian": "ka",
"Kanuri": "kr",
"Kazakh": "kk",
"Central Khmer": "km",
"Kikuyu; Gikuyu": "ki",
"Kinyarwanda": "rw",
"Kirghiz; Kyrgyz": "ky",
"Komi": "kv",
"Kongo": "kg",
"Korean": "ko",
"Kuanyama": "kj",
"Kwanyama": "kj",
"Kurdish": "ku",
"Lao": "lo",
"Latin": "la",
"Latvian": "lv",
"Limburgan": "li",
"Limburger": "li",
"Limburgish": "li",
"Lingala": "ln",
"Lithuanian": "lt",
"Luxembourgish": "lb",
"Letzeburgesch": "lb",
"Luba-Katanga": "lu",
"Ganda": "lg",
"Macedonian": "mk",
"Marshallese": "mh",
"Malayalam": "ml",
"Maori": "mi",
"Marathi": "mr",
"Malay": "ms",
"Macedonian": "mk",
"Malagasy": "mg",
"Maltese": "mt",
"Moldavian": "mo",
"Mongolian": "mn",
"Maori": "mi",
"Malay": "ms",
"Burmese": "my",
"Nauru": "na",
"Navajo": "nv",
"Navaho": "nv",
"Ndebele, South": "nr",
"South Ndebele": "nr",
"Ndebele, North": "nd",
"North Ndebele": "nd",
"Ndonga": "ng",
"Nepali": "ne",
"Dutch": "nl",
"Flemish": "nl",
"Norwegian Nynorsk; Nynorsk, Norwegian": "nn",
"Norwegian": "no",
"Chichewa": "ny",
"Chewa": "ny",
"Nyanja": "ny",
"Ojibwa": "oj",
"Oriya": "or",
"Oromo": "om",
"Ossetian; Ossetic": "os",
"Panjabi; Punjabi": "pa",
"Persian": "fa",
"Pali": "pi",
"Polish": "pl",
"Portuguese": "pt",
"Pushto": "ps",
"Quechua": "qu",
"Romansh": "rm",
"Romanian": "ro",
"Romanian": "ro",
"Rundi": "rn",
"Russian": "ru",
"Sango": "sg",
"Sanskrit": "sa",
"Serbian": "sr",
"Croatian": "hr",
"Sinhala; Sinhalese": "si",
"Slovak": "sk",
"Slovenian": "sl",
"Northern Sami": "se",
"Samoan": "sm",
"Shona": "sn",
"Sindhi": "sd",
"Somali": "so",
"Sotho, Southern": "st",
"Spanish; Castilian": "es",
"Albanian": "sq",
"Sardinian": "sc",
"Serbian": "sr",
"Swati": "ss",
"Sundanese": "su",
"Swahili": "sw",
"Swedish": "sv",
"Tahitian": "ty",
"Tamil": "ta",
"Tatar": "tt",
"Telugu": "te",
"Tajik": "tg",
"Tagalog": "tl",
"Thai": "th",
"Tibetan": "bo",
"Tigrinya": "ti",
"Tonga (Tonga Islands)": "to",
"Tswana": "tn",
"Tsonga": "ts",
"Turkmen": "tk",
"Turkish": "tr",
"Twi": "tw",
"Uighur; Uyghur": "ug",
"Ukrainian": "uk",
"Urdu": "ur",
"Uzbek": "uz",
"Venda": "ve",
"Vietnamese": "vi",
"Welsh": "cy",
"Walloon": "wa",
"Wolof": "wo",
"Xhosa": "xh",
"Yiddish": "yi",
"Yoruba": "yo",
"Zhuang; Chuang": "za",
"Chinese": "zh",
"Zulu": "zu",
}
| gpl-3.0 | 3,721,244,761,717,408,000 | 20.814978 | 50 | 0.446486 | false |
gen2brain/comic-utils | comicutils/gui.py | 1 | 13537 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import glob
import time
import shutil
try:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
except ImportError, err:
sys.stderr.write("Error: %s%s" % (str(err), os.linesep))
sys.exit(1)
try:
from comicutils.ui.cc_ui import Ui_MainWindow
from comicutils import utils
except ImportError, err:
sys.stderr.write("Error: %s%s" % (str(err), os.linesep))
sys.exit(1)
class ComicConvert(QMainWindow, Ui_MainWindow):
toggle_current = pyqtSignal()
show_message = pyqtSignal(str)
show_progress = pyqtSignal(bool)
item_status = pyqtSignal()
def __init__(self, parent = None):
QMainWindow.__init__(self, parent)
self.setupUi(self)
self.setFixedSize(self.sizeHint())
self.setWindowFlags((self.windowFlags() | Qt.CustomizeWindowHint) & ~Qt.WindowMaximizeButtonHint)
self.progressBar.hide()
self.set_opts()
self.model = QStandardItemModel()
self.treeView.setModel(self.model)
self.treeView.setSortingEnabled(True)
self.set_columns()
self.treeView.show()
self.connect_signals()
self.thread = Worker(self)
def connect_signals(self):
self.connect(self.opt_exclude,
SIGNAL("stateChanged(int)"), self.on_opt_exclude_stateChanged)
self.connect(self.opt_recursive,
SIGNAL("stateChanged(int)"), self.on_opt_recursive_stateChanged)
self.connect(self.opt_size,
SIGNAL("valueChanged(int)"), self.on_opt_size_valueChanged)
self.connect(self.select_lineEdit,
SIGNAL("returnPressed()"), self.refresh_treeview)
self.connect(self.progressBar,
SIGNAL("valueChanged(int)"), self.on_progress_bar_changed)
self.connect(self,
SIGNAL("toggle_current(PyQt_PyObject)"), self.on_toggle_current)
self.connect(self,
SIGNAL("show_message(PyQt_PyObject)"), self.on_show_message)
self.connect(self,
SIGNAL("show_progress(PyQt_PyObject)"), self.on_show_progress)
self.connect(self.treeView,
SIGNAL("doubleClicked(QModelIndex)"), self.on_double_click)
self.connect(self,
SIGNAL("item_status(PyQt_PyObject, PyQt_PyObject)"), self.on_item_status)
def set_opts(self):
self.opts = {}
self.opts['size'] = None
self.opts['recursive'] = False
self.opts['outdir'] = None
self.opts['bmp-4'] = False
self.opts['bmp-8'] = False
self.opts['jpeg'] = False
self.opts['png'] = False
self.opts['scale'] = '100%'
self.opts['quality'] = '85'
self.opts['level'] = None
self.opts['cover'] = False
self.opts['nocover'] = False
self.opts['norgb'] = False
self.opts['exclude'] = []
self.opts['rar'] = False
self.opts['zip'] = False
self.opts['suffix'] = ''
self.opts['verbose'] = False
def set_columns(self):
self.model.setHorizontalHeaderLabels(
["Filename", "Filetype", "Filesize", "Status"])
self.treeView.setColumnWidth(0, 460)
self.treeView.setColumnWidth(1, 100)
self.treeView.setColumnWidth(2, 100)
self.treeView.setColumnWidth(3, 100)
def refresh_treeview(self):
self.model.clear()
args = glob.glob(str(self.select_lineEdit.text()))
self.comics = utils.get_comics(args, self.opts, self.opts['size'])
for comic in self.comics:
filename, basename, fileext, filedir, fullpath, filetype, filesize, filemtime, fileuri = comic
row = [filename, filetype, utils.filesizeformat(filesize)]
items = []
sitems = [QStandardItem(item) for item in row]
for item in sitems:
item.setData(comic)
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
items.append(item)
self.model.appendRow(items)
self.set_columns()
self.refresh_status()
def refresh_status(self):
sizesum = 0
rowcount = self.model.rowCount()
for row in range(rowcount):
index = self.model.index(row, 0)
item = self.model.itemFromIndex(index)
comic = item.data().toPyObject()
filename, basename, fileext, filedir, fullpath, filetype, filesize, filemtime, fileuri = comic
sizesum += filesize
self.comicStatus.setText("comics: %d\t|\tsize: %s" % (
rowcount, utils.filesizeformat(sizesum)))
def get_options(self):
self.opts['outdir'] = str(self.opt_outdir.text())
self.opts['suffix'] = str(self.opt_suffix.text())
self.opts['nocover'] = self.opt_exclude_cover.isChecked()
self.opts['norgb'] = self.opt_exclude_rgb.isChecked()
self.opts['scale'] = str(self.opt_scale.text())
self.opts['level'] = str(self.opt_level.text())
self.opts['quality'] = str(self.opt_quality.value())
if self.opt_exclude.isChecked():
self.opts['exclude'] = str(self.opt_exclude_list.text()).split(",")
image_format = self.image_comboBox.itemData(
self.image_comboBox.currentIndex(), 2).toPyObject()
if image_format:
self.opts[str(image_format)] = True
archive_format = self.archive_comboBox.itemData(
self.archive_comboBox.currentIndex(), 2).toPyObject()
if archive_format:
self.opts[str(archive_format)] = True
def set_enabled(self, enabled=True):
self.convert_pushButton.setEnabled(enabled)
self.cancel_pushButton.setEnabled(not enabled)
self.select_pushButton.setEnabled(enabled)
self.refresh_pushButton.setEnabled(enabled)
self.outdir_pushButton.setEnabled(enabled)
self.opt_recursive.setEnabled(enabled)
self.opt_size.setEnabled(enabled)
def on_toggle_current(self, index):
self.treeView.selectionModel().setCurrentIndex(
index, QItemSelectionModel.ToggleCurrent)
def on_show_message(self, message=None):
if not message:
self.statusBar.clearMessage()
else:
self.statusBar.showMessage(message)
def on_show_progress(self, progress=True):
if progress:
self.progressBar.show()
else:
self.progressBar.hide()
def on_double_click(self, index):
if self.thread.isRunning(): return
item = self.model.itemFromIndex(index)
self.model.removeRow(item.row())
self.refresh_status()
def on_item_status(self, item, status):
self.model.setItem(item.row(), 3, QStandardItem(status))
def on_opt_exclude_stateChanged(self, enabled):
self.opt_exclude_list.setEnabled(enabled)
self.opt_exclude_list.setFocus()
def on_opt_recursive_stateChanged(self, enabled):
self.opts['recursive'] = bool(enabled)
self.refresh_treeview()
def on_opt_size_valueChanged(self, value):
self.opts['size'] = int(value)
self.refresh_treeview()
@pyqtSignature("")
def on_convert_pushButton_clicked(self):
self.get_options()
self.set_enabled(False)
self.thread.start()
@pyqtSignature("")
def on_cancel_pushButton_clicked(self):
self.set_enabled(True)
self.thread.stop()
@pyqtSignature("")
def on_select_pushButton_clicked(self):
dialog = QFileDialog()
ext_filter = "Comic book archives (*.cbz *.cbr);;All files (*)"
dialog.setNameFilter(ext_filter)
dialog.setFileMode(QFileDialog.ExistingFiles)
self.select_lineEdit.setText(dialog.getOpenFileName(filter=ext_filter))
self.refresh_treeview()
@pyqtSignature("")
def on_outdir_pushButton_clicked(self):
dialog = QFileDialog()
dialog.setFileMode(QFileDialog.Directory)
dir_name = dialog.getExistingDirectory()
self.opt_outdir.setText(dir_name)
@pyqtSignature("")
def on_refresh_pushButton_clicked(self):
self.refresh_treeview()
@pyqtSignature("")
def on_close_pushButton_clicked(self):
self.close()
@pyqtSignature("")
def on_about_pushButton_clicked(self):
about = """
<p align="center">Comic Convert 0.5<br>
<a href="https://github.com/gen2brain/comic-utils">
<span style="text-decoration:underline;color:#0057ae;">https://github.com/gen2brain/comic-utils</span></a></p>
<p align="center">Author:<br>
Milan Nikolic ([email protected])</p>
<p align="center">This program is released under the terms of the<br>
<a href="http://www.gnu.org/licenses/gpl-3.0.txt">
<span style="text-decoration:underline;color:#0057ae;">GNU General Public License version 3.</span></a></p>
<p align="center">Powered by:<br>
<a href="http://www.stripzona.com">
<span style="text-decoration:underline;color:#0057ae;">http://www.stripzona.com</span></a></p>"""
QMessageBox.about(self, "About", about)
@pyqtSignature("")
def on_progress_bar_changed(self, value):
self.progressBar.setValue(value)
class Worker(QThread):
def __init__(self, parent = None):
QThread.__init__(self, parent)
self.parent = parent
self.opts = parent.opts
self.tmpdir = None
self.exiting = False
self.item = None
def __del__(self):
self.exiting = True
self.wait()
def stop(self):
self.exiting = True
self.show_progress(False)
self.show_message(None)
if self.item:
self.item_status(self.item, "")
if self.tmpdir:
shutil.rmtree(self.tmpdir)
self.terminate()
def item_status(self, item, status):
self.parent.emit(SIGNAL("item_status(PyQt_PyObject, PyQt_PyObject)"), item, status)
def show_progress(self, progress):
self.parent.emit(SIGNAL("show_progress(PyQt_PyObject)"), progress)
def show_message(self, message, row=None):
if row is not None:
rowcount = self.parent.model.rowCount()
message = "File %d of %d - %s" % (row+1, rowcount, message)
self.parent.emit(SIGNAL("show_message(PyQt_PyObject)"), message)
def run(self):
self.exiting = False
rowcount = self.parent.model.rowCount()
while not self.exiting:
for row in range(rowcount):
index = self.parent.model.index(row, 0)
item = self.parent.model.itemFromIndex(index)
self.item = item
self.parent.emit(SIGNAL("toggle_current(PyQt_PyObject)"), index)
comic = item.data().toPyObject()
filename,basename,fileext,filedir,fullpath,filetype,filesize,filemtime,fileuri = comic
self.item_status(item, "CONVERTING...")
if self.opts['outdir']:
convdir = os.path.realpath(self.opts['outdir'])
else:
convdir = os.path.join(filedir, '_converted')
if os.path.isfile(os.path.join(convdir, filename)):
self.show_message('Skipping, file exists in %s' % convdir, row)
self.item_status(item, "SKIPPED")
time.sleep(1)
continue
self.show_message('Unpacking file %s (%sMB)...' % (
filename, filesize/(1024*1024)), row)
self.item_status(item, "UNPACKING...")
tempdir = utils.unpack_archive(fullpath, filetype, filename)
if tempdir is not None:
self.tmpdir = tempdir
self.show_message('File %s unpacked' % filename, row)
self.item_status(item, "CONVERTING...")
self.parent.emit(SIGNAL("show_progress(PyQt_PyObject)"), True)
if utils.convert_images(tempdir, self.opts, self.parent, row):
self.parent.progressBar.emit(SIGNAL("valueChanged(int)"), 0)
self.parent.emit(SIGNAL("show_progress(PyQt_PyObject)"), False)
self.show_message('File %s converted' % filename, row)
if not self.opts['outdir'] and not os.path.isdir(convdir):
os.mkdir(convdir)
if self.opts['suffix']:
basename = basename + self.opts['suffix']
filename = basename + fileext
if self.opts['rar']:
filetype = 'RAR'
filename = '%s.cbr' % basename
elif self.opts['zip']:
filetype = 'ZIP'
filename = '%s.cbz' % basename
self.show_message('Packing %s...' % filename, row)
self.item_status(item, "PACKING...")
filepath = os.path.join(convdir, filename)
if utils.pack_archive(tempdir, filetype, filepath):
self.show_message('File %s packed.' % filepath, row)
shutil.rmtree(tempdir)
self.item_status(item, "OK")
self.tmpdir = None
time.sleep(1)
self.show_message(None)
self.parent.set_enabled(True)
self.exiting = True
| gpl-3.0 | 3,185,777,950,968,541,000 | 37.240113 | 110 | 0.584915 | false |
NuWro/nuwro | src/rew/validation/qel_ratio.py | 1 | 1176 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
particles = ("numu", "numubar")
energies = (500, 1000, 1500)
mas = (800, 1000, 1200)
dyns = ("cc", "nc")
inpath = "qelma"
def plot(what):
with PdfPages("rew_val_qelma_ratio_{}.pdf".format(what)) as pdf:
for particle in particles:
for energy in energies:
for ma in mas:
for dyn in dyns:
plt.figure()
plt.title("{}, {}, E = {}, Ma = {}".format(particle, dyn, energy, ma))
x1, y1 = np.loadtxt("{}/{}{}_{}_ma{}_{}.txt".format(inpath, particle, energy, dyn, ma, what), delimiter=' ', unpack=True)
x2, y2 = np.loadtxt("{}/{}{}_{}_rewto_ma{}_{}.txt".format(inpath, particle, energy, dyn, ma, what), delimiter=' ', unpack=True)
plt.xlabel(what)
plt.ylabel("#events")
plt.plot(x1, (y2 - y1) / (y2 + y1), 'r-', label='(r - n) / (r + n)')
plt.legend()
pdf.savefig()
plot("Q2")
plot("Tk")
plot("ang") | gpl-3.0 | 3,190,528,384,730,777,000 | 33.617647 | 151 | 0.482143 | false |
ben-e-whitney/mpi-tutorial | scripts/c.py | 1 | 3280 | import os
class Macro:
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
return '#define {nam} {val}'.format(nam=self.name, val=self.value)
class Typedef:
def __init__(self, type_specifier, declarator):
self.type_specifier = type_specifier
self.declarator = declarator
def __str__(self):
return 'typedef {spe} {dec};'.format(spe=self.type_specifier,
dec=self.declarator)
class Enum:
def __init__(self, name, values=None):
self.name = 'enum {name}'.format(name=name)
self.values = values or []
def append(self, value):
self.values.append(value)
def __str__(self):
return '{name} {{{vals}}};'.format(
name=self.name,
vals=', '.join(self.values)
)
class Function:
def __init__(self, name, argument_types, return_type, argument_names,
lines):
self.name = name
self.argument_types = argument_types
self.return_type = return_type
self.argument_names = argument_names
assert len(self.argument_types) == len(self.argument_names)
self.lines = lines
def arguments(self, names=False):
return self.argument_types if not names else tuple(map(
lambda t, n: '{t} {n}'.format(t=t, n=n),
self.argument_types,
self.argument_names,
))
def signature(self, names=False):
return '{return_type} {name}({arglist}){end}'.format(
return_type=self.return_type,
name=self.name,
arglist=', '.join(self.arguments(names=names)),
end=';' if not names else ' {',
)
def __str__(self):
return '\n'.join([
self.signature(names=True),
*self.lines,
'}',
])
class Classifier(Function):
def __init__(self, name, argument_type, return_type, pairings,
argument_name='x',
comparer=lambda a, b: '{a} == {b}'.format(a=a, b=b)):
lines = [
'if ({condition}) return {key};'.format(
key=key,
condition=' || '.join(
comparer(argument_name, value) for value in values
),
) for key, values in pairings.items()
]
lines.extend([
(
'fprintf(stderr, "Unrecognized {arg_t} \'{flg}.\'\\n", {var});'
).format(
arg_t=argument_type,
#Good enough for now.
flg='%d' if 'enum' in argument_type else '%s',
var=argument_name
),
'exit(1);',
])
super().__init__(name, [argument_type], return_type, [argument_name],
lines)
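# Illustrative sketch (not part of the original module): a Classifier renders a C
# function that maps values back to their key. With assumed example arguments:
#
#   c = Classifier('tag_of', 'int', 'int', {'TAG_A': ['1', '2'], 'TAG_B': ['3']})
#   print(str(c))
#
# would emit roughly (line order follows the pairings dict):
#
#   int tag_of(int x) {
#   if (x == 1 || x == 2) return TAG_A;
#   if (x == 3) return TAG_B;
#   fprintf(stderr, "Unrecognized int '%s.'\n", x);
#   exit(1);
#   }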
class Header:
def __init__(self, name):
self.name = name
def __str__(self):
return '#include <{nam}>'.format(nam=self.name)
class IncludeGuard:
def __init__(self, name):
self.name = os.path.basename(name).replace('.', '_').upper()
def begin(self):
return '#ifndef {nam}\n#define {nam}'.format(nam=self.name)
@staticmethod
def end():
return '#endif'
| gpl-3.0 | 19,194,634,387,759,416 | 29.091743 | 79 | 0.509756 | false |
electric-cloud/EC-WebSphere | src/main/resources/project/wsadmin_scripts/stop_app.py | 1 | 1413 | #
# Copyright 2015 Electric Cloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
$[/myProject/wsadmin_scripts/preamble.py]
appName = '''
$[appname]
'''.strip()
serverName = '''
$[serverName]
'''.strip()
clusterName = '''
$[clusterName]
'''.strip()
def stopApplication(appName, serverName = None, clusterName = None):
if clusterName:
AdminApplication.stopApplicationOnCluster(appName, clusterName)
elif serverName:
nodeName, serverName = serverName.split('=')
AdminApplication.stopApplicationOnSingleServer(appName, nodeName, serverName)
else:
appmgr = AdminControl.queryNames('name=ApplicationManager,*')
AdminControl.invoke(appmgr, 'stopApplication', appName)
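# Illustrative note (not part of the original script): when a single server is
# targeted, the serverName template parameter above is expected in
# "nodeName=serverName" form, e.g. "node01=server1" (example value only), which
# the split('=') call unpacks into the node and server names.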
if isAppRunning(appName):
stopApplication(appName, serverName=serverName, clusterName=clusterName)
else:
print "WARNING: Application %s is already stopped" % appName
| apache-2.0 | 3,546,637,956,825,736,700 | 29.06383 | 85 | 0.726822 | false |
deinfoxication/backend | deinfoxication/__init__.py | 1 | 2134 | """Main application module."""
import glob
import importlib
import itertools
import os
from functools import lru_cache
from celery import Celery
from flask import Flask
from flask_click_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from raven.contrib.flask import Sentry
from sqlalchemy import MetaData
from sqlalchemy.orm.mapper import configure_mappers
convention = {
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s",
}
db = SQLAlchemy(metadata=MetaData(naming_convention=convention))
celery = Celery(config_source="deinfoxication.configs")
migrate = Migrate()
@lru_cache()
def _preload_models():
"""Preload models so, alembic can create the migrations properly."""
base = os.path.dirname(__file__)
models_files = glob.iglob(os.path.join(base, "**", "models.py"), recursive=True)
models_packages = glob.iglob(os.path.join(base, "**", "models", "__init__.py"), recursive=True)
for filename in itertools.chain(models_files, models_packages):
package_name = (
filename.replace(base, __name__)
.replace(os.path.sep + "__init__.py", "")
.replace(os.path.sep, ".")
.replace(".py", "")
)
importlib.import_module(package_name)
# Configure mappers trigger backrefs creation.
# http://docs.sqlalchemy.org/en/latest/orm/relationship_api.html#sqlalchemy.orm.relationship.params.backref
configure_mappers()
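# Illustrative sketch (not part of the original module): the path-to-package rewrite
# above turns, for example (paths assumed for illustration only),
#
#   <package dir>/users/models.py              -> deinfoxication.users.models
#   <package dir>/articles/models/__init__.py  -> deinfoxication.articles.models
#
# before importlib.import_module() loads each module, so configure_mappers() sees
# every mapped class.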
def create_app():
"""Create deinfoxication app."""
app = Flask(__name__)
app.config.from_pyfile(os.path.join(os.path.dirname(__file__), "configs.py"))
app.sentry = Sentry(app, dsn=app.config["SENTRY_DSN"])
# Configure the database and load models.
db.init_app(app)
migrate.init_app(app, db, os.path.join(os.path.dirname(__file__), "..", "migrations"))
_preload_models()
from .endpoints import register_endpoints
register_endpoints(app, db)
return app
| agpl-3.0 | -4,141,000,078,140,775,400 | 31.830769 | 111 | 0.666823 | false |
elryndir/GuildPortal | PortalRaid/urls.py | 1 | 1319 | from PortalRaid.views import NewCharacterFormView, ServerListView, ListCharactersUser, DetailsCharacter,\
DetailsCharacterFromAPI, RaidListView, RaidDetailView, SignUpRaidView, ClassCharacterAPI
__author__ = 'Alexandre Cloquet'
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
url(r'^$', RaidListView.as_view(), name='index_raid'),
url(r'^(?P<pk>\d+)/$', RaidDetailView.as_view(), name='raid_detail'),
url(r'^(?P<pk>\d+)/signup$', SignUpRaidView.as_view(), name='raid_detail_signup'),
url(r'^add_new_character/$', NewCharacterFormView.as_view(), name='new_character'),
url(r'^server_list$', ServerListView.as_view(), name='serverlist'),
url(r'^ClassCharacterAPI', ClassCharacterAPI.as_view(), name='ClassCharacterAPI'),
url(r'^list_characters_user/$', ListCharactersUser.as_view(), name='list_characters_user'),
url(r'^detail_characters/(?P<pk>\d+)/$', DetailsCharacter.as_view(), name='detail_characters'),
url(r'^detail_characters_from_api$', DetailsCharacterFromAPI.as_view(), name='detail_characters_from_api'),
) | mit | 5,455,320,312,811,561,000 | 61.857143 | 130 | 0.611069 | false |
nirgal/ngw | core/views/messages.py | 1 | 18534 | '''
Messages managing views
'''
import email
import json
from datetime import date, timedelta
from email.message import EmailMessage
from email.utils import formatdate
from importlib import import_module
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import filters
from django.contrib.admin.widgets import AdminDateWidget
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.http.response import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils import translation
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.views.generic import DetailView, FormView
from django.views.generic.detail import BaseDetailView
from ngw.core import perms
from ngw.core.models import Contact, ContactInGroup, ContactMsg
from ngw.core.views.generic import InGroupAcl, NgwDeleteView, NgwListView
#######################################################################
#
# Messages list
#
#######################################################################
class MessageDirectionFilter(filters.SimpleListFilter):
title = ugettext_lazy('direction')
parameter_name = 'answer'
def lookups(self, request, view):
return (
('1', _('Received')),
('0', _('Sent')),
)
def queryset(self, request, queryset):
val = self.value()
if val is None:
return queryset
filter_answer = val == '1'
return queryset.filter(is_answer=filter_answer)
class MessageReadFilter(filters.SimpleListFilter):
title = ugettext_lazy('read status')
parameter_name = 'unread'
def lookups(self, request, view):
return (
('1', _('Unread')),
('0', _('Read')),
)
def queryset(self, request, queryset):
val = self.value()
if val is None:
return queryset
filter_unread = val == '1'
return queryset.filter(read_date__isnull=filter_unread)
class MessageAttachmentFilter(filters.SimpleListFilter):
title = ugettext_lazy('has attachment')
parameter_name = 'attachment'
def lookups(self, request, view):
return (
('1', _('With')),
('0', _('Without')),
)
def queryset(self, request, queryset):
val = self.value()
if val is None:
return queryset
filter_attachment = val == '1'
return queryset.filter(has_attachment=filter_attachment)
class MessageContactFilter(filters.SimpleListFilter):
title = ugettext_lazy('contact')
parameter_name = 'contact'
template = 'admin/filter_select.html'
def lookups(self, request, view):
result = []
contacts = Contact.objects.all()
try:
group_id = view.kwargs.get('gid', None)
except AttributeError:
group_id = None
if group_id:
contacts = contacts.extra(
tables=('v_c_appears_in_cg',),
where=(
'v_c_appears_in_cg.contact_id=contact.id',
'v_c_appears_in_cg.group_id={}'.format(group_id)))
for contact in contacts:
result.append((contact.id, contact.name))
return result
def queryset(self, request, queryset):
val = self.value()
if val is None:
return queryset
return queryset.filter(contact_id=val)
class MessageListView(InGroupAcl, NgwListView):
list_display = 'nice_flags', 'nice_date', 'contact', 'subject', 'nice_size'
list_display_links = 'subject',
template_name = 'message_list.html'
list_filter = (
MessageDirectionFilter, MessageReadFilter, MessageContactFilter,
MessageAttachmentFilter)
append_slash = False
search_fields = 'subject', 'text',
def check_perm_groupuser(self, group, user):
if not group.userperms & perms.VIEW_MSGS:
raise PermissionDenied
def get_root_queryset(self):
return ContactMsg.objects \
.filter(group_id=self.contactgroup.id)
def get_context_data(self, **kwargs):
cg = self.contactgroup
context = {}
context['title'] = _('Messages for {}').format(cg)
context['nav'] = cg.get_smart_navbar()
context['nav'].add_component(('messages', _('messages')))
context['active_submenu'] = 'messages'
context.update(kwargs)
return super().get_context_data(**context)
#######################################################################
#
# Messages sending
#
#######################################################################
try:
EXTERNAL_MESSAGE_BACKEND_NAME = settings.EXTERNAL_MESSAGE_BACKEND
except AttributeError as e:
raise ImproperlyConfigured(('You need to add an "EXTERNAL_MESSAGE_BACKEND"'
' handler in your settings.py: "{}"'
.format(e)))
try:
EXTERNAL_MESSAGE_BACKEND = import_module(EXTERNAL_MESSAGE_BACKEND_NAME)
except ImportError as e:
raise ImproperlyConfigured(('Error importing external messages backend'
' module {}: "{}"'
.format(EXTERNAL_MESSAGE_BACKEND_NAME, e)))
def MimefyMessage(contact_to, subject, text, files):
policy = email.policy.EmailPolicy(utf8=True, linesep='\r\n')
msg = EmailMessage(policy)
msg['To'] = contact_to.get_email_to()
msg['Date'] = formatdate()
msg['Subject'] = subject
msg.set_content(text, 'utf-8')
for f in files:
maintype, subtype = f.content_type.split('/')
f.seek(0)
msg.add_attachment(f.read(), maintype=maintype, subtype=subtype,
filename=f.name)
str = msg.as_string()
return str
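# Illustrative sketch (not part of the original module): MimefyMessage() returns the
# full RFC 5322 text of the message, which is what gets stored in ContactMsg.text by
# SendMessageForm.send_message() below. A minimal call, assuming `contact` is a
# Contact instance, would look like:
#
#   raw = MimefyMessage(contact, 'Hello', 'Plain text body', files=[])
#   # raw starts with headers such as "To: ...", "Date: ...", "Subject: Hello"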
class SendMessageForm(forms.Form):
def __init__(self, contactgroup, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['ids'] = forms.CharField(widget=forms.widgets.HiddenInput)
if self.support_expiration_date():
if contactgroup.date:
initial_date = contactgroup.date
else:
initial_date = date.today() + timedelta(days=21)
self.fields['expiration_date'] = forms.DateField(
label=_('Expiration date'),
widget=AdminDateWidget,
initial=initial_date)
self.fields['subject'] = forms.CharField(
label=_('Subject'), max_length=900,
widget=forms.widgets.Input(
attrs={'size': '64', 'style': 'width:100%'}))
self.fields['message'] = forms.CharField(
label=_('Message'),
widget=forms.Textarea(attrs={'style': 'width:100%', 'rows': '25'}))
self.fields['files'] = forms.FileField(
required=False,
widget=forms.ClearableFileInput(attrs={
'multiple': True,
'class': 'inputfile_nicezone',
}))
def support_expiration_date(self):
return getattr(EXTERNAL_MESSAGE_BACKEND, 'SUPPORTS_EXPIRATION', False)
def clean_expiration_date(self):
expiration_date = self.cleaned_data['expiration_date']
date_cleaner = getattr(
EXTERNAL_MESSAGE_BACKEND, 'clean_expiration_date', None)
if date_cleaner:
expiration_date = date_cleaner(expiration_date)
return expiration_date
def clean_files(self):
        # Work around Django's single-file form field: return every file
        # uploaded under the 'files' key.
if self.files:
return self.files.getlist('files')
else:
return []
def send_message(self, group):
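        """Store one ContactMsg per selected contact in the given group.

        Returns the list of contacts that have no email address.
        """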
contacts_noemail = []
language = translation.get_language()
sync_info = {
'backend': EXTERNAL_MESSAGE_BACKEND_NAME,
'language': language,
}
if self.support_expiration_date():
delta = self.cleaned_data['expiration_date'] - date.today()
expiration = delta.days
sync_info['expiration'] = expiration
json_sync_info = json.dumps(sync_info)
for contact_id in self.cleaned_data['ids'].split(','):
contact = get_object_or_404(Contact, pk=contact_id)
if not contact.get_fieldvalues_by_type('EMAIL'):
contacts_noemail.append(contact)
contact_msg = ContactMsg(contact=contact, group=group)
contact_msg.send_date = now()
contact_msg.subject = self.cleaned_data['subject']
contact_msg.text = MimefyMessage(contact,
self.cleaned_data['subject'],
self.cleaned_data['message'],
self.cleaned_data['files'])
if self.cleaned_data['files']:
contact_msg.has_attachment = True
contact_msg.sync_info = json_sync_info
contact_msg.save()
return contacts_noemail

class SendMessageView(InGroupAcl, FormView):
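    """Compose and store messages for the selected contacts (WRITE_MSGS
    permission required)."""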
form_class = SendMessageForm
template_name = 'message_send.html'
def check_perm_groupuser(self, group, user):
if not group.userperms & perms.WRITE_MSGS:
raise PermissionDenied
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['contactgroup'] = self.contactgroup
return kwargs
def get_initial(self):
if self.request.method == 'POST':
querydict = self.request.POST
else:
querydict = self.request.GET
return {'ids': querydict['ids']}
def form_valid(self, form):
contacts_noemail = form.send_message(self.contactgroup)
nbmessages = len(form.cleaned_data['ids'].split(','))
if nbmessages == 1:
success_msg = _('Message stored.')
else:
success_msg = _('{} messages stored.').format(nbmessages)
messages.add_message(self.request, messages.SUCCESS, success_msg)
if contacts_noemail:
nb_noemail = len(contacts_noemail)
if nb_noemail == 1:
error_msg = _("One contact doesn't have an email address.")
else:
error_msg = (_("{} contacts don't have an email address.")
.format(nb_noemail))
messages.add_message(
self.request, messages.WARNING,
translation.string_concat(
error_msg,
_(" The message will be kept here until you define his"
" email address.")))
if self.request.is_ajax():
return JsonResponse({
'has_error': False,
'url': self.get_success_url()
})
else:
return super().form_valid(form)
def form_invalid(self, form):
if self.request.is_ajax():
return JsonResponse({
'has_error': True,
'errors': json.loads(form.errors.as_json()),
})
else:
return self.render_to_response(self.get_context_data(form=form))
def get_success_url(self):
        return self.contactgroup.get_absolute_url() + 'messages/'
def get_context_data(self, **kwargs):
cg = self.contactgroup
# if group.date and group.date <= now().date():
# return HttpResponse('Date error. Event is over.')
if self.request.method == 'POST':
querydict = self.request.POST
else:
querydict = self.request.GET
ids = querydict['ids'].split(',')
nbcontacts = len(ids)
noemails = []
for contact in Contact.objects.filter(id__in=ids):
c_emails = contact.get_fieldvalues_by_type('EMAIL')
if not c_emails:
noemails.append(contact)
context = {}
context['title'] = _('Send message in {}').format(cg)
context['nbcontacts'] = nbcontacts
context['noemails'] = noemails
context['nav'] = cg.get_smart_navbar() \
.add_component(('members', _('members'))) \
.add_component(('send_message', _('send message')))
context['active_submenu'] = 'messages'
context.update(kwargs)
return super().get_context_data(**context)
#######################################################################
#
# Message detail
#
#######################################################################
class MessageDetailView(InGroupAcl, DetailView):
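    """Display a single message; incoming answers are marked as read when the
    user has the WRITE_MSGS permission."""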
pk_url_kwarg = 'mid'
model = ContactMsg
template_name = 'message_detail.html'
def check_perm_groupuser(self, group, user):
if not group.userperms & perms.VIEW_MSGS:
raise PermissionDenied
def get_object(self, queryset=None):
msg = super().get_object(queryset)
        # Check that the message's group matches the one in the URL
if msg.group_id != self.contactgroup.id:
raise PermissionDenied
return msg
def get_context_data(self, **kwargs):
if self.object.group != self.contactgroup:
# attempt to read an object from another group
raise PermissionDenied
if self.object.is_answer and self.object.read_date is None:
if self.contactgroup.userperms & perms.WRITE_MSGS:
self.object.read_date = now()
self.object.read_by = self.request.user
self.object.save()
else:
messages.add_message(
self.request, messages.WARNING,
_("You don't have the permission to mark that message as"
" read."))
cg = self.contactgroup
context = {}
if self.object.is_answer:
context['title'] = _(
'Message from {contactname} in group {groupname}').format(
contactname=self.object.contact.name,
groupname=cg)
else:
context['title'] = _(
'Message to {contactname} in group {groupname}').format(
contactname=self.object.contact.name,
groupname=cg)
try:
context['sync_info'] = json.loads(self.object.sync_info)
except ValueError:
context['sync_info'] = {}
context['nav'] = cg.get_smart_navbar()
context['nav'].add_component(('messages', _('messages')))
context['cig_url'] = (
self.contactgroup.get_absolute_url()
+ 'members/'
+ str(self.object.contact_id))
context['active_submenu'] = 'messages'
# 201505
try:
cig = ContactInGroup.objects.get(
contact_id=self.object.contact.id,
group_id=cg.id)
except ContactInGroup.DoesNotExist:
pass
else:
context['membership_note'] = cig.note
flags = perms.cig_flags_int(self.object.contact.id, cg.id)
flags_direct = perms.cig_flags_direct_int(self.object.contact.id,
cg.id)
membership_str = perms.int_to_text(flags_direct, flags & ~flags_direct)
context['membership'] = perms.int_to_flags(flags_direct)
context['membership_str'] = membership_str
context['membership_title'] = _(
'{contactname} in group {groupname}').format(
contactname=self.object.contact.name,
groupname=cg)
if self.contactgroup.userperms & perms.WRITE_MSGS:
context['reply_url'] = "../members/send_message?ids={}".format(
self.object.contact_id)
context.update(kwargs)
return super().get_context_data(**context)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
if request.POST.get('unread', None):
self.object.read_date = None
self.object.read_by = None
self.object.save()
return HttpResponseRedirect(
self.contactgroup.get_absolute_url() + 'messages/')
raise Http404

class MessageBlobView(InGroupAcl, BaseDetailView):
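    """Return the raw RFC 822 source of a message."""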
model = ContactMsg
pk_url_kwarg = 'mid'
def check_perm_groupuser(self, group, user):
if not group.userperms & perms.VIEW_MSGS:
raise PermissionDenied
def get_object(self, queryset=None):
msg = super().get_object(queryset)
        # Check that the message's group matches the one in the URL
if msg.group_id != self.contactgroup.id:
raise PermissionDenied
return msg
def render_to_response(self, context):
return HttpResponse(context['object'].text,
content_type='message/rfc822')
#######################################################################
#
# Messages toggle
#
#######################################################################
# from django.http.response import JsonResponse
# from django.shortcuts import get_object_or_404
# class MessageToggleReadView(InGroupAcl, View):
# def check_perm_groupuser(self, group, user):
# if not group.userperms & perms.WRITE_MSGS:
# raise PermissionDenied
#
# def get(self, request, *args, **kwargs):
# message_id = self.kwargs.get('mid', None)
# try:
# message_id = int(message_id)
# except (ValueError, TypeError):
# raise Http404
# message = get_object_or_404(ContactMsg, pk=message_id)
# if message.group_id != self.contactgroup.id:
# return HttpResponse('Bad group')
#
# return JsonResponse({'test': 'ok'})
#######################################################################
#
# Messages delete
#
#######################################################################
class MessageDeleteView(InGroupAcl, NgwDeleteView):
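    """Delete a message (WRITE_MSGS permission required)."""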
model = ContactMsg
pk_url_kwarg = 'mid'
def check_perm_groupuser(self, group, user):
if not group.userperms & perms.WRITE_MSGS:
raise PermissionDenied
def get_context_data(self, **kwargs):
context = {}
context['nav'] = self.contactgroup.get_smart_navbar() \
.add_component(('messages', _('messages'))) \
.add_component(('delete', _('delete')))
context.update(kwargs)
return super().get_context_data(**context)