repo_name (string, 5–92 chars) | path (string, 4–232 chars) | copies (string, 19 classes) | size (string, 4–7 chars) | content (string, 721–1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
cloudera/hue | desktop/libs/librdbms/java/query.py | 2 | 1577 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import range
from py4j.java_gateway import JavaGateway
gateway = JavaGateway()
jdbc_driver = 'com.mysql.jdbc.Driver'
db_url = 'jdbc:mysql://localhost/hue'
username = 'root'
password = 'root'
conn = gateway.jvm.java.sql.DriverManager.getConnection(db_url, username, password)
try:
    stmt = conn.createStatement()
    try:
        rs = stmt.executeQuery('select username,email from auth_user')
        try:
            md = rs.getMetaData()
            for i in range(md.getColumnCount()):
                print(md.getColumnTypeName(i + 1))
            while rs.next():
                username = rs.getString("username")
                email = rs.getString("email")
                print(username, email)
        finally:
            rs.close()
    finally:
        stmt.close()
finally:
    conn.close()
| apache-2.0 | -8,428,450,868,111,065,000 | 29.326923 | 83 | 0.717185 | false |
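The script above assumes a py4j gateway JVM is already listening on the default port with the MySQL JDBC driver on its classpath. A minimal sketch of one way to satisfy that assumption from Python, using py4j's launch_gateway; the connector-jar path is a placeholder, not taken from the original file:
from py4j.java_gateway import JavaGateway, GatewayParameters, launch_gateway
# Start a JVM with the MySQL connector on its classpath and connect to the
# port it reports back; the DriverManager call above then works unchanged.
port = launch_gateway(classpath='/path/to/mysql-connector-java.jar')
gateway = JavaGateway(gateway_parameters=GatewayParameters(port=port))
conn = gateway.jvm.java.sql.DriverManager.getConnection(
    'jdbc:mysql://localhost/hue', 'root', 'root')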
adongy/spreads | spreads/util.py | 1 | 14770 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Johannes Baiter <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Various utility functions and classes.
"""
from __future__ import division, unicode_literals, print_function
import abc
import glob
import json
import logging
import os
import pkg_resources
import platform
import re
import subprocess
from unicodedata import normalize
import blinker
import colorama
import psutil
import roman
from colorama import Fore, Back, Style
from spreads.vendor.pathlib import Path
class SpreadsException(Exception):
""" General exception """
pass
class DeviceException(SpreadsException):
""" Raised when a device-related error occured. """
pass
class MissingDependencyException(SpreadsException):
""" Raised when a dependency for a plugin is missing. """
pass
def get_version():
""" Get installed version via pkg_resources. """
return pkg_resources.require('spreads')[0].version
def find_in_path(name):
""" Find executable in $PATH.
:param name: name of the executable
:type name: unicode
:returns: Path to executable or None if not found
:rtype: unicode or None
"""
candidates = None
if is_os('windows'):
import _winreg
if name.startswith('scantailor'):
try:
cmd = _winreg.QueryValue(
_winreg.HKEY_CLASSES_ROOT,
'Scan Tailor Project\\shell\\open\\command')
bin_path = cmd.split('" "')[0][1:]
if name.endswith('-cli'):
bin_path = bin_path[:-4] + "-cli.exe"
return bin_path if os.path.exists(bin_path) else None
except OSError:
return None
else:
path_dirs = os.environ.get('PATH').split(';')
path_dirs.append(os.getcwd())
path_exts = os.environ.get('PATHEXT').split(';')
candidates = (os.path.join(p, name + e)
for p in path_dirs
for e in path_exts)
else:
candidates = (os.path.join(p, name)
for p in os.environ.get('PATH').split(':'))
return next((c for c in candidates if os.path.exists(c)), None)
def is_os(osname):
""" Check if the current operating system matches the expected.
:param osname: Operating system name as returned by
:py:func:`platform.system`
:returns: Whether the OS matches or not
:rtype: bool
"""
return platform.system().lower() == osname
def check_futures_exceptions(futures):
"""" Go through passed :py:class:`concurrent.futures._base.Future` objects
and re-raise the first Exception raised by any one of them.
:param futures: Iterable that contains the futures to be checked
:type futures: iterable with :py:class:`concurrent.futures._base.Future`
instances
"""
if any(x.exception() for x in futures):
raise next(x for x in futures if x.exception()).exception()
def get_free_space(path):
""" Return free space on file-system underlying the passed path.
:param path: Path on file-system the free space of which is desired.
:type path: unicode
:return: Free space in bytes.
:rtype: int
"""
return psutil.disk_usage(unicode(path)).free
def get_subprocess(cmdline, **kwargs):
""" Get a :py:class:`subprocess.Popen` instance.
On Windows systems, the process will be run in the background and won't
open a cmd-window or appear in the taskbar.
The function signature matches that of the :py:class:`subprocess.Popen`
initialization method.
"""
if subprocess.mswindows and 'startupinfo' not in kwargs:
su = subprocess.STARTUPINFO()
su.dwFlags |= subprocess.STARTF_USESHOWWINDOW
su.wShowWindow = subprocess.SW_HIDE
kwargs['startupinfo'] = su
return subprocess.Popen(cmdline, **kwargs)
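A short usage sketch, assuming the module above is importable as spreads.util; the command line is only an example:
import subprocess
from spreads.util import get_subprocess
# Runs without flashing a console window on Windows; plain Popen semantics elsewhere.
proc = get_subprocess(['scantailor-cli', '--version'],
                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, _ = proc.communicate()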
def wildcardify(pathnames):
""" Try to generate a single path with wildcards that matches all
`pathnames`.
:param pathnames: List of pathnames to find a wildcard string for
:type pathnames: List of str/unicode
:return: The wildcard string or None if none was found
:rtype: unicode or None
"""
wildcard_str = ""
for idx, char in enumerate(pathnames[0]):
if all(p[idx] == char for p in pathnames[1:]):
wildcard_str += char
elif not wildcard_str or wildcard_str[-1] != "*":
wildcard_str += "*"
matched_paths = glob.glob(wildcard_str)
if not sorted(pathnames) == sorted(matched_paths):
return None
return wildcard_str
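An illustrative call, hedged because the function verifies its result against the file system with glob, so it only returns a pattern when exactly the given files exist (the paths below are made up):
from spreads.util import wildcardify
paths = ['/tmp/scans/page_001.jpg', '/tmp/scans/page_002.jpg']
# With only these two files present this yields '/tmp/scans/page_00*.jpg';
# if the pattern would match anything else, None is returned instead.
print(wildcardify(paths))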
def diff_dicts(old, new):
""" Get the difference between two dictionaries.
:param old: Dictionary to base comparison on
:type old: dict
:param new: Dictionary to compare with
:type new: dict
:return: A (possibly nested) dictionary containing all items from `new`
that differ from the ones in `old`
:rtype: dict
"""
out = {}
for key, value in old.iteritems():
if new[key] != value:
out[key] = new[key]
elif isinstance(value, dict):
diff = diff_dicts(value, new[key])
if diff:
out[key] = diff
return out
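A quick illustration with made-up values:
from spreads.util import diff_dicts
old = {'device': {'dpi': 300}, 'language': 'en'}
new = {'device': {'dpi': 300}, 'language': 'de'}
print(diff_dicts(old, new))  # {'language': 'de'}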
def slugify(text, delimiter=u'-'):
"""Generates an ASCII-only slug.
Code adapted from a Flask snippet by Armin Ronacher:
http://flask.pocoo.org/snippets/5/
:param text: Text to create slug for
:type text: unicode
:param delimiter: Delimiter to use in slug
:type delimiter: unicode
:return: The generated slug
:rtype: unicode
"""
punctuation_re = r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+'
result = []
for word in re.split(punctuation_re, text.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(word)
return unicode(delimiter.join(result))
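For example (expected return value shown in the comment):
from spreads.util import slugify
slugify(u'Hello, World! Scan #3')  # returns u'hello-world-scan-3'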
class _instancemethodwrapper(object): # noqa
def __init__(self, callable):
self.callable = callable
self.__dontcall__ = False
def __getattr__(self, key):
return getattr(self.callable, key)
def __call__(self, *args, **kwargs):
if self.__dontcall__:
raise TypeError('Attempted to call abstract method.')
return self.callable(*args, **kwargs)
class _classmethod(classmethod): # noqa
def __init__(self, func):
super(_classmethod, self).__init__(func)
isabstractmethod = getattr(func, '__isabstractmethod__', False)
if isabstractmethod:
self.__isabstractmethod__ = isabstractmethod
def __get__(self, instance, owner):
result = _instancemethodwrapper(super(_classmethod, self)
.__get__(instance, owner))
isabstractmethod = getattr(self, '__isabstractmethod__', False)
if isabstractmethod:
result.__isabstractmethod__ = isabstractmethod
abstractmethods = getattr(owner, '__abstractmethods__', None)
if abstractmethods and result.__name__ in abstractmethods:
result.__dontcall__ = True
return result
class abstractclassmethod(_classmethod): # noqa
""" New decorator class that implements the @abstractclassmethod decorator
added in Python 3.3 for Python 2.7.
Kudos to http://stackoverflow.com/a/13640018/487903
"""
def __init__(self, func):
func = abc.abstractmethod(func)
super(abstractclassmethod, self).__init__(func)
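A hedged sketch of how the decorator is meant to be used on Python 2.7; the class and method names are invented for illustration:
import abc
from spreads.util import abstractclassmethod
class BaseDriver(object):
    __metaclass__ = abc.ABCMeta
    @abstractclassmethod
    def configuration_template(cls):
        """ Subclasses must provide this as a classmethod. """
class DummyDriver(BaseDriver):
    @classmethod
    def configuration_template(cls):
        return {}
DummyDriver.configuration_template()   # returns {}
# BaseDriver.configuration_template()  # would raise TypeError (abstract method)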
class ColourStreamHandler(logging.StreamHandler):
""" A colorized output StreamHandler
Kudos to Leigh MacDonald: http://goo.gl/Lpr6C5
"""
# Some basic colour scheme defaults
colours = {
'DEBUG': Fore.CYAN,
'INFO': Fore.GREEN,
'WARN': Fore.YELLOW,
'WARNING': Fore.YELLOW,
'ERROR': Fore.RED,
'CRIT': Back.RED + Fore.WHITE,
'CRITICAL': Back.RED + Fore.WHITE
}
@property
def is_tty(self):
""" Check if we are using a "real" TTY. If we are not using a TTY it
means that the colour output should be disabled.
:return: Using a TTY status
:rtype: bool
"""
try:
return getattr(self.stream, 'isatty', None)()
except:
return False
def emit(self, record):
try:
message = self.format(record)
if not self.is_tty:
self.stream.write(message)
else:
self.stream.write(self.colours[record.levelname] +
message + Style.RESET_ALL)
self.stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class EventHandler(logging.Handler):
""" Subclass of :py:class:`logging.Handler` that emits a
:py:class:`blinker.base.Signal` whenever a new record is emitted.
"""
signals = blinker.Namespace()
on_log_emit = signals.signal('logrecord', doc="""\
Sent when a log record was emitted.
:keyword :class:`logging.LogRecord` record: the LogRecord
""")
def emit(self, record):
self.on_log_emit.send(record=record)
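A minimal sketch of consuming that signal; the receiver name and logger name are arbitrary:
import logging
from spreads.util import EventHandler
def forward_record(sender, record):
    # `record` is the logging.LogRecord sent by EventHandler.emit()
    print(record.getMessage())
EventHandler.on_log_emit.connect(forward_record)
logging.getLogger('spreads').addHandler(EventHandler())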
def get_data_dir(create=False):
""" Return (and optionally create) the user's default data directory.
:param create: Create the data directory if it doesn't exist
:type create: bool
:return: Path to the default data directory
:rtype: unicode
"""
unix_dir_var = 'XDG_DATA_HOME'
unix_dir_fallback = '~/.config'
windows_dir_var = 'APPDATA'
windows_dir_fallback = '~\\AppData\\Roaming'
mac_dir = '~/Library/Application Support'
base_dir = None
if is_os('darwin'):
if Path(unix_dir_fallback).exists():
base_dir = unix_dir_fallback
else:
base_dir = mac_dir
elif is_os('windows'):
if windows_dir_var in os.environ:
base_dir = os.environ[windows_dir_var]
else:
base_dir = windows_dir_fallback
else:
if unix_dir_var in os.environ:
base_dir = os.environ[unix_dir_var]
else:
base_dir = unix_dir_fallback
app_path = Path(base_dir)/'spreads'
if create and not app_path.exists():
app_path.mkdir()
return unicode(app_path)
def colorize(text, color):
""" Return text with a new ANSI foreground color.
:param text: Text to be wrapped
:param color: ANSI color to wrap text in
:type color: str (from `colorama.ansi <http://git.io/9qnt0Q>`)
:return: Colorized text
"""
return color + text + colorama.Fore.RESET
class RomanNumeral(object):
""" Number type that represents integers as Roman numerals and that
can be used in all arithmetic operations applicable to integers.
"""
@staticmethod
def is_roman(value):
""" Check if `value` is a valid Roman numeral.
:param value: Value to be checked
:type value: unicode
:returns: Whether the value is valid or not
:rtype: bool
"""
return bool(roman.romanNumeralPattern.match(value))
def __init__(self, value, case='upper'):
""" Create a new instance.
:param value: Value of the instance
:type value: int, unicode containing valid Roman numeral or
:py:class:`RomanNumeral`
"""
self._val = self._to_int(value)
self._case = case
if isinstance(value, basestring) and not self.is_roman(value):
self._case = 'lower'
elif isinstance(value, RomanNumeral):
self._case = value._case
def _to_int(self, value):
if isinstance(value, int):
return value
elif isinstance(value, basestring) and self.is_roman(value.upper()):
return roman.fromRoman(value.upper())
elif isinstance(value, RomanNumeral):
return value._val
else:
raise ValueError("Value must be a valid roman numeral, a string"
" representing one or an integer: '{0}'"
.format(value))
def __cmp__(self, other):
if self._val > self._to_int(other):
return 1
elif self._val == self._to_int(other):
return 0
elif self._val < self._to_int(other):
return -1
def __add__(self, other):
return RomanNumeral(self._val + self._to_int(other), self._case)
def __sub__(self, other):
return RomanNumeral(self._val - self._to_int(other), self._case)
def __int__(self):
return self._val
def __str__(self):
strval = roman.toRoman(self._val)
if self._case == 'lower':
return strval.lower()
else:
return strval
def __unicode__(self):
return unicode(str(self))
def __repr__(self):
return str(self)
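Illustrative arithmetic with the class above (expected values in comments):
from spreads.util import RomanNumeral
page = RomanNumeral('xiv')    # lower-case input keeps lower-case output
int(page)                     # 14
str(page + 3)                 # 'xvii'
RomanNumeral('XIV') < 15      # True (Python 2 __cmp__ comparison)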
class CustomJSONEncoder(json.JSONEncoder):
""" Custom :py:class:`json.JSONEncoder`.
Uses an object's `to_dict` method if present for serialization.
Serializes :py:class:`pathlib.Path` instances to the string
representation of their relative path to a BagIt-compliant directory or
their absolute path if not applicable.
"""
def default(self, obj):
if hasattr(obj, 'to_dict'):
return obj.to_dict()
if isinstance(obj, Path):
# Serialize paths that belong to a workflow as paths relative to
# its base directory
base = next((p for p in obj.parents if (p/'bagit.txt').exists()),
None)
if base:
return unicode(obj.relative_to(base))
else:
return unicode(obj.absolute())
return json.JSONEncoder.default(self, obj)
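A hedged usage sketch; the path is hypothetical, and outside a BagIt-style directory the absolute path is serialized:
import json
from spreads.util import CustomJSONEncoder
from spreads.vendor.pathlib import Path
json.dumps({'page': Path('/tmp/workflow/data/page_001.jpg')},
           cls=CustomJSONEncoder)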
| agpl-3.0 | 8,165,834,610,917,695,000 | 31.178649 | 79 | 0.599323 | false |
rjspiers/qgis-batch-save-layers | __init__.py | 1 | 1551 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
BatchSaveLayers
A QGIS plugin
Save open vector layers to one directory as shapefile
-------------------
begin : 2016-02-19
copyright : (C) 2016 by Robert Spiers
email : [email protected]
git sha : $Format:%H$
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Load BatchSaveLayers class from file BatchSaveLayers.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
from .batch_save_layers import BatchSaveLayers
return BatchSaveLayers(iface)
| gpl-2.0 | 343,507,283,972,816,960 | 43.314286 | 77 | 0.407479 | false |
lumidify/fahrenheit451 | Engine.py | 1 | 9955 | import os
import sys
import pygame
pygame.init()
import importlib
from loader import *
from QuadTree import QuadTree
from Montag import *
from Obstacles import *
from Floor import Floor
from pygame.locals import *
from CONSTANTS import *
"""
This code is extremely bad, use at your own risk.
"""
TILEWIDTH = 128
TILEHEIGHT = 64
FLOORPATH = os.path.join("graphics", "floor_tiles")
FONT = pygame.font.Font("Lumidify_Casual.ttf", 50)
FONT_SMALL = pygame.font.Font("SourceCodePro.ttf", 14)
clock = pygame.time.Clock()
def load_module(path):
spec = importlib.util.spec_from_file_location("module.name", path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
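For illustration, a hedged example of how this helper is used further down (the path matches the default map loaded in __main__):
config = load_module('maps/MapBook1/config.py').config  # exposes the map's config dict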
class Engine():
def __init__(self, screen):
print("Initializing Lumidify Isometric Engine (LIE) Version 1.0 ...")
self.screen = screen
self.screen.blit(FONT.render("Loading...", True, (255, 255, 255)), (0, 0))
self.screen.blit(FONT.render("Remember - patience is a virtue.", True, (255, 255, 255)), (0, 40))
pygame.display.update()
self.tiles, self.obstacles, self.characters, self.items, self.bullets = load_tiles()
self.floor = Floor(self.screen, self.tiles)
self.obstacles = Obstacles(self.screen, self.obstacles, self.characters, self.items, self.bullets, self)
temp = self.obstacles.characters["GUB"].copy()
temp["weapon"] = None
self.player = Montag(self.screen, x=0, y=0, obstaclemap=self.obstacles, **temp)
self.obstacles.player = self.player
self.game_variables = {}
self.active_layer = 0
self.screen_offset = [0, 0]
self.loaded_maps = {}
self.saved_maps = []
self.current_map = ""
self.wongame = False
self.lostgame = False
self.show_fps = True
def savegame(self):
if not os.path.isdir("save"):
os.mkdir("save")
for path, item in self.loaded_maps.items():
final_path = os.path.join("save", path)
if not os.path.exists(final_path):
os.makedirs(final_path)
self.obstacles.save(final_path, item["characters"] + item["dead_characters"], item["item_map"], item["triggers"], item["obstacles"])
self.floor.save(os.path.join(final_path, "floor.py"), item["floor"])
with open(os.path.join(final_path, "config.py"), "w") as f:
f.write("config = " + repr(item["config"]))
if not os.path.exists(os.path.join("save", self.current_map)):
os.makedirs(os.path.join("save", self.current_map))
self.obstacles.save(os.path.join("save", self.current_map), self.obstacles.charactermap + self.obstacles.dead_characters, self.obstacles.item_map, self.obstacles.triggers, self.obstacles.layers)
self.floor.save(os.path.join("save", self.current_map, "floor.py"), self.floor.layers)
with open(os.path.join("save", self.current_map, "config.py"), "w") as f:
f.write("config = " + repr(self.config))
player_config = {"dead": self.player.dead, "grid_pos": self.player.grid_pos.copy(), "current_map": self.current_map, "won": self.wongame, "lost": self.lostgame, "inventory": self.player.get_inventory(), "health": self.player.health}
with open(os.path.join("save", "config.py"), "w") as f:
f.write("player_config = " + repr(player_config))
def load_game(self):
if os.path.isdir("save"):
self.wongame = False
self.lostgame = False
player_config = load_module(os.path.join("save", "config.py")).player_config.copy()
self.player.reset()
self.player.dead = player_config["dead"]
self.player.load_inventory(player_config["inventory"])
self.saved_maps = os.listdir("save")
self.current_map = player_config["current_map"]
self.player.health = player_config["health"]
self.saved_maps = [os.path.join("maps", x) for x in os.listdir("save/maps")]
self.loaded_maps = {}
self.load_map(self.current_map, spawn_pos=player_config["grid_pos"])
if player_config["won"]:
self.wingame()
elif player_config["lost"]:
self.losegame()
def load_map(self, path, **kwargs):
if path in self.saved_maps:
self.saved_maps.remove(path)
path = os.path.join("save", path)
self.current_map = None
if self.current_map:
self.loaded_maps[self.current_map] = {}
self.loaded_maps[self.current_map]["floor"] = self.floor.layers.copy()
self.loaded_maps[self.current_map]["obstacles"] = self.obstacles.layers.copy()
self.loaded_maps[self.current_map]["characters"] = self.obstacles.charactermap.copy()
self.loaded_maps[self.current_map]["dead_characters"] = self.obstacles.dead_characters.copy()
self.loaded_maps[self.current_map]["item_map"] = self.obstacles.item_map.copy()
self.loaded_maps[self.current_map]["bullets"] = self.obstacles.bullets.copy()
self.loaded_maps[self.current_map]["triggers"] = self.obstacles.triggers.copy()
self.loaded_maps[self.current_map]["config"] = self.config.copy()
if path.startswith("save"):
self.current_map = path[5:]
else:
self.current_map = path
if path in self.loaded_maps:
self.floor.layers = self.loaded_maps[path]["floor"].copy()
self.obstacles.layers = self.loaded_maps[path]["obstacles"].copy()
self.obstacles.charactermap = self.loaded_maps[path]["characters"].copy()
self.obstacles.dead_characters = self.loaded_maps[path]["dead_characters"].copy()
self.obstacles.item_map = self.loaded_maps[path]["item_map"].copy()
self.obstacles.bullets = self.loaded_maps[path]["bullets"].copy()
self.obstacles.triggers = self.loaded_maps[path]["triggers"].copy()
self.config = self.loaded_maps[path]["config"].copy()
else:
self.floor.load_tilemap(os.path.join(path, "floor.py"))
self.obstacles.load_obstaclemap(os.path.join(path, "obstacles.py"))
self.obstacles.load_charactermap(os.path.join(path, "characters.py"))
self.obstacles.load_item_map(os.path.join(path, "items.py"))
self.obstacles.load_triggermap(os.path.join(path, "triggers.py"))
self.obstacles.dead_characters = []
self.obstacles.bullets = []
self.config = load_module(os.path.join(path, "config.py")).config.copy()
try:
pygame.mixer.music.load(self.config.get("music", "Betrayed.ogg"))
pygame.mixer.music.play(-1)
except:
pass
if kwargs.get("spawn_pos", None):
self.player.grid_pos = kwargs["spawn_pos"].copy()
else:
self.player.grid_pos = self.config.get("spawn_pos", [0, 0]).copy()
self.player.reset()
mapsize = self.config.get("level_dimensions", [50, 50])
self.obstacles.change_size(mapsize)
self.obstacles.refresh_trigger_quadtree()
self.obstacles.refresh_grid()
def update(self, event=None):
if not self.wongame:
screen_size = self.screen.get_size()
isox = (self.player.grid_pos[0] - self.player.grid_pos[1]) * (ISOWIDTH // 2)
isoy = (self.player.grid_pos[0] + self.player.grid_pos[1]) * (ISOHEIGHT // 2)
self.screen_offset = [screen_size[0] // 2 - isox, screen_size[1] // 2 - isoy]
current_time = pygame.time.get_ticks()
self.floor.update(current_time)
self.obstacles.update(current_time=current_time, event=event)
if event and event.type == KEYDOWN:
if event.key == K_F3:
self.savegame()
elif event.key == K_F4:
self.load_game()
elif event.key == K_F11:
self.show_fps = not self.show_fps
def wingame(self):
self.wongame = True
pygame.mixer.music.load("wingame.ogg")
pygame.mixer.music.play(-1)
def losegame(self):
self.lostgame = True
pygame.mixer.music.load("Sad_Piano_3.ogg")
pygame.mixer.music.play(-1)
def draw(self):
self.floor.draw(self.screen_offset)
self.obstacles.draw(self.screen_offset)
if self.wongame:
screen.blit(FONT.render("Congratulations, you won!", True, (255, 255, 255)), (0, 0))
elif self.lostgame:
screen.blit(FONT.render("Shame be upon you! You lost!", True, (255, 255, 255)), (0, 0))
self.screen.blit(FONT.render("Health: " + str(self.player.health), True, (255, 255, 255)), (self.screen.get_size()[0] - 200, 0))
if self.show_fps:
self.screen.blit(FONT.render("FPS: " + str(round(clock.get_fps(), 2)), True, (255, 255, 255)), (self.screen.get_size()[0] - 400, 0))
if __name__ == "__main__":
pygame.init()
screen_info = pygame.display.Info()
screen_size = [screen_info.current_w, screen_info.current_h]
screen = pygame.display.set_mode(screen_size, RESIZABLE)
clock = pygame.time.Clock()
pygame.display.set_caption("Fahrenheit 451")
engine = Engine(screen)
if len(sys.argv) > 1:
engine.load_map(sys.argv[1])
else:
engine.load_map("maps/MapBook1")
while True:
screen.fill((0, 0, 0))
clock.tick()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == VIDEORESIZE:
screen_size = event.dict["size"]
screen = pygame.display.set_mode(screen_size, RESIZABLE)
else:
engine.update(event)
engine.update()
engine.draw()
pygame.display.update()
| gpl-2.0 | 768,760,483,214,125,200 | 48.527363 | 240 | 0.599799 | false |
uw-it-cte/uw-restclients | restclients/views.py | 1 | 6990 | try:
from importlib import import_module
except:
# python 2.6
from django.utils.importlib import import_module
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_protect
from django.http import HttpResponseNotFound, HttpResponseRedirect
from django.http import HttpResponse
from django.template import loader, RequestContext, TemplateDoesNotExist
from django.shortcuts import render_to_response
from restclients.dao import SWS_DAO, PWS_DAO, GWS_DAO, NWS_DAO, Hfs_DAO,\
Book_DAO, Canvas_DAO, Uwnetid_DAO, MyLibInfo_DAO, LibCurrics_DAO,\
TrumbaCalendar_DAO, MyPlan_DAO, IASYSTEM_DAO, Grad_DAO
from restclients.mock_http import MockHTTP
from authz_group import Group
from userservice.user import UserService
from time import time
from urllib import quote, unquote, urlencode
from urlparse import urlparse, parse_qs
import simplejson as json
import re
@login_required
@csrf_protect
def proxy(request, service, url):
if not hasattr(settings, "RESTCLIENTS_ADMIN_GROUP"):
print "You must have a group defined as your admin group."
print 'Configure that using RESTCLIENTS_ADMIN_GROUP="u_foo_bar"'
raise Exception("Missing RESTCLIENTS_ADMIN_GROUP in settings")
user_service = UserService()
actual_user = user_service.get_original_user()
g = Group()
is_admin = g.is_member_of_group(actual_user,
settings.RESTCLIENTS_ADMIN_GROUP)
if not is_admin:
return HttpResponseRedirect("/")
use_pre = False
headers = {}
if service == "sws":
dao = SWS_DAO()
headers["X-UW-Act-as"] = actual_user
elif service == "pws":
dao = PWS_DAO()
elif service == "gws":
dao = GWS_DAO()
elif service == "nws":
dao = NWS_DAO()
elif service == "hfs":
dao = Hfs_DAO()
elif service == "book":
dao = Book_DAO()
elif service == "canvas":
dao = Canvas_DAO()
elif service == "grad":
dao = Grad_DAO()
elif service == "uwnetid":
dao = Uwnetid_DAO()
elif service == "libraries":
dao = MyLibInfo_DAO()
elif service == "libcurrics":
dao = LibCurrics_DAO()
elif service == "myplan":
dao = MyPlan_DAO()
elif service == "iasystem":
dao = IASYSTEM_DAO()
headers = {"Accept": "application/vnd.collection+json"}
subdomain = None
if url.endswith('/evaluation'):
if url.startswith('uwb/') or url.startswith('uwt/'):
subdomain = url[:3]
url = url[4:]
else:
subdomain = url[:2]
url = url[3:]
elif service == "calendar":
dao = TrumbaCalendar_DAO()
use_pre = True
else:
return HttpResponseNotFound("Unknown service: %s" % service)
url = "/%s" % quote(url)
if request.GET:
try:
url = "%s?%s" % (url, urlencode(request.GET))
except UnicodeEncodeError:
err = "Bad URL param given to the restclients browser"
return HttpResponse(err)
start = time()
try:
if service == "iasystem" and subdomain is not None:
response = dao.getURL(url, headers, subdomain)
else:
if service == "libcurrics":
if "?campus=" in url:
url = url.replace("?campus=", "/")
elif "course?" in url:
url_prefix = re.sub(r'\?.*$', "", url)
url = "%s/%s/%s/%s/%s/%s" % (
url_prefix,
request.GET["year"],
request.GET["quarter"],
request.GET["curriculum_abbr"].replace(" ", "%20"),
request.GET["course_number"],
request.GET["section_id"])
response = dao.getURL(url, headers)
except Exception as ex:
response = MockHTTP()
response.status = 500
response.data = str(ex)
end = time()
# Assume json, and try to format it.
try:
if not use_pre:
content = format_json(service, response.data)
json_data = response.data
else:
content = response.data
json_data = None
except Exception as e:
content = format_html(service, response.data)
json_data = None
context = {
"url": unquote(url),
"content": content,
"json_data": json_data,
"response_code": response.status,
"time_taken": "%f seconds" % (end - start),
"headers": response.headers,
"override_user": user_service.get_override_user(),
"use_pre": use_pre,
}
try:
loader.get_template("restclients/extra_info.html")
context["has_extra_template"] = True
context["extra_template"] = "restclients/extra_info.html"
except TemplateDoesNotExist:
pass
try:
loader.get_template("restclients/proxy_wrapper.html")
context["wrapper_template"] = "restclients/proxy_wrapper.html"
except TemplateDoesNotExist:
context["wrapper_template"] = "proxy_wrapper.html"
try:
search_template_path = re.sub(r"\..*$", "", url)
search_template = "proxy/%s%s.html" % (service, search_template_path)
loader.get_template(search_template)
context["search_template"] = search_template
context["search"] = format_search_params(url)
except TemplateDoesNotExist:
context["search_template"] = None
return render_to_response("proxy.html",
context,
context_instance=RequestContext(request))
def format_search_params(url):
params = {}
query_params = parse_qs(urlparse(url).query)
for param in query_params:
params[param] = ",".join(query_params[param])
return params
def format_json(service, content):
json_data = json.loads(content, use_decimal=True)
formatted = json.dumps(json_data, sort_keys=True, indent=4)
formatted = formatted.replace("&", "&")
formatted = formatted.replace("<", "<")
formatted = formatted.replace(">", ">")
formatted = formatted.replace(" ", " ")
formatted = formatted.replace("\n", "<br/>\n")
formatted = re.sub(r"\"/(.*?)\"",
r'"<a href="/restclients/view/%s/\1">/\1</a>"' %
service, formatted)
return formatted
def format_html(service, content):
formatted = re.sub(r"href\s*=\s*\"/(.*?)\"",
r"href='/restclients/view/%s/\1'" % service, content)
formatted = re.sub(re.compile(r"<style.*/style>", re.S), "", formatted)
formatted = clean_self_closing_divs(formatted)
return formatted
def clean_self_closing_divs(content):
cleaned = re.sub("((<div[^>]*?)/>)", "<!-- \g<1> -->\g<2>></div>", content)
return cleaned
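An illustrative input/output pair for the rewrite above:
clean_self_closing_divs('<div id="main"/>')
# returns '<!-- <div id="main"/> --><div id="main"></div>'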
| apache-2.0 | 8,306,111,691,793,203,000 | 32.605769 | 79 | 0.580973 | false |
dersphere/plugin.programm.xbmcmail | addon.py | 1 | 9677 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Tristan Fischer ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from xbmcswift2 import Plugin, xbmc, xbmcgui
from resources.lib.client import (
XBMCMailClient, InvalidCredentials, InvalidHost
)
STRINGS = {
'email_mark_seen': 30000,
'email_mark_unseen': 30001,
'email_delete': 30002,
'delete': 30003,
'are_you_sure': 30004,
'select_provider': 30005,
'connection_error': 30006,
'wrong_credentials': 30007,
'want_set_now': 30008,
'wrong_host': 30009,
'page': 30010,
'refresh_inbox': 30011,
}
plugin = Plugin()
@plugin.route('/')
def show_mailboxes():
client = _login()
if not client:
return
def _format_label(mailbox):
label = mailbox['name']
if 'unseen' in mailbox and 'total' in mailbox:
label = u'%s (%d/%d)' % (
label,
int(mailbox['unseen']),
int(mailbox['total']),
)
return label
items = [{
'label': _format_label(mailbox),
'path': plugin.url_for(
endpoint='show_mailbox',
mailbox=mailbox['raw_name'],
)
} for mailbox in client.get_mailboxes()]
return plugin.finish(items)
@plugin.route('/mailbox/<mailbox>/', options={'page': '1'})
@plugin.route('/mailbox/<mailbox>/<page>/', name='show_mailbox_page')
def show_mailbox(mailbox, page):
client = _login()
if not client:
return
page = int(page)
limit = 50
offset = (page - 1) * limit
def context_menu(mailbox, email):
items = []
if email['unseen']:
items.append(
(_('email_mark_seen'),
_view(endpoint='email_mark_seen',
mailbox=mailbox,
email_id=email['id']))
)
else:
items.append(
(_('email_mark_unseen'),
_view(endpoint='email_mark_unseen',
mailbox=mailbox,
email_id=email['id']))
)
items.append(
(_('email_delete'),
_view(endpoint='email_delete',
mailbox=mailbox,
email_id=email['id']))
)
items.append(
(_('refresh_inbox'),
_view(endpoint='refresh_inbox',
mailbox=mailbox,
email_id=email['id']))
)
return items
def _format_label(email):
label = '[B]%s[/B] - %s' % (
_format_from(email['from']),
_format_subject(email['subject']),
)
if email['unseen']:
label = '[COLOR red]%s[/COLOR]' % label
return label
def _format_from(s):
if ' <' in s:
return s.split(' <')[0].strip('"')
else:
return s.split('@')[0]
def _format_subject(s):
return s.replace('\r\n', '')
emails, has_next_page = client.get_emails(mailbox, limit, offset)
has_prev_page = page > 1
items = [{
'label': _format_label(email),
'replace_context_menu': True,
'info': {'count': i + 1},
'context_menu': context_menu(mailbox, email),
'path': plugin.url_for(
endpoint='email_show',
mailbox=email['mailbox'],
email_id=email['id']
)
} for i, email in enumerate(emails)]
if has_next_page:
items.append({
'label': '>> %s %s >>' % (_('page'), (page + 1)),
'info': {'count': len(emails) + 2},
'path': plugin.url_for(
endpoint='show_mailbox_page',
mailbox=mailbox,
page=(page + 1),
is_update='true',
)
})
if has_prev_page:
items.append({
'label': '<< %s %s <<' % (_('page'), (page - 1)),
'info': {'count': 0},
'path': plugin.url_for(
endpoint='show_mailbox_page',
mailbox=mailbox,
page=(page - 1),
is_update='true',
)
})
finish_kwargs = {
'update_listing': 'is_update' in plugin.request.args,
'sort_methods': ('playlist_order', )
}
return plugin.finish(items, **finish_kwargs)
@plugin.route('/mailbox/<mailbox>/<email_id>/mark_seen')
def email_mark_seen(mailbox, email_id):
client = _login()
if not client:
return
client.email_mark_seen(email_id, mailbox)
_refresh_view()
@plugin.route('/mailbox/<mailbox>/<email_id>/mark_unseen')
def email_mark_unseen(mailbox, email_id):
client = _login()
if not client:
return
client.email_mark_unseen(email_id, mailbox)
_refresh_view()
@plugin.route('/mailbox/<mailbox>')
def refresh_inbox(mailbox):
return
_refresh_view()
@plugin.route('/mailbox/<mailbox>/<email_id>/delete')
def email_delete(mailbox, email_id):
client = _login()
if not client:
return
confirmed = xbmcgui.Dialog().yesno(
_('delete'),
_('are_you_sure')
)
if not confirmed:
return
client.email_delete(email_id, mailbox)
_refresh_view()
@plugin.route('/mailbox/<mailbox>/<email_id>/show')
def email_show(mailbox, email_id):
client = _login()
if not client:
return
xbmc.executebuiltin('ActivateWindow(%d)' % 10147)
window = xbmcgui.Window(10147)
email = client.get_email(email_id, mailbox)
header = '%s - %s' % (email['from'], email['subject'])
text = '\r\n'.join((
'=====================================================',
'[B]From:[/B] %s' % email['from'],
'[B]To:[/B] %s' % email['to'],
'[B]Date:[/B] %s' % email['date'],
'[B]Subject:[/B] %s' % email['subject'],
'=====================================================',
email['body_text'],
))
window.getControl(1).setLabel(header)
window.getControl(5).setText(text)
def ask_provider():
providers = [
{'name': 'Custom',
'imap_host': ''},
{'name': '1und1.de',
'imap_host': 'imap.1und1.de',
'use_ssl': 'true'},
{'name': 'Arcor.de',
'imap_host': 'imap.arcor.de',
'use_ssl': 'true'},
{'name': 'Freenet.de',
'imap_host': 'mx.freenet.de',
'use_ssl': 'false'},
{'name': 'Gmail.com',
'imap_host': 'imap.gmail.com',
'use_ssl': 'true'},
{'name': 'iCloud.com',
'imap_host': 'imap.mail.me.com',
'use_ssl': 'true'},
{'name': 'T-Online.de',
'imap_host': 'secureimap.t-online.de',
'use_ssl': 'true'},
{'name': 'Web.de',
'imap_host': 'imap.web.de',
'use_ssl': 'false'},
{'name': 'Yahoo.com',
'imap_host': 'imap.mail.yahoo.com',
'use_ssl': 'true'},
]
selected = xbmcgui.Dialog().select(
_('select_provider'), [p['name'] for p in providers]
)
if selected >= 0:
return providers[selected]
@plugin.route('/settings/set_provider')
def set_default_list():
provider = ask_provider()
if provider:
plugin.set_setting('provider', provider['name'])
for k, v in provider.iteritems():
if k == 'name':
plugin.set_setting('provider', v)
else:
plugin.set_setting(k, v)
else:
plugin.set_setting('provider', 'Custom')
def _run(*args, **kwargs):
return 'XBMC.RunPlugin(%s)' % plugin.url_for(*args, **kwargs)
def _view(*args, **kwargs):
return 'XBMC.Container.Update(%s)' % plugin.url_for(*args, **kwargs)
def _refresh_view():
xbmc.executebuiltin('Container.Refresh')
def _login():
logged_in = False
while not logged_in:
try:
client = XBMCMailClient(
username=plugin.get_setting('username', unicode),
password=plugin.get_setting('password', unicode),
host=plugin.get_setting('imap_host', unicode),
use_ssl=plugin.get_setting('use_ssl', bool),
)
except InvalidCredentials:
try_again = xbmcgui.Dialog().yesno(
_('connection_error'),
_('wrong_credentials'),
_('want_set_now')
)
if not try_again:
return
plugin.open_settings()
except InvalidHost:
try_again = xbmcgui.Dialog().yesno(
_('connection_error'),
_('wrong_host'),
_('want_set_now')
)
if not try_again:
return
plugin.open_settings()
else:
logged_in = True
return client
def _(string_id):
if string_id in STRINGS:
return plugin.get_string(STRINGS[string_id])
else:
plugin.log.debug('String is missing: %s' % string_id)
return string_id
if __name__ == '__main__':
plugin.run()
| gpl-2.0 | -2,259,042,183,702,010,400 | 27.800595 | 73 | 0.512969 | false |
fjfnaranjo/fjfnaranjo-bot | tests/components/sorry/test_info.py | 1 | 2251 | from telegram.messageentity import MessageEntity
from fjfnaranjobot.components.sorry.info import (
logger,
sorry_group_handler,
sorry_handler,
)
from tests.base import CallWithMarkup
from ...base import BOT_USERNAME, BotHandlerTestCase
class SorryHandlersTests(BotHandlerTestCase):
def _fake_user_mention(self, text, username, offset):
self.set_string_command(text)
self.set_entities(
{
MessageEntity(
type=MessageEntity.MENTION,
offset=offset,
length=len(username),
user=username,
): username
}
)
def test_sorry_handler_processor(self):
with self.assert_log_dispatch("Sending 'sorry' back to the user.", logger):
sorry_handler(*self.update_and_context)
self.assert_reply_text(
"I don't know what to do about that. Sorry :(",
)
def test_sorry_group_handler_processor_bot(self):
bot_mention = f"@{BOT_USERNAME}"
bot_messages = [
f"{bot_mention}",
f"{bot_mention} some tail",
]
for message in bot_messages:
with self.subTest(message=message):
self._fake_user_mention(message, bot_mention, message.find(bot_mention))
with self.assert_log_dispatch(
"Sending 'sorry' back to the user.", logger
):
sorry_group_handler(*self.update_and_context)
self.assert_reply_calls(
[
CallWithMarkup("I don't know what to do about that. Sorry :("),
CallWithMarkup("I don't know what to do about that. Sorry :("),
]
)
def test_sorry_group_handler_processor_not_bot(self):
bot_mention = f"@{BOT_USERNAME}"
not_bot_messages = [
f"some header {bot_mention}",
f"some header {bot_mention} some tail",
]
for message in not_bot_messages:
with self.subTest(message=message):
self._fake_user_mention(message, bot_mention, message.find(bot_mention))
assert None == sorry_group_handler(*self.update_and_context)
| gpl-3.0 | -8,333,526,967,482,194,000 | 34.730159 | 88 | 0.566859 | false |
Scille/parsec-cloud | tests/monitor.py | 1 | 13104 | # Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2016-2021 Scille SAS
# Monitor POC, shamelessly taken from curio
import os
import signal
import socket
import traceback
import threading
import telnetlib
import argparse
import logging
import trio
from trio.abc import Instrument
from trio.lowlevel import current_statistics
LOGGER = logging.getLogger("trio.monitor")
MONITOR_HOST = "127.0.0.1"
MONITOR_PORT = 48802
# Telnet doesn't support unicode, so we must rely on ascii art instead :'-(
if 0:
MID_PREFIX = "├─ "
MID_CONTINUE = "│ "
END_PREFIX = "└─ "
else:
MID_PREFIX = "|- "
MID_CONTINUE = "| "
END_PREFIX = "|_ "
END_CONTINUE = " " * len(END_PREFIX)
def is_shielded_task(task):
cancel_status = task._cancel_status
while cancel_status:
if cancel_status._scope.shield:
return True
cancel_status = cancel_status._parent
return False
def _render_subtree(name, rendered_children):
lines = []
lines.append(name)
for child_lines in rendered_children:
if child_lines is rendered_children[-1]:
first_prefix = END_PREFIX
rest_prefix = END_CONTINUE
else:
first_prefix = MID_PREFIX
rest_prefix = MID_CONTINUE
lines.append(first_prefix + child_lines[0])
for child_line in child_lines[1:]:
lines.append(rest_prefix + child_line)
return lines
def _rendered_nursery_children(nursery, format_task):
return [task_tree_lines(t, format_task) for t in nursery.child_tasks]
def task_tree_lines(task, format_task):
rendered_children = []
nurseries = list(task.child_nurseries)
while nurseries:
nursery = nurseries.pop()
nursery_children = _rendered_nursery_children(nursery, format_task)
if rendered_children:
nested = _render_subtree("(nested nursery)", rendered_children)
nursery_children.append(nested)
rendered_children = nursery_children
return _render_subtree(format_task(task), rendered_children)
def render_task_tree(task, format_task):
return "\n".join(line for line in task_tree_lines(task, format_task)) + "\n"
class Monitor(Instrument):
def __init__(self, host=MONITOR_HOST, port=MONITOR_PORT):
self.address = (host, port)
self._trio_token = None
self._next_task_short_id = 0
self._tasks = {}
self._closing = None
self._ui_thread = None
def get_task_from_short_id(self, shortid):
for task in self._tasks.values():
if task._monitor_short_id == shortid:
return task
return None
def before_run(self):
LOGGER.info("Starting Trio monitor at %s:%d", *self.address)
self._trio_token = trio.lowlevel.current_trio_token()
self._ui_thread = threading.Thread(target=self.server, args=(), daemon=True)
self._closing = threading.Event()
self._ui_thread.start()
def task_spawned(self, task):
self._tasks[id(task)] = task
task._monitor_short_id = self._next_task_short_id
self._next_task_short_id += 1
task._monitor_state = "spawned"
def task_scheduled(self, task):
task._monitor_state = "scheduled"
def before_task_step(self, task):
task._monitor_state = "running"
def after_task_step(self, task):
task._monitor_state = "waiting"
def task_exited(self, task):
del self._tasks[id(task)]
# def before_io_wait(self, timeout):
# if timeout:
# print("### waiting for I/O for up to {} seconds".format(timeout))
# else:
# print("### doing a quick check for I/O")
# self._sleep_time = trio.current_time()
# def after_io_wait(self, timeout):
# duration = trio.current_time() - self._sleep_time
# print("### finished I/O check (took {} seconds)".format(duration))
def after_run(self):
LOGGER.info("Stopping Trio monitor ui thread")
self._closing.set()
self._ui_thread.join()
def server(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# set the timeout to prevent the server loop from
# blocking indefinitaly on sock.accept()
sock.settimeout(0.5)
sock.bind(self.address)
sock.listen(1)
with sock:
while not self._closing.is_set():
try:
client, addr = sock.accept()
with client:
client.settimeout(0.5)
# This bit of magic is for reading lines of input while still allowing
# timeouts and the ability for the monitor to die when curio exits.
# See Issue #108.
def readlines():
buffer = bytearray()
while not self._closing.is_set():
index = buffer.find(b"\n")
if index >= 0:
line = buffer[: index + 1].decode("latin-1")
del buffer[: index + 1]
yield line
try:
chunk = client.recv(1000)
if not chunk:
break
buffer.extend(chunk)
except socket.timeout:
pass
sout = client.makefile("w", encoding="latin-1")
self.interactive_loop(sout, readlines())
except socket.timeout:
continue
def interactive_loop(self, sout, input_lines):
"""
Main interactive loop of the monitor
"""
sout.write("Trio Monitor: %d tasks running\n" % len(self._tasks))
sout.write("Type help for commands\n")
while True:
sout.write("trio > ")
sout.flush()
resp = next(input_lines, None)
if not resp:
return
try:
if resp.startswith("q"):
self.command_exit(sout)
return
elif resp.startswith("pa"):
_, taskid_s = resp.split()
self.command_parents(sout, int(taskid_s))
elif resp.startswith("s"):
self.command_stats(sout)
elif resp.startswith("p"):
self.command_ps(sout)
elif resp.startswith("t"):
self.command_task_tree(sout)
elif resp.startswith("exit"):
self.command_exit(sout)
return
elif resp.startswith("cancel"):
_, taskid_s = resp.split()
self.command_cancel(sout, int(taskid_s))
elif resp.startswith("signal"):
_, signame = resp.split()
self.command_signal(sout, signame)
elif resp.startswith("w"):
_, taskid_s = resp.split()
self.command_where(sout, int(taskid_s))
elif resp.startswith("h"):
self.command_help(sout)
else:
sout.write("Unknown command. Type help.\n")
except Exception as e:
sout.write("Bad command. %s\n" % e)
def command_help(self, sout):
sout.write(
"""Commands:
ps : Show task table
stat : Display general runtime informations
tree : Display hierarchical view of tasks and nurseries
where taskid : Show stack frames for a task
cancel taskid : Cancel an indicated task
signal signame : Send a Unix signal
parents taskid : List task parents
quit : Leave the monitor
"""
)
def command_stats(self, sout):
async def get_current_statistics():
return current_statistics()
stats = trio.from_thread.run(get_current_statistics, trio_token=self._trio_token)
sout.write(
"""tasks_living: {s.tasks_living}
tasks_runnable: {s.tasks_runnable}
seconds_to_next_deadline: {s.seconds_to_next_deadline}
run_sync_soon_queue_size: {s.run_sync_soon_queue_size}
io_statistics:
tasks_waiting_read: {s.io_statistics.tasks_waiting_read}
tasks_waiting_write: {s.io_statistics.tasks_waiting_write}
backend: {s.io_statistics.backend}
""".format(
s=stats
)
)
def command_ps(self, sout):
headers = ("Id", "State", "Shielded", "Task")
widths = (5, 10, 10, 50)
for h, w in zip(headers, widths):
sout.write("%-*s " % (w, h))
sout.write("\n")
sout.write(" ".join(w * "-" for w in widths))
sout.write("\n")
for task in sorted(self._tasks.values(), key=lambda t: t._monitor_short_id):
sout.write(
"%-*d %-*s %-*s %-*s\n"
% (
widths[0],
task._monitor_short_id,
widths[1],
task._monitor_state,
widths[2],
"yes" if is_shielded_task(task) else "",
widths[3],
task.name,
)
)
def command_task_tree(self, sout):
root_task = next(iter(self._tasks.values()))
while root_task.parent_nursery is not None:
root_task = root_task.parent_nursery.parent_task
def _format_task(task):
return "%s (id=%s, %s%s)" % (
task.name,
task._monitor_short_id,
task._monitor_state,
", shielded" if is_shielded_task(task) else "",
)
task_tree = render_task_tree(root_task, _format_task)
sout.write(task_tree)
def command_where(self, sout, taskid):
task = self.get_task_from_short_id(taskid)
if task:
def walk_coro_stack(coro):
while coro is not None:
if hasattr(coro, "cr_frame"):
# A real coroutine
yield coro.cr_frame, coro.cr_frame.f_lineno
coro = coro.cr_await
elif hasattr(coro, "gi_frame"):
# A generator decorated with @types.coroutine
yield coro.gi_frame, coro.gi_frame.f_lineno
coro = coro.gi_yieldfrom
else:
# A coroutine wrapper (used by AsyncGenerator for
# instance), cannot go further
return
ss = traceback.StackSummary.extract(walk_coro_stack(task.coro))
tb = "".join(ss.format())
sout.write(tb + "\n")
else:
sout.write("No task %d\n" % taskid)
def command_signal(self, sout, signame):
if hasattr(signal, signame):
os.kill(os.getpid(), getattr(signal, signame))
else:
sout.write("Unknown signal %s\n" % signame)
def command_cancel(self, sout, taskid):
# TODO: how to cancel a single task ?
# Another solution could be to also display nurseries/cancel_scopes in
# the monitor and allow to cancel them. Given timeout are handled
# by cancel_scope, this could also allow us to monitor the remaining
# time (and task depending on it) in such object.
sout.write("Not supported yet...")
def command_parents(self, sout, taskid):
task = self.get_task_from_short_id(taskid)
while task:
sout.write("%-6d %12s %s\n" % (task._monitor_short_id, "running", task.name))
task = task.parent_nursery._parent_task if task.parent_nursery else None
def command_exit(self, sout):
sout.write("Leaving monitor. Hit Ctrl-C to exit\n")
sout.flush()
def monitor_client(host, port):
"""
Client to connect to the monitor via "telnet"
"""
tn = telnetlib.Telnet()
tn.open(host, port, timeout=0.5)
try:
tn.interact()
except KeyboardInterrupt:
pass
finally:
tn.close()
def main():
parser = argparse.ArgumentParser("usage: python -m trio.monitor [options]")
parser.add_argument(
"-H", "--host", dest="monitor_host", default=MONITOR_HOST, type=str, help="monitor host ip"
)
parser.add_argument(
"-p",
"--port",
dest="monitor_port",
default=MONITOR_PORT,
type=int,
help="monitor port number",
)
args = parser.parse_args()
monitor_client(args.monitor_host, args.monitor_port)
if __name__ == "__main__":
main()
| agpl-3.0 | -6,085,939,352,050,087,000 | 32.403061 | 99 | 0.530548 | false |
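One thing the module above does not show is how the instrument gets attached to a running program; a minimal sketch, assuming the file is importable as tests.monitor and using a placeholder async main:
import trio
from tests.monitor import Monitor
async def main():
    await trio.sleep_forever()
# Instruments are passed to trio.run; the monitor then listens on 127.0.0.1:48802
# and can be reached with `python tests/monitor.py` or a plain telnet client.
trio.run(main, instruments=[Monitor()])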
michael-ball/mach2 | tests/models/album_test.py | 1 | 1742 | from models.album import Album
def test_instance(database):
album = Album(database, 1)
assert album.id == 1
assert album.name == "Album 1"
assert album.date == "1999-02-04"
def test_artists(database):
album = Album(database, 1)
assert len(album.artists) == 1
assert album.artists[0].name == "Artist 2"
def test_tracks(database):
album = Album(database, 1)
assert len(album.tracks) == 2
assert album.tracks[0].name == "Album track 1"
assert album.tracks[0].tracknumber == 1
assert album.tracks[0].filename == "album/1.mp3"
assert album.tracks[1].name == "Album track 2"
assert album.tracks[1].tracknumber == 2
assert album.tracks[1].grouping == "swing"
assert album.tracks[1].filename == "album/2.mp3"
def test_delete(database):
with database.conn:
cursor = database.cursor()
cursor.execute("INSERT INTO album (name, date) VALUES(?,?)",
("Test album", "2016-02-05"))
album_id = cursor.lastrowid
cursor.close()
album = Album(database, album_id)
assert album.delete()
test_album = Album(database, album_id)
assert not hasattr(test_album, "name")
def test_search(database):
search_payload = {"name": {"data": "Album 1", "operator": "="}}
album_results = Album.search(database, **search_payload)
assert len(album_results) > 0
invalid_search_payload = {"name": {"data": "This album does not exist",
"operator": "="}}
no_album_results = Album.search(database, **invalid_search_payload)
assert len(no_album_results) == 0
def test_all(database):
album_results = Album.all(database)
assert len(album_results) > 0
| mit | -5,306,340,825,651,702,000 | 26.650794 | 75 | 0.619977 | false |
Ecotrust/hnfp | hnfp/migrations/0011_auto_20171025_0852.py | 1 | 3297 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-10-25 15:52
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hnfp', '0010_auto_20171024_1648'),
]
operations = [
migrations.CreateModel(
name='Alert',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alert_date', models.CharField(blank=True, max_length=100, null=True)),
('alert_time', models.CharField(blank=True, max_length=20, null=True)),
('alert_type', models.CharField(blank=True, max_length=400, null=True)),
('alert_created', models.DateTimeField(auto_now_add=True)),
('alert_updated', models.DateTimeField(auto_now=True)),
('alert_username', models.CharField(blank=True, max_length=800, null=True)),
('alert_location', django.contrib.gis.db.models.fields.PointField(blank=True, default=None, null=True, srid=3857)),
('alert_comment', models.CharField(blank=True, default=None, max_length=20000, null=True)),
('alert_confirmed', models.BooleanField(default=False)),
],
options={
'verbose_name_plural': 'Alerts',
},
),
migrations.CreateModel(
name='LandUseProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=300, null=True)),
('category', models.CharField(blank=True, choices=[('Forest', 'forest'), ('Road', 'road'), ('Stream', 'stream')], max_length=400, null=True)),
('summary', models.CharField(blank=True, default=None, max_length=20000, null=True)),
('description', models.CharField(blank=True, default=None, max_length=20000, null=True)),
('start_date', models.CharField(blank=True, max_length=200, null=True)),
('completion_date', models.CharField(blank=True, max_length=200, null=True)),
('actions', models.CharField(blank=True, max_length=20000, null=True)),
('dollar_costs', models.CharField(blank=True, max_length=4000, null=True)),
('emdollars', models.CharField(blank=True, max_length=4000, null=True)),
('area', django.contrib.gis.db.models.fields.PolygonField(blank=True, default=None, null=True, srid=3857)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('published', models.BooleanField(default=False)),
('username', models.CharField(blank=True, max_length=400, null=True)),
],
options={
'verbose_name_plural': 'Land Use Projects',
},
),
migrations.AddField(
model_name='observation',
name='observer_username',
field=models.CharField(blank=True, max_length=800, null=True),
),
]
| isc | -4,770,443,177,926,591,000 | 52.177419 | 158 | 0.584471 | false |
alisa-ipn/writing-composition-crawler | src/crawl.py | 1 | 2670 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 21:49:22 2015
@author: Alisa
"""
import os
import utils, urlutils
URL1 = "http://abc-english-grammar.com/1/sochinenia_po_angliiskomu_yaziku.htm"
str_to_look = "<a href=\"http://abc-english-grammar.com/1/sochinenia_po_angliiskomu_yaziku"
URL2 = "http://en365.ru/topic.htm"
URL3 = "http://www.native-english.ru/topics"
URL4 = "http://iloveenglish.ru/topics"
URL5 = "http://www.alleng.ru/english/top_08.htm"
PREFIX = "E://Study//SpeechRecognProject//crawl//raw_data//"
def get_next_target(page):
start_link = page.find(str_to_look)
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
content = urlutils.get_page(url)
write_file(content, page, end_quote)
return url, end_quote
def write_file(content, page, end_quote):
name = get_file_name(page, end_quote)
fname0 = dir_name + "//" + name
fname = fname0
i = 0
while (os.path.isfile(fname)):
i+=1
fname = fname0[:-5]+"-"+str(i)+'.html'
fout = open(fname, 'w')
print fname
fout.write(content)
def get_target_urls(content):
links = []
while True:
url, endpos = get_next_target(content)
if url:
links.append(url)
content = content[endpos:]
else:
break
return links
def get_file_name(page, end_quote):
start_name = page.find(">", end_quote)+1
end_name = page.find("<", start_name)
if page.find("-", start_name, end_name) > -1 :
end_name = page.find("-", start_name)
fname = page[start_name:end_name]
fname = fname.strip()
fname = fname.replace(" ", "_")
fname = fname.replace("/", "_")
fname = fname.replace("?", "_")
fname = fname.replace("!", "_")
fname = fname.replace("'", "_")
print fname
return fname + ".html"
def crawl_url(seed): # returns list of crawled links
crawled = []
content = urlutils.get_page(seed)
crawled = get_target_urls(content)
return crawled
# create a directory with the name of the URL if it does not exist
# if it exists, clean the files from the directory
dir_name = urlutils.get_stem_url(URL1)
dir_name = PREFIX+dir_name
if not os.path.exists(dir_name):
os.mkdir(dir_name)
utils.clean_dir(dir_name)
#crawl urls
crawled = crawl_url(URL1)
fout = open(dir_name+"//_url_list.txt",'w')
utils.print_list(crawled, fout)
fout.close()
| mit | 1,130,901,153,823,493,900 | 24.673077 | 91 | 0.582397 | false |
alexschiller/osf.io | api/logs/serializers.py | 1 | 7104 | from rest_framework import serializers as ser
from api.base.serializers import (
JSONAPISerializer,
RelationshipField,
RestrictedDictSerializer,
LinksField,
is_anonymized,
DateByVersion,
)
from website.project.model import Node
from website.util import permissions as osf_permissions
from framework.auth.core import User
from website.preprints.model import PreprintService
class NodeLogIdentifiersSerializer(RestrictedDictSerializer):
doi = ser.CharField(read_only=True)
ark = ser.CharField(read_only=True)
class NodeLogInstitutionSerializer(RestrictedDictSerializer):
id = ser.CharField(read_only=True)
name = ser.CharField(read_only=True)
class NodeLogFileParamsSerializer(RestrictedDictSerializer):
materialized = ser.CharField(read_only=True)
url = ser.URLField(read_only=True)
addon = ser.CharField(read_only=True)
node_url = ser.URLField(read_only=True, source='node.url')
node_title = ser.SerializerMethodField()
def get_node_title(self, obj):
user = self.context['request'].user
node_title = obj['node']['title']
node = Node.load(obj['node']['_id'])
if node.has_permission(user, osf_permissions.READ):
return node_title
return 'Private Component'
class NodeLogParamsSerializer(RestrictedDictSerializer):
addon = ser.CharField(read_only=True)
bucket = ser.CharField(read_only=True)
citation_name = ser.CharField(read_only=True, source='citation.name')
contributors = ser.SerializerMethodField(read_only=True)
data_set = ser.CharField(read_only=True, source='dataset')
destination = NodeLogFileParamsSerializer(read_only=True)
figshare_title = ser.CharField(read_only=True, source='figshare.title')
forward_url = ser.CharField(read_only=True)
github_user = ser.CharField(read_only=True, source='github.user')
github_repo = ser.CharField(read_only=True, source='github.repo')
file = ser.DictField(read_only=True)
filename = ser.CharField(read_only=True)
kind = ser.CharField(read_only=True)
folder = ser.CharField(read_only=True)
folder_name = ser.CharField(read_only=True)
license = ser.CharField(read_only=True, source='new_license')
identifiers = NodeLogIdentifiersSerializer(read_only=True)
institution = NodeLogInstitutionSerializer(read_only=True)
old_page = ser.CharField(read_only=True)
page = ser.CharField(read_only=True)
page_id = ser.CharField(read_only=True)
params_node = ser.SerializerMethodField(read_only=True)
params_project = ser.SerializerMethodField(read_only=True)
path = ser.CharField(read_only=True)
pointer = ser.DictField(read_only=True)
preprint = ser.CharField(read_only=True)
preprint_provider = ser.SerializerMethodField(read_only=True)
previous_institution = NodeLogInstitutionSerializer(read_only=True)
source = NodeLogFileParamsSerializer(read_only=True)
study = ser.CharField(read_only=True)
tag = ser.CharField(read_only=True)
tags = ser.CharField(read_only=True)
target = NodeLogFileParamsSerializer(read_only=True)
template_node = ser.DictField(read_only=True)
title_new = ser.CharField(read_only=True)
title_original = ser.CharField(read_only=True)
updated_fields = ser.DictField(read_only=True)
urls = ser.DictField(read_only=True)
version = ser.CharField(read_only=True)
wiki = ser.DictField(read_only=True)
def get_view_url(self, obj):
urls = obj.get('urls', None)
if urls:
view = urls.get('view', None)
if view:
return view
return None
def get_params_node(self, obj):
node_id = obj.get('node', None)
if node_id:
node = Node.load(node_id)
return {'id': node_id, 'title': node.title}
return None
def get_params_project(self, obj):
project_id = obj.get('project', None)
if project_id:
node = Node.load(project_id)
return {'id': project_id, 'title': node.title}
return None
def get_contributors(self, obj):
contributor_info = []
if is_anonymized(self.context['request']):
return contributor_info
contributor_ids = obj.get('contributors', None)
params_node = obj.get('node', None)
if contributor_ids:
for contrib_id in contributor_ids:
user = User.load(contrib_id)
unregistered_name = None
if user.unclaimed_records.get(params_node):
unregistered_name = user.unclaimed_records[params_node].get('name', None)
contributor_info.append({
'id': contrib_id,
'full_name': user.fullname,
'given_name': user.given_name,
'middle_names': user.middle_names,
'family_name': user.family_name,
'unregistered_name': unregistered_name,
'active': user.is_active
})
return contributor_info
def get_preprint_provider(self, obj):
preprint_id = obj.get('preprint', None)
if preprint_id:
preprint = PreprintService.load(preprint_id)
if preprint:
provider = preprint.provider
return {'url': provider.external_url, 'name': provider.name}
return None
class NodeLogSerializer(JSONAPISerializer):
filterable_fields = frozenset(['action', 'date'])
non_anonymized_fields = [
'id',
'date',
'action',
]
id = ser.CharField(read_only=True, source='_id')
date = DateByVersion(read_only=True)
action = ser.CharField(read_only=True)
params = NodeLogParamsSerializer(read_only=True)
links = LinksField({'self': 'get_absolute_url'})
class Meta:
type_ = 'logs'
node = RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<node._id>'},
)
original_node = RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<original_node._id>'},
)
user = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<user._id>'},
)
# This would be a node_link, except that data isn't stored in the node log params
linked_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<params.pointer.id>'}
)
template_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<params.template_node.id>'}
)
def get_absolute_url(self, obj):
return obj.absolute_url
| apache-2.0 | -8,319,683,326,383,449,000 | 36.193717 | 132 | 0.652168 | false |
brainstorm/bcbio-nextgen | bcbio/variation/vardict.py | 1 | 15794 | """Sensitive variant calling using VarDict.
Defaults to using the faster, equally sensitive Java port:
https://github.com/AstraZeneca-NGS/VarDictJava
if 'vardict' or 'vardict-java' is specified in the configuration. To use the
VarDict perl version:
https://github.com/AstraZeneca-NGS/VarDict
specify 'vardict-perl'.
"""
import os
import sys
import six
from six.moves import zip
import toolz as tz
import pybedtools
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import annotation, bamprep, bedutils, vcfutils
def _is_bed_file(target):
    return target and isinstance(target, six.string_types) and os.path.isfile(target)
def _vardict_options_from_config(items, config, out_file, target=None):
opts = ["-c 1", "-S 2", "-E 3", "-g 4"]
# ["-z", "-F", "-c", "1", "-S", "2", "-E", "3", "-g", "4", "-x", "0",
# "-k", "3", "-r", "4", "-m", "8"]
# remove low mapping quality reads
opts += ["-Q", "10"]
resources = config_utils.get_resources("vardict", config)
if resources.get("options"):
opts += resources["options"]
assert _is_bed_file(target)
if any(tz.get_in(["config", "algorithm", "coverage_interval"], x, "").lower() == "genome"
for x in items):
target = shared.remove_highdepth_regions(target, items)
target = shared.remove_lcr_regions(target, items)
target = _enforce_max_region_size(target, items[0])
opts += [target] # this must be the last option
return opts
def _enforce_max_region_size(in_file, data):
"""Ensure we don't have any chunks in the region greater than 1Mb.
Larger sections have high memory usage on VarDictJava and failures
on VarDict. This creates minimum windows from the input BED file
to avoid these issues. Downstream VarDict merging sorts out any
variants across windows.
"""
max_size = 1e6
overlap_size = 250
def _has_larger_regions(f):
return any(r.stop - r.start > max_size for r in pybedtools.BedTool(f))
out_file = "%s-regionlimit%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
if _has_larger_regions(in_file):
with file_transaction(data, out_file) as tx_out_file:
pybedtools.BedTool().window_maker(w=max_size,
s=max_size - overlap_size,
b=pybedtools.BedTool(in_file)).saveas(tx_out_file)
else:
utils.symlink_plus(in_file, out_file)
return out_file
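# Illustrative note (comment only): for a hypothetical 2.5 Mb BED region, the
# window_maker call above with w=1e6 and s=1e6-250 yields overlapping windows of
# roughly (0-1,000,000), (999,750-1,999,750) and (1,999,500-2,500,000), so no
# single VarDict invocation sees more than ~1 Mb while adjacent windows share
# 250 bp for the downstream merge of boundary variants.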
def run_vardict(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run VarDict variant calling.
"""
if vcfutils.is_paired_analysis(align_bams, items):
call_file = _run_vardict_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file
def _get_jvm_opts(data, out_file):
"""Retrieve JVM options when running the Java version of VarDict.
"""
if get_vardict_command(data) == "vardict-java":
resources = config_utils.get_resources("vardict", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx4g"])
jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
return "export VAR_DICT_OPTS='%s' && " % " ".join(jvm_opts)
else:
return ""
def _run_vardict_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect SNPs and indels with VarDict.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
raw_file = "%s-raw%s" % utils.splitext_plus(out_file)
with file_transaction(items[0], raw_file) as tx_out_file:
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(
vrs, region, out_file, items=items, do_merge=False)
num_bams = len(align_bams)
sample_vcf_names = [] # for individual sample names, given batch calling may be required
for bamfile, item in zip(align_bams, items):
# prepare commands
sample = dd.get_sample_name(item)
vardict = get_vardict_command(items[0])
strandbias = "teststrandbias.R"
var2vcf = "var2vcf_valid.pl"
opts = (" ".join(_vardict_options_from_config(items, config, out_file, target))
if _is_bed_file(target) else "")
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
compress_cmd = "| bgzip -c" if tx_out_file.endswith("gz") else ""
freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
coverage_interval = utils.get_in(config, ("algorithm", "coverage_interval"), "exome")
# for deep targeted panels, require 50 worth of coverage
var2vcf_opts = " -v 50 " if dd.get_avg_coverage(items[0]) > 5000 else ""
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
remove_dup = vcfutils.remove_dup_cl()
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
setup = ("unset R_HOME && unset JAVA_HOME && export PATH=%s:$PATH && " %
os.path.dirname(utils.Rscript_cmd()))
cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} -f {freq} "
"-N {sample} -b {bamfile} {opts} "
"| {strandbias}"
"| {var2vcf} -N {sample} -E -f {freq} {var2vcf_opts} "
"| bcftools filter -i 'QUAL >= 0' "
"| {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} {compress_cmd}")
if num_bams > 1:
temp_file_prefix = raw_file.replace(".gz", "").replace(".vcf", "") + item["name"][1]
tmp_out = temp_file_prefix + ".temp.vcf"
tmp_out += ".gz" if raw_file.endswith("gz") else ""
sample_vcf_names.append(tmp_out)
with file_transaction(item, tmp_out) as tx_tmp_file:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_tmp_file, config, samples=[sample])
else:
cmd += " > {tx_tmp_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
else:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config, samples=[sample])
else:
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
if num_bams > 1:
# N.B. merge_variant_files wants region in 1-based end-inclusive
# coordinates. Thus use bamprep.region_to_gatk
vcfutils.merge_variant_files(orig_files=sample_vcf_names,
out_file=tx_out_file, ref_file=ref_file,
config=config, region=bamprep.region_to_gatk(region))
if assoc_files.get("dbsnp"):
annotation.add_dbsnp(raw_file, assoc_files["dbsnp"], items[0], out_file)
else:
utils.symlink_plus(raw_file, out_file)
return out_file
def _safe_to_float(x):
if x is None:
return None
else:
try:
return float(x)
except ValueError:
return None
def depth_freq_filter(line, tumor_index, aligner):
"""Command line to filter VarDict calls based on depth, frequency and quality.
    Looks at regions with low depth for allele frequency (AF * DP < 6, the equivalent
    of < 13bp for heterozygote calls, but generalized). Within these calls it filters if a
    call has:
- Low mapping quality and multiple mismatches in a read (NM)
For bwa only: MQ < 55.0 and NM > 1.0 or MQ < 60.0 and NM > 2.0
- Low depth (DP < 10)
- Low QUAL (QUAL < 45)
Also filters in low allele frequency regions with poor quality, if all of these are
true:
- Allele frequency < 0.2
- Quality < 55
- P-value (SSF) > 0.06
"""
if line.startswith("#CHROM"):
headers = [('##FILTER=<ID=LowAlleleDepth,Description="Low depth per allele frequency '
'along with poor depth, quality, mapping quality and read mismatches.">'),
('##FILTER=<ID=LowFreqQuality,Description="Low frequency read with '
'poor quality and p-value (SSF).">')]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
sample_ft = {a: v for (a, v) in zip(parts[8].split(":"), parts[9 + tumor_index].split(":"))}
qual = _safe_to_float(parts[5])
dp = _safe_to_float(sample_ft.get("DP"))
af = _safe_to_float(sample_ft.get("AF"))
nm = _safe_to_float(sample_ft.get("NM"))
mq = _safe_to_float(sample_ft.get("MQ"))
ssfs = [x for x in parts[7].split(";") if x.startswith("SSF=")]
pval = _safe_to_float(ssfs[0].split("=")[-1] if ssfs else None)
fname = None
if dp is not None and af is not None:
if dp * af < 6:
if aligner == "bwa" and nm is not None and mq is not None:
if (mq < 55.0 and nm > 1.0) or (mq < 60.0 and nm > 2.0):
fname = "LowAlleleDepth"
if dp < 10:
fname = "LowAlleleDepth"
if qual is not None and qual < 45:
fname = "LowAlleleDepth"
if af is not None and qual is not None and pval is not None:
if af < 0.2 and qual < 55 and pval > 0.06:
fname = "LowFreqQuality"
if fname:
if parts[6] in set([".", "PASS"]):
parts[6] = fname
else:
parts[6] += ";%s" % fname
line = "\t".join(parts)
return line
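# Minimal usage sketch for depth_freq_filter (illustrative only, not called by
# the pipeline). The record is hypothetical and assumes the standard VarDict
# single-sample column layout: CHROM POS ID REF ALT QUAL FILTER INFO FORMAT SAMPLE.
def _example_depth_freq_filter():
    record = "\t".join(
        ["chr1", "12345", ".", "A", "G", "40", "PASS",
         "STATUS=StrongSomatic;SSF=0.01", "GT:DP:AF:NM:MQ", "0/1:8:0.5:0:60"])
    # DP * AF = 4 (< 6), DP < 10 and QUAL < 45, so the returned line has its
    # FILTER column set to LowAlleleDepth.
    return depth_freq_filter(record, 0, "bwa")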
def _run_vardict_paired(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect variants with Vardict.
This is used for paired tumor / normal samples.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
raw_file = "%s-raw%s" % utils.splitext_plus(out_file)
with file_transaction(items[0], raw_file) as tx_out_file:
target = shared.subset_variant_regions(dd.get_variant_regions(items[0]), region,
out_file, do_merge=True)
paired = vcfutils.get_paired_bams(align_bams, items)
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config,
samples=[x for x in [paired.tumor_name, paired.normal_name] if x])
else:
if not paired.normal_bam:
ann_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return ann_file
vardict = get_vardict_command(items[0])
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
strandbias = "testsomatic.R"
var2vcf = "var2vcf_paired.pl"
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
# merge bed file regions as amplicon VarDict is only supported in single sample mode
opts = " ".join(_vardict_options_from_config(items, config, out_file, target))
coverage_interval = utils.get_in(config, ("algorithm", "coverage_interval"), "exome")
# for deep targeted panels, require 50 worth of coverage
var2vcf_opts = " -v 50 " if dd.get_avg_coverage(items[0]) > 5000 else ""
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
remove_dup = vcfutils.remove_dup_cl()
if any("vardict_somatic_filter" in tz.get_in(("config", "algorithm", "tools_off"), data, [])
for data in items):
somatic_filter = ""
freq_filter = ""
else:
var2vcf_opts += " -M " # this makes VarDict soft filter non-differential variants
somatic_filter = ("| sed 's/\\\\.*Somatic\\\\/Somatic/' "
"| sed 's/REJECT,Description=\".*\">/REJECT,Description=\"Not Somatic via VarDict\">/' "
"| %s -x 'bcbio.variation.freebayes.call_somatic(x)'" %
os.path.join(os.path.dirname(sys.executable), "py"))
freq_filter = ("| bcftools filter -m '+' -s 'REJECT' -e 'STATUS !~ \".*Somatic\"' 2> /dev/null "
"| %s -x 'bcbio.variation.vardict.depth_freq_filter(x, %s, \"%s\")'" %
(os.path.join(os.path.dirname(sys.executable), "py"),
0, dd.get_aligner(paired.tumor_data)))
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
setup = ("unset R_HOME && unset JAVA_HOME && export PATH=%s:$PATH && " %
os.path.dirname(utils.Rscript_cmd()))
cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} -f {freq} "
"-N {paired.tumor_name} -b \"{paired.tumor_bam}|{paired.normal_bam}\" {opts} "
"| {strandbias} "
"| {var2vcf} -P 0.9 -m 4.25 -f {freq} {var2vcf_opts} "
"-N \"{paired.tumor_name}|{paired.normal_name}\" "
"{freq_filter} "
"| bcftools filter -i 'QUAL >= 0' "
"{somatic_filter} | {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} "
"{compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
if assoc_files.get("dbsnp"):
annotation.add_dbsnp(raw_file, assoc_files["dbsnp"], items[0], out_file)
else:
utils.symlink_plus(raw_file, out_file)
return out_file
def get_vardict_command(data):
"""
convert variantcaller specification to proper vardict command, handling
string or list specification
"""
vcaller = dd.get_variantcaller(data)
if isinstance(vcaller, list):
vardict = [x for x in vcaller if "vardict" in x]
if not vardict:
return None
vardict = vardict[0]
elif not vcaller:
return None
else:
vardict = vcaller
vardict = "vardict-java" if not vardict.endswith("-perl") else "vardict"
return vardict
| mit | -344,859,998,789,796,800 | 48.510972 | 126 | 0.542801 | false |
classcat/cctf | cctf/initializations.py | 1 | 10251 | from __future__ import division, print_function, absolute_import
import math
import tensorflow as tf
try:
from tensorflow.contrib.layers.python.layers.initializers import \
xavier_initializer
except Exception:
xavier_initializer = None
try:
from tensorflow.contrib.layers.python.layers.initializers import \
variance_scaling_initializer
except Exception:
variance_scaling_initializer = None
from .utils import get_from_module
def get(identifier):
if hasattr(identifier, '__call__'):
return identifier
else:
return get_from_module(identifier, globals(), 'initialization')
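# Usage sketch (comment only): resolve an initializer by name, or pass a
# callable through unchanged.
#
#     init = get("truncated_normal")    # -> the truncated_normal function below
#     weights = init(shape=[128, 64])   # -> an initialized Tensor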
def zeros(shape=None, dtype=tf.float32, seed=None):
""" Zeros.
Initialize a tensor with all elements set to zero.
Arguments:
shape: List of `int`. A shape to initialize a Tensor (optional).
dtype: The tensor data type.
Returns:
The Initializer, or an initialized `Tensor` if a shape is specified.
"""
if shape:
return tf.zeros(shape, dtype=dtype)
else:
return tf.constant_initializer(0.)
def uniform(shape=None, minval=0, maxval=None, dtype=tf.float32, seed=None):
""" Uniform.
Initialization with random values from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range,
while the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval`
must be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
Arguments:
shape: List of `int`. A shape to initialize a Tensor (optional).
dtype: The tensor data type. Only float are supported.
seed: `int`. Used to create a random seed for the distribution.
Returns:
The Initializer, or an initialized `Tensor` if shape is specified.
"""
if shape:
return tf.random_uniform(shape, minval=minval, maxval=maxval,
seed=seed, dtype=dtype)
else:
return tf.random_uniform_initializer(minval=minval, maxval=maxval,
seed=seed, dtype=dtype)
def uniform_scaling(shape=None, factor=1.0, dtype=tf.float32, seed=None):
""" Uniform Scaling.
Initialization with random values from uniform distribution without scaling
variance.
When initializing a deep network, it is in principle advantageous to keep
the scale of the input variance constant, so it does not explode or diminish
by reaching the final layer. If the input is `x` and the operation `x * W`,
and we want to initialize `W` uniformly at random, we need to pick `W` from
[-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]
to keep the scale intact, where `dim = W.shape[0]` (the size of the input).
A similar calculation for convolutional networks gives an analogous result
with `dim` equal to the product of the first 3 dimensions. When
nonlinearities are present, we need to multiply this by a constant `factor`.
See [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)
([pdf](http://arxiv.org/pdf/1412.6558.pdf)) for deeper motivation, experiments
and the calculation of constants. In section 2.3 there, the constants were
numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15.
Arguments:
shape: List of `int`. A shape to initialize a Tensor (optional).
factor: `float`. A multiplicative factor by which the values will be
scaled.
dtype: The tensor data type. Only float are supported.
seed: `int`. Used to create a random seed for the distribution.
Returns:
The Initializer, or an initialized `Tensor` if shape is specified.
"""
if shape:
input_size = 1.0
for dim in shape[:-1]:
input_size *= float(dim)
max_val = math.sqrt(3 / input_size) * factor
        return tf.random_uniform(shape, -max_val, max_val,
                                 dtype, seed=seed)
else:
return tf.uniform_unit_scaling_initializer(seed=seed, dtype=dtype)
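# Worked example (comment only): for a [256, 128] weight matrix with the default
# factor of 1.0, input_size is 256, so values are drawn uniformly from roughly
# [-sqrt(3 / 256), sqrt(3 / 256)] ~= [-0.108, 0.108].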
def normal(shape=None, mean=0.0, stddev=0.02, dtype=tf.float32, seed=None):
""" Normal.
Initialization with random values from a normal distribution.
Arguments:
shape: List of `int`. A shape to initialize a Tensor (optional).
mean: Same as `dtype`. The mean of the truncated normal distribution.
stddev: Same as `dtype`. The standard deviation of the truncated
normal distribution.
dtype: The tensor data type.
seed: `int`. Used to create a random seed for the distribution.
Returns:
The Initializer, or an initialized `Tensor` if shape is specified.
"""
if shape:
return tf.random_normal(shape, mean=mean, stddev=stddev, seed=seed,
dtype=dtype)
else:
return tf.random_normal_initializer(mean=mean, stddev=stddev,
seed=seed, dtype=dtype)
def truncated_normal(shape=None, mean=0.0, stddev=0.02, dtype=tf.float32,
seed=None):
""" Truncated Normal.
Initialization with random values from a normal truncated distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Arguments:
shape: List of `int`. A shape to initialize a Tensor (optional).
mean: Same as `dtype`. The mean of the truncated normal distribution.
stddev: Same as `dtype`. The standard deviation of the truncated
normal distribution.
dtype: The tensor data type.
seed: `int`. Used to create a random seed for the distribution.
Returns:
The Initializer, or an initialized `Tensor` if shape is specified.
"""
if shape:
return tf.truncated_normal(shape=shape, mean=mean, stddev=stddev,
seed=seed, dtype=dtype)
else:
return tf.truncated_normal_initializer(mean=mean, stddev=stddev,
seed=seed, dtype=dtype)
def xavier(uniform=True, seed=None, dtype=tf.float32):
""" Xavier.
Returns an initializer performing "Xavier" initialization for weights.
This initializer is designed to keep the scale of the gradients roughly the
same in all layers. In uniform distribution this ends up being the range:
`x = sqrt(6. / (in + out)); [-x, x]` and for normal distribution a standard
deviation of `sqrt(3. / (in + out))` is used.
Arguments:
uniform: Whether to use uniform or normal distributed random
initialization.
seed: A Python integer. Used to create random seeds. See
`set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer for a weight matrix.
References:
Understanding the difficulty of training deep feedforward neural
networks. International conference on artificial intelligence and
statistics. Xavier Glorot and Yoshua Bengio (2010).
Links:
[http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf]
(http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
"""
if xavier_initializer is None:
raise NotImplementedError("'xavier_initializer' not supported, "
"please update TensorFlow.")
return xavier_initializer(uniform=uniform, seed=seed, dtype=dtype)
def variance_scaling(factor=2.0, mode='FAN_IN', uniform=False, seed=None,
dtype=tf.float32):
""" Variance Scaling.
Returns an initializer that generates tensors without scaling variance.
When initializing a deep network, it is in principle advantageous to keep
the scale of the input variance constant, so it does not explode or diminish
by reaching the final layer. This initializer use the following formula:
```
if mode='FAN_IN': # Count only number of input connections.
n = fan_in
elif mode='FAN_OUT': # Count only number of output connections.
n = fan_out
elif mode='FAN_AVG': # Average number of inputs and output connections.
n = (fan_in + fan_out)/2.0
truncated_normal(shape, 0.0, stddev=sqrt(factor / n))
```
To get http://arxiv.org/pdf/1502.01852v1.pdf use (Default):
- factor=2.0 mode='FAN_IN' uniform=False
To get http://arxiv.org/abs/1408.5093 use:
- factor=1.0 mode='FAN_IN' uniform=True
To get http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf use:
- factor=1.0 mode='FAN_AVG' uniform=True.
To get xavier_initializer use either:
- factor=1.0 mode='FAN_AVG' uniform=True.
- factor=1.0 mode='FAN_AVG' uniform=False.
Arguments:
factor: Float. A multiplicative factor.
mode: String. 'FAN_IN', 'FAN_OUT', 'FAN_AVG'.
uniform: Whether to use uniform or normal distributed random
initialization.
seed: A Python integer. Used to create random seeds. See
`set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer that generates tensors with unit variance.
Raises:
ValueError: if `dtype` is not a floating point type.
TypeError: if `mode` is not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG'].
"""
if variance_scaling_initializer is None:
raise NotImplementedError("'variance_scaling_initializer' not "
"supported, please update TensorFlow.")
return variance_scaling_initializer(factor=factor, mode=mode,
uniform=uniform, seed=seed,
dtype=dtype)
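# Worked example (comment only): with the defaults factor=2.0, mode='FAN_IN' and
# uniform=False, a layer with fan-in 256 draws weights from
# truncated_normal(0.0, stddev=sqrt(2.0 / 256)) ~= truncated_normal(0.0, 0.088),
# i.e. the arxiv.org/pdf/1502.01852v1.pdf case referenced in the docstring above.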
| agpl-3.0 | -1,623,474,419,808,075,000 | 37.537594 | 82 | 0.651546 | false |
AlexOugh/horizon | openstack_dashboard/dashboards/nikolaboard/usagepanel/tabs.py | 1 | 1155 | from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.nikolaboard.usagepanel import tables
class UsageTab(tabs.TableTab):
name = _("List")
slug = "usage_tab"
table_classes = (tables.UsageTable,)
template_name = ("horizon/common/_detail_table.html")
preload = False
def has_more_data(self, table):
return self._has_more
def get_usage_data(self):
try:
marker = self.request.GET.get(
tables.UsageTable._meta.pagination_param, None)
usages, self._has_more, has_prev_data = api.nikola.usage.list_usages(
self.request,
search_opts={'marker': marker, 'paginate': True})
return usages
except Exception:
self._has_more = False
error_message = _('Unable to get usages')
exceptions.handle(self.request, error_message)
return []
class UsagepanelTabs(tabs.TabGroup):
slug = "usagepanel_tabs"
tabs = (UsageTab,)
sticky = True
| apache-2.0 | 1,395,974,317,262,188,300 | 28.615385 | 81 | 0.62684 | false |
dunkhong/grr | grr/server/grr_response_server/file_store_test.py | 1 | 25513 | #!/usr/bin/env python
"""Tests for REL_DB-based file store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools
from absl import app
from future.builtins import range
from future.builtins import str
import mock
from grr_response_core.lib import rdfvalue
from grr_response_server import data_store
from grr_response_server import file_store
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
POSITIONAL_ARGS = 0
KEYWORD_ARGS = 1
class BlobStreamTest(test_lib.GRRBaseTest):
"""BlobStream tests."""
def setUp(self):
super(BlobStreamTest, self).setUp()
self.blob_size = 10
self.blob_data, self.blob_refs = vfs_test_lib.GenerateBlobRefs(
self.blob_size, "abcde12345")
blob_ids = [ref.blob_id for ref in self.blob_refs]
data_store.BLOBS.WriteBlobs(dict(zip(blob_ids, self.blob_data)))
self.blob_stream = file_store.BlobStream(None, self.blob_refs, None)
def testRaisesIfBlobIsMissing(self):
_, missing_blob_refs = vfs_test_lib.GenerateBlobRefs(self.blob_size, "0")
blob_stream = file_store.BlobStream(None, missing_blob_refs, None)
with self.assertRaises(file_store.BlobNotFoundError):
blob_stream.read(1)
def testReadsFirstByte(self):
self.assertEqual(self.blob_stream.read(1), b"a")
def testReadsLastByte(self):
self.blob_stream.seek(-1, 2)
self.assertEqual(self.blob_stream.read(1), b"5")
def testReadsFirstChunkPlusOneByte(self):
self.assertEqual(
self.blob_stream.read(self.blob_size + 1), b"a" * self.blob_size + b"b")
def testReadsLastChunkPlusOneByte(self):
self.blob_stream.seek(-self.blob_size - 1, 2)
self.assertEqual(
self.blob_stream.read(self.blob_size + 1), b"4" + b"5" * self.blob_size)
def testReadsWholeFile(self):
self.assertEqual(self.blob_stream.read(), b"".join(self.blob_data))
def testRaisesWhenTryingToReadTooMuchDataAtOnce(self):
with test_lib.ConfigOverrider({"Server.max_unbound_read_size": 4}):
# Recreate to make sure the new config option value is applied.
self.blob_stream = file_store.BlobStream(None, self.blob_refs, None)
self.blob_stream.read(4)
with self.assertRaises(file_store.OversizedReadError):
self.blob_stream.read() # This would implicitly read 6 bytes.
def testWhenReadingWholeFileAndWholeFileSizeIsTooBig(self):
self.blob_stream.read()
self.blob_stream.seek(0)
with test_lib.ConfigOverrider(
{"Server.max_unbound_read_size": self.blob_size * 10 - 1}):
# Recreate to make sure the new config option value is applied.
self.blob_stream = file_store.BlobStream(None, self.blob_refs, None)
with self.assertRaises(file_store.OversizedReadError):
self.blob_stream.read()
def testAllowsReadingAboveLimitWhenSpecifiedManually(self):
with test_lib.ConfigOverrider({"Server.max_unbound_read_size": 1}):
# Recreate to make sure the new config option value is applied.
self.blob_stream = file_store.BlobStream(None, self.blob_refs, None)
self.blob_stream.read(self.blob_size)
class AddFileWithUnknownHashTest(test_lib.GRRBaseTest):
"""Tests for AddFileWithUnknownHash."""
def setUp(self):
super(AddFileWithUnknownHashTest, self).setUp()
self.blob_size = 10
self.blob_data, self.blob_refs = vfs_test_lib.GenerateBlobRefs(
self.blob_size, "abcd")
blob_ids = [ref.blob_id for ref in self.blob_refs]
data_store.BLOBS.WriteBlobs(dict(zip(blob_ids, self.blob_data)))
self.client_id = "C.0000111122223333"
self.client_path = db.ClientPath.OS(self.client_id, ["foo", "bar"])
def testAddsFileWithSingleBlob(self):
hash_id = file_store.AddFileWithUnknownHash(self.client_path,
self.blob_refs[:1])
self.assertEqual(hash_id.AsBytes(), self.blob_refs[0].blob_id.AsBytes())
@mock.patch.object(file_store, "BLOBS_READ_TIMEOUT",
rdfvalue.Duration.From(1, rdfvalue.MICROSECONDS))
def testRaisesIfOneSingleBlobIsNotFound(self):
blob_ref = rdf_objects.BlobReference(
offset=0, size=0, blob_id=rdf_objects.BlobID.FromBlobData(b""))
with self.assertRaises(file_store.BlobNotFoundError):
file_store.AddFileWithUnknownHash(self.client_path, [blob_ref])
@mock.patch.object(file_store, "BLOBS_READ_TIMEOUT",
rdfvalue.Duration.From(1, rdfvalue.MICROSECONDS))
def testRaisesIfOneOfTwoBlobsIsNotFound(self):
blob_ref = rdf_objects.BlobReference(
offset=0, size=0, blob_id=rdf_objects.BlobID.FromBlobData(b""))
with self.assertRaises(file_store.BlobNotFoundError):
file_store.AddFileWithUnknownHash(self.client_path,
[self.blob_refs[0], blob_ref])
def testAddsFileWithTwoBlobs(self):
hash_id = file_store.AddFileWithUnknownHash(self.client_path,
self.blob_refs)
self.assertEqual(
hash_id.AsBytes(),
rdf_objects.SHA256HashID.FromData(b"".join(self.blob_data)))
def testFilesWithOneBlobAreStillReadToEnsureBlobExists(self):
_, long_blob_refs = vfs_test_lib.GenerateBlobRefs(self.blob_size, "cd")
_, short_blob_refs1 = vfs_test_lib.GenerateBlobRefs(self.blob_size, "a")
_, short_blob_refs2 = vfs_test_lib.GenerateBlobRefs(self.blob_size, "b")
path1 = db.ClientPath.OS(self.client_id, ["foo"])
path2 = db.ClientPath.OS(self.client_id, ["bar"])
path3 = db.ClientPath.OS(self.client_id, ["baz"])
# One small file, blob is still read.
with mock.patch.object(
data_store.BLOBS, "ReadBlobs", wraps=data_store.BLOBS.ReadBlobs) as p:
file_store.AddFileWithUnknownHash(path1, short_blob_refs1)
p.assert_called_once()
# Same for multiple small files.
with mock.patch.object(
data_store.BLOBS, "ReadBlobs", wraps=data_store.BLOBS.ReadBlobs) as p:
file_store.AddFilesWithUnknownHashes({
path1: short_blob_refs1,
path2: short_blob_refs2
})
p.assert_called_once()
# One large file and two small ones result in a single read for the
# all three blobs.
with mock.patch.object(
data_store.BLOBS, "ReadBlobs", wraps=data_store.BLOBS.ReadBlobs) as p:
file_store.AddFilesWithUnknownHashes({
path1: short_blob_refs1,
path2: short_blob_refs2,
path3: long_blob_refs
})
p.assert_called_once()
self.assertLen(p.call_args[POSITIONAL_ARGS], 1)
self.assertEmpty(p.call_args[KEYWORD_ARGS])
self.assertCountEqual(p.call_args[0][0], [
r.blob_id for r in itertools.chain(short_blob_refs1, short_blob_refs2,
long_blob_refs)
])
@mock.patch.object(file_store.EXTERNAL_FILE_STORE, "AddFiles")
def testAddsFileToExternalFileStore(self, add_file_mock):
hash_id = file_store.AddFileWithUnknownHash(self.client_path,
self.blob_refs)
add_file_mock.assert_called_once()
args = add_file_mock.call_args_list[0][0]
self.assertEqual(args[0][hash_id].client_path, self.client_path)
self.assertEqual(args[0][hash_id].blob_refs, self.blob_refs)
def _BlobRefsFromByteArray(data_array):
offset = 0
blob_refs = []
for data in data_array:
blob_id = rdf_objects.BlobID.FromBlobData(data)
blob_refs.append(
rdf_objects.BlobReference(
offset=offset, size=len(data), blob_id=blob_id))
offset += len(data)
return blob_refs
class AddFilesWithUnknownHashesTest(test_lib.GRRBaseTest):
def testDoesNotFailForEmptyDict(self):
file_store.AddFilesWithUnknownHashes({})
def testDoesNotFailForEmptyFiles(self):
client_id = self.SetupClient(0)
paths = []
for idx in range(100):
components = ("foo", "bar", str(idx))
paths.append(db.ClientPath.OS(client_id=client_id, components=components))
hash_ids = file_store.AddFilesWithUnknownHashes(
{path: [] for path in paths})
empty_hash_id = rdf_objects.SHA256HashID.FromData(b"")
for path in paths:
self.assertEqual(hash_ids[path], empty_hash_id)
def testSimpleMultiplePaths(self):
foo_blobs = [b"foo", b"norf", b"thud"]
foo_blob_refs = _BlobRefsFromByteArray(foo_blobs)
foo_blob_ids = [ref.blob_id for ref in foo_blob_refs]
foo_hash_id = rdf_objects.SHA256HashID.FromData(b"".join(foo_blobs))
data_store.BLOBS.WriteBlobs(dict(zip(foo_blob_ids, foo_blobs)))
bar_blobs = [b"bar", b"quux", b"blargh"]
bar_blob_refs = _BlobRefsFromByteArray(bar_blobs)
bar_blob_ids = [ref.blob_id for ref in bar_blob_refs]
bar_hash_id = rdf_objects.SHA256HashID.FromData(b"".join(bar_blobs))
data_store.BLOBS.WriteBlobs(dict(zip(bar_blob_ids, bar_blobs)))
client_id = self.SetupClient(0)
foo_path = db.ClientPath.OS(client_id=client_id, components=("foo",))
bar_path = db.ClientPath.OS(client_id=client_id, components=("bar",))
hash_ids = file_store.AddFilesWithUnknownHashes({
foo_path: foo_blob_refs,
bar_path: bar_blob_refs,
})
self.assertLen(hash_ids, 2)
self.assertEqual(hash_ids[foo_path], foo_hash_id)
self.assertEqual(hash_ids[bar_path], bar_hash_id)
def testSimpleOverlappingBlobIds(self):
foo_blobs = [b"foo", b"norf", b"quux", b"thud"]
bar_blobs = [b"bar", b"norf", b"blag", b"thud"]
foo_blob_refs = _BlobRefsFromByteArray(foo_blobs)
foo_blob_ids = [ref.blob_id for ref in foo_blob_refs]
foo_hash_id = rdf_objects.SHA256HashID.FromData(b"".join(foo_blobs))
bar_blob_refs = _BlobRefsFromByteArray(bar_blobs)
bar_blob_ids = [ref.blob_id for ref in bar_blob_refs]
bar_hash_id = rdf_objects.SHA256HashID.FromData(b"".join(bar_blobs))
data_store.BLOBS.WriteBlobs(dict(zip(foo_blob_ids, foo_blobs)))
data_store.BLOBS.WriteBlobs(dict(zip(bar_blob_ids, bar_blobs)))
client_id = self.SetupClient(0)
foo_path = db.ClientPath.OS(client_id=client_id, components=("foo", "quux"))
bar_path = db.ClientPath.OS(client_id=client_id, components=("bar", "blag"))
hash_ids = file_store.AddFilesWithUnknownHashes({
foo_path: foo_blob_refs,
bar_path: bar_blob_refs,
})
self.assertLen(hash_ids, 2)
self.assertEqual(hash_ids[foo_path], foo_hash_id)
self.assertEqual(hash_ids[bar_path], bar_hash_id)
def testLargeNumberOfPaths(self):
client_id = self.SetupClient(0)
paths = []
for idx in range(1337):
components = ("foo", "bar", str(idx))
paths.append(db.ClientPath.OS(client_id=client_id, components=components))
blobs = [b"foo", b"bar", b"baz"]
blob_refs = _BlobRefsFromByteArray(blobs)
blob_ids = [ref.blob_id for ref in blob_refs]
data_store.BLOBS.WriteBlobs(dict(zip(blob_ids, blobs)))
hash_ids = file_store.AddFilesWithUnknownHashes(
{path: blob_refs for path in paths})
expected_hash_id = rdf_objects.SHA256HashID.FromData(b"foobarbaz")
for path in paths:
self.assertEqual(hash_ids[path], expected_hash_id)
def testLargeNumberOfBlobs(self):
def Blobs(prefix):
for idx in range(1337):
yield prefix + str(idx).encode("ascii")
foo_blobs = list(Blobs(b"foo"))
foo_blob_refs = _BlobRefsFromByteArray(foo_blobs)
foo_blob_ids = [ref.blob_id for ref in foo_blob_refs]
foo_hash_id = rdf_objects.SHA256HashID.FromData(b"".join(foo_blobs))
data_store.BLOBS.WriteBlobs(dict(zip(foo_blob_ids, foo_blobs)))
bar_blobs = list(Blobs(b"bar"))
bar_blob_refs = _BlobRefsFromByteArray(bar_blobs)
bar_blob_ids = [ref.blob_id for ref in bar_blob_refs]
bar_hash_id = rdf_objects.SHA256HashID.FromData(b"".join(bar_blobs))
data_store.BLOBS.WriteBlobs(dict(zip(bar_blob_ids, bar_blobs)))
client_id = self.SetupClient(0)
foo_path = db.ClientPath.OS(client_id=client_id, components=("foo",))
bar_path = db.ClientPath.OS(client_id=client_id, components=("bar",))
with mock.patch.object(file_store, "_BLOBS_READ_BATCH_SIZE", 42):
hash_ids = file_store.AddFilesWithUnknownHashes({
foo_path: foo_blob_refs,
bar_path: bar_blob_refs,
})
self.assertLen(hash_ids, 2)
self.assertEqual(hash_ids[foo_path], foo_hash_id)
self.assertEqual(hash_ids[bar_path], bar_hash_id)
class OpenFileTest(test_lib.GRRBaseTest):
"""Tests for OpenFile."""
def setUp(self):
super(OpenFileTest, self).setUp()
self.client_id = self.SetupClient(0)
self.client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
blob_size = 10
blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(blob_size, "abcdef")
blob_ids = [ref.blob_id for ref in blob_refs]
data_store.BLOBS.WriteBlobs(dict(zip(blob_ids, blob_data)))
blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(blob_size, "def")
self.hash_id = file_store.AddFileWithUnknownHash(self.client_path,
blob_refs)
self.data = b"".join(blob_data)
_, blob_refs = vfs_test_lib.GenerateBlobRefs(blob_size, "abc")
self.other_hash_id = file_store.AddFileWithUnknownHash(
self.client_path, blob_refs)
self.invalid_hash_id = rdf_objects.SHA256HashID.FromData(b"")
def _PathInfo(self, hash_id=None):
pi = rdf_objects.PathInfo.OS(components=self.client_path.components)
if hash_id:
pi.hash_entry.sha256 = hash_id.AsBytes()
return pi
def testOpensFileWithSinglePathInfoWithHash(self):
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
def testRaisesForNonExistentFile(self):
with self.assertRaises(file_store.FileNotFoundError):
file_store.OpenFile(self.client_path)
def testRaisesForFileWithSinglePathInfoWithoutHash(self):
data_store.REL_DB.WritePathInfos(self.client_id, [self._PathInfo()])
with self.assertRaises(file_store.FileHasNoContentError):
file_store.OpenFile(self.client_path)
def testRaisesForFileWithSinglePathInfoWithUnknownHash(self):
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.invalid_hash_id)])
with self.assertRaises(file_store.FileHasNoContentError):
file_store.OpenFile(self.client_path)
def testOpensFileWithTwoPathInfosWhereOldestHasHash(self):
# Oldest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
# Newest.
data_store.REL_DB.WritePathInfos(self.client_id, [self._PathInfo()])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
def testOpensFileWithTwoPathInfosWhereNewestHasHash(self):
# Oldest.
data_store.REL_DB.WritePathInfos(self.client_id, [self._PathInfo()])
# Newest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
def testOpensFileWithTwoPathInfosWhereOldestHashIsUnknown(self):
# Oldest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.invalid_hash_id)])
# Newest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
def testOpensFileWithTwoPathInfosWhereNewestHashIsUnknown(self):
# Oldest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
# Newest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.invalid_hash_id)])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
def testOpensLatestVersionForPathWithTwoPathInfosWithHashes(self):
# Oldest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.other_hash_id)])
# Newest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
class StreamFilesChunksTest(test_lib.GRRBaseTest):
"""Tests for StreamFilesChunks."""
def _WriteFile(self, client_path, blobs_range=None):
r_from, r_to = blobs_range or (0, 0)
blob_data, blob_refs = vfs_test_lib.GenerateBlobRefs(
self.blob_size, "abcdef"[r_from:r_to])
vfs_test_lib.CreateFileWithBlobRefsAndData(client_path, blob_refs,
blob_data)
return blob_data, blob_refs
def setUp(self):
super(StreamFilesChunksTest, self).setUp()
self.client_id = self.SetupClient(0)
self.client_id_other = self.SetupClient(1)
self.blob_size = 10
def testStreamsSingleFileWithSingleChunk(self):
client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
blob_data, _ = self._WriteFile(client_path, (0, 1))
chunks = list(file_store.StreamFilesChunks([client_path]))
self.assertLen(chunks, 1)
self.assertEqual(chunks[0].client_path, client_path)
self.assertEqual(chunks[0].data, blob_data[0])
self.assertEqual(chunks[0].chunk_index, 0)
self.assertEqual(chunks[0].total_chunks, 1)
self.assertEqual(chunks[0].offset, 0)
self.assertEqual(chunks[0].total_size, self.blob_size)
def testRaisesIfSingleFileChunkIsMissing(self):
_, missing_blob_refs = vfs_test_lib.GenerateBlobRefs(self.blob_size, "0")
hash_id = rdf_objects.SHA256HashID.FromSerializedBytes(
missing_blob_refs[0].blob_id.AsBytes())
data_store.REL_DB.WriteHashBlobReferences({hash_id: missing_blob_refs})
client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
path_info = rdf_objects.PathInfo.OS(components=client_path.components)
path_info.hash_entry.sha256 = hash_id.AsBytes()
data_store.REL_DB.WritePathInfos(client_path.client_id, [path_info])
# Just getting the generator doesn't raise.
chunks = file_store.StreamFilesChunks([client_path])
# Iterating through the generator does actually raise.
with self.assertRaises(file_store.BlobNotFoundError):
list(chunks)
def testStreamsSingleFileWithTwoChunks(self):
client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
blob_data, _ = self._WriteFile(client_path, (0, 2))
chunks = list(file_store.StreamFilesChunks([client_path]))
self.assertLen(chunks, 2)
self.assertEqual(chunks[0].client_path, client_path)
self.assertEqual(chunks[0].data, blob_data[0])
self.assertEqual(chunks[0].chunk_index, 0)
self.assertEqual(chunks[0].total_chunks, 2)
self.assertEqual(chunks[0].offset, 0)
self.assertEqual(chunks[0].total_size, self.blob_size * 2)
self.assertEqual(chunks[1].client_path, client_path)
self.assertEqual(chunks[1].data, blob_data[1])
self.assertEqual(chunks[1].chunk_index, 1)
self.assertEqual(chunks[1].total_chunks, 2)
self.assertEqual(chunks[1].offset, self.blob_size)
self.assertEqual(chunks[1].total_size, self.blob_size * 2)
def testStreamsTwoFilesWithTwoChunksInEach(self):
client_path_1 = db.ClientPath.OS(self.client_id, ("foo", "bar"))
blob_data_1, _ = self._WriteFile(client_path_1, (0, 2))
client_path_2 = db.ClientPath.OS(self.client_id_other, ("foo", "bar"))
blob_data_2, _ = self._WriteFile(client_path_2, (2, 4))
chunks = list(file_store.StreamFilesChunks([client_path_1, client_path_2]))
self.assertLen(chunks, 4)
self.assertEqual(chunks[0].client_path, client_path_1)
self.assertEqual(chunks[0].data, blob_data_1[0])
self.assertEqual(chunks[0].chunk_index, 0)
self.assertEqual(chunks[0].total_chunks, 2)
self.assertEqual(chunks[0].offset, 0)
self.assertEqual(chunks[0].total_size, self.blob_size * 2)
self.assertEqual(chunks[1].client_path, client_path_1)
self.assertEqual(chunks[1].data, blob_data_1[1])
self.assertEqual(chunks[1].chunk_index, 1)
self.assertEqual(chunks[1].total_chunks, 2)
self.assertEqual(chunks[1].offset, self.blob_size)
self.assertEqual(chunks[1].total_size, self.blob_size * 2)
self.assertEqual(chunks[2].client_path, client_path_2)
self.assertEqual(chunks[2].data, blob_data_2[0])
self.assertEqual(chunks[2].chunk_index, 0)
self.assertEqual(chunks[2].total_chunks, 2)
self.assertEqual(chunks[2].offset, 0)
self.assertEqual(chunks[2].total_size, self.blob_size * 2)
self.assertEqual(chunks[3].client_path, client_path_2)
self.assertEqual(chunks[3].data, blob_data_2[1])
self.assertEqual(chunks[3].chunk_index, 1)
self.assertEqual(chunks[3].total_chunks, 2)
self.assertEqual(chunks[3].offset, self.blob_size)
self.assertEqual(chunks[3].total_size, self.blob_size * 2)
def testIgnoresFileWithoutChunks(self):
client_path_1 = db.ClientPath.OS(self.client_id, ("foo", "bar"))
self._WriteFile(client_path_1, None)
client_path_2 = db.ClientPath.OS(self.client_id_other, ("foo", "bar"))
blob_data_2, _ = self._WriteFile(client_path_2, (2, 4))
chunks = list(file_store.StreamFilesChunks([client_path_1, client_path_2]))
self.assertLen(chunks, 2)
self.assertEqual(chunks[0].client_path, client_path_2)
self.assertEqual(chunks[0].data, blob_data_2[0])
self.assertEqual(chunks[0].chunk_index, 0)
self.assertEqual(chunks[0].total_chunks, 2)
self.assertEqual(chunks[0].offset, 0)
self.assertEqual(chunks[0].total_size, self.blob_size * 2)
self.assertEqual(chunks[1].client_path, client_path_2)
self.assertEqual(chunks[1].data, blob_data_2[1])
self.assertEqual(chunks[1].chunk_index, 1)
self.assertEqual(chunks[1].total_chunks, 2)
self.assertEqual(chunks[1].offset, self.blob_size)
self.assertEqual(chunks[1].total_size, self.blob_size * 2)
def testRespectsClientPathsOrder(self):
client_path_1 = db.ClientPath.OS(self.client_id, ("foo", "bar"))
self._WriteFile(client_path_1, (0, 1))
client_path_2 = db.ClientPath.OS(self.client_id_other, ("foo", "bar"))
self._WriteFile(client_path_2, (0, 1))
chunks = list(file_store.StreamFilesChunks([client_path_1, client_path_2]))
self.assertLen(chunks, 2)
self.assertEqual(chunks[0].client_path, client_path_1)
self.assertEqual(chunks[1].client_path, client_path_2)
# Check that reversing the list of requested client paths reverses the
# result.
chunks = list(file_store.StreamFilesChunks([client_path_2, client_path_1]))
self.assertLen(chunks, 2)
self.assertEqual(chunks[0].client_path, client_path_2)
self.assertEqual(chunks[1].client_path, client_path_1)
def testReadsLatestVersionWhenStreamingWithoutSpecifiedTimestamp(self):
client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
blob_data_1, _ = self._WriteFile(client_path, (0, 1))
blob_data_2, _ = self._WriteFile(client_path, (1, 2))
chunks = list(file_store.StreamFilesChunks([client_path]))
self.assertLen(chunks, 1)
self.assertEqual(chunks[0].client_path, client_path)
self.assertNotEqual(chunks[0].data, blob_data_1[0])
self.assertEqual(chunks[0].data, blob_data_2[0])
def testRespectsMaxTimestampWhenStreamingSingleFile(self):
client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
blob_data_1, _ = self._WriteFile(client_path, (0, 1))
timestamp_1 = rdfvalue.RDFDatetime.Now()
blob_data_2, _ = self._WriteFile(client_path, (1, 2))
timestamp_2 = rdfvalue.RDFDatetime.Now()
chunks = list(
file_store.StreamFilesChunks([client_path], max_timestamp=timestamp_2))
self.assertLen(chunks, 1)
self.assertEqual(chunks[0].client_path, client_path)
self.assertNotEqual(chunks[0].data, blob_data_1[0])
self.assertEqual(chunks[0].data, blob_data_2[0])
chunks = list(
file_store.StreamFilesChunks([client_path], max_timestamp=timestamp_1))
self.assertLen(chunks, 1)
self.assertEqual(chunks[0].client_path, client_path)
self.assertEqual(chunks[0].data, blob_data_1[0])
self.assertNotEqual(chunks[0].data, blob_data_2[0])
def testRespectsMaxSizeEqualToOneChunkWhenStreamingSingleFile(self):
client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
blob_data, _ = self._WriteFile(client_path, (0, 2))
chunks = list(
file_store.StreamFilesChunks([client_path], max_size=self.blob_size))
self.assertLen(chunks, 1)
self.assertEqual(chunks[0].data, blob_data[0])
def testRespectsMaxSizeGreaterThanOneChunkWhenStreamingSingleFile(self):
client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
blob_data, _ = self._WriteFile(client_path, (0, 2))
chunks = list(
file_store.StreamFilesChunks([client_path],
max_size=self.blob_size + 1))
self.assertLen(chunks, 2)
self.assertEqual(chunks[0].data, blob_data[0])
self.assertEqual(chunks[1].data, blob_data[1])
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| apache-2.0 | 759,679,221,018,685,600 | 38.801872 | 80 | 0.677106 | false |
JoelBender/bacpypes | sandbox/local_schedule_object_t2.py | 1 | 26204 | #!/usr/bin/env python
"""
Local Schedule Object Test
"""
import sys
import calendar
from time import mktime as _mktime, localtime as _localtime
from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.core import run, deferred
from bacpypes.task import OneShotTask, TaskManager
from bacpypes.primitivedata import Atomic, Null, Integer, Unsigned, Real, Date, Time, CharacterString
from bacpypes.constructeddata import Array, ArrayOf, SequenceOf, AnyAtomic
from bacpypes.basetypes import CalendarEntry, DailySchedule, DateRange, \
DeviceObjectPropertyReference, SpecialEvent, SpecialEventPeriod, TimeValue
from bacpypes.object import register_object_type, get_datatype, \
WritableProperty, ScheduleObject, AnalogValueObject
from bacpypes.app import Application
from bacpypes.local.object import CurrentPropertyListMixIn
from bacpypes.local.device import LocalDeviceObject
### testing
import time
sys.path[:0] = ['.']
from tests.time_machine import TimeMachine, reset_time_machine, run_time_machine, xdatetime
global time_machine
###
# some debugging
_debug = 0
_log = ModuleLogger(globals())
### testing
@bacpypes_debugging
def setup_module(module=None):
if _debug: setup_module._debug("setup_module %r", module)
global time_machine
# this is a singleton
time_machine = TimeMachine()
# make sure this is the same one referenced by the functions
assert time_machine is reset_time_machine.__globals__['time_machine']
assert time_machine is run_time_machine.__globals__['time_machine']
@bacpypes_debugging
def teardown_module():
if _debug: teardown_module._debug("teardown_module")
global time_machine
# all done
time_machine = None
###
#
# match_date
#
def match_date(date, date_pattern):
"""
Match a specific date, a four-tuple with no special values, with a date
pattern, four-tuple possibly having special values.
"""
# unpack the date and pattern
year, month, day, day_of_week = date
year_p, month_p, day_p, day_of_week_p = date_pattern
# check the year
if year_p == 255:
# any year
pass
elif year != year_p:
# specific year
return False
# check the month
if month_p == 255:
# any month
pass
elif month_p == 13:
# odd months
if (month % 2) == 0:
return False
elif month_p == 14:
# even months
if (month % 2) == 1:
return False
elif month != month_p:
# specific month
return False
# check the day
if day_p == 255:
# any day
pass
elif day_p == 32:
# last day of the month
last_day = calendar.monthrange(year + 1900, month)[1]
if day != last_day:
return False
elif day_p == 33:
# odd days of the month
if (day % 2) == 0:
return False
elif day_p == 34:
# even days of the month
if (day % 2) == 1:
return False
elif day != day_p:
# specific day
return False
# check the day of week
if day_of_week_p == 255:
# any day of the week
pass
elif day_of_week != day_of_week_p:
# specific day of the week
return False
# all tests pass
return True
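# Worked examples for match_date (comment only), using the
# (year - 1900, month, day, day-of-week) tuples this module passes around;
# (119, 6, 14, 5) is Friday, June 14, 2019:
#
#     match_date((119, 6, 14, 5), (255, 255, 255, 255))   # True - any date
#     match_date((119, 6, 14, 5), (255, 14, 255, 255))    # True - even months
#     match_date((119, 6, 14, 5), (255, 255, 32, 255))    # False - not the last day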
#
# match_date_range
#
def match_date_range(date, date_range):
"""
Match a specific date, a four-tuple with no special values, with a DateRange
object which as a start date and end date.
"""
return (date[:3] >= date_range.startDate[:3]) \
and (date[:3] <= date_range.endDate[:3])
#
# match_weeknday
#
def match_weeknday(date, weeknday):
"""
Match a specific date, a four-tuple with no special values, with a
BACnetWeekNDay, an octet string with three (unsigned) octets.
"""
# unpack the date
year, month, day, day_of_week = date
last_day = calendar.monthrange(year + 1900, month)[1]
# unpack the date pattern octet string
if sys.version_info[0] == 2:
weeknday_unpacked = [ord(c) for c in weeknday]
elif sys.version_info[0] == 3:
weeknday_unpacked = [c for c in weeknday]
else:
raise NotImplementedError("match_weeknday requires Python 2.x or 3.x")
month_p, week_of_month_p, day_of_week_p = weeknday_unpacked
# check the month
if month_p == 255:
# any month
pass
elif month_p == 13:
# odd months
if (month % 2) == 0:
return False
elif month_p == 14:
# even months
if (month % 2) == 1:
return False
elif month != month_p:
# specific month
return False
# check the week of the month
if week_of_month_p == 255:
# any week
pass
elif week_of_month_p == 1:
# days numbered 1-7
if (day > 7):
return False
elif week_of_month_p == 2:
# days numbered 8-14
if (day < 8) or (day > 14):
return False
elif week_of_month_p == 3:
# days numbered 15-21
if (day < 15) or (day > 21):
return False
elif week_of_month_p == 4:
# days numbered 22-28
if (day < 22) or (day > 28):
return False
elif week_of_month_p == 5:
# days numbered 29-31
if (day < 29) or (day > 31):
return False
elif week_of_month_p == 6:
# last 7 days of this month
if (day < last_day - 6):
return False
elif week_of_month_p == 7:
# any of the 7 days prior to the last 7 days of this month
if (day < last_day - 13) or (day > last_day - 7):
return False
elif week_of_month_p == 8:
# any of the 7 days prior to the last 14 days of this month
if (day < last_day - 20) or (day > last_day - 14):
return False
elif week_of_month_p == 9:
# any of the 7 days prior to the last 21 days of this month
if (day < last_day - 27) or (day > last_day - 21):
return False
# check the day
if day_of_week_p == 255:
# any day
pass
elif day_of_week != day_of_week_p:
# specific day
return False
# all tests pass
return True
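# Worked example for match_weeknday (comment only): a BACnetWeekNDay is three
# octets of (month, week-of-month, day-of-week), so b'\xff\x01\x05' means
# "the first Friday of any month":
#
#     match_weeknday((119, 6, 7, 5), b'\xff\x01\x05')     # True - June 7, 2019
#     match_weeknday((119, 6, 14, 5), b'\xff\x01\x05')    # False - second Friday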
#
# date_in_calendar_entry
#
@bacpypes_debugging
def date_in_calendar_entry(date, calendar_entry):
if _debug: date_in_calendar_entry._debug("date_in_calendar_entry %r %r", date, calendar_entry)
match = False
if calendar_entry.date:
match = match_date(date, calendar_entry.date)
elif calendar_entry.dateRange:
match = match_date_range(date, calendar_entry.dateRange)
elif calendar_entry.weekNDay:
match = match_weeknday(date, calendar_entry.weekNDay)
else:
        raise RuntimeError("calendar entry must have a date, dateRange, or weekNDay")
if _debug: date_in_calendar_entry._debug(" - match: %r", match)
return match
#
# datetime_to_time
#
def datetime_to_time(date, time):
"""Take the date and time 4-tuples and return the time in seconds since
the epoch as a floating point number."""
if (255 in date) or (255 in time):
raise RuntimeError("specific date and time required")
time_tuple = (
date[0]+1900, date[1], date[2],
time[0], time[1], time[2],
0, 0, -1,
)
return _mktime(time_tuple)
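# Worked example for datetime_to_time (comment only): both tuples must be fully
# specified, and the result is local time in seconds since the epoch:
#
#     datetime_to_time((119, 6, 14, 5), (8, 30, 0, 0))
#     # -> same value as time.mktime((2019, 6, 14, 8, 30, 0, 0, 0, -1))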
#
# LocalScheduleObject
#
@bacpypes_debugging
@register_object_type(vendor_id=999)
class LocalScheduleObject(CurrentPropertyListMixIn, ScheduleObject):
properties = [
WritableProperty('presentValue', AnyAtomic),
]
def __init__(self, **kwargs):
if _debug: LocalScheduleObject._debug("__init__ %r", kwargs)
ScheduleObject.__init__(self, **kwargs)
# attach an interpreter task
self._task = LocalScheduleInterpreter(self)
# add some monitors
for prop in ('weeklySchedule', 'exceptionSchedule', 'scheduleDefault'):
self._property_monitors[prop].append(self._check_reliability)
# check it now
self._check_reliability()
def _check_reliability(self, old_value=None, new_value=None):
"""This function is called when the object is created and after
one of its configuration properties has changed. The new and old value
parameters are ignored, this is called after the property has been
changed and this is only concerned with the current value."""
if _debug: LocalScheduleObject._debug("_check_reliability %r %r", old_value, new_value)
try:
schedule_default = self.scheduleDefault
if schedule_default is None:
raise ValueError("scheduleDefault expected")
if not isinstance(schedule_default, Atomic):
raise TypeError("scheduleDefault must be an instance of an atomic type")
if (self.weeklySchedule is None) and (self.exceptionSchedule is None):
raise ValueError("schedule required")
schedule_datatype = schedule_default.__class__
if _debug: LocalScheduleObject._debug(" - schedule_datatype: %r", schedule_datatype)
# check the weekly schedule values
if self.weeklySchedule:
for daily_schedule in self.weeklySchedule:
for time_value in daily_schedule.daySchedule:
if _debug: LocalScheduleObject._debug(" - daily time_value: %r", time_value)
if time_value is None:
pass
elif not isinstance(time_value.value, (Null, schedule_datatype)):
if _debug: LocalScheduleObject._debug(" - wrong type: expected %r, got %r",
schedule_datatype,
time_value.__class__,
)
raise TypeError("wrong type")
elif 255 in time_value.time:
if _debug: LocalScheduleObject._debug(" - wildcard in time")
raise ValueError("must be a specific time")
# check the exception schedule values
if self.exceptionSchedule:
for special_event in self.exceptionSchedule:
for time_value in special_event.listOfTimeValues:
if _debug: LocalScheduleObject._debug(" - special event time_value: %r", time_value)
if time_value is None:
pass
elif not isinstance(time_value.value, (Null, schedule_datatype)):
if _debug: LocalScheduleObject._debug(" - wrong type: expected %r, got %r",
schedule_datatype,
time_value.__class__,
)
raise TypeError("wrong type")
# check list of object property references
obj_prop_refs = self.listOfObjectPropertyReferences
if obj_prop_refs:
for obj_prop_ref in obj_prop_refs:
if obj_prop_ref.deviceIdentifier:
raise RuntimeError("no external references")
# get the datatype of the property to be written
obj_type = obj_prop_ref.objectIdentifier[0]
datatype = get_datatype(obj_type, obj_prop_ref.propertyIdentifier)
if _debug: LocalScheduleObject._debug(" - datatype: %r", datatype)
if issubclass(datatype, Array) and (obj_prop_ref.propertyArrayIndex is not None):
if obj_prop_ref.propertyArrayIndex == 0:
datatype = Unsigned
else:
datatype = datatype.subtype
if _debug: LocalScheduleObject._debug(" - datatype: %r", datatype)
if datatype is not schedule_datatype:
if _debug: LocalScheduleObject._debug(" - wrong type: expected %r, got %r",
datatype,
schedule_datatype,
)
raise TypeError("wrong type")
# all good
self.reliability = 'noFaultDetected'
if _debug: LocalScheduleObject._debug(" - no fault detected")
except Exception as err:
if _debug: LocalScheduleObject._debug(" - exception: %r", err)
self.reliability = 'configurationError'
#
# LocalScheduleInterpreter
#
@bacpypes_debugging
class LocalScheduleInterpreter(OneShotTask):
def __init__(self, sched_obj):
if _debug: LocalScheduleInterpreter._debug("__init__ %r", sched_obj)
OneShotTask.__init__(self)
# reference the schedule object to update
self.sched_obj = sched_obj
# add a monitor for the present value
sched_obj._property_monitors['presentValue'].append(self.present_value_changed)
# call to interpret the schedule
deferred(self.process_task)
def present_value_changed(self, old_value, new_value):
"""This function is called when the presentValue of the local schedule
object has changed, both internally by this interpreter, or externally
by some client using WriteProperty."""
if _debug: LocalScheduleInterpreter._debug("present_value_changed %s %s", old_value, new_value)
# if this hasn't been added to an application, there's nothing to do
if not self.sched_obj._app:
if _debug: LocalScheduleInterpreter._debug(" - no application")
return
# process the list of [device] object property [array index] references
obj_prop_refs = self.sched_obj.listOfObjectPropertyReferences
if not obj_prop_refs:
if _debug: LocalScheduleInterpreter._debug(" - no writes defined")
return
# primitive values just set the value part
new_value = new_value.value
# loop through the writes
for obj_prop_ref in obj_prop_refs:
if obj_prop_ref.deviceIdentifier:
if _debug: LocalScheduleInterpreter._debug(" - no externals")
continue
# get the object from the application
obj = self.sched_obj._app.get_object_id(obj_prop_ref.objectIdentifier)
if not obj:
if _debug: LocalScheduleInterpreter._debug(" - no object")
continue
# try to change the value
try:
obj.WriteProperty(
obj_prop_ref.propertyIdentifier,
new_value,
arrayIndex=obj_prop_ref.propertyArrayIndex,
priority=self.sched_obj.priorityForWriting,
)
if _debug: LocalScheduleInterpreter._debug(" - success")
except Exception as err:
if _debug: LocalScheduleInterpreter._debug(" - error: %r", err)
def process_task(self):
if _debug: LocalScheduleInterpreter._debug("process_task(%s)", self.sched_obj.objectName)
# check for a valid configuration
if self.sched_obj.reliability != 'noFaultDetected':
if _debug: LocalScheduleInterpreter._debug(" - fault detected")
return
# get the date and time from the device object in case it provides
# some custom functionality
if self.sched_obj._app and self.sched_obj._app.localDevice:
current_date = self.sched_obj._app.localDevice.localDate
if _debug: LocalScheduleInterpreter._debug(" - current_date: %r", current_date)
current_time = self.sched_obj._app.localDevice.localTime
if _debug: LocalScheduleInterpreter._debug(" - current_time: %r", current_time)
else:
# get the current date and time, as provided by the task manager
current_date = Date().now().value
if _debug: LocalScheduleInterpreter._debug(" - current_date: %r", current_date)
current_time = Time().now().value
if _debug: LocalScheduleInterpreter._debug(" - current_time: %r", current_time)
# evaluate the time
current_value, next_transition = self.eval(current_date, current_time)
if _debug: LocalScheduleInterpreter._debug(" - current_value, next_transition: %r, %r", current_value, next_transition)
# set the present value
self.sched_obj.presentValue = current_value
# compute the time of the next transition
transition_time = datetime_to_time(current_date, next_transition)
# install this to run again
self.install_task(transition_time)
def eval(self, edate, etime):
"""Evaluate the schedule according to the provided date and time and
return the appropriate present value, or None if not in the effective
period."""
if _debug: LocalScheduleInterpreter._debug("eval %r %r", edate, etime)
# reference the schedule object
sched_obj = self.sched_obj
if _debug: LocalScheduleInterpreter._debug(" sched_obj: %r", sched_obj)
# verify the date falls in the effective period
if not match_date_range(edate, sched_obj.effectivePeriod):
return None
# the event priority is a list of values that are in effect for
# exception schedules with the special event priority, see 135.1-2013
# clause 7.3.2.23.10.3.8, Revision 4 Event Priority Test
event_priority = [None] * 16
next_day = (24, 0, 0, 0)
next_transition_time = [None] * 16
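# Illustrative note: if one special event with eventPriority 1 and another with
# eventPriority 16 are both in effect, the scan over event_priority below returns
# the value stored at index 0 first, i.e. the eventPriority 1 entry wins.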
# check the exception schedule values
if sched_obj.exceptionSchedule:
for special_event in sched_obj.exceptionSchedule:
if _debug: LocalScheduleInterpreter._debug(" - special_event: %r", special_event)
# check the special event period
special_event_period = special_event.period
if special_event_period is None:
raise RuntimeError("special event period required")
match = False
calendar_entry = special_event_period.calendarEntry
if calendar_entry:
if _debug: LocalScheduleInterpreter._debug(" - calendar_entry: %r", calendar_entry)
match = date_in_calendar_entry(edate, calendar_entry)
else:
# get the calendar object from the application
calendar_object = sched_obj._app.get_object_id(special_event_period.calendarReference)
if not calendar_object:
raise RuntimeError("invalid calendar object reference")
if _debug: LocalScheduleInterpreter._debug(" - calendar_object: %r", calendar_object)
for calendar_entry in calendar_object.dateList:
if _debug: LocalScheduleInterpreter._debug(" - calendar_entry: %r", calendar_entry)
match = date_in_calendar_entry(edate, calendar_entry)
if match:
break
# didn't match the period, try the next special event
if not match:
if _debug: LocalScheduleInterpreter._debug(" - no matching calendar entry")
continue
# event priority array index
priority = special_event.eventPriority - 1
if _debug: LocalScheduleInterpreter._debug(" - priority: %r", priority)
# look for all of the possible times
for time_value in special_event.listOfTimeValues:
tval = time_value.time
if tval <= etime:
if isinstance(time_value.value, Null):
if _debug: LocalScheduleInterpreter._debug(" - relinquish exception @ %r", tval)
event_priority[priority] = None
next_transition_time[priority] = None
else:
if _debug: LocalScheduleInterpreter._debug(" - consider exception @ %r", tval)
event_priority[priority] = time_value.value
next_transition_time[priority] = next_day
else:
next_transition_time[priority] = tval
break
# assume the next transition will be at the start of the next day
earliest_transition = next_day
# check if any of the special events came up with something
for priority_value, next_transition in zip(event_priority, next_transition_time):
if next_transition is not None:
earliest_transition = min(earliest_transition, next_transition)
if priority_value is not None:
if _debug: LocalScheduleInterpreter._debug(" - priority_value: %r", priority_value)
return priority_value, earliest_transition
# start out with the default
daily_value = sched_obj.scheduleDefault
# check the daily schedule
if sched_obj.weeklySchedule:
daily_schedule = sched_obj.weeklySchedule[edate[3]]
if _debug: LocalScheduleInterpreter._debug(" - daily_schedule: %r", daily_schedule)
# look for all of the possible times
for time_value in daily_schedule.daySchedule:
if _debug: LocalScheduleInterpreter._debug(" - time_value: %r", time_value)
tval = time_value.time
if tval <= etime:
if isinstance(time_value.value, Null):
if _debug: LocalScheduleInterpreter._debug(" - back to normal @ %r", tval)
daily_value = sched_obj.scheduleDefault
else:
if _debug: LocalScheduleInterpreter._debug(" - new value @ %r", tval)
daily_value = time_value.value
else:
earliest_transition = min(earliest_transition, tval)
break
# return what was matched, if anything
return daily_value, earliest_transition
#
# WritableAnalogValueObject
#
@register_object_type(vendor_id=999)
class WritableAnalogValueObject(CurrentPropertyListMixIn, AnalogValueObject):
properties = [
WritableProperty('presentValue', Real),
]
#
# TestConsoleCmd
#
@bacpypes_debugging
class TestConsoleCmd(ConsoleCmd):
def do_test(self, args):
"""test <date> <time>"""
args = args.split()
if _debug: TestConsoleCmd._debug("do_test %r", args)
date_string, time_string = args
test_date = Date(date_string).value
test_time = Time(time_string).value
for so in schedule_objects:
v, t = so._task.eval(test_date, test_time)
print(so.objectName + ", " + repr(v and v.value) + " until " + str(t))
#
# __main__
#
def main():
global args, schedule_objects
# parse the command line arguments
parser = ConfigArgumentParser(description=__doc__)
# parse the command line arguments
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# set up testing
setup_module()
# reset the time machine
reset_time_machine(start_time="1970-01-01")
# make a device object
this_device = LocalDeviceObject(
objectName=args.ini.objectname,
objectIdentifier=('device', int(args.ini.objectidentifier)),
maxApduLengthAccepted=int(args.ini.maxapdulengthaccepted),
segmentationSupported=args.ini.segmentationsupported,
vendorIdentifier=int(args.ini.vendoridentifier),
)
# make a floating application, no network interface
this_application = Application(this_device)
#
# Simple daily schedule (actually a weekly schedule with every day
# being identical).
#
so = LocalScheduleObject(
objectIdentifier=('schedule', 1),
objectName='Schedule 1',
presentValue=Real(-1.0),
effectivePeriod=DateRange(
startDate=(0, 1, 1, 1),
endDate=(254, 12, 31, 2),
),
weeklySchedule=ArrayOf(DailySchedule)([
DailySchedule(
daySchedule=[
TimeValue(time=(8,0,0,0), value=Real(8)),
TimeValue(time=(14,0,0,0), value=Null()),
TimeValue(time=(17,0,0,0), value=Real(42)),
# TimeValue(time=(0,0,0,0), value=Null()),
]
),
] * 7),
listOfObjectPropertyReferences=[
DeviceObjectPropertyReference(
objectIdentifier=('analogValue', 1),
propertyIdentifier='presentValue',
# propertyArrayIndex=5,
# deviceIdentifier=('device', 999),
),
],
priorityForWriting=7,
scheduleDefault=Real(0.0),
)
_log.debug(" - so: %r", so)
this_application.add_object(so)
# add an analog value object
avo = WritableAnalogValueObject(
objectIdentifier=('analogValue', 1),
objectName='analog value 1',
presentValue=0.0,
)
_log.debug(" - avo: %r", avo)
this_application.add_object(avo)
print("{} @ {}".format(so.presentValue.value, Time().now()))
for i in range(1, 25):
hr = "{}:00:01".format(i)
# let it run until just after the top of each hour
run_time_machine(stop_time=hr)
print("{}, {} @ {}".format(
so.presentValue.value,
avo.presentValue,
Time().now(),
))
# done testing
teardown_module()
if __name__ == "__main__":
main()
| mit | 1,479,491,013,076,970,800 | 34.994505 | 130 | 0.580522 | false |
twitter/pants | contrib/go/tests/python/pants_test/contrib/go/tasks/test_go_fetch.py | 1 | 8293 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
from collections import defaultdict
from pants.build_graph.address import Address
from pants.util.contextutil import temporary_dir
from pants_test.task_test_base import TaskTestBase
from pants.contrib.go.subsystems.fetcher import ArchiveFetcher
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.tasks.go_fetch import GoFetch
class GoFetchTest(TaskTestBase):
address = Address.parse
@classmethod
def task_type(cls):
return GoFetch
def test_get_remote_import_paths(self):
go_fetch = self.create_task(self.context())
self.create_file('src/github.com/u/a/a.go', contents="""
package a
import (
"fmt"
"math"
"sync"
"bitbucket.org/u/b"
"github.com/u/c"
)
""")
remote_import_ids = go_fetch._get_remote_import_paths('github.com/u/a',
gopath=self.build_root)
self.assertEqual(sorted(remote_import_ids), sorted(['bitbucket.org/u/b', 'github.com/u/c']))
def test_resolve_and_inject_explicit(self):
r1 = self.make_target(spec='3rdparty/go/r1', target_type=GoRemoteLibrary)
r2 = self.make_target(spec='3rdparty/go/r2', target_type=GoRemoteLibrary)
go_fetch = self.create_task(self.context())
resolved = go_fetch._resolve(r1, self.address('3rdparty/go/r2'), 'r2', rev=None, implicit_ok=False)
self.assertEqual(r2, resolved)
def test_resolve_and_inject_explicit_failure(self):
r1 = self.make_target(spec='3rdparty/go/r1', target_type=GoRemoteLibrary)
go_fetch = self.create_task(self.context())
with self.assertRaises(go_fetch.UndeclaredRemoteLibError) as cm:
go_fetch._resolve(r1, self.address('3rdparty/go/r2'), 'r2', rev=None, implicit_ok=False)
self.assertEqual(cm.exception.address, self.address('3rdparty/go/r2'))
def test_resolve_and_inject_implicit(self):
r1 = self.make_target(spec='3rdparty/go/r1', target_type=GoRemoteLibrary)
go_fetch = self.create_task(self.context())
r2 = go_fetch._resolve(r1, self.address('3rdparty/go/r2'), 'r2', rev=None, implicit_ok=True)
self.assertEqual(self.address('3rdparty/go/r2'), r2.address)
self.assertIsInstance(r2, GoRemoteLibrary)
def test_resolve_and_inject_implicit_already_exists(self):
r1 = self.make_target(spec='3rdparty/go/r1', target_type=GoRemoteLibrary)
self.make_target(spec='3rdparty/go/r2', target_type=GoRemoteLibrary)
go_fetch = self.create_task(self.context())
r2_resolved = go_fetch._resolve(r1, self.address('3rdparty/go/r2'), 'r2', rev=None, implicit_ok=True)
self.assertEqual(self.address('3rdparty/go/r2'), r2_resolved.address)
self.assertIsInstance(r2_resolved, GoRemoteLibrary)
def _create_package(self, dirpath, name, deps):
"""Creates a Go package inside dirpath named 'name' importing deps."""
imports = ['import "localzip/{}"'.format(d) for d in deps]
f = os.path.join(dirpath, '{name}/{name}.go'.format(name=name))
self.create_file(f, contents=
"""package {name}
{imports}
""".format(name=name, imports='\n'.join(imports)))
def _create_zip(self, src, dest, name):
"""Zips the Go package in src named 'name' into dest."""
shutil.make_archive(os.path.join(dest, name), 'zip', root_dir=src)
def _create_remote_lib(self, name):
self.make_target(spec='3rdparty/go/localzip/{name}'.format(name=name),
target_type=GoRemoteLibrary,
pkg=name)
def _init_dep_graph_files(self, src, zipdir, dep_graph):
"""Given a dependency graph, initializes the corresponding BUILD/packages/zip files.
Packages are placed in src, and their zipped contents are placed in zipdir.
"""
for t, deps in dep_graph.items():
self._create_package(src, t, deps)
self._create_zip(src, zipdir, t)
self._create_remote_lib(t)
def _create_fetch_context(self, zipdir):
"""Given a directory of zipfiles, creates a context for GoFetch."""
matcher = ArchiveFetcher.UrlInfo(url_format=os.path.join(zipdir, r'\g<zip>.zip'),
default_rev='HEAD',
strip_level=0)
self.set_options_for_scope('go-fetchers', matchers={r'localzip/(?P<zip>[^/]+)': matcher})
context = self.context()
context.products.safe_create_data('go_remote_lib_src', lambda: defaultdict(str))
return context
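# Rough sketch of what the matcher above does (based only on the regex and
# url_format here): a remote root like 'localzip/r1' is captured as zip='r1' and
# rewritten to os.path.join(zipdir, 'r1.zip'), so fetches resolve to the local
# archives created by _create_zip.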
def _assert_dependency_graph(self, root_target, dep_map):
"""Recursively assert that the dependency graph starting at root_target matches dep_map."""
if root_target.name not in dep_map:
return
expected_spec_paths = {'3rdparty/go/localzip/{}'.format(name)
for name in dep_map[root_target.name]}
actual_spec_paths = {dep.address.spec_path for dep in root_target.dependencies}
self.assertEqual(actual_spec_paths, expected_spec_paths)
dep_map = dep_map.copy()
del dep_map[root_target.name]
for dep in root_target.dependencies:
self._assert_dependency_graph(dep, dep_map)
def test_transitive_download_remote_libs_simple(self):
with temporary_dir() as src:
with temporary_dir() as zipdir:
dep_graph = {
'r1': ['r2'],
'r2': ['r3'],
'r3': []
}
self._init_dep_graph_files(src, zipdir, dep_graph)
r1 = self.target('3rdparty/go/localzip/r1')
context = self._create_fetch_context(zipdir)
go_fetch = self.create_task(context)
undeclared_deps = go_fetch._transitive_download_remote_libs({r1})
self.assertEqual(undeclared_deps, {})
self._assert_dependency_graph(r1, dep_graph)
def test_transitive_download_remote_libs_complex(self):
with temporary_dir() as src:
with temporary_dir() as zipdir:
dep_graph = {
'r1': ['r3', 'r4'],
'r2': ['r3'],
'r3': ['r4'],
'r4': []
}
self._init_dep_graph_files(src, zipdir, dep_graph)
r1 = self.target('3rdparty/go/localzip/r1')
r2 = self.target('3rdparty/go/localzip/r2')
context = self._create_fetch_context(zipdir)
go_fetch = self.create_task(context)
undeclared_deps = go_fetch._transitive_download_remote_libs({r1, r2})
self.assertEqual(undeclared_deps, {})
self._assert_dependency_graph(r1, dep_graph)
self._assert_dependency_graph(r2, dep_graph)
def test_transitive_download_remote_libs_undeclared_deps(self):
with temporary_dir() as src:
with temporary_dir() as zipdir:
dep_graph = {
'r1': ['r2', 'r3'],
'r2': ['r4']
}
self._init_dep_graph_files(src, zipdir, dep_graph)
r1 = self.target('3rdparty/go/localzip/r1')
r2 = self.target('3rdparty/go/localzip/r2')
context = self._create_fetch_context(zipdir)
go_fetch = self.create_task(context)
undeclared_deps = go_fetch._transitive_download_remote_libs({r1})
expected = defaultdict(set)
expected[r1] = {('localzip/r3', self.address('3rdparty/go/localzip/r3'))}
expected[r2] = {('localzip/r4', self.address('3rdparty/go/localzip/r4'))}
self.assertEqual(undeclared_deps, expected)
def test_issues_2616(self):
go_fetch = self.create_task(self.context())
self.create_file('src/github.com/u/a/a.go', contents="""
package a
import (
"fmt"
"math"
"sync"
"bitbucket.org/u/b"
)
""")
self.create_file('src/github.com/u/a/b.go', contents="""
package a
/*
#include <stdlib.h>
*/
import "C" // C was erroneously categorized as a remote lib in issue 2616.
import (
"fmt"
"github.com/u/c"
)
""")
remote_import_ids = go_fetch._get_remote_import_paths('github.com/u/a',
gopath=self.build_root)
self.assertEqual(sorted(remote_import_ids), sorted(['bitbucket.org/u/b', 'github.com/u/c']))
| apache-2.0 | 3,214,416,682,235,323,000 | 36.355856 | 105 | 0.636561 | false |
eaudeweb/lcc-toolkit | lcc/views/legislation.py | 1 | 24257 | import json
import operator
from functools import reduce
from django import views
from django.conf import settings
from django.contrib.auth import mixins
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q as DjQ, IntegerField
from django.db.models.functions import Cast
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.views.generic import (
ListView, CreateView, DetailView, UpdateView, DeleteView
)
from elasticsearch_dsl import Q
from lcc import models, constants, forms
from lcc.constants import LEGISLATION_YEAR_RANGE
from lcc.documents import LegislationDocument
from lcc.views.base import TagGroupRender, TaxonomyFormMixin
from lcc.views.country import (
CountryMetadataFiltering,
POP_RANGES,
HDI_RANGES,
GDP_RANGES,
GHG_LUCF,
GHG_NO_LUCF,
)
CONN = settings.TAXONOMY_CONNECTOR
class HighlightedLaws:
"""
This class wraps a Search instance and is compatible with Django's
pagination API.
"""
def __init__(self, search, sort=None):
self.search = search
self.sort = sort
def __getitem__(self, key):
hits = self.search[key]
if self.sort:
return hits.sort(self.sort).to_queryset()
laws = []
matched_article_tags = []
matched_article_classifications = []
for hit, law in zip(hits, hits.to_queryset()):
if hasattr(hit.meta, 'highlight'):
highlights = hit.meta.highlight.to_dict()
if 'abstract' in highlights:
law._highlighted_abstract = mark_safe(
' […] '.join(highlights['abstract'])
)
if 'pdf_text' in highlights:
law._highlighted_pdf_text = mark_safe(
' […] '.join(
highlights['pdf_text']
).replace('<pre>', '').replace('</pre>', '')
)
if 'title' in highlights:
law._highlighted_title = mark_safe(highlights['title'][0])
if 'classifications' in highlights:
law._highlighted_classifications = [
mark_safe(classification)
for classification in (
highlights['classifications'][0].split(CONN))
]
if 'article_classifications' in highlights:
matched_article_classifications += [
tag[4:-5] for tag in (
highlights['article_classifications'][0].split(CONN))
if '<em>' in tag
]
if 'tags' in highlights:
law._highlighted_tags = [
mark_safe(tag)
for tag in highlights['tags'][0].split(CONN)
]
if 'article_tags' in highlights:
matched_article_tags += [
tag[4:-5] for tag in (
highlights['article_tags'][0].split(CONN))
if '<em>' in tag
]
if hasattr(hit.meta, 'inner_hits'):
law._highlighted_articles = []
if hit.meta.inner_hits.articles:
for article in hit.meta.inner_hits.articles.hits:
article_dict = {
'pk': article.pk,
'code': article.code
}
if not hasattr(article.meta, 'highlight'):
continue
highlights = article.meta.highlight.to_dict()
matched_text = highlights.get('articles.text')
if matched_text:
article_dict['text'] = mark_safe(
' […] '.join(matched_text)
)
matched_classifications = (
highlights.get(
'articles.classifications_text')
)
if matched_classifications:
article_dict['classifications'] = [
mark_safe(classification)
for classification in (
matched_classifications[0].split(CONN))
]
matched_tags = highlights.get(
'articles.tags_text')
if matched_tags:
article_dict['tags'] = [
mark_safe(tag)
for tag in (
matched_tags[0].split(CONN))
]
law._highlighted_articles.append(article_dict)
elif matched_article_classifications or matched_article_tags:
# NOTE: This is a hack. ElasticSearch won't return
# highlighted article tags in some cases so this workaround
# is necessary. Please fix if you know how. Try searching
# for a keyword that is in the title of a law, and filtering
# by a tag that is assigned to an article of that law, but
# not the law itself. The query will work (it will only
# return the law that has such an article, and not others),
# but the inner_hits will be empty.
law._highlighted_articles = []
articles = law.articles.filter(
DjQ(tags__name__in=matched_article_tags) |
DjQ(
classifications__name__in=(
matched_article_classifications)
)
).prefetch_related('tags')
for article in articles:
article_dict = {
'pk': article.pk,
'code': article.code,
'classifications': [
mark_safe('<em>{}</em>'.format(cl.name))
if cl.name in matched_article_classifications
else cl.name
for cl in article.classifications.all()
],
'tags': [
mark_safe('<em>{}</em>'.format(tag.name))
if tag.name in matched_article_tags
else tag.name
for tag in article.tags.all()
]
}
law._highlighted_articles.append(article_dict)
laws.append(law)
return laws
def count(self):
return self.search.count()
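# Minimal usage sketch (mirrors what get_queryset below does; names are illustrative):
#
#   search = LegislationDocument.search().query('match', title='climate')
#   laws = HighlightedLaws(search)
#   page = Paginator(laws, 10).page(1)  # works because count()/__getitem__ exist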
class LegislationExplorer(CountryMetadataFiltering, ListView):
template_name = "legislation/explorer.html"
model = models.Legislation
def get_sort(self):
promulgation_sort = self.request.GET.get("promulgation_sort")
country_sort = self.request.GET.get("country_sort")
if promulgation_sort:
if promulgation_sort == '1':
return 'year'
else:
return '-year'
if country_sort:
if country_sort == '1':
return 'country_name'
else:
return '-country_name'
def get_queryset(self):
"""
Perform filtering using ElasticSearch instead of Postgres.
Note that this DOES NOT return a QuerySet object, it returns a Page
object instead. This is necessary because by transforming an
elasticsearch-dsl Search object into a QuerySet a lot of functionality
is lost, so we need to make things a bit more custom.
"""
law_queries = []
article_queries = []
article_highlights = {}
# jQuery's ajax function adds `[]` to duplicated querystring parameters
# or parameters whose values are objects, so we have to take that into
# account when looking for our values in the querystring. More info at:
# - http://api.jquery.com/jQuery.param/
# List of strings representing TaxonomyClassification ids
classification_ids = [
int(pk) for pk in self.request.GET.getlist('classifications[]')]
if classification_ids:
classification_names = models.TaxonomyClassification.objects.filter(
pk__in=classification_ids).values_list('name', flat=True)
# Search root document for any of the classifications received
law_queries.append(
reduce(
operator.or_,
[
Q('match_phrase', classifications=name)
for name in classification_names
]
) | reduce(
operator.or_,
[
Q('match_phrase', article_classifications=name)
for name in classification_names
]
)
)
# Search inside articles for any classifications
article_queries.append(
reduce(
operator.or_,
[
Q(
'match_phrase',
articles__classifications_text=name
) for name in classification_names
]
) | reduce(
operator.or_,
[
Q(
'match_phrase',
articles__parent_classifications=name
) for name in classification_names
]
)
)
article_highlights['articles.classifications_text'] = {
'number_of_fragments': 0
}
# List of strings representing TaxonomyTag ids
tag_ids = [int(pk) for pk in self.request.GET.getlist('tags[]')]
if tag_ids:
tag_names = models.TaxonomyTag.objects.filter(
pk__in=tag_ids).values_list('name', flat=True)
# Search root document
law_queries.append(
reduce(
operator.or_,
[
Q('match_phrase', tags=name)
for name in tag_names
]
) | reduce(
operator.or_,
[
Q('match_phrase', article_tags=name)
for name in tag_names
]
)
)
# Search inside articles
article_queries.append(
reduce(
operator.or_,
[
Q('match_phrase', articles__tags_text=name)
for name in tag_names
]
) | reduce(
operator.or_,
[
Q('match_phrase', articles__parent_tags=name)
for name in tag_names
]
)
)
article_highlights['articles.tags_text'] = {
'number_of_fragments': 0
}
# String to be searched in all text fields (full-text search using
# elasticsearch's default best_fields strategy)
q = self.request.GET.get('q')
law_q_query = []
article_q_query = []
if q:
# Compose root document search
law_q_query = [
Q(
'multi_match', query=q, fields=[
'title', 'abstract', 'pdf_text', 'classifications',
'tags'
]
)
]
# Compose nested document search inside articles
article_q_query = [
Q('multi_match', query=q, fields=['articles.text']) |
Q(
'constant_score', boost=50, filter={
"match_phrase": {
"articles.text": q
}
}
)
]
article_q_highlights = {'articles.text': {}}
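# The constant_score/match_phrase clause above gives documents whose articles
# contain the exact phrase a fixed boost (50) on top of the looser multi_match
# scoring, so exact phrase hits rank ahead of scattered keyword matches.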
search = LegislationDocument.search()
sort = self.get_sort()
if not sort:
if q:
q_in_law = Q(
'bool', must=law_queries + law_q_query + ([
Q(
'nested',
score_mode='max',
# boost=10,
path='articles',
query=Q(
reduce(
operator.and_,
article_queries
)
),
inner_hits={
'highlight': {'fields': article_highlights}
}
)
] if article_queries else [])
)
q_in_article = Q(
'bool', must=law_queries + ([
Q(
'nested',
score_mode='max',
# boost=10,
path='articles',
query=Q(
reduce(
operator.and_,
article_queries + article_q_query
)
),
inner_hits={
'highlight': {
'fields': {
**article_highlights,
**article_q_highlights
}
}
}
)
] if article_queries or article_q_query else [])
)
search = search.query(q_in_law | q_in_article).highlight(
'abstract', 'pdf_text'
)
else:
root_query = [Q(
reduce(
operator.and_,
law_queries
)
)] if law_queries else []
nested_query = [Q(
'nested',
score_mode='max',
# boost=10,
path='articles',
query=Q(
reduce(
operator.and_,
article_queries
)
),
inner_hits={
'highlight': {'fields': article_highlights}
}
)] if article_queries else []
final_query = []
if root_query:
final_query += root_query
if nested_query:
# Necessary for highlights
final_query += root_query and nested_query
if final_query:
search = search.query(
'bool', should=final_query,
minimum_should_match=1
)
# List of strings representing country iso codes
countries = self.request.GET.getlist('countries[]')
selected_countries = False
if countries:
selected_countries = True
filtering_countries = self.filter_countries(self.request, selected_countries=selected_countries)
if countries or filtering_countries.count() != models.Country.objects.all().count():
countries.extend([country.iso for country in filtering_countries])
search = search.query('terms', country=countries)
# List of strings representing law types
law_types = self.request.GET.getlist('law_types[]')
if law_types:
search = search.query('terms', law_type=law_types)
# String representing the minimum year allowed in the results
from_year = self.request.GET.get('from_year')
# String representing the maximum year allowed in the results
to_year = self.request.GET.get('to_year')
if all([from_year, to_year]):
search = search.query(
Q('range', year={'gte': int(from_year), 'lte': int(to_year)}) |
Q('range', year_amendment={
'gte': int(from_year), 'lte': int(to_year)}) |
Q('range', year_mentions={
'gte': int(from_year), 'lte': int(to_year)})
)
search = search.highlight(
'title', 'classifications', 'article_classifications', 'tags',
'article_tags', number_of_fragments=0
)
if not any([classification_ids, tag_ids, q]):
# If there is no score to sort by, sort by id
search = search.sort('id')
# import json; print(json.dumps(search.to_dict(), indent=2))
all_laws = HighlightedLaws(search, sort)
paginator = Paginator(all_laws, settings.LAWS_PER_PAGE)
page = self.request.GET.get('page', 1)
try:
laws = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
laws = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
laws = paginator.page(paginator.num_pages)
return laws
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
group_tags = models.TaxonomyTagGroup.objects.all()
top_classifications = models.TaxonomyClassification.objects.filter(
level=0).annotate(
code_as_int=Cast('code', output_field=IntegerField())
).order_by('code_as_int')
countries = models.Country.objects.all().order_by('name')
regions = models.Region.objects.all().order_by('name')
sub_regions = models.SubRegion.objects.all().order_by('name')
legal_systems = models.LegalSystem.objects.all().order_by('name')
laws = self.object_list
legislation_year = (
LEGISLATION_YEAR_RANGE[0],
LEGISLATION_YEAR_RANGE[len(LEGISLATION_YEAR_RANGE) - 1]
)
filters_dict = dict(self.request.GET)
context.update({
'laws': laws,
'group_tags': group_tags,
'top_classifications': top_classifications,
'countries': countries,
'regions': regions,
'sub_regions': sub_regions,
'legal_systems': legal_systems,
'population': POP_RANGES,
'hdi2015': HDI_RANGES,
'gdp_capita': GDP_RANGES,
'ghg_no_lucf': GHG_NO_LUCF,
'ghg_lucf': GHG_LUCF,
'legislation_type': constants.LEGISLATION_TYPE,
'legislation_year': legislation_year,
'min_year': settings.MIN_YEAR,
'max_year': settings.MAX_YEAR,
'from_year': filters_dict.pop('from_year', [settings.MIN_YEAR])[0],
'to_year': filters_dict.pop('to_year', [settings.MAX_YEAR])[0],
'filters': json.dumps(filters_dict)
})
return context
class LegislationAdd(mixins.LoginRequiredMixin, TaxonomyFormMixin,
CreateView):
template_name = "legislation/add.html"
form_class = forms.LegislationForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
countries = sorted(models.Country.objects.all(), key=lambda c: c.name)
context.update({
"countries": countries,
"legislation_type": constants.LEGISLATION_TYPE,
"tag_groups": [
TagGroupRender(tag_group)
for tag_group in models.TaxonomyTagGroup.objects.all()
],
"available_languages": constants.ALL_LANGUAGES,
"source_types": constants.SOURCE_TYPE,
"geo_coverage": constants.GEOGRAPHICAL_COVERAGE,
"adoption_years": LEGISLATION_YEAR_RANGE,
"classifications": models.TaxonomyClassification.objects.filter(
level=0).order_by('code')
})
return context
def form_valid(self, form):
legislation = form.save()
legislation.save_pdf_pages()
if "save-and-continue-btn" in self.request.POST:
return HttpResponseRedirect(
reverse('lcc:legislation:articles:add',
kwargs={'legislation_pk': legislation.pk})
)
if "save-btn" in self.request.POST:
return HttpResponseRedirect(reverse("lcc:legislation:explorer"))
class LegislationView(DetailView):
template_name = "legislation/detail.html"
pk_url_kwarg = 'legislation_pk'
model = models.Legislation
context_object_name = 'law'
class LegislationPagesView(views.View):
def get(self, request, *args, **kwargs):
law = get_object_or_404(models.Legislation,
pk=kwargs['legislation_pk'])
pages = law.pages.all()
content = {}
for page in pages:
content[page.page_number] = page.page_text
return JsonResponse(content)
class LegislationEditView(mixins.LoginRequiredMixin, TaxonomyFormMixin,
UpdateView):
template_name = "legislation/edit.html"
model = models.Legislation
form_class = forms.LegislationForm
pk_url_kwarg = 'legislation_pk'
context_object_name = 'law'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
countries = sorted(models.Country.objects.all(), key=lambda c: c.name)
context.update({
"countries": countries,
"available_languages": constants.ALL_LANGUAGES,
"legislation_type": constants.LEGISLATION_TYPE,
"tag_groups": [
TagGroupRender(tag_group)
for tag_group in models.TaxonomyTagGroup.objects.all()
],
"classifications": models.TaxonomyClassification.objects.filter(
level=0).order_by('code'),
"adoption_years": LEGISLATION_YEAR_RANGE,
"source_types": constants.SOURCE_TYPE,
"geo_coverage": constants.GEOGRAPHICAL_COVERAGE,
})
return context
def form_valid(self, form):
legislation = form.save()
if 'pdf_file' in self.request.FILES:
models.LegislationPage.objects.filter(
legislation=legislation).delete()
legislation.save_pdf_pages()
return HttpResponseRedirect(
reverse('lcc:legislation:details',
kwargs={'legislation_pk': legislation.pk})
)
class LegislationDeleteView(mixins.LoginRequiredMixin, DeleteView):
model = models.Legislation
pk_url_kwarg = 'legislation_pk'
def get_success_url(self, **kwargs):
return reverse("lcc:legislation:explorer")
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
| gpl-3.0 | 4,738,806,645,539,076,000 | 38.3047 | 108 | 0.474207 | false |
leakim/svtplay-dl | lib/svtplay_dl/service/tv4play.py | 1 | 7249 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import os
import xml.etree.ElementTree as ET
import json
import copy
from svtplay_dl.utils.urllib import urlparse, parse_qs, quote_plus
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils import is_py2_old, filenamify
from svtplay_dl.log import log
from svtplay_dl.fetcher.hls import hlsparse, HLS
from svtplay_dl.fetcher.rtmp import RTMP
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.subtitle import subtitle
from svtplay_dl.error import ServiceError
class Tv4play(Service, OpenGraphThumbMixin):
supported_domains = ['tv4play.se', 'tv4.se']
def __init__(self, url):
Service.__init__(self, url)
self.subtitle = None
self.cookies = {}
def get(self, options):
data = self.get_urldata()
vid = findvid(self.url, data)
if vid is None:
yield ServiceError("Can't find video id for %s" % self.url)
return
if options.username and options.password:
data = self.http.request("get", "https://www.tv4play.se/session/new?https=")
auth_token = re.search('name="authenticity_token" ([a-z]+="[^"]+" )?value="([^"]+)"', data.text)
if not auth_token:
yield ServiceError("Can't find authenticity_token needed for user / password")
return
url = "https://www.tv4play.se/session"
postdata = {"user_name" : options.username, "password": options.password, "authenticity_token":auth_token.group(2), "https": ""}
data = self.http.request("post", url, data=postdata, cookies=self.cookies)
self.cookies = data.cookies
fail = re.search("<p class='failed-login'>([^<]+)</p>", data.text)
if fail:
yield ServiceError(fail.group(1))
return
url = "http://premium.tv4play.se/api/web/asset/%s/play" % vid
data = self.http.request("get", url, cookies=self.cookies)
if data.status_code == 401:
xml = ET.XML(data.content)
code = xml.find("code").text
if code == "SESSION_NOT_AUTHENTICATED":
yield ServiceError("Can't access premium content")
elif code == "ASSET_PLAYBACK_INVALID_GEO_LOCATION":
yield ServiceError("Can't downoad this video because of geoblocked.")
else:
yield ServiceError("Can't find any info for that video")
return
if data.status_code == 404:
yield ServiceError("Can't find the video api")
return
xml = ET.XML(data.content)
ss = xml.find("items")
if is_py2_old:
sa = list(ss.getiterator("item"))
else:
sa = list(ss.iter("item"))
if xml.find("live").text:
if xml.find("live").text != "false":
options.live = True
if xml.find("drmProtected").text == "true":
yield ServiceError("We cant download DRM protected content from this site.")
return
if options.output_auto:
directory = os.path.dirname(options.output)
options.service = "tv4play"
title = "%s-%s-%s" % (options.output, vid, options.service)
title = filenamify(title)
if len(directory):
options.output = os.path.join(directory, title)
else:
options.output = title
if self.exclude(options):
yield ServiceError("Excluding video")
return
for i in sa:
if i.find("mediaFormat").text == "mp4":
base = urlparse(i.find("base").text)
parse = urlparse(i.find("url").text)
if "rtmp" in base.scheme:
swf = "http://www.tv4play.se/flash/tv4playflashlets.swf"
options.other = "-W %s -y %s" % (swf, i.find("url").text)
yield RTMP(copy.copy(options), i.find("base").text, i.find("bitrate").text)
elif parse.path[len(parse.path)-3:len(parse.path)] == "f4m":
streams = hdsparse(copy.copy(options), self.http.request("get", i.find("url").text, params={"hdcore": "3.7.0"}).text, i.find("url").text)
if streams:
for n in list(streams.keys()):
yield streams[n]
elif i.find("mediaFormat").text == "smi":
yield subtitle(copy.copy(options), "smi", i.find("url").text)
url = "http://premium.tv4play.se/api/web/asset/%s/play?protocol=hls" % vid
data = self.http.request("get", url, cookies=self.cookies).content
xml = ET.XML(data)
ss = xml.find("items")
if is_py2_old:
sa = list(ss.getiterator("item"))
else:
sa = list(ss.iter("item"))
for i in sa:
if i.find("mediaFormat").text == "mp4":
parse = urlparse(i.find("url").text)
if parse.path.endswith("m3u8"):
streams = hlsparse(i.find("url").text, self.http.request("get", i.find("url").text).text)
for n in list(streams.keys()):
yield HLS(copy.copy(options), streams[n], n)
def find_all_episodes(self, options):
parse = urlparse(self.url)
show = parse.path[parse.path.find("/", 1)+1:]
if not re.search("%", show):
show = quote_plus(show)
data = self.http.request("get", "http://webapi.tv4play.se/play/video_assets?type=episode&is_live=false&platform=web&node_nids=%s&per_page=99999" % show).text
jsondata = json.loads(data)
episodes = []
n = 1
for i in jsondata["results"]:
try:
days = int(i["availability"]["availability_group_free"])
except (ValueError, TypeError):
days = 999
if days > 0:
video_id = i["id"]
url = "http://www.tv4play.se/program/%s?video_id=%s" % (
show, video_id)
episodes.append(url)
if n == options.all_last:
break
n += 1
return episodes
def findvid(url, data):
parse = urlparse(url)
if "tv4play.se" in url:
try:
vid = parse_qs(parse.query)["video_id"][0]
except KeyError:
return None
else:
match = re.search(r"\"vid\":\"(\d+)\",", data)
if match:
vid = match.group(1)
else:
match = re.search(r"-(\d+)$", url)
if match:
vid = match.group(1)
else:
match = re.search(r"meta content='([^']+)' property='og:video'", data)
if match:
match = re.search(r"vid=(\d+)&", match.group(1))
if match:
vid = match.group(1)
else:
log.error("Can't find video id for %s", url)
return
else:
return None
return vid | mit | 4,481,248,987,106,100,000 | 39.730337 | 165 | 0.531522 | false |
Lorquas/subscription-manager | test/test_cache.py | 1 | 45245 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
#
# Copyright (c) 2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
import logging
import random
import shutil
import socket
import tempfile
import time
from mock import Mock, patch, mock_open
# used to get a user readable cfg class for test cases
from .stubs import StubProduct, StubProductCertificate, StubCertificateDirectory, \
StubEntitlementCertificate, StubPool, StubEntitlementDirectory
from .fixture import SubManFixture
from rhsm import ourjson as json
from subscription_manager.cache import ProfileManager, \
InstalledProductsManager, EntitlementStatusCache, \
PoolTypeCache, ReleaseStatusCache, ContentAccessCache, \
PoolStatusCache, ContentAccessModeCache, SupportedResourcesCache, \
AvailableEntitlementsCache
from rhsm.profile import Package, RPMProfile, EnabledReposProfile, ModulesProfile
from rhsm.connection import RestlibException, UnauthorizedException, \
RateLimitExceededException
from subscription_manager import injection as inj
from subscription_manager import isodate, cache
log = logging.getLogger(__name__)
class _FACT_MATCHER(object):
def __eq__(self, other):
return True
FACT_MATCHER = _FACT_MATCHER()
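# FACT_MATCHER compares equal to anything, so assert_called_with(uuid, FACT_MATCHER)
# in the tests below only pins down the uuid argument and accepts whatever profile
# payload the code under test happened to send.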
CONTENT_REPO_FILE = """
[awesome-os-for-x86_64-upstream-rpms]
name = Awesome OS for x86_64 - Upstream (RPMs)
baseurl = https://cdn.awesome.com/content/dist/awesome/$releasever/x86_64/upstream/os
enabled = 1
gpgcheck = 1
gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-awesome-release
sslverify = 1
sslcacert = /etc/rhsm/ca/awesome-uep.pem
sslclientkey = /etc/pki/entitlement/0123456789012345678-key.pem
sslclientcert = /etc/pki/entitlement/0123456789012345678.pem
metadata_expire = 86400
ui_repoid_vars = releasever
[awesome-os-for-x86_64-debug-rpms]
name = Awesome OS for x86_64 - Debug (RPMs)
baseurl = https://cdn.awesome.com/content/dist/awesome/$releasever/x86_64/upstream/debug
enabled = 0
gpgcheck = 1
gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-awesome-release
sslverify = 1
sslcacert = /etc/rhsm/ca/awesome-uep.pem
sslclientkey = /etc/pki/entitlement/0123456789012345678-key.pem
sslclientcert = /etc/pki/entitlement/0123456789012345678.pem
metadata_expire = 86400
ui_repoid_vars = releasever
"""
ENABLED_MODULES = [
{
"name": "duck",
"stream": 0,
"version": "20180730233102",
"context": "deadbeef",
"arch": "noarch",
"profiles": ["default"],
"installed_profiles": [],
"status": "enabled"
},
{
"name": "flipper",
"stream": 0.69,
"version": "20180707144203",
"context": "c0ffee42",
"arch": "x86_64",
"profiles": ["default", "server"],
"installed_profiles": ["server"],
"status": "unknown"
}
]
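# ENABLED_MODULES mimics the list of dicts that ModulesProfile.collect() is mocked
# to return in _mock_pkg_profile below; CONTENT_REPO_FILE likewise stands in for a
# dnf .repo file consumed by EnabledReposProfile.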
class TestProfileManager(unittest.TestCase):
def setUp(self):
current_pkgs = [
Package(name="package1", version="1.0.0", release=1, arch="x86_64"),
Package(name="package2", version="2.0.0", release=2, arch="x86_64")
]
temp_repo_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_repo_dir)
repo_file_name = os.path.join(temp_repo_dir, 'awesome.repo')
with open(repo_file_name, 'w') as repo_file:
repo_file.write(CONTENT_REPO_FILE)
patcher = patch('rhsm.profile.dnf')
self.addCleanup(patcher.stop)
dnf_mock = patcher.start()
dnf_mock.dnf = Mock()
mock_db = Mock()
mock_db.conf = Mock()
mock_db.conf.substitutions = {'releasever': '1', 'basearch': 'x86_64'}
dnf_mock.dnf.Base = Mock(return_value=mock_db)
self.current_profile = self._mock_pkg_profile(current_pkgs, repo_file_name, ENABLED_MODULES)
self.profile_mgr = ProfileManager()
self.profile_mgr.current_profile = self.current_profile
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_no_change(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = 'FAKEUUID'
uep = Mock()
uep.updatePackageProfile = Mock()
self.profile_mgr.has_changed = Mock(return_value=False)
self.profile_mgr.write_cache = Mock()
self.profile_mgr.update_check(uep, uuid)
self.assertEqual(0, uep.updatePackageProfile.call_count)
self.assertEqual(0, self.profile_mgr.write_cache.call_count)
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_has_changed(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = 'FAKEUUID'
uep = Mock()
uep.has_capability = Mock(return_value=False)
uep.updatePackageProfile = Mock()
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
self.profile_mgr.update_check(uep, uuid, True)
uep.updatePackageProfile.assert_called_with(uuid,
FACT_MATCHER)
self.assertEqual(1, self.profile_mgr.write_cache.call_count)
@patch('subscription_manager.cache.get_supported_resources')
def test_combined_profile_update_check_has_changed(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ["packages"]
uuid = 'FAKEUUID'
uep = Mock()
uep.has_capability = Mock(return_value=True)
uep.updateCombinedProfile = Mock()
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
self.profile_mgr.update_check(uep, uuid, True)
uep.updateCombinedProfile.assert_called_with(uuid,
FACT_MATCHER)
self.assertEqual(1, self.profile_mgr.write_cache.call_count)
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_packages_not_supported(self, mock_get_supported_resources):
# support anything else but not 'packages'
mock_get_supported_resources.return_value = ['foo', 'bar']
uuid = 'FAKEUUID'
uep = Mock()
uep.updatePackageProfile = Mock()
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
self.profile_mgr.update_check(uep, uuid)
self.assertEqual(0, uep.updatePackageProfile.call_count)
mock_get_supported_resources.assert_called_once()
self.assertEqual(0, self.profile_mgr.write_cache.call_count)
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_packages_disabled(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = 'FAKEUUID'
uep = Mock()
self.profile_mgr.report_package_profile = 0
uep.updatePackageProfile = Mock()
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
self.profile_mgr.update_check(uep, uuid)
self.assertEqual(0, uep.updatePackageProfile.call_count)
mock_get_supported_resources.assert_called_once()
self.assertEqual(0, self.profile_mgr.write_cache.call_count)
def test_report_package_profile_environment_variable(self):
with patch.dict('os.environ', {'SUBMAN_DISABLE_PROFILE_REPORTING': '1'}), \
patch.object(cache, 'conf') as conf:
# report_package_profile is set to 1 and SUBMAN_DISABLE_PROFILE_REPORTING is set to 1, the
# package profile should not be reported.
conf.__getitem__.return_value.get_int.return_value = 1
self.assertFalse(self.profile_mgr.profile_reporting_enabled())
# report_package_profile in rhsm.conf is set to 0 and SUBMAN_DISABLE_PROFILE_REPORTING is set
# to 1, the package profile should not be reported.
conf.__getitem__.return_value.get_int.return_value = 0
self.assertFalse(self.profile_mgr.profile_reporting_enabled())
with patch.dict('os.environ', {'SUBMAN_DISABLE_PROFILE_REPORTING': '0'}), \
patch.object(cache, 'conf') as conf:
# report_package_profile in rhsm.conf is set to 1 and SUBMAN_DISABLE_PROFILE_REPORTING is set
# to 0, the package profile should be reported.
conf.__getitem__.return_value.get_int.return_value = 1
self.assertTrue(self.profile_mgr.profile_reporting_enabled())
# report_package_profile in rhsm.conf is set to 0 and SUBMAN_DISABLE_PROFILE_REPORTING is set
# to 0, the package profile should not be reported.
conf.__getitem__.return_value.get_int.return_value = 0
self.assertFalse(self.profile_mgr.profile_reporting_enabled())
with patch.dict('os.environ', {}), patch.object(cache, 'conf') as conf:
# report_package_profile in rhsm.conf is set to 1 and SUBMAN_DISABLE_PROFILE_REPORTING is not
# set, the package profile should be reported.
conf.__getitem__.return_value.get_int.return_value = 1
self.assertTrue(self.profile_mgr.profile_reporting_enabled())
# report_package_profile in rhsm.conf is set to 0 and SUBMAN_DISABLE_PROFILE_REPORTING is not
# set, the package profile should not be reported.
conf.__getitem__.return_value.get_int.return_value = 0
self.assertFalse(self.profile_mgr.profile_reporting_enabled())
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_error_uploading(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = 'FAKEUUID'
uep = Mock()
uep.has_capability = Mock(return_value=False)
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
# Throw an exception when trying to upload:
uep.updatePackageProfile = Mock(side_effect=Exception('BOOM!'))
self.assertRaises(Exception, self.profile_mgr.update_check, uep, uuid, True)
uep.updatePackageProfile.assert_called_with(uuid,
FACT_MATCHER)
self.assertEqual(0, self.profile_mgr.write_cache.call_count)
@patch('subscription_manager.cache.get_supported_resources')
def test_combined_profile_update_check_error_uploading(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = 'FAKEUUID'
uep = Mock()
uep.has_capability = Mock(return_value=True)
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
# Throw an exception when trying to upload:
uep.updateCombinedProfile = Mock(side_effect=Exception('BOOM!'))
self.assertRaises(Exception, self.profile_mgr.update_check, uep, uuid, True)
uep.updateCombinedProfile.assert_called_with(uuid,
FACT_MATCHER)
self.assertEqual(0, self.profile_mgr.write_cache.call_count)
def test_has_changed_no_cache(self):
self.profile_mgr._cache_exists = Mock(return_value=False)
self.assertTrue(self.profile_mgr.has_changed())
def test_has_changed_no_changes(self):
cached_pkgs = [
Package(name="package1", version="1.0.0", release=1, arch="x86_64"),
Package(name="package2", version="2.0.0", release=2, arch="x86_64")
]
temp_repo_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_repo_dir)
repo_file_name = os.path.join(temp_repo_dir, 'awesome.repo')
with open(repo_file_name, 'w') as repo_file:
repo_file.write(CONTENT_REPO_FILE)
cached_profile = self._mock_pkg_profile(cached_pkgs, repo_file_name, ENABLED_MODULES)
self.profile_mgr._cache_exists = Mock(return_value=True)
self.profile_mgr._read_cache = Mock(return_value=cached_profile)
self.assertFalse(self.profile_mgr.has_changed())
self.profile_mgr._read_cache.assert_called_with()
def test_has_changed(self):
cached_pkgs = [
Package(name="package1", version="1.0.0", release=1, arch="x86_64"),
Package(name="package3", version="3.0.0", release=3, arch="x86_64")
]
cached_profile = self._mock_pkg_profile(cached_pkgs, "/non/existing/path/to/repo/file", [])
self.profile_mgr._cache_exists = Mock(return_value=True)
self.profile_mgr._read_cache = Mock(return_value=cached_profile)
self.assertTrue(self.profile_mgr.has_changed())
self.profile_mgr._read_cache.assert_called_with()
@patch('subscription_manager.cache.get_supported_resources')
def test_update_check_consumer_uuid_none(self, mock_get_supported_resources):
mock_get_supported_resources.return_value = ['packages']
uuid = None
uep = Mock()
self.profile_mgr.has_changed = Mock(return_value=True)
self.profile_mgr.write_cache = Mock()
res = self.profile_mgr.update_check(uep, uuid)
self.assertEqual(0, res)
def test_package_json_handles_non_unicode(self):
package = Package(name=b'\xf6', version=b'\xf6', release=b'\xf6', arch=b'\xf6', vendor=b'\xf6')
data = package.to_dict()
json_str = json.dumps(data) # to json
data = json.loads(json_str) # and back to an object
for attr in ['name', 'version', 'release', 'arch', 'vendor']:
self.assertEqual(u'\ufffd', data[attr])
def test_package_json_as_unicode_type(self):
# note that the data type at time of writing is bytes, so this is just defensive coding
package = Package(name=u'Björk', version=u'Björk', release=u'Björk', arch=u'Björk', vendor=u'Björk')
data = package.to_dict()
json_str = json.dumps(data) # to json
data = json.loads(json_str) # and back to an object
for attr in ['name', 'version', 'release', 'arch', 'vendor']:
self.assertEqual(u'Björk', data[attr])
def test_package_json_missing_attributes(self):
package = Package(name=None, version=None, release=None, arch=None, vendor=None)
data = package.to_dict()
json_str = json.dumps(data) # to json
data = json.loads(json_str) # and back to an object
for attr in ['name', 'version', 'release', 'arch', 'vendor']:
self.assertEqual(None, data[attr])
def test_module_md_uniquify(self):
modules_input = [
{
"name": "duck",
"stream": 0,
"version": "20180730233102",
"context": "deadbeef",
"arch": "noarch",
"profiles": ["default"],
"installed_profiles": [],
"status": "enabled"
},
{
"name": "duck",
"stream": 0,
"version": "20180707144203",
"context": "c0ffee42",
"arch": "noarch",
"profiles": ["default", "server"],
"installed_profiles": ["server"],
"status": "unknown"
}
]
self.assertEqual(modules_input, ModulesProfile._uniquify(modules_input))
# now test dup modules
self.assertEqual(modules_input, ModulesProfile._uniquify(modules_input + [modules_input[0]]))
@staticmethod
def _mock_pkg_profile(packages, repo_file, enabled_modules):
"""
Turn a list of package objects into a dict of mock profiles (rpm, enabled_repos and modulemd).
"""
dict_list = []
for pkg in packages:
dict_list.append(pkg.to_dict())
mock_file = Mock()
mock_file.read = Mock(return_value=json.dumps(dict_list))
mock_rpm_profile = RPMProfile(from_file=mock_file)
mock_enabled_repos_profile = EnabledReposProfile(repo_file=repo_file)
mock_module_profile = ModulesProfile()
mock_module_profile.collect = Mock(return_value=enabled_modules)
mock_profile = {
"rpm": mock_rpm_profile,
"enabled_repos": mock_enabled_repos_profile,
"modulemd": mock_module_profile
}
return mock_profile
class TestInstalledProductsCache(SubManFixture):
def setUp(self):
super(TestInstalledProductsCache, self).setUp()
self.prod_dir = StubCertificateDirectory([
StubProductCertificate(StubProduct('a-product', name="Product A", provided_tags="product,product-a")),
StubProductCertificate(StubProduct('b-product', name="Product B", provided_tags="product,product-b")),
StubProductCertificate(StubProduct('c-product', name="Product C", provided_tags="product-c")),
])
inj.provide(inj.PROD_DIR, self.prod_dir)
self.mgr = InstalledProductsManager()
def test_cert_parsing(self):
self.assertEqual(3, len(list(self.mgr.installed.keys())))
self.assertTrue('a-product' in self.mgr.installed)
self.assertTrue('b-product' in self.mgr.installed)
self.assertTrue('c-product' in self.mgr.installed)
self.assertEqual("Product A", self.mgr.installed['a-product']['productName'])
self.assertEqual(set(["product", "product-a", "product-b", "product-c"]), set(self.mgr.tags))
def test_load_data(self):
cached = {
'products': {
'prod1': 'Product 1',
'prod2': 'Product 2'
},
'tags': ['p1', 'p2']
}
mock_file = Mock()
mock_file.read = Mock(return_value=json.dumps(cached))
data = self.mgr._load_data(mock_file)
self.assertEqual(data, cached)
def test_has_changed(self):
cached = {
'products': {
'prod1': 'Product 1',
'prod2': 'Product 2'
},
'tags': ['p1', 'p2']
}
self.mgr._read_cache = Mock(return_value=cached)
self.mgr._cache_exists = Mock(return_value=True)
self.assertTrue(self.mgr.has_changed())
def test_has_changed_with_tags_only(self):
cached = {
'products': {
'a-product': {'productName': 'Product A', 'productId': 'a-product', 'version': '1.0', 'arch': 'x86_64'},
'b-product': {'productName': 'Product B', 'productId': 'b-product', 'version': '1.0', 'arch': 'x86_64'},
'c-product': {'productName': 'Product C', 'productId': 'c-product', 'version': '1.0', 'arch': 'x86_64'}
},
'tags': ['different']
}
self.mgr._read_cache = Mock(return_value=cached)
self.mgr._cache_exists = Mock(return_value=True)
self.assertTrue(self.mgr.has_changed())
def test_old_format_seen_as_invalid(self):
cached = {
'a-product': {'productName': 'Product A', 'productId': 'a-product', 'version': '1.0', 'arch': 'x86_64'},
'b-product': {'productName': 'Product B', 'productId': 'b-product', 'version': '1.0', 'arch': 'x86_64'},
'c-product': {'productName': 'Product C', 'productId': 'c-product', 'version': '1.0', 'arch': 'x86_64'}
}
self.mgr._read_cache = Mock(return_value=cached)
self.mgr._cache_exists = Mock(return_value=True)
self.assertTrue(self.mgr.has_changed())
def test_has_not_changed(self):
cached = {
'products': {
'a-product': {'productName': 'Product A', 'productId': 'a-product', 'version': '1.0', 'arch': 'x86_64'},
'b-product': {'productName': 'Product B', 'productId': 'b-product', 'version': '1.0', 'arch': 'x86_64'},
'c-product': {'productName': 'Product C', 'productId': 'c-product', 'version': '1.0', 'arch': 'x86_64'}
},
'tags': ['product-a', 'product-b', 'product-c', 'product']
}
self.mgr._read_cache = Mock(return_value=cached)
self.mgr._cache_exists = Mock(return_value=True)
self.assertFalse(self.mgr.has_changed())
def test_update_check_no_change(self):
uuid = 'FAKEUUID'
uep = Mock()
uep.updateConsumer = Mock()
self.mgr.has_changed = Mock(return_value=False)
self.mgr.write_cache = Mock()
self.mgr.update_check(uep, uuid)
self.assertEqual(0, uep.updateConsumer.call_count)
self.assertEqual(0, self.mgr.write_cache.call_count)
def test_update_check_has_changed(self):
uuid = 'FAKEUUID'
uep = Mock()
uep.updateConsumer = Mock()
self.mgr.has_changed = Mock(return_value=True)
self.mgr.write_cache = Mock()
self.mgr.update_check(uep, uuid, True)
expected = ["product", "product-a", "product-b", "product-c"]
uep.updateConsumer.assert_called_with(uuid,
content_tags=set(expected),
installed_products=self.mgr.format_for_server())
self.assertEqual(1, self.mgr.write_cache.call_count)
def test_update_check_error_uploading(self):
uuid = 'FAKEUUID'
uep = Mock()
self.mgr.has_changed = Mock(return_value=True)
self.mgr.write_cache = Mock()
# Throw an exception when trying to upload:
uep.updateConsumer = Mock(side_effect=Exception('BOOM!'))
self.assertRaises(Exception, self.mgr.update_check, uep, uuid, True)
expected = ["product", "product-a", "product-b", "product-c"]
uep.updateConsumer.assert_called_with(uuid,
content_tags=set(expected),
installed_products=self.mgr.format_for_server())
self.assertEqual(0, self.mgr.write_cache.call_count)
class TestReleaseStatusCache(SubManFixture):
def setUp(self):
super(TestReleaseStatusCache, self).setUp()
self.release_cache = ReleaseStatusCache()
self.release_cache.write_cache = Mock()
def test_load_from_server(self):
uep = Mock()
dummy_release = {'releaseVer': 'MockServer'}
uep.getRelease = Mock(return_value=dummy_release)
self.release_cache.read_status(uep, "THISISAUUID")
self.assertEqual(dummy_release, self.release_cache.server_status)
def test_server_no_release_call(self):
uep = Mock()
uep.getRelease = Mock(side_effect=RestlibException("boom"))
status = self.release_cache.read_status(uep, "SOMEUUID")
self.assertEqual(None, status)
def test_server_network_error_no_cache(self):
uep = Mock()
uep.getRelease = Mock(side_effect=socket.error("boom"))
self.release_cache._cache_exists = Mock(return_value=False)
self.assertEqual(None, self.release_cache.read_status(uep, "SOMEUUID"))
def test_server_network_error_with_cache(self):
uep = Mock()
uep.getRelease = Mock(side_effect=socket.error("boom"))
dummy_release = {'releaseVer': 'MockServer'}
self.release_cache._read_cache = Mock(return_value=dummy_release)
self.release_cache._cache_exists = Mock(return_value=True)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
def test_rate_limit_exceed_with_cache(self):
uep = Mock()
uep.getRelease = Mock(side_effect=RateLimitExceededException(429))
dummy_release = {'releaseVer': 'MockServer'}
self.release_cache._read_cache = Mock(return_value=dummy_release)
self.release_cache._cache_exists = Mock(return_value=True)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
def test_server_network_works_with_cache(self):
uep = Mock()
dummy_release = {'releaseVer': 'MockServer'}
uep.getRelease = Mock(return_value=dummy_release)
self.release_cache._cache_exists = Mock(return_value=True)
self.release_cache._read_cache = Mock(return_value=dummy_release)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
self.assertEqual(1, self.release_cache.write_cache.call_count)
self.assertEqual(0, self.release_cache._read_cache.call_count)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
self.assertEqual(1, uep.getRelease.call_count)
def test_server_network_works_cache_caches(self):
uep = Mock()
dummy_release = {'releaseVer': 'MockServer'}
uep.getRelease = Mock(return_value=dummy_release)
self.release_cache._cache_exists = Mock(return_value=False)
self.release_cache.server_status = None
self.release_cache._read_cache = Mock(return_value=dummy_release)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
self.assertEqual(1, self.release_cache.write_cache.call_count)
self.assertEqual(0, self.release_cache._read_cache.call_count)
self.release_cache._cache_exists = Mock(return_value=True)
self.assertEqual(dummy_release, self.release_cache.read_status(uep, "SOMEUUID"))
self.assertEqual(1, self.release_cache.write_cache.call_count)
self.assertEqual(1, uep.getRelease.call_count)
class TestEntitlementStatusCache(SubManFixture):
def setUp(self):
super(TestEntitlementStatusCache, self).setUp()
self.status_cache = EntitlementStatusCache()
self.status_cache.write_cache = Mock()
def test_load_from_server(self):
uep = Mock()
dummy_status = {"a": "1"}
uep.getCompliance = Mock(return_value=dummy_status)
self.status_cache.load_status(uep, "SOMEUUID")
self.assertEqual(dummy_status, self.status_cache.server_status)
self.assertEqual(1, self.status_cache.write_cache.call_count)
def test_load_from_server_on_date_args(self):
uep = Mock()
dummy_status = {"a": "1"}
uep.getCompliance = Mock(return_value=dummy_status)
self.status_cache.load_status(uep, "SOMEUUID", "2199-12-25")
self.assertEqual(dummy_status, self.status_cache.server_status)
self.assertEqual(1, self.status_cache.write_cache.call_count)
def test_load_from_server_on_date_kwargs(self):
uep = Mock()
dummy_status = {"a": "1"}
uep.getCompliance = Mock(return_value=dummy_status)
self.status_cache.load_status(uep, "SOMEUUID", on_date="2199-12-25")
self.assertEqual(dummy_status, self.status_cache.server_status)
self.assertEqual(1, self.status_cache.write_cache.call_count)
def test_server_no_compliance_call(self):
uep = Mock()
uep.getCompliance = Mock(side_effect=RestlibException("boom"))
status = self.status_cache.load_status(uep, "SOMEUUID")
self.assertEqual(None, status)
def test_server_network_error(self):
dummy_status = {"a": "1"}
uep = Mock()
uep.getCompliance = Mock(side_effect=socket.error("boom"))
self.status_cache._cache_exists = Mock(return_value=True)
self.status_cache._read_cache = Mock(return_value=dummy_status)
status = self.status_cache.load_status(uep, "SOMEUUID")
self.assertEqual(dummy_status, status)
self.assertEqual(1, self.status_cache._read_cache.call_count)
# Extremely unlikely but just in case:
def test_server_network_error_no_cache(self):
uep = Mock()
uep.getCompliance = Mock(side_effect=socket.error("boom"))
self.status_cache._cache_exists = Mock(return_value=False)
self.assertEqual(None, self.status_cache.load_status(uep, "SOMEUUID"))
def test_write_cache(self):
mock_server_status = {'fake server status': random.uniform(1, 2 ** 32)}
status_cache = EntitlementStatusCache()
status_cache.server_status = mock_server_status
cache_dir = tempfile.mkdtemp()
cache_file = os.path.join(cache_dir, 'status_cache.json')
status_cache.CACHE_FILE = cache_file
status_cache.write_cache()
# Try to load the file up to 5 times; if we still can't read it, fail.
# We don't know when the write_cache thread starts or ends, and the cache
# threads are not tracked, so poll with a short sleep between attempts.
tries = 0
while tries <= 5:
try:
new_status_buf = open(cache_file).read()
new_status = json.loads(new_status_buf)
break
except Exception as e:
log.exception(e)
tries += 1
time.sleep(.1)
continue
shutil.rmtree(cache_dir)
self.assertEqual(new_status, mock_server_status)
def test_unauthorized_exception_handled(self):
uep = Mock()
uep.getCompliance = Mock(side_effect=UnauthorizedException(401, "GET"))
self.assertEqual(None, self.status_cache.load_status(uep, "aaa"))
class TestPoolStatusCache(SubManFixture):
"""
Class for testing PoolStatusCache
"""
def setUp(self):
super(TestPoolStatusCache, self).setUp()
self.pool_status_cache = PoolStatusCache()
self.pool_status_cache.write_cache = Mock()
def test_load_data(self):
cached = {
'pools': {
'pool1': 'Pool 1',
'pool2': 'Pool 2'
},
'tags': ['p1', 'p2']
}
mock_file = Mock()
mock_file.read = Mock(return_value=json.dumps(cached))
data = self.pool_status_cache._load_data(mock_file)
self.assertEqual(data, cached)
def test_load_from_server(self):
uep = Mock()
dummy_pools = {
'pools': {
'pool1': 'Pool 1',
'pool2': 'Pool 2'
},
'tags': ['p1', 'p2']
}
uep.getEntitlementList = Mock(return_value=dummy_pools)
self.pool_status_cache.read_status(uep, "THISISAUUID")
self.assertEqual(dummy_pools, self.pool_status_cache.server_status)
class TestPoolTypeCache(SubManFixture):
"""
Class for testing PoolTypeCache
"""
def setUp(self):
super(TestPoolTypeCache, self).setUp()
self.cp_provider = inj.require(inj.CP_PROVIDER)
self.cp_provider.consumer_auth_cp = Mock()
self.cp = self.cp_provider.consumer_auth_cp
certs = [StubEntitlementCertificate(StubProduct('pid1'), pool=StubPool('someid'))]
self.ent_dir = StubEntitlementDirectory(certificates=certs)
self.pool_cache = inj.require(inj.POOL_STATUS_CACHE)
self.pool_cache.write_cache = Mock()
def test_empty_cache(self):
pooltype_cache = PoolTypeCache()
result = pooltype_cache.get("some id")
self.assertEqual('', result)
def test_get_pooltype(self):
self.cp.getEntitlementList.return_value = [self._build_ent_json('poolid', 'some type')]
pooltype_cache = PoolTypeCache()
pooltype_cache._do_update()
result = pooltype_cache.get("poolid")
self.assertEqual('some type', result)
def test_requires_update(self):
pooltype_cache = PoolTypeCache()
pooltype_cache.ent_dir = self.ent_dir
# Doesn't have data for pool with id 'someid'
self.assertTrue(pooltype_cache.requires_update())
pooltype_cache.pooltype_map['someid'] = 'some type'
# After adding data for that entitlement's pool, it shouldn't need an update
self.assertFalse(pooltype_cache.requires_update())
def test_update(self):
pooltype_cache = PoolTypeCache()
pooltype_cache.ent_dir = self.ent_dir
self.cp.getEntitlementList.return_value = [
self._build_ent_json('poolid', 'some type'),
self._build_ent_json('poolid2', 'some other type')]
# requires_update should be true, and should allow this method
# to generate a correct mapping
pooltype_cache.update()
self.assertEqual(2, len(pooltype_cache.pooltype_map))
self.assertEqual('some type', pooltype_cache.get('poolid'))
self.assertEqual('some other type', pooltype_cache.get('poolid2'))
# This is populated when available subs are refreshed
def test_update_from_pools(self):
# Input is a map of pool ids to pool json
pools_map = {}
for i in range(5):
pool_id = 'poolid' + str(i)
pools_map[pool_id] = self._build_pool_json(pool_id, 'some type')
pooltype_cache = PoolTypeCache()
pooltype_cache.update_from_pools(pools_map)
self.assertEqual(5, len(pooltype_cache.pooltype_map))
for i in range(5):
expected_id = 'poolid' + str(i)
self.assertEqual('some type', pooltype_cache.get(expected_id))
def test_requires_update_ents_with_no_pool(self):
pooltype_cache = PoolTypeCache()
pooltype_cache.ent_dir = self.ent_dir
for ent in self.ent_dir.certs:
ent.pool = None
# No ents have pools so there is nothing we can update
self.assertFalse(pooltype_cache.requires_update())
def test_reading_pool_type_from_json_cache(self):
pool_status = [self._build_ent_json('poolid', 'some type')]
self.pool_cache.load_status = Mock()
self.pool_cache.server_status = pool_status
pooltype_cache = PoolTypeCache()
pooltype_cache._do_update()
result = pooltype_cache.get("poolid")
self.assertEqual('some type', result)
def _build_ent_json(self, pool_id, pool_type):
result = {}
result['id'] = "1234"
result['pool'] = self._build_pool_json(pool_id, pool_type)
return result
def _build_pool_json(self, pool_id, pool_type):
return {'id': pool_id, 'calculatedAttributes': {'compliance_type': pool_type}}
class TestContentAccessCache(SubManFixture):
MOCK_CONTENT = {
"lastUpdate": "2016-12-01T21:56:35+0000",
"contentListing": {"42": ["cert-part1", "cert-part2"]}
}
MOCK_CONTENT_EMPTY_CONTENT_LISTING = {
"lastUpdate": "2016-12-01T21:56:35+0000",
"contentListing": None
}
MOCK_CERT = """
before
-----BEGIN ENTITLEMENT DATA-----
entitlement data goes here
-----END ENTITLEMENT DATA-----
after
"""
MOCK_OPEN_EMPTY = mock_open()
MOCK_OPEN_CACHE = mock_open(read_data=json.dumps(MOCK_CONTENT))
def setUp(self):
super(TestContentAccessCache, self).setUp()
self.cache = ContentAccessCache()
self.cache.cp_provider = Mock()
self.mock_uep = Mock()
self.mock_uep.getAccessibleContent = Mock(return_value=self.MOCK_CONTENT)
self.cache.cp_provider.get_consumer_auth_cp = Mock(return_value=self.mock_uep)
self.cache.identity = Mock()
self.cert = Mock()
@patch('subscription_manager.cache.open', MOCK_OPEN_EMPTY)
def test_empty_cache(self):
self.assertFalse(self.cache.exists())
@patch('subscription_manager.cache.open', MOCK_OPEN_EMPTY)
def test_writes_to_cache_after_read(self):
self.cache.check_for_update()
self.MOCK_OPEN_EMPTY.assert_any_call(ContentAccessCache.CACHE_FILE, 'w')
self.MOCK_OPEN_EMPTY().write.assert_any_call(json.dumps(self.MOCK_CONTENT))
@patch('subscription_manager.cache.open', MOCK_OPEN_EMPTY)
def test_cert_updated_after_read(self):
self.cert.serial = 42
update_data = self.cache.check_for_update()
self.cache.update_cert(self.cert, update_data)
self.MOCK_OPEN_EMPTY.assert_any_call(self.cert.path, 'w')
self.MOCK_OPEN_EMPTY().write.assert_any_call(''.join(self.MOCK_CONTENT['contentListing']['42']))
@patch('subscription_manager.cache.open', MOCK_OPEN_CACHE)
def test_check_for_update_provides_date(self):
mock_exists = Mock(return_value=True)
with patch('os.path.exists', mock_exists):
self.cache.check_for_update()
date = isodate.parse_date("2016-12-01T21:56:35+0000")
self.mock_uep.getAccessibleContent.assert_called_once_with(self.cache.identity.uuid, if_modified_since=date)
@patch('os.path.exists', Mock(return_value=True))
def test_cache_remove_deletes_file(self):
mock_remove = Mock()
with patch('os.remove', mock_remove):
self.cache.remove()
mock_remove.assert_called_once_with(ContentAccessCache.CACHE_FILE)
@patch('subscription_manager.cache.open', MOCK_OPEN_EMPTY)
def test_cache_handles_empty_content_listing(self):
self.mock_uep.getAccessibleContent = Mock(return_value=self.MOCK_CONTENT_EMPTY_CONTENT_LISTING)
self.cache.check_for_update()
# getting this far means we did not raise an exception :-)
@patch('subscription_manager.cache.open', MOCK_OPEN_EMPTY)
def test_cache_fails_server_issues_gracefully(self):
self.mock_uep.getAccessibleContent = Mock(side_effect=RestlibException(404))
self.cache.check_for_update()
# getting this far means we did not raise an exception :-)
class TestContentAccessModeCache(SubManFixture):
MOCK_CACHE_FILE_CONTENT = '{"7f85da06-5c35-44ba-931d-f11f6e581f89": "entitlement"}'
def setUp(self):
super(TestContentAccessModeCache, self).setUp()
self.cache = ContentAccessModeCache()
def test_reading_nonexisting_cache(self):
data = self.cache.read_cache_only()
self.assertIsNone(data)
def test_reading_existing_cache(self):
temp_cache_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_cache_dir)
self.cache.CACHE_FILE = os.path.join(temp_cache_dir, 'content_access_mode.json')
with open(self.cache.CACHE_FILE, 'w') as cache_file:
cache_file.write(self.MOCK_CACHE_FILE_CONTENT)
data = self.cache.read_cache_only()
self.assertTrue("7f85da06-5c35-44ba-931d-f11f6e581f89" in data)
self.assertEqual(data["7f85da06-5c35-44ba-931d-f11f6e581f89"], "entitlement")
class TestSupportedResourcesCache(SubManFixture):
MOCK_CACHE_FILE_CONTENT = '{"a3f43883-315b-4cc4-bfb5-5771946d56d7": {"": "/", "cdn": "/cdn"}}'
MOCK_SUPPORTED_RESOURCES_RESPONSE = {"pools": "/pools", "roles": "/roles", "users": "/users"}
def setUp(self):
super(TestSupportedResourcesCache, self).setUp()
self.cache = SupportedResourcesCache()
def test_reading_nonexisting_cache(self):
data = self.cache.read_cache_only()
self.assertIsNone(data)
def test_reading_existing_cache(self):
temp_cache_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_cache_dir)
self.cache.CACHE_FILE = os.path.join(temp_cache_dir, 'supported_resources.json')
with open(self.cache.CACHE_FILE, 'w') as cache_file:
cache_file.write(self.MOCK_CACHE_FILE_CONTENT)
data = self.cache.read_cache_only()
self.assertTrue("a3f43883-315b-4cc4-bfb5-5771946d56d7" in data)
self.assertEqual(data["a3f43883-315b-4cc4-bfb5-5771946d56d7"], {"": "/", "cdn": "/cdn"})
def test_cache_is_obsoleted_by_new_identity(self):
temp_cache_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_cache_dir)
self.cache.CACHE_FILE = os.path.join(temp_cache_dir, 'supported_resources.json')
with open(self.cache.CACHE_FILE, 'w') as cache_file:
cache_file.write(self.MOCK_CACHE_FILE_CONTENT)
mock_uep = Mock()
mock_uep.get_supported_resources = Mock(return_value=self.MOCK_SUPPORTED_RESOURCES_RESPONSE)
mock_identity = Mock()
mock_identity.uuid = 'f0000000-aaaa-bbbb-bbbb-5771946d56d8'
data = self.cache.read_data(uep=mock_uep, identity=mock_identity)
self.assertEqual(data, self.MOCK_SUPPORTED_RESOURCES_RESPONSE)
def test_cache_is_obsoleted_by_timeout(self):
old_timeout = self.cache.TIMEOUT
self.cache.TIMEOUT = 1
temp_cache_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_cache_dir)
self.cache.CACHE_FILE = os.path.join(temp_cache_dir, 'supported_resources.json')
with open(self.cache.CACHE_FILE, 'w') as cache_file:
cache_file.write(self.MOCK_CACHE_FILE_CONTENT)
mock_uep = Mock()
mock_uep.get_supported_resources = Mock(return_value=self.MOCK_SUPPORTED_RESOURCES_RESPONSE)
mock_identity = Mock()
mock_identity.uuid = 'a3f43883-315b-4cc4-bfb5-5771946d56d7'
time.sleep(2)
data = self.cache.read_data(uep=mock_uep, identity=mock_identity)
self.assertEqual(data, self.MOCK_SUPPORTED_RESOURCES_RESPONSE)
self.cache.TIMEOUT = old_timeout
class TestAvailableEntitlementsCache(SubManFixture):
MOCK_CACHE_FILE_CONTENT = '''{
"b1002709-6d67-443e-808b-a7afcbe5b47e": {
"filter_options": {
"after_date": null,
"future": null,
"match_installed": null,
"matches": "*fakeos*",
"no_overlap": null,
"on_date": null,
"service_level": null,
"show_all": null
},
"pools": [
{
"addons": null,
"attributes": [],
"contractNumber": "0",
"endDate": "01/16/2021",
"id": "ff8080816fb38f78016fb392d26f0267",
"management_enabled": false,
"pool_type": "Standard",
"productId": "fakeos-bits",
"productName": "Fake OS Bits",
"providedProducts": {
"38072": "Fake OS Bits"
},
"quantity": "5",
"roles": null,
"service_level": null,
"service_type": null,
"startDate": "01/17/2020",
"suggested": 1,
"usage": null
}
],
"timeout": 1579613054.079684
}
}
'''
def setUp(self):
super(TestAvailableEntitlementsCache, self).setUp()
self.cache = AvailableEntitlementsCache()
def test_reading_nonexisting_cache(self):
"""
Test reading cache, when there is no cache file yet
"""
data = self.cache.read_cache_only()
self.assertIsNone(data)
def test_reading_existing_cache(self):
"""
Test reading cache from file
"""
temp_cache_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_cache_dir)
self.cache.CACHE_FILE = os.path.join(temp_cache_dir, 'available_entitlements.json')
with open(self.cache.CACHE_FILE, 'w') as cache_file:
cache_file.write(self.MOCK_CACHE_FILE_CONTENT)
data = self.cache.read_cache_only()
self.assertTrue("b1002709-6d67-443e-808b-a7afcbe5b47e" in data)
self.assertEqual(data["b1002709-6d67-443e-808b-a7afcbe5b47e"]["timeout"], 1579613054.079684)
self.assertEqual(data["b1002709-6d67-443e-808b-a7afcbe5b47e"]["filter_options"]["matches"], "*fakeos*")
self.assertEqual(len(data["b1002709-6d67-443e-808b-a7afcbe5b47e"]["pools"]), 1)
def test_timeout(self):
"""
Test computing timeout of cache based on smoothed response time (SRT)
"""
uep = inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
uep.conn.smoothed_rt = 3.0
timeout = self.cache.timeout()
self.assertTrue(timeout >= self.cache.LBOUND)
self.assertTrue(timeout <= self.cache.UBOUND)
def test_timeout_no_srt(self):
"""
Test computing timeout, when there is no SRT yet
"""
uep = inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
uep.conn.smoothed_rt = None
timeout = self.cache.timeout()
self.assertEqual(timeout, self.cache.LBOUND)
def test_min_timeout(self):
"""
Test computing the timeout when SRT is smaller than the lower bound
"""
uep = inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
uep.conn.smoothed_rt = 0.01
timeout = self.cache.timeout()
self.assertEqual(timeout, self.cache.LBOUND)
def test_max_timeout(self):
"""
Test computing the timeout when SRT is bigger than the upper bound
"""
uep = inj.require(inj.CP_PROVIDER).get_consumer_auth_cp()
uep.conn.smoothed_rt = 20.0
timeout = self.cache.timeout()
self.assertEqual(timeout, self.cache.UBOUND)
| gpl-2.0 | -9,068,067,173,417,491,000 | 39.500448 | 124 | 0.626627 | false |
Stratoscale/upseto | upseto/gitwrapper.py | 1 | 3364 | import urlparse
import os
import re
from upseto import run
from upseto import gitconfigparser
def originURLBasename(originURL):
originURLBasename = urlparse.urlparse(originURL).path.split("/")[-1]
if originURLBasename.endswith(".git"):
originURLBasename = originURLBasename[: - len(".git")] # pragma: no cover
return originURLBasename
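# Added doctest-style illustration (not in the original module); the example
# URLs are made up:
#   >>> originURLBasename("https://github.com/example/upseto.git")
#   'upseto'
#   >>> originURLBasename("git@github.com:example/upseto")
#   'upseto'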
def normalizeOriginURL(originURL):
originURL = re.sub(r'^git@(\S+?):(.*)$', r'https://\1/\2', originURL)
if originURL.endswith(".git"):
originURL = originURL[: - len(".git")] # pragma: no cover
return originURL
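# Added doctest-style illustration (not in the original module); the example
# URL is made up:
#   >>> normalizeOriginURL("git@github.com:example/upseto.git")
#   'https://github.com/example/upseto'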
class GitWrapper:
def __init__(self, directory):
self._cachedOriginURL = None
self._directory = directory
if not os.path.isdir(os.path.join(directory, ".git")):
raise Exception(
"Directory '%s' does not look like a git repository (no .git subdirectory)" %
directory)
if self.originURLBasename() != os.path.basename(os.path.abspath(directory)):
raise Exception(
"Directory '%s' must be named exactly like the "
"origin URL '%s' (with no '.git' extension)" % (
directory, self.originURL()))
@classmethod
def existing(cls, originURL, baseDirectory):
basename = originURLBasename(originURL)
directory = os.path.join(baseDirectory, basename)
if not os.path.isdir(directory):
raise Exception("Directory '%s' does not exist" % directory)
existing = cls(directory)
if normalizeOriginURL(existing.originURL()) != normalizeOriginURL(originURL):
raise Exception(
"Existing directory '%s' origin URL is '%s' which is not the expected '%s' "
"(normalized '%s' and '%s')" % (
directory, existing.originURL(), originURL,
normalizeOriginURL(existing.originURL()),
normalizeOriginURL(originURL)))
return existing
@classmethod
def clone(cls, originURL, baseDirectory):
basename = originURLBasename(originURL)
run.run(["git", "clone", originURL, basename], cwd=baseDirectory)
clone = cls(os.path.join(baseDirectory, basename))
clone.checkout('master')
return clone
def directory(self):
return self._directory
def hash(self, branch='HEAD'):
return self._run(["git", "rev-parse", branch]).strip()
def originURL(self):
if self._cachedOriginURL is None:
url = gitconfigparser.GitConfigParser(self._directory).originURL()
parts = list(urlparse.urlparse(url))
netloc = parts[1]
if '@' in netloc:
netloc = netloc.split('@')[1]
parts[1] = netloc
self._cachedOriginURL = urlparse.urlunparse(parts)
return self._cachedOriginURL
def originURLBasename(self):
return originURLBasename(self.originURL())
def fetch(self):
self._run(["git", "fetch", "--prune"])
def checkout(self, branch):
self._run(["git", "checkout", branch])
def shortStatus(self):
return self._run(["git", "status", "-s"])
def run(self, args):
return self._run(["git"] + args)
def _run(self, command):
return run.run(command=command, cwd=self._directory)
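# Rough usage sketch (added comment, not part of the original module; URL and
# paths are made up):
#   wrapper = GitWrapper.clone("https://github.com/example/project", "/tmp/work")
#   wrapper.fetch()
#   wrapper.checkout("master")
#   print(wrapper.hash())
# GitWrapper.existing() attaches to an already cloned directory instead and
# verifies that its origin URL matches the expected one.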
| apache-2.0 | -6,241,408,017,556,831,000 | 35.172043 | 93 | 0.603448 | false |
JohnLZeller/dd-agent | tests/test_win32.py | 1 | 1616 | # stdlib
import unittest
import logging
import gc
import sys
# 3p
#from nose.plugins.attrib import attr
# project
import checks.system.win32 as w32
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__file__)
AGENT_CONFIG = {} # None of the windows checks use this.
class TestWin32(unittest.TestCase):
def _checkMemoryLeak(self, func):
# FIXME: This should use @attr('windows') instead of checking for the
# platform, but just importing nose.plugins.attrib causes all the tests
# to fail with uncollected garbage.
if sys.platform != 'win32':
return
gc.set_debug(gc.DEBUG_LEAK)
try:
start = len(gc.garbage)
func()
end = len(gc.garbage)
self.assertEquals(end - start, 0, gc.garbage)
finally:
gc.set_debug(0)
def testDisk(self):
dsk = w32.Disk(log)
self._checkMemoryLeak(lambda: dsk.check(AGENT_CONFIG))
def testIO(self):
io = w32.IO(log)
self._checkMemoryLeak(lambda: io.check(AGENT_CONFIG))
def testProcesses(self):
proc = w32.Processes(log)
self._checkMemoryLeak(lambda: proc.check(AGENT_CONFIG))
def testMemory(self):
mem = w32.Memory(log)
self._checkMemoryLeak(lambda: mem.check(AGENT_CONFIG))
def testNetwork(self):
net = w32.Network(log)
self._checkMemoryLeak(lambda: net.check(AGENT_CONFIG))
def testCPU(self):
cpu = w32.Cpu(log)
self._checkMemoryLeak(lambda: cpu.check(AGENT_CONFIG))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -6,161,906,718,949,827,000 | 25.491803 | 79 | 0.627475 | false |
gilesp/quickdrawmcgraw | pythagoras_test.py | 1 | 1110 | import unittest
from math import degrees
from pythagoras import cosineRule, angleA
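# Background note (added for clarity): the functions under test implement the
# law of cosines, angle = acos((a**2 + b**2 - c**2) / (2*a*b)), where the
# returned angle is the one opposite the third argument.  For sides 2, 3, 4
# that gives acos((4 + 9 - 16) / 12) = acos(-0.25) ~= 1.82 rad, matching
# test_cosineRule_2_3_4 below.  The argument conventions of cosineRule and
# angleA are inferred from these assertions, not from the module itself.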
class TestPythagoras(unittest.TestCase):
def setUp(self):
pass
def test_cosineRule_2_3_4(self):
angle = cosineRule(2, 3, 4)
angle_as_string = "{0:.2f}".format(angle)
self.assertEqual(angle_as_string, "1.82")
def test_cosineRule_5_6_8(self):
angle = cosineRule(5, 6, 8)
angle_as_string = "{0:.2f}".format(angle)
self.assertEqual(angle_as_string, "1.62")
def test_cosineRule_111_110_180(self):
angle = cosineRule(111, 110, 180)
angle_as_string = "{0:.2f}".format(angle)
self.assertEqual(angle_as_string, "1.90")
def test_cosineRule_equals_angleA(self):
self.assertEquals(angleA(4, 2, 3), cosineRule(2, 3, 4))
def test_interior_angles_equal_180(self):
a = 2
b = 3
c = 4
A = angleA(2, 3, 4)
B = angleA(3, 4, 2)
C = angleA(4, 2, 3)
totalInRads = A + B + C
self.assertEquals(degrees(totalInRads), 180)
if __name__ == '__main__':
unittest.main()
| mit | 8,295,286,597,199,625,000 | 27.461538 | 63 | 0.582883 | false |
mildass/tlsfuzzer | scripts/test-dhe-rsa-key-exchange-with-bad-messages.py | 1 | 13139 | # Author: Hubert Kario, (c) 2015
# Released under Gnu GPL v2.0, see LICENSE file for details
"""Test for DHE_RSA key exchange error handling"""
from __future__ import print_function
import traceback
import sys
import getopt
import re
from itertools import chain
from tlsfuzzer.runner import Runner
from tlsfuzzer.messages import Connect, ClientHelloGenerator, \
ClientKeyExchangeGenerator, ChangeCipherSpecGenerator, \
FinishedGenerator, ApplicationDataGenerator, AlertGenerator, \
truncate_handshake, TCPBufferingEnable, TCPBufferingDisable, \
TCPBufferingFlush, pad_handshake
from tlsfuzzer.expect import ExpectServerHello, ExpectCertificate, \
ExpectServerHelloDone, ExpectChangeCipherSpec, ExpectFinished, \
ExpectAlert, ExpectClose, ExpectServerKeyExchange, \
ExpectApplicationData
from tlslite.constants import CipherSuite, AlertLevel, AlertDescription, \
ExtensionType
def natural_sort_keys(s, _nsre=re.compile('([0-9]+)')):
return [int(text) if text.isdigit() else text.lower()
for text in re.split(_nsre, s)]
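# Added doctest-style illustration (not part of the original script):
#   >>> sorted(["sanity", "test 10", "test 2"], key=natural_sort_keys)
#   ['sanity', 'test 2', 'test 10']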
def help_msg():
print("Usage: <script-name> [-h hostname] [-p port] [[probe-name] ...]")
print(" -h hostname name of the host to run the test against")
print(" localhost by default")
print(" -p port port number to use for connection, 4433 by default")
print(" probe-name if present, will run only the probes with given")
print(" names and not all of them, e.g \"sanity\"")
print(" -e probe-name exclude the probe from the list of the ones run")
print(" may be specified multiple times")
print(" -a alert numerical value of the expected alert for messages")
print(" with publicly invalid client key shares,")
print(" 47 (illegal_parameter) by default")
print(" --help this message")
def main():
"""Test if server correctly handles malformed DHE_RSA CKE messages"""
host = "localhost"
port = 4433
run_exclude = set()
alert = AlertDescription.illegal_parameter
argv = sys.argv[1:]
opts, args = getopt.getopt(argv, "h:p:e:a:", ["help"])
for opt, arg in opts:
if opt == '-h':
host = arg
elif opt == '-p':
port = int(arg)
elif opt == '-e':
run_exclude.add(arg)
elif opt == '--help':
help_msg()
sys.exit(0)
elif opt == "-a":
alert = int(arg)
else:
raise ValueError("Unknown option: {0}".format(opt))
if args:
run_only = set(args)
else:
run_only = None
conversations = {}
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_DHE_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerKeyExchange())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(ExpectChangeCipherSpec())
node = node.add_child(ExpectFinished())
node = node.add_child(ApplicationDataGenerator(
bytearray(b"GET / HTTP/1.0\n\n")))
node = node.add_child(ExpectApplicationData())
node = node.add_child(AlertGenerator(AlertLevel.warning,
AlertDescription.close_notify))
node = node.add_child(ExpectAlert())
node.next_sibling = ExpectClose()
node = node.add_child(ExpectClose())
conversations["sanity"] = conversation
# invalid dh_Yc value
#for i in [2*1024, 4*1024, 8*1024, 16*1024]:
for i in [8*1024]:
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_DHE_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerKeyExchange())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(TCPBufferingEnable())
node = node.add_child(ClientKeyExchangeGenerator(dh_Yc=2**(i)))
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(TCPBufferingDisable())
node = node.add_child(TCPBufferingFlush())
node = node.add_child(ExpectAlert(AlertLevel.fatal,
alert))
node = node.add_child(ExpectClose())
conversations["invalid dh_Yc value - " + str(i) + "b"] = conversation
for i in [0, 1]:
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_DHE_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerKeyExchange())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(TCPBufferingEnable())
node = node.add_child(ClientKeyExchangeGenerator(dh_Yc=i))
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(TCPBufferingDisable())
node = node.add_child(TCPBufferingFlush())
node = node.add_child(ExpectAlert(AlertLevel.fatal,
alert))
node = node.add_child(ExpectClose())
conversations["invalid dh_Yc value - {0}".format(i)] = conversation
# share equal to p
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_DHE_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerKeyExchange())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(TCPBufferingEnable())
node = node.add_child(ClientKeyExchangeGenerator(p_as_share=True))
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(TCPBufferingDisable())
node = node.add_child(TCPBufferingFlush())
node = node.add_child(ExpectAlert(AlertLevel.fatal,
alert))
node = node.add_child(ExpectClose())
conversations["invalid dh_Yc value - p"] = conversation
# share equal to p-1
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_DHE_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerKeyExchange())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(TCPBufferingEnable())
node = node.add_child(ClientKeyExchangeGenerator(p_1_as_share=True))
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(TCPBufferingDisable())
node = node.add_child(TCPBufferingFlush())
node = node.add_child(ExpectAlert(AlertLevel.fatal,
alert))
node = node.add_child(ExpectClose())
conversations["invalid dh_Yc value - p-1"] = conversation
# truncated dh_Yc value
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_DHE_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerKeyExchange())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(TCPBufferingEnable())
node = node.add_child(truncate_handshake(ClientKeyExchangeGenerator(),
1))
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(TCPBufferingDisable())
node = node.add_child(TCPBufferingFlush())
node = node.add_child(ExpectAlert(AlertLevel.fatal,
AlertDescription.decode_error))
node = node.add_child(ExpectClose())
conversations["truncated dh_Yc value"] = conversation
# padded Client Key Exchange
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_DHE_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.
renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerKeyExchange())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(TCPBufferingEnable())
node = node.add_child(pad_handshake(ClientKeyExchangeGenerator(),
1))
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(TCPBufferingDisable())
node = node.add_child(TCPBufferingFlush())
node = node.add_child(ExpectAlert(AlertLevel.fatal,
AlertDescription.decode_error))
node = node.add_child(ExpectClose())
conversations["padded Client Key Exchange"] = conversation
# run the conversations
good = 0
bad = 0
failed = []
# make sure that sanity test is run first and last
# to verify that the server was running and kept running throughout
sanity_test = ('sanity', conversations['sanity'])
ordered_tests = chain([sanity_test],
filter(lambda x: x[0] != 'sanity',
conversations.items()),
[sanity_test])
for c_name, c_test in ordered_tests:
if run_only and c_name not in run_only or c_name in run_exclude:
continue
print("{0} ...".format(c_name))
runner = Runner(c_test)
res = True
try:
runner.run()
except:
print("Error while processing")
print(traceback.format_exc())
res = False
if res:
good += 1
print("OK\n")
else:
bad += 1
failed.append(c_name)
print("Test version 2")
print("Check if server properly verifies received Client Key Exchange")
print("message. That the extra data (pad) at the end is noticed, that")
print("too short message is rejected and a message with \"obviously\"")
print("wrong client key share is rejected")
print("Test end")
print("successful: {0}".format(good))
print("failed: {0}".format(bad))
failed_sorted = sorted(failed, key=natural_sort_keys)
print(" {0}".format('\n '.join(repr(i) for i in failed_sorted)))
if bad > 0:
sys.exit(1)
if __name__ == "__main__":
main()
| gpl-2.0 | 9,218,553,081,500,835,000 | 41.937908 | 83 | 0.593729 | false |
hgwood/codingame | shadows_of_the_knight/part_two.py | 1 | 1627 | import sys
import random
import math
w, h = map(int, input().split())
jumps = int(input())
x, y = map(int, input().split())
px, py = x, y
search_zone = [(x, y) for x in range(w) for y in range(h)]
def distance(ax, ay, bx, by):
return math.sqrt((bx - ax)**2 + (by - ay)**2)
def around(zone, x, y):
return [(x, y) for (x, y) in (
(x, y - 1),
(x + 1, y - 1),
(x + 1, y),
(x + 1, y + 1),
(x, y + 1),
(x - 1, y + 1),
(x - 1, y),
(x - 1, y - 1)) if (x, y) in zone]
def centroid(zone):
sumx, sumy = (0, 0)
for x, y in zone:
sumx += x
sumy += y
print(sumx / len(zone), sumy / len(zone), file=sys.stderr)
result = round(sumx / len(zone)), round(sumy / len(zone))
if result not in zone: result = random.choice(around(zone, *result))
return result
while True:
temperature = input()
if temperature == "UNKNOWN": pass
elif temperature == "WARMER":
search_zone = [(szx, szy) for (szx, szy) in search_zone if distance(szx, szy, x, y) < distance(szx, szy, px, py)]
elif temperature == "COLDER":
search_zone = [(szx, szy) for (szx, szy) in search_zone if distance(szx, szy, x, y) > distance(szx, szy, px, py)]
elif temperature == "SAME":
search_zone = [(szx, szy) for (szx, szy) in search_zone if distance(szx, szy, x, y) == distance(szx, szy, px, py)]
px, py = x, y
x, y = centroid(search_zone)
search_zone = [(szx, szy) for (szx, szy) in search_zone if (szx, szy) != (x, y)]
print(w, h, jumps, x, y, temperature, len(search_zone), file=sys.stderr)
print(x, y)
| gpl-3.0 | 9,150,318,430,111,574,000 | 32.895833 | 122 | 0.540873 | false |
rosenbrockc/aflow | tests/test_entries.py | 1 | 4263 | """Tests that :class:`aflow.entries.Entry` objects have the correct
attributes and that lazy fetching works correctly.
"""
import pytest
def test_eq_hash(paper):
"""Tests equality and hashing of database entries.
"""
paper.reset_iter()
a = paper[0]
assert a == a
assert hash(a) == hash(a.auid)
assert "http://aflowlib.duke.edu/AFLOWDATA" in str(a)
def test_files(paper, tmpdir):
from aflow.entries import AflowFile
from os import path, remove
a = paper[2]
assert isinstance(a.files, list)
assert len(a.files) > 0
assert isinstance(a.files[4], AflowFile)
contents = a.files[4]()
assert len(contents) > 20
first = a.files[4].filename
target = str(tmpdir.join("files_contcar"))
a.files[first](target)
assert path.isfile(target)
with pytest.raises(KeyError):
a.files["dummy"]
a = AflowFile('aflowlib.duke.edu:AFLOWDATA/ICSD_WEB/HEX/Be1O1_ICSD_15620',
'EIGENVAL.bands.xz')
a.__call__()
target = path.abspath(path.expanduser('EIGENVAL.bands.xz'))
assert path.isfile(target)
remove(target)
target = str(tmpdir.join("eigenval.bz2"))
a.__call__(target)
assert path.isfile(target)
def test_atoms(paper):
from aflow import K
paper.reset_iter()
from ase.calculators.lj import LennardJones
from ase.atoms import Atoms
LJ = LennardJones()
kw = {}
kw[K.energy_cell] = "dft_energy"
rawentry = paper[2]
at = rawentry.atoms(keywords=kw, calculator=LJ)
assert isinstance(at, Atoms)
assert isinstance(at.get_total_energy(), float)
assert "dft_energy" in at.results
assert at.results["dft_energy"] == rawentry.energy_cell
at0 = rawentry.atoms(keywords=kw, calculator=LJ)
assert at0 == at
at2 = paper[2].atoms(calculator=LJ)
assert not hasattr(at2, "results")
def test_corner():
"""Tests corner cases in the module that aren't raised during
normal use.
"""
from aflow.entries import _val_from_str
assert _val_from_str("dummy", 22) == 22
def test_fetched(paper):
"""Makes sure that the attributes included in the original query
are returned correctly.
"""
paper.reset_iter()
for i, entry in enumerate(paper):
assert isinstance(entry.Egap, float)
assert isinstance(entry.agl_thermal_conductivity_300K, float)
def test_lazy():
"""Tests lazy retrieval of entry attributes.
"""
a = {
"compound": "Be2O2",
"auid":"aflow:ed51b7b3938f117f",
"aurl":"aflowlib.duke.edu:AFLOWDATA/ICSD_WEB/HEX/Be1O1_ICSD_15620",
"agl_thermal_conductivity_300K":"53.361",
"Egap":"7.4494"
}
b = {
"compound":"B1H4Na1",
"auid":"aflow:3a531e5b3aa9205e",
"aurl":"aflowlib.duke.edu:AFLOWDATA/ICSD_WEB/FCC/B1H4Na1_ICSD_165835",
"agl_thermal_conductivity_300K":"7.45279",
"Egap":"6.6252"
}
from aflow.entries import Entry
A = Entry(**a)
B = Entry(**b)
assert A.Egap == 7.4494
assert A.agl_thermal_conductivity_300K == 53.361
assert A.energy_atom == -7.10342
assert A.catalog == 'ICSD\n'
assert B.Egap == 6.6252
assert B.agl_thermal_conductivity_300K == 7.45279
assert B.volume_cell == 56.9766
assert B.catalog == 'ICSD\n'
def test_all():
"""Tests all retrievals for a given entry, including those it
doesn't have.
"""
entries = [
{
"compound": "Ag2I2",
"auid": "aflow:008f8da25d4acde9",
"aurl": "aflowlib.duke.edu:AFLOWDATA/ICSD_WEB/TET/Ag1I1_ICSD_28230",
"agl_thermal_conductivity_300K": "0.562013",
"Egap": "1.9774"
},
{
"compound": "Mg1",
"auid": "aflow:00528d06f69c7b55",
"aurl": "aflowlib.duke.edu:AFLOWDATA/LIB2_RAW/CeMg_pv/304"
}
]
from aflow.entries import Entry
from aflow import list_keywords
for entry_vals in entries:
A = Entry(**entry_vals)
kws = list_keywords()
kws.append('catalog')
haskw = A.keywords
for kw in kws:
if kw in haskw:
assert getattr(A, kw) is not None
else:
assert getattr(A, kw) is None
| mit | 6,268,098,524,630,799,000 | 28.811189 | 80 | 0.60943 | false |
ntoll/yotta | yotta/init.py | 1 | 4970 | # Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
from __future__ import print_function
import os
import logging
import re
# Component, , represents an installed component, internal
from .lib import component
# version, , represent versions and specifications, internal
from .lib import version
# validate, , validate various things, internal
from .lib import validate
Known_Licenses = {
'isc': 'https://spdx.org/licenses/ISC',
'apache-2.0': 'https://spdx.org/licenses/Apache-2.0',
'mit': 'https://spdx.org/licenses/MIT',
'bsd-3-clause': 'https://spdx.org/licenses/BSD-3-Clause'
}
Git_Repo_RE = re.compile(r"^(git[+a-zA-Z-]*:.*|.*\.git|.*git@.*github\.com.*)$")
HG_Repo_RE = re.compile(r"^(hg[+a-zA-Z-]*:.*|.*\.hg)$")
SVN_Repo_RE = re.compile(r"^svn[+a-zA-Z-]*:.*$")
def getUserInput(question, default=None, type_class=str):
# python 2 + 3 compatibility
try:
global input
input = raw_input
except NameError:
pass
while True:
default_descr = ''
if default is not None:
default_descr = ' <%s> ' % str(default)
value = input(question + default_descr)
if default is not None and not value:
if type_class:
return type_class(default)
else:
return default
try:
typed_value = type_class(value)
break
except:
print('"%s" isn\'t a valid "%s" value' % (value, type_class.__name__))
return typed_value
def yesNo(string):
if string.strip().lower() in ('yes', 'y'):
return True
elif string.strip().lower() in ('no', 'n'):
return False
else:
raise ValueError()
yesNo.__name__ = "Yes/No"
def repoObject(string):
string = string.strip()
if not string:
return None
elif Git_Repo_RE.match(string):
repo_type = 'git'
url = Git_Repo_RE.match(string).group(0)
elif HG_Repo_RE.match(string):
repo_type = 'hg'
url = HG_Repo_RE.match(string).group(0)
elif SVN_Repo_RE.match(string):
repo_type = 'svn'
url = SVN_Repo_RE.match(string).group(0)
else:
raise ValueError()
return {'type':repo_type, 'url':url}
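# Illustration (added comment, not in the original module; example inputs are
# made up): a value such as "git@github.com:example/module.git" is classified
# as {'type': 'git', 'url': <the same string>}, "hg+https://host/repo" as type
# 'hg', and an empty string returns None, meaning "no repository".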
def listOfWords(string):
if isinstance(string, list):
return string
else:
return list(filter(bool, re.split(",|\\s", string)))
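# Added doctest-style illustration (not in the original module):
#   >>> listOfWords("tls, networking embedded")
#   ['tls', 'networking', 'embedded']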
def addOptions(parser):
pass
def execCommand(args, following_args):
cwd = os.getcwd()
c = component.Component(cwd)
if c:
logging.info('The current directory already contains a module: the existing description will be modified')
elif os.path.isfile(c.getDescriptionFile()):
logging.error('A module description exists but could not be loaded:')
logging.error(c.error)
return 1
default_name = c.getName()
if not default_name:
default_name = validate.componentNameCoerced(os.path.split(cwd)[1])
c.setName(getUserInput("Enter the module name:", default_name))
c.setVersion(getUserInput("Enter the initial version:", str(c.getVersion() or "0.0.0"), version.Version))
def current(x):
return c.description[x] if x in c.description else None
c.description['description'] = getUserInput("Short description: ", current('description'))
c.description['keywords'] = getUserInput("Keywords: ", ' '.join(current('keywords') or []), listOfWords)
c.description['author'] = getUserInput("Author: ", current('author'))
current_repo_url = current('repository')
if isinstance(current_repo_url, dict):
current_repo_url = current_repo_url['url']
new_repo_url = getUserInput("Repository url: ", current_repo_url, repoObject)
if new_repo_url:
c.description['repository'] = new_repo_url
c.description['homepage'] = getUserInput("Homepage: ", current('homepage'))
if not current('licenses') or current('license'):
license = getUserInput('What is the license for this project (Apache-2.0, ISC, MIT etc.)? ', 'Apache-2.0')
license_url = None
if license.lower().strip() in Known_Licenses:
license_url = Known_Licenses[license.lower().strip()]
c.description['licenses'] = [{'type':license, 'url':license_url}]
else:
c.description['license'] = license
c.description['dependencies'] = current('dependencies') or {}
c.description['targetDependencies'] = current('targetDependencies') or {}
isexe = getUserInput("Is this module an executable?", "no", yesNo)
if isexe:
c.description['bin'] = './source'
# Create folders while initing
folders_to_create = ["./source", "./test", "./" + c.getName()]
for folder_name in folders_to_create:
if not os.path.exists(folder_name):
os.mkdir(folder_name)
c.writeDescription()
| apache-2.0 | -5,474,865,060,728,439,000 | 32.809524 | 114 | 0.617505 | false |
dkopecek/amplify | third-party/quex-0.65.2/quex/engine/analyzer/optimizer.py | 1 | 1847 | """TODO:
NOTE: Acceptance Pruning of 'Pre-Contexts', 'Non-Ambiguous Post Context'
and 'Backward Input Position Detectors' happens with the original
state machines: Module 'state_machine/acceptance_pruning.py'
(*) Postpone acceptance storage and position storage as much as
possible. This decreases the probability that a transition
sequence ever hits such places.
(2) If the state is the terminal of a post-context pattern
without further transitions, then the input position
is set to the end of the core pattern. Thus, it does
not need to be incremented.
(11) If all successor acceptance states depend on
a certain pre-context flag being raised, then
the first state on that path can drop-out
on the condition that the pre-context is not met.
(12) When a terminal is reached where the paths took
care of the pre-context checks, then there is no
need to check it again in the terminal.
(16) If no successor acceptance state 'cares' about the lexeme and
a 'dont-care' acceptance state has been passed, then then reload
can set the lexeme_start_p to the current input position and
reload the buffer from start.
(17) All non-acceptance states that immediately follow a 'skipper
state' must cause 'skip-failure' on drop-out.
"""
def do(analyzer):
# (*) Use information about position storage registers that can be shared.
# Replace old register values with new ones.
for state in analyzer.state_db.itervalues():
state.entry.replace_position_registers(analyzer.position_register_map)
state.entry.delete_superfluous_commands()
analyzer.drop_out.entry.replace_position_registers(analyzer.position_register_map)
return analyzer
| gpl-2.0 | -4,873,876,987,642,983,000 | 40.044444 | 86 | 0.696806 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/express_route_circuit_sku.py | 1 | 1581 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitSku(Model):
"""Contains SKU in an ExpressRouteCircuit.
:param name: The name of the SKU.
:type name: str
:param tier: The tier of the SKU. Possible values are 'Standard' and
'Premium'. Possible values include: 'Standard', 'Premium', 'Transport'
:type tier: str or
~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitSkuTier
:param family: The family of the SKU. Possible values are: 'UnlimitedData'
and 'MeteredData'. Possible values include: 'UnlimitedData', 'MeteredData'
:type family: str or
~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitSkuFamily
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ExpressRouteCircuitSku, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.tier = kwargs.get('tier', None)
self.family = kwargs.get('family', None)
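# Illustrative usage sketch (added comment, not part of the generated SDK
# code; the values are examples only):
#   sku = ExpressRouteCircuitSku(name='Standard_MeteredData',
#                                tier='Standard',
#                                family='MeteredData')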
| mit | -8,774,509,893,830,967,000 | 38.525 | 79 | 0.602783 | false |
hjoliver/cylc | tests/unit/test_indep_task_queues.py | 1 | 3273 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests for the task queue manager module
import pytest
from unittest.mock import Mock
from collections import Counter
from cylc.flow.task_queues.independent import IndepQueueManager
from cylc.flow.task_state import TASK_STATUS_PREPARING
MEMBERS = {"a", "b", "c", "d", "e", "f"}
ACTIVE = Counter(["a", "a", "d"])
ALL_TASK_NAMES = [
"o1", "o2", "o3", "o4", "o5", "o6", "o7",
"s1", "s2", "s3", "s4", "s5",
"b1", "b2", "b3", "b4", "b5",
"foo"
]
DESCENDANTS = {
"root": ALL_TASK_NAMES + ["BIG", "SML", "OTH", "foo"],
"BIG": ["b1", "b2", "b3", "b4", "b5"],
"SML": ["s1", "s2", "s3", "s4", "s5"],
"OTH": ["o1", "o2", "o3", "o4", "o5", "o6", "o7"]
}
QCONFIG = {
"default": {
"limit": 6,
"members": [] # (auto: all task names)
},
"big": {
"members": ["BIG", "foo"],
"limit": 2
},
"sml": {
"members": ["SML", "foo"],
"limit": 3
}
}
READY_TASK_NAMES = ["b3", "s4", "o2", "s3", "b4", "o3", "o4", "o5", "o6", "o7"]
@pytest.mark.parametrize(
"active,"
"expected_released,"
"expected_foo_groups",
[
(
Counter(["b1", "b2", "s1", "o1"]),
["s4", "o2", "s3", "o3", "o4", "o5", "o6"],
["sml"]
)
]
)
def test_queue_and_release(
active,
expected_released,
expected_foo_groups):
"""Test task queue and release."""
# configure the queue
queue_mgr = IndepQueueManager(QCONFIG, ALL_TASK_NAMES, DESCENDANTS)
# add newly ready tasks to the queue
queue_me = []
for name in READY_TASK_NAMES:
itask = Mock()
itask.tdef.name = name
itask.state.is_held = False
queue_me.append(itask)
queue_mgr.push_tasks(queue_me)
# release tasks, given current active task counter
released = queue_mgr.release_tasks(active)
assert sorted([r.tdef.name for r in released]) == sorted(expected_released)
# check released tasks change state to "preparing", and not is_queued
for r in released:
assert r.state.reset.called_with(TASK_STATUS_PREPARING)
assert r.state.reset.called_with(is_queued=False)
# check that adopted orphans end up in the default queue
orphans = ["orphan1", "orphan2"]
queue_mgr.adopt_tasks(orphans)
for orphan in orphans:
assert orphan in queue_mgr.queues["default"].members
# check second assignment overrides first
for group in expected_foo_groups:
assert "foo" in queue_mgr.queues[group].members
| gpl-3.0 | -3,384,419,531,014,326,300 | 29.588785 | 79 | 0.605866 | false |
ros2/launch | launch/launch/substitutions/command.py | 1 | 5080 | # Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the Command substitution."""
import os
import shlex
import subprocess
from typing import Iterable
from typing import List
from typing import Text
import launch.logging
from .substitution_failure import SubstitutionFailure
from ..frontend.expose import expose_substitution
from ..launch_context import LaunchContext
from ..some_substitutions_type import SomeSubstitutionsType
from ..substitution import Substitution
@expose_substitution('command')
class Command(Substitution):
"""
Substitution that gets the output of a command as a string.
If the command is not found or fails a `SubstitutionFailure` error is raised.
Behavior on stderr output is configurable, see constructor.
"""
def __init__(
self,
command: SomeSubstitutionsType,
*,
on_stderr: SomeSubstitutionsType = 'fail'
) -> None:
"""
Construct a command substitution.
:param command: command to be executed. The substitutions will be performed, and
`shlex.split` will be used on the result.
:param on_stderr: specifies what to do when there is stderr output.
Can be one of:
- 'fail': raises `SubstitutionFailure` when stderr output is detected.
- 'ignore': `stderr` output is ignored.
- 'warn': The `stderr` output is ignored, but a warning is logged if detected.
- 'capture': The `stderr` output will be captured, together with stdout.
It can also be a substitution, that results in one of those four options.
"""
super().__init__()
from ..utilities import normalize_to_list_of_substitutions # import here to avoid loop
self.__command = normalize_to_list_of_substitutions(command)
self.__on_stderr = normalize_to_list_of_substitutions(on_stderr)
@classmethod
def parse(cls, data: Iterable[SomeSubstitutionsType]):
"""Parse `Command` substitution."""
if len(data) < 1 or len(data) > 2:
raise ValueError('command substitution expects 1 or 2 arguments')
kwargs = {'command': data[0]}
if len(data) == 2:
kwargs['on_stderr'] = data[1]
return cls, kwargs
@property
def command(self) -> List[Substitution]:
"""Getter for command."""
return self.__command
@property
def on_stderr(self) -> List[Substitution]:
"""Getter for on_stderr."""
return self.__on_stderr
def describe(self) -> Text:
"""Return a description of this substitution as a string."""
return 'Command({})'.format(' + '.join([sub.describe() for sub in self.command]))
def perform(self, context: LaunchContext) -> Text:
"""Perform the substitution by running the command and capturing its output."""
from ..utilities import perform_substitutions # import here to avoid loop
command_str = perform_substitutions(context, self.command)
if os.name != 'nt':
command = shlex.split(command_str)
else:
command = command_str
on_stderr = perform_substitutions(context, self.on_stderr)
if on_stderr not in ('fail', 'ignore', 'warn', 'capture'):
raise SubstitutionFailure(
"expected 'on_stderr' to be one of: 'fail', 'ignore', 'warn' or 'capture'")
stderr = subprocess.PIPE
if on_stderr == 'capture':
stderr = subprocess.STDOUT
try:
result = subprocess.run(
command,
stdout=subprocess.PIPE,
stderr=stderr,
universal_newlines=True)
except FileNotFoundError as ex:
raise SubstitutionFailure(f'file not found: {ex}')
if result.returncode != 0:
on_error_message = f'executed command failed. Command: {command_str}'
if result.stderr:
on_error_message += f'\nCaptured stderr output: {result.stderr}'
raise SubstitutionFailure(on_error_message)
if result.stderr:
on_stderr_message = f'executed command showed stderr output.' \
f' Command: {command_str}\n' \
f'Captured stderr output:\n{result.stderr}'
if on_stderr == 'fail':
raise SubstitutionFailure(on_stderr_message)
elif on_stderr == 'warn':
launch.logging.get_logger().warning(on_stderr_message)
return result.stdout
| apache-2.0 | -927,587,593,320,410,900 | 38.379845 | 95 | 0.638976 | false |
cherrygirl/micronaet7 | pickin_import/importation.py | 1 | 6137 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>)
#
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from datetime import datetime
from openerp.tools.translate import _
import time
class importation_default_location(osv.osv):
    ''' List of default locations, used to get the default input location and the stock one
'''
_name = 'importation.default.location'
_description = 'Default import location'
def get_location(self, cr, uid, name, context = None):
''' Return default value of location
'''
location_ids=self.search(cr, uid, [('name','=',name)])
if location_ids:
return self.read(cr, uid, location_ids)[0]['location_id'][0]
return False
_columns = {
'name':fields.selection([
('customer','Customer'),
('supplier','Supplier'),
('internal','Internal'), ],'Location type', select=True, readonly=False),
'location_id':fields.many2one('stock.location', 'Location stock', required=True),
}
class importation_purchase_order(osv.osv):
''' List of purchase order elements loaded
'''
_name = 'importation.purchase.order'
_description = 'Purchase order import'
_rec_name= 'product_id'
def check_lot(self, cr, uid, product_id, partner_id, purchase_order, context=None):
        ''' Check whether a lot is already loaded in the database for a product
            of a specific purchase order of a specific customer.
            Return the assigned lot_id if it exists, else False
'''
try:
# Search lot for product-partner-order:
item_ids = self.search(cr, uid, [
('product_id', '=', product_id),
('partner_id', '=', partner_id),
('purchase_order', '=', purchase_order),
])
if item_ids: # exist
item_read = self.read(cr, uid, item_ids)[0]
return item_read['lot_id'][0] # lot_id.id
else:
# Search lot for product-partner (no order)
item_ids = self.search(cr, uid, [
('product_id', '=', product_id),
('partner_id', '=', partner_id),
('purchase_order', '=', False),
])
if item_ids: # exist
item_read = self.read(cr, uid, item_ids)[0]
return item_read['lot_id'][0] # lot_id.id
else:
# Search default lot for product
product_proxy = self.pool.get('product.product').browse(
cr, uid, product_id, context=context)
if product_proxy.default_prodlot_id:
return product_proxy.default_prodlot_id.id
except:
pass
return False
def new_lot(self, cr, uid, product_id, partner_id, purchase_order, lot_id, context=None):
        ''' Check if the key element (product_id, partner_id, purchase_order) already exists in the DB:
            if true, assign lot_id as the last lot value
            if false, create a new element
'''
item_ids = self.search(cr, uid, [
('product_id', '=', product_id),
('partner_id', '=', partner_id),
('purchase_order', '=', purchase_order),
])
if item_ids: # save this lot as last
item_modify = self.write(cr, uid, item_ids, {
'lot_id': lot_id, }, context=context)
else:
item_create = self.create(cr, uid, {
'product_id': product_id,
'partner_id': partner_id,
'purchase_order': purchase_order,
'lot_id': lot_id,
}, context=context)
return
_columns = {
'product_id': fields.many2one('product.product', 'Product', required=True),
'partner_id': fields.many2one('res.partner', 'Partner', required=True),
        'purchase_order': fields.char('Purchase order', size=15, required=False, readonly=False, help="ID of the PO that generated this picking list"),
'lot_id': fields.many2one('stock.production.lot', 'Lot', required=False),
}
    _sql_constraints = [ # TODO: does not check if the third element is empty!
('key_element_unique', 'unique(product_id,partner_id,purchase_order)', 'Key element: product, partner, order must be unique!')
]
class stock_picking_extra_fields(osv.osv):
''' Add extra information for import stock.picking
'''
_name = 'stock.picking'
_inherit = 'stock.picking'
_columns = {
'import_document':fields.char('Document n.', size=18, required=False, readonly=False, help="Link to original imported document, format number/year ex.: 8015/2012"),
'wizard_id': fields.integer('Wizard ID', help="Save wizard creazion ID for open stock.picking after importation")
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 859,732,967,655,162,200 | 42.211268 | 172 | 0.5559 | false |
lukas-ke/faint-graphics-editor | build-sys/code_utils/find_defs.py | 1 | 3953 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
from core import enumerate_files, get_root_dir
import re
import sys
EXTENSIONS = [
".cpp",
".hh",
".txt",
]
EXCLUDED_DIRS = [
"__pycache__",
"build",
"build-sys",
"code_utils",
"doxygen",
"help",
"installer",
]
def format_filename(root_dir, f):
f = f.replace(root_dir, "")
return f[1:] if f[0] == "\\" else f
def write_defs_summary(file_name, defs):
with open(file_name, 'w') as f:
for d in sorted(defs.keys()):
key = d
path = defs[d][0]
charNum = str(defs[d][2])
label = defs[d][1]
if len(label) == 0:
label = key
f.write(key + "---" +
path + "---" +
charNum + "---" +
label + "===")
def format_def_line(l):
return l.strip().replace("// ", "")
def format_def_content(text):
return "\n".join(format_def_line(l) for l in text.split("\n"))
def write_defs_index(file_name, defs):
with open(file_name, 'w') as f:
f.write("Index of definitions in Faint source files.\n")
f.write("With faint-start-magic, the following entries\n")
f.write("should be clickable links.\n\n")
for key in sorted(defs.keys()):
f.write("\\ref(%s) (%s)\n" % (key, key))
if __name__ == '__main__':
root_dir = sys.argv[1]
out_file = sys.argv[2]
index_file = sys.argv[3]
defs_pattern = re.compile(r"\\def\((.*?)\)(.*?);", re.DOTALL|re.MULTILINE)
refs_pattern = re.compile(r"\\ref\((.*?)\)")
defs = {}
refs = {}
for filename in enumerate_files(root_dir,
extensions=EXTENSIONS,
excluded_dirs=EXCLUDED_DIRS):
with open(filename) as f:
text = f.read()
for entry in re.finditer(defs_pattern, text):
name = entry.group(1)
if name in defs:
print("Duplicate definition: %s" % name)
else:
content = format_def_content(entry.group(2).strip())
defs[name] = [filename, content, entry.span()[0]]
for entry in re.finditer(refs_pattern, text):
name = entry.group(1)
if name not in refs:
refs[name] = []
refs[name].append(filename)
print("Definitions:")
for num, name in enumerate(sorted(defs.keys())):
print(name + ": " + format_filename(root_dir, defs[name][0]))
for f in refs.get(name, []):
print(" " * len(name) + " <- %s" % format_filename(root_dir, f))
if num != len(defs) - 1:
print()
print()
print("References:")
for name in sorted(refs.keys()):
for f in refs[name]:
print(name + ": ", format_filename(root_dir, f))
for name in defs.keys():
if name not in refs:
print("Unreferenced define: %s" % name)
for name in refs.keys():
if name not in defs.keys():
print("Invalid reference: %s %s" % (name, refs[name][0]))
write_defs_summary(out_file, defs)
write_defs_index(index_file, defs)
| apache-2.0 | 6,920,618,032,387,999,000 | 29.373016 | 78 | 0.521882 | false |
davandev/davanserver | davan/http/ServiceInvoker.py | 1 | 4860 | '''
@author: davandev
'''
import os
import logging
import imp
import time
import re
import davan.config.config_creator as app_config
import davan.util.application_logger as app_logger
import davan.util.constants as constants
class ServiceInvoker(object):
'''
    Service handler module, scanning for services.
'''
def __init__(self, configuration):
'''
Constructor
'''
self.logger = logging.getLogger(os.path.basename(__file__))
self.services ={}
self.expression = re.compile(r'\w+')
self.config = configuration
# Determine if services are started
self.running = False
def is_running(self):
return self.running
def discover_services(self):
"""
Scan service folder for all matching services.
"""
self.logger.info("Discover services")
for root, _, files in os.walk(self.config['SERVICE_PATH']):
for service_file in files:
if (service_file.endswith(".pyc") and
not service_file.endswith("__init__.pyc") and
not service_file.endswith("base_service.pyc")):
module_name = service_file.replace(".pyc","")
mod = imp.load_compiled(module_name,os.path.join(root, service_file))
# self.logger.debug("ModuleName:"+module_name)
try:
attributes = getattr(mod, module_name)
service = attributes(self, self.config)
self.services[service.get_name()] = service
except :
continue
self.logger.debug("Discovered service [" + module_name + "] Service key[" + service.get_name()+"]")
return self.services
def start_services(self):
"""
Start all services that are enabled in configuration
"""
self.logger.info("Starting services")
for name, service in self.services.iteritems():
if service.is_enabled() and not service.is_service_running():
service.start_service()
else:
self.logger.debug("Service " + name + " is disabled")
self.running = True
self.logger.info("All configured services started")
def get_service(self, service):
"""
@param service, name of selected service
@return: service matching service name
"""
result = self.expression.findall(service)[0]
if self.services.has_key(result):
# self.logger.debug("Invoking service: ["+ result+"]")
return self.services[result]
elif service.endswith(constants.MP3_EXTENSION) or service.endswith(constants.MP3_EXTENSION1):
# self.logger.debug("Invoking service: [mp3]")
return self.services[constants.MP3_SERVICE_NAME]
elif service.endswith(constants.OGG_EXTENSION) or service.endswith(constants.OGG_EXTENSION1):
# self.logger.debug("Invoking service: [ogg]")
return self.services[constants.MP3_SERVICE_NAME]
elif service.endswith(constants.HTML_EXTENSION) or service.endswith(constants.CSS_EXTENSION):
# self.logger.debug("Invoking service: [html]")
return self.services[constants.HTML_SERVICE_NAME]
# No service found
self.logger.debug("No service ["+str(service)+"] found")
return None
def stop_all_except(self, service_name):
self.logger.info("Stopping all services")
for service in self.services.itervalues():
#self.logger.debug("Stopping: " + str(service.get_name()))
if service.service_name == service_name:
continue
service.stop_service()
self.running = False
self.logger.info("All started services stopped")
def stop_services(self):
"""
Stop all services
"""
self.logger.info("Stopping all services")
for service in self.services.itervalues():
#self.logger.debug("Stopping: " + str(service.get_name()))
service.stop_service()
self.running = False
self.logger.info("All started services stopped")
if __name__ == '__main__':
config = app_config.create()
app_logger.start_logging(config['LOGFILE_PATH'],loglevel=4)
test = ServiceInvoker()
test.invoke_services()
test.get_service("/tts=234?")
test.get_service("/presence?name=david")
time.sleep(10)
test.logger.info("Stopping thread")
test.stop_services()
| mit | 1,827,155,184,925,920,300 | 36.007813 | 123 | 0.563374 | false |
vitorfs/bootcamp | bootcamp/articles/tests/test_views.py | 1 | 4751 | import tempfile
from PIL import Image
from django.test import Client, override_settings
from django.urls import reverse
from test_plus.test import TestCase
from bootcamp.articles.models import Article
def get_temp_img():
size = (200, 200)
color = (255, 0, 0, 0)
with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
image = Image.new("RGB", size, color)
image.save(f, "PNG")
return open(f.name, mode="rb")
class ArticlesViewsTest(TestCase):
def setUp(self):
self.user = self.make_user("first_user")
self.other_user = self.make_user("second_user")
self.client = Client()
self.other_client = Client()
self.client.login(username="first_user", password="password")
self.other_client.login(username="second_user", password="password")
self.article = Article.objects.create(
title="A really nice title",
content="This is a really good content",
status="P",
user=self.user,
)
self.not_p_article = Article.objects.create(
title="A really nice to-be title",
content="""This is a really good content, just if somebody
published it, that would be awesome, but no, nobody wants to
publish it, because they know this is just a test, and you
know than nobody wants to publish a test, just a test;
everybody always wants the real deal.""",
user=self.user,
)
self.test_image = get_temp_img()
def tearDown(self):
self.test_image.close()
def test_index_articles(self):
response = self.client.get(reverse("articles:list"))
self.assertEqual(response.status_code, 200)
def test_error_404(self):
response_no_art = self.client.get(
reverse("articles:article", kwargs={"slug": "no-slug"})
)
self.assertEqual(response_no_art.status_code, 404)
@override_settings(MEDIA_ROOT=tempfile.gettempdir())
def test_create_article(self):
response = self.client.post(
reverse("articles:write_new"),
{
"title": "A not that really nice title",
"content": "Whatever works for you",
"tags": "list, lists",
"status": "P",
"image": self.test_image,
},
)
assert response.status_code == 302
@override_settings(MEDIA_ROOT=tempfile.gettempdir())
def test_single_article(self):
current_count = Article.objects.count()
response = self.client.post(
reverse("articles:write_new"),
{
"title": "A not that really nice title",
"content": "Whatever works for you",
"tags": "list, lists",
"status": "P",
"image": self.test_image,
},
)
# response_art = self.client.get(
# reverse("articles:article",
# kwargs={"slug": "a-not-that-really-nice-title"}))
# assert response_art.status_code == 200
assert response.status_code == 302
assert Article.objects.count() == current_count + 1
@override_settings(MEDIA_ROOT=tempfile.gettempdir())
def test_draft_article(self):
response = self.client.post(
reverse("articles:write_new"),
{
"title": "A not that really nice title",
"content": "Whatever works for you",
"tags": "list, lists",
"status": "D",
"image": self.test_image,
},
)
resp = self.client.get(reverse("articles:drafts"))
assert resp.status_code == 200
assert response.status_code == 302
assert (
resp.context["articles"][0].slug
== "first-user-a-not-that-really-nice-title"
)
@override_settings(MEDIA_ROOT=tempfile.gettempdir())
def test_draft_article_change(self):
response = self.client.post(
reverse("articles:edit_article", kwargs={"pk": self.not_p_article.id}),
{
"title": "A really nice changed title",
"content": "Whatever works for you",
"tags": "list, lists",
"status": "D",
"image": self.test_image,
},
)
resp = self.client.get(reverse("articles:drafts"))
assert resp.status_code == 200
assert response.status_code == 302
assert resp.context["articles"][0].title == "A really nice changed title"
assert (
resp.context["articles"][0].slug == "first-user-a-really-nice-to-be-title"
)
| mit | 4,451,839,674,186,486,000 | 34.721805 | 86 | 0.55904 | false |
xletmjm416/space-trader-galaxy | main.py | 1 | 2578 | # -*- coding: utf-8 -*-
"""
Main Space Trader Galaxy class.
Created on Thu Jul 6 01:59:38 2017
@author: mjm
"""
import logging
import logging.config
import character as char
import good as good
def trade(seller, buyer, commodity, pay):
"""
Seller sells commodity to the buyer who pays with pay.
Arguments:
seller - type Character
buyer - type Character
commodity - type Good
pay - type Good
Returns:
success (True) or fail (False)
"""
"""
Pseudocode
log init
if seller owns commodity:
if buyer owns pay:
commodity.transfer(new_owner=buyer)
pay.transfer(new_owner=seller)
log
return True
else:
log
return False
else:
log
return False
log end
"""
    logging.debug("Trade started: %s offered for %s", commodity, pay)
    success = False
    if commodity in seller.belongings:  # TODO change into owns() by Character
        if pay in buyer.belongings:  # same here
            commodity.transfer(new_owner=buyer)
            pay.transfer(new_owner=seller)
            logging.info("Trade succeeded: %s exchanged for %s", commodity, pay)
            success = True
        else:
            logging.info("Trade failed: buyer does not own the pay")
            success = False
    else:
        logging.info("Trade failed: seller does not own the commodity")
        success = False
    logging.debug("Trade finished.")
    return success
def test_trade():
print """Initialisation"""
matt = char.Character("Matt", [], "Earth")
tony = char.Character("Tony", [], "Mars")
matt1 = good.Good("Matt's item 1", matt)
tony1 = good.Good("Tony's item 1", tony)
tony2 = good.Good("Tony's item 2", tony)
print matt.describe()
print
print tony.describe()
print
print """Trade undertaking:"""
print "- Tony trades with Matt, seller does not have good"
print trade(tony, matt, matt1, matt1)
print
print matt.describe()
print
print tony.describe()
print
print "- Tony trades with Matt, buyer does not have good"
print trade(tony, matt, tony1, tony2)
print
print matt.describe()
print
print tony.describe()
print
print "- Tony trades with Matt, None have the good"
print trade(tony, matt, matt1, tony1)
print
print matt.describe()
print
print tony.describe()
print
    print "- Matt trades with Tony, both have the good"
print trade(matt, tony, matt1, tony1)
print matt.describe()
print
print tony.describe()
print
def main():
logging.config.fileConfig('logs.conf')
logging.info("Program started.")
test_trade()
logging.info("Program finished.")
if __name__ == "__main__":
main() | mit | 8,642,490,935,781,050,000 | 22.66055 | 76 | 0.588441 | false |
craffel/mir_eval | mir_eval/sonify.py | 1 | 10893 | '''
Methods which sonify annotations for "evaluation by ear".
All functions return a raw signal at the specified sampling rate.
'''
import numpy as np
from numpy.lib.stride_tricks import as_strided
from scipy.interpolate import interp1d
from . import util
from . import chord
def clicks(times, fs, click=None, length=None):
"""Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
Synthesized click signal
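    Examples
    --------
    A minimal sketch; the click times below are arbitrary values chosen purely
    for illustration::
        >>> click_track = clicks(np.array([0.5, 1.0, 1.5, 2.0]), fs=22050)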
"""
# Create default click signal
if click is None:
# 1 kHz tone, 100ms
click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs))
# Exponential decay
click *= np.exp(-np.arange(fs*.1)/(fs*.01))
# Set default length
if length is None:
length = int(times.max()*fs + click.shape[0] + 1)
# Pre-allocate click signal
click_signal = np.zeros(length)
# Place clicks
for time in times:
# Compute the boundaries of the click
start = int(time*fs)
end = start + click.shape[0]
# Make sure we don't try to output past the end of the signal
if start >= length:
break
if end >= length:
click_signal[start:] = click[:length - start]
break
# Normally, just add a click here
click_signal[start:end] = click
return click_signal
def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None,
n_dec=1):
"""Reverse synthesis of a time-frequency representation of a signal
Parameters
----------
gram : np.ndarray
``gram[n, m]`` is the magnitude of ``frequencies[n]``
from ``times[m]`` to ``times[m + 1]``
Non-positive magnitudes are interpreted as silence.
frequencies : np.ndarray
array of size ``gram.shape[0]`` denoting the frequency of
each row of gram
times : np.ndarray, shape= ``(gram.shape[1],)`` or ``(gram.shape[1], 2)``
Either the start time of each column in the gram,
or the time interval corresponding to each column.
fs : int
desired sampling rate of the output signal
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``times[-1]*fs``
n_dec : int
        the number of decimals used to approximate each sonified frequency.
Defaults to 1 decimal place. Higher precision will be slower.
Returns
-------
output : np.ndarray
synthesized version of the piano roll
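    Examples
    --------
    A minimal sketch; the two-bin, two-frame magnitude matrix below is made up
    purely for illustration::
        >>> gram = np.array([[1.0, 0.0], [0.0, 1.0]])
        >>> frequencies = np.array([440.0, 880.0])
        >>> times = np.array([[0.0, 0.5], [0.5, 1.0]])
        >>> audio = time_frequency(gram, frequencies, times, fs=22050)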
"""
# Default value for length
if times.ndim == 1:
# Convert to intervals
times = util.boundaries_to_intervals(times)
if length is None:
length = int(times[-1, 1] * fs)
times, _ = util.adjust_intervals(times, t_max=length)
# Truncate times so that the shape matches gram
n_times = gram.shape[1]
times = times[:n_times]
def _fast_synthesize(frequency):
"""A faster way to synthesize a signal.
Generate one cycle, and simulate arbitrary repetitions
using array indexing tricks.
"""
# hack so that we can ensure an integer number of periods and samples
# rounds frequency to 1st decimal, s.t. 10 * frequency will be an int
frequency = np.round(frequency, n_dec)
# Generate 10*frequency periods at this frequency
# Equivalent to n_samples = int(n_periods * fs / frequency)
# n_periods = 10*frequency is the smallest integer that guarantees
# that n_samples will be an integer, since assuming 10*frequency
# is an integer
n_samples = int(10.0**n_dec * fs)
short_signal = function(2.0 * np.pi * np.arange(n_samples) *
frequency / fs)
# Calculate the number of loops we need to fill the duration
n_repeats = int(np.ceil(length/float(short_signal.shape[0])))
# Simulate tiling the short buffer by using stride tricks
long_signal = as_strided(short_signal,
shape=(n_repeats, len(short_signal)),
strides=(0, short_signal.itemsize))
# Use a flatiter to simulate a long 1D buffer
return long_signal.flat
def _const_interpolator(value):
"""Return a function that returns `value`
no matter the input.
"""
def __interpolator(x):
return value
return __interpolator
# Threshold the tfgram to remove non-positive values
gram = np.maximum(gram, 0)
# Pre-allocate output signal
output = np.zeros(length)
time_centers = np.mean(times, axis=1) * float(fs)
for n, frequency in enumerate(frequencies):
# Get a waveform of length samples at this frequency
wave = _fast_synthesize(frequency)
# Interpolate the values in gram over the time grid
if len(time_centers) > 1:
gram_interpolator = interp1d(
time_centers, gram[n, :],
kind='linear', bounds_error=False,
fill_value=(gram[n, 0], gram[n, -1]))
# If only one time point, create constant interpolator
else:
gram_interpolator = _const_interpolator(gram[n, 0])
# Scale each time interval by the piano roll magnitude
for m, (start, end) in enumerate((times * fs).astype(int)):
# Clip the timings to make sure the indices are valid
start, end = max(start, 0), min(end, length)
# add to waveform
output[start:end] += (
wave[start:end] * gram_interpolator(np.arange(start, end)))
# Normalize, but only if there's non-zero values
norm = np.abs(output).max()
if norm >= np.finfo(output.dtype).tiny:
output /= norm
return output
def pitch_contour(times, frequencies, fs, amplitudes=None, function=np.sin,
length=None, kind='linear'):
'''Sonify a pitch contour.
Parameters
----------
times : np.ndarray
time indices for each frequency measurement, in seconds
frequencies : np.ndarray
frequency measurements, in Hz.
Non-positive measurements will be interpreted as un-voiced samples.
fs : int
desired sampling rate of the output signal
amplitudes : np.ndarray
        amplitude measurements, nonnegative
defaults to ``np.ones((length,))``
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``max(times)*fs``
kind : str
Interpolation mode for the frequency and amplitude values.
See: ``scipy.interpolate.interp1d`` for valid settings.
Returns
-------
output : np.ndarray
synthesized version of the pitch contour
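    Examples
    --------
    A minimal sketch; the pitch track below is made up purely for
    illustration::
        >>> times = np.arange(0, 1, 0.01)
        >>> freqs = np.linspace(220.0, 440.0, times.shape[0])
        >>> audio = pitch_contour(times, freqs, fs=22050)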
'''
fs = float(fs)
if length is None:
length = int(times.max() * fs)
# Squash the negative frequencies.
# wave(0) = 0, so clipping here will un-voice the corresponding instants
frequencies = np.maximum(frequencies, 0.0)
# Build a frequency interpolator
f_interp = interp1d(times * fs, 2 * np.pi * frequencies / fs, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
# Estimate frequency at sample points
f_est = f_interp(np.arange(length))
if amplitudes is None:
a_est = np.ones((length, ))
else:
# build an amplitude interpolator
a_interp = interp1d(
times * fs, amplitudes, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
a_est = a_interp(np.arange(length))
# Sonify the waveform
return a_est * function(np.cumsum(f_est))
def chroma(chromagram, times, fs, **kwargs):
"""Reverse synthesis of a chromagram (semitone matrix)
Parameters
----------
chromagram : np.ndarray, shape=(12, times.shape[0])
Chromagram matrix, where each row represents a semitone [C->Bb]
i.e., ``chromagram[3, j]`` is the magnitude of D# from ``times[j]`` to
``times[j + 1]``
times: np.ndarray, shape=(len(chord_labels),) or (len(chord_labels), 2)
Either the start time of each column in the chromagram,
or the time interval corresponding to each column.
fs : int
Sampling rate to synthesize audio data at
kwargs
Additional keyword arguments to pass to
:func:`mir_eval.sonify.time_frequency`
Returns
-------
output : np.ndarray
Synthesized chromagram
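    Examples
    --------
    A minimal sketch; the C-drone chromagram below is made up purely for
    illustration::
        >>> chromagram = np.zeros((12, 2))
        >>> chromagram[0, :] = 1.0
        >>> audio = chroma(chromagram, np.array([[0.0, 1.0], [1.0, 2.0]]), 22050)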
"""
# We'll just use time_frequency with a Shepard tone-gram
# To create the Shepard tone-gram, we copy the chromagram across 7 octaves
n_octaves = 7
# starting from C2
base_note = 24
# and weight each octave by a normal distribution
# The normal distribution has mean 72 (one octave above middle C)
# and std 6 (one half octave)
mean = 72
std = 6
notes = np.arange(12*n_octaves) + base_note
shepard_weight = np.exp(-(notes - mean)**2./(2.*std**2.))
# Copy the chromagram matrix vertically n_octaves times
gram = np.tile(chromagram.T, n_octaves).T
# This fixes issues if the supplied chromagram is int type
gram = gram.astype(float)
    # Apply Shepard weighting
gram *= shepard_weight.reshape(-1, 1)
# Compute frequencies
frequencies = 440.0*(2.0**((notes - 69)/12.0))
return time_frequency(gram, frequencies, times, fs, **kwargs)
def chords(chord_labels, intervals, fs, **kwargs):
"""Synthesizes chord labels
Parameters
----------
chord_labels : list of str
List of chord label strings.
intervals : np.ndarray, shape=(len(chord_labels), 2)
Start and end times of each chord label
fs : int
Sampling rate to synthesize at
kwargs
Additional keyword arguments to pass to
:func:`mir_eval.sonify.time_frequency`
Returns
-------
output : np.ndarray
Synthesized chord labels
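    Examples
    --------
    A minimal sketch; the labels and intervals below are made up purely for
    illustration::
        >>> labels = ['C:maj', 'G:min']
        >>> intervals = np.array([[0.0, 1.0], [1.0, 2.0]])
        >>> audio = chords(labels, intervals, fs=22050)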
"""
util.validate_intervals(intervals)
# Convert from labels to chroma
roots, interval_bitmaps, _ = chord.encode_many(chord_labels)
chromagram = np.array([np.roll(interval_bitmap, root)
for (interval_bitmap, root)
in zip(interval_bitmaps, roots)]).T
return chroma(chromagram, intervals, fs, **kwargs)
| mit | 147,390,526,746,069,660 | 32.109422 | 78 | 0.615074 | false |
google/tink | python/tink/aead/__init__.py | 1 | 1093 | # Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Aead package."""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
from tink.aead import _aead
from tink.aead import _aead_key_manager
from tink.aead import _aead_key_templates as aead_key_templates
from tink.aead import _kms_envelope_aead
Aead = _aead.Aead
AeadCcToPyWrapper = _aead_key_manager.AeadCcToPyWrapper
register = _aead_key_manager.register
KmsEnvelopeAead = _kms_envelope_aead.KmsEnvelopeAead
| apache-2.0 | 318,504,748,433,194,100 | 35.433333 | 74 | 0.772187 | false |
quattor/aquilon | lib/aquilon/worker/commands/add_network_device.py | 1 | 6514 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq add network_device`."""
from sqlalchemy.orm import subqueryload
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import NetworkDevice, Model, Archetype, Chassis, NetworkDeviceChassisSlot
from aquilon.aqdb.model.network import get_net_id_from_ip
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.dns import grab_address
from aquilon.worker.dbwrappers.location import get_location
from aquilon.worker.dbwrappers.interface import (get_or_create_interface,
assign_address,
check_netdev_iftype)
from aquilon.worker.dbwrappers.host import create_host
from aquilon.worker.processes import DSDBRunner
from aquilon.worker.templates.switchdata import PlenarySwitchData
from aquilon.worker.dbwrappers.change_management import ChangeManagement
class CommandAddNetworkDevice(BrokerCommand):
requires_plenaries = True
required_parameters = ["network_device", "model", "type",
"ip", "interface", "iftype"]
def render(self, session, logger, plenaries, network_device, label, model, type, ip,
interface, iftype, mac, vendor, serial, comments, exporter, chassis, slot,
archetype, domain, sandbox, user, justification, reason, **arguments):
dbmodel = Model.get_unique(session, name=model, vendor=vendor,
compel=True)
if not dbmodel.model_type.isNetworkDeviceType():
raise ArgumentError("This command can only be used to "
"add network devices.")
dblocation = get_location(session, query_options=[subqueryload('parents')], **arguments)
if chassis:
dbchassis = Chassis.get_unique(session, chassis, compel=True)
if slot is None:
raise ArgumentError("The --chassis option requires a --slot.")
if dblocation and dblocation != dbchassis.location:
raise ArgumentError("{0} conflicts with chassis location "
"{1}.".format(dblocation, dbchassis.location))
dblocation = dbchassis.location
elif slot is not None:
raise ArgumentError("The --slot option requires a --chassis.")
dbdns_rec, _ = grab_address(session, network_device, ip,
allow_restricted_domain=True,
allow_reserved=True, preclude=True,
exporter=exporter, require_grn=False)
if not label:
label = dbdns_rec.fqdn.name
try:
NetworkDevice.check_label(label)
except ArgumentError:
raise ArgumentError("Could not deduce a valid hardware label "
"from the network device name. Please specify "
"--label.")
# FIXME: What do the error messages for an invalid enum (switch_type)
# look like?
dbnetdev = NetworkDevice(label=label, switch_type=type,
location=dblocation, model=dbmodel,
serial_no=serial, comments=comments)
session.add(dbnetdev)
if chassis:
dbslot = session.query(NetworkDeviceChassisSlot).filter_by(chassis=dbchassis,
slot_number=slot).first()
if dbslot and dbslot.network_device:
raise ArgumentError("{0} slot {1} already has network device "
"{2}.".format(dbchassis, slot,
dbslot.network_device.label))
if not dbslot:
dbslot = NetworkDeviceChassisSlot(chassis=dbchassis, slot_number=slot)
dbslot.network_device = dbnetdev
session.add(dbslot)
dbnetdev.primary_name = dbdns_rec
check_netdev_iftype(iftype)
dbinterface = get_or_create_interface(session, dbnetdev,
name=interface, mac=mac,
interface_type=iftype)
dbnetwork = get_net_id_from_ip(session, ip)
# TODO: should we call check_ip_restrictions() here?
assign_address(dbinterface, ip, dbnetwork, logger=logger)
if not archetype:
hw_section = 'hardware_network_device'
if not self.config.has_option(hw_section, 'default_archetype'):
raise ArgumentError("Cannot determine the archetype for "
"network devices. Please specify "
"--archetype.")
archetype = self.config.get(hw_section, 'default_archetype')
dbarchetype = Archetype.get_unique(session, archetype, compel=True)
dbhost = create_host(session, logger, self.config, dbnetdev,
dbarchetype, domain=domain, sandbox=sandbox,
**arguments)
session.flush()
# Validate ChangeManagement
cm = ChangeManagement(session, user, justification, reason, logger, self.command, **arguments)
cm.consider(dbhost)
cm.validate()
# Add the legacy template separately
plenaries.add(dbnetdev, cls=PlenarySwitchData)
plenaries.add(dbnetdev)
plenaries.add(dbhost)
with plenaries.transaction():
dsdb_runner = DSDBRunner(logger=logger)
dsdb_runner.update_host(dbnetdev, None)
dsdb_runner.commit_or_rollback("Could not add network device to DSDB")
return
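    # A hypothetical sketch of how this broker command might be invoked from
    # the aq CLI; the host, model, type, address and interface values below are
    # assumptions, only the option names mirror required_parameters above:
    #   aq add network_device --network_device np01.example.com --model generic-tor \
    #       --type tor --ip 192.0.2.10 --interface xge49 --iftype physical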
| apache-2.0 | -409,722,923,466,763,400 | 45.863309 | 102 | 0.602241 | false |
Eksmo/calibre | src/calibre/ebooks/mobi/writer2/resources.py | 1 | 5456 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import imghdr
from calibre.ebooks.mobi import MAX_THUMB_DIMEN, MAX_THUMB_SIZE
from calibre.ebooks.mobi.utils import (rescale_image, mobify_image,
write_font_record)
from calibre.ebooks import generate_masthead
from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
PLACEHOLDER_GIF = b'GIF89a\x01\x00\x01\x00\x80\x00\x00\x00\x00\x00\xff\xff\xff!\xf9\x04\x01\x00\x00\x00\x00,\x00\x00\x00\x00\x01\x00\x01\x00@\x02\x01D\x00;'
class Resources(object):
def __init__(self, oeb, opts, is_periodical, add_fonts=False,
process_images=True):
self.oeb, self.log, self.opts = oeb, oeb.log, opts
self.is_periodical = is_periodical
self.process_images = process_images
self.item_map = {}
self.records = []
self.mime_map = {}
self.masthead_offset = 0
self.used_image_indices = set()
self.image_indices = set()
self.cover_offset = self.thumbnail_offset = None
self.add_resources(add_fonts)
def process_image(self, data):
if not self.process_images:
return data
return (mobify_image(data) if self.opts.mobi_keep_original_images else
rescale_image(data))
def add_resources(self, add_fonts):
oeb = self.oeb
oeb.logger.info('Serializing resources...')
index = 1
mh_href = None
if 'masthead' in oeb.guide and oeb.guide['masthead'].href:
mh_href = oeb.guide['masthead'].href
self.records.append(None)
index += 1
self.used_image_indices.add(0)
self.image_indices.add(0)
elif self.is_periodical:
# Generate a default masthead
data = generate_masthead(unicode(self.oeb.metadata['title'][0]))
self.records.append(data)
self.used_image_indices.add(0)
self.image_indices.add(0)
index += 1
cover_href = self.cover_offset = self.thumbnail_offset = None
if (oeb.metadata.cover and
unicode(oeb.metadata.cover[0]) in oeb.manifest.ids):
cover_id = unicode(oeb.metadata.cover[0])
item = oeb.manifest.ids[cover_id]
cover_href = item.href
for item in self.oeb.manifest.values():
if item.media_type not in OEB_RASTER_IMAGES: continue
try:
data = self.process_image(item.data)
except:
self.log.warn('Bad image file %r' % item.href)
continue
else:
if mh_href and item.href == mh_href:
self.records[0] = data
continue
self.image_indices.add(len(self.records))
self.records.append(data)
self.item_map[item.href] = index
self.mime_map[item.href] = 'image/%s'%imghdr.what(None, data)
index += 1
if cover_href and item.href == cover_href:
self.cover_offset = self.item_map[item.href] - 1
self.used_image_indices.add(self.cover_offset)
try:
data = rescale_image(item.data, dimen=MAX_THUMB_DIMEN,
maxsizeb=MAX_THUMB_SIZE)
except:
self.log.warn('Failed to generate thumbnail')
else:
self.image_indices.add(len(self.records))
self.records.append(data)
self.thumbnail_offset = index - 1
self.used_image_indices.add(self.thumbnail_offset)
index += 1
finally:
item.unload_data_from_memory()
if add_fonts:
for item in self.oeb.manifest.values():
if item.href and item.href.rpartition('.')[-1].lower() in {
'ttf', 'otf'} and isinstance(item.data, bytes):
self.records.append(write_font_record(item.data))
self.item_map[item.href] = len(self.records)
def add_extra_images(self):
'''
Add any images that were created after the call to add_resources()
'''
for item in self.oeb.manifest.values():
if (item.media_type not in OEB_RASTER_IMAGES or item.href in
self.item_map): continue
try:
data = self.process_image(item.data)
except:
self.log.warn('Bad image file %r' % item.href)
else:
self.records.append(data)
self.item_map[item.href] = len(self.records)
finally:
item.unload_data_from_memory()
def serialize(self, records, used_images):
used_image_indices = self.used_image_indices | {
v-1 for k, v in self.item_map.iteritems() if k in used_images}
for i in self.image_indices-used_image_indices:
self.records[i] = PLACEHOLDER_GIF
records.extend(self.records)
def __bool__(self):
return bool(self.records)
__nonzero__ = __bool__
| gpl-3.0 | -8,675,809,270,525,001,000 | 37.971429 | 156 | 0.552786 | false |
beniwohli/apm-agent-python | elasticapm/contrib/aiohttp/__init__.py | 1 | 2338 | # BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import aiohttp
import elasticapm
from elasticapm import Client
class ElasticAPM:
def __init__(self, app, client=None):
if not client:
config = app.get("ELASTIC_APM", {})
config.setdefault("framework_name", "aiohttp")
config.setdefault("framework_version", aiohttp.__version__)
client = Client(config=config)
self.app = app
self.client = client
self.install_tracing(app, client)
def install_tracing(self, app, client):
from elasticapm.contrib.aiohttp.middleware import tracing_middleware
app.middlewares.insert(0, tracing_middleware(app))
if client.config.instrument and client.config.enabled:
elasticapm.instrument()
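# A minimal usage sketch (the service name below is an assumed value):
#   from aiohttp import web
#   app = web.Application()
#   app['ELASTIC_APM'] = {'SERVICE_NAME': 'aiohttp-app'}
#   apm = ElasticAPM(app)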
| bsd-3-clause | -6,316,243,842,223,861,000 | 43.113208 | 81 | 0.733105 | false |
openstack/octavia | octavia/db/migration/alembic_migrations/versions/0fd2c131923f_add_timeout_fields_to_listener.py | 1 | 1794 | # Copyright 2018 GoDaddy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add timeout fields to listener
Revision ID: 0fd2c131923f
Revises: ba35e0fb88e1
Create Date: 2018-03-23 03:34:26.657254
"""
from alembic import op
import sqlalchemy as sa
from octavia.common import constants
# revision identifiers, used by Alembic.
revision = '0fd2c131923f'
down_revision = 'ba35e0fb88e1'
def upgrade():
op.add_column('listener',
sa.Column('timeout_client_data',
sa.Integer(), nullable=True,
default=constants.DEFAULT_TIMEOUT_CLIENT_DATA))
op.add_column('listener',
sa.Column('timeout_member_connect',
sa.Integer(), nullable=True,
default=constants.DEFAULT_TIMEOUT_MEMBER_CONNECT))
op.add_column('listener',
sa.Column('timeout_member_data',
sa.Integer(), nullable=True,
default=constants.DEFAULT_TIMEOUT_MEMBER_DATA))
op.add_column('listener',
sa.Column('timeout_tcp_inspect',
sa.Integer(), nullable=True,
default=constants.DEFAULT_TIMEOUT_TCP_INSPECT))
| apache-2.0 | -9,021,182,196,373,340,000 | 34.88 | 78 | 0.622074 | false |
Endika/OpenUpgrade | openerp/addons/openupgrade_records/model/openupgrade_record.py | 1 | 3901 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module Copyright (C) 2012-2014 OpenUpgrade community
# https://launchpad.net/~openupgrade-committers
#
# Contributors:
# Therp BV <http://therp.nl>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
from openerp.osv.orm import Model
from openerp.osv import fields
except ImportError:
from osv.osv import osv as Model
from osv import fields
# Cannot use forward references in 6.0
class openupgrade_record(Model):
_name = 'openupgrade.record'
openupgrade_record()
class openupgrade_attribute(Model):
_name = 'openupgrade.attribute'
_rec_name = 'name'
_columns = {
'name': fields.char(
'Name', size=24,
readonly=True,
),
'value': fields.char(
'Value',
size=4096,
readonly=True,
),
'record_id': fields.many2one(
'openupgrade.record', ondelete='CASCADE',
readonly=True,
),
}
openupgrade_attribute()
class openupgrade_record(Model):
_inherit = 'openupgrade.record'
_columns = {
'name': fields.char('Name', size=256, readonly=True),
'module': fields.char('Module', size=128, readonly=True),
'model': fields.char('Model', size=128, readonly=True),
'field': fields.char('Field', size=128, readonly=True),
'mode': fields.selection(
[('create', 'Create'), ('modify', 'Modify')],
'Mode',
help='Set to Create if a field is newly created '
'in this module. If this module modifies an attribute of an '
            'existing field, set to Modify.',
readonly=True,
),
'type': fields.selection(
[('field', 'Field'), ('xmlid', 'XML ID')],
'Type',
readonly=True,
),
'attribute_ids': fields.one2many(
'openupgrade.attribute', 'record_id', 'Attributes',
readonly=True,
),
}
def field_dump(self, cr, uid, context=None):
keys = [
'module',
'mode',
'model',
'field',
'type',
'isfunction',
'isproperty',
'isrelated',
'relation',
'required',
'selection_keys',
'req_default',
'inherits',
]
template = dict([(x, False) for x in keys])
ids = self.search(cr, uid, [('type', '=', 'field')], context=context)
records = self.browse(cr, uid, ids, context=context)
data = []
for record in records:
repr = template.copy()
repr.update({
'module': record.module,
'model': record.model,
'field': record.field,
'mode': record.mode,
})
repr.update(
dict([(x.name, x.value) for x in record.attribute_ids]))
data.append(repr)
return data
openupgrade_record()
| agpl-3.0 | 9,089,793,331,484,869,000 | 30.97541 | 78 | 0.530377 | false |
blythemusic/ClyphX | ClyphXControlSurfaceActions.py | 1 | 18706 | """
# Copyright (C) 2013-2016 Stray <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Stray <[email protected]>
"""
# emacs-mode: -*- python-*-
# -*- coding: utf-8 -*-
import Live
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.ControlSurface import ControlSurface
from _Framework.SessionComponent import SessionComponent
from _Framework.MixerComponent import MixerComponent
from _Framework.DeviceComponent import DeviceComponent
from consts import *
if IS_LIVE_9:
from ClyphXPushActions import ClyphXPushActions
from ClyphXPXTActions import ClyphXPXTActions
from ClyphXMXTActions import ClyphXMXTActions
if IS_LIVE_9_5:
from ClyphXArsenalActions import ClyphXArsenalActions
from ableton.v2.control_surface import ControlSurface as CS
else:
from _Framework.ControlSurface import ControlSurface as CS
class ClyphXControlSurfaceActions(ControlSurfaceComponent):
__module__ = __name__
__doc__ = ' Actions related to control surfaces '
def __init__(self, parent):
ControlSurfaceComponent.__init__(self)
self._parent = parent
self._push_actions = None
if IS_LIVE_9:
self._push_actions = ClyphXPushActions(parent)
self._pxt_actions = ClyphXPXTActions(parent)
self._mxt_actions = ClyphXMXTActions(parent)
if IS_LIVE_9_5:
self._arsenal_actions = ClyphXArsenalActions(parent)
self._scripts = {}
def disconnect(self):
self._scripts = {}
self._parent = None
self._arsenal_actions = None
self._push_actions = None
self._pxt_actions = None
self._mxt_actions = None
if IS_LIVE_9:
ControlSurfaceComponent.disconnect(self)
def on_enabled_changed(self):
pass
def update(self):
pass
def connect_script_instances(self, instanciated_scripts):
""" Build dict of connected scripts and their components, doesn't work with non-Framework scripts, but does work with User Remote Scripts """
if IS_LIVE_9:
instanciated_scripts = self._parent._control_surfaces()
self._scripts = {}
for index in range (len(instanciated_scripts)):
script = instanciated_scripts[index]
self._scripts[index] = {'script' : script, 'name' : None, 'repeat' : False, 'mixer' : None, 'device' : None, 'last_ring_pos' : None,
'session' : None, 'track_link' : False, 'scene_link' : False, 'centered_link' : False, 'color' : False}
script_name = script.__class__.__name__
if isinstance (script, (ControlSurface, CS)):
if script_name == 'GenericScript':
script_name = script._suggested_input_port
if script_name.startswith('Arsenal'):
self._arsenal_actions.set_script(script)
if script_name == 'Push' and IS_LIVE_9:
self._push_actions.set_script(script)
if script_name.startswith('PXT_Live') and IS_LIVE_9:
self._pxt_actions.set_script(script)
if script_name == 'MXT_Live' and IS_LIVE_9:
self._mxt_actions.set_script(script)
if not script_name.startswith('ClyphX'):
if (IS_LIVE_9 and script._components == None) or script.components == None:
return
else:
self._scripts[index]['name'] = script_name.upper()
for c in script.components:
if isinstance (c, SessionComponent):
self._scripts[index]['session'] = c
if script_name.startswith('APC'):
self._scripts[index]['color'] = {'GREEN' : (1, 2), 'RED' : (3, 4), 'AMBER' : (5, 6)}
self._scripts[index]['metro'] = {'controls' : c._stop_track_clip_buttons, 'component' : None, 'override' : None}
if script_name == 'Launchpad':
self._scripts[index]['color'] = {'GREEN' : (52, 56), 'RED' : (7, 11), 'AMBER' : (55, 59)}
self._scripts[index]['metro'] = {'controls' : script._selector._side_buttons, 'component' : None, 'override' : script._selector}
if isinstance (c, MixerComponent):
self._scripts[index]['mixer'] = c
if isinstance (c, DeviceComponent):
self._scripts[index]['device'] = c
if IS_LIVE_9_5 and script_name == 'Push':
self._scripts[index]['session'] = script._session_ring
self._scripts[index]['mixer'] = script._mixer
elif script_name == 'Nocturn':
self._scripts[index]['device'] = script.device_controller
script.device_controller.canonical_parent = script
def dispatch_push_action(self, track, xclip, ident, action, args):
""" Dispatch Push-related actions to PushActions. """
if self._push_actions:
self._push_actions.dispatch_action(track, xclip, ident, action, args)
def dispatch_pxt_action(self, track, xclip, ident, action, args):
""" Dispatch PXT-related actions to PXTActions. """
if self._pxt_actions:
self._pxt_actions.dispatch_action(track, xclip, ident, action, args)
def dispatch_mxt_action(self, track, xclip, ident, action, args):
""" Dispatch MXT-related actions to MXTActions. """
if self._mxt_actions:
self._mxt_actions.dispatch_action(track, xclip, ident, action, args)
def dispatch_arsenal_action(self, track, xclip, ident, action, args):
""" Dispatch Arsenal-related actions to ArsenalActions. """
if self._arsenal_actions:
self._arsenal_actions.dispatch_action(track, xclip, ident, action, args)
def dispatch_cs_action(self, track, xclip, ident, action, args):
""" Dispatch appropriate control surface actions """
script = self._get_script_to_operate_on(action)
if script != None:
if 'METRO ' in args and self._scripts[script].has_key('metro'):
self.handle_visual_metro(self._scripts[script], args)
elif 'RINGLINK ' in args and self._scripts[script]['session']:
self.handle_ring_link(self._scripts[script]['session'], script, args[9:])
elif 'RING ' in args and self._scripts[script]['session']:
self.handle_session_offset(script, self._scripts[script]['session'], args[5:])
elif 'COLORS ' in args and self._scripts[script]['session'] and self._scripts[script]['color']:
self.handle_session_colors(self._scripts[script]['session'], self._scripts[script]['color'], args[7:])
elif 'DEV LOCK' in args and self._scripts[script]['device']:
self._scripts[script]['device'].canonical_parent.toggle_lock()
elif 'BANK ' in args and self._scripts[script]['mixer']:
self.handle_track_bank(script, xclip, ident, self._scripts[script]['mixer'], self._scripts[script]['session'], args[5:])
elif 'RPT' in args and IS_LIVE_9:
self.handle_note_repeat(self._scripts[script]['script'], script, args)
else:
if self._scripts[script]['mixer'] and '/' in args[:4]:
self.handle_track_action(self._scripts[script]['mixer'], xclip, ident, args)
def _get_script_to_operate_on(self, script_info):
""" Returns the script index to operate on, which can be specified in terms of its index
or its name. Also, can use SURFACE (legacy) or CS (new) to indicate a surface action. """
script = None
try:
script_spec = None
if 'SURFACE' in script_info:
script_spec = script_info.strip('SURFACE')
elif 'CS' in script_info:
script_spec = script_info.strip('CS')
if len(script_spec) == 1:
script = int(script_spec) - 1
if not self._scripts.has_key(script):
script = None
else:
script_spec = script_spec.strip('"').strip()
for k, v in self._scripts.items():
if v['name'] == script_spec:
script = k
except: script = None
return script
def handle_note_repeat(self, script, script_index, args):
""" Set note repeat for the given surface """
args = args.replace('RPT', '').strip()
if args in REPEAT_STATES:
if args == 'OFF':
script._c_instance.note_repeat.enabled = False
self._scripts[script_index]['repeat'] = False
else:
script._c_instance.note_repeat.repeat_rate = REPEAT_STATES[args]
script._c_instance.note_repeat.enabled = True
self._scripts[script_index]['repeat'] = True
else:
self._scripts[script_index]['repeat'] = not self._scripts[script_index]['repeat']
script._c_instance.note_repeat.enabled = self._scripts[script_index]['repeat']
def handle_track_action(self, mixer, xclip, ident, args):
""" Get control surface track(s) to operate on and call main action dispatch """
track_start = None
track_end = None
track_range = args.split('/')[0]
actions = str(args[args.index('/')+1:].strip()).split()
new_action = actions[0]
new_args = ''
if len(actions) > 1:
new_args = ' '.join(actions[1:])
if 'ALL' in track_range:
track_start = 0
track_end = len(mixer._channel_strips)
elif '-' in track_range:
track_range = track_range.split('-')
try:
track_start = int(track_range[0]) - 1
track_end = int(track_range[1])
except:
track_start = None
track_end = None
else:
try:
track_start = int(track_range) - 1
track_end = track_start + 1
except:
track_start = None
track_end = None
if track_start != None and track_end != None:
if track_start in range (len(mixer._channel_strips) + 1) and track_end in range (len(mixer._channel_strips) + 1) and track_start < track_end:
track_list = []
for index in range (track_start, track_end):
if index + mixer._track_offset in range (len(mixer.tracks_to_use())):
track_list.append(mixer.tracks_to_use()[index + mixer._track_offset])
if track_list:
self._parent.action_dispatch(track_list, xclip, new_action, new_args, ident)
def handle_track_bank(self, script_key, xclip, ident, mixer, session, args):
""" Move track bank (or session bank) and select first track in bank...this works even with controllers without banks like User Remote Scripts """
if IS_LIVE_9_5 and self._scripts[script_key]['name'] == 'PUSH':
t_offset, s_offset = self._push_actions.get_session_offsets(session)
tracks = session.tracks_to_use()
else:
t_offset, s_offset = mixer._track_offset, session._scene_offset if session else None
tracks = mixer.tracks_to_use()
new_offset = None
if args == 'FIRST':
new_offset = 0
elif args == 'LAST':
new_offset = len(tracks) - len(mixer._channel_strips)
else:
try:
offset = int(args)
if offset + t_offset in range (len(tracks)):
new_offset = offset + t_offset
except: new_offset = None
if new_offset >= 0:
if session:
session.set_offsets(new_offset, s_offset)
else:
mixer.set_track_offset(new_offset)
self.handle_track_action(mixer, xclip, ident, '1/SEL')
def handle_session_offset(self, script_key, session, args):
""" Handle moving session offset absolutely or relatively as well as storing/recalling its last position. """
if IS_LIVE_9_5 and self._scripts[script_key]['name'] == 'PUSH':
last_pos = self._push_actions.handle_session_offset(session, self._scripts[script_key]['last_ring_pos'], args, self._parse_ring_spec)
self._scripts[script_key]['last_ring_pos'] = last_pos or None
return
try:
new_track = session._track_offset
new_scene = session._scene_offset
if args.strip() == 'LAST':
last_pos = self._scripts[script_key]['last_ring_pos']
if last_pos:
session.set_offsets(last_pos[0], last_pos[1])
return
else:
self._scripts[script_key]['last_ring_pos'] = (new_track, new_scene)
new_track, args = self._parse_ring_spec('T', args, new_track, self.song().tracks)
new_scene, args = self._parse_ring_spec('S', args, new_scene, self.song().scenes)
if new_track == -1 or new_scene == -1:
return
session.set_offsets(new_track, new_scene)
except: pass
def _parse_ring_spec(self, spec_id, arg_string, default_index, list_to_search):
""" Parses a ring action specification and returns the specified track/scene index
as well as the arg_string without the specification that was parsed. """
index = default_index
arg_array = arg_string.split()
for a in arg_array:
if a.startswith(spec_id):
if a[1].isdigit():
index = int(a.strip(spec_id)) - 1
arg_string = arg_string.replace(a, '', 1).strip()
break
elif a[1] in ('<', '>'):
index += self._parent.get_adjustment_factor(a.strip(spec_id))
arg_string = arg_string.replace(a, '', 1).strip()
break
elif a[1] == '"':
name_start_pos = arg_string.index(spec_id + '"')
name = arg_string[name_start_pos + 2:]
name_end_pos = name.index('"')
name = name[:name_end_pos]
for i, item in enumerate(list_to_search):
if name == item.name.upper():
index = i
break
arg_string = arg_string.replace(spec_id + '"' + name + '"', '', 1).strip()
break
return (index, arg_string)
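    # Illustrative note (added; not part of the original script): a ring spec can
    # combine a track part and a scene part, each introduced by its spec_id letter:
    #   "T4 S2"        - absolute 1-based positions (resolved to indexes 3 and 1)
    #   "T< S>"        - relative moves handled via get_adjustment_factor
    #   'T"MY TRACK"'  - look a track up by its (upper-cased) name
    # _parse_ring_spec returns the resolved index together with the arg string
    # stripped of the part it consumed, so the remaining spec can be parsed next.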
def handle_ring_link(self, session, script_index, args):
""" Handles linking/unliking session offsets to the selected track or scene with centering if specified. """
self._scripts[script_index]['track_link'] = args == 'T' or 'T ' in args or ' T' in args
self._scripts[script_index]['scene_link'] = 'S' in args
self._scripts[script_index]['centered_link'] = 'CENTER' in args
def handle_session_colors(self, session, colors, args):
""" Handle changing clip launch LED colors """
args = args.split()
if len(args) == 3:
for a in args:
if not a in colors:
return
for scene_index in range(session.height()):
scene = session.scene(scene_index)
for track_index in range(session.width()):
clip_slot = scene.clip_slot(track_index)
clip_slot.set_started_value(colors[args[0]][0])
clip_slot.set_triggered_to_play_value(colors[args[0]][1])
clip_slot.set_recording_value(colors[args[1]][0])
clip_slot.set_triggered_to_record_value(colors[args[1]][1])
clip_slot.set_stopped_value(colors[args[2]][0])
clip_slot.update()
def handle_visual_metro(self, script, args):
""" Handle visual metro for APCs and Launchpad. """
if 'ON' in args and not script['metro']['component']:
m = VisualMetro(self._parent, script['metro']['controls'], script['metro']['override'])
script['metro']['component'] = m
elif 'OFF' in args and script['metro']['component']:
script['metro']['component'].disconnect()
script['metro']['component'] = None
def on_selected_track_changed(self):
""" Moves the track offset of all track linked surfaces to the selected track with centering if specified. """
trk = self.song().view.selected_track
if trk in self.song().tracks:
trk_id = list(self.song().visible_tracks).index(trk)
for k, v in self._scripts.items():
if v['track_link']:
new_trk_id = trk_id
try:
session = self._scripts[k]['session']
if IS_LIVE_9_5 and v['name'] == 'PUSH':
width = self._push_actions.get_session_dimensions(session)[0]
t_offset, s_offset = self._push_actions.get_session_offsets(session)
else:
width = session.width()
t_offset, s_offset = session._track_offset, session._scene_offset
if self._scripts[k]['centered_link']:
mid_point = (width / 2)
if new_trk_id < mid_point:
if t_offset <= new_trk_id:
return
else:
new_trk_id = 0
else:
centered_id = new_trk_id - mid_point
if centered_id in range(len(self.song().visible_tracks)):
new_trk_id = centered_id
session.set_offsets(new_trk_id, s_offset)
except: pass
def on_selected_scene_changed(self):
""" Moves the scene offset of all scene linked surfaces to the selected scene with centering if specified. """
scn_id = list(self.song().scenes).index(self.song().view.selected_scene)
for k, v in self._scripts.items():
if v['scene_link']:
new_scn_id = scn_id
try:
session = self._scripts[k]['session']
if IS_LIVE_9_5 and v['name'] == 'PUSH':
height = self._push_actions.get_session_dimensions(session)[1]
t_offset, s_offset = self._push_actions.get_session_offsets(session)
else:
height = session.height()
t_offset, s_offset = session._track_offset, session._scene_offset
if self._scripts[k]['centered_link']:
mid_point = (height / 2)
if new_scn_id < mid_point:
if s_offset <= new_scn_id:
return
else:
new_scn_id = 0
else:
centered_id = new_scn_id - mid_point
if centered_id in range(len(self.song().scenes)):
new_scn_id = centered_id
session.set_offsets(t_offset, new_scn_id)
except: pass
class VisualMetro(ControlSurfaceComponent):
__module__ = __name__
__doc__ = ' Visual metro for APCs and Launchpad '
def __init__(self, parent, controls, override):
ControlSurfaceComponent.__init__(self)
self._parent = parent
self._controls = controls
self._override = override
self._last_beat = -1
self.song().add_current_song_time_listener(self.on_time_changed)
self.song().add_is_playing_listener(self.on_time_changed)
def disconnect(self):
if self._controls:
self.clear()
self._controls = None
self.song().remove_current_song_time_listener(self.on_time_changed)
self.song().remove_is_playing_listener(self.on_time_changed)
self._override = None
self._parent = None
if IS_LIVE_9:
ControlSurfaceComponent.disconnect(self)
def on_enabled_changed(self):
pass
def update(self):
pass
def on_time_changed(self):
""" Show visual metronome via control LEDs upon beat changes (will not be shown if in Launchpad User 1) """
if self.song().is_playing and (not self._override or (self._override and self._override._mode_index != 1)):
time = str(self.song().get_current_beats_song_time()).split('.')
if self._last_beat != int(time[1])-1:
self._last_beat = int(time[1])-1
self.clear()
if self._last_beat < len(self._controls):
self._controls[self._last_beat].turn_on()
else:
self._controls[len(self._controls)-1].turn_on()
else:
self.clear()
def clear(self):
""" Clear all control LEDs """
for c in self._controls:
c.turn_off()
# local variables:
# tab-width: 4
| lgpl-2.1 | 3,540,569,321,692,649,500 | 37.809129 | 147 | 0.655512 | false |
OCA/event | partner_event/models/event_registration.py | 1 | 2816 | # Copyright 2014 Tecnativa S.L. - Pedro M. Baeza
# Copyright 2015 Tecnativa S.L. - Javier Iniesta
# Copyright 2016 Tecnativa S.L. - Antonio Espinosa
# Copyright 2016 Tecnativa S.L. - Vicent Cubells
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class EventRegistration(models.Model):
_inherit = "event.registration"
partner_id = fields.Many2one(
ondelete='restrict',
)
attendee_partner_id = fields.Many2one(
comodel_name='res.partner',
string='Attendee Partner',
ondelete='restrict',
copy=False,
)
def _prepare_partner(self, vals):
return {
'name': vals.get('name') or vals.get('email'),
'email': vals.get('email', False),
'phone': vals.get('phone', False),
}
@api.model
def create(self, vals):
if not vals.get('attendee_partner_id') and vals.get('email'):
Partner = self.env['res.partner']
Event = self.env['event.event']
# Look for a partner with that email
email = vals.get('email').replace('%', '').replace('_', '\\_')
attendee_partner = Partner.search([
('email', '=ilike', email)
], limit=1)
event = Event.browse(vals['event_id'])
if attendee_partner:
vals['name'] = vals.setdefault('name', attendee_partner.name)
vals['phone'] = vals.setdefault(
'phone', attendee_partner.phone)
elif event.create_partner:
# Create partner
attendee_partner = Partner.sudo().create(
self._prepare_partner(vals))
vals['attendee_partner_id'] = attendee_partner.id
return super(EventRegistration, self).create(vals)
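    # Illustrative example (added; values are placeholders): creating a registration
    # with only an email will link an existing partner with that address or, if the
    # event has create_partner set, create a new one:
    #   self.env['event.registration'].create({
    #       'event_id': event.id,
    #       'email': '[email protected]',
    #   })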
@api.multi
def partner_data_update(self, data):
reg_data = dict((k, v) for k, v in
data.items() if k in ['name', 'email', 'phone'])
if reg_data:
# Only update registration data if this event is not old
registrations = self.filtered(
lambda x: x.event_end_date >= fields.Datetime.now())
registrations.write(reg_data)
@api.onchange('attendee_partner_id', 'partner_id')
def _onchange_partner(self):
if self.attendee_partner_id:
if not self.partner_id:
self.partner_id = self.attendee_partner_id
get_attendee_partner_address = {
'get_attendee_partner_address': self.attendee_partner_id,
}
return super(EventRegistration, self.with_context(
**get_attendee_partner_address))._onchange_partner()
return super(EventRegistration, self)._onchange_partner()
| agpl-3.0 | -2,625,753,482,065,121,300 | 38.111111 | 77 | 0.572798 | false |
Jonathan-Livingston-Seagull/cerebro-dl | cerebro/models/hidden_layer_model.py | 1 | 2771 | import theano
import theano.tensor as T
import numpy
class HiddenLayerModel(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
        Typical hidden layer of an MLP: units are fully-connected and have a
        sigmoidal activation function. The weight matrix W is of shape (n_in, n_out)
        and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# end-snippet-1
# `W` is initialized with `W_values` which is uniformely sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# for tanh activation function
# the output of uniform if converted using asarray to dtype
# theano.config.floatX so that the code is runable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
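# Illustrative usage sketch (added; not part of the original file, names are
# placeholders and Theano must be importable):
#   rng = numpy.random.RandomState(1234)
#   x = T.matrix('x')
#   layer = HiddenLayerModel(rng, input=x, n_in=784, n_out=500, activation=T.tanh)
#   # layer.output is the symbolic activation; layer.params == [layer.W, layer.b]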
| bsd-3-clause | 6,810,689,489,409,649,000 | 34.525641 | 79 | 0.562252 | false |
tqchen/tvm | tutorials/frontend/from_onnx.py | 1 | 3929 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile ONNX Models
===================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_
This article is an introductory tutorial to deploy ONNX models with Relay.
To begin, the ONNX package must be installed.
A quick solution is to install protobuf compiler, and
.. code-block:: bash
pip install onnx --user
or please refer to the official site.
https://github.com/onnx/onnx
"""
import onnx
import numpy as np
import tvm
from tvm import te
import tvm.relay as relay
from tvm.contrib.download import download_testdata
######################################################################
# Load pretrained ONNX model
# ---------------------------------------------
# The example super resolution model used here is exactly the same model as in the ONNX tutorial
# http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
# we skip the pytorch model construction part, and download the saved onnx model
model_url = "".join(
[
"https://gist.github.com/zhreshold/",
"bcda4716699ac97ea44f791c24310193/raw/",
"93672b029103648953c4e5ad3ac3aadf346a4cdc/",
"super_resolution_0.2.onnx",
]
)
model_path = download_testdata(model_url, "super_resolution.onnx", module="onnx")
# now you have super_resolution.onnx on disk
onnx_model = onnx.load(model_path)
######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!
from PIL import Image
img_url = "https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
img_ycbcr = img.convert("YCbCr") # convert to YCbCr
img_y, img_cb, img_cr = img_ycbcr.split()
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]
######################################################################
# Compile the model with relay
# ---------------------------------------------
target = "llvm"
input_name = "1"
shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
with tvm.transform.PassContext(opt_level=1):
intrp = relay.build_module.create_executor("graph", mod, tvm.cpu(0), target)
######################################################################
# Execute on TVM
# ---------------------------------------------
dtype = "float32"
tvm_output = intrp.evaluate()(tvm.nd.array(x.astype(dtype)), **params).asnumpy()
######################################################################
# Display results
# ---------------------------------------------
# We put input and output image neck to neck
from matplotlib import pyplot as plt
out_y = Image.fromarray(np.uint8((tvm_output[0, 0]).clip(0, 255)), mode="L")
out_cb = img_cb.resize(out_y.size, Image.BICUBIC)
out_cr = img_cr.resize(out_y.size, Image.BICUBIC)
result = Image.merge("YCbCr", [out_y, out_cb, out_cr]).convert("RGB")
canvas = np.full((672, 672 * 2, 3), 255)
canvas[0:224, 0:224, :] = np.asarray(img)
canvas[:, 672:, :] = np.asarray(result)
plt.imshow(canvas.astype(np.uint8))
plt.show()
| apache-2.0 | -3,683,840,951,492,471,000 | 36.419048 | 89 | 0.614915 | false |
kgeorge/kgeorge-cv | samples/skindetect/authoring/python/checkImages.py | 1 | 1352 | __author__ = 'kgeorge'
from optparse import OptionParser
from PIL import Image
import os
def main():
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="imagefilename", metavar="FILE")
(options, args) = parser.parse_args()
print options.filename
srcBaseDir = r'/Users/kgeorge/Documents/projects/kgeorge-cv/samples/skindetect/authoring/image/'
maskBaseDir = r'/Users/kgeorge/Downloads'
maskFilename = os.path.splitext(options.filename)[0] + '.png'
im = Image.open(os.path.join(srcBaseDir, options.filename))
im2 = Image.open(os.path.join(maskBaseDir, 'skindetect-' + maskFilename))
print im.size, im2.size
if(im2.size[0] >= im.size[0] and im2.size[1] >= im.size[1]):
im2 = im2.crop((0,0, im.size[0], im2.size[1]))
#im.paste(im2, (0,0))
im2 = im2.convert('L')
im2 = im2.convert('1')
pass
elif (im2.size[0] <= im.size[0] and im2.size[1] <= im.size[1]):
print 'mask smaller than image'
pass
else:
raise IOError
im.paste(im2, (0,0))
maskFilename = os.path.splitext(options.filename)[0] + '_mask' + '.png'
im.save(os.path.join(srcBaseDir, maskFilename))
print options.filename, im.size
print options.filename, im2.size
pass
if __name__ == '__main__':
main()
| bsd-3-clause | 8,474,431,141,774,026,000 | 29.727273 | 100 | 0.617604 | false |
rollbar/pyrollbar | rollbar/examples/fastapi/app_logger.py | 1 | 1747 | #!/usr/bin/env python
# This example uses the Uvicorn package, which must be installed. However, it can be
# replaced with any other ASGI-compliant server.
#
# NOTE: Python 3.6 requires aiocontextvars package to be installed.
# Optional asynchronous reporting requires HTTPX package to be installed.
#
# Run: python app_logger.py
import logging
import fastapi
import rollbar
import uvicorn
from rollbar.contrib.fastapi import LoggerMiddleware
from rollbar.logger import RollbarHandler
# Initialize Rollbar SDK with your server-side ACCESS_TOKEN
rollbar.init(
'ACCESS_TOKEN',
environment='staging',
handler='async', # For asynchronous reporting use: default, async or httpx
)
# Set root logger to log DEBUG and above
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Report ERROR and above to Rollbar
rollbar_handler = RollbarHandler()
rollbar_handler.setLevel(logging.ERROR)
# Attach Rollbar handler to the root logger
logger.addHandler(rollbar_handler)
# Integrate Rollbar with FastAPI application
app = fastapi.FastAPI()
app.add_middleware(LoggerMiddleware) # should be added as the last middleware
# GET query params will be sent to Rollbar and available in the UI
# $ curl http://localhost:8888?param1=hello¶m2=world
@app.get('/')
async def read_root():
# Report log entries
logger.critical('Critical message sent to Rollbar')
logger.error('Error message sent to Rollbar')
# Ignore log entries
logger.warning('Warning message is not sent to Rollbar')
logger.info('Info message is not sent to Rollbar')
logger.debug('Debug message is not sent to Rollbar')
return {'hello': 'world'}
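# Illustrative note (added; not part of the original example): because the
# RollbarHandler is attached to the root logger, exceptions logged anywhere in the
# app are reported as well, e.g. inside a request handler:
#   try:
#       risky_operation()  # placeholder name
#   except Exception:
#       logger.exception('risky_operation failed')  # ERROR level -> sent to Rollbar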
if __name__ == '__main__':
uvicorn.run(app, host='localhost', port=8888)
| mit | 5,014,467,864,173,894,000 | 28.116667 | 79 | 0.746422 | false |
Nitrokey/libnitrokey | python3_bindings_example.py | 1 | 5532 | #!/usr/bin/env python3
"""
Copyright (c) 2015-2018 Nitrokey UG
This file is part of libnitrokey.
libnitrokey is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
libnitrokey is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with libnitrokey. If not, see <http://www.gnu.org/licenses/>.
SPDX-License-Identifier: LGPL-3.0
"""
import cffi
from enum import Enum
"""
This example will print 10 HOTP codes from just written HOTP#2 slot.
For more examples of use please refer to unittest/test_*.py files.
"""
ffi = cffi.FFI()
get_string = ffi.string
class DeviceErrorCode(Enum):
STATUS_OK = 0
NOT_PROGRAMMED = 3
WRONG_PASSWORD = 4
STATUS_NOT_AUTHORIZED = 5
STATUS_AES_DEC_FAILED = 0xa
def get_library():
fp = 'NK_C_API.h' # path to C API header
declarations = []
with open(fp, 'r') as f:
declarations = f.readlines()
cnt = 0
a = iter(declarations)
for declaration in a:
if declaration.strip().startswith('NK_C_API'):
declaration = declaration.replace('NK_C_API', '').strip()
while ';' not in declaration:
declaration += (next(a)).strip()
# print(declaration)
ffi.cdef(declaration, override=True)
cnt +=1
print('Imported {} declarations'.format(cnt))
C = None
import os, sys
path_build = os.path.join(".", "build")
paths = [
os.environ.get('LIBNK_PATH', None),
os.path.join(path_build,"libnitrokey.so"),
os.path.join(path_build,"libnitrokey.dylib"),
os.path.join(path_build,"libnitrokey.dll"),
os.path.join(path_build,"nitrokey.dll"),
]
for p in paths:
if not p: continue
print("Trying " +p)
p = os.path.abspath(p)
if os.path.exists(p):
print("Found: "+p)
C = ffi.dlopen(p)
break
else:
print("File does not exist: " + p)
if not C:
print("No library file found")
print("Please set the path using LIBNK_PATH environment variable to existing library or compile it (see "
"README.md for details)")
sys.exit(1)
return C
def get_hotp_code(lib, i):
return get_string(lib.NK_get_hotp_code(i))
def to_hex(ss):
return ''.join([ format(ord(s),'02x') for s in ss ])
print('Warning!')
print('This example will change your configuration on inserted stick and overwrite your HOTP#2 slot.')
print('Please write "continue" to continue or any other string to quit')
a = input()
if not a == 'continue':
exit()
ADMIN = input('Please enter your admin PIN (empty string uses 12345678) ')
ADMIN = ADMIN or '12345678' # use default if empty string
show_log = input('Should log messages be shown (please write "yes" to enable; this will make the script output harder to read) ') == 'yes'
libnitrokey = get_library()
if show_log:
log_level = input('Please select verbosity level (0-5, 2 is library default, 3 will be selected on empty input) ')
log_level = log_level or '3'
log_level = int(log_level)
libnitrokey.NK_set_debug_level(log_level)
else:
libnitrokey.NK_set_debug_level(2)
ADMIN_TEMP = '123123123'
RFC_SECRET = to_hex('12345678901234567890')
# libnitrokey.NK_login('S') # connect only to Nitrokey Storage device
# libnitrokey.NK_login('P') # connect only to Nitrokey Pro device
device_connected = libnitrokey.NK_login_auto() # connect to any Nitrokey Stick
if device_connected:
print('Connected to Nitrokey device!')
else:
print('Could not connect to Nitrokey device!')
exit()
use_8_digits = True
pin_correct = libnitrokey.NK_first_authenticate(ADMIN.encode('ascii'), ADMIN_TEMP.encode('ascii')) == DeviceErrorCode.STATUS_OK.value
if pin_correct:
print('Your PIN is correct!')
else:
print('Your PIN is not correct! Please try again. Please be careful to not lock your stick!')
retry_count_left = libnitrokey.NK_get_admin_retry_count()
print('Retry count left: %d' % retry_count_left )
exit()
# For function parameters documentation please check NK_C_API.h
assert libnitrokey.NK_write_config(255, 255, 255, False, True, ADMIN_TEMP.encode('ascii')) == DeviceErrorCode.STATUS_OK.value
libnitrokey.NK_first_authenticate(ADMIN.encode('ascii'), ADMIN_TEMP.encode('ascii'))
libnitrokey.NK_write_hotp_slot(1, 'python_test'.encode('ascii'), RFC_SECRET.encode('ascii'), 0, use_8_digits, False, False, "".encode('ascii'),
ADMIN_TEMP.encode('ascii'))
# RFC test according to: https://tools.ietf.org/html/rfc4226#page-32
test_data = [
1284755224, 1094287082, 137359152, 1726969429, 1640338314, 868254676, 1918287922, 82162583, 673399871,
645520489,
]
print('Getting HOTP code from Nitrokey Stick (RFC test, 8 digits): ')
for i in range(10):
hotp_slot_1_code = get_hotp_code(libnitrokey, 1)
correct_str = "correct!" if hotp_slot_1_code.decode('ascii') == str(test_data[i])[-8:] else "not correct"
print('%d: %s, should be %s -> %s' % (i, hotp_slot_1_code.decode('ascii'), str(test_data[i])[-8:], correct_str))
libnitrokey.NK_logout() # disconnect device
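# Added for illustration (not part of the original example): a pure-Python HOTP
# implementation (RFC 4226) that reproduces the expected codes above using only
# the standard library.
def software_hotp(secret, counter, digits=8):
    import hmac, hashlib, struct
    digest = hmac.new(secret, struct.pack('>Q', counter), hashlib.sha1).digest()
    offset = digest[-1] & 0x0f
    code = struct.unpack('>I', digest[offset:offset + 4])[0] & 0x7fffffff
    return str(code % (10 ** digits)).zfill(digits)
# e.g. software_hotp(b'12345678901234567890', 0) == str(test_data[0])[-8:]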
| lgpl-3.0 | 5,766,300,729,958,608,000 | 34.235669 | 143 | 0.667932 | false |
lorensen/VTKExamples | src/Python/Filtering/ConnectivityFilter.py | 1 | 1442 | #!/usr/bin/env python
import vtk
def main():
sphereSource1 = vtk.vtkSphereSource()
sphereSource1.Update()
delaunay1 = vtk.vtkDelaunay3D()
delaunay1.SetInputConnection(sphereSource1.GetOutputPort())
delaunay1.Update()
sphereSource2 = vtk.vtkSphereSource()
sphereSource2.SetCenter(5,0,0)
sphereSource2.Update()
delaunay2 = vtk.vtkDelaunay3D()
delaunay2.SetInputConnection(sphereSource2.GetOutputPort())
delaunay2.Update()
appendFilter = vtk.vtkAppendFilter()
appendFilter.AddInputConnection(delaunay1.GetOutputPort())
appendFilter.AddInputConnection(delaunay2.GetOutputPort())
appendFilter.Update()
connectivityFilter = vtk.vtkConnectivityFilter()
connectivityFilter.SetInputConnection(appendFilter.GetOutputPort())
connectivityFilter.SetExtractionModeToAllRegions()
connectivityFilter.ColorRegionsOn()
connectivityFilter.Update()
# Visualize
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(connectivityFilter.GetOutputPort())
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renWindow = vtk.vtkRenderWindow()
renWindow.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWindow)
iren.Initialize()
iren.Start()
if __name__ == '__main__':
main()
| apache-2.0 | -5,614,666,063,002,479,000 | 27.27451 | 71 | 0.715673 | false |
frontendphil/analyzr | analyzr/settings.py | 1 | 6077 | from os.path import abspath, dirname
# Django settings for analyzr project.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
PROJECT_PATH = abspath('%s/..' % dirname(abspath(__file__)))
CHECKOUT_PATH = '%s/repos' % PROJECT_PATH
RESULT_PATH = '%s/results' % PROJECT_PATH
CONFIG_PATH = '%s/templates/config' % PROJECT_PATH
CONTRIBUTORS_PER_PAGE = 10
ANONYMIZE = True
# defines hardness of the squale aggregation algorithm
# LOW = 3, MEDIUM = 9, HARD = 30
LAMBDA = 9.0
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '%s/analyzr.db' % PROJECT_PATH, # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
EMAIL = {
"host": "", # smtp server
"account": "", # email account name
"password": "" # account password
}
SEND_EMAILS = True
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = "%s/static" % PROJECT_PATH
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
LOGIN_URL = "/login"
COMPRESS_ENABLED = not DEBUG
COMPRESS_URL = STATIC_URL
COMPRESS_ROOT = STATIC_ROOT
COMPRESS_OUTPUT_DIR = "cache"
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
'compressor.finders.CompressorFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'analyzr.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'analyzr.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"%s/templates" % PROJECT_PATH,
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'south',
'parsr',
'timezone_field',
'annoying',
'compressor',
'django_extensions',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
try:
from local_settings import *
except:
pass
| mit | -6,614,729,428,346,790,000 | 30.487047 | 127 | 0.682245 | false |
bhautikj/vrProjector | vrProjectorWrapper.py | 1 | 3828 | # Copyright 2016 Bhautik J Joshi
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import vrProjector
def main():
parser = argparse.ArgumentParser(description='Reproject photospheres')
parser.add_argument('--sourceProjection', required=True, help='Type of source projection. Valid values are: Equirectangular, Cubemap, SideBySideFisheye')
parser.add_argument('--sourceImage', required=True, help='Source image[s]. List multiple images in double quotes like so "front.png right.png back.png left.png top.png bottom.png"')
parser.add_argument('--useBilnear', required=False, help='Use bilinear interpolation when reprojecting. Valid values are true and false.')
parser.add_argument('--outProjection', required=True, help='Type of output projection. Valid values are: Equirectangular, Cubemap, SideBySideFisheye, Fisheye')
parser.add_argument('--outImage', required=True, help='output image[s]. List multiple images in double quotes like so "front.png right.png back.png left.png top.png bottom.png"')
parser.add_argument('--outWidth', required=True, help='output image[s] width in pixels')
parser.add_argument('--outHeight', required=True, help='output image[s] height in pixels')
args = parser.parse_args()
source = None
if args.sourceProjection.lower() == "Equirectangular".lower():
source = vrProjector.EquirectangularProjection()
source.loadImage(args.sourceImage)
elif args.sourceProjection.lower() == "SideBySideFisheye".lower():
source = vrProjector.SideBySideFisheyeProjection()
source.loadImage(args.sourceImage)
elif args.sourceProjection.lower() == "Cubemap".lower():
source = vrProjector.CubemapProjection()
imageList = args.sourceImage.split(' ')
source.loadImages(imageList[0], imageList[1], imageList[2], imageList[3], imageList[4], imageList[5])
elif args.sourceProjection.lower() == "Fisheye".lower():
source = vrProjector.FisheyeProjection()
source.loadImage(args.sourceImage)
else:
print("Quitting because unsupported source projection type: ", args.sourceProjection)
return
if args.useBilnear is not None:
if args.useBilnear.lower() == "true":
source.set_use_bilinear(True)
out = None
if args.outProjection.lower() == "Equirectangular".lower():
out = vrProjector.EquirectangularProjection()
out.initImage(int(args.outWidth), int(args.outHeight))
elif args.outProjection.lower() == "SideBySideFisheye".lower():
out = vrProjector.SideBySideFisheyeProjection()
out.initImage(int(args.outWidth), int(args.outHeight))
elif args.outProjection.lower() == "Cubemap".lower():
out = vrProjector.CubemapProjection()
out.initImages(int(args.outWidth), int(args.outHeight))
elif args.outProjection.lower() == "Fisheye".lower():
out = vrProjector.FisheyeProjection()
out.initImage(int(args.outWidth), int(args.outHeight))
else:
print("Quitting because unsupported output projection type: ", args.outProjection)
return
out.reprojectToThis(source)
# out.reprojectToThisThreaded(source, 16)
if args.outProjection.lower() == "Cubemap".lower():
imageList = args.outImage.split(' ')
out.saveImages(imageList[0], imageList[1], imageList[2], imageList[3], imageList[4], imageList[5])
else:
out.saveImage(args.outImage)
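# Example invocation (added for illustration; file names are placeholders),
# assembled from the argparse options defined above:
#   python vrProjectorWrapper.py --sourceProjection Equirectangular \
#     --sourceImage equirect.png --outProjection Cubemap \
#     --outImage "front.png right.png back.png left.png top.png bottom.png" \
#     --outWidth 1024 --outHeight 1024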
if __name__ == "__main__":
main() | apache-2.0 | 3,129,478,283,443,361,000 | 47.468354 | 183 | 0.738506 | false |
Urinx/SomeCodes | Bioinformatics/other_code/Traindata.py | 1 | 2902 | #!/usr/bin/env python
# coding: utf-8
from time import time
import random
class Traindata():
"""docstring for Traindata"""
posi = []
nega = []
cv = []
k = 3
def __init__(self):
pass
def load_intm(self, filename, isPosi):
startTime = time()
dataset = self.posi if isPosi else self.nega
state = 'Positive' if isPosi else 'Negative'
with open(filename) as f:
f.readline()
i = 0
while 1:
line = f.readline().replace('\t',' ')
if not line: break
pp = line.split(' ')[1].replace('|',' ')
dataset.append(pp)
i += 1
totalTime = time()-startTime
print '[*] Load '+state+' PPIs data('+str(i)+') from \''+filename+'\' in '+str(totalTime)+'s'
def load_txt(self, filename, isPosi):
startTime = time()
dataset = self.posi if isPosi else self.nega
state = 'Positive' if isPosi else 'Negative'
with open(filename) as f:
line = ' '
i = 0
while line:
line = f.readline().replace('\r\n','')
dataset.append(' '.join(line.split('\t')))
i += 1
dataset.pop()
i -= 1
totalTime = time()-startTime
print '[*] Load '+state+' PPIs data('+str(i)+') from \''+filename+'\' in '+str(totalTime)+'s'
def load_mitab(self, filename, isPosi):
startTime = time()
dataset = self.posi if isPosi else self.nega
state = 'Positive' if isPosi else 'Negative'
with open(filename) as f:
i = 0
while 1:
line = f.readline().replace('\n','')
if not line: break
p1, p2 = line.replace('uniprotkb:','').split('\t')[:2]
dataset.append(' '.join([p1,p2]))
i += 1
totalTime = time()-startTime
print '[*] Load '+state+' PPIs data('+str(i)+') from \''+filename+'\' in '+str(totalTime)+'s'
# K-fold Cross Validation
def KCV(self, k):
startTime = time()
self.k = k
self.cv = []
p = len(self.posi)
n = len(self.nega)
prange = range(p)
nrange = range(n)
random.shuffle(prange)
random.shuffle(nrange)
dp, mp = p / k, p % k
dn, mn = n / k, n %k
for i in xrange(k):
tmp = []
for jp in prange[i*dp:(i+1)*dp]:
tmp.append('+ '+self.posi[jp])
if i < mp:
tmp.append('+ '+self.posi[prange[-(i+1)]])
for jn in nrange[i*dn:(i+1)*dn]:
tmp.append('- '+self.nega[jn])
if i >= k - mn:
tmp.append('- '+self.nega[nrange[-(k-i)]])
self.cv.append(tmp)
totalTime = time()-startTime
print '[*] Set cross validation data (k='+str(k)+') in '+str(totalTime)+'s'
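# Illustrative note (added; not part of the original file): after KCV(k), self.cv
# holds k folds; each fold is a list of labeled pair strings such as
# '+ <proteinA> <proteinB>' (positive) or '- <proteinA> <proteinB>' (negative),
# with positives and negatives shuffled and split as evenly as possible.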
def done(self):
p = len(self.posi)
n = len(self.nega)
print '[*] Positive data: '+str(p)+', Negative data: '+str(n)+', Total: '+str(p+n)
def unittest(self):
pass
if __name__=='__main__':
traindata = Traindata()
traindata.load_intm('yeast.db.all.200908.intm', True)
traindata.load_intm('human.db.all.201008.intm', True)
traindata.load_intm('human.db.all.201108-201008.intm', True)
traindata.load_txt('combined.txt', False)
traindata.load_mitab('18509523_neg.mitab', False)
traindata.KCV(10)
traindata.done()
| gpl-2.0 | -9,218,771,574,054,769,000 | 26.121495 | 95 | 0.601309 | false |
galad-loth/LearnDescriptor | patchmatch/train_matchnet.py | 1 | 1806 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 11:28:35 2018
@author: galad-loth
"""
import mxnet as mx
import logging
import sys
from metric_net import match_net
from data import get_UBC_patch_dataiter
logging.basicConfig(level=logging.INFO)
root_logger = logging.getLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
root_logger.addHandler(stdout_handler)
root_logger.setLevel(logging.INFO)
def train_match_net():
datadir="D:\\_Datasets\\UBCPatch"
dataset="liberty"
gt_file="m50_100000_100000_0.txt"
batch_size=50
train_iter,val_iter=get_UBC_patch_dataiter(datadir, dataset,gt_file,
batch_size,"siam",True, 0.05)
model_prefix="checkpoint\\matchnet"
checkpoint = mx.callback.do_checkpoint(model_prefix)
eval_metric=mx.metric.Accuracy()
train_net=match_net(512,256)
train_mod = mx.mod.Module(train_net,context=mx.gpu(),
data_names=['data1','data2'],label_names=["loss_label"])
train_mod.bind(data_shapes=train_iter.provide_data,
label_shapes=train_iter.provide_label)
# train_mod.init_params()
train_mod.fit(train_data=train_iter,
eval_data=val_iter,
initializer =mx.initializer.Xavier(),
optimizer='sgd',
optimizer_params={'learning_rate':0.01,
"momentum":0.9,
"wd":0.005,
"lr_scheduler":mx.lr_scheduler.FactorScheduler(8000,0.9)},
eval_metric=eval_metric,
epoch_end_callback=checkpoint,
num_epoch=10)
if __name__=="__main__":
train_match_net()
| apache-2.0 | -2,724,554,486,041,508,400 | 32.075472 | 92 | 0.566445 | false |
nmayorov/scipy | scipy/integrate/_quad_vec.py | 3 | 20742 | import sys
import copy
import heapq
import collections
import functools
import numpy as np
from scipy._lib._util import MapWrapper
class LRUDict(collections.OrderedDict):
def __init__(self, max_size):
self.__max_size = max_size
def __setitem__(self, key, value):
existing_key = (key in self)
super(LRUDict, self).__setitem__(key, value)
if existing_key:
self.move_to_end(key)
elif len(self) > self.__max_size:
self.popitem(last=False)
def update(self, other):
# Not needed below
raise NotImplementedError()
class SemiInfiniteFunc(object):
"""
Argument transform from (start, +-oo) to (0, 1)
"""
def __init__(self, func, start, infty):
self._func = func
self._start = start
self._sgn = -1 if infty < 0 else 1
# Overflow threshold for the 1/t**2 factor
self._tmin = sys.float_info.min**0.5
def get_t(self, x):
z = self._sgn * (x - self._start) + 1
if z == 0:
# Can happen only if point not in range
return np.inf
return 1 / z
def __call__(self, t):
if t < self._tmin:
return 0.0
else:
x = self._start + self._sgn * (1 - t) / t
f = self._func(x)
return self._sgn * (f / t) / t
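# Note added for clarity (not in the original source): SemiInfiniteFunc applies the
# substitution x = start + sgn*(1 - t)/t with t in (0, 1], so dx = -sgn*dt/t**2 and
#   int_{start}^{sgn*inf} f(x) dx = int_0^1 sgn * f(start + sgn*(1 - t)/t) / t**2 dt,
# which is exactly the integrand returned by __call__ above.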
class DoubleInfiniteFunc(object):
"""
Argument transform from (-oo, oo) to (-1, 1)
"""
def __init__(self, func):
self._func = func
# Overflow threshold for the 1/t**2 factor
self._tmin = sys.float_info.min**0.5
def get_t(self, x):
s = -1 if x < 0 else 1
return s / (abs(x) + 1)
def __call__(self, t):
if abs(t) < self._tmin:
return 0.0
else:
x = (1 - abs(t)) / t
f = self._func(x)
return (f / t) / t
def _max_norm(x):
return np.amax(abs(x))
def _get_sizeof(obj):
try:
return sys.getsizeof(obj)
except TypeError:
# occurs on pypy
if hasattr(obj, '__sizeof__'):
return int(obj.__sizeof__())
return 64
class _Bunch(object):
def __init__(self, **kwargs):
self.__keys = kwargs.keys()
self.__dict__.update(**kwargs)
def __repr__(self):
return "_Bunch({})".format(", ".join("{}={}".format(k, repr(self.__dict__[k]))
for k in self.__keys))
def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6, limit=10000,
workers=1, points=None, quadrature=None, full_output=False):
r"""Adaptive integration of a vector-valued function.
Parameters
----------
f : callable
Vector-valued function f(x) to integrate.
a : float
Initial point.
b : float
Final point.
epsabs : float, optional
Absolute tolerance.
epsrel : float, optional
Relative tolerance.
norm : {'max', '2'}, optional
Vector norm to use for error estimation.
cache_size : int, optional
Number of bytes to use for memoization.
workers : int or map-like callable, optional
If `workers` is an integer, part of the computation is done in
parallel subdivided to this many tasks (using
:class:`python:multiprocessing.pool.Pool`).
Supply `-1` to use all cores available to the Process.
Alternatively, supply a map-like callable, such as
:meth:`python:multiprocessing.pool.Pool.map` for evaluating the
population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
points : list, optional
List of additional breakpoints.
quadrature : {'gk21', 'gk15', 'trapezoid'}, optional
Quadrature rule to use on subintervals.
Options: 'gk21' (Gauss-Kronrod 21-point rule),
'gk15' (Gauss-Kronrod 15-point rule),
'trapezoid' (composite trapezoid rule).
Default: 'gk21' for finite intervals and 'gk15' for (semi-)infinite
full_output : bool, optional
Return an additional ``info`` dictionary.
Returns
-------
res : {float, array-like}
Estimate for the result
err : float
Error estimate for the result in the given norm
info : dict
Returned only when ``full_output=True``.
Info dictionary. Is an object with the attributes:
success : bool
Whether integration reached target precision.
status : int
Indicator for convergence, success (0),
failure (1), and failure due to rounding error (2).
neval : int
Number of function evaluations.
intervals : ndarray, shape (num_intervals, 2)
Start and end points of subdivision intervals.
integrals : ndarray, shape (num_intervals, ...)
Integral for each interval.
Note that at most ``cache_size`` values are recorded,
            and the array may contain *nan* for missing items.
errors : ndarray, shape (num_intervals,)
Estimated integration error for each interval.
Notes
-----
The algorithm mainly follows the implementation of QUADPACK's
DQAG* algorithms, implementing global error control and adaptive
subdivision.
The algorithm here has some differences to the QUADPACK approach:
Instead of subdividing one interval at a time, the algorithm
subdivides N intervals with largest errors at once. This enables
(partial) parallelization of the integration.
The logic of subdividing "next largest" intervals first is then
not implemented, and we rely on the above extension to avoid
concentrating on "small" intervals only.
The Wynn epsilon table extrapolation is not used (QUADPACK uses it
for infinite intervals). This is because the algorithm here is
    supposed to work on vector-valued functions, in a user-specified
norm, and the extension of the epsilon algorithm to this case does
not appear to be widely agreed. For max-norm, using elementwise
Wynn epsilon could be possible, but we do not do this here with
the hope that the epsilon extrapolation is mainly useful in
special cases.
References
----------
[1] R. Piessens, E. de Doncker, QUADPACK (1983).
Examples
--------
We can compute integrations of a vector-valued function:
>>> from scipy.integrate import quad_vec
>>> import matplotlib.pyplot as plt
>>> alpha = np.linspace(0.0, 2.0, num=30)
>>> f = lambda x: x**alpha
>>> x0, x1 = 0, 2
>>> y, err = quad_vec(f, x0, x1)
>>> plt.plot(alpha, y)
>>> plt.xlabel(r"$\alpha$")
>>> plt.ylabel(r"$\int_{0}^{2} x^\alpha dx$")
>>> plt.show()
"""
a = float(a)
b = float(b)
# Use simple transformations to deal with integrals over infinite
# intervals.
kwargs = dict(epsabs=epsabs,
epsrel=epsrel,
norm=norm,
cache_size=cache_size,
limit=limit,
workers=workers,
points=points,
quadrature='gk15' if quadrature is None else quadrature,
full_output=full_output)
if np.isfinite(a) and np.isinf(b):
f2 = SemiInfiniteFunc(f, start=a, infty=b)
if points is not None:
kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
return quad_vec(f2, 0, 1, **kwargs)
elif np.isfinite(b) and np.isinf(a):
f2 = SemiInfiniteFunc(f, start=b, infty=a)
if points is not None:
kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
res = quad_vec(f2, 0, 1, **kwargs)
return (-res[0],) + res[1:]
elif np.isinf(a) and np.isinf(b):
sgn = -1 if b < a else 1
# NB. explicitly split integral at t=0, which separates
# the positive and negative sides
f2 = DoubleInfiniteFunc(f)
if points is not None:
kwargs['points'] = (0,) + tuple(f2.get_t(xp) for xp in points)
else:
kwargs['points'] = (0,)
if a != b:
res = quad_vec(f2, -1, 1, **kwargs)
else:
res = quad_vec(f2, 1, 1, **kwargs)
return (res[0]*sgn,) + res[1:]
elif not (np.isfinite(a) and np.isfinite(b)):
raise ValueError("invalid integration bounds a={}, b={}".format(a, b))
norm_funcs = {
None: _max_norm,
'max': _max_norm,
'2': np.linalg.norm
}
if callable(norm):
norm_func = norm
else:
norm_func = norm_funcs[norm]
mapwrapper = MapWrapper(workers)
parallel_count = 128
min_intervals = 2
try:
_quadrature = {None: _quadrature_gk21,
'gk21': _quadrature_gk21,
'gk15': _quadrature_gk15,
'trapz': _quadrature_trapezoid, # alias for backcompat
'trapezoid': _quadrature_trapezoid}[quadrature]
except KeyError as e:
raise ValueError("unknown quadrature {!r}".format(quadrature)) from e
# Initial interval set
if points is None:
initial_intervals = [(a, b)]
else:
prev = a
initial_intervals = []
for p in sorted(points):
p = float(p)
if not (a < p < b) or p == prev:
continue
initial_intervals.append((prev, p))
prev = p
initial_intervals.append((prev, b))
global_integral = None
global_error = None
rounding_error = None
interval_cache = None
intervals = []
neval = 0
for x1, x2 in initial_intervals:
ig, err, rnd = _quadrature(x1, x2, f, norm_func)
neval += _quadrature.num_eval
if global_integral is None:
if isinstance(ig, (float, complex)):
# Specialize for scalars
if norm_func in (_max_norm, np.linalg.norm):
norm_func = abs
global_integral = ig
global_error = float(err)
rounding_error = float(rnd)
cache_count = cache_size // _get_sizeof(ig)
interval_cache = LRUDict(cache_count)
else:
global_integral += ig
global_error += err
rounding_error += rnd
interval_cache[(x1, x2)] = copy.copy(ig)
intervals.append((-err, x1, x2))
heapq.heapify(intervals)
CONVERGED = 0
NOT_CONVERGED = 1
ROUNDING_ERROR = 2
NOT_A_NUMBER = 3
status_msg = {
CONVERGED: "Target precision reached.",
NOT_CONVERGED: "Target precision not reached.",
ROUNDING_ERROR: "Target precision could not be reached due to rounding error.",
NOT_A_NUMBER: "Non-finite values encountered."
}
# Process intervals
with mapwrapper:
ier = NOT_CONVERGED
while intervals and len(intervals) < limit:
# Select intervals with largest errors for subdivision
tol = max(epsabs, epsrel*norm_func(global_integral))
to_process = []
err_sum = 0
for j in range(parallel_count):
if not intervals:
break
if j > 0 and err_sum > global_error - tol/8:
# avoid unnecessary parallel splitting
break
interval = heapq.heappop(intervals)
neg_old_err, a, b = interval
old_int = interval_cache.pop((a, b), None)
to_process.append(((-neg_old_err, a, b, old_int), f, norm_func, _quadrature))
err_sum += -neg_old_err
# Subdivide intervals
for dint, derr, dround_err, subint, dneval in mapwrapper(_subdivide_interval, to_process):
neval += dneval
global_integral += dint
global_error += derr
rounding_error += dround_err
for x in subint:
x1, x2, ig, err = x
interval_cache[(x1, x2)] = ig
heapq.heappush(intervals, (-err, x1, x2))
# Termination check
if len(intervals) >= min_intervals:
tol = max(epsabs, epsrel*norm_func(global_integral))
if global_error < tol/8:
ier = CONVERGED
break
if global_error < rounding_error:
ier = ROUNDING_ERROR
break
if not (np.isfinite(global_error) and np.isfinite(rounding_error)):
ier = NOT_A_NUMBER
break
res = global_integral
err = global_error + rounding_error
if full_output:
res_arr = np.asarray(res)
dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
integrals = np.array([interval_cache.get((z[1], z[2]), dummy)
for z in intervals], dtype=res_arr.dtype)
errors = np.array([-z[0] for z in intervals])
intervals = np.array([[z[1], z[2]] for z in intervals])
info = _Bunch(neval=neval,
success=(ier == CONVERGED),
status=ier,
message=status_msg[ier],
intervals=intervals,
integrals=integrals,
errors=errors)
return (res, err, info)
else:
return (res, err)
def _subdivide_interval(args):
interval, f, norm_func, _quadrature = args
old_err, a, b, old_int = interval
c = 0.5 * (a + b)
# Left-hand side
if getattr(_quadrature, 'cache_size', 0) > 0:
f = functools.lru_cache(_quadrature.cache_size)(f)
s1, err1, round1 = _quadrature(a, c, f, norm_func)
dneval = _quadrature.num_eval
s2, err2, round2 = _quadrature(c, b, f, norm_func)
dneval += _quadrature.num_eval
if old_int is None:
old_int, _, _ = _quadrature(a, b, f, norm_func)
dneval += _quadrature.num_eval
if getattr(_quadrature, 'cache_size', 0) > 0:
dneval = f.cache_info().misses
dint = s1 + s2 - old_int
derr = err1 + err2 - old_err
dround_err = round1 + round2
subintervals = ((a, c, s1, err1), (c, b, s2, err2))
return dint, derr, dround_err, subintervals, dneval
def _quadrature_trapezoid(x1, x2, f, norm_func):
"""
Composite trapezoid quadrature
"""
x3 = 0.5*(x1 + x2)
f1 = f(x1)
f2 = f(x2)
f3 = f(x3)
s2 = 0.25 * (x2 - x1) * (f1 + 2*f3 + f2)
round_err = 0.25 * abs(x2 - x1) * (float(norm_func(f1))
+ 2*float(norm_func(f3))
+ float(norm_func(f2))) * 2e-16
s1 = 0.5 * (x2 - x1) * (f1 + f2)
err = 1/3 * float(norm_func(s1 - s2))
return s2, err, round_err
_quadrature_trapezoid.cache_size = 3 * 3
_quadrature_trapezoid.num_eval = 3
def _quadrature_gk(a, b, f, norm_func, x, w, v):
"""
Generic Gauss-Kronrod quadrature
"""
fv = [0.0]*len(x)
c = 0.5 * (a + b)
h = 0.5 * (b - a)
# Gauss-Kronrod
s_k = 0.0
s_k_abs = 0.0
for i in range(len(x)):
ff = f(c + h*x[i])
fv[i] = ff
vv = v[i]
# \int f(x)
s_k += vv * ff
# \int |f(x)|
s_k_abs += vv * abs(ff)
# Gauss
s_g = 0.0
for i in range(len(w)):
s_g += w[i] * fv[2*i + 1]
# Quadrature of abs-deviation from average
s_k_dabs = 0.0
y0 = s_k / 2.0
for i in range(len(x)):
# \int |f(x) - y0|
s_k_dabs += v[i] * abs(fv[i] - y0)
# Use similar error estimation as quadpack
err = float(norm_func((s_k - s_g) * h))
dabs = float(norm_func(s_k_dabs * h))
if dabs != 0 and err != 0:
err = dabs * min(1.0, (200 * err / dabs)**1.5)
eps = sys.float_info.epsilon
round_err = float(norm_func(50 * eps * h * s_k_abs))
if round_err > sys.float_info.min:
err = max(err, round_err)
return h * s_k, err, round_err
def _quadrature_gk21(a, b, f, norm_func):
"""
Gauss-Kronrod 21 quadrature with error estimate
"""
# Gauss-Kronrod points
x = (0.995657163025808080735527280689003,
0.973906528517171720077964012084452,
0.930157491355708226001207180059508,
0.865063366688984510732096688423493,
0.780817726586416897063717578345042,
0.679409568299024406234327365114874,
0.562757134668604683339000099272694,
0.433395394129247190799265943165784,
0.294392862701460198131126603103866,
0.148874338981631210884826001129720,
0,
-0.148874338981631210884826001129720,
-0.294392862701460198131126603103866,
-0.433395394129247190799265943165784,
-0.562757134668604683339000099272694,
-0.679409568299024406234327365114874,
-0.780817726586416897063717578345042,
-0.865063366688984510732096688423493,
-0.930157491355708226001207180059508,
-0.973906528517171720077964012084452,
-0.995657163025808080735527280689003)
# 10-point weights
w = (0.066671344308688137593568809893332,
0.149451349150580593145776339657697,
0.219086362515982043995534934228163,
0.269266719309996355091226921569469,
0.295524224714752870173892994651338,
0.295524224714752870173892994651338,
0.269266719309996355091226921569469,
0.219086362515982043995534934228163,
0.149451349150580593145776339657697,
0.066671344308688137593568809893332)
# 21-point weights
v = (0.011694638867371874278064396062192,
0.032558162307964727478818972459390,
0.054755896574351996031381300244580,
0.075039674810919952767043140916190,
0.093125454583697605535065465083366,
0.109387158802297641899210590325805,
0.123491976262065851077958109831074,
0.134709217311473325928054001771707,
0.142775938577060080797094273138717,
0.147739104901338491374841515972068,
0.149445554002916905664936468389821,
0.147739104901338491374841515972068,
0.142775938577060080797094273138717,
0.134709217311473325928054001771707,
0.123491976262065851077958109831074,
0.109387158802297641899210590325805,
0.093125454583697605535065465083366,
0.075039674810919952767043140916190,
0.054755896574351996031381300244580,
0.032558162307964727478818972459390,
0.011694638867371874278064396062192)
return _quadrature_gk(a, b, f, norm_func, x, w, v)
_quadrature_gk21.num_eval = 21
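# Illustrative sanity check (added; not part of the original module): the rule can
# be applied directly to a scalar integrand, e.g.
#   approx, err, round_err = _quadrature_gk21(0.0, np.pi, np.sin, abs)
# where approx should be close to 2.0 and err is the Gauss-Kronrod error estimate.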
def _quadrature_gk15(a, b, f, norm_func):
"""
Gauss-Kronrod 15 quadrature with error estimate
"""
# Gauss-Kronrod points
x = (0.991455371120812639206854697526329,
0.949107912342758524526189684047851,
0.864864423359769072789712788640926,
0.741531185599394439863864773280788,
0.586087235467691130294144838258730,
0.405845151377397166906606412076961,
0.207784955007898467600689403773245,
0.000000000000000000000000000000000,
-0.207784955007898467600689403773245,
-0.405845151377397166906606412076961,
-0.586087235467691130294144838258730,
-0.741531185599394439863864773280788,
-0.864864423359769072789712788640926,
-0.949107912342758524526189684047851,
-0.991455371120812639206854697526329)
# 7-point weights
w = (0.129484966168869693270611432679082,
0.279705391489276667901467771423780,
0.381830050505118944950369775488975,
0.417959183673469387755102040816327,
0.381830050505118944950369775488975,
0.279705391489276667901467771423780,
0.129484966168869693270611432679082)
# 15-point weights
v = (0.022935322010529224963732008058970,
0.063092092629978553290700663189204,
0.104790010322250183839876322541518,
0.140653259715525918745189590510238,
0.169004726639267902826583426598550,
0.190350578064785409913256402421014,
0.204432940075298892414161999234649,
0.209482141084727828012999174891714,
0.204432940075298892414161999234649,
0.190350578064785409913256402421014,
0.169004726639267902826583426598550,
0.140653259715525918745189590510238,
0.104790010322250183839876322541518,
0.063092092629978553290700663189204,
0.022935322010529224963732008058970)
return _quadrature_gk(a, b, f, norm_func, x, w, v)
_quadrature_gk15.num_eval = 15
| bsd-3-clause | -1,173,841,174,148,520,200 | 31.460094 | 102 | 0.584177 | false |
marcelocure/octopusapi | octopusapi/middleware.py | 1 | 1905 | import json
import logging
import falcon
class StorageError(Exception):
@staticmethod
def handle(ex, req, resp, params):
description = ('Sorry, could not store the message, it worked on my machine')
raise falcon.HTTPError(falcon.HTTP_725, 'Database Error', description)
class AuthMiddleware(object):
def process_request(self, req, resp):
pass
class RequireJSON(object):
def process_request(self, req, resp):
if not req.client_accepts_json:
raise falcon.HTTPNotAcceptable('This API only supports responses encoded as JSON.')
if req.method in ('POST'):
if 'application/json' not in req.content_type:
raise falcon.HTTPUnsupportedMediaType('This API only supports requests encoded as JSON.')
class JSONTranslator(object):
def process_request(self, req, resp):
if req.content_length in (None, 0):
return
body = req.stream.read()
if not body:
raise falcon.HTTPBadRequest('Empty request body', 'A valid JSON document is required.')
try:
req.context['doc'] = json.loads(body.decode('utf-8'))
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPError(falcon.HTTP_753, 'Malformed JSON', 'Could not decode the request body. The JSON was incorrect or not encoded as UTF-8.')
def process_response(self, req, resp, resource):
if 'result' not in req.context:
return
resp.body = json.dumps(req.context['result'])
def max_body(limit):
def hook(req, resp, resource, params):
length = req.content_length
if length is not None and length > limit:
msg = ('The size of the request is too large. The body must not exceed ' + str(limit) + ' bytes in length.')
raise falcon.HTTPRequestEntityTooLarge('Request body is too large', msg)
return hook
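# Illustrative wiring, shown as a minimal sketch rather than the project's real
# application setup: the middleware classes and the error handler above are
# typically registered on the falcon application object, and `max_body` is
# attached to individual responders via falcon's `before` decorator.
# `MessageResource` and the '/messages' route are hypothetical names used only
# for this example.
if __name__ == '__main__':  # pragma: no cover
    app = falcon.API(middleware=[AuthMiddleware(), RequireJSON(), JSONTranslator()])
    app.add_error_handler(StorageError, StorageError.handle)

    class MessageResource(object):
        @falcon.before(max_body(64 * 1024))
        def on_post(self, req, resp):
            # The hook has already rejected bodies larger than 64 KiB here.
            resp.status = falcon.HTTP_201

    app.add_route('/messages', MessageResource())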
| mit | -8,678,748,957,230,808,000 | 33.017857 | 155 | 0.651969 | false |
scheibler/serieSandSubs | src/main.py | 1 | 19793 | #!/usr/bin/env python
import subprocess
import os
import sys
import time
import thread
import argparse # parse the command line parameters
import string
import re # regex
import select
import logging
# non standard librarys
from subtitle_manager import SubtitleManager
from series_manager import SeriesManager
from single_file_manager import SingleFileManager
from config import Config
import helper
from configobj import ConfigObj # parse ini files
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf8')
def control_background_process():
"""
    Control subtitle sending and position remembering.
    This method is started in its own thread and runs in parallel to MPlayer.
"""
config = sys.modules['Config'].get_config()
if config['subtitles']['instance'] == None:
show_subtitles = False
else:
show_subtitles = True
# print start message
# get video length
parsing_error_counter = 0
video_length = 0
while True:
video_length = helper.send_command_to_mplayer("videolength")
if video_length > 0:
parsing_error_counter = 0
break
if parsing_error_counter >= 5:
logging.critical("Can't get the length of the video file, MPlayer not accessible")
helper.clean_and_exit(1)
parsing_error_counter = parsing_error_counter +1
time.sleep(5)
# get video artist
video_artist = None
parsing_error_counter = 0
while True:
video_artist = helper.send_command_to_mplayer("get_artist")
if type(video_artist) == type("") or video_artist == None:
parsing_error_counter = 0
break
if parsing_error_counter >= 20:
logging.critical("Can't get the artist of the video file, MPlayer not accessible.")
helper.clean_and_exit(1)
parsing_error_counter = parsing_error_counter +1
time.sleep(0.5)
parsing_error_counter = 0
video_title = None
while True:
video_title = helper.send_command_to_mplayer("get_title")
if type(video_title) == type("") or video_title == None:
parsing_error_counter = 0
break
if parsing_error_counter >= 10:
logging.critical("Can't get the title of the video file")
helper.clean_and_exit(1)
parsing_error_counter = parsing_error_counter +1
time.sleep(0.5)
print "Playing: " + config['paths']['full_media_file']
if video_title != None and video_artist != None:
print "Artist: %s\nTitle: %s\nLength: %.2d:%.2d" % (video_artist, video_title, video_length/60, video_length%60)
elif video_title != None:
print "Title: %s\nLength: %.2d:%.2d" % (video_title, video_length/60, video_length%60)
else:
print "Title: %s\nLength: %.2d:%.2d" % (os.path.basename(config['paths']['full_media_file']), video_length/60, video_length%60)
print "\n"
old_sub = ""
last_video_pos = 0
count_pause_cycles = 0
print_position = 0
last_sleep_timer_success = 0
number_of_beeps = 0
while True:
if config['media_manager']['end_of_video'] == True:
return True
current_video_pos = helper.send_command_to_mplayer("currentpos")
subtitle_visibility = helper.send_command_to_mplayer("subtitle_visibility")
if current_video_pos < 0:
if parsing_error_counter == 15:
logging.warning("Can't parse the MPlayer output")
if parsing_error_counter == 30:
logging.warning("Still can't parse the MPlayer output")
if parsing_error_counter == 60:
logging.critical("Lost the MPlayer access")
helper.clean_and_exit(1)
parsing_error_counter = parsing_error_counter +1
time.sleep(0.5)
continue
else:
parsing_error_counter = 0
# turn on / off subtitles
if config['subtitles']['instance'] != None:
if subtitle_visibility == 0 and show_subtitles == True:
show_subtitles = False
print "\nsubtitles deactivated"
config['subtitles']['instance'].send_msg(config['subtitles']['recipients'], "subtitles deactivated")
if subtitle_visibility == 1 and show_subtitles == False:
show_subtitles = True
print "\nsubtitles activated"
config['subtitles']['instance'].send_msg(config['subtitles']['recipients'], "subtitles activated")
# sleep timer
if config['media_manager']['sleep_timer'] == True:
diff = abs(current_video_pos - last_video_pos)
# reset if video was paused or user jumped more than 15 seconds
if diff == 0 or diff > 15:
last_sleep_timer_success = current_video_pos
number_of_beeps = 0
if (last_sleep_timer_success + config['media_manager']['sleep time interval'] * 60 - 30) < current_video_pos and number_of_beeps == 0:
subprocess.call([mplayer_path, "-quiet", config['paths']['beep']], \
stdout=open("/dev/null", "w"), stderr=open("/dev/null", "w"))
number_of_beeps += 1
if (last_sleep_timer_success + config['media_manager']['sleep time interval'] * 60 - 20) < current_video_pos and number_of_beeps == 1:
subprocess.call([mplayer_path, "-quiet", config['paths']['beep']], \
stdout=open("/dev/null", "w"), stderr=open("/dev/null", "w"))
number_of_beeps += 1
if (last_sleep_timer_success + config['media_manager']['sleep time interval'] * 60 - 10) < current_video_pos and number_of_beeps == 2:
subprocess.call([mplayer_path, "-quiet", config['paths']['beep']], \
stdout=open("/dev/null", "w"), stderr=open("/dev/null", "w"))
number_of_beeps += 1
if (last_sleep_timer_success + config['media_manager']['sleep time interval'] * 60) < current_video_pos:
config['media_manager']['closed_by_sleep_timer'] = True
helper.send_command_to_mplayer("quit")
# if the file is playing (no pause)
if current_video_pos - last_video_pos != 0:
print "\rCurrent position: %.2d:%.2d of %.2d:%.2d (%.1f%%)" % \
(current_video_pos/60, current_video_pos%60, \
video_length/60, video_length%60, \
current_video_pos/video_length*100),; sys.stdout.flush()
# subtitles
if show_subtitles == True:
sub = config['subtitles']['instance'].get_current_subtitle( \
current_video_pos - config['subtitles']['delay'])
if sub != old_sub and sub != "":
config['subtitles']['instance'].send_msg(config['subtitles']['recipients'], sub)
old_sub = sub
# current video position
# save the current position every 5 seconds
if abs(current_video_pos - config['media_manager']['instance'].get_playback_position(config['paths']['full_media_file'])[0]) > 5:
updated = config['media_manager']['instance'].update_playback_position(config['paths']['full_media_file'], current_video_pos, video_length)
if updated == False:
logging.error("Main.control_background_process: can't write the current playback position")
break
last_video_pos = current_video_pos
time.sleep(0.33)
########################
# start of the main part
########################
# create the args parser
parser = argparse.ArgumentParser(description="Series manager and XMPP subtitle distributor")
parser.add_argument("-f", "--from-beginning", action="store_true",
                    help="Start the media file from the beginning, regardless of any remembered position")
parser.add_argument("-s", "--subtitle-file",
help="Select a subtitle file. If no file is given the program tries to find a file based on the name of the given media file")
parser.add_argument("-d", "--subtitle-delay",
help="Specify a delay in seconds for the subtitles")
parser.add_argument("-a", "--add-series", action="store_true",
                    help="If this option is set, the chosen media file is added as the start point of a series")
parser.add_argument("-p", "--persistent", action="store_true",
help="Normally a series is deleted automatically after watching the last episode.\
If this option is set, the series entry will persist in the list until it is deleted by the user (only useful with the -a option)")
parser.add_argument("-c", "--continuous-playback", action="store_true",
                    help="Continuous playback of the chosen series")
parser.add_argument("-t", "--sleep-timer", action="store_true",
                    help="Turn on sleep timer. Then you must stop and start the movie at certain \
            intervals to verify that you are still awake. Otherwise the playback will stop.")
parser.add_argument("-v", "--verbose", action="store_true",
                    help="Shows the error messages of MPlayer and increases the program's output")
parser.add_argument("-V", "--version", action="store_true",
help="Get current program version")
parser.add_argument("mediafile", nargs="?", default="",
                    help="specify a media file name or specify parts of a series which should be resumed (must be enclosed in \"\").\
If nothing is entered, the program will list all saved series")
# load the config file
sys.modules['Config'] = Config()
config = sys.modules['Config'].get_config()
# check if the mplayer is installed
mplayer_path = string.split(subprocess.check_output(["whereis", "mplayer"]), " ")
try:
mplayer_path = mplayer_path[1]
except IndexError as e:
logging.critical("The Mplayer is not installed, please do that first")
helper.clean_and_exit(1)
# initialize the mplayer command input fifo
mplayer_command_fifo = config['paths']['mplayer_input']
if os.path.exists(mplayer_command_fifo) == False:
try:
os.mkfifo(mplayer_command_fifo)
except OSError as e:
logging.critical("Can't create Mplayer command input fifo\n" + str(e))
helper.clean_and_exit(1)
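# The fifo created above is handed to MPlayer via "-input file=<fifo>" further
# below.  helper.send_command_to_mplayer() (not shown in this file) presumably
# writes MPlayer slave-mode commands into this fifo (e.g. the equivalents of
# "get_time_pos" and "get_time_length") and parses the replies from the MPlayer
# output file created next.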
# create Mplayer output file
mplayer_output_filename = config['paths']['mplayer_output']
try:
mplayer_output_file = open(mplayer_output_filename,"w")
except IOError as e:
logging.critical("Can't create Mplayer output file\n" + str(e))
helper.clean_and_exit(1)
# create the file for the series
if os.path.exists(config['paths']['series_file']) == False:
try:
last_positions_file = open(config['paths']['series_file'],"w")
last_positions_file.close()
except IOError as e:
logging.critical("Can't create file for the series file\n" + str(e))
helper.clean_and_exit(1)
# create the file for the single files
if os.path.exists(config['paths']['single_pos_file']) == False:
try:
last_positions_file = open(config['paths']['single_pos_file'],"w")
last_positions_file.close()
except IOError as e:
logging.critical("Can't create file for the single file\n" + str(e))
helper.clean_and_exit(1)
##################################
# parse the command line arguments
args = parser.parse_args()
# version
if args.version == True:
print "serieSandSubs version 0.2.1"
helper.clean_and_exit(0)
# verbosity
if args.verbose == True:
mplayer_error_path = None
logging.basicConfig(level=logging.INFO)
else:
mplayer_error_path = open(os.devnull, "w")
# process the entered value for the media file
media_file = args.mediafile
# four possible variants for resuming a series
# 1. The user explicitely wants to add the entered media file as the start point of a series
# the media file exists and the -a flag is set
# then with the "get_current_episode" method get the full path for the media file
# after that save the next episode with the "find_next_episode" method
if args.add_series == True and os.path.isfile(media_file) == True:
# create the SeriesManager object
config['media_manager']['instance'] = SeriesManager(config['paths']['series_file'])
config['media_manager']['instance'].clean_series_file()
config['paths']['full_media_file'] = os.path.realpath(media_file)
config['media_manager']['instance'].store_series(config['paths']['full_media_file'], True, args.persistent)
# 2. same situation as in option 1, but the media file does not exist, so exit the program
elif args.add_series == True and os.path.isfile(media_file) == False:
    logging.critical("Adding a new series was not possible, file " + media_file + " not found")
helper.clean_and_exit(1)
# 3. the user doesn't want to add a new series but also entered no valid media file,
# so the user wants to view the next episode of a saved series
# the system lists all matched series, starts the chosen one and also finds the next episode
elif args.add_series == False and os.path.isfile(media_file) == False:
# create the SeriesManager object
config['media_manager']['instance'] = SeriesManager(config['paths']['series_file'])
config['media_manager']['instance'].clean_series_file()
config['paths']['full_media_file'] = config['media_manager']['instance'].choose_series(media_file)
config['media_manager']['instance'].store_series(config['paths']['full_media_file'], False, False)
# 4. no add flag set and valid media file
# system starts the file in the normal mode, no resuming of a series
else:
# create the single files manager object
config['media_manager']['instance'] = SingleFileManager(config['paths']['single_pos_file'])
config['media_manager']['instance'].clean_series_file()
config['paths']['full_media_file'] = os.path.realpath(media_file)
config['media_manager']['instance'].store_series(config['paths']['full_media_file'], False, False)
# sleep timer
if args.sleep_timer == True:
config['media_manager']['sleep_timer'] = True
while True:
# should the file definitely played from beginning
# if not, search for a potentially saved position in the remember_last_position file
if args.from_beginning == True or config['general']['activate remember positions'].lower() == "no":
start_at = 0
else:
start_at = config['media_manager']['instance'].get_playback_position(config['paths']['full_media_file'])[0]
# subtitle file
if args.subtitle_file != None:
subtitle_filename = args.subtitle_file
if os.path.exists(subtitle_filename) == False:
logging.critical("The entered subtitle file " + subtitle_filename + " does not exist")
helper.clean_and_exit(1)
else:
media_file_without_ext = os.path.splitext(config['paths']['full_media_file'])
if os.path.exists(media_file_without_ext[0] + ".srt") == True:
subtitle_filename = media_file_without_ext[0] + ".srt"
else:
subtitle_filename = ""
# strip html tags from subtitle file
if subtitle_filename != "":
helper.strip_html_from_subtitle_file(subtitle_filename)
# subtitle offset
if args.subtitle_delay != None:
try:
config['subtitles']['delay'] = float(args.subtitle_delay)
except ValueError as e:
logging.critical("The subtitle delay must be a float")
helper.clean_and_exit(1)
else:
        sys.stdout.write("Enter an offset value for the subtitle file and press ENTER. For no offset, enter nothing: ")
sys.stdout.flush()
i, o, e = select.select( [sys.stdin], [], [], 15)
if (i):
input = sys.stdin.readline().strip()
if input != "":
try:
config['subtitles']['delay'] = float(input)
except ValueError as e:
logging.critical("The subtitle delay must be a float")
helper.clean_and_exit(1)
print "\nSubtitle offset: %.1f seconds" % config['subtitles']['delay']
###########################################
# create the instance of the subtitles manager
if subtitle_filename != "" and config['general']['activate subtitles'].lower() == "yes":
config['subtitles']['instance'] = SubtitleManager(config['subtitles']['sender name'], config['subtitles']['sender password'], subtitle_filename)
# start the background process
config['media_manager']['end_of_video'] = False
thread.start_new_thread(control_background_process, ())
# start the Mplayer
try:
if subtitle_filename == "":
subprocess.call([mplayer_path, "-af", "scaletempo=scale=1", "-ss", str(start_at),
"-quiet", "-input", "file=" + mplayer_command_fifo,
"-subdelay", str(config['subtitles']['delay']),
config['paths']['full_media_file']],
stdout=mplayer_output_file, stderr=mplayer_error_path)
else:
subprocess.call([mplayer_path, "-af", "scaletempo=scale=1", "-sub", subtitle_filename,
"-ss", str(start_at),"-quiet", "-input", "file=" + mplayer_command_fifo,
"-subdelay", str(config['subtitles']['delay']),
config['paths']['full_media_file']],
stdout=mplayer_output_file, stderr=mplayer_error_path)
except OSError as e:
logging.critical("Can't start Mplayer\n" + str(e))
mplayer_output_file.close()
helper.clean_and_exit(1)
config['media_manager']['end_of_video'] = True
if args.continuous_playback == True and config['media_manager']['closed_by_sleep_timer'] == False:
print("\nThe next episode starts automatically in %d seconds, press ENTER or Space to begin \
immediately or press ESC or q to quit: " % config['media_manager']['pause between continuous playback'])
quit = False
while True:
i, o, e = select.select( [sys.stdin], [], [], config['media_manager']['pause between continuous playback'])
if (i):
key = ord(sys.stdin.read(1))
if key == 10 or key == 32:
print "OK\n"
break
if key == 113 or key == 27:
quit = True
break
else:
print "OK\n"
break
if quit == True:
break
current_pos = config['media_manager']['instance'].get_playback_position(config['paths']['full_media_file'])
if current_pos[1] > 0 and (current_pos[1] - current_pos[0]) < 90:
config['paths']['full_media_file'] = config['media_manager']['instance'].get_next_episode(config['paths']['full_media_file'])
config['media_manager']['instance'].clean_series_file()
if config['paths']['full_media_file'] == None:
print "No further episodes available"
break
else:
config['media_manager']['instance'].store_series(config['paths']['full_media_file'], False, False)
else:
break
# cleanup
mplayer_output_file.close()
helper.clean_and_exit(0)
| gpl-3.0 | 1,359,142,456,822,473,200 | 46.465228 | 159 | 0.609711 | false |
myarjunar/QGIS | python/plugins/processing/gui/GetScriptsAndModels.py | 1 | 15404 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GetScriptsAndModels.py
---------------------
Date : June 2014
Copyright : (C) 2014 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
from builtins import range
__author__ = 'Victor Olaya'
__date__ = 'June 2014'
__copyright__ = '(C) 2014, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import json
from functools import partial
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt, QCoreApplication, QUrl
from qgis.PyQt.QtGui import QCursor
from qgis.PyQt.QtWidgets import QApplication, QTreeWidgetItem, QPushButton, QMessageBox
from qgis.PyQt.QtNetwork import QNetworkReply, QNetworkRequest
from qgis.utils import iface, show_message_log
from qgis.core import (QgsNetworkAccessManager,
QgsMessageLog,
QgsApplication)
from qgis.gui import QgsMessageBar
from processing.core.alglist import algList
from processing.core.ProcessingConfig import ProcessingConfig
from processing.gui.ToolboxAction import ToolboxAction
from processing.gui import Help2Html
from processing.gui.Help2Html import getDescription, ALG_DESC, ALG_VERSION, ALG_CREATOR
from processing.script.ScriptUtils import ScriptUtils
from processing.algs.r.RUtils import RUtils
from processing.modeler.ModelerUtils import ModelerUtils
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgGetScriptsAndModels.ui'))
class GetScriptsAction(ToolboxAction):
def __init__(self):
self.name, self.i18n_name = self.trAction('Get scripts from on-line scripts collection')
self.group, self.i18n_group = self.trAction('Tools')
def getIcon(self):
return QgsApplication.getThemeIcon("/processingScript.svg")
def execute(self):
repoUrl = ProcessingConfig.getSetting(ProcessingConfig.MODELS_SCRIPTS_REPO)
if repoUrl is None or repoUrl == '':
QMessageBox.warning(None,
self.tr('Repository error'),
self.tr('Scripts and models repository is not configured.'))
return
dlg = GetScriptsAndModelsDialog(GetScriptsAndModelsDialog.SCRIPTS)
dlg.exec_()
if dlg.updateProvider:
algList.reloadProvider('script')
class GetRScriptsAction(ToolboxAction):
def __init__(self):
self.name, self.i18n_name = self.trAction('Get R scripts from on-line scripts collection')
self.group, self.i18n_group = self.trAction('Tools')
def getIcon(self):
return QgsApplication.getThemeIcon("/providerR.svg")
def execute(self):
repoUrl = ProcessingConfig.getSetting(ProcessingConfig.MODELS_SCRIPTS_REPO)
if repoUrl is None or repoUrl == '':
QMessageBox.warning(None,
self.tr('Repository error'),
self.tr('Scripts and models repository is not configured.'))
return
dlg = GetScriptsAndModelsDialog(GetScriptsAndModelsDialog.RSCRIPTS)
dlg.exec_()
if dlg.updateProvider:
self.toolbox.updateProvider('r')
class GetModelsAction(ToolboxAction):
def __init__(self):
self.name, self.i18n_name = self.trAction('Get models from on-line scripts collection')
self.group, self.i18n_group = self.trAction('Tools')
def getIcon(self):
return QgsApplication.getThemeIcon("/processingModel.svg")
def execute(self):
repoUrl = ProcessingConfig.getSetting(ProcessingConfig.MODELS_SCRIPTS_REPO)
if repoUrl is None or repoUrl == '':
QMessageBox.warning(None,
self.tr('Repository error'),
self.tr('Scripts and models repository is not configured.'))
return
dlg = GetScriptsAndModelsDialog(GetScriptsAndModelsDialog.MODELS)
dlg.exec_()
if dlg.updateProvider:
algList.reloadProvider('model')
class GetScriptsAndModelsDialog(BASE, WIDGET):
HELP_TEXT = QCoreApplication.translate('GetScriptsAndModelsDialog',
'<h3> Processing resources manager </h3>'
'<p>Check/uncheck algorithms in the tree to select the ones that you '
'want to install or remove</p>'
'<p>Algorithms are divided in 3 groups:</p>'
'<ul><li><b>Installed:</b> Algorithms already in your system, with '
'the latest version available</li>'
'<li><b>Updatable:</b> Algorithms already in your system, but with '
'a newer version available in the server</li>'
'<li><b>Not installed:</b> Algorithms not installed in your '
'system</li></ul>')
MODELS = 0
SCRIPTS = 1
RSCRIPTS = 2
tr_disambiguation = {0: 'GetModelsAction',
1: 'GetScriptsAction',
2: 'GetRScriptsAction'}
def __init__(self, resourceType):
super(GetScriptsAndModelsDialog, self).__init__(iface.mainWindow())
self.setupUi(self)
if hasattr(self.leFilter, 'setPlaceholderText'):
self.leFilter.setPlaceholderText(self.tr('Search...'))
self.manager = QgsNetworkAccessManager.instance()
repoUrl = ProcessingConfig.getSetting(ProcessingConfig.MODELS_SCRIPTS_REPO)
self.resourceType = resourceType
if self.resourceType == self.MODELS:
self.folder = ModelerUtils.modelsFolders()[0]
self.urlBase = '{}/models/'.format(repoUrl)
self.icon = QgsApplication.getThemeIcon("/processingModel.svg")
elif self.resourceType == self.SCRIPTS:
self.folder = ScriptUtils.scriptsFolders()[0]
self.urlBase = '{}/scripts/'.format(repoUrl)
self.icon = QgsApplication.getThemeIcon("/processingScript.svg")
else:
self.folder = RUtils.RScriptsFolders()[0]
self.urlBase = '{}/rscripts/'.format(repoUrl)
self.icon = QgsApplication.getThemeIcon("/providerR.svg")
self.lastSelectedItem = None
self.updateProvider = False
self.data = None
self.populateTree()
self.buttonBox.accepted.connect(self.okPressed)
self.buttonBox.rejected.connect(self.cancelPressed)
self.tree.currentItemChanged.connect(self.currentItemChanged)
self.leFilter.textChanged.connect(self.fillTree)
def popupError(self, error=None, url=None):
"""Popups an Error message bar for network errors."""
disambiguation = self.tr_disambiguation[self.resourceType]
widget = iface.messageBar().createMessage(self.tr('Connection problem', disambiguation),
self.tr('Could not connect to scripts/models repository', disambiguation))
if error and url:
QgsMessageLog.logMessage(self.tr(u"Network error code: {} on URL: {}").format(error, url), self.tr(u"Processing"), QgsMessageLog.CRITICAL)
button = QPushButton(QCoreApplication.translate("Python", "View message log"), pressed=show_message_log)
widget.layout().addWidget(button)
iface.messageBar().pushWidget(widget, level=QgsMessageBar.CRITICAL, duration=5)
def grabHTTP(self, url, loadFunction, arguments=None):
"""Grab distant content via QGIS internal classes and QtNetwork."""
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
request = QUrl(url)
reply = self.manager.get(QNetworkRequest(request))
if arguments:
reply.finished.connect(partial(loadFunction, reply, arguments))
else:
reply.finished.connect(partial(loadFunction, reply))
while not reply.isFinished():
QCoreApplication.processEvents()
def populateTree(self):
self.grabHTTP(self.urlBase + 'list.txt', self.treeLoaded)
def treeLoaded(self, reply):
"""
        Update the tree of scripts/models whenever
        the HTTP request is finished.
"""
QApplication.restoreOverrideCursor()
if reply.error() != QNetworkReply.NoError:
self.popupError(reply.error(), reply.request().url().toString())
else:
resources = bytes(reply.readAll()).decode('utf8').splitlines()
resources = [r.split(',', 2) for r in resources]
self.resources = {f: (v, n) for f, v, n in resources}
reply.deleteLater()
self.fillTree()
def fillTree(self):
self.tree.clear()
self.uptodateItem = QTreeWidgetItem()
self.uptodateItem.setText(0, self.tr('Installed'))
self.toupdateItem = QTreeWidgetItem()
self.toupdateItem.setText(0, self.tr('Updatable'))
self.notinstalledItem = QTreeWidgetItem()
self.notinstalledItem.setText(0, self.tr('Not installed'))
self.toupdateItem.setIcon(0, self.icon)
self.uptodateItem.setIcon(0, self.icon)
self.notinstalledItem.setIcon(0, self.icon)
text = str(self.leFilter.text())
for i in sorted(list(self.resources.keys()), key=lambda kv: kv[2].lower()):
filename = i
version = self.resources[filename][0]
name = self.resources[filename][1]
treeBranch = self.getTreeBranchForState(filename, float(version))
if text == '' or text.lower() in filename.lower():
item = TreeItem(filename, name, self.icon)
treeBranch.addChild(item)
if treeBranch != self.notinstalledItem:
item.setCheckState(0, Qt.Checked)
self.tree.addTopLevelItem(self.toupdateItem)
self.tree.addTopLevelItem(self.notinstalledItem)
self.tree.addTopLevelItem(self.uptodateItem)
if text != '':
self.tree.expandAll()
self.txtHelp.setHtml(self.HELP_TEXT)
def setHelp(self, reply, item):
"""Change the HTML content"""
QApplication.restoreOverrideCursor()
if reply.error() != QNetworkReply.NoError:
html = self.tr('<h2>No detailed description available for this script</h2>')
else:
content = bytes(reply.readAll()).decode('utf8')
descriptions = json.loads(content)
html = '<h2>%s</h2>' % item.name
html += self.tr('<p><b>Description:</b> {0}</p>').format(getDescription(ALG_DESC, descriptions))
html += self.tr('<p><b>Created by:</b> {0}').format(getDescription(ALG_CREATOR, descriptions))
html += self.tr('<p><b>Version:</b> {0}').format(getDescription(ALG_VERSION, descriptions))
reply.deleteLater()
self.txtHelp.setHtml(html)
def currentItemChanged(self, item, prev):
if isinstance(item, TreeItem):
url = self.urlBase + item.filename.replace(' ', '%20') + '.help'
self.grabHTTP(url, self.setHelp, item)
else:
self.txtHelp.setHtml(self.HELP_TEXT)
def getTreeBranchForState(self, filename, version):
if not os.path.exists(os.path.join(self.folder, filename)):
return self.notinstalledItem
else:
helpFile = os.path.join(self.folder, filename + '.help')
try:
with open(helpFile) as f:
helpContent = json.load(f)
currentVersion = float(helpContent[Help2Html.ALG_VERSION])
except Exception:
currentVersion = 0
if version > currentVersion:
return self.toupdateItem
else:
return self.uptodateItem
def cancelPressed(self):
super(GetScriptsAndModelsDialog, self).reject()
def storeFile(self, reply, filename):
"""store a script/model that has been downloaded"""
QApplication.restoreOverrideCursor()
if reply.error() != QNetworkReply.NoError:
if os.path.splitext(filename)[1].lower() == '.help':
content = '{"ALG_VERSION" : %s}' % self.resources[filename[:-5]][0]
else:
self.popupError(reply.error(), reply.request().url().toString())
content = None
else:
content = bytes(reply.readAll()).decode('utf8')
reply.deleteLater()
if content:
path = os.path.join(self.folder, filename)
with open(path, 'w') as f:
f.write(content)
self.progressBar.setValue(self.progressBar.value() + 1)
def okPressed(self):
toDownload = []
for i in range(self.toupdateItem.childCount()):
item = self.toupdateItem.child(i)
if item.checkState(0) == Qt.Checked:
toDownload.append(item.filename)
for i in range(self.notinstalledItem.childCount()):
item = self.notinstalledItem.child(i)
if item.checkState(0) == Qt.Checked:
toDownload.append(item.filename)
if toDownload:
self.progressBar.setMaximum(len(toDownload) * 2)
for i, filename in enumerate(toDownload):
QCoreApplication.processEvents()
url = self.urlBase + filename.replace(' ', '%20')
self.grabHTTP(url, self.storeFile, filename)
url += '.help'
self.grabHTTP(url, self.storeFile, filename + '.help')
toDelete = []
for i in range(self.uptodateItem.childCount()):
item = self.uptodateItem.child(i)
if item.checkState(0) == Qt.Unchecked:
toDelete.append(item.filename)
# Remove py and help files if they exist
for filename in toDelete:
for pathname in (filename, filename + u".help"):
path = os.path.join(self.folder, pathname)
if os.path.exists(path):
os.remove(path)
self.updateProvider = len(toDownload) + len(toDelete) > 0
super(GetScriptsAndModelsDialog, self).accept()
class TreeItem(QTreeWidgetItem):
def __init__(self, filename, name, icon):
QTreeWidgetItem.__init__(self)
self.name = name
self.filename = filename
self.setText(0, name)
self.setIcon(0, icon)
self.setCheckState(0, Qt.Unchecked)
| gpl-2.0 | 6,125,041,364,056,533,000 | 40.520216 | 150 | 0.592249 | false |
samuelclay/NewsBlur | vendor/cjson/jsontest.py | 1 | 14597 | #!/usr/bin/python
# -*- coding: latin2 -*-
## this test suite is an almost verbatim copy of the jsontest.py test suite
## found in json-py available from http://sourceforge.net/projects/json-py/
## Copyright (C) 2005 Patrick D. Logan
## 2007-03-15 - Viktor Ferenczi ([email protected])
## Added unit tests for encoder/decoder extensions.
## Added throughput measurement.
## Typical values on a 3.0GHz Intel P4: about 8Mbytes/s
## 2007-04-02 - Viktor Ferenczi ([email protected])
## Added unit test for encoding with automatic dict key to str conversion.
## 2007-05-04 - Viktor Ferenczi ([email protected])
## Added unit tests for unicode encoding/decoding.
## More realistic, grid like data used for performance tests.
import re
import time
import math
import unittest
import datetime
import cjson
_exception = cjson.DecodeError
# The object tests should be order-independent. They're not.
# i.e. they should test for existence of keys and values
# with read/write invariance.
def _removeWhitespace(str):
return str.replace(" ", "")
class JsonTest(unittest.TestCase):
def testReadEmptyObject(self):
obj = cjson.decode("{}")
self.assertEqual({}, obj)
def testWriteEmptyObject(self):
s = cjson.encode({})
self.assertEqual("{}", _removeWhitespace(s))
def testReadStringValue(self):
obj = cjson.decode('{ "name" : "Patrick" }')
self.assertEqual({ "name" : "Patrick" }, obj)
def testReadEscapedQuotationMark(self):
obj = cjson.decode(r'"\""')
self.assertEqual(r'"', obj)
# def testReadEscapedSolidus(self):
# obj = cjson.decode(r'"\/"')
# self.assertEqual(r'/', obj)
def testReadEscapedReverseSolidus(self):
obj = cjson.decode(r'"\\"')
self.assertEqual("\\", obj)
def testReadEscapedBackspace(self):
obj = cjson.decode(r'"\b"')
self.assertEqual("\b", obj)
def testReadEscapedFormfeed(self):
obj = cjson.decode(r'"\f"')
self.assertEqual("\f", obj)
def testReadEscapedNewline(self):
obj = cjson.decode(r'"\n"')
self.assertEqual("\n", obj)
def testReadEscapedCarriageReturn(self):
obj = cjson.decode(r'"\r"')
self.assertEqual("\r", obj)
def testReadEscapedHorizontalTab(self):
obj = cjson.decode(r'"\t"')
self.assertEqual("\t", obj)
def testReadEscapedHexCharacter(self):
obj = cjson.decode(r'"\u000A"')
self.assertEqual("\n", obj)
obj = cjson.decode(r'"\u1001"')
self.assertEqual('\u1001', obj)
def testWriteEscapedQuotationMark(self):
s = cjson.encode(r'"')
self.assertEqual(r'"\""', _removeWhitespace(s))
def testWriteEscapedSolidus(self):
s = cjson.encode(r'/')
#self.assertEqual(r'"\/"', _removeWhitespace(s))
self.assertEqual('"/"', _removeWhitespace(s))
def testWriteNonEscapedSolidus(self):
s = cjson.encode(r'/')
self.assertEqual(r'"/"', _removeWhitespace(s))
def testWriteEscapedReverseSolidus(self):
s = cjson.encode("\\")
self.assertEqual(r'"\\"', _removeWhitespace(s))
def testWriteEscapedBackspace(self):
s = cjson.encode("\b")
self.assertEqual(r'"\b"', _removeWhitespace(s))
def testWriteEscapedFormfeed(self):
s = cjson.encode("\f")
self.assertEqual(r'"\f"', _removeWhitespace(s))
def testWriteEscapedNewline(self):
s = cjson.encode("\n")
self.assertEqual(r'"\n"', _removeWhitespace(s))
def testWriteEscapedCarriageReturn(self):
s = cjson.encode("\r")
self.assertEqual(r'"\r"', _removeWhitespace(s))
def testWriteEscapedHorizontalTab(self):
s = cjson.encode("\t")
self.assertEqual(r'"\t"', _removeWhitespace(s))
def testWriteEscapedHexCharacter(self):
s = cjson.encode('\u1001')
self.assertEqual(r'"\u1001"', _removeWhitespace(s))
def testReadBadEscapedHexCharacter(self):
self.assertRaises(_exception, self.doReadBadEscapedHexCharacter)
def doReadBadEscapedHexCharacter(self):
cjson.decode('"\\u10K5"')
def testReadBadObjectKey(self):
self.assertRaises(_exception, self.doReadBadObjectKey)
def doReadBadObjectKey(self):
cjson.decode('{ 44 : "age" }')
def testReadBadArray(self):
self.assertRaises(_exception, self.doReadBadArray)
def doReadBadArray(self):
cjson.decode('[1,2,3,,]')
def testReadBadObjectSyntax(self):
self.assertRaises(_exception, self.doReadBadObjectSyntax)
def doReadBadObjectSyntax(self):
cjson.decode('{"age", 44}')
def testWriteStringValue(self):
s = cjson.encode({ "name" : "Patrick" })
self.assertEqual('{"name":"Patrick"}', _removeWhitespace(s))
def testReadIntegerValue(self):
obj = cjson.decode('{ "age" : 44 }')
self.assertEqual({ "age" : 44 }, obj)
def testReadNegativeIntegerValue(self):
obj = cjson.decode('{ "key" : -44 }')
self.assertEqual({ "key" : -44 }, obj)
def testReadFloatValue(self):
obj = cjson.decode('{ "age" : 44.5 }')
self.assertEqual({ "age" : 44.5 }, obj)
def testReadNegativeFloatValue(self):
obj = cjson.decode(' { "key" : -44.5 } ')
self.assertEqual({ "key" : -44.5 }, obj)
def testReadBadNumber(self):
self.assertRaises(_exception, self.doReadBadNumber)
def doReadBadNumber(self):
cjson.decode('-44.4.4')
def testReadSmallObject(self):
obj = cjson.decode('{ "name" : "Patrick", "age":44} ')
self.assertEqual({ "age" : 44, "name" : "Patrick" }, obj)
def testReadEmptyArray(self):
obj = cjson.decode('[]')
self.assertEqual([], obj)
def testWriteEmptyArray(self):
self.assertEqual("[]", _removeWhitespace(cjson.encode([])))
def testReadSmallArray(self):
obj = cjson.decode(' [ "a" , "b", "c" ] ')
self.assertEqual(["a", "b", "c"], obj)
def testWriteSmallArray(self):
self.assertEqual('[1,2,3,4]', _removeWhitespace(cjson.encode([1, 2, 3, 4])))
def testWriteSmallObject(self):
s = cjson.encode({ "name" : "Patrick", "age": 44 })
self.assertEqual('{"age":44,"name":"Patrick"}', _removeWhitespace(s))
def testWriteFloat(self):
self.assertEqual("3.44556677", _removeWhitespace(cjson.encode(3.44556677)))
def testReadTrue(self):
self.assertEqual(True, cjson.decode("true"))
def testReadFalse(self):
self.assertEqual(False, cjson.decode("false"))
def testReadNull(self):
self.assertEqual(None, cjson.decode("null"))
def testWriteTrue(self):
self.assertEqual("true", _removeWhitespace(cjson.encode(True)))
def testWriteFalse(self):
self.assertEqual("false", _removeWhitespace(cjson.encode(False)))
def testWriteNull(self):
self.assertEqual("null", _removeWhitespace(cjson.encode(None)))
def testReadArrayOfSymbols(self):
self.assertEqual([True, False, None], cjson.decode(" [ true, false,null] "))
def testWriteArrayOfSymbolsFromList(self):
self.assertEqual("[true,false,null]", _removeWhitespace(cjson.encode([True, False, None])))
def testWriteArrayOfSymbolsFromTuple(self):
self.assertEqual("[true,false,null]", _removeWhitespace(cjson.encode((True, False, None))))
def testReadComplexObject(self):
src = '''
{ "name": "Patrick", "age" : 44, "Employed?" : true, "Female?" : false, "grandchildren":null }
'''
obj = cjson.decode(src)
self.assertEqual({"name":"Patrick","age":44,"Employed?":True,"Female?":False,"grandchildren":None}, obj)
def testReadLongArray(self):
src = '''[ "used",
"abused",
"confused",
true, false, null,
1,
2,
[3, 4, 5]]
'''
obj = cjson.decode(src)
self.assertEqual(["used","abused","confused", True, False, None,
1,2,[3,4,5]], obj)
def testReadIncompleteArray(self):
self.assertRaises(_exception, self.doReadIncompleteArray)
def doReadIncompleteArray(self):
cjson.decode('[')
def testReadComplexArray(self):
src = '''
[
{ "name": "Patrick", "age" : 44,
"Employed?" : true, "Female?" : false,
"grandchildren":null },
"used",
"abused",
"confused",
1,
2,
[3, 4, 5]
]
'''
obj = cjson.decode(src)
self.assertEqual([{"name":"Patrick","age":44,"Employed?":True,"Female?":False,"grandchildren":None},
"used","abused","confused",
1,2,[3,4,5]], obj)
def testWriteComplexArray(self):
obj = [{"name":"Patrick","age":44,"Employed?":True,"Female?":False,"grandchildren":None},
"used","abused","confused",
1,2,[3,4,5]]
self.assertEqual('[{"Female?":false,"age":44,"name":"Patrick","grandchildren":null,"Employed?":true},"used","abused","confused",1,2,[3,4,5]]',
_removeWhitespace(cjson.encode(obj)))
def testReadWriteCopies(self):
orig_obj = {'a':' " '}
json_str = cjson.encode(orig_obj)
copy_obj = cjson.decode(json_str)
self.assertEqual(orig_obj, copy_obj)
self.assertEqual(True, orig_obj == copy_obj)
self.assertEqual(False, orig_obj is copy_obj)
def testStringEncoding(self):
s = cjson.encode([1, 2, 3])
        self.assertEqual("[1,2,3]", _removeWhitespace(s))
def testReadEmptyObjectAtEndOfArray(self):
self.assertEqual(["a","b","c",{}],
cjson.decode('["a","b","c",{}]'))
def testReadEmptyObjectMidArray(self):
self.assertEqual(["a","b",{},"c"],
cjson.decode('["a","b",{},"c"]'))
def testReadClosingObjectBracket(self):
self.assertEqual({"a":[1,2,3]}, cjson.decode('{"a":[1,2,3]}'))
def testEmptyObjectInList(self):
obj = cjson.decode('[{}]')
self.assertEqual([{}], obj)
def testObjectWithEmptyList(self):
obj = cjson.decode('{"test": [] }')
self.assertEqual({"test":[]}, obj)
def testObjectWithNonEmptyList(self):
obj = cjson.decode('{"test": [3, 4, 5] }')
self.assertEqual({"test":[3, 4, 5]}, obj)
def testWriteLong(self):
self.assertEqual("12345678901234567890", cjson.encode(12345678901234567890))
def testEncoderExtension(self):
def dateEncoder(d):
assert isinstance(d, datetime.date)
return 'new Date(Date.UTC(%d,%d,%d))'%(d.year, d.month, d.day)
self.assertEqual(cjson.encode([1,datetime.date(2007,1,2),2], extension=dateEncoder), '[1, new Date(Date.UTC(2007,1,2)), 2]')
self.assertRaises(cjson.EncodeError, lambda: cjson.encode(1, extension=0))
def testDecoderExtension(self):
        re_date=re.compile(r'^new\sDate\(Date\.UTC\(.*?\)\)')
def dateDecoder(json,idx):
json=json[idx:]
m=re_date.match(json)
            if not m: raise ValueError('cannot parse JSON string as Date object: %s' % json[idx:])
args=cjson.decode('[%s]'%json[18:m.end()-2])
dt=datetime.date(*args)
return (dt,m.end())
self.assertEqual(cjson.decode('[1, new Date(Date.UTC(2007,1,2)), 2]', extension=dateDecoder), [1,datetime.date(2007,1,2),2])
self.assertEqual(cjson.decode('[1, new Date(Date.UTC( 2007, 1 , 2 )) , 2]', extension=dateDecoder), [1,datetime.date(2007,1,2),2])
self.assertRaises(cjson.DecodeError, lambda: cjson.decode('1', extension=0))
def testEncodeKey2Str(self):
d={'1':'str 1', 1:'int 1', 3.1415:'pi'}
self.assertRaises(cjson.EncodeError, lambda: cjson.encode(d))
# NOTE: decode needed for order invariance
self.assertEqual(cjson.decode(cjson.encode(d, key2str=True)),
{"1": "str 1", "1": "int 1", "3.1415": "pi"})
def testUnicodeEncode(self):
self.assertEqual(cjson.encode({'b':2}), '{"b": 2}')
self.assertEqual(cjson.encode({'o"':'öõüû'}), r'{"o\"": "\u00f6\u0151\u00fc\u0171"}')
self.assertEqual(cjson.encode('öõüû', encoding='latin2'), r'"\u00f6\u0151\u00fc\u0171"')
self.assertRaises(cjson.EncodeError, lambda: cjson.encode('öõüû', encoding='ascii'))
def testUnicodeDecode(self):
self.assertEqual(cjson.decode('{"b": 2}', all_unicode=True), {'b':2})
self.assertEqual(cjson.decode(r'{"o\"": "\u00f6\u0151\u00fc\u0171"}'), {'o"':'öõüû'})
self.assertEqual(cjson.decode(r'{"o\"": "\u00f6\u0151\u00fc\u0171"}', encoding='latin2'), {'o"':'öõüû'})
self.assertEqual(cjson.decode(r'"\u00f6\u0151\u00fc\u0171"', all_unicode=True), 'öõüû')
self.assertEqual(cjson.decode(r'"\u00f6\u0151\u00fc\u0171"', encoding='latin2'), 'öõüû')
self.assertRaises(cjson.DecodeError, lambda: cjson.decode('"öõüû"', encoding='ascii'))
def testUnicodeEncodeDecode(self):
for s in ('abc', 'aáé', 'öõüû'):
self.assertEqual(cjson.decode(cjson.encode(s)), s.decode('latin1'))
def measureEncoderThroughput(data):
bytes=0
st=time.time()
cnt=0
while True:
dt=time.time()-st
if dt>=0.5 and cnt>9: break
bytes+=len(cjson.encode(data))
cnt+=1
return int(bytes/1024/dt)
def measureDecoderThroughput(data):
json=cjson.encode(data)
bytes=0
st=time.time()
cnt=0
while True:
dt=time.time()-st
if dt>=0.5 and cnt>9: break
cjson.decode(json)
bytes+=len(json)
cnt+=1
return int(math.floor(bytes/dt/1024.0+0.5))
def measureThroughput():
# Try to imitate realistic data, for example a large grid of records
data=[
dict([
('cell(%d,%d)'%(x,y), (
None, False, True, 0, 1,
x+y, x*y, math.pi, math.pi*x*y,
'str(%d,%d)%s'%(x,y,'#'*(x/10)),
'unicode[%04X]:%s'%(x*y,chr(x*y)),
))
for x in range(y)
])
for y in range(1,100)
]
json=cjson.encode(data)
print('Test data: tuples in dicts in a list, %d bytes as JSON string'%len(json))
print('Encoder throughput: ~%d kbyte/s'%measureEncoderThroughput(data))
print('Decoder throughput: ~%d kbyte/s'%measureDecoderThroughput(data))
def main():
try:
unittest.main()
#suite = unittest.TestLoader().loadTestsFromTestCase(JsonTest)
#unittest.TextTestRunner(verbosity=2).run(suite)
finally:
measureThroughput()
if __name__ == '__main__':
main()
| mit | 3,022,765,328,907,766,000 | 33.921053 | 150 | 0.601356 | false |
sio2project/oioioi | oioioi/exportszu/views.py | 1 | 1862 | import os
import tempfile
from django.http import FileResponse
from django.template.response import TemplateResponse
from oioioi.base.permissions import enforce_condition
from oioioi.contests.utils import contest_exists, is_contest_admin
from oioioi.exportszu.forms import ExportSubmissionsForm
from oioioi.exportszu.utils import (
SubmissionsWithUserDataCollector,
build_submissions_archive,
)
@enforce_condition(contest_exists & is_contest_admin)
def export_submissions_view(request):
if request.method == 'POST':
form = ExportSubmissionsForm(request, request.POST)
if form.is_valid():
round = form.cleaned_data['round']
only_final = form.cleaned_data['only_final']
collector = SubmissionsWithUserDataCollector(
request.contest, round=round, only_final=only_final
)
# TemporaryFile promises removal of the file when it is closed.
# Note that we cannot use with, because we want to keep it beyond
# this function call.
tmp_file = tempfile.TemporaryFile()
build_submissions_archive(tmp_file, collector)
# We send a large file with django. Usually it isn't a good idea,
# but letting the web server do it leads to problems with when to
# delete this file and from where.
tmp_file.seek(0, os.SEEK_SET) # go to the beginning of the file
response = FileResponse(tmp_file)
response['Content-Type'] = 'application/gzip'
response['Content-Disposition'] = (
'attachment; filename="%s.tgz"' % request.contest.id
)
return response
else:
form = ExportSubmissionsForm(request)
return TemplateResponse(
request, 'exportszu/export_submissions.html', {'form': form}
)
| gpl-3.0 | -8,733,963,170,835,522,000 | 40.377778 | 77 | 0.659506 | false |
deepmind/slim-dataset | reader.py | 1 | 10070 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reader for dataset used in the SLIM paper.
Example usage:
filenames, iterator, next_element = make_dataset(batch_size=16)
with tf.Session() as sess:
# Initialize `iterator` with train data.
# training_filenames = ["/var/data/train_1.tfrecord", ...]
sess.run(iterator.initializer, feed_dict={filenames: training_filenames})
ne_value = sess.run(next_element)
# Initialize `iterator` with validation data.
# validation_filenames = ["/var/data/train_1.tfrecord", ...]
# sess.run(iterator.initializer, feed_dict={filenames: validation_filenames})
ne_value = sess.run(next_element)
`next_element` is a tuple containing the query, the target, and the raw data.
The query is a tuple where the first element is the
sequence of 9 (images, cameras, captions) which can be given to the model
as context. The second element in the query is the camera angle of the
viewpoint to reconstruct. The target contains the image corresponding to the
queried viewpoint, the text description from that viewpoint and an image of
the scene viewed from above.
The raw data is a dictionary with all the fields as read from the tf.Record as
described in the documentation for `_parse_proto`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_NUM_VIEWS = 10
_NUM_RAW_CAMERA_PARAMS = 3
_IMAGE_SCALE = 0.25
_USE_SIMPLIFIED_CAPTIONS = False
_PARSE_METADATA = False
def _parse_proto(buf):
"""Parse binary protocol buffer into tensors.
The protocol buffer is expected to contain the following fields:
* frames: 10 views of the scene rendered as images.
* top_down_frame: single view of the scene from above rendered as an image.
* cameras: 10 vectors describing the camera position from which the frames
have been rendered
* captions: A string description of the scene. For the natural language
dataset, contains descriptions written by human annotators. For
synthetic data contains a string describing each relation between
objects in the scene exactly once.
* simplified_captions: A string description of the scene. For the natural
language dataset contains a string describing each relation between
objects in the scene exactly once. For synthetic datasets contains
a string describing every possible pairwise relation between objects in
the scene.
* meta_shape: A vector of strings describing the object shapes.
* meta_color: A vector of strings describing the object colors.
* meta_size: A vector of strings describing the object sizes.
* meta_obj_positions: A matrix of floats describing the position of each
object in the scene.
* meta_obj_rotations: A matrix of floats describing the rotation of each
object in the scene.
  * meta_obj_colors: A matrix of floats describing the color of each
object in the scene as RGBA in the range [0, 1].
Args:
buf: A string containing the serialized protocol buffer.
Returns:
A dictionary containing tensors for each of the fields in the protocol
buffer. If _PARSE_METADATA is False, will omit fields starting with 'meta_'.
"""
feature_map = {
"frames":
tf.FixedLenFeature(shape=[_NUM_VIEWS], dtype=tf.string),
"top_down_frame":
tf.FixedLenFeature(shape=[1], dtype=tf.string),
"cameras":
tf.FixedLenFeature(
shape=[_NUM_VIEWS * _NUM_RAW_CAMERA_PARAMS], dtype=tf.float32),
"captions":
tf.VarLenFeature(dtype=tf.string),
"simplified_captions":
tf.VarLenFeature(dtype=tf.string),
"meta_shape":
tf.VarLenFeature(dtype=tf.string),
"meta_color":
tf.VarLenFeature(dtype=tf.string),
"meta_size":
tf.VarLenFeature(dtype=tf.string),
"meta_obj_positions":
tf.VarLenFeature(dtype=tf.float32),
"meta_obj_rotations":
tf.VarLenFeature(dtype=tf.float32),
"meta_obj_colors":
tf.VarLenFeature(dtype=tf.float32),
}
example = tf.parse_single_example(buf, feature_map)
images = tf.concat(example["frames"], axis=0)
images = tf.map_fn(
tf.image.decode_jpeg,
tf.reshape(images, [-1]),
dtype=tf.uint8,
back_prop=False)
top_down = tf.image.decode_jpeg(tf.squeeze(example["top_down_frame"]))
cameras = tf.reshape(example["cameras"], shape=[-1, _NUM_RAW_CAMERA_PARAMS])
captions = tf.sparse_tensor_to_dense(example["captions"], default_value="")
simplified_captions = tf.sparse_tensor_to_dense(
example["simplified_captions"], default_value="")
meta_shape = tf.sparse_tensor_to_dense(
example["meta_shape"], default_value="")
meta_color = tf.sparse_tensor_to_dense(
example["meta_color"], default_value="")
meta_size = tf.sparse_tensor_to_dense(example["meta_size"], default_value="")
meta_obj_positions = tf.sparse_tensor_to_dense(
example["meta_obj_positions"], default_value=0)
meta_obj_positions = tf.reshape(meta_obj_positions, shape=[-1, 3])
meta_obj_rotations = tf.sparse_tensor_to_dense(
example["meta_obj_rotations"], default_value=0)
meta_obj_rotations = tf.reshape(meta_obj_rotations, shape=[-1, 4])
meta_obj_colors = tf.sparse_tensor_to_dense(
example["meta_obj_colors"], default_value=0)
meta_obj_colors = tf.reshape(meta_obj_colors, shape=[-1, 4])
data_tensors = {
"images": images,
"cameras": cameras,
"captions": captions,
"simplified_captions": simplified_captions,
"top_down": top_down
}
if _PARSE_METADATA:
data_tensors.update({
"meta_shape": meta_shape,
"meta_color": meta_color,
"meta_size": meta_size,
"meta_obj_positions": meta_obj_positions,
"meta_obj_rotations": meta_obj_rotations,
"meta_obj_colors": meta_obj_colors
})
return data_tensors
def _make_indices():
indices = tf.range(0, _NUM_VIEWS)
indices = tf.random_shuffle(indices)
return indices
def _convert_and_resize_images(images, old_size):
images = tf.image.convert_image_dtype(images, dtype=tf.float32)
new_size = tf.cast(old_size, tf.float32) * _IMAGE_SCALE
new_size = tf.cast(new_size, tf.int32)
images = tf.image.resize_images(images, new_size, align_corners=True)
return images
def _preprocess_images(images, indices):
images_processed = tf.gather(images, indices)
old_size = tf.shape(images_processed)[1:3]
images_processed = _convert_and_resize_images(images_processed, old_size)
return images_processed
def _preprocess_td(td_image):
old_size = tf.shape(td_image)[0:2]
td_image = _convert_and_resize_images(td_image, old_size)
return td_image
def _preprocess_cameras(raw_cameras, indices):
"""Apply a nonlinear transformation to the vector of camera angles."""
raw_cameras = tf.gather(raw_cameras, indices)
azimuth = raw_cameras[:, 0]
pos = raw_cameras[:, 1:]
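  # Encoding the azimuth as (sin, cos) below, rather than passing the raw
  # angle through, keeps the camera representation continuous across the
  # 0/2*pi wrap-around, which is easier for a downstream model to consume.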
cameras = tf.concat(
[
pos,
tf.expand_dims(tf.sin(azimuth), -1),
tf.expand_dims(tf.cos(azimuth), -1)
],
axis=1)
return cameras
def _preprocess_captions(raw_caption, indices):
return tf.gather(raw_caption, indices)
def _preprocess_data(raw_data):
"""Randomly shuffle viewpoints and apply preprocessing to each modality."""
indices = _make_indices()
images = _preprocess_images(raw_data["images"], indices)
cameras = _preprocess_cameras(raw_data["cameras"], indices)
top_down = _preprocess_td(raw_data["top_down"])
if _USE_SIMPLIFIED_CAPTIONS:
captions = _preprocess_captions(raw_data["simplified_captions"], indices)
else:
captions = _preprocess_captions(raw_data["captions"], indices)
return [images, cameras, top_down, captions]
def _split_scene(images, cameras, top_down, captions):
"""Splits scene into query and target.
Args:
images: A tensor containing images.
cameras: A tensor containing cameras.
top_down: A tensor containing the scene seen from top.
captions: A tensor containing captions.
Returns:
A tuple query, target. The query is a tuple where the first element is the
sequence of 9 (images, cameras, captions) which can be given to the model
as context. The second element in the query is the camera angle of the
viewpoint to reconstruct. The target contains the image corresponding to the
queried viewpoint, the text description from that viewpoint and an image of
the scene viewed from above.
"""
context_image = images[:-1, :, :, :]
context_camera = cameras[:-1, :]
context_caption = captions[:-1]
target_image = images[-1, :, :, :]
target_camera = cameras[-1, :]
target_caption = captions[-1]
query = ((context_image, context_camera, context_caption), target_camera)
target = (target_image, target_caption, top_down)
return query, target
def _parse_function(buf):
raw_data = _parse_proto(buf)
scene_data = _preprocess_data(raw_data)
query, target = _split_scene(*scene_data)
return query, target, raw_data
def make_dataset(batch_size):
"""Returns a tf.data.Dataset object with the dataset."""
filenames = tf.placeholder(tf.string, shape=[None])
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(_parse_function)
dataset = dataset.repeat()
dataset = dataset.shuffle(128)
dataset = dataset.batch(batch_size)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
return filenames, iterator, next_element
| apache-2.0 | 6,471,769,179,025,854,000 | 36.022059 | 80 | 0.697319 | false |
mhils/mitmproxy | mitmproxy/proxy2/layers/http/_http1.py | 1 | 17108 | import abc
from typing import Union, Optional, Callable, Type
import h11
from h11._readers import ChunkedReader, ContentLengthReader, Http10Reader
from h11._receivebuffer import ReceiveBuffer
from mitmproxy import exceptions, http
from mitmproxy.net import http as net_http
from mitmproxy.net.http import http1, status_codes
from mitmproxy.net.http.http1 import read_sansio as http1_sansio
from mitmproxy.proxy2 import commands, events, layer
from mitmproxy.proxy2.context import Connection, ConnectionState, Context
from mitmproxy.proxy2.layers.http._base import ReceiveHttp, StreamId
from mitmproxy.proxy2.utils import expect
from mitmproxy.utils import human
from ._base import HttpConnection
from ._events import HttpEvent, RequestData, RequestEndOfMessage, RequestHeaders, RequestProtocolError, ResponseData, \
ResponseEndOfMessage, ResponseHeaders, ResponseProtocolError
TBodyReader = Union[ChunkedReader, Http10Reader, ContentLengthReader]
class Http1Connection(HttpConnection, metaclass=abc.ABCMeta):
stream_id: Optional[StreamId] = None
request: Optional[http.HTTPRequest] = None
response: Optional[http.HTTPResponse] = None
request_done: bool = False
response_done: bool = False
# this is a bit of a hack to make both mypy and PyCharm happy.
state: Union[Callable[[events.Event], layer.CommandGenerator[None]], Callable]
body_reader: TBodyReader
buf: ReceiveBuffer
ReceiveProtocolError: Type[Union[RequestProtocolError, ResponseProtocolError]]
ReceiveData: Type[Union[RequestData, ResponseData]]
ReceiveEndOfMessage: Type[Union[RequestEndOfMessage, ResponseEndOfMessage]]
def __init__(self, context: Context, conn: Connection):
super().__init__(context, conn)
self.buf = ReceiveBuffer()
@abc.abstractmethod
def send(self, event: HttpEvent) -> layer.CommandGenerator[None]:
yield from () # pragma: no cover
@abc.abstractmethod
def read_headers(self, event: events.ConnectionEvent) -> layer.CommandGenerator[None]:
yield from () # pragma: no cover
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
if isinstance(event, HttpEvent):
yield from self.send(event)
else:
if isinstance(event, events.DataReceived) and self.state != self.passthrough:
self.buf += event.data
yield from self.state(event)
@expect(events.Start)
def start(self, _) -> layer.CommandGenerator[None]:
self.state = self.read_headers
yield from ()
state = start
def read_body(self, event: events.Event) -> layer.CommandGenerator[None]:
assert self.stream_id
while True:
try:
if isinstance(event, events.DataReceived):
h11_event = self.body_reader(self.buf)
elif isinstance(event, events.ConnectionClosed):
h11_event = self.body_reader.read_eof()
else:
raise AssertionError(f"Unexpected event: {event}")
except h11.ProtocolError as e:
yield commands.CloseConnection(self.conn)
yield ReceiveHttp(self.ReceiveProtocolError(self.stream_id, f"HTTP/1 protocol error: {e}"))
return
if h11_event is None:
return
elif isinstance(h11_event, h11.Data):
data: bytes = bytes(h11_event.data)
if data:
yield ReceiveHttp(self.ReceiveData(self.stream_id, data))
elif isinstance(h11_event, h11.EndOfMessage):
assert self.request
if h11_event.headers:
raise NotImplementedError(f"HTTP trailers are not implemented yet.")
if self.request.data.method.upper() != b"CONNECT":
yield ReceiveHttp(self.ReceiveEndOfMessage(self.stream_id))
is_request = isinstance(self, Http1Server)
yield from self.mark_done(
request=is_request,
response=not is_request
)
return
def wait(self, event: events.Event) -> layer.CommandGenerator[None]:
"""
We wait for the current flow to be finished before parsing the next message,
as we may want to upgrade to WebSocket or plain TCP before that.
"""
assert self.stream_id
if isinstance(event, events.DataReceived):
return
elif isinstance(event, events.ConnectionClosed):
# for practical purposes, we assume that a peer which sent at least a FIN
            # is not interested in any more data from us,
            # see https://github.com/httpwg/http-core/issues/22
if event.connection.state is not ConnectionState.CLOSED:
yield commands.CloseConnection(event.connection)
yield ReceiveHttp(self.ReceiveProtocolError(self.stream_id, f"Client disconnected.",
code=status_codes.CLIENT_CLOSED_REQUEST))
else: # pragma: no cover
raise AssertionError(f"Unexpected event: {event}")
def done(self, event: events.ConnectionEvent) -> layer.CommandGenerator[None]:
yield from () # pragma: no cover
def make_pipe(self) -> layer.CommandGenerator[None]:
self.state = self.passthrough
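        # Replay any bytes that are already buffered so the new passthrough
        # state sees them as if they had just arrived.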
if self.buf:
already_received = self.buf.maybe_extract_at_most(len(self.buf))
yield from self.state(events.DataReceived(self.conn, already_received))
self.buf.compress()
def passthrough(self, event: events.Event) -> layer.CommandGenerator[None]:
assert self.stream_id
if isinstance(event, events.DataReceived):
yield ReceiveHttp(self.ReceiveData(self.stream_id, event.data))
elif isinstance(event, events.ConnectionClosed):
if isinstance(self, Http1Server):
yield ReceiveHttp(RequestEndOfMessage(self.stream_id))
else:
yield ReceiveHttp(ResponseEndOfMessage(self.stream_id))
def mark_done(self, *, request: bool = False, response: bool = False) -> layer.CommandGenerator[None]:
if request:
self.request_done = True
if response:
self.response_done = True
if self.request_done and self.response_done:
assert self.request
assert self.response
if should_make_pipe(self.request, self.response):
yield from self.make_pipe()
return
connection_done = (
http1_sansio.expected_http_body_size(self.request, self.response) == -1
or http1.connection_close(self.request.http_version, self.request.headers)
or http1.connection_close(self.response.http_version, self.response.headers)
# If we proxy HTTP/2 to HTTP/1, we only use upstream connections for one request.
# This simplifies our connection management quite a bit as we can rely on
# the proxyserver's max-connection-per-server throttling.
or (self.request.is_http2 and isinstance(self, Http1Client))
)
if connection_done:
yield commands.CloseConnection(self.conn)
self.state = self.done
return
self.request_done = self.response_done = False
self.request = self.response = None
if isinstance(self, Http1Server):
self.stream_id += 2
else:
self.stream_id = None
self.state = self.read_headers
if self.buf:
yield from self.state(events.DataReceived(self.conn, b""))
class Http1Server(Http1Connection):
"""A simple HTTP/1 server with no pipelining support."""
ReceiveProtocolError = RequestProtocolError
ReceiveData = RequestData
ReceiveEndOfMessage = RequestEndOfMessage
stream_id: int
def __init__(self, context: Context):
super().__init__(context, context.client)
self.stream_id = 1
def send(self, event: HttpEvent) -> layer.CommandGenerator[None]:
assert event.stream_id == self.stream_id
if isinstance(event, ResponseHeaders):
self.response = response = event.response
if response.is_http2:
response = response.copy()
# Convert to an HTTP/1 response.
response.http_version = "HTTP/1.1"
# not everyone supports empty reason phrases, so we better make up one.
response.reason = status_codes.RESPONSES.get(response.status_code, "")
# Shall we set a Content-Length header here if there is none?
# For now, let's try to modify as little as possible.
raw = http1.assemble_response_head(response)
yield commands.SendData(self.conn, raw)
elif isinstance(event, ResponseData):
assert self.response
if "chunked" in self.response.headers.get("transfer-encoding", "").lower():
raw = b"%x\r\n%s\r\n" % (len(event.data), event.data)
else:
raw = event.data
if raw:
yield commands.SendData(self.conn, raw)
elif isinstance(event, ResponseEndOfMessage):
assert self.response
if "chunked" in self.response.headers.get("transfer-encoding", "").lower():
yield commands.SendData(self.conn, b"0\r\n\r\n")
yield from self.mark_done(response=True)
elif isinstance(event, ResponseProtocolError):
if not self.response:
resp = http.make_error_response(event.code, event.message)
raw = http1.assemble_response(resp)
yield commands.SendData(self.conn, raw)
yield commands.CloseConnection(self.conn)
else:
raise AssertionError(f"Unexpected event: {event}")
def read_headers(self, event: events.ConnectionEvent) -> layer.CommandGenerator[None]:
if isinstance(event, events.DataReceived):
request_head = self.buf.maybe_extract_lines()
if request_head:
request_head = [bytes(x) for x in request_head] # TODO: Make url.parse compatible with bytearrays
try:
self.request = http1_sansio.read_request_head(request_head)
expected_body_size = http1_sansio.expected_http_body_size(self.request, expect_continue_as_0=False)
except (ValueError, exceptions.HttpSyntaxException) as e:
yield commands.Log(f"{human.format_address(self.conn.peername)}: {e}")
yield commands.CloseConnection(self.conn)
self.state = self.done
return
yield ReceiveHttp(RequestHeaders(self.stream_id, self.request, expected_body_size == 0))
self.body_reader = make_body_reader(expected_body_size)
self.state = self.read_body
yield from self.state(event)
else:
pass # FIXME: protect against header size DoS
elif isinstance(event, events.ConnectionClosed):
buf = bytes(self.buf)
if buf.strip():
yield commands.Log(f"Client closed connection before completing request headers: {buf!r}")
yield commands.CloseConnection(self.conn)
else:
raise AssertionError(f"Unexpected event: {event}")
def mark_done(self, *, request: bool = False, response: bool = False) -> layer.CommandGenerator[None]:
yield from super().mark_done(request=request, response=response)
if self.request_done and not self.response_done:
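            # No pipelining: hold off reading the next request until the
            # current response is done (we may also upgrade to WebSocket or
            # plain passthrough first, see wait()).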
self.state = self.wait
class Http1Client(Http1Connection):
"""A simple HTTP/1 client with no pipelining support."""
ReceiveProtocolError = ResponseProtocolError
ReceiveData = ResponseData
ReceiveEndOfMessage = ResponseEndOfMessage
def __init__(self, context: Context):
super().__init__(context, context.server)
def send(self, event: HttpEvent) -> layer.CommandGenerator[None]:
if not self.stream_id:
assert isinstance(event, RequestHeaders)
self.stream_id = event.stream_id
self.request = event.request
assert self.stream_id == event.stream_id
if isinstance(event, RequestHeaders):
request = event.request
if request.is_http2:
# Convert to an HTTP/1 request.
request = request.copy() # (we could probably be a bit more efficient here.)
request.http_version = "HTTP/1.1"
if "Host" not in request.headers and request.authority:
request.headers.insert(0, "Host", request.authority)
request.authority = ""
raw = http1.assemble_request_head(request)
yield commands.SendData(self.conn, raw)
elif isinstance(event, RequestData):
assert self.request
if "chunked" in self.request.headers.get("transfer-encoding", "").lower():
raw = b"%x\r\n%s\r\n" % (len(event.data), event.data)
else:
raw = event.data
if raw:
yield commands.SendData(self.conn, raw)
elif isinstance(event, RequestEndOfMessage):
assert self.request
if "chunked" in self.request.headers.get("transfer-encoding", "").lower():
yield commands.SendData(self.conn, b"0\r\n\r\n")
elif http1_sansio.expected_http_body_size(self.request, self.response) == -1:
yield commands.CloseConnection(self.conn, half_close=True)
yield from self.mark_done(request=True)
elif isinstance(event, RequestProtocolError):
yield commands.CloseConnection(self.conn)
return
else:
raise AssertionError(f"Unexpected event: {event}")
def read_headers(self, event: events.ConnectionEvent) -> layer.CommandGenerator[None]:
if isinstance(event, events.DataReceived):
if not self.request:
# we just received some data for an unknown request.
yield commands.Log(f"Unexpected data from server: {bytes(self.buf)!r}")
yield commands.CloseConnection(self.conn)
return
assert self.stream_id
response_head = self.buf.maybe_extract_lines()
if response_head:
response_head = [bytes(x) for x in response_head] # TODO: Make url.parse compatible with bytearrays
try:
self.response = http1_sansio.read_response_head(response_head)
expected_size = http1_sansio.expected_http_body_size(self.request, self.response)
except (ValueError, exceptions.HttpSyntaxException) as e:
yield commands.CloseConnection(self.conn)
yield ReceiveHttp(ResponseProtocolError(self.stream_id, f"Cannot parse HTTP response: {e}"))
return
yield ReceiveHttp(ResponseHeaders(self.stream_id, self.response, expected_size == 0))
self.body_reader = make_body_reader(expected_size)
self.state = self.read_body
yield from self.state(event)
else:
pass # FIXME: protect against header size DoS
elif isinstance(event, events.ConnectionClosed):
if self.conn.state & ConnectionState.CAN_WRITE:
yield commands.CloseConnection(self.conn)
if self.stream_id:
if self.buf:
yield ReceiveHttp(ResponseProtocolError(self.stream_id,
f"unexpected server response: {bytes(self.buf)!r}"))
else:
# The server has closed the connection to prevent us from continuing.
# We need to signal that to the stream.
# https://tools.ietf.org/html/rfc7231#section-6.5.11
yield ReceiveHttp(ResponseProtocolError(self.stream_id, "server closed connection"))
else:
return
else:
raise AssertionError(f"Unexpected event: {event}")
def should_make_pipe(request: net_http.Request, response: net_http.Response) -> bool:
if response.status_code == 101:
return True
elif response.status_code == 200 and request.method.upper() == "CONNECT":
return True
else:
return False
def make_body_reader(expected_size: Optional[int]) -> TBodyReader:
if expected_size is None:
return ChunkedReader()
elif expected_size == -1:
return Http10Reader()
else:
return ContentLengthReader(expected_size)
__all__ = [
"Http1Client",
"Http1Server",
]
| mit | 300,163,260,257,517,600 | 45.113208 | 119 | 0.613573 | false |
delph-in/pydelphin | delphin/eds/_operations.py | 1 | 9714 |
"""
Operations on EDS
"""
import warnings
from itertools import count
from delphin import variable
from delphin import scope
from delphin import eds
from delphin import util
def from_mrs(m, predicate_modifiers=True, unique_ids=True,
representative_priority=None):
"""
Create an EDS by converting from MRS *m*.
In order for MRS to EDS conversion to work, the MRS must satisfy
the intrinsic variable property (see
:func:`delphin.mrs.has_intrinsic_variable_property`).
Args:
m: the input MRS
predicate_modifiers: if `True`, include predicate-modifier
edges; if `False`, only include basic dependencies; if a
callable, then call on the converted EDS before creating
unique ids (if `unique_ids=True`)
unique_ids: if `True`, recompute node identifiers to be unique
by the LKB's method; note that ids from *m* should already
be unique by PyDelphin's method
representative_priority: a function for ranking candidate
representative nodes; see :func:`scope.representatives`
Returns:
EDS
Raises:
EDSError: when conversion fails.
"""
# EP id to node id map; create now to keep ids consistent
hcmap = {hc.hi: hc for hc in m.hcons}
reps = scope.representatives(m, priority=representative_priority)
ivmap = {p.iv: (p, q)
for p, q in m.quantification_pairs()
if p is not None}
top = _mrs_get_top(m.top, hcmap, reps, m.index, ivmap)
deps = _mrs_args_to_basic_deps(m, hcmap, ivmap, reps)
nodes = _mrs_to_nodes(m, deps)
e = eds.EDS(
top=top,
nodes=nodes,
lnk=m.lnk,
surface=m.surface,
identifier=m.identifier)
if predicate_modifiers is True:
predicate_modifiers = find_predicate_modifiers
if predicate_modifiers:
addl_deps = predicate_modifiers(e, m, representatives=reps)
for id, node_deps in addl_deps.items():
e[id].edges.update(node_deps)
if unique_ids:
make_ids_unique(e, m)
return e
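# Illustrative usage sketch (not part of the original module); assumes an MRS
# object `m`, e.g. one decoded with `delphin.codecs.simplemrs.decode(...)`:
#
#     e = from_mrs(m)                    # EDS with predicate-modifier edges
#     e = from_mrs(m, unique_ids=False)  # keep PyDelphin's predication ids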
def _mrs_get_top(top, hcmap, reps, index, ivmap):
if top in hcmap and hcmap[top].lo in reps:
lbl = hcmap[top].lo
top = reps[lbl][0].id
else:
if top in hcmap:
warnings.warn(
f'broken handle constraint: {hcmap[top]}',
eds.EDSWarning
)
if top in reps:
top = reps[top][0].id
elif index in ivmap and ivmap[index][0].label in reps:
lbl = ivmap[index][0].label
top = reps[lbl][0].id
else:
warnings.warn('unable to find a suitable TOP', eds.EDSWarning)
top = None
return top
def _mrs_args_to_basic_deps(m, hcmap, ivmap, reps):
edges = {}
for src, roleargs in m.arguments().items():
if src in ivmap:
p, q = ivmap[src]
# non-quantifier EPs
edges[src] = {}
for role, tgt in roleargs:
# qeq
if tgt in hcmap:
lbl = hcmap[tgt].lo
if lbl in reps:
tgt = reps[lbl][0].id
else:
warnings.warn(
f'broken handle constraint: {hcmap[tgt]}',
eds.EDSWarning
)
continue
# label arg
elif tgt in reps:
tgt = reps[tgt][0].id
# regular arg
elif tgt in ivmap:
tgt = ivmap[tgt][0].id
# other (e.g., BODY, dropped arguments, etc.)
else:
continue
edges[src][role] = tgt
# add BV if the EP has a quantifier
if q is not None:
edges[q.id] = {eds.BOUND_VARIABLE_ROLE: src}
return edges
def _mrs_to_nodes(m, edges):
nodes = []
for ep in m.rels:
properties, type = None, None
if not ep.is_quantifier():
iv = ep.iv
properties = m.properties(iv)
type = variable.type(iv)
nodes.append(
eds.Node(ep.id,
ep.predicate,
type,
edges.get(ep.id, {}),
properties,
ep.carg,
ep.lnk,
ep.surface,
ep.base))
return nodes
def find_predicate_modifiers(e, m, representatives=None):
"""
Return an argument structure mapping for predicate-modifier edges.
In EDS, predicate modifiers are edges that describe a relation
between predications in the original MRS that is not evident on
the regular and scopal arguments. In practice these are EPs that
share a scope but do not select any other EPs within their scope,
such as when quantifiers are modified ("nearly every...") or with
relative clauses ("the chef whose soup spilled..."). These are
almost the same as the MOD/EQ links of DMRS, except that predicate
modifiers have more restrictions on their usage, mainly due to
their using a standard role (`ARG1`) instead of an
idiosyncratic one.
    Generally users won't call this function directly; it is called by
:func:`from_mrs` with `predicate_modifiers=True`, but it is
visible here in case users want to inspect its results separately
from MRS-to-EDS conversion. Note that when calling it separately,
*e* should use the same predication ids as *m* (by calling
:func:`from_mrs` with `unique_ids=False`). Also, users may define
their own function with the same signature and return type and use
it in place of this one. See :func:`from_mrs` for details.
Args:
e: the EDS converted from *m* as by calling :func:`from_mrs`
with `predicate_modifiers=False` and `unique_ids=False`,
used to determine if parts of the graph are connected
m: the source MRS
representatives: the scope representatives; this argument is
mainly to prevent :func:`delphin.scope.representatives`
from being called twice on *m*
Returns:
A dictionary mapping source node identifiers to
role-to-argument dictionaries of any additional
predicate-modifier edges.
Examples:
>>> e = eds.from_mrs(m, predicate_modifiers=False)
        >>> print(eds.find_predicate_modifiers(e.argument_structure(), m))
{'e5': {'ARG1': '_1'}}
"""
if representatives is None:
representatives = scope.representatives(m)
role = eds.PREDICATE_MODIFIER_ROLE
# find connected components so predicate modifiers only connect
# separate components
ids = {ep.id for ep in m.rels}
edges = []
for node in e.nodes:
for _, tgt in node.edges.items():
edges.append((node.id, tgt))
components = util._connected_components(ids, edges)
ccmap = {}
for i, component in enumerate(components):
for id in component:
ccmap[id] = i
addl = {}
if len(components) > 1:
for label, eps in representatives.items():
if len(eps) > 1:
first = eps[0]
joined = set([ccmap[first.id]])
for other in eps[1:]:
occ = ccmap[other.id]
type = variable.type(other.args.get(role, 'u0'))
needs_edge = occ not in joined
edge_available = type.lower() == 'u'
if needs_edge and edge_available:
addl.setdefault(other.id, {})[role] = first.id
joined.add(occ)
return addl
def make_ids_unique(e, m):
"""
Recompute the node identifiers in EDS *e* to be unique.
MRS objects used in conversion to EDS already have unique
predication ids, but they are created according to PyDelphin's
method rather than the LKB's method, namely with regard to
quantifiers and MRSs that do not have the intrinsic variable
property. This function recomputes unique EDS node identifiers by
the LKB's method.
.. note::
This function works in-place on *e* and returns nothing.
Args:
e: an EDS converted from MRS *m*, as from :func:`from_mrs`
with `unique_ids=False`
m: the MRS from which *e* was converted
"""
# deps can be used to single out ep from set sharing ARG0s
new_ids = (f'_{i}' for i in count(start=1))
nids = {}
used = {}
# initially only make new ids for quantifiers and those with no IV
for ep in m.rels:
nid = ep.iv
if nid is None or ep.is_quantifier():
nid = next(new_ids)
nids[ep.id] = nid
used.setdefault(nid, set()).add(ep.id)
# for ill-formed MRSs, more than one non-quantifier EP may have
# the same IV. Select a winner like selecting a scope
    # representative: the one not taking others in its group as an
# argument.
deps = {node.id: node.edges.items() for node in e.nodes}
for nid, ep_ids in used.items():
if len(ep_ids) > 1:
ep_ids = sorted(
ep_ids,
key=lambda n: any(d in ep_ids for _, d in deps.get(n, []))
)
for nid in ep_ids[1:]:
nids[nid] = next(new_ids)
# now use the unique ID mapping for reassignment
if e.top is not None:
e.top = nids[e.top]
for node in e.nodes:
node.id = nids[node.id]
edges = {role: nids[arg] for role, arg in node.edges.items()}
node.edges = edges
| mit | 6,722,001,387,782,816,000 | 34.323636 | 74 | 0.574532 | false |
jrecuero/jc2li | jc2li/base.py | 1 | 26215 | __docformat__ = 'restructuredtext en'
# -----------------------------------------------------------------------------
# _ _
# (_)_ __ ___ _ __ ___ _ __| |_ ___
# | | '_ ` _ \| '_ \ / _ \| '__| __/ __|
# | | | | | | | |_) | (_) | | | |_\__ \
# |_|_| |_| |_| .__/ \___/|_| \__|___/
# |_|
# -----------------------------------------------------------------------------
#
# from __future__ import unicode_literals
from functools import wraps, partial
import sys
import inspect
import json
# import shlex
import jc2li.loggerator as loggerator
from prompt_toolkit import prompt
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import Completer, Completion
# from prompt_toolkit.validation import Validator, ValidationError
from prompt_toolkit.token import Token
from prompt_toolkit.styles import style_from_dict
from jc2li.common import TREE_ATTR, SYNTAX_ATTR, ARGOS_ATTR
from jc2li.journal import Journal
# -----------------------------------------------------------------------------
#
# ___ ___ _ __ ___| |_ __ _ _ __ | |_ ___
# / __/ _ \| '_ \/ __| __/ _` | '_ \| __/ __|
# | (_| (_) | | | \__ \ || (_| | | | | |_\__ \
# \___\___/|_| |_|___/\__\__,_|_| |_|\__|___/
#
# -----------------------------------------------------------------------------
#
MODULE = 'CLI.base'
LOGGER = loggerator.getLoggerator(MODULE)
# -----------------------------------------------------------------------------
# _ _ __ _ _ _ _
# ___| | __ _ ___ ___ __| | ___ / _(_)_ __ (_) |_(_) ___ _ __ ___
# / __| |/ _` / __/ __| / _` |/ _ \ |_| | '_ \| | __| |/ _ \| '_ \/ __|
# | (__| | (_| \__ \__ \ | (_| | __/ _| | | | | | |_| | (_) | | | \__ \
# \___|_|\__,_|___/___/ \__,_|\___|_| |_|_| |_|_|\__|_|\___/|_| |_|___/
#
# -----------------------------------------------------------------------------
#
class CliBase(object):
"""CliBase class is the base class for any class that will implement
commands to be used by the command line interface.
Attributes:
_WALL (:any:`dict`) : Internal dictionary used to update commands defined\
in derived classes.
CLI_STYLE (:any:`dict`) : Dictionary with default styles to be used in the\
command line.
"""
_WALL = {}
CLI_STYLE = style_from_dict({Token.Toolbar: '#ffffff italic bg:#007777',
Token.RPrompt: 'bg:#ff0066 #ffffff', })
__MODES = []
class CliCompleter(Completer):
"""CliCompleter class provide completion to any entry in the command line.
This class should make use of every completer for command arguments.
"""
def __init__(self, cli):
"""CliCompleter initialization method.
Args:
cli (CliBase) : Cli instance.
"""
self._nodepath = None
self._cli = cli
def get_completions(self, document, complete_event):
"""Method that provides completion for any input in the command line.
Args:
document (:class:`Document`) : Document instance with command line input data.
                complete_event (:class:`CompleteEvent`) : Event with input information
Returns:
:class:`Completion` : Completion instance with data to be completed.
"""
# self._nodepath = None
word_before_cursor = document.get_word_before_cursor(WORD=True)
if ' ' not in document.text:
matches = [m for m in self._cli.commands if m.startswith(word_before_cursor)]
for m in matches:
yield Completion(m, start_position=-len(word_before_cursor))
else:
line_as_list = document.text.split()
if len(line_as_list) == 0:
return
last_token = line_as_list[-1] if document.text[-1] != ' ' else ' '
cmdlabel = line_as_list[0]
command = self._cli.get_command_cb(cmdlabel)
if command is not None:
# Required for partial methods
if hasattr(command, 'func'):
command = command.func
root = getattr(command, TREE_ATTR, None)
journal = self._cli.journal
_, cli_argos = journal.get_cmd_and_cli_args(command, None, " ".join(line_as_list[1:]))
nodepath = None
children_nodes = None
try:
nodepath = root.find_path(cli_argos)
except Exception as ex:
LOGGER.error('{0}, {1} | {2}'.format(ex, ex.__traceback__.tb_lineno, self._nodepath))
if not nodepath and self._nodepath is None:
                    # if no path was found and there is no previous
                    # path, just get the completion under the root.
self._nodepath = [root, ]
elif nodepath and document.text[-1] == ' ':
# if there is a path found and the last character
# entered is a space, use that path.
self._nodepath = nodepath
if self._nodepath:
                    # Get children from the path found or the last path
children_nodes = self._nodepath[-1].get_children_nodes() if self._nodepath[-1] else None
else:
                    # if there was no path found and no last path, get children
                    # from the root.
children_nodes = root.get_children_nodes()
if children_nodes:
helps = [c.completer.help(last_token) for c in children_nodes]
self._cli.toolbar_str = " | ".join(helps)
for child in children_nodes:
LOGGER.debug('child is: {0}'.format(child.label))
matches = child.completer.complete(document, last_token)
if matches is None:
continue
for i, m in enumerate(matches):
yield Completion(m, start_position=-len(word_before_cursor))
# TODO: Remove help displayed in the completer
# yield Completion(m, start_position=-len(word_before_cursor), display_meta=helps[i])
# TODO: Trace and debug information to be removed or optimized.
LOGGER.debug('completer command: {0}'.format(command))
LOGGER.debug('document text is "{}"'.format(document.text))
LOGGER.debug('last document text is [{}]'.format(line_as_list[-1]))
LOGGER.debug('children nodes are {}'.format(children_nodes))
if children_nodes:
LOGGER.debug('children nodes are {}'.format([x.name for x in children_nodes]))
LOGGER.debug('nodepath is {}'.format(nodepath))
if nodepath:
LOGGER.debug('nodepath is {}'.format([x.name for x in nodepath]))
if self._nodepath and self._nodepath[-1] is not None:
LOGGER.debug('self._nodepath is {}'.format(self._nodepath))
LOGGER.debug('self._nodepath is {}'.format([x.name for x in self._nodepath]))
def __init__(self):
"""CliBase class initialization method.
"""
self.command = None
self.last_cmd = None
self.toolbar_str = ''
self.rprompt_str = ''
self.prompt_str = "> "
self.__commands = {}
self.journal = Journal()
self.setup_commands()
self.__recording = False
self.__record_data = []
@property
def commands(self):
"""Get property that returns keys for _cmdDict attribute
Returns:
:any:`list` : List with all command labels.
"""
return self.__commands.keys()
@property
def mode_stack(self):
        """Get property that returns the stack of active mode names.
        Returns:
            :any:`list` : List with the names of the currently active modes.
        """
        return CliBase.__MODES
def get_command_cb(self, command):
"""Get the command callback for the given command label.
Args:
command (str) : String with the command label.
Returns:
:any:`function` : callback function for the given command.
"""
command_entry = self.__commands.get(command, (None, None))
return command_entry[0]
def get_command_desc(self, command):
"""Get the command description for the given command label.
Args:
command (str) : String with the command label.
Returns:
str : description for the given command.
"""
command_entry = self.__commands.get(command, (None, None))
return command_entry[1]
def is_command(self, command):
"""Returns if the given command label is found in the list of available
commands.
Args:
command (str) : Command label to check as an availbale command.
Returns:
bool : True if command label is found, False else.
"""
return command in self.commands
def add_command(self, command, command_cb, desc=""):
"""Adds a new entry to the command dictionary.
Args:
command (str) : String with the command label.
command_cb (:any:`function`) : Function with the command callback.
Returns:
bool : True if command was added.
"""
if self.is_command(command):
LOGGER.warning('[{}] Command [{}] already present.'.format(MODULE, command))
self.__commands[command] = (command_cb, desc)
# At this point, inject the context in every argument attributes using
# command_cb.func._arguments._arguments[#].completer. That should work
# only for those with _arguments attribute inside command_cb.func.
if hasattr(command_cb, 'func') and hasattr(command_cb.func, ARGOS_ATTR):
for argument in getattr(command_cb.func, ARGOS_ATTR).arguments:
argument.journal = self.journal
return True
def exec_command(self, command, user_input):
"""Executes the command callback for the given command label.
Args:
command (str) : Command label for the command to execute.
user_input (str) : String with the command line input.
Returns:
object : value returned by the command callback.
"""
command_cb = self.get_command_cb(command)
if command_cb:
return command_cb(user_input)
def empty_line(self):
"""Method that don't provide any action when <CR> is entered in an
empty line.
By default, the same command is executed when just <CR> is entered,
but we don't want that behavior.
Returns:
:any:`None`
"""
pass
def precmd(self, command, line):
"""Method to be called before any command is being processed.
Args:
command (str) : String with new command entered.
line (str): string entered in the command line.
Returns:
bool : False will skip command execution.
"""
return True
def onecmd(self, line):
"""Method to be called when any command is being processed.
Args:
line (str): string entered in the command line.
Returns:
bool : False will exit command loop.
"""
return True
def postcmd(self, command, line):
"""Method to be called after any command is being processed.
Args:
command (str) : String with new command entered.
line (str): string entered in the command line.
Returns:
bool : False will exit command loop.
"""
return True
def get_bottom_toolbar_tokens(self, cli):
"""Method that provides data and format to be displayed in the ToolBar.
Args:
cli (:class:`CommandLineInterface`) : CommandLineInterface instance.
Returns:
:any:`list` : list with data to be displayed in the ToolBar.
"""
return [(Token.Toolbar, '{}'.format(self.toolbar_str)), ]
def get_rprompt_tokens(self, cli):
"""Returns tokens for command line right prompt.
Args:
cli (:class:`CommandLineInterface`) : CommandLineInterface instance.
Returns:
            :any:`list` : list with data to be displayed in the right prompt.
"""
return [(Token.RPrompt, '{}'.format(self.rprompt_str)), ]
def get_prompt_tokens(self, cli):
"""Returns tokens for command line prompt.
Args:
cli (:class:`CommandLineInterface`) : CommandLineInterface instance.
Returns:
:any:`list` : list with data to be displayed in the prompt.
"""
return [(Token.Prompt, '{}'.format(self.prompt_str)), ]
def extend_commands_from_class(self, classname):
"""Extends commands defined in a class to be included in the full
command line.
This is required only for commands defined in a class that is being
derived, and the derived class is the one being used in the command
line. This method allows to include all commands from the base
class.
Args:
            classname (str) : String with class name for the class whose\
                methods should be imported.
Returns:
None
"""
for name, func_cb, desc in self._WALL.get(classname, []):
self.add_command(name, partial(func_cb, self), desc)
def setup_commands(self):
"""Register all commands to be used by the command line interface.
Returns:
None
"""
classname = self.__class__.__name__
calls = self._WALL.get(classname, [])
for name, func_cb, desc in calls:
LOGGER.debug('{0}::setup_commands add command {1}::{2}'.format(classname, name, func_cb))
self.add_command(name, partial(func_cb, self), desc)
def run_prompt(self, **kwargs):
"""Execute the command line.
Args:
prompt (:any:`str` or :any:`function`) : string or callback with prompt value
toolbar (:any:`str` or :any:`function`) : string or callback with toolbar value.
rprompt (:any:`str` or :any:`function`) : string or callback with right prompt value.
Returns:
str : String with the input entered by the user.
"""
toolbar = kwargs.get('toolbar', 'Enter a valid command')
self.toolbar_str = toolbar if isinstance(toolbar, str) else toolbar()
_prompt = kwargs.get('prompt', self.prompt_str)
self.prompt_str = _prompt if isinstance(_prompt, str) else _prompt()
rprompt = kwargs.get('rprompt', None)
if rprompt is not None:
self.rprompt_str = rprompt if isinstance(rprompt, str) else rprompt()
user_input = prompt(history=FileHistory('history.txt'),
auto_suggest=AutoSuggestFromHistory(),
completer=CliBase.CliCompleter(self),
# lexer=SqlLexer,
get_bottom_toolbar_tokens=self.get_bottom_toolbar_tokens,
get_rprompt_tokens=self.get_rprompt_tokens,
get_prompt_tokens=self.get_prompt_tokens,
style=self.CLI_STYLE,
# validator=CliValidator(),
refresh_interval=1)
return user_input
def start_recording(self):
"""Starts recording commands input in the command line.
Returns:
None
"""
self.__recording = True
def stop_recording(self):
"""Stops recording commands input in the command line.
Returns:
None
"""
self.__recording = False
if self.__record_data:
del self.__record_data[-1]
def clear_recording(self, from_record=None, to_record=None):
"""Clears the range of records recorded from the given range.
Args:
from_record (int) : First record to clear. Set to 0 if None.
to_record (int): Last record to clear. Set to last if None
"""
if from_record is None and to_record is None:
self.__record_data.clear()
elif from_record is None and to_record is not None:
if to_record < len(self.__record_data):
del self.__record_data[:to_record + 1]
elif from_record is not None and to_record is None:
if 0 <= from_record <= len(self.__record_data):
del self.__record_data[from_record:]
elif (0 <= from_record <= len(self.__record_data)) and\
to_record < len(self.__record_data) and\
from_record <= to_record:
del self.__record_data[from_record:to_record + 1]
else:
pass
def select_recording(self, from_record=None, to_record=None):
"""Selects the range of records recorded from the given range.
Args:
from_record (int) : First record to select. Set to 0 if None.
to_record (int): Last record to select. Set to last if None
Returns:
list : List of selected records.
"""
if from_record is None and to_record is None:
return self.__record_data
elif from_record is None and to_record is not None:
if to_record < len(self.__record_data):
return self.__record_data[:to_record + 1]
elif from_record is not None and to_record is None:
if 0 <= from_record <= len(self.__record_data):
return self.__record_data[from_record:]
elif (0 <= from_record <= len(self.__record_data)) and\
to_record < len(self.__record_data) and\
from_record < to_record:
return self.__record_data[from_record:to_record + 1]
else:
return []
def display_recording(self, from_record=None, to_record=None):
"""Displays the range of records recorded from the given range.
Args:
from_record (int) : First record to display. Set to 0 if None.
to_record (int): Last record to display. Set to last if None
Returns:
None
"""
records = self.select_recording(from_record, to_record)
for i, record in enumerate(records):
LOGGER.display('{0}: {1}'.format(i, record))
def save_recording(self, filename, from_record=None, to_record=None):
"""
"""
records = self.select_recording(from_record, to_record)
to_save = []
for record in records:
to_save.append({'command': record})
if to_save:
with open(filename, 'w') as f:
json.dump(to_save, f)
def record_command(self, user_input):
"""Saves in a JSON file the range of records recorded from the given
range.
Args:
from_record (int) : First record to save. Set to 0 if None.
to_record (int): Last record to save. Set to last if None
Returns:
None
"""
if self.__recording:
self.__record_data.append(user_input)
def exec_user_input(self, user_input, **kwargs):
"""Executes the string with the user input.
Args:
user_input (str) : String with the input entered by the user.
Keyword Args:
            precmd (bool) : True if precmd should be called.
postcmd (bool) : True if postcmd should be called.
Returns:
            bool : True if the application should continue, False otherwise.
"""
pre_return = True
cb_return = True
post_return = True
if user_input:
line_as_list = user_input.split()
if len(line_as_list) == 0:
return True
command = line_as_list[0]
if self.is_command(command):
if kwargs.get('precmd', False):
pre_return = self.precmd(command, user_input)
# precmd callback return value can be used to skip command
            # if it returns False.
if pre_return:
self.record_command(user_input)
cb_return = self.exec_command(command, ' '.join(line_as_list[1:]))
# postcmd callback return value can be used to exit the
            # command loop if it returns False.
if kwargs.get('postcmd', False):
post_return = self.postcmd(command, user_input)
self.last_cmd = user_input
else:
post_return = self.onecmd(user_input)
return post_return if cb_return is not False else cb_return
def cmdloop(self, **kwargs):
"""Method that is called to wait for any user input.
Keyword Args:
prompt (:any:`str` or :any:`function`) : string or callback with prompt value
toolbar (:class:`str` or :any:`function`) : string or callback with toolbar value.
rprompt (:any:`str` or :any:`function`) : string or callback with right prompt value.
            echo (bool) : True if command should be echoed.
            precmd (bool) : True if precmd should be called.
postcmd (bool) : True if postcmd should be called.
Returns:
None
"""
while True:
user_input = self.run_prompt(**kwargs)
if kwargs.get('echo', False):
LOGGER.display(user_input)
if not self.exec_user_input(user_input, **kwargs):
return
def run(self, **kwargs):
"""Runs the command line interface for the given cli class.
Keyword Args:
prompt (:any:`str` or :any:`function`) : string or callback with prompt value
toolbar (:class:`str` or :any:`function`) : string or callback with toolbar value.
rprompt (:any:`str` or :any:`function`) : string or callback with right prompt value.
            echo (bool) : True if command should be echoed.
            precmd (bool) : True if precmd should be called.
postcmd (bool) : True if postcmd should be called.
Returns:
None
"""
try:
self.cmdloop(**kwargs)
except KeyboardInterrupt:
LOGGER.display("")
pass
def run_mode(self, **kwargs):
"""Enters in a new mode.
In a new mode, parent commands are not available and the new scope
is for commands defined in the created mode.
Returns:
None
"""
mode_name = self.__class__.__name__
CliBase.__MODES.append(mode_name)
self.run(**kwargs)
def leave_mode(self, **kwargs):
"""Exits the running mode.
Returns:
str : Mode exiting name.
"""
if CliBase.__MODES:
return CliBase.__MODES.pop()
return None
def load_commands_from_json(self, json_data):
"""Loads CLI commands from a JSON variable.
The content of the JSON data should be a list of dictionaries, where
every dictionary at least shoudl contain a field called 'command'
which will contains the command to be executed.
Args:
json_data (json) : Variable with JSON data.
Returns:
None
"""
lista = json.loads(json_data)
for entry in lista:
self.exec_user_input(entry['command'])
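    # Example of the JSON payload expected by load_commands_from_json
    # (illustrative only; the commands are application specific):
    #
    #     [{"command": "greet world"}, {"command": "exit"}]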
def load_commands_from_file(self, filename):
"""Loads a file with the given filename with CLI commands in
JSON format.
Args:
filename (string) : String with the filename that contains\
the json data.
Returns:
None
"""
try:
with open(filename, 'r') as f:
data = json.load(f)
self.load_commands_from_json(json.dumps(data))
except OSError:
LOGGER.error('File not found {}'.format(filename), out=True)
@staticmethod
def command(label=None, desc=None):
"""Decorator that setup a function as a command.
Args:
label (str) : command label that identifies the command in the\
command line (optional). If no value is entered, label is\
taken from the @syntax decorator.
desc (str) : command description (optional).
Returns:
func : Function wrapper.
"""
def f_command(f):
@wraps(f)
def _wrapper(self, *args, **kwargs):
return f(self, *args, **kwargs)
LOGGER.debug(f, "YELLOW")
module_name = sys._getframe(1).f_code.co_name
CliBase._WALL.setdefault(module_name, [])
if desc is not None:
_desc = desc
else:
# if the wrapper is not a <method> or a <function> it is a
# partial function, so the __doc__ is inside 'func' attribute.
if inspect.ismethod(_wrapper) or inspect.isfunction(_wrapper):
_desc = _wrapper.__doc__
else:
_desc = _wrapper.func.__doc__
label_from_syntax = getattr(f, SYNTAX_ATTR, None)
_label = f.__name__ if label_from_syntax is None else label_from_syntax.split()[0]
CliBase._WALL[module_name].append((label if label else _label, _wrapper, _desc))
return _wrapper
return f_command
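# Illustrative usage sketch (hypothetical subclass, not part of this module):
#
#     class MyCli(CliBase):
#
#         @CliBase.command(label='greet', desc='Greet somebody by name.')
#         def do_greet(self, line):
#             LOGGER.display('Hello {}!'.format(line))
#
#     MyCli().run(prompt='my-cli> ', echo=True)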
| mit | 9,156,060,067,053,533,000 | 36.131728 | 117 | 0.529239 | false |
cdeboever3/WASP | mapping/test_find_intersecting_snps.py | 1 | 14776 | import glob
import gzip
import os
import subprocess
from find_intersecting_snps import *
def read_bam(bam):
"""
Read a bam file into a list where each element of the list is a line from
the bam file (with the newline stripped). The header is discarded.
"""
res = subprocess.check_output('samtools view {}'.format(bam), shell=True)
return res.strip().split('\n')
def cleanup():
fns = (glob.glob('test_data/test*.keep.bam') +
glob.glob('test_data/test*.remap.fq*.gz') +
glob.glob('test_data/test*.to.remap.bam') +
glob.glob('test_data/test*.to.remap.num.gz'))
[os.remove(x) for x in fns]
class TestSNP:
def test_init(self):
"""Test to see whether __init__ is working as expected."""
snp = SNP('12670\tG\tC\n')
assert snp.pos == 12670 - 1
assert snp.alleles == ['G', 'C']
assert snp.ptype == 'snp'
assert snp.max_len == 1
def test_add_allele(self):
"""Test to see whether we can add an allele."""
snp = SNP('12670\tG\tC\n')
snp.add_allele(['A'])
assert snp.alleles == ['G', 'C', 'A']
def test_add_allele_multiple(self):
"""Test to see whether we can add multiple alleles."""
snp = SNP('12670\tG\tC\n')
snp.add_allele(['A', 'T'])
assert snp.alleles == ['G', 'C', 'A', 'T']
# TODO: tests for adding insertions and deletions
class TestBamScanner:
def test_init_single(self):
is_paired_end = False
max_window = 100000
pref = 'test_data/test_single'
file_name = pref + ".sort.bam"
keep_file_name = pref + ".keep.bam"
remap_name = pref + ".to.remap.bam"
remap_num_name = pref + ".to.remap.num.gz"
fastq_names = [pref + ".remap.fq.gz"]
snp_dir = 'test_data/snps'
bs = BamScanner(is_paired_end, max_window, file_name, keep_file_name,
remap_name, remap_num_name, fastq_names, snp_dir)
assert bs.max_window == len(bs.snp_table)
cleanup()
def test_init_paired(self):
is_paired_end = True
max_window = 100000
pref = 'test_data/test_paired'
file_name = pref + ".sort.bam"
keep_file_name = pref + ".keep.bam"
remap_name = pref + ".to.remap.bam"
remap_num_name = pref + ".to.remap.num.gz"
fastq_names = [pref + ".remap.fq1.gz",
pref + ".remap.fq2.gz"]
snp_dir = 'test_data/snps'
bs = BamScanner(is_paired_end, max_window, file_name, keep_file_name,
remap_name, remap_num_name, fastq_names, snp_dir)
assert bs.max_window == len(bs.snp_table)
cleanup()
def test_simple_single(self):
is_paired_end = False
max_window = 100000
pref = 'test_data/test_single'
file_name = pref + ".sort.bam"
keep_file_name = pref + ".keep.bam"
remap_name = pref + ".to.remap.bam"
remap_num_name = pref + ".to.remap.num.gz"
fastq_names = [pref + ".remap.fq.gz"]
snp_dir = 'test_data/snps'
bs = BamScanner(is_paired_end, max_window, file_name, keep_file_name,
remap_name, remap_num_name, fastq_names, snp_dir)
bs.run()
# Verify fastq is correct. The second base of the first read should be
# switched from a C to an A. (14538 C A)
seq = ('CATCAAGCCAGCCTTCCGCTCCTTGAAGCTGGTCTCCACACAGTGCTGGTTCCGTCACCCCC'
'TCCCAAGGAAGTAGGTCTGAGCAGCTTGTCCTGGCTGT')
qual = ('BBBBBFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
'FFFFFBFF<FFFFFFFFFBFFFFFFFFFFFFFFFFFFF')
with gzip.open('test_data/test_single.remap.fq.gz') as f:
lines = [x.strip() for x in f.readlines()]
assert len(lines) == 4
assert lines[1] == seq
assert lines[3] == qual
# Verify to.remap bam is the same as the input bam file.
old_lines = read_bam('test_data/test_single.sort.bam')
new_lines = read_bam('test_data/test_single.to.remap.bam')
assert old_lines == new_lines
cleanup()
def test_simple_paired(self):
is_paired_end = True
max_window = 100000
pref = 'test_data/test_paired'
file_name = pref + ".sort.bam"
keep_file_name = pref + ".keep.bam"
remap_name = pref + ".to.remap.bam"
remap_num_name = pref + ".to.remap.num.gz"
fastq_names = [pref + ".remap.fq1.gz",
pref + ".remap.fq2.gz"]
snp_dir = 'test_data/snps'
bs = BamScanner(is_paired_end, max_window, file_name, keep_file_name,
remap_name, remap_num_name, fastq_names, snp_dir)
bs.run()
# The second base should be switched from a C to an A.
# 14538 C A
seq = ('CATCAAGCCAGCCTTCCGCTCCTTGAAGCTGGTCTCCACACAGTGCTGGTTCCGTCACCCCC'
'TCCCAAGGAAGTAGGTCTGAGCAGCTTGTCCTGGCTGT')
qual = ('BBBBBFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
'FFFFFBFF<FFFFFFFFFBFFFFFFFFFFFFFFFFFFF')
with gzip.open('test_data/test_paired.remap.fq1.gz') as f:
lines = [x.strip() for x in f.readlines()]
assert len(lines) == 4
assert lines[1] == seq
assert lines[3] == qual
# Shouldn't be any changes to the second read.
seq = ('TCATGGAGCCCCCTACGATTCCCAGTCGTCCTCGTCCTCCTCTGCCTGTGGCTGCTGCGGTGG'
'CGGCAGAGGAGGGATGGAGTCTGACACGCGGGCAAAG')
qual = ('B//FF77BB<7/BB<7FBFFF<</FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFBBBBB')
with gzip.open('test_data/test_paired.remap.fq2.gz') as f:
lines = [x.strip() for x in f.readlines()]
assert len(lines) == 4
assert lines[1] == bs.reverse_complement(seq)
assert lines[3] == qual
# Verify to.remap bam is the same as the input bam file.
old_lines = read_bam('test_data/test_paired.sort.bam')
new_lines = read_bam('test_data/test_paired.to.remap.bam')
assert old_lines == new_lines
cleanup()
def test_two_snps_single(self):
is_paired_end = False
max_window = 100000
pref = 'test_data/test_single'
file_name = pref + ".sort.bam"
keep_file_name = pref + ".keep.bam"
remap_name = pref + ".to.remap.bam"
remap_num_name = pref + ".to.remap.num.gz"
fastq_names = [pref + ".remap.fq.gz"]
snp_dir = 'test_data/two_snps'
bs = BamScanner(is_paired_end, max_window, file_name, keep_file_name,
remap_name, remap_num_name, fastq_names, snp_dir)
bs.run()
# Verify fastq is correct. The second base of the first read should be
# switched from a C to an A and the third base of the first read should
# be switched from T to G. (14538 C A, 14539 T G)
with gzip.open('test_data/test_single.remap.fq.gz') as f:
lines = [x.strip() for x in f.readlines()]
assert len(lines) == 12
seq = ('CATCAAGCCAGCCTTCCGCTCCTTGAAGCTGGTCTCCACACAGTGCTGGTTCCGTCACCCCC'
'TCCCAAGGAAGTAGGTCTGAGCAGCTTGTCCTGGCTGT')
assert lines[1] == seq
seq = ('CCGCAAGCCAGCCTTCCGCTCCTTGAAGCTGGTCTCCACACAGTGCTGGTTCCGTCACCCCC'
'TCCCAAGGAAGTAGGTCTGAGCAGCTTGTCCTGGCTGT')
assert lines[5] == seq
seq = ('CAGCAAGCCAGCCTTCCGCTCCTTGAAGCTGGTCTCCACACAGTGCTGGTTCCGTCACCCCC'
'TCCCAAGGAAGTAGGTCTGAGCAGCTTGTCCTGGCTGT')
assert lines[9] == seq
qual = ('BBBBBFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
'FFFFFBFF<FFFFFFFFFBFFFFFFFFFFFFFFFFFFF')
for i in [3, 7, 11]:
assert lines[i] == qual
# Verify to.remap bam is the same as the input bam file.
old_lines = read_bam('test_data/test_single.sort.bam')
new_lines = read_bam('test_data/test_single.to.remap.bam')
assert old_lines == new_lines
cleanup()
def test_two_snps_paired(self):
is_paired_end = True
max_window = 100000
pref = 'test_data/test_paired'
file_name = pref + ".sort.bam"
keep_file_name = pref + ".keep.bam"
remap_name = pref + ".to.remap.bam"
remap_num_name = pref + ".to.remap.num.gz"
fastq_names = [pref + ".remap.fq1.gz",
pref + ".remap.fq2.gz"]
snp_dir = 'test_data/two_snps'
bs = BamScanner(is_paired_end, max_window, file_name, keep_file_name,
remap_name, remap_num_name, fastq_names, snp_dir)
bs.run()
# Verify fastq is correct. The second base of the first read should be
# switched from a C to an A and the third base of the first read should
# be switched from T to G. (14538 C A, 14539 T G)
with gzip.open('test_data/test_paired.remap.fq1.gz') as f:
lines = [x.strip() for x in f.readlines()]
assert len(lines) == 12
seq = ('CATCAAGCCAGCCTTCCGCTCCTTGAAGCTGGTCTCCACACAGTGCTGGTTCCGTCACCCCC'
'TCCCAAGGAAGTAGGTCTGAGCAGCTTGTCCTGGCTGT')
assert lines[1] == seq
seq = ('CCGCAAGCCAGCCTTCCGCTCCTTGAAGCTGGTCTCCACACAGTGCTGGTTCCGTCACCCCC'
'TCCCAAGGAAGTAGGTCTGAGCAGCTTGTCCTGGCTGT')
assert lines[5] == seq
seq = ('CAGCAAGCCAGCCTTCCGCTCCTTGAAGCTGGTCTCCACACAGTGCTGGTTCCGTCACCCCC'
'TCCCAAGGAAGTAGGTCTGAGCAGCTTGTCCTGGCTGT')
assert lines[9] == seq
qual = ('BBBBBFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
'FFFFFBFF<FFFFFFFFFBFFFFFFFFFFFFFFFFFFF')
for i in [3, 7, 11]:
assert lines[i] == qual
# Shouldn't be any changes to the second read.
seq = ('TCATGGAGCCCCCTACGATTCCCAGTCGTCCTCGTCCTCCTCTGCCTGTGGCTGCTGCGGTGG'
'CGGCAGAGGAGGGATGGAGTCTGACACGCGGGCAAAG')
qual = ('B//FF77BB<7/BB<7FBFFF<</FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFBBBBB')
with gzip.open('test_data/test_paired.remap.fq2.gz') as f:
lines = [x.strip() for x in f.readlines()]
assert len(lines) == 12
for i in [1, 5, 9]:
assert lines[i] == bs.reverse_complement(seq)
assert lines[i + 2] == qual
# Verify to.remap bam is the same as the input bam file.
old_lines = read_bam('test_data/test_paired.sort.bam')
new_lines = read_bam('test_data/test_paired.to.remap.bam')
assert old_lines == new_lines
cleanup()
def test_issue_18(self):
"""
This was reported as a bug because one read pair that overlaps one SNP
was resulting in multiple pairs of reads in the fastq files. However, it
is not a bug because the reads overlap and both reads overlap the SNP.
"""
is_paired_end = True
max_window = 100000
file_name = 'test_data/issue_18.bam'
pref = 'test_data/test_paired'
keep_file_name = pref + ".keep.bam"
remap_name = pref + ".to.remap.bam"
remap_num_name = pref + ".to.remap.num.gz"
fastq_names = [pref + ".remap.fq1.gz",
pref + ".remap.fq2.gz"]
snp_dir = 'test_data/issue_18_snps'
bs = BamScanner(is_paired_end, max_window, file_name, keep_file_name,
remap_name, remap_num_name, fastq_names, snp_dir)
bs.run()
# Verify fastq are correct.
with gzip.open('test_data/test_paired.remap.fq1.gz') as f:
lines = [x.strip() for x in f.readlines()]
assert len(lines) == 12
seq = ('CGAGCGCTCACTCAATATCACGAGAACAGCAAGGGGGAAGTCGGCCCCCANGAGCCAATNACC'
'TCCCANNNGGTCCCTCCCACAACACTGGGAATTACAA')
assert lines[1] == seq
seq = ('CGAGCGCTCACTCAATATCACAAGAACAGCAAGGGGGAAGTCGGCCCCCANGAGCCAATNACC'
'TCCCANNNGGTCCCTCCCACAACACTGGGAATTACAA')
assert lines[5] == seq
seq = ('CGAGCGCTCACTCAATATCACAAGAACAGCAAGGGGGAAGTCGGCCCCCANGAGCCAATNACC'
'TCCCANNNGGTCCCTCCCACAACACTGGGAATTACAA')
assert lines[9] == seq
qual = ('</<<<FBFFFFFFFFFFFBBFFFFFFFF/BFFFFF/BF<BBF//FBFFFB#<<<BFF//#<<'
'FBFBF/###<7<<FFFFFFFFFFFF<<B//B7F7BBFB')
for i in [3, 7, 11]:
assert lines[i] == qual
with gzip.open('test_data/test_paired.remap.fq2.gz') as f:
lines = [x.strip() for x in f.readlines()]
assert len(lines) == 12
seq = ('ATCACAAGAACAGCAAGGGGGAAGTCGGCCCCCATGAGCCAATCACCTCCCACCAGGTCCCTC'
'CCACAACACTGGGAATTACAATTTNACATNACATTTG')
assert lines[1] == bs.reverse_complement(seq)
seq = ('ATCACGAGAACAGCAAGGGGGAAGTCGGCCCCCATGAGCCAATCACCTCCCACCAGGTCCCTC'
'CCACAACACTGGGAATTACAATTTNACATNACATTTG')
assert lines[5] == bs.reverse_complement(seq)
seq = ('ATCACAAGAACAGCAAGGGGGAAGTCGGCCCCCATGAGCCAATCACCTCCCACCAGGTCCCTC'
'CCACAACACTGGGAATTACAATTTNACATNACATTTG')
assert lines[9] == bs.reverse_complement(seq)
qual = ('#B<BBFFFFFFFFFFFFFFFF<FFFFBFFFFFFFFF<F/FFFFF<FFFFFFFF<FFFFFFBF'
'F<<FFFFFFFFFFFFFFFFFFFB<B#F<<<#FFBBBBB')
for i in [3, 7, 11]:
assert lines[i] == qual
cleanup()
class TestCLI:
def test_simple_single_cli(self):
"""This test is to make sure the cli functions."""
pref = 'test_data/test_single'
c = ('python find_intersecting_snps.py {}.sort.bam '
'test_data/snps'.format(pref))
subprocess.check_call(c, shell=True)
# file_name = pref + ".sort.bam"
# keep_bam = pref + '_filtered.bam'
# run(remap_name, 'test_data/test_single.remapped.bam', keep_bam,
# remap_num_name, is_paired_end)
# lines = read_bam(keep_bam)
# assert len(lines) == 1
seq = ('CATCAAGCCAGCCTTCCGCTCCTTGAAGCTGGTCTCCACACAGTGCTGGTTCCGTCACCCCC'
'TCCCAAGGAAGTAGGTCTGAGCAGCTTGTCCTGGCTGT')
qual = ('BBBBBFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
'FFFFFBFF<FFFFFFFFFBFFFFFFFFFFFFFFFFFFF')
with gzip.open('test_data/test_single.sort.remap.fq.gz') as f:
lines = [x.strip() for x in f.readlines()]
assert len(lines) == 4
assert lines[1] == seq
assert lines[3] == qual
# Verify to.remap bam is the same as the input bam file.
old_lines = read_bam('test_data/test_single.sort.sort.bam')
new_lines = read_bam('test_data/test_single.sort.to.remap.bam')
assert old_lines == new_lines
cleanup()
| apache-2.0 | 3,816,620,245,912,030,000 | 42.587021 | 80 | 0.597455 | false |
ProjectQ-Framework/ProjectQ | projectq/setups/decompositions/cnot2cz.py | 1 | 1282 | # -*- coding: utf-8 -*-
# Copyright 2018 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Registers a decomposition for a CNOT gate in terms of CZ and Hadamard.
"""
from projectq.cengines import DecompositionRule
from projectq.meta import Compute, get_control_count, Uncompute
from projectq.ops import CZ, H, X
def _decompose_cnot(cmd):
"""Decompose CNOT gates."""
ctrl = cmd.control_qubits
eng = cmd.engine
with Compute(eng):
H | cmd.qubits[0]
CZ | (ctrl[0], cmd.qubits[0][0])
Uncompute(eng)
def _recognize_cnot(cmd):
return get_control_count(cmd) == 1
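# Illustrative usage sketch (assumes the standard ProjectQ decomposition
# machinery; not part of this module): the rule defined below is typically
# consumed through a DecompositionRuleSet, e.g.
#
#     from projectq.cengines import AutoReplacer, DecompositionRuleSet
#     from projectq.setups.decompositions import cnot2cz
#     rule_set = DecompositionRuleSet(modules=[cnot2cz])
#     engine_list = [AutoReplacer(rule_set)]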
#: Decomposition rules
all_defined_decomposition_rules = [DecompositionRule(X.__class__, _decompose_cnot, _recognize_cnot)]
| apache-2.0 | 4,183,703,901,696,003,600 | 31.871795 | 100 | 0.711388 | false |
htmue/python-wishes | vows/test_feature.py | 1 | 15120 | # -*- coding:utf-8 -*-
# Created by Hans-Thomas on 2011-05-15.
#=============================================================================
# test_feature.py --- Wishes feature vows
#=============================================================================
from __future__ import unicode_literals
import mock
import six
from should_dsl import should
from wishes.compat import unittest
from wishes.feature import FeatureTest, step, StepDefinition, World
from wishes.loader import load_feature
class FeatureVows(unittest.TestCase):
def setUp(self):
StepDefinition.clear()
def test_can_run_feature(self):
@step('there is a step')
def there_is_a_step(step):
my_world.there_is_a_step = True
@step('another step')
def another_step(step):
my_world.another_step = True
@step('steps afterwards')
def steps_afterwards(step):
my_world.steps_afterwards = True
feature = load_feature('''
Feature: run a feature
Scenario: some steps
Given there is a step
And another step
When I add something undefined
Then steps afterwards are not run
''')
my_world = World()
my_world.there_is_a_step = False
my_world.another_step = False
my_world.steps_afterwards = False
result = unittest.TestResult()
feature.run(result)
len(result.skipped) |should| be(1)
result.skipped[0][1] |should| start_with('pending 1 step(s):')
run = my_world.there_is_a_step, my_world.another_step, my_world.steps_afterwards
run |should| be_equal_to((True, True, False))
def test_can_run_feature_with_background(self):
@step('background step')
def background_step(step):
my_world.background_step += 1
@step('scenario step ([0-9]+)')
def scenario_step_number(step, number):
my_world.steps_run.append(int(number))
feature = load_feature('''
Feature: with background
Background: present
Given a background step
Scenario: with background 1
And a scenario step 1
Scenario: with background 2
And a scenario step 2
''')
my_world = World()
my_world.background_step = 0
my_world.steps_run = []
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(2)
result.wasSuccessful() |should| be(True)
my_world.background_step |should| be(2)
my_world.steps_run |should| be_equal_to([1, 2])
def test_can_run_feature_with_multiple_backgrounds(self):
@step('background step ([0-9]+)')
def background_step_number(step, number):
my_world.background_number = number
@step('scenario step ([0-9]+)')
def scenario_step_number(step, number):
my_world.background_number |should| be_equal_to(number)
my_world.steps_run.append(int(number))
feature = load_feature('''
Feature: with background
Background: 1 present
Given a background step 1
Scenario: with background 1
And a scenario step 1
Background: 2 present
Given a background step 2
Scenario: with background 2
And a scenario step 2
''')
my_world = World()
my_world.steps_run = []
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(2)
result.wasSuccessful() |should| be(True)
my_world.steps_run |should| be_equal_to([1, 2])
def test_can_run_feature_with_multiline_step(self):
@step('multiline step')
def multiline_step(step):
my_world.multiline = step.multiline
feature = load_feature('''
        Feature: with multiline scenario
Scenario: with multiline step
Given a multiline step
"""
multiline content
"""
''')
my_world = World()
my_world.multiline = None
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(1)
result.wasSuccessful() |should| be(True)
my_world.multiline |should| be_equal_to('multiline content\n')
def test_can_run_feature_with_hashes_step(self):
@step('step with hashes')
def step_with_hashes(step):
my_world.hashes = step.hashes
feature = load_feature('''
        Feature: with multiline scenario
Scenario: with multiline step
Given a step with hashes
| first | second | third |
| first 1 | second 1 | third 1 |
| first 2 | second 2 | third 2 |
''')
my_world = World()
my_world.hashes = None
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(1)
result.wasSuccessful() |should| be(True)
list(my_world.hashes) |should| each_be_equal_to([
dict(first='first 1', second='second 1', third='third 1'),
dict(first='first 2', second='second 2', third='third 2'),
])
def test_can_run_feature_with_hashes_in_background_step(self):
@step('step with hashes')
def step_with_hashes(step):
my_world.hashes = step.hashes
@step('here it is')
def here_it_is(step):
pass
feature = load_feature('''
        Feature: with multiline scenario
Background: with multiline step
Given a step with hashes
| first | second | third |
| first 1 | second 1 | third 1 |
| first 2 | second 2 | third 2 |
Scenario: with defined step
And here it is
''')
my_world = World()
my_world.hashes = None
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(1)
result.wasSuccessful() |should| be(True)
my_world.hashes |should| each_be_equal_to([
dict(first='first 1', second='second 1', third='third 1'),
dict(first='first 2', second='second 2', third='third 2'),
])
def test_can_run_feature_with_scenario_outline_and_examples(self):
@step('a (.*) with (.*)')
def a_key_with_value(step, key, value):
my_world.run.append((key, value))
feature = load_feature('''
        Feature: with multiline scenario
Scenario Outline: follows
Given a <key> with <value>
Examples:
| key | value |
| key 1 | value 1 |
| key 2 | value 2 |
''')
my_world = World()
my_world.run = []
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(2)
result.wasSuccessful() |should| be(True)
my_world.run |should| each_be_equal_to([
('key 1', 'value 1'),
('key 2', 'value 2'),
])
def test_can_run_feature_with_scenario_outline_with_multiline(self):
@step('a multiline')
def a_multiline(step):
my_world.run.append(step.multiline)
feature = load_feature('''
            Feature: with multiline scenario
Scenario Outline: follows
Given a multiline
"""
with <placeholder>
"""
Examples:
| <placeholder> |
| first |
| second |
''')
my_world = World()
my_world.run = []
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(2)
result.wasSuccessful() |should| be(True)
my_world.run |should| each_be_equal_to([
'with first\n',
'with second\n',
])
def test_can_run_feature_with_scenario_outline_with_hashes(self):
@step('a hash')
def a_hash(step):
my_world.run.append(list(step.hashes))
feature = load_feature('''
            Feature: with multiline scenario
Scenario Outline: follows
Given a hash
| <key> | value |
| the | <placeholder> |
Examples:
| <key> | <placeholder> |
| key | first |
| but | second |
''')
my_world = World()
my_world.run = []
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(2)
result.wasSuccessful() |should| be(True)
my_world.run |should| each_be_equal_to([
[dict(key='the', value='first')],
[dict(but='the', value='second')],
])
def test_can_run_feature_with_scenario_outline_with_background(self):
@step('a (.*)')
def a_something(step, value):
my_world.run.append(value)
feature = load_feature('''
            Feature: with multiline scenario
Background: with placeholder
Given a <placeholder>
Scenario Outline: follows
And a step
Examples:
| <placeholder> |
| first |
| second |
''')
my_world = World()
my_world.run = []
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(2)
result.wasSuccessful() |should| be(True)
my_world.run |should| each_be_equal_to([
'first', 'step', 'second', 'step',
])
def run_feature_with_result_step_handlers(self, feature, *handlers):
result = unittest.TestResult()
for handler in ['startStep', 'stopStep'] + list(handlers):
setattr(result, handler, mock.Mock(handler))
feature.run(result)
result.testsRun |should| be(1)
result.startStep.call_count |should| be(1)
result.stopStep.call_count |should| be(1)
return result
def test_reports_steps_to_result_object(self):
@step('some step')
def some_step(step):
pass
feature = load_feature('''
Feature: report steps
Scenario: with a step
Given there is some step
''')
result = self.run_feature_with_result_step_handlers(feature)
result.wasSuccessful() |should| be(True)
def test_reports_step_success_to_result_object(self):
@step('some step')
def some_step(step):
pass
feature = load_feature('''
Feature: report steps
Scenario: with a step
Given there is some step
''')
result = self.run_feature_with_result_step_handlers(feature, 'addStepSuccess')
result.wasSuccessful() |should| be(True)
result.addStepSuccess.call_count |should| be(1)
def test_reports_step_failure_to_result_object(self):
@step('some failing step')
def some_step(step):
1 |should| be(2)
feature = load_feature('''
Feature: report steps
Scenario: with a step
Given there is some failing step
''')
result = self.run_feature_with_result_step_handlers(feature, 'addStepFailure')
result.wasSuccessful() |should| be(False)
result.addStepFailure.call_count |should| be(1)
def test_reports_step_error_to_result_object(self):
@step('some error step')
def some_step(step):
raise Exception('hey')
feature = load_feature('''
Feature: report steps
Scenario: with a step
Given there is some error step
''')
result = self.run_feature_with_result_step_handlers(feature, 'addStepError')
result.wasSuccessful() |should| be(False)
result.addStepError.call_count |should| be(1)
def test_reports_undefined_step_to_result_object(self):
feature = load_feature('''
Feature: report steps
Scenario: with a step
Given there is some undefined step
''')
result = self.run_feature_with_result_step_handlers(feature, 'addStepUndefined')
len(result.skipped) |should| be(1)
result.wasSuccessful() |should| be(True)
result.addStepUndefined.call_count |should| be(1)
def test_clears_world_between_scenarios(self):
@step('set a world var')
def set_world(step):
step.world.var = 'set'
@step('check that world var')
def check_var(step):
getattr(step.world, 'var', None) |should| be(None)
feature = load_feature('''
Feature: clears world between scenarios
Scenario: first
When I set a world var
Scenario: second
Then I check that world var
''')
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(2)
result.wasSuccessful() |should| be(True)
def test_makes_itself_accessible_through_world(self):
@step('feature attribute is set to "(.*)"')
def feature_attribute(step, name):
step.world.feature |should| be_instance_of(FeatureTest)
step.world.feature.__class__.__name__ |should| be_equal_to(name)
feature = load_feature('''
Feature: accessible through world
Scenario: test
Then the feature attribute is set to "Feature_accessible_through_world"
''')
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(1)
result.wasSuccessful() |should| be(True)
def test_can_provide_custom_world_class(self):
class MyWorld(World):
pass
class MyFeature(unittest.TestCase):
World = MyWorld
@step('world is an instance of the MyWorld class')
def world_is_instance_of(step):
step.world |should| be_instance_of(MyWorld)
feature = load_feature('''
Feature: custom world class
Scenario: test
Then world is an instance of the MyWorld class
''', test_case_class=MyFeature)
result = unittest.TestResult()
feature.run(result)
result.testsRun |should| be(1)
result.wasSuccessful() |should| be(True)
def test_has_shortDescription_when_empty(self):
feature = load_feature('Feature: empty')
test = six.next(iter(feature))
test.shortDescription() |should| be_equal_to('Feature: empty')
#.............................................................................
# test_feature.py
| unlicense | 552,839,859,071,830,500 | 33.599542 | 88 | 0.539881 | false |
haphaeu/yoshimi | EulerProject/061.py | 1 | 5270 | '''
THIS CODE IS WRONG - LOOK TO THE .C ONE
Problem 61
16 January 2004
Triangle, square, pentagonal, hexagonal, heptagonal, and octagonal numbers are
all figurate (polygonal) numbers and are generated by the following formulae:
Triangle P3,n = n(n+1)/2 1, 3, 6, 10, 15, ...
Square P4,n = n^2 1, 4, 9, 16, 25, ...
Pentagonal P5,n = n(3n-1)/2 1, 5, 12, 22, 35, ...
Hexagonal P6,n = n(2n-1) 1, 6, 15, 28, 45, ...
Heptagonal P7,n = n(5n-3)/2 1, 7, 18, 34, 55, ...
Octagonal P8,n = n(3n-2) 1, 8, 21, 40, 65, ...
The ordered set of three 4-digit numbers: 8128, 2882, 8281, has three
interesting properties.
1.The set is cyclic, in that the last two digits of each number is the first
two digits of the next number (including the last number with the first).
2.Each polygonal type: triangle (P3,127=8128), square (P4,91=8281), and
pentagonal (P5,44=2882), is represented by a different number in the set.
3.This is the only set of 4-digit numbers with this property.
Find the sum of the only ordered set of six cyclic 4-digit numbers for which
each polygonal type: triangle, square, pentagonal, hexagonal, heptagonal, and
octagonal, is represented by a different number in the set.
'''
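# Editor's note: a minimal sketch (assumed helper, not used by the search
# below) of the cyclic property described above -- the last two digits of
# each number must equal the first two digits of the next number in the
# ordering, wrapping around. The set-based check() below only compares the
# prefix and suffix sets, which is a weaker condition.
def is_cyclic(nums):
    return all(nums[i] % 100 == nums[(i + 1) % len(nums)] / 100
               for i in range(len(nums)))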
def create_sets():
triang = [n*(n+1)/2 for n in range(1,150) if (n*(n+1)/2 > 999 and n*(n+1)/2 <= 9999) ]
square = [n*n for n in range(1,150) if (n*n > 999 and n*n <= 9999) ]
penta = [n*(3*n-1)/2 for n in range(1,150) if (n*(3*n-1)/2 > 999 and n*(3*n-1)/2 <= 9999) ]
hexa = [n*(2*n-1) for n in range(1,150) if (n*(2*n-1) > 999 and n*(2*n-1) <= 9999) ]
hepta = [n*(5*n-3)/2 for n in range(1,150) if (n*(5*n-3)/2 > 999 and n*(5*n-3)/2 <= 9999) ]
octa = [n*(3*n-2) for n in range(1,150) if (n*(3*n-2) > 999 and n*(3*n-2) <= 9999) ]
return [triang, square, penta, hexa, hepta, octa]
def check(nums,sets):
l1=set([x/100 for x in nums])
l2=set([x-100*(x/100) for x in nums])
if l1==l2:
if check_types(nums,sets): return True
return False
def check_types(nums,sets):
if set(nums) & set(sets[0]) != set() and \
set(nums) & set(sets[1]) != set() and \
set(nums) & set(sets[2]) != set() and \
set(nums) & set(sets[3]) != set() and \
set(nums) & set(sets[4]) != set() and \
set(nums) & set(sets[5]) != set():
return True
return False
from sys import stdout
from time import time
sets=create_sets()
it=len(sets[5])
jt=len(sets[4])
kt=len(sets[3])
intertot=it*jt*kt
print "Octa Hept Hexa"
stt=time()
for i, p5 in enumerate(sets[5]):
for j, p4 in enumerate(sets[4]):
for k, p3 in enumerate(sets[3]):
for p2 in sets[2]:
for p1 in sets[1]:
for p0 in sets[0]:
nums=[p5, p4, p3, p2, p1, p0]
if check(nums,sets):
print nums
et=time()-stt
rt=intertot/(k+1+j*kt+i*jt*kt) * et / 3600
stdout.write("%d %d %d - %.3fh remaining\r" % (p5,p4,p3,rt))
stdout.flush()
'''
Octa Hept Hexa
[1045, 2512, 1225, 4510, 1225, 1225]
[1045, 2512, 1225, 4510, 1225, 5050]
[1045, 2512, 1225, 4510, 1225, 5151]
[1045, 2512, 1225, 4510, 5625, 2556]
[1045, 2512, 2556, 4510, 5625, 1225]
[1045, 2512, 5151, 4510, 1225, 1225]
[1045, 2512, 5151, 4510, 1225, 5050]
[1045, 2512, 5151, 4510, 1225, 5151]
[1045, 2512, 5565, 4510, 1225, 6555]
[1045, 2839, 8128, 4510, 1681, 3916]
[1045, 4141, 2556, 4510, 5625, 2556]
[1045, 4141, 2556, 4510, 5625, 5050]
[1045, 4141, 2556, 4510, 5625, 5151]
[1045, 4141, 5151, 4510, 5041, 5050]
[1045, 4141, 5151, 4510, 5625, 2556]
[1045, 8910, 5151, 4510, 1089, 5050]
[1045, 8910, 5151, 4510, 1089, 5151]
[1045, 8910, 5565, 4510, 1089, 6555]
[1281, 2512, 1225, 2882, 8281, 8128]
[1281, 2512, 8128, 2882, 5625, 8256]
[1281, 2512, 8128, 2882, 8281, 1225]
[2133, 1651, 3321, 1717, 2116, 5151]
[2133, 1651, 3321, 5192, 9216, 3321]
[2133, 1651, 3321, 5192, 9216, 5050]
[2133, 1651, 3321, 5192, 9216, 5151]
[2133, 1651, 5151, 1717, 2116, 3321]
[2133, 1651, 5151, 5192, 9216, 3321]
[2133, 2512, 1225, 1717, 1225, 3321]
[2133, 2512, 3321, 1717, 1225, 1225]
[2133, 2512, 3321, 1717, 1225, 3321]
[2133, 2512, 3321, 1717, 1225, 5050]
[2133, 2512, 3321, 1717, 1225, 5151]
[2133, 2512, 5151, 1717, 1225, 3321]
[2133, 4141, 2556, 1717, 5625, 3321]
[2133, 4141, 3321, 1717, 1764, 6441]
[2133, 4141, 3321, 1717, 3364, 6441]
[2133, 4141, 3321, 1717, 5041, 5050]
[2133, 4141, 3321, 1717, 5625, 2556]
[2133, 4141, 3321, 2882, 8281, 8128]
[2133, 4141, 6441, 1717, 1764, 3321]
[2133, 4141, 6441, 1717, 3364, 3321]
[2133, 4141, 8128, 2882, 8281, 3321]
[2133, 8910, 2145, 4510, 1089, 3321]
[2133, 8910, 3321, 1717, 1089, 3321]
[2133, 8910, 3321, 1717, 1089, 5050]
[2133, 8910, 3321, 1717, 1089, 5151]
[2133, 8910, 3321, 4510, 1089, 2145]
[2133, 8910, 5151, 1717, 1089, 3321]
[2821, 4141, 1128, 1617, 2116, 1711]
[4033, 3367, 1540, 1717, 6724, 2415]
[4033, 3367, 2415, 1717, 6724, 1540]
[4720, 2512, 1225, 1247, 2025, 1225]
[4720, 2512, 1225, 1247, 2025, 5050]
[4720, 2512, 1225, 1247, 2025, 5151]
[4720, 2512, 5151, 1247, 2025, 1225]
[4720, 2512, 5151, 1247, 2025, 5050]
[4720, 2512, 5151, 1247, 2025, 5151]
[4720, 2512, 5565, 1247, 2025, 6555]
[4720, 8037, 5151, 2147, 3721, 2080]
[5985, 2059, 2556, 1520, 5625, 8515]
[8965, 8910, 5151, 5551, 1089, 6555]
[8965, 8910, 5565, 1717, 1089, 6555]
[9633, 3367, 2415, 1717, 6724, 1596]
| lgpl-3.0 | -4,745,148,190,800,327,000 | 34.85034 | 93 | 0.624288 | false |
rolandgeider/OpenSlides | openslides/utils/views.py | 1 | 4506 | from io import BytesIO
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy
from django.views import generic as django_views
from django.views.decorators.csrf import ensure_csrf_cookie
from reportlab.lib.units import cm
from reportlab.platypus import SimpleDocTemplate, Spacer
from rest_framework.response import Response
from rest_framework.views import APIView as _APIView
from .pdf import firstPage, laterPages
View = django_views.View
class SingleObjectMixin(django_views.detail.SingleObjectMixin):
"""
Mixin for single objects from the database.
"""
def dispatch(self, *args, **kwargs):
if not hasattr(self, 'object'):
# Save the object not only in the cache but in the public
# attribute self.object because Django expects this later.
# Because get_object() has an internal cache this line is not a
# performance problem.
self.object = self.get_object()
return super().dispatch(*args, **kwargs)
def get_object(self, *args, **kwargs):
"""
Returns the single object from database or cache.
"""
try:
obj = self._object
except AttributeError:
obj = super().get_object(*args, **kwargs)
self._object = obj
return obj
class CSRFMixin:
"""
Adds the csrf cookie to the response.
"""
@classmethod
def as_view(cls, *args, **kwargs):
view = super().as_view(*args, **kwargs)
return ensure_csrf_cookie(view)
class PDFView(View):
"""
    View to generate a PDF.
"""
filename = ugettext_lazy('undefined-filename')
top_space = 3
document_title = None
required_permission = None
def check_permission(self, request, *args, **kwargs):
"""
Checks if the user has the required permission.
"""
if self.required_permission is None:
return True
else:
return request.user.has_perm(self.required_permission)
def dispatch(self, request, *args, **kwargs):
"""
Check if the user has the permission.
If the user is not logged in, redirect the user to the login page.
"""
if not self.check_permission(request, *args, **kwargs):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get_top_space(self):
return self.top_space
def get_document_title(self):
if self.document_title:
return str(self.document_title)
else:
return ''
def get_filename(self):
return self.filename
def get_template(self, buffer):
return SimpleDocTemplate(buffer)
def build_document(self, pdf_document, story):
pdf_document.build(
story, onFirstPage=firstPage, onLaterPages=laterPages)
def render_to_response(self, filename):
response = HttpResponse(content_type='application/pdf')
filename = 'filename=%s.pdf;' % self.get_filename()
response['Content-Disposition'] = filename.encode('utf-8')
buffer = BytesIO()
pdf_document = self.get_template(buffer)
pdf_document.title = self.get_document_title()
story = [Spacer(1, self.get_top_space() * cm)]
self.append_to_pdf(story)
self.build_document(pdf_document, story)
pdf = buffer.getvalue()
buffer.close()
response.write(pdf)
return response
def get(self, request, *args, **kwargs):
return self.render_to_response(self.get_filename())
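# Editor's note: a minimal, hypothetical usage sketch for PDFView (not part
# of OpenSlides itself). Subclasses are expected to provide append_to_pdf()
# and may override filename, document_title and required_permission:
#
#     class ExamplePDF(PDFView):
#         filename = ugettext_lazy('example')
#         document_title = ugettext_lazy('Example')
#
#         def append_to_pdf(self, story):
#             # "story" is the list of reportlab flowables started above.
#             story.append(Spacer(1, 1 * cm))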
class APIView(_APIView):
"""
The Django Rest framework APIView with improvements for OpenSlides.
"""
http_method_names = []
"""
The allowed actions have to be explicitly defined.
    Django allows the following:
http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']
"""
def get_context_data(self, **context):
"""
Returns the context for the response.
"""
return context
def method_call(self, request, *args, **kwargs):
"""
Http method that returns the response object with the context data.
"""
return Response(self.get_context_data())
# Add the http-methods and delete the method "method_call"
get = post = put = patch = delete = head = options = trace = method_call
del method_call
| mit | -4,697,838,992,160,653,000 | 28.644737 | 93 | 0.628051 | false |
Bfstepha/testrepo | html/apacheadmin.com/doc/conf.py | 1 | 9189 | # -*- coding: utf-8 -*-
#
# phpMyAdmin documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 26 14:04:48 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'phpMyAdmin'
copyright = u'2012 - 2013, The phpMyAdmin devel team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0.10.9'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html', 'doctrees']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'phpMyAdmindoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'phpMyAdmin.tex', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phpmyadmin', u'phpMyAdmin Documentation',
[u'The phpMyAdmin devel team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'phpMyAdmin', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'phpMyAdmin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'phpMyAdmin'
epub_author = u'The phpMyAdmin devel team'
epub_publisher = u'The phpMyAdmin devel team'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| gpl-2.0 | -5,887,918,677,440,202,000 | 31.129371 | 82 | 0.705191 | false |
probcomp/bdbcontrib | src/parallel.py | 1 | 8878 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Speed up BDB queries by parallelizing them.
Intuition
---------
``ESTIMATE SIMILARITY FROM PAIRWISE t`` will run in ``O(n^2 m v)`` time, where
``n`` is the number of rows, ``m`` is the number of models, and ``v`` is the
average number of views per model. While this query is reasonable for small
datasets, on medium-large datasets the query slows down significantly and can
become intractable. Splitting the processing up among multiple cores can
greatly reduce computation time; this module provides functionality to assist
this multiprocessing.
Currently, a multiprocessing equivalent is provided only for
``ESTIMATE PAIRWISE SIMILARITY``. In fact, this is a query that is most likely
to require multiprocessing, as datasets frequently have many more rows than
columns.
Example
-------
Following are (very) informal timing statistics with a 200-row by 4-column
.csv file, run on a late 2012 MacBook Pro with a 2.5 GHz 2-core Intel Core i5::
id,one,two,three,four
0,2,3,4,two
1,1,5,4,three
2,5,1,5,one
...
197,0,5,0,five
198,5,3,0,three
199,4,5,2,three
After inserting this .csv data into a table ``t`` and analyzing it quickly::
bdb.execute('''
CREATE GENERATOR t_cc FOR t USING crosscat (
GUESS(*),
id IGNORE
)
''')
bdb.execute('INITIALIZE 3 MODELS FOR t_cc')
bdb.execute('ANALYZE t_cc MODELS 0-2 FOR 10 ITERATIONS WAIT')
The corresponding similarity table thus has 200^2 = 40000 rows::
In [72]: %timeit -n 10 cursor_to_df(bdb.execute('ESTIMATE SIMILARITY FROM PAIRWISE t_cc'))
10 loops, best of 3: 9.56 s per loop
In [73]: %timeit -n 10 parallel.estimate_pairwise_similarity(bdb_file.name, 't', 't_cc', overwrite=True)
10 loops, best of 3: 5.16 s per loop # And values are located in the t_similarity table.
The approximate 2x speed up is what would be expected from dividing the work
among two cores. Further speed increases are likely with more powerful
machines.
----
"""
from bayeslite.exception import BayesLiteException as BLE
from bdbcontrib.bql_utils import cursor_to_df
import multiprocessing as mp
from bayeslite import bayesdb_open, bql_quote_name
from bayeslite.util import cursor_value
def _query_into_queue(query_string, params, queue, bdb_file):
"""
Estimate pairwise similarity of a certain subset of the bdb according to
query_string; place it in the multiprocessing Manager.Queue().
    For two technical reasons, this function is defined as a toplevel function and
independently creates a bdb handle:
1) Multiprocessing workers must be pickleable, and thus must be
declared as toplevel functions;
2) Multiple threads cannot access the same bdb handle, lest concurrency
issues arise with corrupt data.
Parameters
----------
query_string : str
        The BQL query to execute, as constructed by estimate_pairwise_similarity.
queue : multiprocessing.Manager.Queue
Queue to place results into
bdb_file : str
File location of the BayesDB database. This function will
independently open a new BayesDB handler.
"""
bdb = bayesdb_open(pathname=bdb_file)
res = bdb.execute(query_string, params)
queue.put(cursor_to_df(res))
def _chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i+n]
def estimate_pairwise_similarity(bdb_file, table, model, sim_table=None,
cores=None, N=None, overwrite=False):
"""
Estimate pairwise similarity from the given model, splitting processing
across multiple processors, and save results into sim_table.
Because called methods in this function must also open up separate BayesDB
instances, this function accepts a BayesDB filename, rather than an actual
bayeslite.BayesDB object.
Parameters
----------
bdb_file : str
File location of the BayesDB database object. This function will
handle opening the file with bayeslite.bayesdb_open.
table : str
Name of the table containing the raw data.
model : str
Name of the metamodel to estimate from.
sim_table : str
Name of the table to insert similarity results into. Defaults to
table name + '_similarity'.
cores : int
        Number of processors to use. Defaults to the number of cores as
        identified by multiprocessing.cpu_count().
N : int
Number of rows for which to estimate pairwise similarities (so
N^2 calculations are done). Should be used just to test small
batches; currently, there is no control over what specific pairwise
similarities are estimated with this parameter.
overwrite : bool
Whether to overwrite the sim_table if it already exists. If
overwrite=False and the table exists, function will raise
        sqlite3.OperationalError. Default False.
"""
bdb = bayesdb_open(pathname=bdb_file)
if cores is None:
cores = mp.cpu_count()
if cores < 1:
raise BLE(ValueError(
"Invalid number of cores {}".format(cores)))
if sim_table is None:
sim_table = table + '_similarity'
    # Get the number of rows in the source table
count_cursor = bdb.execute(
'SELECT COUNT(*) FROM {}'.format(bql_quote_name(table))
)
table_count = cursor_value(count_cursor)
if N is None:
N = table_count
elif N > table_count:
raise BLE(ValueError(
"Asked for N={} rows but {} rows in table".format(N, table_count)))
# Calculate the size (# of similarities to compute) and
# offset (where to start) calculation for each worker query.
# Divide sizes evenly, and make the last job finish the remainder
sizes = [(N * N) / cores for i in range(cores)]
sizes[-1] += (N * N) % cores
total = 0
offsets = [total]
for size in sizes[:-1]:
total += size
offsets.append(total)
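    # Editor's note (illustrative): with N = 10 rows and cores = 3 workers,
    # the 100 pairwise similarities are split as sizes = [33, 33, 34] with
    # offsets = [0, 33, 66], i.e. each worker gets its own LIMIT/OFFSET window.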
# Create the similarity table. Assumes original table has rowid column.
# XXX: tables don't necessarily have an autoincrementing primary key
# other than rowid, which is implicit and can't be set as a foreign key.
# We ought to ask for an optional user-specified foreign key, but
# ESTIMATE SIMILARITY returns numerical values rather than row names, so
# changing numerical rownames into that foreign key would be finicky. For
# now, we eliminate REFERENCE {table}(foreign_key) from the rowid0 and
# rowid1 specs.
sim_table_q = bql_quote_name(sim_table)
if overwrite:
bdb.sql_execute('DROP TABLE IF EXISTS {}'.format(sim_table_q))
bdb.sql_execute('''
CREATE TABLE {} (
rowid0 INTEGER NOT NULL,
rowid1 INTEGER NOT NULL,
value DOUBLE NOT NULL
)
'''.format(sim_table_q))
# Define the helper which inserts data into table in batches
def insert_into_sim(df):
"""
Use the main thread bdb handle to successively insert results of
ESTIMATEs into the table.
"""
rows = map(list, df.values)
insert_sql = '''
INSERT INTO {} (rowid0, rowid1, value) VALUES (?, ?, ?)
'''.format(sim_table_q)
# Avoid sqlite3 500-insert limit by grouping insert statements
# into one transaction.
with bdb.transaction():
for row in rows:
bdb.sql_execute(insert_sql, row)
pool = mp.Pool(processes=cores)
manager = mp.Manager()
queue = manager.Queue()
# Construct the estimate query template.
q_template = '''
ESTIMATE SIMILARITY FROM PAIRWISE {} LIMIT ? OFFSET ?
''' .format(bql_quote_name(model))
for so in zip(sizes, offsets):
pool.apply_async(
_query_into_queue, args=(q_template, so, queue, bdb_file)
)
# Close pool and wait for processes to finish
# FIXME: This waits for all processes to finish before inserting
# into the table, which means that memory usage is potentially very
# high!
pool.close()
pool.join()
# Process returned results
while not queue.empty():
df = queue.get()
insert_into_sim(df)
| apache-2.0 | -2,968,221,592,813,170,700 | 34.654618 | 108 | 0.669633 | false |
brianwc/courtlistener | cl/users/management/commands/cl_account_management.py | 1 | 4884 | import datetime
import hashlib
import random
from cl.users.models import UserProfile
from cl.users.utils import emails
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.core.management import BaseCommand
from django.utils.timezone import now
class Command(BaseCommand):
help = ('Notify users of unconfirmed accounts and delete accounts that '
'were never confirmed')
def add_arguments(self, parser):
parser.add_argument(
'--notify',
action='store_true',
default=False,
help='Notify users with unconfirmed accounts older than five days, '
'and delete orphaned profiles.'
)
parser.add_argument(
'--delete',
action='store_true',
default=False,
help='Delete unconfirmed accounts older than two months'
)
parser.add_argument(
'--simulate',
action='store_true',
default=False,
help='Simulate the emails that would be sent, using the console '
'backend. Do not delete accounts.'
)
parser.add_argument(
'--verbose',
action='store_true',
default=False,
help="Create more output."
)
def handle(self, *args, **options):
self.options = options
if options['delete']:
self.delete_old_accounts()
if options['notify']:
self.notify_unconfirmed_accounts()
if options['simulate']:
print "**************************************"
print "* NO EMAILS SENT OR ACCOUNTS DELETED *"
print "**************************************"
def delete_old_accounts(self):
"""Find accounts older than roughly two months that have not been
confirmed, and delete them. Should be run once a month, or so.
"""
two_months_ago = now() - datetime.timedelta(60)
unconfirmed_ups = UserProfile.objects.filter(
email_confirmed=False,
user__date_joined__lte=two_months_ago,
stub_account=False,
)
for up in unconfirmed_ups:
user = up.user.username
if self.options['verbose']:
print "User %s deleted" % user
if not self.options['simulate']:
# Gather their foreign keys, delete those
up.alert.all().delete()
up.donation.all().delete()
up.favorite.all().delete()
# delete the user then the profile.
up.user.delete()
up.delete()
def notify_unconfirmed_accounts(self):
"""This function will notify people who have not confirmed their
accounts that they must do so for fear of deletion.
This function should be run once a week, or so.
Because it updates the expiration date of the user's key, and also uses
that field to determine if the user should be notified in the first
place, the first week, a user will have an old enough key, and will be
notified, but the next week their key will have a very recent
expiration date (because it was just updated the prior week). This
means that they won't be selected the next week, but the one after,
their key will be old again, and they will be selected. It's not ideal,
but it's OK.
"""
# if your account is more than a week old, and you have not confirmed
# it, we will send you a notification, requesting that you confirm it.
a_week_ago = now() - datetime.timedelta(7)
unconfirmed_ups = UserProfile.objects.filter(
email_confirmed=False,
key_expires__lte=a_week_ago,
stub_account=False
)
for up in unconfirmed_ups:
if self.options['verbose']:
print "User %s will be notified" % up.user
if not self.options['simulate']:
# Build and save a new activation key for the account.
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
activation_key = hashlib.sha1(
salt + up.user.username).hexdigest()
key_expires = now() + datetime.timedelta(5)
up.activation_key = activation_key
up.key_expires = key_expires
up.save()
# Send the email.
current_site = Site.objects.get_current()
email = emails['email_not_confirmed']
send_mail(
email['subject'] % current_site.name,
email['body'] % (up.user.username, up.activation_key),
email['from'],
[up.user.email]
)
| agpl-3.0 | 8,706,912,930,454,461,000 | 37.456693 | 80 | 0.558149 | false |
jiaphuan/models | research/astronet/light_curve_util/util.py | 1 | 7461 | # Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light curve utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import numpy as np
from six.moves import range # pylint:disable=redefined-builtin
def phase_fold_time(time, period, t0):
"""Creates a phase-folded time vector.
result[i] is the unique number in [-period / 2, period / 2)
such that result[i] = time[i] - t0 + k_i * period, for some integer k_i.
Args:
time: 1D numpy array of time values.
period: A positive real scalar; the period to fold over.
t0: The center of the resulting folded vector; this value is mapped to 0.
Returns:
A 1D numpy array.
"""
half_period = period / 2
result = np.mod(time + (half_period - t0), period)
result -= half_period
return result
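# Editor's note: illustrative example (not part of the original module). With
# time = np.array([0, 1, 2, 3]), period = 2 and t0 = 0, phase_fold_time
# returns [0, -1, 0, -1]; every value lands in [-period / 2, period / 2).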
def split(all_time, all_flux, gap_width=0.75):
"""Splits a light curve on discontinuities (gaps).
This function accepts a light curve that is either a single segment, or is
  piecewise defined (e.g. split by quarter breaks or gaps in the data).
Args:
all_time: Numpy array or list of numpy arrays; each is a sequence of time
values.
all_flux: Numpy array or list of numpy arrays; each is a sequence of flux
values of the corresponding time array.
gap_width: Minimum gap size (in time units) for a split.
Returns:
out_time: List of numpy arrays; the split time arrays.
out_flux: List of numpy arrays; the split flux arrays.
"""
# Handle single-segment inputs.
# We must use an explicit length test on all_time because implicit conversion
# to bool fails if all_time is a numpy array, and all_time.size is not defined
# if all_time is a list of numpy arrays.
if len(all_time) > 0 and not isinstance(all_time[0], collections.Iterable): # pylint:disable=g-explicit-length-test
all_time = [all_time]
all_flux = [all_flux]
out_time = []
out_flux = []
for time, flux in itertools.izip(all_time, all_flux):
start = 0
for end in range(1, len(time) + 1):
# Choose the largest endpoint such that time[start:end] has no gaps.
if end == len(time) or time[end] - time[end - 1] > gap_width:
out_time.append(time[start:end])
out_flux.append(flux[start:end])
start = end
return out_time, out_flux
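# Editor's note: illustrative example (not part of the original module). With
# all_time = [0.0, 0.1, 0.2, 5.0, 5.1] and the default gap_width of 0.75, the
# 4.8-unit gap makes split() return times [[0.0, 0.1, 0.2], [5.0, 5.1]], with
# all_flux split at the same indices.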
def remove_events(all_time, all_flux, events, width_factor=1.0):
"""Removes events from a light curve.
This function accepts either a single-segment or piecewise-defined light
  curve (e.g. one that is split by quarter breaks or gaps in the data).
Args:
all_time: Numpy array or list of numpy arrays; each is a sequence of time
values.
all_flux: Numpy array or list of numpy arrays; each is a sequence of flux
values of the corresponding time array.
events: List of Event objects to remove.
width_factor: Fractional multiplier of the duration of each event to remove.
Returns:
output_time: Numpy array or list of numpy arrays; the time arrays with
events removed.
output_flux: Numpy array or list of numpy arrays; the flux arrays with
events removed.
"""
# Handle single-segment inputs.
# We must use an explicit length test on all_time because implicit conversion
# to bool fails if all_time is a numpy array and all_time.size is not defined
# if all_time is a list of numpy arrays.
if len(all_time) > 0 and not isinstance(all_time[0], collections.Iterable): # pylint:disable=g-explicit-length-test
all_time = [all_time]
all_flux = [all_flux]
single_segment = True
else:
single_segment = False
output_time = []
output_flux = []
for time, flux in itertools.izip(all_time, all_flux):
mask = np.ones_like(time, dtype=np.bool)
for event in events:
transit_dist = np.abs(phase_fold_time(time, event.period, event.t0))
mask = np.logical_and(mask,
transit_dist > 0.5 * width_factor * event.duration)
if single_segment:
output_time = time[mask]
output_flux = flux[mask]
else:
output_time.append(time[mask])
output_flux.append(flux[mask])
return output_time, output_flux
def interpolate_masked_spline(all_time, all_masked_time, all_masked_spline):
"""Linearly interpolates spline values across masked points.
Args:
all_time: List of numpy arrays; each is a sequence of time values.
all_masked_time: List of numpy arrays; each is a sequence of time values
with some values missing (masked).
all_masked_spline: List of numpy arrays; the masked spline values
corresponding to all_masked_time.
Returns:
interp_spline: List of numpy arrays; each is the masked spline with missing
points linearly interpolated.
"""
interp_spline = []
for time, masked_time, masked_spline in itertools.izip(
all_time, all_masked_time, all_masked_spline):
if len(masked_time) > 0: # pylint:disable=g-explicit-length-test
interp_spline.append(np.interp(time, masked_time, masked_spline))
else:
interp_spline.append(np.full_like(time, np.nan))
return interp_spline
def count_transit_points(time, event):
"""Computes the number of points in each transit of a given event.
Args:
time: Sorted numpy array of time values.
event: An Event object.
Returns:
A numpy array containing the number of time points "in transit" for each
transit occurring between the first and last time values.
Raises:
ValueError: If there are more than 10**6 transits.
"""
t_min = np.min(time)
t_max = np.max(time)
# Tiny periods or erroneous time values could make this loop take forever.
if (t_max - t_min) / event.period > 10**6:
raise ValueError(
"Too many transits! Time range is [%.2f, %.2f] and period is %.2e." %
(t_min, t_max, event.period))
# Make sure t0 is in [t_min, t_min + period).
t0 = np.mod(event.t0 - t_min, event.period) + t_min
# Prepare loop variables.
points_in_transit = []
i, j = 0, 0
for transit_midpoint in np.arange(t0, t_max, event.period):
transit_begin = transit_midpoint - event.duration / 2
transit_end = transit_midpoint + event.duration / 2
# Move time[i] to the first point >= transit_begin.
while time[i] < transit_begin:
# transit_begin is guaranteed to be < np.max(t) (provided duration >= 0).
# Therefore, i cannot go out of range.
i += 1
# Move time[j] to the first point > transit_end.
while time[j] <= transit_end:
j += 1
# j went out of range. We're finished.
if j >= len(time):
break
# The points in the current transit duration are precisely time[i:j].
# Since j is an exclusive index, there are exactly j-i points in transit.
points_in_transit.append(j - i)
return np.array(points_in_transit)
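# Editor's note: illustrative example (not part of the original module). For
# time = np.arange(10) and an Event with period = 5, duration = 2 and t0 = 2,
# transits are centred at t = 2 and t = 7, so count_transit_points returns
# [3, 3] (time points 1-3 and 6-8 fall inside the half-duration windows).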
| apache-2.0 | 6,828,795,574,697,506,000 | 34.193396 | 118 | 0.68275 | false |
jean/sentry | tests/sentry/api/endpoints/test_organization_repositories.py | 1 | 1493 | from __future__ import absolute_import
import six
from django.core.urlresolvers import reverse
from sentry.models import Repository
from sentry.testutils import APITestCase
class OrganizationRepositoriesListTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name='baz')
repo = Repository.objects.create(
name='example',
organization_id=org.id,
)
url = reverse('sentry-api-0-organization-repositories', args=[org.slug])
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['id'] == six.text_type(repo.id)
class OrganizationRepositoriesCreateTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name='baz')
url = reverse('sentry-api-0-organization-repositories', args=[org.slug])
response = self.client.post(
url, data={
'provider': 'dummy',
'name': 'getsentry/sentry',
}
)
assert response.status_code == 201, (response.status_code, response.content)
assert response.data['id']
repo = Repository.objects.get(id=response.data['id'])
assert repo.provider == 'dummy'
assert repo.name == 'getsentry/sentry'
| bsd-3-clause | -1,317,995,337,978,983,400 | 30.104167 | 84 | 0.636303 | false |
davidbgk/udata | udata/core/discussions/api.py | 1 | 6414 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from flask_security import current_user
from flask_restplus.inputs import boolean
from udata.auth import admin_permission
from udata.api import api, API, fields
from udata.core.user.api_fields import user_ref_fields
from .forms import DiscussionCreateForm, DiscussionCommentForm
from .models import Message, Discussion
from .permissions import CloseDiscussionPermission
from .signals import (
on_new_discussion, on_new_discussion_comment, on_discussion_closed,
on_discussion_deleted
)
ns = api.namespace('discussions', 'Discussion related operations')
message_fields = api.model('DiscussionMessage', {
'content': fields.String(description='The message body'),
'posted_by': fields.Nested(user_ref_fields,
description='The message author'),
'posted_on': fields.ISODateTime(description='The message posting date'),
})
discussion_fields = api.model('Discussion', {
'id': fields.String(description='The discussion identifier'),
'subject': fields.Nested(api.model_reference,
description='The discussion target object'),
'class': fields.ClassName(description='The object class',
discriminator=True),
'title': fields.String(description='The discussion title'),
'user': fields.Nested(
user_ref_fields, description='The discussion author'),
'created': fields.ISODateTime(description='The discussion creation date'),
'closed': fields.ISODateTime(description='The discussion closing date'),
'closed_by': fields.String(
attribute='closed_by.id',
description='The user who closed the discussion'),
'discussion': fields.Nested(message_fields),
'url': fields.UrlFor('api.discussion',
description='The discussion API URI'),
})
start_discussion_fields = api.model('DiscussionStart', {
'title': fields.String(description='The title of the discussion to open',
required=True),
'comment': fields.String(description='The content of the initial comment',
required=True),
'subject': fields.Nested(api.model_reference,
description='The discussion target object',
required=True),
})
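# Editor's note: illustrative request body for starting a discussion (the
# identifiers are made up; "subject" follows api.model_reference, i.e. a
# "class" name and an "id"):
#
#     {
#         "title": "Wrong coordinates",
#         "comment": "The dataset seems to use the wrong projection.",
#         "subject": {"class": "Dataset", "id": "5722215ec751df39bd9c7cf0"}
#     }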
comment_discussion_fields = api.model('DiscussionResponse', {
'comment': fields.String(
description='The comment to submit', required=True),
'close': fields.Boolean(
description='Is this a closing response. Only subject owner can close')
})
discussion_page_fields = api.model('DiscussionPage',
fields.pager(discussion_fields))
parser = api.parser()
parser.add_argument(
'sort', type=str, default='-created', location='args',
help='The sorting attribute')
parser.add_argument(
'closed', type=boolean, location='args',
help='Filters discussions on their closed status if specified')
parser.add_argument(
'for', type=str, location='args', action='append',
help='Filter discussions for a given subject')
parser.add_argument(
'page', type=int, default=1, location='args', help='The page to fetch')
parser.add_argument(
'page_size', type=int, default=20, location='args',
help='The page size to fetch')
@ns.route('/<id>/', endpoint='discussion')
class DiscussionAPI(API):
'''
Base class for a discussion thread.
'''
@api.doc('get_discussion')
@api.marshal_with(discussion_fields)
def get(self, id):
'''Get a discussion given its ID'''
discussion = Discussion.objects.get_or_404(id=id)
return discussion
@api.secure
@api.doc('comment_discussion')
@api.expect(comment_discussion_fields)
@api.response(403, 'Not allowed to close this discussion')
@api.marshal_with(discussion_fields)
def post(self, id):
        '''Add comment and optionally close a discussion given its ID'''
discussion = Discussion.objects.get_or_404(id=id)
form = api.validate(DiscussionCommentForm)
message = Message(
content=form.comment.data,
posted_by=current_user.id
)
discussion.discussion.append(message)
close = form.close.data
if close:
CloseDiscussionPermission(discussion).test()
discussion.closed_by = current_user._get_current_object()
discussion.closed = datetime.now()
discussion.save()
if close:
on_discussion_closed.send(discussion, message=message)
else:
on_new_discussion_comment.send(discussion, message=message)
return discussion
@api.secure(admin_permission)
@api.doc('delete_discussion')
@api.response(403, 'Not allowed to delete this discussion')
def delete(self, id):
'''Delete a discussion given its ID'''
discussion = Discussion.objects.get_or_404(id=id)
discussion.delete()
on_discussion_deleted.send(discussion)
return '', 204
@ns.route('/', endpoint='discussions')
class DiscussionsAPI(API):
'''
Base class for a list of discussions.
'''
@api.doc('list_discussions', parser=parser)
@api.marshal_with(discussion_page_fields)
def get(self):
'''List all Discussions'''
args = parser.parse_args()
discussions = Discussion.objects
if args['for']:
discussions = discussions.generic_in(subject=args['for'])
if args['closed'] is False:
discussions = discussions(closed=None)
elif args['closed'] is True:
discussions = discussions(closed__ne=None)
discussions = discussions.order_by(args['sort'])
return discussions.paginate(args['page'], args['page_size'])
@api.secure
@api.doc('create_discussion')
@api.expect(start_discussion_fields)
@api.marshal_with(discussion_fields)
def post(self):
'''Create a new Discussion'''
form = api.validate(DiscussionCreateForm)
message = Message(
content=form.comment.data,
posted_by=current_user.id)
discussion = Discussion(user=current_user.id, discussion=[message])
form.populate_obj(discussion)
discussion.save()
on_new_discussion.send(discussion)
return discussion, 201
| agpl-3.0 | -1,390,360,502,718,842,000 | 36.508772 | 79 | 0.651232 | false |
prakashpp/trytond-magento | channel.py | 1 | 27247 | # -*- coding: utf-8 -*-
from datetime import datetime
import magento
import logging
import xmlrpclib
import socket
from trytond.pool import PoolMeta, Pool
from trytond.transaction import Transaction
from trytond.pyson import Eval
from trytond.model import ModelView, ModelSQL, fields
from .api import OrderConfig
__metaclass__ = PoolMeta
__all__ = ['Channel', 'MagentoTier']
MAGENTO_STATES = {
'invisible': ~(Eval('source') == 'magento'),
'required': Eval('source') == 'magento'
}
INVISIBLE_IF_NOT_MAGENTO = {
'invisible': ~(Eval('source') == 'magento'),
}
logger = logging.getLogger('magento')
def batch(iterable, n=1):
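    """
    Yield successive chunks of size n from a sliceable sequence.
    Example (illustrative): batch([1, 2, 3, 4, 5], 2) yields [1, 2],
    [3, 4] and finally [5].
    """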
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
class Channel:
"""
Sale Channel model
"""
__name__ = 'sale.channel'
# Instance
magento_url = fields.Char(
"Magento Site URL", states=MAGENTO_STATES, depends=['source']
)
magento_api_user = fields.Char(
"API User", states=MAGENTO_STATES, depends=['source']
)
magento_api_key = fields.Char(
"API Key", states=MAGENTO_STATES, depends=['source']
)
magento_carriers = fields.One2Many(
"magento.instance.carrier", "channel", "Carriers / Shipping Methods",
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_order_prefix = fields.Char(
'Sale Order Prefix',
help="This helps to distinguish between orders from different channels",
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
# website
magento_website_id = fields.Integer(
'Website ID', readonly=True,
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_website_name = fields.Char(
'Website Name', readonly=True,
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_website_code = fields.Char(
'Website Code', readonly=True,
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_root_category_id = fields.Integer(
'Root Category ID', states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_store_name = fields.Char(
'Store Name', readonly=True, states=INVISIBLE_IF_NOT_MAGENTO,
depends=['source']
)
magento_store_id = fields.Integer(
'Store ID', readonly=True, states=INVISIBLE_IF_NOT_MAGENTO,
depends=['source']
)
#: Checking this will make sure that only the done shipments which have a
#: carrier and tracking reference are exported.
magento_export_tracking_information = fields.Boolean(
'Export tracking information', help='Checking this will make sure'
' that only the done shipments which have a carrier and tracking '
'reference are exported. This will update carrier and tracking '
'reference on magento for the exported shipments as well.',
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_taxes = fields.One2Many(
"sale.channel.magento.tax", "channel", "Taxes",
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_price_tiers = fields.One2Many(
'sale.channel.magento.price_tier', 'channel', 'Default Price Tiers',
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
product_listings = fields.One2Many(
'product.product.channel_listing', 'channel', 'Product Listings',
)
magento_payment_gateways = fields.One2Many(
'magento.instance.payment_gateway', 'channel', 'Payments',
)
@classmethod
def __setup__(cls):
"""
Setup the class before adding to pool
"""
super(Channel, cls).__setup__()
cls._sql_constraints += [
(
'unique_magento_channel',
'UNIQUE(magento_url, magento_website_id, magento_store_id)',
'This store is already added'
)
]
cls._error_messages.update({
"connection_error": "Incorrect API Settings! \n"
"Please check and correct the API settings on channel.",
"multiple_channels": 'Selected operation can be done only for one'
' channel at a time',
'invalid_magento_channel':
                'Current channel does not belong to Magento!'
})
cls._buttons.update({
'import_magento_carriers': {
'invisible': Eval('source') != 'magento'
},
'configure_magento_connection': {
'invisible': Eval('source') != 'magento'
}
})
def validate_magento_channel(self):
"""
Make sure channel source is magento
"""
if self.source != 'magento':
self.raise_user_error('invalid_magento_channel')
@classmethod
def get_source(cls):
"""
Get the source
"""
res = super(Channel, cls).get_source()
res.append(('magento', 'Magento'))
return res
@staticmethod
def default_magento_order_prefix():
"""
Sets default value for magento order prefix
"""
return 'mag_'
@staticmethod
def default_magento_root_category_id():
"""
        Sets the default root category ID. It defaults to 1 because Magento's
        default root category has ID 1.
"""
return 1
def get_taxes(self, rate):
"Return list of tax records with the given rate"
for mag_tax in self.magento_taxes:
if mag_tax.tax_percent == rate:
return list(mag_tax.taxes)
return []
def import_order_states(self):
"""
Import order states for magento channel
Downstream implementation for channel.import_order_states
"""
if self.source != 'magento':
return super(Channel, self).import_order_states()
with Transaction().set_context({'current_channel': self.id}):
# Import order states
with OrderConfig(
self.magento_url, self.magento_api_user,
self.magento_api_key
) as order_config_api:
order_states_data = order_config_api.get_states()
for code, name in order_states_data.iteritems():
self.create_order_state(code, name)
@classmethod
@ModelView.button_action('magento.wizard_configure_magento')
def configure_magento_connection(cls, channels):
"""
Configure magento connection for current channel
:param channels: List of active records of channels
"""
pass
def test_magento_connection(self):
"""
        Test the magento connection for this channel and raise an appropriate
        error to the user if the API settings are incorrect.
"""
# Make sure channel belongs to magento
self.validate_magento_channel()
try:
with magento.API(
self.magento_url, self.magento_api_user,
self.magento_api_key
):
return
except (
xmlrpclib.Fault, IOError, xmlrpclib.ProtocolError, socket.timeout
):
self.raise_user_error("connection_error")
@classmethod
@ModelView.button_action('magento.wizard_import_magento_carriers')
def import_magento_carriers(cls, channels):
"""
Import carriers/shipping methods from magento for channels
:param channels: Active record list of magento channels
"""
InstanceCarrier = Pool().get('magento.instance.carrier')
for channel in channels:
channel.validate_magento_channel()
with Transaction().set_context({'current_channel': channel.id}):
with OrderConfig(
channel.magento_url, channel.magento_api_user,
channel.magento_api_key
) as order_config_api:
mag_carriers = order_config_api.get_shipping_methods()
InstanceCarrier.create_all_using_magento_data(mag_carriers)
@classmethod
def get_current_magento_channel(cls):
"""Helper method to get the current magento_channel.
"""
channel = cls.get_current_channel()
# Make sure channel belongs to magento
channel.validate_magento_channel()
return channel
def import_products(self):
"""
Import products for this magento channel
Downstream implementation for channel.import_products
"""
if self.source != 'magento':
return super(Channel, self).import_products()
self.import_category_tree()
with Transaction().set_context({'current_channel': self.id}):
with magento.Product(
self.magento_url, self.magento_api_user, self.magento_api_key
) as product_api:
# TODO: Implement pagination and import each product as async
# task
magento_products = product_api.list()
products = []
for magento_product in magento_products:
products.append(self.import_product(magento_product['sku']))
return products
def import_product(self, sku, product_data=None):
"""
Import specific product for this magento channel
Downstream implementation for channel.import_product
"""
Product = Pool().get('product.product')
Listing = Pool().get('product.product.channel_listing')
if self.source != 'magento':
return super(Channel, self).import_product(sku, product_data)
if not sku:
            # SKU is required, cannot continue
return
# Sanitize SKU
sku = sku.strip()
products = Product.search([
('code', '=', sku),
])
listings = Listing.search([
('product.code', '=', sku),
('channel', '=', self)
])
if not products or not listings:
# Either way we need the product data from magento. Make that
# dreaded API call.
with magento.Product(
self.magento_url, self.magento_api_user,
self.magento_api_key
) as product_api:
product_data = product_api.info(sku, identifierType="sku")
# XXX: sanitize product_data, sometimes product sku may
# contain trailing spaces
product_data['sku'] = product_data['sku'].strip()
# Create a product since there is no match for an existing
# product with the SKU.
if not products:
product = Product.create_from(self, product_data)
else:
product, = products
if not listings:
Listing.create_from(self, product_data)
else:
product = products[0]
return product
def import_category_tree(self):
"""
        Imports the category tree and creates categories in the same
        hierarchy as on Magento
"""
Category = Pool().get('product.category')
self.validate_magento_channel()
with Transaction().set_context({'current_channel': self.id}):
with magento.Category(
self.magento_url, self.magento_api_user,
self.magento_api_key
) as category_api:
category_tree = category_api.tree(
self.magento_root_category_id
)
Category.create_tree_using_magento_data(category_tree)
def import_orders(self):
"""
Downstream implementation of channel.import_orders
:return: List of active record of sale imported
"""
if self.source != 'magento':
return super(Channel, self).import_orders()
new_sales = []
with Transaction().set_context({'current_channel': self.id}):
order_states = self.get_order_states_to_import()
order_states_to_import_in = map(
lambda state: state.code, order_states
)
with magento.Order(
self.magento_url, self.magento_api_user, self.magento_api_key
) as order_api:
                # Search orders filtered by store_id and state (paginated),
                # then import each order summary below via import_order()
filter = {
'store_id': {'=': self.magento_store_id},
'state': {'in': order_states_to_import_in},
}
self.write([self], {
'last_order_import_time': datetime.utcnow()
})
page = 1
has_next = True
orders_summaries = []
while has_next:
# XXX: Pagination is only available in
# magento extension >= 1.6.1
api_res = order_api.search(
filters=filter, limit=3000, page=page
)
has_next = api_res['hasNext']
page += 1
orders_summaries.extend(api_res['items'])
for order_summary in orders_summaries:
new_sales.append(self.import_order(order_summary))
return new_sales
def import_order(self, order_info):
"Downstream implementation to import sale order from magento"
if self.source != 'magento':
return super(Channel, self).import_order(order_info)
Sale = Pool().get('sale.sale')
sale = Sale.find_using_magento_data(order_info)
if sale:
return sale
with Transaction().set_context({'current_channel': self.id}):
with magento.Order(
self.magento_url, self.magento_api_user, self.magento_api_key
) as order_api:
order_data = order_api.info(order_info['increment_id'])
return Sale.create_using_magento_data(order_data)
@classmethod
def export_order_status_to_magento_using_cron(cls):
"""
        Export sales order status to magento using cron
"""
channels = cls.search([('source', '=', 'magento')])
for channel in channels:
channel.export_order_status()
def export_order_status(self):
"""
Export sale order status to magento for the current store view.
If last export time is defined, export only those orders which are
updated after last export time.
:return: List of active records of sales exported
"""
Sale = Pool().get('sale.sale')
if self.source != 'magento':
return super(Channel, self).export_order_status()
exported_sales = []
domain = [('channel', '=', self.id)]
if self.last_order_export_time:
            domain.append(
                ('write_date', '>=', self.last_order_export_time)
            )
sales = Sale.search(domain)
self.last_order_export_time = datetime.utcnow()
self.save()
for sale in sales:
exported_sales.append(sale.export_order_status_to_magento())
return exported_sales
def export_product_catalog(self):
"""
        Export products (those updated since the last export, if a last export
        time is set) to the default unclassified magento category under the
        current magento channel
        :return: List of active records of the exported products
"""
Channel = Pool().get('sale.channel')
Product = Pool().get('product.product')
ModelData = Pool().get('ir.model.data')
Category = Pool().get('product.category')
if self.source != 'magento':
return super(Channel, self).export_product_catalog()
domain = [
('code', '!=', None),
]
if self.last_product_export_time:
domain.append(
('write_date', '>=', self.last_product_export_time)
)
products = Product.search(domain)
self.last_product_export_time = datetime.utcnow()
self.save()
exported_products = []
category = Category(
ModelData.get_id("magento", "product_category_magento_unclassified")
)
for product in products:
exported_products.append(
product.export_product_catalog_to_magento(category)
)
return exported_products
@classmethod
def export_shipment_status_to_magento_using_cron(cls):
"""
Export Shipment status for shipments using cron
"""
channels = cls.search([('source', '=', 'magento')])
for channel in channels:
channel.export_shipment_status_to_magento()
def export_shipment_status_to_magento(self):
"""
Exports shipment status for shipments to magento, if they are shipped
        :return: Set of active records of sales whose shipments were exported
"""
Shipment = Pool().get('stock.shipment.out')
Sale = Pool().get('sale.sale')
SaleLine = Pool().get('sale.line')
self.validate_magento_channel()
sale_domain = [
('channel', '=', self.id),
('shipment_state', '=', 'sent'),
('magento_id', '!=', None),
('shipments', '!=', None),
]
if self.last_shipment_export_time:
sale_domain.append(
('write_date', '>=', self.last_shipment_export_time)
)
sales = Sale.search(sale_domain)
self.last_shipment_export_time = datetime.utcnow()
self.save()
updated_sales = set([])
for sale in sales:
# Get the increment id from the sale reference
increment_id = sale.reference[
len(self.magento_order_prefix): len(sale.reference)
]
for shipment in sale.shipments:
try:
# Some checks to make sure that only valid shipments are
# being exported
if shipment.is_tracking_exported_to_magento or \
shipment.state != 'done' or \
shipment.magento_increment_id:
continue
updated_sales.add(sale)
with magento.Shipment(
self.magento_url, self.magento_api_user,
self.magento_api_key
) as shipment_api:
item_qty_map = {}
for move in shipment.outgoing_moves:
if isinstance(move.origin, SaleLine) \
and move.origin.magento_id:
# This is done because there can be multiple
# lines with the same product and they need
                                # to be sent as a sum of quantities
item_qty_map.setdefault(
str(move.origin.magento_id), 0
)
item_qty_map[str(move.origin.magento_id)] += \
move.quantity
shipment_increment_id = shipment_api.create(
order_increment_id=increment_id,
items_qty=item_qty_map
)
Shipment.write(list(sale.shipments), {
'magento_increment_id': shipment_increment_id,
})
if self.magento_export_tracking_information and (
hasattr(shipment, 'tracking_number') and
hasattr(shipment, 'carrier') and
shipment.tracking_number and shipment.carrier
):
with Transaction().set_context(
current_channel=self.id):
shipment.export_tracking_info_to_magento()
except xmlrpclib.Fault, fault:
if fault.faultCode == 102:
# A shipment already exists for this order,
# we cannot do anything about it.
# Maybe it was already exported earlier or was created
# separately on magento
# Hence, just continue
continue
return updated_sales
def export_product_prices(self):
"""
Exports tier prices of products from tryton to magento for this channel
        :return: Number of product listings whose prices were exported
"""
if self.source != 'magento':
return super(Channel, self).export_product_prices()
ChannelListing = Pool().get('product.product.channel_listing')
price_domain = [
('channel', '=', self.id),
]
if self.last_product_price_export_time:
price_domain.append([
'OR', [(
'product.write_date', '>=',
self.last_product_price_export_time
)], [(
'product.template.write_date', '>=',
self.last_product_price_export_time
)]
])
product_listings = ChannelListing.search(price_domain)
self.last_product_price_export_time = datetime.utcnow()
self.save()
for listing in product_listings:
            # Get the price tiers from the product listing if the listing has
            # price tiers, else fall back to the default price tiers from the
            # current channel
price_tiers = listing.price_tiers or self.magento_price_tiers
price_data = []
for tier in price_tiers:
if hasattr(tier, 'product_listing'):
                    # The price tier comes from a product listing, which has a
                    # function field for the price, so we use it directly
price = tier.price
else:
                    # The price tier comes from the default tiers on the
                    # channel; there is no product on the tier, so we use the
                    # current product in the loop to compute the price for
                    # this tier
price = self.price_list.compute(
None, listing.product, listing.product.list_price,
tier.quantity, self.default_uom
)
price_data.append({
'qty': tier.quantity,
'price': float(price),
})
            # Update tier prices on magento
with magento.ProductTierPrice(
self.magento_url, self.magento_api_user, self.magento_api_key
) as tier_price_api:
tier_price_api.update(
listing.product_identifier, price_data,
identifierType="productID"
)
return len(product_listings)
def get_default_tryton_action(self, code, name):
"""
Returns tryton order state for magento state
        :param code: Code of the magento state
        :param name: Name of the magento state
:return: A dictionary of tryton state and shipment and invoice methods
"""
if self.source != 'magento':
return super(Channel, self).get_default_tryton_action(code, name)
if code in ('new', 'holded'):
return {
'action': 'process_manually',
'invoice_method': 'order',
'shipment_method': 'order'
}
elif code in ('pending_payment', 'payment_review'):
return {
'action': 'import_as_past',
'invoice_method': 'order',
'shipment_method': 'invoice'
}
elif code in ('closed', 'complete'):
return {
'action': 'import_as_past',
'invoice_method': 'order',
'shipment_method': 'order'
}
elif code == 'processing':
return {
'action': 'process_automatically',
'invoice_method': 'order',
'shipment_method': 'order'
}
else:
return {
'action': 'do_not_import',
'invoice_method': 'manual',
'shipment_method': 'manual'
}
def update_order_status(self):
"Downstream implementation of order_status update"
Sale = Pool().get('sale.sale')
if self.source != 'magento':
return super(Channel, self).update_order_status()
sales = Sale.search([
('channel', '=', self.id),
('state', 'in', ('confirmed', 'processing')),
])
order_ids = [sale.reference for sale in sales]
for order_ids_batch in batch(order_ids, 50):
with magento.Order(
self.magento_url, self.magento_api_user, self.magento_api_key
) as order_api:
orders_data = order_api.info_multi(order_ids_batch)
for i, order_data in enumerate(orders_data):
if order_data.get('isFault'):
if order_data['faultCode'] == '100':
# 100: Requested order not exists.
# TODO: Remove order from channel or add some
# exception.
pass
logger.warning("Order %s: %s %s" % (
order_ids_batch[i], order_data['faultCode'],
order_data['faultMessage']
))
continue
sale, = Sale.search([
('reference', '=', order_data['increment_id'])
])
sale.update_order_status_from_magento(order_data=order_data)
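# Note: update_order_status above relies on a batch helper, imported elsewhere
# in this module (outside this excerpt), to chunk the order references into
# groups of 50 before calling info_multi(). The generator below is only an
# illustrative sketch of that behaviour, under a hypothetical name, and is not
# part of the upstream code.
def _batch_sketch(sequence, size):
    "Yield successive chunks of `size` items from `sequence` (illustration)."
    for start in range(0, len(sequence), size):
        yield sequence[start:start + size]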
class MagentoTier(ModelSQL, ModelView):
"""Price Tiers for store
This model stores the default price tiers to be used while sending
tier prices for a product from Tryton to Magento.
The product also has a similar table like this. If there are no entries in
the table on product, then these tiers are used.
"""
__name__ = 'sale.channel.magento.price_tier'
channel = fields.Many2One(
'sale.channel', 'Magento Store', required=True, readonly=True,
domain=[('source', '=', 'magento')]
)
quantity = fields.Float('Quantity', required=True)
@classmethod
def __setup__(cls):
"""
Setup the class before adding to pool
"""
super(MagentoTier, cls).__setup__()
cls._sql_constraints += [
(
'channel_quantity_unique', 'UNIQUE(channel, quantity)',
'Quantity in price tiers must be unique for a channel'
)
]
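# Illustration (not part of the upstream module): the tier price payload that
# Channel.export_product_prices builds from these tiers is a plain list of
# dicts, one entry per quantity break, for example:
#     [{'qty': 1.0, 'price': 100.0}, {'qty': 10.0, 'price': 90.0}]
# That list is then passed to magento.ProductTierPrice.update() together with
# the product identifier.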
| bsd-3-clause | 55,168,798,315,536,190 | 34.021851 | 80 | 0.539362 | false |
mwhite/JSONAlchemy | setup.py | 1 | 1208 | from setuptools.command.test import test as TestCommand
import setuptools
import io
import sys
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setuptools.setup(
name='JSONAlchemy',
version='0.1.0',
description='SQLAlchemy utility for handling semi-structured JSON data',
author='Michael White',
author_email='[email protected]',
url='http://github.com/mwhite/JSONAlchemy',
license='MIT License',
packages=['jsonalchemy'],
test_suite='tests',
install_requires=io.open('requirements.txt').read().splitlines(),
tests_require=['pytest', 'python-dateutil'],
cmdclass = {'test': PyTest},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
'Topic :: Database',
],
)
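# Usage sketch (not part of the original file): with the PyTest command class
# registered in cmdclass above, the test suite can be run with
#     python setup.py test
# which defers test collection and execution to pytest via PyTest.run_tests().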
| mit | -6,740,419,446,265,977,000 | 29.2 | 76 | 0.638245 | false |
log2timeline/dftimewolf | tests/lib/collectors/grr_base.py | 1 | 5042 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the GRR base collector."""
import unittest
import mock
from grr_api_client import errors as grr_errors
from dftimewolf.lib import state
from dftimewolf.lib import errors
from dftimewolf.lib.collectors import grr_base
from dftimewolf import config
ACCESS_FORBIDDEN_MAX = 3
class MockGRRObject(object):
"""Fake GRR object that will be used in the access forbidden wrapper test"""
_access_forbidden_counter = 0
CreateApproval = mock.MagicMock()
ClientApproval = mock.MagicMock()
ClientApproval.client_id = "abcd"
ClientApproval.approval_id = "dcba"
ClientApproval.username = "nobody"
CreateApproval.return_value = ClientApproval
hunt_id = "123"
client_id = "321"
# pylint: disable=unused-argument
def ForbiddenFunction(self, random1, random2, random3=None, random4=None):
"""Will raise a grr_errors.AccessForbiddenError three times, and return."""
while ACCESS_FORBIDDEN_MAX > self._access_forbidden_counter:
self._access_forbidden_counter += 1
raise grr_errors.AccessForbiddenError
return 4
class GRRBaseModuleTest(unittest.TestCase):
"""Tests for the GRR base collector."""
def testInitialization(self):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
self.assertIsNotNone(grr_base_module)
@mock.patch('tempfile.mkdtemp')
@mock.patch('grr_api_client.api.InitHttp')
def testSetup(self, mock_grr_inithttp, mock_mkdtemp):
"""Tests that setup works"""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
mock_mkdtemp.return_value = '/fake'
grr_base_module.SetUp(
reason='random reason',
grr_server_url='http://fake/endpoint',
grr_username='admin1',
grr_password='admin2',
approvers='[email protected],[email protected]',
verify=True
)
mock_grr_inithttp.assert_called_with(
api_endpoint='http://fake/endpoint',
auth=('admin1', 'admin2'),
verify=True)
self.assertEqual(grr_base_module.approvers,
['[email protected]', '[email protected]'])
self.assertEqual(grr_base_module.output_path, '/fake')
@mock.patch('grr_api_client.api.InitHttp')
def testApprovalWrapper(self, _):
"""Tests that the approval wrapper works correctly."""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
grr_base_module.SetUp(
reason='random reason',
grr_server_url='http://fake/endpoint',
grr_username='admin1',
grr_password='admin2',
approvers='[email protected],[email protected]',
verify=True
)
# pylint: disable=protected-access
grr_base_module._CHECK_APPROVAL_INTERVAL_SEC = 0
mock_grr_object = MockGRRObject()
mock_forbidden_function = mock.Mock(
wraps=mock_grr_object.ForbiddenFunction)
result = grr_base_module._WrapGRRRequestWithApproval(
mock_grr_object,
mock_forbidden_function,
'random1',
'random2',
random3=4,
random4=4)
# Final result.
self.assertEqual(result, 4)
mock_forbidden_function.assert_called_with(
'random1', 'random2', random3=4, random4=4)
    # Our forbidden function should be called 4 times, the last one succeeding.
self.assertEqual(mock_forbidden_function.call_count, 4)
mock_grr_object.CreateApproval.assert_called_with(
reason='random reason',
notified_users=['[email protected]', '[email protected]'])
@mock.patch('grr_api_client.api.InitHttp')
def testNoApproversErrorsOut(self, _):
"""Tests that an error is generated if no approvers are specified.
This should only error on unauthorized objects, which is how our mock
behaves.
"""
test_state = state.DFTimewolfState(config.Config)
grr_base_module = grr_base.GRRBaseModule(test_state)
grr_base_module.SetUp(
reason='random',
grr_server_url='http://fake/url',
grr_username='admin1',
grr_password='admin2',
approvers='',
verify=True
)
# pylint: disable=protected-access
grr_base_module._CHECK_APPROVAL_INTERVAL_SEC = 0
mock_grr_object = MockGRRObject()
mock_forbidden_function = mock.Mock(
wraps=mock_grr_object.ForbiddenFunction)
with self.assertRaises(errors.DFTimewolfError) as error:
grr_base_module._WrapGRRRequestWithApproval(
mock_grr_object,
mock_forbidden_function,
'random1',
'random2',
random3=4,
random4=4)
self.assertEqual('GRR needs approval but no approvers specified '
'(hint: use --approvers)', error.exception.message)
self.assertTrue(error.exception.critical)
self.assertEqual(len(test_state.errors), 1)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,875,692,896,729,224,700 | 33.534247 | 80 | 0.678104 | false |
tobias-lang/crawl | src/old/collection_parser.py | 1 | 2434 | import traceback
import codecs
import src.parse.article_parser
class CollectionParser():
def __init__(self, article_parser=src.parse.article_parser.ArticleParser()):
        self.article_parser = article_parser
def get_filelist(self, dir, maxNumber=None):
from os import listdir
from os.path import isfile, join
filenames = [join(dir, f) for f in listdir(dir) if isfile(join(dir, f))]
filenames = filter(lambda x: self._is_relevant_file(x), filenames)
filenames = filenames[:maxNumber]
return filenames
def _is_relevant_file(self, filename):
if filename.endswith("#comments"):
return False
return True
def parse_file(self, filename):
f = codecs.open(filename, "r", "utf-8")
lines = f.readlines()
big_data_string = "".join(lines)
self.article_parser.reset()
self.article_parser.feed(big_data_string)
c = self.article_parser.getContents()
return c
def parse_collection(self, filenames):
contents = set()
for idx, filename in enumerate(filenames):
# print "read", filename
c = self.parse_file(filename)
if len(c):
# print "->", len(c)
contents.add(c)
# if len(contents) > 10:
# break
contents = list(contents)
contents.sort()
return contents
def parse_and_write_collection(self, input_filenames, output_filename):
print "write contents to", output_filename
f = codecs.open(output_filename, "w", "utf-8")
n = 100
        # Round up so the final partial batch of filenames is processed too
        for i in range((len(input_filenames) + n - 1) / n):
print i*n,
batch_filenames = input_filenames[i*n:(i+1)*n]
batch_contents =self.parse_collection(batch_filenames)
for c in batch_contents:
try:
# print h
# h_ascii = h.encode('ascii', 'ignore')
f.write(c + "\n")
except Exception as e:
print(traceback.format_exc())
f.flush()
f.close()
def run_full(self, input_dir, output_filename, maxNumber=None):
filenames = self.get_filelist(input_dir, maxNumber)
contents = self.parse_and_write_collection(filenames, output_filename)
import os
os.system("wc " + output_filename)
| gpl-3.0 | 3,377,325,573,410,735,000 | 29.810127 | 80 | 0.569433 | false |
AravindK95/ee106b | project4/src/grasper_plan/src/transformations.py | 2 | 66033 | # -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006-2015, Christoph Gohlke
# Copyright (c) 2006-2015, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2015.07.18
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.9 <http://www.numpy.org>`_
* `Transformations.c 2015.07.18 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for speedup of some functions)
Notes
-----
The API is not stable yet and is expected to change between revisions.
This Python code is not optimized for speed. Refer to the transformations.c
module for a faster implementation of some functions.
Documentation in HTML format can be generated with epydoc.
Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using
numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using
numpy.dot(M, v) for shape (4, \*) column vectors, respectively
numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points").
This module follows the "column vectors on the right" and "row major storage"
(C contiguous) conventions. The translation components are in the right column
of the transformation matrix, i.e. M[:3, 3].
The transpose of the transformation matrices may have to be used to interface
with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16].
Calculations are carried out with numpy.float64 precision.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions w+ix+jy+kz are represented as [w, x, y, z].
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
Other Python packages and modules for 3D transformations and quaternions:
* `Transforms3d <https://pypi.python.org/pypi/transforms3d>`_
includes most code of this module.
* `Blender.mathutils <http://www.blender.org/api/blender_python_api>`_
* `numpy-dtypes <https://github.com/numpy/numpy-dtypes>`_
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
(13) Quaternion in molecular modeling. CFF Karney.
J Mol Graph Mod, 25(5):595-604
(14) New method for extracting the quaternion from a rotation matrix.
Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann.
Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.
(16) Column Vectors vs. Row Vectors.
http://steve.hollasch.net/cgindex/math/matrix/column-vec.html
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix([1, 2, 3])
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, [1, 2, 3])
True
>>> numpy.allclose(shear, [0, math.tan(beta), 0])
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
>>> v0, v1 = random_vector(3), random_vector(3)
>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
>>> v2 = numpy.dot(v0, M[:3,:3].T)
>>> numpy.allclose(unit_vector(v1), unit_vector(v2))
True
"""
from __future__ import division, print_function
import math
import numpy
__version__ = '2015.07.18'
__docformat__ = 'restructuredtext en'
__all__ = ()
def identity_matrix():
"""Return 4x4 identity/unit matrix.
>>> I = identity_matrix()
>>> numpy.allclose(I, numpy.dot(I, I))
True
>>> numpy.sum(I), numpy.trace(I)
(4.0, 4.0)
>>> numpy.allclose(I, numpy.identity(4))
True
"""
return numpy.identity(4)
def translation_matrix(direction):
"""Return matrix to translate by direction vector.
>>> v = numpy.random.random(3) - 0.5
>>> numpy.allclose(v, translation_matrix(v)[:3, 3])
True
"""
M = numpy.identity(4)
M[:3, 3] = direction[:3]
return M
def translation_from_matrix(matrix):
"""Return translation vector from translation matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = translation_from_matrix(translation_matrix(v0))
>>> numpy.allclose(v0, v1)
True
"""
return numpy.array(matrix, copy=False)[:3, 3].copy()
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
>>> v2[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def reflection_from_matrix(matrix):
"""Return mirror plane point and normal vector from reflection matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = numpy.random.random(3) - 0.5
>>> M0 = reflection_matrix(v0, v1)
>>> point, normal = reflection_from_matrix(M0)
>>> M1 = reflection_matrix(point, normal)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
# normal: unit eigenvector corresponding to eigenvalue -1
w, V = numpy.linalg.eig(M[:3, :3])
i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
normal = numpy.real(V[:, i[0]]).squeeze()
# point: any unit eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return point, normal
def rotation_matrix(angle, direction, point=None):
"""Return matrix to rotate about axis defined by point and direction.
>>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
>>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
True
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
>>> is_same_transform(R0, R1)
True
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(-angle, -direc, point)
>>> is_same_transform(R0, R1)
True
>>> I = numpy.identity(4, numpy.float64)
>>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
True
>>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2,
... direc, point)))
True
"""
sina = math.sin(angle)
cosa = math.cos(angle)
direction = unit_vector(direction[:3])
# rotation matrix around unit vector
R = numpy.diag([cosa, cosa, cosa])
R += numpy.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += numpy.array([[ 0.0, -direction[2], direction[1]],
[ direction[2], 0.0, -direction[0]],
[-direction[1], direction[0], 0.0]])
M = numpy.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
M[:3, 3] = point - numpy.dot(R, point)
return M
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R33 corresponding to eigenvalue of 1
w, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v[3] = 1
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
# uniform scaling
M = numpy.diag([factor, factor, factor, 1.0])
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S0 = scale_matrix(factor, origin)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
>>> S0 = scale_matrix(factor, origin, direct)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction
def projection_matrix(point, normal, direction=None,
perspective=None, pseudo=False):
"""Return matrix to project onto plane defined by point and normal.
Using either perspective point, projection direction, or none of both.
If pseudo is True, perspective projections will preserve relative depth
such that Perspective = dot(Orthogonal, PseudoPerspective).
>>> P = projection_matrix([0, 0, 0], [1, 0, 0])
>>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
True
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> P1 = projection_matrix(point, normal, direction=direct)
>>> P2 = projection_matrix(point, normal, perspective=persp)
>>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> is_same_transform(P2, numpy.dot(P0, P3))
True
>>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])
>>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(P, v0)
>>> numpy.allclose(v1[1], v0[1])
True
>>> numpy.allclose(v1[0], 3-v1[1])
True
"""
M = numpy.identity(4)
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
normal = unit_vector(normal[:3])
if perspective is not None:
# perspective projection
perspective = numpy.array(perspective[:3], dtype=numpy.float64,
copy=False)
M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
M[:3, :3] -= numpy.outer(perspective, normal)
if pseudo:
# preserve relative depth
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
else:
M[:3, 3] = numpy.dot(point, normal) * perspective
M[3, :3] = -normal
M[3, 3] = numpy.dot(perspective, normal)
elif direction is not None:
# parallel projection
direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
scale = numpy.dot(direction, normal)
M[:3, :3] -= numpy.outer(direction, normal) / scale
M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
else:
# orthogonal projection
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * normal
return M
def projection_from_matrix(matrix, pseudo=False):
"""Return projection plane and perspective point from projection matrix.
Return values are same as arguments for projection_matrix function:
point, normal, direction, perspective, and pseudo.
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, direct)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
>>> result = projection_from_matrix(P0, pseudo=False)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> result = projection_from_matrix(P0, pseudo=True)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not pseudo and len(i):
# point: any eigenvector corresponding to eigenvalue 1
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
# direction: unit eigenvector corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 0")
direction = numpy.real(V[:, i[0]]).squeeze()
direction /= vector_norm(direction)
# normal: unit eigenvector of M33.T corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33.T)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if len(i):
# parallel projection
normal = numpy.real(V[:, i[0]]).squeeze()
normal /= vector_norm(normal)
return point, normal, direction, None, False
else:
# orthogonal projection, where normal equals direction vector
return point, direction, None, None, False
else:
# perspective projection
i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
if not len(i):
raise ValueError(
"no eigenvector not corresponding to eigenvalue 0")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
normal = - M[3, :3]
perspective = M[:3, 3] / numpy.dot(point[:3], normal)
if pseudo:
perspective -= normal
return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
"""Return matrix to obtain normalized device coordinates from frustum.
The frustum bounds are axis-aligned along x (left, right),
y (bottom, top) and z (near, far).
Normalized device coordinates are in range [-1, 1] if coordinates are
inside the frustum.
If perspective is True the frustum is a truncated pyramid with the
perspective point at origin and direction along z axis, otherwise an
orthographic canonical view volume (a box).
Homogeneous coordinates transformed by the perspective clip matrix
need to be dehomogenized (divided by w coordinate).
>>> frustum = numpy.random.rand(6)
>>> frustum[1] += frustum[0]
>>> frustum[3] += frustum[2]
>>> frustum[5] += frustum[4]
>>> M = clip_matrix(perspective=False, *frustum)
>>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
array([-1., -1., -1., 1.])
>>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1])
array([ 1., 1., 1., 1.])
>>> M = clip_matrix(perspective=True, *frustum)
>>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
>>> v / v[3]
array([-1., -1., -1., 1.])
>>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1])
>>> v / v[3]
array([ 1., 1., -1., 1.])
"""
if left >= right or bottom >= top or near >= far:
raise ValueError("invalid frustum")
if perspective:
if near <= _EPS:
raise ValueError("invalid frustum: near <= 0")
t = 2.0 * near
M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0],
[0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0],
[0.0, 0.0, (far+near)/(near-far), t*far/(far-near)],
[0.0, 0.0, -1.0, 0.0]]
else:
M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)],
[0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)],
[0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)],
[0.0, 0.0, 0.0, 1.0]]
return numpy.array(M)
def shear_matrix(angle, direction, point, normal):
"""Return matrix to shear by angle along direction vector on shear plane.
The shear plane is defined by a point and normal vector. The direction
vector must be orthogonal to the plane's normal vector.
A point P is transformed by the shear matrix into P" such that
the vector P-P" is parallel to the direction vector and its extent is
given by the angle of P-P'-P", where P' is the orthogonal projection
of P onto the shear plane.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S = shear_matrix(angle, direct, point, normal)
>>> numpy.allclose(1, numpy.linalg.det(S))
True
"""
normal = unit_vector(normal[:3])
direction = unit_vector(direction[:3])
if abs(numpy.dot(normal, direction)) > 1e-6:
raise ValueError("direction and normal vectors are not orthogonal")
angle = math.tan(angle)
M = numpy.identity(4)
M[:3, :3] += angle * numpy.outer(direction, normal)
M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction
return M
def shear_from_matrix(matrix):
"""Return shear angle, direction and plane from shear matrix.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S0 = shear_matrix(angle, direct, point, normal)
>>> angle, direct, point, normal = shear_from_matrix(S0)
>>> S1 = shear_matrix(angle, direct, point, normal)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
# normal: cross independent eigenvectors corresponding to the eigenvalue 1
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
if len(i) < 2:
raise ValueError("no two linear independent eigenvectors found %s" % w)
V = numpy.real(V[:, i]).squeeze().T
lenorm = -1.0
for i0, i1 in ((0, 1), (0, 2), (1, 2)):
n = numpy.cross(V[i0], V[i1])
w = vector_norm(n)
if w > lenorm:
lenorm = w
normal = n
normal /= lenorm
# direction and angle
direction = numpy.dot(M33 - numpy.identity(3), normal)
angle = vector_norm(direction)
direction /= angle
angle = math.atan(angle)
# point: eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return angle, direction, point, normal
def decompose_matrix(matrix):
"""Return sequence of transformations from transformation matrix.
matrix : array_like
Non-degenerative homogeneous transformation matrix
Return tuple of:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
Raise ValueError if matrix is of wrong type or degenerative.
>>> T0 = translation_matrix([1, 2, 3])
>>> scale, shear, angles, trans, persp = decompose_matrix(T0)
>>> T1 = translation_matrix(trans)
>>> numpy.allclose(T0, T1)
True
>>> S = scale_matrix(0.123)
>>> scale, shear, angles, trans, persp = decompose_matrix(S)
>>> scale[0]
0.123
>>> R0 = euler_matrix(1, 2, 3)
>>> scale, shear, angles, trans, persp = decompose_matrix(R0)
>>> R1 = euler_matrix(*angles)
>>> numpy.allclose(R0, R1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
if abs(M[3, 3]) < _EPS:
raise ValueError("M[3, 3] is zero")
M /= M[3, 3]
P = M.copy()
P[:, 3] = 0.0, 0.0, 0.0, 1.0
if not numpy.linalg.det(P):
raise ValueError("matrix is singular")
scale = numpy.zeros((3, ))
shear = [0.0, 0.0, 0.0]
angles = [0.0, 0.0, 0.0]
if any(abs(M[:3, 3]) > _EPS):
perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
M[:, 3] = 0.0, 0.0, 0.0, 1.0
else:
perspective = numpy.array([0.0, 0.0, 0.0, 1.0])
translate = M[3, :3].copy()
M[3, :3] = 0.0
row = M[:3, :3].copy()
scale[0] = vector_norm(row[0])
row[0] /= scale[0]
shear[0] = numpy.dot(row[0], row[1])
row[1] -= row[0] * shear[0]
scale[1] = vector_norm(row[1])
row[1] /= scale[1]
shear[0] /= scale[1]
shear[1] = numpy.dot(row[0], row[2])
row[2] -= row[0] * shear[1]
shear[2] = numpy.dot(row[1], row[2])
row[2] -= row[1] * shear[2]
scale[2] = vector_norm(row[2])
row[2] /= scale[2]
shear[1:] /= scale[2]
if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
numpy.negative(scale, scale)
numpy.negative(row, row)
angles[1] = math.asin(-row[0, 2])
if math.cos(angles[1]):
angles[0] = math.atan2(row[1, 2], row[2, 2])
angles[2] = math.atan2(row[0, 1], row[0, 0])
else:
#angles[0] = math.atan2(row[1, 0], row[1, 1])
angles[0] = math.atan2(-row[2, 1], row[1, 1])
angles[2] = 0.0
return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
perspective=None):
"""Return transformation matrix from sequence of transformations.
This is the inverse of the decompose_matrix function.
Sequence of transformations:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
>>> scale = numpy.random.random(3) - 0.5
>>> shear = numpy.random.random(3) - 0.5
>>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
>>> trans = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(4) - 0.5
>>> M0 = compose_matrix(scale, shear, angles, trans, persp)
>>> result = decompose_matrix(M0)
>>> M1 = compose_matrix(*result)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.identity(4)
if perspective is not None:
P = numpy.identity(4)
P[3, :] = perspective[:4]
M = numpy.dot(M, P)
if translate is not None:
T = numpy.identity(4)
T[:3, 3] = translate[:3]
M = numpy.dot(M, T)
if angles is not None:
R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
M = numpy.dot(M, R)
if shear is not None:
Z = numpy.identity(4)
Z[1, 2] = shear[2]
Z[0, 2] = shear[1]
Z[0, 1] = shear[0]
M = numpy.dot(M, Z)
if scale is not None:
S = numpy.identity(4)
S[0, 0] = scale[0]
S[1, 1] = scale[1]
S[2, 2] = scale[2]
M = numpy.dot(M, S)
M /= M[3, 3]
return M
def orthogonalization_matrix(lengths, angles):
"""Return orthogonalization matrix for crystallographic cell coordinates.
Angles are expected in degrees.
The de-orthogonalization matrix is the inverse.
>>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])
>>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
True
>>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
>>> numpy.allclose(numpy.sum(O), 43.063229)
True
"""
a, b, c = lengths
angles = numpy.radians(angles)
sina, sinb, _ = numpy.sin(angles)
cosa, cosb, cosg = numpy.cos(angles)
co = (cosa * cosb - cosg) / (sina * sinb)
return numpy.array([
[ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0],
[-a*sinb*co, b*sina, 0.0, 0.0],
[ a*cosb, b*cosa, c, 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
"""Return affine transform matrix to register two point sets.
v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous
coordinates, where ndims is the dimensionality of the coordinate space.
If shear is False, a similarity transformation matrix is returned.
If also scale is False, a rigid/Euclidean transformation matrix
is returned.
By default the algorithm by Hartley and Zissermann [15] is used.
If usesvd is True, similarity and Euclidean transformation matrices
are calculated by minimizing the weighted sum of squared deviations
(RMSD) according to the algorithm by Kabsch [8].
Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
is used, which is slower when using this Python implementation.
The returned matrix performs rotation, translation and uniform scaling
(if specified).
>>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
>>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
>>> affine_matrix_from_points(v0, v1)
array([[ 0.14549, 0.00062, 675.50008],
[ 0.00048, 0.14094, 53.24971],
[ 0. , 0. , 1. ]])
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> S = scale_matrix(random.random())
>>> M = concatenate_matrices(T, R, S)
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
>>> M = affine_matrix_from_points(v0[:3], v1[:3])
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
More examples in superimposition_matrix()
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
v1 = numpy.array(v1, dtype=numpy.float64, copy=True)
ndims = v0.shape[0]
if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
raise ValueError("input arrays are of wrong shape or type")
# move centroids to origin
t0 = -numpy.mean(v0, axis=1)
M0 = numpy.identity(ndims+1)
M0[:ndims, ndims] = t0
v0 += t0.reshape(ndims, 1)
t1 = -numpy.mean(v1, axis=1)
M1 = numpy.identity(ndims+1)
M1[:ndims, ndims] = t1
v1 += t1.reshape(ndims, 1)
if shear:
# Affine transformation
A = numpy.concatenate((v0, v1), axis=0)
u, s, vh = numpy.linalg.svd(A.T)
vh = vh[:ndims].T
B = vh[:ndims]
C = vh[ndims:2*ndims]
t = numpy.dot(C, numpy.linalg.pinv(B))
t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
elif usesvd or ndims != 3:
# Rigid transformation via SVD of covariance matrix
u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = numpy.dot(u, vh)
if numpy.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = numpy.identity(ndims+1)
M[:ndims, :ndims] = R
else:
# Rigid transformation matrix via quaternion
# compute symmetric matrix N
xx, yy, zz = numpy.sum(v0 * v1, axis=1)
xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
N = [[xx+yy+zz, 0.0, 0.0, 0.0],
[yz-zy, xx-yy-zz, 0.0, 0.0],
[zx-xz, xy+yx, yy-xx-zz, 0.0],
[xy-yx, zx+xz, yz+zy, zz-xx-yy]]
# quaternion: eigenvector corresponding to most positive eigenvalue
w, V = numpy.linalg.eigh(N)
q = V[:, numpy.argmax(w)]
q /= vector_norm(q) # unit quaternion
# homogeneous transformation matrix
M = quaternion_matrix(q)
if scale and not shear:
# Affine transformation; scale is ratio of RMS deviations from centroid
v0 *= v0
v1 *= v1
M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
# move centroids back
M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
M /= M[ndims, ndims]
return M
def superimposition_matrix(v0, v1, scale=False, usesvd=True):
"""Return matrix to transform given 3D point set into second point set.
v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points.
The parameters scale and usesvd are explained in the more general
affine_matrix_from_points function.
The returned matrix is a similarity or Euclidean transformation matrix.
This function has a fast C implementation in transformations.c.
>>> v0 = numpy.random.rand(3, 10)
>>> M = superimposition_matrix(v0, v0)
>>> numpy.allclose(M, numpy.identity(4))
True
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> S = scale_matrix(random.random())
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> M = concatenate_matrices(T, R, S)
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)
>>> M = superimposition_matrix(v0, v1, scale=True)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v = numpy.empty((4, 100, 3))
>>> v[:, :, 0] = v0
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
return affine_matrix_from_points(v0, v1, shear=False,
scale=scale, usesvd=usesvd)
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
M = numpy.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj*si
M[i, k] = sj*ci
M[j, i] = sj*sk
M[j, j] = -cj*ss+cc
M[j, k] = -cj*cs-sc
M[k, i] = -sj*ck
M[k, j] = cj*sc+cs
M[k, k] = cj*cc-ss
else:
M[i, i] = cj*ck
M[i, j] = sj*sc-cs
M[i, k] = sj*cc+ss
M[j, i] = cj*sk
M[j, j] = sj*ss+cc
M[j, k] = sj*cs-sc
M[k, i] = -sj
M[k, j] = cj*si
M[k, k] = cj*ci
return M
def euler_from_matrix(matrix, axes='sxyz'):
"""Return Euler angles from rotation matrix for specified axis sequence.
axes : One of 24 axis sequences as string or encoded tuple
Note that many Euler angle triplets can describe one matrix.
>>> R0 = euler_matrix(1, 2, 3, 'syxz')
>>> al, be, ga = euler_from_matrix(R0, 'syxz')
>>> R1 = euler_matrix(al, be, ga, 'syxz')
>>> numpy.allclose(R0, R1)
True
>>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R0 = euler_matrix(axes=axes, *angles)
... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
... if not numpy.allclose(R0, R1): print(axes, "failed")
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
if repetition:
sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
if sy > _EPS:
ax = math.atan2( M[i, j], M[i, k])
ay = math.atan2( sy, M[i, i])
az = math.atan2( M[j, i], -M[k, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2( sy, M[i, i])
az = 0.0
else:
cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
if cy > _EPS:
ax = math.atan2( M[k, j], M[k, k])
ay = math.atan2(-M[k, i], cy)
az = math.atan2( M[j, i], M[i, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2(-M[k, i], cy)
az = 0.0
if parity:
ax, ay, az = -ax, -ay, -az
if frame:
ax, az = az, ax
return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
"""Return Euler angles from quaternion for specified axis sequence.
>>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(angles, [0.123, 0, 0])
True
"""
return euler_from_matrix(quaternion_matrix(quaternion), axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis + 1
j = _NEXT_AXIS[i+parity-1] + 1
k = _NEXT_AXIS[i-parity] + 1
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
q = numpy.empty((4, ))
if repetition:
q[0] = cj*(cc - ss)
q[i] = cj*(cs + sc)
q[j] = sj*(cc + ss)
q[k] = sj*(cs - sc)
else:
q[0] = cj*cc + sj*ss
q[i] = cj*sc - sj*cs
q[j] = cj*ss + sj*cc
q[k] = cj*cs - sj*sc
if parity:
q[j] *= -1.0
return q
def quaternion_about_axis(angle, axis):
"""Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, [1, 0, 0])
>>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])
True
"""
q = numpy.array([0.0, axis[0], axis[1], axis[2]])
qlen = vector_norm(q)
if qlen > _EPS:
q *= math.sin(angle/2.0) / qlen
q[0] = math.cos(angle/2.0)
return q
def quaternion_matrix(quaternion):
"""Return homogeneous rotation matrix from quaternion.
>>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
True
>>> M = quaternion_matrix([1, 0, 0, 0])
>>> numpy.allclose(M, numpy.identity(4))
True
>>> M = quaternion_matrix([0, 1, 0, 0])
>>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
n = numpy.dot(q, q)
if n < _EPS:
return numpy.identity(4)
q *= math.sqrt(2.0 / n)
q = numpy.outer(q, q)
return numpy.array([
[1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],
[ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
"""Return quaternion from rotation matrix.
If isprecise is True, the input matrix is assumed to be a precise rotation
matrix and a faster algorithm is used.
>>> q = quaternion_from_matrix(numpy.identity(4), True)
>>> numpy.allclose(q, [1, 0, 0, 0])
True
>>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
>>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
True
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R, True)
>>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
True
>>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
True
>>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
True
>>> R = random_rotation_matrix()
>>> q = quaternion_from_matrix(R)
>>> is_same_transform(R, quaternion_matrix(q))
True
>>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
>>> numpy.allclose(quaternion_from_matrix(R, isprecise=False),
... quaternion_from_matrix(R, isprecise=True))
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
if isprecise:
q = numpy.empty((4, ))
t = numpy.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 1, 2, 3
if M[1, 1] > M[0, 0]:
i, j, k = 2, 3, 1
if M[2, 2] > M[i, i]:
i, j, k = 3, 1, 2
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q *= 0.5 / math.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
[m01+m10, m11-m00-m22, 0.0, 0.0],
[m02+m20, m12+m21, m22-m00-m11, 0.0],
[m21-m12, m02-m20, m10-m01, m00+m11+m22]])
K /= 3.0
# quaternion is eigenvector of K that corresponds to largest eigenvalue
w, V = numpy.linalg.eigh(K)
q = V[[3, 0, 1, 2], numpy.argmax(w)]
if q[0] < 0.0:
numpy.negative(q, q)
return q
def quaternion_multiply(quaternion1, quaternion0):
"""Return multiplication of two quaternions.
>>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
>>> numpy.allclose(q, [28, -44, -14, 48])
True
"""
w0, x0, y0, z0 = quaternion0
w1, x1, y1, z1 = quaternion1
return numpy.array([-x1*x0 - y1*y0 - z1*z0 + w1*w0,
x1*w0 + y1*z0 - z1*y0 + w1*x0,
-x1*z0 + y1*w0 + z1*x0 + w1*y0,
x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64)
def quaternion_conjugate(quaternion):
"""Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q
def quaternion_inverse(quaternion):
"""Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q / numpy.dot(q, q)
def quaternion_real(quaternion):
"""Return real part of quaternion.
>>> quaternion_real([3, 0, 1, 2])
3.0
"""
return float(quaternion[0])
def quaternion_imag(quaternion):
"""Return imaginary part of quaternion.
>>> quaternion_imag([3, 0, 1, 2])
array([ 0., 1., 2.])
"""
return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True)
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
"""Return spherical linear interpolation between two quaternions.
>>> q0 = random_quaternion()
>>> q1 = random_quaternion()
>>> q = quaternion_slerp(q0, q1, 0)
>>> numpy.allclose(q, q0)
True
>>> q = quaternion_slerp(q0, q1, 1, 1)
>>> numpy.allclose(q, q1)
True
>>> q = quaternion_slerp(q0, q1, 0.5)
>>> angle = math.acos(numpy.dot(q0, q))
>>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \
numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle)
True
"""
q0 = unit_vector(quat0[:4])
q1 = unit_vector(quat1[:4])
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = numpy.dot(q0, q1)
if abs(abs(d) - 1.0) < _EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
numpy.negative(q1, q1)
angle = math.acos(d) + spin * math.pi
if abs(angle) < _EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
def random_quaternion(rand=None):
"""Return uniform random unit quaternion.
rand: array like or None
Three independent random variables that are uniformly distributed
between 0 and 1.
>>> q = random_quaternion()
>>> numpy.allclose(1, vector_norm(q))
True
>>> q = random_quaternion(numpy.random.random(3))
>>> len(q.shape), q.shape[0]==4
(1, True)
"""
if rand is None:
rand = numpy.random.rand(3)
else:
assert len(rand) == 3
r1 = numpy.sqrt(1.0 - rand[0])
r2 = numpy.sqrt(rand[0])
pi2 = math.pi * 2.0
t1 = pi2 * rand[1]
t2 = pi2 * rand[2]
return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1,
numpy.cos(t1)*r1, numpy.sin(t2)*r2])
def random_rotation_matrix(rand=None):
"""Return uniform random rotation matrix.
rand: array like
Three independent random variables that are uniformly distributed
between 0 and 1 for each returned quaternion.
>>> R = random_rotation_matrix()
>>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
True
"""
return quaternion_matrix(random_quaternion(rand))
class Arcball(object):
"""Virtual Trackball Control.
>>> ball = Arcball()
>>> ball = Arcball(initial=numpy.identity(4))
>>> ball.place([320, 320], 320)
>>> ball.down([500, 250])
>>> ball.drag([475, 275])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 3.90583455)
True
>>> ball = Arcball(initial=[1, 0, 0, 0])
>>> ball.place([320, 320], 320)
>>> ball.setaxes([1, 1, 0], [-1, 1, 0])
>>> ball.constrain = True
>>> ball.down([400, 200])
>>> ball.drag([200, 400])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 0.2055924)
True
>>> ball.next()
"""
def __init__(self, initial=None):
"""Initialize virtual trackball control.
initial : quaternion or rotation matrix
"""
self._axis = None
self._axes = None
self._radius = 1.0
self._center = [0.0, 0.0]
self._vdown = numpy.array([0.0, 0.0, 1.0])
self._constrain = False
if initial is None:
self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
else:
initial = numpy.array(initial, dtype=numpy.float64)
if initial.shape == (4, 4):
self._qdown = quaternion_from_matrix(initial)
elif initial.shape == (4, ):
initial /= vector_norm(initial)
self._qdown = initial
else:
raise ValueError("initial not a quaternion or matrix")
self._qnow = self._qpre = self._qdown
def place(self, center, radius):
"""Place Arcball, e.g. when window size changes.
center : sequence[2]
Window coordinates of trackball center.
radius : float
Radius of trackball in window coordinates.
"""
self._radius = float(radius)
self._center[0] = center[0]
self._center[1] = center[1]
def setaxes(self, *axes):
"""Set axes to constrain rotations."""
if axes is None:
self._axes = None
else:
self._axes = [unit_vector(axis) for axis in axes]
@property
def constrain(self):
"""Return state of constrain to axis mode."""
return self._constrain
@constrain.setter
def constrain(self, value):
"""Set state of constrain to axis mode."""
self._constrain = bool(value)
def down(self, point):
"""Set initial cursor window coordinates and pick constrain-axis."""
self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
self._qdown = self._qpre = self._qnow
if self._constrain and self._axes is not None:
self._axis = arcball_nearest_axis(self._vdown, self._axes)
self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
else:
self._axis = None
def drag(self, point):
"""Update current cursor window coordinates."""
vnow = arcball_map_to_sphere(point, self._center, self._radius)
if self._axis is not None:
vnow = arcball_constrain_to_axis(vnow, self._axis)
self._qpre = self._qnow
t = numpy.cross(self._vdown, vnow)
if numpy.dot(t, t) < _EPS:
self._qnow = self._qdown
else:
q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
self._qnow = quaternion_multiply(q, self._qdown)
def next(self, acceleration=0.0):
"""Continue rotation in direction of last drag."""
q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
self._qpre, self._qnow = self._qnow, q
def matrix(self):
"""Return homogeneous rotation matrix."""
return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
"""Return unit sphere coordinates from window coordinates."""
v0 = (point[0] - center[0]) / radius
v1 = (center[1] - point[1]) / radius
n = v0*v0 + v1*v1
if n > 1.0:
# position outside of sphere
n = math.sqrt(n)
return numpy.array([v0/n, v1/n, 0.0])
else:
return numpy.array([v0, v1, math.sqrt(1.0 - n)])
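# Illustrative worked example (added comment, not part of the original module): with
# center=[320, 320] and radius=320 as in the Arcball doctest above, a click at
# point=[500, 250] maps to v0 = (500-320)/320 = 0.5625, v1 = (320-250)/320 = 0.21875,
# n = 0.5625**2 + 0.21875**2 ~= 0.3643 < 1.0, so the returned sphere point is roughly
# (0.5625, 0.21875, sqrt(1 - 0.3643) ~= 0.797).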
def arcball_constrain_to_axis(point, axis):
"""Return sphere point perpendicular to axis."""
v = numpy.array(point, dtype=numpy.float64, copy=True)
a = numpy.array(axis, dtype=numpy.float64, copy=True)
v -= a * numpy.dot(a, v) # on plane
n = vector_norm(v)
if n > _EPS:
if v[2] < 0.0:
numpy.negative(v, v)
v /= n
return v
if a[2] == 1.0:
return numpy.array([1.0, 0.0, 0.0])
return unit_vector([-a[1], a[0], 0.0])
def arcball_nearest_axis(point, axes):
"""Return axis, which arc is nearest to point."""
point = numpy.array(point, dtype=numpy.float64, copy=False)
nearest = None
mx = -1.0
for axis in axes:
t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
if t > mx:
nearest = axis
mx = t
return nearest
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
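# Illustrative decoding (added comment): _AXES2TUPLE['sxyz'] == (0, 0, 0, 0) unpacks in
# euler_matrix() as firstaxis=0, parity=0, repetition=0, frame=0, i.e. a static ('s')
# frame, no repeated axis and the x-y-z order; the 'r...' keys set frame=1 for a
# rotating frame, e.g. _AXES2TUPLE['rzyx'] == (0, 0, 0, 1).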
def vector_norm(data, axis=None, out=None):
"""Return length, i.e. Euclidean norm, of ndarray along axis.
>>> v = numpy.random.random(3)
>>> n = vector_norm(v)
>>> numpy.allclose(n, numpy.linalg.norm(v))
True
>>> v = numpy.random.rand(6, 5, 3)
>>> n = vector_norm(v, axis=-1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
True
>>> n = vector_norm(v, axis=1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> v = numpy.random.rand(5, 4, 3)
>>> n = numpy.empty((5, 3))
>>> vector_norm(v, axis=1, out=n)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> vector_norm([])
0.0
>>> vector_norm([1])
1.0
"""
data = numpy.array(data, dtype=numpy.float64, copy=True)
if out is None:
if data.ndim == 1:
return math.sqrt(numpy.dot(data, data))
data *= data
out = numpy.atleast_1d(numpy.sum(data, axis=axis))
numpy.sqrt(out, out)
return out
else:
data *= data
numpy.sum(data, axis=axis, out=out)
numpy.sqrt(out, out)
def unit_vector(data, axis=None, out=None):
"""Return ndarray normalized by length, i.e. Euclidean norm, along axis.
>>> v0 = numpy.random.random(3)
>>> v1 = unit_vector(v0)
>>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
True
>>> v0 = numpy.random.rand(5, 4, 3)
>>> v1 = unit_vector(v0, axis=-1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
>>> numpy.allclose(v1, v2)
True
>>> v1 = unit_vector(v0, axis=1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
>>> numpy.allclose(v1, v2)
True
>>> v1 = numpy.empty((5, 4, 3))
>>> unit_vector(v0, axis=1, out=v1)
>>> numpy.allclose(v1, v2)
True
>>> list(unit_vector([]))
[]
>>> list(unit_vector([1]))
[1.0]
"""
if out is None:
data = numpy.array(data, dtype=numpy.float64, copy=True)
if data.ndim == 1:
data /= math.sqrt(numpy.dot(data, data))
return data
else:
if out is not data:
out[:] = numpy.array(data, copy=False)
data = out
length = numpy.atleast_1d(numpy.sum(data*data, axis))
numpy.sqrt(length, length)
if axis is not None:
length = numpy.expand_dims(length, axis)
data /= length
if out is None:
return data
def random_vector(size):
"""Return array of random doubles in the half-open interval [0.0, 1.0).
>>> v = random_vector(10000)
>>> numpy.all(v >= 0) and numpy.all(v < 1)
True
>>> v0 = random_vector(10)
>>> v1 = random_vector(10)
>>> numpy.any(v0 == v1)
False
"""
return numpy.random.random(size)
def vector_product(v0, v1, axis=0):
"""Return vector perpendicular to vectors.
>>> v = vector_product([2, 0, 0], [0, 3, 0])
>>> numpy.allclose(v, [0, 0, 6])
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> v = vector_product(v0, v1)
>>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> v = vector_product(v0, v1, axis=1)
>>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
True
"""
return numpy.cross(v0, v1, axis=axis)
def angle_between_vectors(v0, v1, directed=True, axis=0):
"""Return angle between vectors.
If directed is False, the input vectors are interpreted as undirected axes,
i.e. the maximum angle is pi/2.
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
>>> numpy.allclose(a, math.pi)
True
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
>>> numpy.allclose(a, 0)
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> a = angle_between_vectors(v0, v1)
>>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> a = angle_between_vectors(v0, v1, axis=1)
>>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
dot = numpy.sum(v0 * v1, axis=axis)
dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
return numpy.arccos(dot if directed else numpy.fabs(dot))
def inverse_matrix(matrix):
"""Return inverse of square transformation matrix.
>>> M0 = random_rotation_matrix()
>>> M1 = inverse_matrix(M0.T)
>>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
True
>>> for size in range(1, 7):
... M0 = numpy.random.rand(size, size)
... M1 = inverse_matrix(M0)
... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
"""
return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
"""Return concatenation of series of transformation matrices.
>>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
>>> numpy.allclose(M, concatenate_matrices(M))
True
>>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
True
"""
M = numpy.identity(4)
for i in matrices:
M = numpy.dot(M, i)
return M
def is_same_transform(matrix0, matrix1):
"""Return True if two matrices perform same transformation.
>>> is_same_transform(numpy.identity(4), numpy.identity(4))
True
>>> is_same_transform(numpy.identity(4), random_rotation_matrix())
False
"""
matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
matrix0 /= matrix0[3, 3]
matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
matrix1 /= matrix1[3, 3]
return numpy.allclose(matrix0, matrix1)
def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'):
"""Try import all public attributes from module into global namespace.
Existing attributes with name clashes are renamed with prefix.
Attributes starting with underscore are ignored by default.
Return True on successful import.
"""
import warnings
from importlib import import_module
try:
if not package:
module = import_module(name)
else:
module = import_module('.' + name, package=package)
except ImportError:
if warn:
warnings.warn("failed to import module %s" % name)
else:
for attr in dir(module):
if ignore and attr.startswith(ignore):
continue
if prefix:
if attr in globals():
globals()[prefix + attr] = globals()[attr]
elif warn:
warnings.warn("no Python implementation of " + attr)
globals()[attr] = getattr(module, attr)
return True
_import_module('transformations')
if __name__ == "__main__":
import doctest
import random # used in doctests
numpy.set_printoptions(suppress=True, precision=5)
doctest.testmod()
| mit | -8,569,947,633,209,522,000 | 33.410109 | 79 | 0.579559 | false |
drivnal/drivnal | drivnal/handlers/task.py | 1 | 1800 | from drivnal.constants import *
from drivnal.client import Client
from drivnal.task import Task
from drivnal.event import Event
import drivnal.utils as utils
from drivnal import server
import os
import flask
import time
@server.app.route('/task/<volume_id>', methods=['GET'])
def task_get(volume_id):
client = Client()
volume = client.get_volume(volume_id)
if not volume:
return utils.jsonify({
'error': VOLUME_NOT_FOUND,
'error_msg': VOLUME_NOT_FOUND_MSG,
}, 404)
tasks = []
for task in reversed(volume.get_tasks()):
task_data = {
'id': task.id,
'volume': volume.id,
'volume_name': volume.name,
'type': task.type,
'state': task.state,
'time': task.time,
'has_log': False,
}
if task.log_path:
if os.path.isfile(task.log_path):
task_data['has_log'] = True
if task.snapshot_id:
task_data['snapshot_id'] = task.snapshot_id
tasks.append(task_data)
return utils.jsonify(tasks)
@server.app.route('/task/<volume_id>/<task_id>', methods=['PUT'])
def task_put(volume_id, task_id):
client = Client()
volume = client.get_volume(volume_id)
if not volume:
return utils.jsonify({
'error': VOLUME_NOT_FOUND,
'error_msg': VOLUME_NOT_FOUND_MSG,
}, 404)
task = Task(id=task_id.encode())
if 'abort' in flask.request.json and flask.request.json['abort']:
task.abort()
return utils.jsonify({})
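# Example request for the PUT endpoint above (added comment; the host and ids are
# placeholders, not values from this project):
#   curl -X PUT http://<server>/task/<volume_id>/<task_id> \
#        -H 'Content-Type: application/json' -d '{"abort": true}'
# Only the "abort" key is inspected; other JSON fields are ignored by this handler.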
@server.app.route('/task/<volume_id>/<task_id>', methods=['DELETE'])
def task_delete(volume_id, task_id):
client = Client()
task = Task(id=task_id.encode())
task.remove()
return utils.jsonify({})
| agpl-3.0 | 4,385,070,478,506,404,400 | 24.714286 | 69 | 0.583889 | false |
coll-gate/collgate | server/settings/staging.py | 1 | 4562 | # -*- coding: utf-8; -*-
#
# @file staging.py
# @brief Staging specific settings.
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2016-09-01
# @copyright Copyright (c) 2016 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
import os
from .base import *
DEBUG = False
ADMINS = (
('admin_fscherma', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'HOST': '',
'PORT': '',
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'collgate',
'USER': 'collgate',
'PASSWORD': 'collgate',
'CONN_MAX_AGE': 86400
}
}
ALLOWED_HOSTS = ['staging.gdec.clermont.inra.fr', 'localhost', '127.0.0.1']
# session cookie path
SESSION_COOKIE_PATH = "/coll-gate/"
# CRSF cookie path
CSRF_COOKIE_PATH = "/coll-gate/"
MAX_UPLOAD_SIZE = 3145728 # 3Mio
CONTENT_TYPES = ['text/plain']
MEDIA_URL = 'media/'
STATIC_ROOT = 'static/'
STATIC_URL = '/coll-gate/static/'
TEMPLATES[0]['OPTIONS']['debug'] = False
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
# 'debug_panel.middleware.DebugPanelMiddleware',
'igdectk.rest.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'igdectk.rest.restmiddleware.RestMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
INSTALLED_APPS = (
'bootstrap3',
'django.contrib.postgres',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'guardian',
'igdectk.common',
'igdectk.jquery',
'igdectk.bootstrap',
'main',
'messenger',
'audit',
'permission',
'descriptor',
'medialibrary',
'geonames',
'geolocation',
'organisation',
'classification',
'accession',
'printer'
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'()': 'logging.Formatter',
'format': '[%(asctime)s] <%(levelname)s> %(name)s : %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
'colored': {
'()': 'igdectk.common.logging.ColoredFormatter',
'format': '[%(asctime)s] <%(levelname)s> %(name)s : %(message)s',
'datefmt': '%d/%b/%Y %H:%M:%S',
}
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'colored',
},
'file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, '..', '..', 'logs', 'collgate.log'),
'formatter': 'standard',
'maxBytes': 1024*1024*16, # 16MB
'backupCount': 10,
},
},
'loggers': {
'django': {
'handlers': ['console', 'file'],
'level': 'WARNING',
'propagate': True,
},
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'WARNING',
'propagate': True,
},
'collgate': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': True,
},
'igdectk': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': True,
}
}
}
DEFAULT_FROM_EMAIL = "[email protected]"
EMAIL_HOST = "smtp.clermont.inra.fr"
#EMAIL_USE_TLS = True
EMAIL_PORT = 25 # 465
EMAIL_HOST_USER = "fscherma"
EMAIL_HOST_PASSWORD = ""
#EMAIL_USE_SSL = True
APPLICATIONS['geonames'] = {
'DB_DEFAULT_SETTINGS': {
'geonames_username': "demo",
}
}
APPLICATIONS['medialibrary'] = {
'DB_DEFAULT_SETTINGS': {
'storage_location': "/coll-gate/media",
'storage_path': "/var/lib/collgate/media"
}
}
| mit | 8,841,889,773,940,665,000 | 24.47486 | 83 | 0.555702 | false |
katacarbix/pyhp | resources/filebrowser.py | 1 | 1330 | <?py
import os
from datetime import datetime
path = postvars['path']
dir = postvars['dir']
def fsize(num, suffix='B'):
for unit in ['','K','M','G','T','P','E','Z']:
if abs(num) < 1024.0:
return "%3.0f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
?>
<html>
<head>
<title>Index of <?py print dir ?></title>
<style>
td, th {
padding: 0px 10px;
text-align: left;
}
</style>
</head>
<body>
<h1>Index of <?py print dir ?></h1>
<table border=0>
<tr>
<th style="min-width:200px">Name</th>
<th>Type</th>
<th>Last Modified</th>
<th>Size</th>
</tr>
<tr><th colspan=4><hr></th></tr>
<?py
tree = os.listdir(path)
if dir != "/":
print "<tr>"
print "<td><a href=\"../\">Parent Directory</a></td>"
print "</tr>"
for branch in tree:
branch = str(branch)
print "<tr>"
print "<td><a href=\""+dir+branch+['','/'][os.path.isdir(path+branch)]+"\">"+ branch +"</a></td>"
if os.path.isdir(path+branch):
print "<td>dir</td>"
elif os.path.isfile(path+branch):
print "<td>file</td>"
else:
print "<td>-</td>"
print "<td>"+ datetime.fromtimestamp(os.path.getmtime(path+branch)).isoformat() +"</td>"
print "<td>"+ fsize(os.path.getsize(path+branch)) +"</td>"
print "</tr>"
?>
<tr><th colspan=4><hr></th></tr>
</table>
</body>
</html> | gpl-2.0 | 1,932,784,922,014,772,200 | 22.350877 | 98 | 0.558647 | false |
MediaKraken/MediaKraken_Deployment | docker/alpine/ComposeMediaKrakenLDAP/root/app/nginx-ldap-auth-daemon.py | 1 | 12042 | #!/bin/sh
''''[ -z $LOG ] && export LOG=/dev/stdout # '''
''''which python2 >/dev/null && exec python2 -u "$0" "$@" >> $LOG 2>&1 # '''
''''which python >/dev/null && exec python -u "$0" "$@" >> $LOG 2>&1 # '''
# Copyright (C) 2014-2015 Nginx, Inc.
# Copyright (C) 2018 LinuxServer.io
import sys, os, signal, base64, ldap, Cookie, argparse
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from cryptography.fernet import Fernet
from cryptography.fernet import InvalidToken
#Listen = ('localhost', 8888)
#Listen = "/tmp/auth.sock" # Also uncomment lines in 'Requests are
# processed with UNIX sockets' section below
# -----------------------------------------------------------------------------
# Different request processing models: select one
# -----------------------------------------------------------------------------
# Requests are processed in separate thread
import threading
from SocketServer import ThreadingMixIn
class AuthHTTPServer(ThreadingMixIn, HTTPServer):
pass
# -----------------------------------------------------------------------------
# Requests are processed in separate process
#from SocketServer import ForkingMixIn
#class AuthHTTPServer(ForkingMixIn, HTTPServer):
# pass
# -----------------------------------------------------------------------------
# Requests are processed with UNIX sockets
#import threading
#from SocketServer import ThreadingUnixStreamServer
#class AuthHTTPServer(ThreadingUnixStreamServer, HTTPServer):
# pass
# -----------------------------------------------------------------------------
class AuthHandler(BaseHTTPRequestHandler):
# Return True if request is processed and response sent, otherwise False
# Set ctx['user'] and ctx['pass'] for authentication
def do_GET(self):
ctx = self.ctx
ctx['action'] = 'input parameters check'
for k, v in self.get_params().items():
ctx[k] = self.headers.get(v[0], v[1])
if ctx[k] == None:
self.auth_failed(ctx, 'required "%s" header was not passed' % k)
return True
ctx['action'] = 'performing authorization'
auth_header = self.headers.get('Authorization')
auth_cookie = self.get_cookie(ctx['cookiename'])
if auth_cookie != None and auth_cookie != '':
auth_header = "Basic " + auth_cookie
self.log_message("using username/password from cookie %s" %
ctx['cookiename'])
else:
self.log_message("using username/password from authorization header")
if auth_header is None or not auth_header.lower().startswith('basic '):
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm="' + ctx['realm'] + '"')
self.send_header('Cache-Control', 'no-cache')
self.end_headers()
return True
ctx['action'] = 'decoding credentials'
try:
cipher_suite = Fernet('REPLACEWITHFERNETKEY')
self.log_message('Trying to dechipher credentials...')
auth_decoded = cipher_suite.decrypt(auth_header[6:])
user, passwd = auth_decoded.split(':', 1)
except InvalidToken:
self.log_message('Incorrect token. Trying to decode credentials from BASE64...')
auth_decoded = base64.b64decode(auth_header[6:])
user, passwd = auth_decoded.split(':', 1)
except Exception as e:
self.auth_failed(ctx)
self.log_error(e)
return True
ctx['user'] = user
ctx['pass'] = passwd
# Continue request processing
return False
def get_cookie(self, name):
cookies = self.headers.get('Cookie')
if cookies:
authcookie = Cookie.BaseCookie(cookies).get(name)
if authcookie:
return authcookie.value
else:
return None
else:
return None
# Log the error and complete the request with appropriate status
def auth_failed(self, ctx, errmsg = None):
msg = 'Error while ' + ctx['action']
if errmsg:
msg += ': ' + errmsg
ex, value, trace = sys.exc_info()
if ex != None:
msg += ": " + str(value)
if ctx.get('url'):
msg += ', server="%s"' % ctx['url']
if ctx.get('user'):
msg += ', login="%s"' % ctx['user']
self.log_error(msg)
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm="' + ctx['realm'] + '"')
self.send_header('Cache-Control', 'no-cache')
self.end_headers()
def get_params(self):
return {}
def log_message(self, format, *args):
if len(self.client_address) > 0:
addr = BaseHTTPRequestHandler.address_string(self)
else:
addr = "-"
if not hasattr(self, 'ctx'):
user = '-'
else:
user = self.ctx['user']
sys.stdout.write("%s - %s [%s] %s\n" % (addr, user,
self.log_date_time_string(), format % args))
def log_error(self, format, *args):
self.log_message(format, *args)
# Verify username/password against LDAP server
class LDAPAuthHandler(AuthHandler):
# Parameters to put into self.ctx from the HTTP header of auth request
params = {
# parameter header default
'realm': ('X-Ldap-Realm', 'Restricted'),
'url': ('X-Ldap-URL', None),
'starttls': ('X-Ldap-Starttls', 'false'),
'basedn': ('X-Ldap-BaseDN', None),
'template': ('X-Ldap-Template', '(cn=%(username)s)'),
'binddn': ('X-Ldap-BindDN', ''),
'bindpasswd': ('X-Ldap-BindPass', ''),
'cookiename': ('X-CookieName', '')
}
@classmethod
def set_params(cls, params):
cls.params = params
def get_params(self):
return self.params
# GET handler for the authentication request
def do_GET(self):
ctx = dict()
self.ctx = ctx
ctx['action'] = 'initializing basic auth handler'
ctx['user'] = '-'
if AuthHandler.do_GET(self):
# request already processed
return
ctx['action'] = 'empty password check'
if not ctx['pass']:
self.auth_failed(ctx, 'attempt to use empty password')
return
try:
# check that uri and baseDn are set
# either from cli or a request
if not ctx['url']:
self.log_message('LDAP URL is not set!')
return
if not ctx['basedn']:
self.log_message('LDAP baseDN is not set!')
return
ctx['action'] = 'initializing LDAP connection'
            ldap_obj = ldap.initialize(ctx['url'])
            # The python-ldap module documentation advises to always
            # explicitly set the LDAP version to use after running
            # initialize() and recommends using LDAPv3. (LDAPv2 is
            # deprecated since 2003 as per RFC3494.)
#
# Also, the STARTTLS extension requires the
# use of LDAPv3 (RFC2830).
ldap_obj.protocol_version=ldap.VERSION3
# Establish a STARTTLS connection if required by the
# headers.
if ctx['starttls'] == 'true':
ldap_obj.start_tls_s()
# See http://www.python-ldap.org/faq.shtml
# uncomment, if required
# ldap_obj.set_option(ldap.OPT_REFERRALS, 0)
ctx['action'] = 'binding as search user'
ldap_obj.bind_s(ctx['binddn'], ctx['bindpasswd'], ldap.AUTH_SIMPLE)
ctx['action'] = 'preparing search filter'
searchfilter = ctx['template'] % { 'username': ctx['user'] }
self.log_message(('searching on server "%s" with base dn ' + \
'"%s" with filter "%s"') %
(ctx['url'], ctx['basedn'], searchfilter))
ctx['action'] = 'running search query'
results = ldap_obj.search_s(ctx['basedn'], ldap.SCOPE_SUBTREE,
searchfilter, ['objectclass'], 1)
ctx['action'] = 'verifying search query results'
if len(results) < 1:
self.auth_failed(ctx, 'no objects found')
return
ctx['action'] = 'binding as an existing user'
ldap_dn = results[0][0]
ctx['action'] += ' "%s"' % ldap_dn
ldap_obj.bind_s(ldap_dn, ctx['pass'], ldap.AUTH_SIMPLE)
self.log_message('Auth OK for user "%s"' % (ctx['user']))
# Successfully authenticated user
self.send_response(200)
self.end_headers()
except Exception as e:
self.auth_failed(ctx)
self.log_error(str(e))
raise
def exit_handler(signal, frame):
global Listen
if isinstance(Listen, basestring):
try:
os.unlink(Listen)
except:
ex, value, trace = sys.exc_info()
sys.stderr.write('Failed to remove socket "%s": %s\n' %
(Listen, str(value)))
sys.stderr.flush()
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Simple Nginx LDAP authentication helper.""")
# Group for listen options:
group = parser.add_argument_group("Listen options")
group.add_argument('--host', metavar="hostname",
default="localhost", help="host to bind (Default: localhost)")
group.add_argument('-p', '--port', metavar="port", type=int,
default=8888, help="port to bind (Default: 8888)")
# ldap options:
group = parser.add_argument_group(title="LDAP options")
group.add_argument('-u', '--url', metavar="URL",
default="ldap://localhost:389",
help=("LDAP URI to query (Default: ldap://localhost:389)"))
group.add_argument('-s', '--starttls', metavar="starttls",
default="false",
help=("Establish a STARTTLS protected session (Default: false)"))
group.add_argument('-b', metavar="baseDn", dest="basedn", default='',
help="LDAP base dn (Default: unset)")
group.add_argument('-D', metavar="bindDn", dest="binddn", default='',
help="LDAP bind DN (Default: anonymous)")
group.add_argument('-w', metavar="passwd", dest="bindpw", default='',
help="LDAP password for the bind DN (Default: unset)")
group.add_argument('-f', '--filter', metavar='filter',
default='(cn=%(username)s)',
help="LDAP filter (Default: cn=%%(username)s)")
# http options:
group = parser.add_argument_group(title="HTTP options")
group.add_argument('-R', '--realm', metavar='"Restricted Area"',
default="Restricted", help='HTTP auth realm (Default: "Restricted")')
group.add_argument('-c', '--cookie', metavar="cookiename",
default="", help="HTTP cookie name to set in (Default: unset)")
args = parser.parse_args()
global Listen
Listen = (args.host, args.port)
auth_params = {
'realm': ('X-Ldap-Realm', args.realm),
'url': ('X-Ldap-URL', args.url),
'starttls': ('X-Ldap-Starttls', args.starttls),
'basedn': ('X-Ldap-BaseDN', args.basedn),
'template': ('X-Ldap-Template', args.filter),
'binddn': ('X-Ldap-BindDN', args.binddn),
'bindpasswd': ('X-Ldap-BindPass', args.bindpw),
'cookiename': ('X-CookieName', args.cookie)
}
LDAPAuthHandler.set_params(auth_params)
server = AuthHTTPServer(Listen, LDAPAuthHandler)
signal.signal(signal.SIGINT, exit_handler)
signal.signal(signal.SIGTERM, exit_handler)
sys.stdout.write("Start listening on %s:%d...\n" % Listen)
sys.stdout.flush()
server.serve_forever()
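# Illustrative invocation (added comment; the hostnames, base DN and bind credentials
# below are placeholders, not values used by this image):
#   python nginx-ldap-auth-daemon.py --host 0.0.0.0 -p 8888 \
#       -u ldap://ldap.example.com:389 -b "dc=example,dc=com" \
#       -D "cn=reader,dc=example,dc=com" -w secret -R "Restricted" -c nginxauth
# Per-request headers such as X-Ldap-URL or X-Ldap-BaseDN (see LDAPAuthHandler.params)
# override these defaults for each individual auth subrequest.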
| gpl-3.0 | -7,031,179,566,730,927,000 | 35.93865 | 92 | 0.546836 | false |
wisechengyi/pants | contrib/go/src/python/pants/contrib/go/register.py | 1 | 2555 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.build_graph.build_file_aliases import BuildFileAliases, TargetMacro
from pants.goal.task_registrar import TaskRegistrar as task
from pants.contrib.go.targets.go_binary import GoBinary
from pants.contrib.go.targets.go_library import GoLibrary
from pants.contrib.go.targets.go_protobuf_library import GoProtobufLibrary
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.targets.go_thrift_library import GoThriftLibrary
from pants.contrib.go.tasks.go_binary_create import GoBinaryCreate
from pants.contrib.go.tasks.go_buildgen import GoBuildgen
from pants.contrib.go.tasks.go_checkstyle import GoCheckstyle
from pants.contrib.go.tasks.go_compile import GoCompile
from pants.contrib.go.tasks.go_fetch import GoFetch
from pants.contrib.go.tasks.go_fmt import GoFmt
from pants.contrib.go.tasks.go_go import GoEnv, GoGo
from pants.contrib.go.tasks.go_protobuf_gen import GoProtobufGen
from pants.contrib.go.tasks.go_run import GoRun
from pants.contrib.go.tasks.go_test import GoTest
from pants.contrib.go.tasks.go_thrift_gen import GoThriftGen
def build_file_aliases():
return BuildFileAliases(
targets={
GoBinary.alias(): TargetMacro.Factory.wrap(GoBinary.create, GoBinary),
GoLibrary.alias(): TargetMacro.Factory.wrap(GoLibrary.create, GoLibrary),
GoProtobufLibrary.alias(): GoProtobufLibrary,
GoThriftLibrary.alias(): GoThriftLibrary,
"go_remote_libraries": TargetMacro.Factory.wrap(
GoRemoteLibrary.from_packages, GoRemoteLibrary
),
"go_remote_library": TargetMacro.Factory.wrap(
GoRemoteLibrary.from_package, GoRemoteLibrary
),
}
)
def register_goals():
task(name="go-thrift", action=GoThriftGen).install("gen")
task(name="go-protobuf", action=GoProtobufGen).install("gen")
task(name="go", action=GoBuildgen).install("buildgen")
task(name="go", action=GoGo).install("go")
task(name="go-env", action=GoEnv).install()
task(name="go", action=GoFetch).install("resolve")
task(name="go", action=GoCompile).install("compile")
task(name="go", action=GoBinaryCreate).install("binary")
task(name="go", action=GoRun).install("run")
task(name="go", action=GoCheckstyle).install("lint")
task(name="go", action=GoTest).install("test")
task(name="go", action=GoFmt).install("fmt")
| apache-2.0 | 1,653,328,518,813,307,600 | 46.314815 | 85 | 0.735029 | false |
LolexInc/Lolex-Tools | ci/build/update/CIRepoAutoUpdate.py | 1 | 1054 | #! python3
# 0
# 0 000000 0 000000 0 0 000000000 00000000 00000000 0 000000
# 0 00 0 0 0 00 00 0 0 0 0 0 0
# 0 00 0 0 00000 00 000000 00 0 0 0 0 0 00000
# 0 00 0 0 0 0 0 00 0 0 0 0 0 0
# 0000000 000000 0000000 000000 0 0 00 00000000 00000000 0000000 000000
#
# authors = Monkeyboy2805
import sys
sys.path.insert(0, "./ci/lib/")
import LolexToolsCIlib
del sys.path[sys.path.index("./ci/lib/")]
sys.path.insert(0, "./ci/build/prop/")
import LATEST_PYTHON_VERSION
del sys.path[sys.path.index("./ci/build/prop/")]
if int(LATEST_PYTHON_VERSION.version) != int(LolexToolsCIlib.get_py_ver()):
LolexToolsCIlib.update_py_ver()
#LolexToolsCIlib.update_headers() #doing headers might be glitchy at the moment
else:
exit(0)
| lgpl-3.0 | 3,172,713,795,305,767,000 | 49.190476 | 115 | 0.486717 | false |
SKIRT/PTS | core/prep/dustgrids.py | 1 | 23186 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.prep.dustgrids Contains the DustGridGenerator class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import math
from collections import OrderedDict
# Import astronomical modules
from astropy.units import dimensionless_angles
# Import the relevant PTS classes and modules
from ..simulation.grids import BinaryTreeDustGrid, OctTreeDustGrid, CartesianDustGrid
from ...core.basics.log import log
from ...core.basics.range import zip_linear
from ..basics.configurable import Configurable
from ..basics.table import SmartTable
from ..basics.range import RealRange, QuantityRange, IntegerRange
from ..tools import types
# -----------------------------------------------------------------
class DustGridsTable(SmartTable):
"""
This class ...
"""
# Add column info
_column_info = OrderedDict()
_column_info["Type"] = (str, None, "grid type")
_column_info["Min x"] = (float, "pc", "minimum x")
_column_info["Max x"] = (float, "pc", "maximum x")
_column_info["Min y"] = (float, "pc", "minimum y")
_column_info["Max y"] = (float, "pc", "maximum y")
_column_info["Min z"] = (float, "pc", "minimum z")
_column_info["Max z"] = (float, "pc", "maximum z")
_column_info["Smallest scale"] = (float, "pc", "Smallest scale")
_column_info["Min level"] = (int, None, "Minimum level")
_column_info["Max mass fraction"] = (float, None, "Maximum mass fraction")
# -----------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(DustGridsTable, self).__init__(*args, **kwargs)
# Add column info
self.add_all_column_info(self._column_info)
# -----------------------------------------------------------------
def add_entry(self, grid_type, x_range, y_range, z_range, scale, min_level, max_mass_fraction):
"""
This function ...
:param grid_type:
:param x_range:
:param y_range:
:param z_range:
:param scale:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Add a row to the table
self.add_row([grid_type, x_range.min, x_range.max, y_range.min, y_range.max, z_range.min, z_range.max, scale, min_level, max_mass_fraction])
# -----------------------------------------------------------------
class DustGridGenerator(Configurable):
"""
This class...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(DustGridGenerator, self).__init__(*args, **kwargs)
# -- Attributes --
# Settings
self.scale_range = None
self.level_range = None
self.mass_fraction_range = None
self.ngrids = None
self._grid_type = None
self.x_radius = None
self.y_radius = None
self.z_radius = None
# The dust grids
self.grids = []
# The dust grid property table
self.table = None
# -----------------------------------------------------------------
@property
def grid_type(self):
return self._grid_type
# -----------------------------------------------------------------
@grid_type.setter
def grid_type(self, grid_type):
"""
This function ...
:return:
"""
if not grid_type in ["cartesian", "bintree", "octtree"]: raise RuntimeError("Grid type '" + str(grid_type) + "' invalid. Must be either 'cartesian', 'bintree', or 'octtree'.")
self._grid_type = grid_type
# -----------------------------------------------------------------
@property
def x_min(self):
return - self.x_radius
# -----------------------------------------------------------------
@property
def x_max(self):
return self.x_radius
# -----------------------------------------------------------------
@property
def x_extent(self):
return self.x_max - self.x_min
# -----------------------------------------------------------------
@property
def y_min(self):
return - self.y_radius
# -----------------------------------------------------------------
@property
def y_max(self):
return self.y_radius
# -----------------------------------------------------------------
@property
def y_extent(self):
return self.y_max - self.y_min
# -----------------------------------------------------------------
@property
def z_min(self):
return - self.z_radius
# -----------------------------------------------------------------
@property
def z_max(self):
return self.z_radius
# -----------------------------------------------------------------
@property
def z_extent(self):
return self.z_max - self.z_min
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# 2. Generate the dust grids
self.generate()
# 3. Show
if self.config.show: self.show()
# 4. Write
if self.config.write: self.write()
# -----------------------------------------------------------------
@property
def single_grid(self):
"""
This function ...
:return:
"""
if len(self.grids) == 0: raise RuntimeError("No grid")
elif len(self.grids) == 1: return self.grids[0]
else: raise RuntimeError("More than one grid")
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(DustGridGenerator, self).setup(**kwargs)
# Get settings
self.ngrids = kwargs.pop("ngrids")
if self.ngrids == 1:
self.scale_range = QuantityRange.infinitesimal(kwargs.pop("scale"))
self.level_range = IntegerRange.infinitesimal(kwargs.pop("level"))
self.mass_fraction_range = RealRange.infinitesimal(kwargs.pop("mass_fraction"))
else:
self.scale_range = kwargs.pop("scale_range")
self.level_range = kwargs.pop("level_range")
self.mass_fraction_range = kwargs.pop("mass_fraction_range")
# Initialize the table
self.table = DustGridsTable()
# -----------------------------------------------------------------
def generate(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the grids ...")
# Loop over the different grid parameter values
for scale, min_level, mass_fraction in zip_linear(self.scale_range, self.level_range, self.mass_fraction_range, npoints=self.ngrids):
# Create the grid and add it to the list
if self.grid_type == "cartesian": self.create_cartesian_dust_grid(scale)
elif self.grid_type == "bintree": self.create_binary_tree_dust_grid(scale, min_level, mass_fraction)
elif self.grid_type == "octtree": self.create_octtree_dust_grid(scale, min_level, mass_fraction)
else: raise ValueError("Invalid grid type: " + self.grid_type)
# -----------------------------------------------------------------
def create_cartesian_dust_grid(self, scale):
"""
This function ...
:param scale:
:return:
"""
# Create the grid
grid = create_one_cartesian_dust_grid(scale, self.x_extent, self.y_extent, self.z_extent, self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max)
# Add the grid
self.grids.append(grid)
# Debugging
log.debug("Created a cartesian dust grid with:")
if log.is_debug:
print("")
print(grid)
print("")
# Add a row to the table
self.table.add_row([self.grid_type, self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max, scale, None, None])
# -----------------------------------------------------------------
def create_binary_tree_dust_grid(self, scale, min_level, max_mass_fraction):
"""
This function ...
:param scale:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Create the grid
grid = create_one_bintree_dust_grid(scale, self.x_extent, self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max, min_level, max_mass_fraction)
# Add the grid
self.grids.append(grid)
# Debugging
log.debug("Created a binary tree dust grid with:")
if log.is_debug:
print("")
print(grid)
print("")
# Add a row to the table
self.table.add_row([self.grid_type, self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max, scale, min_level, max_mass_fraction])
# -----------------------------------------------------------------
def create_octtree_dust_grid(self, scale, min_level, max_mass_fraction):
"""
This function ...
:param scale:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Create the grid
grid = create_one_octtree_dust_grid(scale, self.x_extent, self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max, min_level, max_mass_fraction)
# Add the grid
self.grids.append(grid)
# Debugging
log.debug("Created an octtree dust grid with:")
if log.is_debug:
print("")
print(grid)
print("")
# Add entry to the table
x_range = RealRange(self.x_min, self.x_max)
y_range = RealRange(self.y_min, self.y_max)
z_range = RealRange(self.z_min, self.z_max)
        self.table.add_entry(self.grid_type, x_range, y_range, z_range, scale, min_level, max_mass_fraction)
# -----------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
def write(self):
"""
        This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write the grids
self.write_grids()
# Write table
self.write_table()
# -----------------------------------------------------------------
def write_grids(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing grids ...")
# -----------------------------------------------------------------
def write_table(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing table ...")
# -----------------------------------------------------------------
def create_one_dust_grid_for_galaxy_from_deprojection(grid_type, deprojection, distance, sky_ellipse, min_level,
max_mass_fraction, max_ndivisions_per_pixel=2, nscaleheights=10.):
"""
This function ...
:param grid_type:
:param deprojection:
:param distance:
:param sky_ellipse:
:param min_level:
:param max_mass_fraction:
:param max_ndivisions_per_pixel:
:param nscaleheights:
:return:
"""
if sky_ellipse is not None:
# Calculate the major radius of the truncation ellipse in physical coordinates (pc)
semimajor_angular = sky_ellipse.semimajor # semimajor axis length of the sky ellipse
radius_physical = (semimajor_angular * distance).to("pc", equivalencies=dimensionless_angles())
else:
x_radius_physical = deprojection.x_range.radius
y_radius_physical = deprojection.y_range.radius
radius_physical = max(x_radius_physical, y_radius_physical)
# Get properties
average_pixelscale = deprojection.pixelscale
scaleheight = deprojection.scale_height
# Get the pixelscale in physical units
if types.is_angle(average_pixelscale):
pixelscale_angular = average_pixelscale.to("deg")
# pixelscale_angular = self.reference_wcs.average_pixelscale.to("deg") # in deg
pixelscale = (pixelscale_angular * distance).to("pc", equivalencies=dimensionless_angles())
elif types.is_length_quantity(average_pixelscale): pixelscale = average_pixelscale.to("pc") # normally it should be this case (deprojections should have their pixelscale defined in physical units)
else: raise ValueError("Pixelscale should be an angle or a length quantity")
# Determine the minimum physical scale
min_scale = pixelscale / float(max_ndivisions_per_pixel)
# Create the dust grid
return create_one_dust_grid_for_galaxy(grid_type, radius_physical, scaleheight, min_scale, min_level, max_mass_fraction, nscaleheights=nscaleheights)
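# Illustrative numbers for the angular-to-physical conversion above (added comment; the
# pixelscale and distance are made-up, roughly galaxy-survey-like values): a 1.5 arcsec
# pixel at a distance of 3.6 Mpc corresponds to (1.5 / 206265) rad * 3.6e6 pc ~= 26.2 pc,
# so with max_ndivisions_per_pixel=2 the smallest allowed grid scale becomes ~13.1 pc.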
# -----------------------------------------------------------------
def create_one_dust_grid_for_galaxy(grid_type, radius, scaleheight, min_scale, min_level, max_mass_fraction, nscaleheights=10.):
"""
This function ...
:param grid_type:
:param radius: IN PHYSICAL COORDINATES
:param scaleheight: IN PHYSICAL COORDINATES
:param min_scale:
:param min_level:
:param max_mass_fraction:
:param nscaleheights: REAL NUMBER
:return:
"""
# Determine x, y and z radius
x_radius = radius
y_radius = radius
z_radius = scaleheight * nscaleheights
# X
x_min = - x_radius
x_max = x_radius
x_extent = x_max - x_min
# Y
y_min = - y_radius
y_max = y_radius
y_extent = y_max - y_min
# Z
z_min = - z_radius
z_max = z_radius
z_extent = z_max - z_min
# Create the dust grid
return create_one_dust_grid(grid_type, min_scale, x_extent, y_extent, z_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction)
# -----------------------------------------------------------------
def create_one_dust_grid(grid_type, scale, x_extent, y_extent, z_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction):
"""
This function ...
:param grid_type:
:param scale:
:param x_extent:
:param y_extent:
:param z_extent:
:param x_min:
:param x_max:
:param y_min:
:param y_max:
:param z_min:
:param z_max:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Create the specified type of grid
if grid_type == "cartesian": return create_one_cartesian_dust_grid(scale, x_extent, y_extent, z_extent, x_min, x_max, y_min, y_max, z_min, z_max)
elif grid_type == "bintree": return create_one_bintree_dust_grid(scale, x_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction)
elif grid_type == "octtree": return create_one_octtree_dust_grid(scale, x_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction)
else: raise ValueError("Unknown dust grid type: " + grid_type)
# -----------------------------------------------------------------
def create_one_cartesian_dust_grid(scale, x_extent, y_extent, z_extent, x_min, x_max, y_min, y_max, z_min, z_max):
"""
This function ...
:param scale:
:param x_extent:
:param y_extent:
:param z_extent:
:param x_min:
:param x_max:
:param y_min:
:param y_max:
:param z_min:
:param z_max:
:return:
"""
# Inform the user
log.info("Creating a cartesian dust grid with a physical scale of " + str(scale) + " ...")
# Calculate the number of bins in each direction
x_bins = int(math.ceil(x_extent.to("pc").value / scale.to("pc").value))
y_bins = int(math.ceil(y_extent.to("pc").value / scale.to("pc").value))
z_bins = int(math.ceil(z_extent.to("pc").value / scale.to("pc").value))
# Create the grid
grid = CartesianDustGrid(x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max, z_min=z_min, z_max=z_max,
x_bins=x_bins, y_bins=y_bins, z_bins=z_bins)
# Return the grid
return grid
# -----------------------------------------------------------------
def create_one_bintree_dust_grid(scale, x_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction):
"""
This function ...
:param scale:
:param x_extent:
:param x_min:
:param x_max:
:param y_min:
:param y_max:
:param z_min:
:param z_max:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Inform the user
log.info("Creating a binary tree dust grid with a smallest physical scale of " + str(scale) + ", with a minimum division level of " + str(min_level) + " and a maximum mass fraction of " + str(max_mass_fraction) + " ...")
# Calculate the maximum division level that is necessary to resolve the smallest scale of the input maps
extent_x = x_extent.to("pc").value
smallest_scale = scale.to("pc").value
max_level = max_level_for_smallest_scale_bintree(extent_x, smallest_scale)
# Check arguments
if x_min is None: raise ValueError("'x_min' is undefined")
if x_max is None: raise ValueError("'x_max' is undefined")
if y_min is None: raise ValueError("'y_min' is undefined")
if y_max is None: raise ValueError("'y_max' is undefined")
if z_min is None: raise ValueError("'z_min' is undefined")
if z_max is None: raise ValueError("'z_max' is undefined")
if min_level is None: raise ValueError("'min_level' is undefined")
if max_mass_fraction is None: raise ValueError("'max_mass_fraction' is undefined")
# Create the dust grid
grid = BinaryTreeDustGrid(min_x=x_min, max_x=x_max, min_y=y_min, max_y=y_max, min_z=z_min, max_z=z_max,
min_level=min_level, max_level=max_level, max_mass_fraction=max_mass_fraction)
# Return the grid
return grid
# -----------------------------------------------------------------
def create_one_octtree_dust_grid(scale, x_extent, x_min, x_max, y_min, y_max, z_min, z_max, min_level, max_mass_fraction):
"""
    Create an octtree dust grid whose smallest cells resolve the given physical scale.
:param scale:
:param x_extent:
:param x_min:
:param x_max:
:param y_min:
:param y_max:
:param z_min:
:param z_max:
:param min_level:
:param max_mass_fraction:
:return:
"""
# Inform the user
    log.info("Creating an octtree dust grid with a smallest physical scale of " + str(scale) + ", with a minimum division level of " + str(min_level) + " and a maximum mass fraction of " + str(max_mass_fraction) + " ...")
    # Calculate the maximum division level that is necessary to resolve the smallest scale of the input maps
extent_x = x_extent.to("pc").value
smallest_scale = scale.to("pc").value
max_level = max_level_for_smallest_scale_octtree(extent_x, smallest_scale)
# Check arguments
if x_min is None: raise ValueError("'x_min' is undefined")
if x_max is None: raise ValueError("'x_max' is undefined")
if y_min is None: raise ValueError("'y_min' is undefined")
if y_max is None: raise ValueError("'y_max' is undefined")
if z_min is None: raise ValueError("'z_min' is undefined")
if z_max is None: raise ValueError("'z_max' is undefined")
if min_level is None: raise ValueError("'min_level' is undefined")
if max_mass_fraction is None: raise ValueError("'max_mass_fraction' is undefined")
# Create the dust grid
grid = OctTreeDustGrid(min_x=x_min, max_x=x_max, min_y=y_min, max_y=y_max, min_z=z_min, max_z=z_max,
min_level=min_level, max_level=max_level, max_mass_fraction=max_mass_fraction)
# Return the grid
return grid
# -----------------------------------------------------------------
def max_level_for_smallest_scale_bintree(extent, smallest_scale):
"""
    Calculate the binary tree division level needed to resolve the given smallest scale within the extent (three binary levels correspond to one octtree level).
:param extent:
:param smallest_scale:
:return:
"""
ratio = extent / smallest_scale
octtree_level = int(math.ceil(math.log(ratio, 2)))
level = int(3 * octtree_level)
return level
# -----------------------------------------------------------------
def smallest_scale_for_max_level_bintree(extent, max_level):
"""
    Calculate the smallest scale resolvable by a binary tree of the given maximum level within the extent.
:param extent:
:param max_level:
:return:
"""
octtree_level = max_level / 3
max_ratio = 2**octtree_level
min_scale = extent / max_ratio
return min_scale
# -----------------------------------------------------------------
def max_level_for_smallest_scale_octtree(extent, smallest_scale):
"""
    Calculate the octtree division level needed to resolve the given smallest scale within the extent.
:param extent:
:param smallest_scale:
:return:
"""
ratio = extent / smallest_scale
octtree_level = int(math.ceil(math.log(ratio, 2)))
return octtree_level
# -----------------------------------------------------------------
def smallest_scale_for_max_level_octtree(extent, max_level):
"""
    Calculate the smallest scale resolvable by an octtree of the given maximum level within the extent.
:param extent:
:param max_level:
:return:
"""
max_ratio = 2**max_level
min_scale = extent / max_ratio
return min_scale
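# Worked example for the four helpers above (hypothetical numbers, used only to
# illustrate the formulas): for a domain extent of 30000 pc and a smallest scale
# of 100 pc, the ratio is 300, so an octtree needs ceil(log2(300)) = 9 division
# levels, while a binary tree (one axis subdivided per level) needs 3 * 9 = 27.
# Conversely, a 9-level octtree resolves scales down to 30000 pc / 2**9 ~ 58.6 pc.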
# -----------------------------------------------------------------
def smallest_scale_for_dust_grid(grid):
"""
    Return the smallest physical scale resolved by the given dust grid.
:param grid:
:return:
"""
# Cartesian grid
if isinstance(grid, CartesianDustGrid):
min_x_scale = grid.x_extent / float(grid.x_bins)
min_y_scale = grid.y_extent / float(grid.y_bins)
min_z_scale = grid.z_extent / float(grid.z_bins)
# Return the minimum scale
return min(min_x_scale, min_y_scale, min_z_scale)
# Octtree
elif isinstance(grid, OctTreeDustGrid):
extent = grid.smallest_extent
max_level = grid.max_level
return smallest_scale_for_max_level_octtree(extent, max_level)
# Binary tree
elif isinstance(grid, BinaryTreeDustGrid):
extent = grid.smallest_extent
max_level = grid.max_level
return smallest_scale_for_max_level_bintree(extent, max_level)
# Other
else: raise NotImplementedError("Other dust grids not implemented")
# -----------------------------------------------------------------
| agpl-3.0 | 7,276,239,575,000,865,000 | 29.913333 | 224 | 0.528575 | false |
ticosax/django-fsm-log | setup.py | 1 | 1452 | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-fsm-log',
version='1.7.0dev',
description='Logging for django-fsm',
author='Gizmag',
author_email='[email protected]',
url='https://github.com/gizmag/django-fsm-log',
license='MIT',
packages=find_packages(),
install_requires=['django>=1.8', 'django_fsm>=2', 'django_appconf'],
extras_require={
'testing': [
'pytest',
'pytest-cov',
'pytest-django',
'pytest-mock',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
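# Usage note (hedged): with the 'testing' extra declared above, the optional test
# dependencies can typically be installed with `pip install "django-fsm-log[testing]"`.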
| mit | 2,992,084,338,474,701,300 | 32 | 72 | 0.554408 | false |
Microsoft/PTVS-Samples | PollsDjango/PollsDjango/urls.py | 1 | 1210 | """
Definition of urls for $safeprojectname$.
"""
from datetime import datetime
from django.conf.urls import url, include
from django.contrib import admin
import django.contrib.auth.views
import app.forms
import app.views
admin.autodiscover()
urlpatterns = [
url(r'^', include('app.urls', namespace="app")),
url(r'^contact$', app.views.contact, name='contact'),
url(r'^about', app.views.about, name='about'),
url(r'^seed', app.views.seed, name='seed'),
url(r'^login/$',
django.contrib.auth.views.login,
{
'template_name': 'app/login.html',
'authentication_form': app.forms.BootstrapAuthenticationForm,
'extra_context':
{
'title': 'Log in',
'year': datetime.now().year,
}
},
name='login'),
url(r'^logout$',
django.contrib.auth.views.logout,
{
'next_page': '/',
},
name='logout'),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
]
| apache-2.0 | 3,477,789,548,203,435,000 | 26.139535 | 73 | 0.561983 | false |
VirusTotal/msticpy | msticpy/nbtools/security_event.py | 1 | 3507 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Module for SecurityEvent class."""
from typing import List, Dict, Any
import pandas as pd
from .entityschema import Entity, Host, Process, Account, IpAddress, HostLogonSession
from .security_base import SecurityBase
from ..common.utility import export
from .._version import VERSION
__version__ = VERSION
__author__ = "Ian Hellen"
@export
class SecurityEvent(SecurityBase):
"""SecurityEvent class."""
def __init__(self, src_row: pd.Series = None):
"""
Instantiate new instance of SecurityEvent.
:param src_row: Pandas series containing single security event
"""
self._source_data = src_row
super().__init__(src_row=src_row)
self._extract_entities(src_row)
self._find_os_family()
# Properties
@property
def entities(self) -> List[Entity]:
"""
Return the list of entities extracted from the event.
Returns
-------
List[Entity]
The list of entities extracted from the event.
"""
return list(self._entities)
@property
def query_params(self) -> Dict[str, Any]:
"""
Query parameters derived from alert.
Returns
-------
Dict[str, Any]
Dictionary of parameter names
"""
return super().query_params
# index operator override
def __getattr__(self, name):
"""Return the value of the named property 'name'."""
if name is not None and name in self._source_data:
return self._source_data[name]
return None
def _extract_entities(self, src_row):
if "EventID" in src_row:
host = Host(src_event=src_row)
self._entities.append(host)
event_id = str(src_row["EventID"])
if event_id == "4688":
event_proc = Process(src_event=src_row, role="new")
self._entities.append(event_proc)
event_proc["Host"] = host
if "ParentProcess" in event_proc:
self._entities.append(event_proc.ParentProcess)
if "ImageFile" in event_proc.ParentProcess:
self._entities.append(event_proc.ParentProcess.ImageFile)
logon_session = HostLogonSession(src_event=src_row)
logon_session.Host = host
if "Account" in event_proc:
logon_session.Account = event_proc.Account
event_proc.Account.Host = host
self._entities.append(event_proc.Account)
self._entities.append(logon_session)
if "ImageFile" in event_proc:
self._entities.append(event_proc.ImageFile)
if event_id in ("4624", "4625"):
subj_account = Account(src_event=src_row, role="subject")
subj_account.Host = host
self._entities.append(subj_account)
tgt_account = Account(src_event=src_row, role="target")
tgt_account.Host = host
self._entities.append(tgt_account)
self._entities.append(IpAddress(src_event=src_row))
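# Minimal usage sketch (hypothetical data; the keys below only mimic the Windows
# security event schema this class expects -- real rows typically come from a
# query result DataFrame):
#
#   import pandas as pd
#   row = pd.Series({"EventID": 4688, "Computer": "host01",
#                    "NewProcessName": "C:\\Windows\\System32\\cmd.exe"})
#   event = SecurityEvent(src_row=row)
#   print(event.entities)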
| mit | -7,031,642,204,383,718,000 | 33.722772 | 85 | 0.551468 | false |
frollo/EquationReader | equationReader.py | 1 | 3520 | #!/usr/bin/python
import sys
import re
import string
variables = {}
outs = {}
monomial = r"([a-zA-Z]+\d+)"
mn = re.compile(monomial)
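# Expected input format, reconstructed (with some guesswork) from the parsing code
# below; '+' is evaluated as XOR and '*' as AND over boolean values:
#
#   2 inputs
#   x1 x2
#   1 0
#   1 outputs
#   y1
#   begin
#   y1 = x1 * x2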
def extractValues(strin):
xAddr1 = strin[2].strip()
xAddr2 = strin[4].strip()
if xAddr1 in variables:
x1 = variables[xAddr1]
else:
raise Exception("equationReader: variable " + xAddr1 + " not found")
if mn.match(xAddr2):
if xAddr2 in variables:
x2 = variables[xAddr2]
else:
raise Exception("equationReader: variable " + xAddr2 + " not found")
else:
x2 = bool(int(xAddr2))
return {'x1':x1, 'x2':x2}
if len(sys.argv) != 3:
raise Exception("Usage: equationReader <input file> <output file>")
fin = open(sys.argv[1], "r")
lines = fin.readlines()
inputs = re.compile("\d+ inputs")
outputs = re.compile("\d+ outputs")
for index, line in enumerate(lines):
if inputs.match(line):
#Creation of the x set
xLine = lines[index + 1]
xValueLine = lines[index + 2]
ins = string.split(xLine)
insValues = string.split(xValueLine)
if len(ins) != len(insValues):
print(line + xLine + xValueLine)
raise Exception("equationReader: you need to provide a starting value for each x inserted")
for i in range(len(ins)):
x = ins[i].strip()
variables[x] = bool(int(insValues[i]))
else:
if outputs.match(line):
#Creation of the y set
yLine = lines[index + 1]
ins = string.split(yLine, " ")
for y in ins:
                outs[y.strip()] = None  # strip the trailing newline so later name lookups match
else:
            if line.strip() == "begin":
                #When the equations start we get to the next cycle which performs the calculations
break
#y = x + z
equation_XOR = re.compile(monomial + " = " + monomial + r" \+ (" + monomial + "|(0|1))")
#y = x * z
equation_AND = re.compile(monomial + " = " + monomial + r" \* (" + monomial + "|(0|1))")
#y = x
equation_ASSIGNEMENT = re.compile(monomial + " = (" + monomial + "|(0|1))")
for index, line in enumerate(lines):
tmp = string.split(line, " ")
print(line)
if equation_XOR.match(line):
xdict = extractValues(tmp)
yAddr = tmp[0]
y = xdict['x1'] ^ xdict['x2']
variables[yAddr] = y
if yAddr in outs:
outs[yAddr] = y
else:
if equation_AND.match(line):
xdict = extractValues(tmp)
yAddr = tmp[0]
y = xdict['x1'] & xdict['x2']
variables[yAddr] = y
if yAddr in outs:
outs[yAddr] = y
else:
if equation_ASSIGNEMENT.match(line):
yAddr = tmp[0].strip()
xAddr = tmp[2].strip()
if mn.match(xAddr):
if xAddr in variables:
x = variables[xAddr]
else:
raise Exception("equationReader: variable " + xAddr + " not found")
else:
                    x = bool(int(xAddr))  # parse the constant the same way as in extractValues
y = x
variables[yAddr] = y
if yAddr in outs:
outs[yAddr] = y
else:
print("Skipping malformed equation:" + line)
#Printing out the results
fin.close()
fout = open(sys.argv[2], "w")
for key, value in outs.items():
fout.write(key + " = {}\n".format(int(value)))
fout.close()
| gpl-3.0 | 5,119,402,926,935,882,000 | 31 | 103 | 0.509091 | false |
DavideCanton/Python3 | concur/AABABB.py | 1 | 1342 | import contextlib
import threading
import io
from concur import KamiSemaphore
class A(threading.Thread):
def __init__(self, semA, semB, mutex):
threading.Thread.__init__(self)
self.setName("A")
self.semA = semA
self.semB = semB
self.mutex = mutex
def run(self):
self.semA.acquire(2)
with self.mutex:
print("A", end="")
self.semB.release(2)
class B(threading.Thread):
def __init__(self, semA, semB, mutex):
threading.Thread.__init__(self)
self.setName("B")
self.semA = semA
self.semB = semB
self.mutex = mutex
def run(self):
self.semB.acquire()
with self.mutex:
print("B", end="")
self.semA.release()
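# How the "AABABB" ordering is forced (assuming KamiSemaphoreT accepts a negative
# initial count and acquire(n) atomically waits for n permits): semA starts at 5,
# so only two A threads can take 2 permits each before the third blocks; those two
# A's raise semB from -3 to 1, which releases exactly one B; that B's release of
# semA unblocks the last A, whose release of semB then lets the remaining two B's run.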
if __name__ == "__main__":
output = io.StringIO()
with contextlib.redirect_stdout(output):
semA = KamiSemaphore.KamiSemaphoreT(5)
semB = KamiSemaphore.KamiSemaphoreT(-3)
par = {"semA": semA, "semB": semB, "mutex": threading.Lock()}
threads = [A(**par) for i in range(3)] + [B(**par) for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
result = output.getvalue()
if result == "AABABB":
print("OK")
else:
print("NO: {}".format(result))
| gpl-3.0 | 6,614,634,021,793,715,000 | 23.851852 | 77 | 0.544709 | false |
Talvalin/server-client-python | tableauserverclient/models/project_item.py | 1 | 2738 | import xml.etree.ElementTree as ET
from .property_decorators import property_is_enum, property_not_empty
from .. import NAMESPACE
class ProjectItem(object):
class ContentPermissions:
LockedToProject = 'LockedToProject'
ManagedByOwner = 'ManagedByOwner'
def __init__(self, name, description=None, content_permissions=None):
self._content_permissions = None
self._id = None
self.description = description
self.name = name
self.content_permissions = content_permissions
@property
def content_permissions(self):
return self._content_permissions
@content_permissions.setter
@property_is_enum(ContentPermissions)
def content_permissions(self, value):
self._content_permissions = value
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@name.setter
@property_not_empty
def name(self, value):
self._name = value
def is_default(self):
return self.name.lower() == 'default'
def _parse_common_tags(self, project_xml):
if not isinstance(project_xml, ET.Element):
project_xml = ET.fromstring(project_xml).find('.//t:project', namespaces=NAMESPACE)
if project_xml is not None:
(_, name, description, content_permissions) = self._parse_element(project_xml)
self._set_values(None, name, description, content_permissions)
return self
def _set_values(self, project_id, name, description, content_permissions):
if project_id is not None:
self._id = project_id
if name:
self._name = name
if description:
self.description = description
if content_permissions:
self._content_permissions = content_permissions
@classmethod
def from_response(cls, resp):
all_project_items = list()
parsed_response = ET.fromstring(resp)
all_project_xml = parsed_response.findall('.//t:project', namespaces=NAMESPACE)
for project_xml in all_project_xml:
(id, name, description, content_permissions) = cls._parse_element(project_xml)
project_item = cls(name)
project_item._set_values(id, name, description, content_permissions)
all_project_items.append(project_item)
return all_project_items
@staticmethod
def _parse_element(project_xml):
id = project_xml.get('id', None)
name = project_xml.get('name', None)
description = project_xml.get('description', None)
content_permissions = project_xml.get('contentPermissions', None)
return id, name, description, content_permissions
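# Minimal usage sketch (hypothetical values; `resp` would be the XML body of a
# Tableau Server REST API "query projects" response):
#
#   projects = ProjectItem.from_response(resp)
#   sandbox = ProjectItem('team-sandbox', 'Scratch space',
#                         ProjectItem.ContentPermissions.LockedToProject)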
| mit | 9,043,964,154,326,998,000 | 32.390244 | 95 | 0.642805 | false |
AlanWarren/dotfiles | .weechat/python/lnotify.py | 1 | 3800 | # Project: lnotify
# Description: A libnotify script for weechat. Uses
# subprocess.call to execute notify-send with arguments.
# Author: kevr <[email protected]>
# License: GPL3
#
# 0.1.2
# added option to display weechat's icon by tomboy64
#
# 0.1.3
# changed the way that icon to WeeChat notification is specified.
# (No absolute path is needed)
# /usr/bin/notify-send isn't needed anymore.
# (pynotify is handling notifications now)
# changed the way that lnotify works. When using gnome 3, every new
# notification was creating a new notification instance. The way that
# it is now, all WeeChat notifications are in a group (that have the
# WeeChat icon and have WeeChat name).
# Got report that it has better look for KDE users too.
#
# 0.1.4
# change hook_print callback argument type of displayed/highlight
# (WeeChat >= 1.0)
#
# 0.2.0
# - changed entire system to hook_process_hashtable calls to notify-send
# - also changed the configuration option names and methods
# Note: If you want pynotify, refer to the 'notify.py' weechat script
import weechat as weechat
lnotify_name = "lnotify"
lnotify_version = "0.2.0"
lnotify_license = "GPL3"
# convenient table checking for bools
true = { "on": True, "off": False }
# declare this here, will be global config() object
# but is initialized in __main__
cfg = None
class config(object):
def __init__(self):
# default options for lnotify
self.opts = {
"highlight": "on",
"query": "on",
"notify_away": "off",
"icon": "weechat",
}
self.init_config()
self.check_config()
def init_config(self):
for opt, value in self.opts.items():
temp = weechat.config_get_plugin(opt)
if not len(temp):
weechat.config_set_plugin(opt, value)
def check_config(self):
for opt in self.opts:
self.opts[opt] = weechat.config_get_plugin(opt)
def __getitem__(self, key):
return self.opts[key]
def printc(msg):
weechat.prnt("", msg)
def handle_msg(data, pbuffer, date, tags, displayed, highlight, prefix, message):
    highlight = bool(highlight) and true[cfg["highlight"]]
query = true[cfg["query"]]
notify_away = true[cfg["notify_away"]]
buffer_type = weechat.buffer_get_string(pbuffer, "localvar_type")
away = weechat.buffer_get_string(pbuffer, "localvar_away")
if pbuffer == weechat.current_buffer():
return weechat.WEECHAT_RC_OK
if away and not notify_away:
return weechat.WEECHAT_RC_OK
buffer_name = weechat.buffer_get_string(pbuffer, "short_name")
if buffer_type == "private" and query:
notify_user(buffer_name, message)
elif buffer_type == "channel" and highlight:
notify_user("{} @ {}".format(prefix, buffer_name), message)
return weechat.WEECHAT_RC_OK
def process_cb(data, command, return_code, out, err):
if return_code == weechat.WEECHAT_HOOK_PROCESS_ERROR:
weechat.prnt("", "Error with command '%s'" % command)
elif return_code != 0:
weechat.prnt("", "return_code = %d" % return_code)
weechat.prnt("", "notify-send has an error")
return weechat.WEECHAT_RC_OK
def notify_user(origin, message):
hook = weechat.hook_process_hashtable("notify-send",
{ "arg1": "-i", "arg2": cfg["icon"],
"arg3": "-a", "arg4": "WeeChat",
"arg5": origin, "arg6": message },
20000, "process_cb", "")
return weechat.WEECHAT_RC_OK
# execute initializations in order
if __name__ == "__main__":
weechat.register(lnotify_name, "kevr", lnotify_version, lnotify_license,
"{} - A libnotify script for weechat".format(lnotify_name), "", "")
cfg = config()
print_hook = weechat.hook_print("", "", "", 1, "handle_msg", "")
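    # Configuration sketch (hedged): each key in config.opts is exposed through
    # WeeChat's plugin options, so with the usual naming scheme for python scripts
    # the values can be changed at runtime, e.g.:
    #   /set plugins.var.python.lnotify.notify_away on
    #   /set plugins.var.python.lnotify.icon weechat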
| gpl-2.0 | 296,881,346,025,027,200 | 31.478632 | 81 | 0.644211 | false |
rgommers/numpy | runtests.py | 1 | 25169 | #!/usr/bin/env python3
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -s {SAMPLE_SUBMODULE}
$ python runtests.py -t {SAMPLE_TEST}
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
$ python runtests.py --durations 20
Run a debugger:
$ gdb --args python runtests.py [...other args...]
Disable pytest capturing of output by using its '-s' option:
$ python runtests.py -- -s
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
$ python runtests.py --gcov [...other args...]
$ python runtests.py --lcov-html
Run lint checks.
Provide target branch name or `uncommitted` to check before committing:
$ python runtests.py --lint main
$ python runtests.py --lint uncommitted
"""
#
# This is a generic test runner script for projects using NumPy's test
# framework. Change the following values to adapt to your project:
#
PROJECT_MODULE = "numpy"
PROJECT_ROOT_FILES = ['numpy', 'LICENSE.txt', 'setup.py']
SAMPLE_TEST = "numpy/linalg/tests/test_linalg.py::test_byteorder_check"
SAMPLE_SUBMODULE = "linalg"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
import sys
import os, glob
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
import shutil
import subprocess
import time
from argparse import ArgumentParser, REMAINDER
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--verbose", "-v", action="count", default=1,
help="Add one verbosity level to pytest. Default is 0")
parser.add_argument("--debug-info", action="store_true",
help=("Add --verbose-cfg to build_src to show "
"compiler configuration output while creating "
"_numpyconfig.h and config.h"))
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="Do not build the project (use system installed "
"version)")
parser.add_argument("--build-only", "-b", action="store_true",
default=False, help="Just build, do not run any tests")
parser.add_argument("--doctests", action="store_true", default=False,
help="Run doctests in module")
parser.add_argument("--refguide-check", action="store_true", default=False,
help="Run refguide (doctest) check (do not run "
"regular tests.)")
parser.add_argument("--coverage", action="store_true", default=False,
help=("Report coverage of project code. HTML output "
"goes under build/coverage"))
parser.add_argument("--lint", default=None,
help="'<Target Branch>' or 'uncommitted', passed to "
"tools/linter.py [--branch BRANCH] "
"[--uncommitted]")
parser.add_argument("--durations", action="store", default=-1, type=int,
help=("Time N slowest tests, time all if 0, time none "
"if < 0"))
parser.add_argument("--gcov", action="store_true", default=False,
help=("Enable C code coverage via gcov (requires "
"GCC). gcov output goes to build/**/*.gc*"))
parser.add_argument("--lcov-html", action="store_true", default=False,
help=("Produce HTML for C code coverage information "
"from a previous run with --gcov. "
"HTML output goes to build/lcov/"))
parser.add_argument("--mode", "-m", default="fast",
help="'fast', 'full', or something that could be "
"passed to nosetests -A [default: fast]")
parser.add_argument("--submodule", "-s", default=None,
help="Submodule whose tests to run (cluster, "
"constants, ...)")
parser.add_argument("--pythonpath", "-p", default=None,
help="Paths to prepend to PYTHONPATH")
parser.add_argument("--tests", "-t", action='append',
help="Specify tests to run")
parser.add_argument("--python", action="store_true",
help="Start a Python shell with PYTHONPATH set")
parser.add_argument("--ipython", "-i", action="store_true",
help="Start IPython shell with PYTHONPATH set")
parser.add_argument("--shell", action="store_true",
help="Start Unix shell with PYTHONPATH set")
parser.add_argument("--mypy", action="store_true",
help="Run mypy on files with NumPy on the MYPYPATH")
parser.add_argument("--debug", "-g", action="store_true",
help="Debug build")
parser.add_argument("--parallel", "-j", type=int, default=0,
help="Number of parallel jobs during build")
parser.add_argument("--warn-error", action="store_true",
help="Set -Werror to convert all compiler warnings to "
"errors")
parser.add_argument("--cpu-baseline", default=None,
help="Specify a list of enabled baseline CPU "
"optimizations"),
parser.add_argument("--cpu-dispatch", default=None,
help="Specify a list of dispatched CPU optimizations"),
parser.add_argument("--disable-optimization", action="store_true",
help="Disable CPU optimized code (dispatch, simd, "
"fast, ...)"),
parser.add_argument("--simd-test", default=None,
help="Specify a list of CPU optimizations to be "
"tested against NumPy SIMD interface"),
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--bench", action="store_true",
help="Run benchmark suite instead of test suite")
parser.add_argument("--bench-compare", action="store", metavar="COMMIT",
help=("Compare benchmark results of current HEAD to "
"BEFORE. Use an additional "
"--bench-compare=COMMIT to override HEAD with "
"COMMIT. Note that you need to commit your "
"changes first!"))
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to pytest, asv, mypy, Python "
"or shell")
args = parser.parse_args(argv)
if args.durations < 0:
args.durations = -1
if args.bench_compare:
args.bench = True
args.no_build = True # ASV does the building
if args.lcov_html:
# generate C code coverage output
lcov_generate()
sys.exit(0)
if args.pythonpath:
for p in reversed(args.pythonpath.split(os.pathsep)):
sys.path.insert(0, p)
if args.gcov:
gcov_reset_counters()
if args.debug and args.bench:
print("*** Benchmarks should not be run against debug "
"version; remove -g flag ***")
if args.lint:
check_lint(args.lint)
if not args.no_build:
# we need the noarch path in case the package is pure python.
site_dir, site_dir_noarch = build_project(args)
sys.path.insert(0, site_dir)
sys.path.insert(0, site_dir_noarch)
os.environ['PYTHONPATH'] = site_dir + os.pathsep + site_dir_noarch
else:
_temp = __import__(PROJECT_MODULE)
site_dir = os.path.sep.join(_temp.__file__.split(os.path.sep)[:-2])
extra_argv = args.args[:]
if not args.bench:
# extra_argv may also lists selected benchmarks
if extra_argv and extra_argv[0] == '--':
extra_argv = extra_argv[1:]
if args.python:
# Debugging issues with warnings is much easier if you can see them
print("Enabling display of all warnings")
import warnings
import types
warnings.filterwarnings("always")
if extra_argv:
# Don't use subprocess, since we don't want to include the
# current path in PYTHONPATH.
sys.argv = extra_argv
with open(extra_argv[0], 'r') as f:
script = f.read()
sys.modules['__main__'] = types.ModuleType('__main__')
ns = dict(__name__='__main__',
__file__=extra_argv[0])
exec(script, ns)
sys.exit(0)
else:
import code
code.interact()
sys.exit(0)
if args.ipython:
# Debugging issues with warnings is much easier if you can see them
print("Enabling display of all warnings and pre-importing numpy as np")
import warnings; warnings.filterwarnings("always")
import IPython
import numpy as np
IPython.embed(colors='neutral', user_ns={"np": np})
sys.exit(0)
if args.shell:
shell = os.environ.get('SHELL', 'cmd' if os.name == 'nt' else 'sh')
print("Spawning a shell ({})...".format(shell))
subprocess.call([shell] + extra_argv)
sys.exit(0)
if args.mypy:
try:
import mypy.api
except ImportError:
raise RuntimeError(
"Mypy not found. Please install it by running "
"pip install -r test_requirements.txt from the repo root"
)
os.environ['MYPYPATH'] = site_dir
# By default mypy won't color the output since it isn't being
# invoked from a tty.
os.environ['MYPY_FORCE_COLOR'] = '1'
config = os.path.join(
site_dir,
"numpy",
"typing",
"tests",
"data",
"mypy.ini",
)
report, errors, status = mypy.api.run(
['--config-file', config] + args.args
)
print(report, end='')
print(errors, end='', file=sys.stderr)
sys.exit(status)
if args.coverage:
dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
fn = os.path.join(dst_dir, 'coverage_html.js')
if os.path.isdir(dst_dir) and os.path.isfile(fn):
shutil.rmtree(dst_dir)
extra_argv += ['--cov-report=html:' + dst_dir]
if args.refguide_check:
cmd = [os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'),
'--doctests']
if args.submodule:
cmd += [args.submodule]
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(0)
if args.bench:
# Run ASV
for i, v in enumerate(extra_argv):
if v.startswith("--"):
items = extra_argv[:i]
if v == "--":
i += 1 # skip '--' indicating further are passed on.
bench_args = extra_argv[i:]
break
else:
items = extra_argv
bench_args = []
if args.tests:
items += args.tests
if args.submodule:
items += [args.submodule]
for a in items:
bench_args.extend(['--bench', a])
if not args.bench_compare:
cmd = ['asv', 'run', '-n', '-e', '--python=same'] + bench_args
ret = subprocess.call(cmd, cwd=os.path.join(ROOT_DIR, 'benchmarks'))
sys.exit(ret)
else:
commits = [x.strip() for x in args.bench_compare.split(',')]
if len(commits) == 1:
commit_a = commits[0]
commit_b = 'HEAD'
elif len(commits) == 2:
commit_a, commit_b = commits
else:
                parser.error("Too many commits to compare benchmarks for")
# Check for uncommitted files
if commit_b == 'HEAD':
r1 = subprocess.call(['git', 'diff-index', '--quiet',
'--cached', 'HEAD'])
r2 = subprocess.call(['git', 'diff-files', '--quiet'])
if r1 != 0 or r2 != 0:
print("*"*80)
print("WARNING: you have uncommitted changes --- "
"these will NOT be benchmarked!")
print("*"*80)
# Fix commit ids (HEAD is local to current repo)
out = subprocess.check_output(['git', 'rev-parse', commit_b])
commit_b = out.strip().decode('ascii')
out = subprocess.check_output(['git', 'rev-parse', commit_a])
commit_a = out.strip().decode('ascii')
# generate config file with the required build options
asv_cfpath = [
'--config', asv_compare_config(
os.path.join(ROOT_DIR, 'benchmarks'), args,
# to clear the cache if the user changed build options
(commit_a, commit_b)
)
]
cmd = ['asv', 'continuous', '-e', '-f', '1.05',
commit_a, commit_b] + asv_cfpath + bench_args
ret = subprocess.call(cmd, cwd=os.path.join(ROOT_DIR, 'benchmarks'))
sys.exit(ret)
if args.build_only:
sys.exit(0)
else:
__import__(PROJECT_MODULE)
test = sys.modules[PROJECT_MODULE].test
if args.submodule:
tests = [PROJECT_MODULE + "." + args.submodule]
elif args.tests:
tests = args.tests
else:
tests = None
# Run the tests under build/test
if not args.no_build:
test_dir = site_dir
else:
test_dir = os.path.join(ROOT_DIR, 'build', 'test')
if not os.path.isdir(test_dir):
os.makedirs(test_dir)
shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'),
os.path.join(test_dir, '.coveragerc'))
cwd = os.getcwd()
try:
os.chdir(test_dir)
result = test(args.mode,
verbose=args.verbose,
extra_argv=extra_argv,
doctests=args.doctests,
coverage=args.coverage,
durations=args.durations,
tests=tests)
finally:
os.chdir(cwd)
if isinstance(result, bool):
sys.exit(0 if result else 1)
elif result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
site-packages directory where it was installed
"""
import sysconfig
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run runtests.py in "
"git checkout or unpacked source")
sys.exit(1)
dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
env = dict(os.environ)
cmd = [sys.executable, 'setup.py']
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
cvars = sysconfig.get_config_vars()
compiler = env.get('CC') or cvars.get('CC', '')
if 'gcc' in compiler:
# Check that this isn't clang masquerading as gcc.
if sys.platform != 'darwin' or 'gnu-gcc' in compiler:
# add flags used as werrors
warnings_as_errors = ' '.join([
# from tools/travis-test.sh
'-Werror=vla',
'-Werror=nonnull',
'-Werror=pointer-arith',
'-Wlogical-op',
# from sysconfig
'-Werror=unused-function',
])
env['CFLAGS'] = warnings_as_errors + ' ' + env.get('CFLAGS', '')
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = cvars['CC'] + ' --coverage'
env['CXX'] = cvars['CXX'] + ' --coverage'
env['F77'] = 'gfortran --coverage '
env['F90'] = 'gfortran --coverage '
env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) + ' --coverage'
cmd += ["build"]
if args.parallel > 1:
cmd += ["-j", str(args.parallel)]
if args.warn_error:
cmd += ["--warn-error"]
if args.cpu_baseline:
cmd += ["--cpu-baseline", args.cpu_baseline]
if args.cpu_dispatch:
cmd += ["--cpu-dispatch", args.cpu_dispatch]
if args.disable_optimization:
cmd += ["--disable-optimization"]
if args.simd_test is not None:
cmd += ["--simd-test", args.simd_test]
if args.debug_info:
cmd += ["build_src", "--verbose-cfg"]
# Install; avoid producing eggs so numpy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
'--record=' + dst_dir + 'tmp_install_log.txt']
from distutils.sysconfig import get_python_lib
site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
site_dir_noarch = get_python_lib(prefix=dst_dir, plat_specific=False)
# easy_install won't install to a path that Python by default cannot see
# and isn't on the PYTHONPATH. Plus, it has to exist.
if not os.path.exists(site_dir):
os.makedirs(site_dir)
if not os.path.exists(site_dir_noarch):
os.makedirs(site_dir_noarch)
env['PYTHONPATH'] = site_dir + os.pathsep + site_dir_noarch
log_filename = os.path.join(ROOT_DIR, 'build.log')
if args.show_build_log:
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
else:
log_filename = os.path.join(ROOT_DIR, 'build.log')
print("Building, see build.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
try:
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
# allow continuous integration environments kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
print(" ... build in progress")
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
except:
p.kill()
p.wait()
raise
if ret == 0:
print("Build OK")
else:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Build failed!")
sys.exit(1)
return site_dir, site_dir_noarch
def asv_compare_config(bench_path, args, h_commits):
"""
Fill the required build options through custom variable
'numpy_build_options' and return the generated config path.
"""
conf_path = os.path.join(bench_path, "asv_compare.conf.json.tpl")
nconf_path = os.path.join(bench_path, "_asv_compare.conf.json")
# add custom build
build = []
if args.parallel > 1:
build += ["-j", str(args.parallel)]
if args.cpu_baseline:
build += ["--cpu-baseline", args.cpu_baseline]
if args.cpu_dispatch:
build += ["--cpu-dispatch", args.cpu_dispatch]
if args.disable_optimization:
build += ["--disable-optimization"]
is_cached = asv_substitute_config(conf_path, nconf_path,
numpy_build_options = ' '.join([f'\\"{v}\\"' for v in build]),
numpy_global_options= ' '.join([f'--global-option=\\"{v}\\"' for v in ["build"] + build])
)
if not is_cached:
asv_clear_cache(bench_path, h_commits)
return nconf_path
def asv_clear_cache(bench_path, h_commits, env_dir="env"):
"""
Force ASV to clear the cache according to specified commit hashes.
"""
# FIXME: only clear the cache from the current environment dir
asv_build_pattern = os.path.join(bench_path, env_dir, "*", "asv-build-cache")
for asv_build_cache in glob.glob(asv_build_pattern, recursive=True):
for c in h_commits:
try: shutil.rmtree(os.path.join(asv_build_cache, c))
except OSError: pass
def asv_substitute_config(in_config, out_config, **custom_vars):
"""
A workaround to allow substituting custom tokens within
ASV configuration file since there's no official way to add custom
variables(e.g. env vars).
Parameters
----------
in_config : str
The path of ASV configuration file, e.g. '/path/to/asv.conf.json'
out_config : str
The path of generated configuration file,
e.g. '/path/to/asv_substituted.conf.json'.
The other keyword arguments represent the custom variables.
Returns
-------
True(is cached) if 'out_config' is already generated with
the same '**custom_vars' and updated with latest 'in_config',
False otherwise.
Examples
--------
See asv_compare_config().
"""
assert in_config != out_config
assert len(custom_vars) > 0
def sdbm_hash(*factors):
chash = 0
for f in factors:
for char in str(f):
chash = ord(char) + (chash << 6) + (chash << 16) - chash
chash &= 0xFFFFFFFF
return chash
vars_hash = sdbm_hash(custom_vars, os.path.getmtime(in_config))
try:
with open(out_config, "r") as wfd:
hash_line = wfd.readline().split('hash:')
if len(hash_line) > 1 and int(hash_line[1]) == vars_hash:
return True
except IOError:
pass
custom_vars = {f'{{{k}}}':v for k, v in custom_vars.items()}
with open(in_config, "r") as rfd, open(out_config, "w") as wfd:
wfd.write(f"// hash:{vars_hash}\n")
wfd.write("// This file is automatically generated by runtests.py\n")
for line in rfd:
for key, val in custom_vars.items():
line = line.replace(key, val)
wfd.write(line)
return False
#
# GCOV support
#
def gcov_reset_counters():
print("Removing previous GCOV .gcda files...")
build_dir = os.path.join(ROOT_DIR, 'build')
for dirpath, dirnames, filenames in os.walk(build_dir):
for fn in filenames:
if fn.endswith('.gcda') or fn.endswith('.da'):
pth = os.path.join(dirpath, fn)
os.unlink(pth)
#
# LCOV support
#
LCOV_OUTPUT_FILE = os.path.join(ROOT_DIR, 'build', 'lcov.out')
LCOV_HTML_DIR = os.path.join(ROOT_DIR, 'build', 'lcov')
def lcov_generate():
try: os.unlink(LCOV_OUTPUT_FILE)
except OSError: pass
try: shutil.rmtree(LCOV_HTML_DIR)
except OSError: pass
print("Capturing lcov info...")
subprocess.call(['lcov', '-q', '-c',
'-d', os.path.join(ROOT_DIR, 'build'),
'-b', ROOT_DIR,
'--output-file', LCOV_OUTPUT_FILE])
print("Generating lcov HTML output...")
ret = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE,
'--output-directory', LCOV_HTML_DIR,
'--legend', '--highlight'])
if ret != 0:
print("genhtml failed!")
else:
print("HTML output generated under build/lcov/")
def check_lint(lint_args):
"""
Adds ROOT_DIR to path and performs lint checks.
This functions exits the program with status code of lint check.
"""
sys.path.append(ROOT_DIR)
try:
from tools.linter import DiffLinter
except ModuleNotFoundError as e:
print(f"Error: {e.msg}. "
"Install using linter_requirements.txt.")
sys.exit(1)
uncommitted = lint_args == "uncommitted"
branch = "main" if uncommitted else lint_args
DiffLinter(branch).run_lint(uncommitted)
if __name__ == "__main__":
main(argv=sys.argv[1:])
| bsd-3-clause | -8,328,558,529,596,377,000 | 35.958884 | 97 | 0.54579 | false |
cmuphyscomp/hmv-s16 | Grasshopper/MocapDemo/GestureLogic.py | 1 | 19991 | # GestureLogic - state machine for interface logic for the gesture-based editing.
#
# This encompasses all the logic for the editor which is easier to write in
# Python than Grasshopper objects. Only one GestureLogic instance is expected to
# exist since it holds and tracks user inputs.
#
# Objectives for this block:
#
# 1. Only hold transient user state. All persistent data is read from the
# RhinoDoc, manipulated in a transient way, and then written back to the
# RhinoDoc or discarded.
#
# 2. Emit signals to read and write the model rather than directly manipulate
# the RhinoDoc. This does increase the number of I/O variables, but
# is intended to make the operation easier to observe, debug, and extend.
#
# inputs:
# name - name token string for sticky
# reset - bool to re-create the editor state from scratch
# gesture - None or relative sample index (integer) of detected gesture event
# cursor - list of Planes with recent cursor object trajectory
# poses - list of Planes saved at gesture events
# mode - integer selecting the editing mode: 1 = tap create, 2 = path create, 3 = block move, 4 = path move
# update - bool; when True, commit the pending edits back to the RhinoDoc (via the add/move outputs)
# selection - list of geometry objects to treat as the current selection
# selguids - list of RhinoDoc GUID strings corresponding to 'selection'
# setselect - bool; when it goes True, capture 'selection'/'selguids' as the working set
# clear - bool; discard all transient editor state (new poses, attachment, transforms)
# all_names - list of all allowable object name strings for the editable layer
# layer_name - name of the editable layer in the RhinoDoc
# create_interval - integer number of cycles between creating new objects
# Note: the following must have 'List Access' set: cursor, poses, selection, selguids, all_names
#
# outputs:
# out - log string output for display; the log is persistent to reduce the amount of flickering
# add - bool; True for one cycle when the objects in 'newloc'/'names' should be added to the RhinoDoc
# move - bool; True for one cycle when the selected objects should be transformed by 'xform'
# names - list of name attributes for the newly created objects
# newloc - list of Planes giving the poses of the newly created objects
# status - terse status string for display on a remote panel
# objects - geometry of the current selection
# guids - RhinoDoc GUID strings of the current selection
# xform - transform (or list of per-object transforms) to apply to the selection
################################################################
import scriptcontext as sc
import clr
import System.Guid
import math
import Rhino
import pythonlibpath; pythonlibpath.add_library_path()
import ghutil.ghrhinodoc as ghrhinodoc
################################################################
class EditorLogic(object):
"""Utility class to manage the state of the interactive editor."""
def __init__(self, _layer_name = 'Cards', _all_names = []):
self.layer_name = _layer_name
self._last_setselect = False # debounce variable
self.attached = False
self.interval_counter = 0
self.mocap_dt = 1.0 / 120 # sampling rate of the motion capture stream
# list of strings in which to accumulate messages for output
self.log = ["Editor initialized for layer %s." % self.layer_name]
# initialize the default sets of object names based on those found on the RhinoDoc layer
self._update_namesets(_all_names)
# freshly created objects: poses and names
self.new_object_poses = list()
self.new_object_names = list()
# the current selection
self.selection = None
self.docguids = None
self.selection_bb = None
self.selection_bb_size = None
# coordinate transformation for group edits
self.transform = None
self.motion = None
self.xforms = [] # list of transforms, one per selected object
return
def add_new_object_pose(self, plane):
if plane is not None:
name = self.choose_new_name()
if name is not None:
self.new_object_poses.append(plane)
self.new_object_names.append(name)
return
def clear_edits(self):
"""Reset all transient editor state, either at user request or after editing cycle is complete."""
self.new_object_poses = list()
self.new_object_names = list()
self.attached = False
self.selection = None
self.docguids = None
return
def logprint(self, msg):
self.log.append(msg)
def clear_log(self):
self.log = []
def set_namesets(self, all_names, used_names):
"""Update the name manager given a list of all possible object names and the list of object names currently in use."""
self.all_names = set(all_names)
self.used_names = set()
# check for duplicate names
for used in used_names:
if used in self.used_names:
self.logprint("Warning: object name %s appears more than once." % used)
else:
self.used_names.add(used)
# check for used names not listed in the all_names set
invalid_names = self.used_names - self.all_names
if invalid_names:
self.logprint("Warning: invalid names in use: %s" % invalid_names)
# compute the list of available names
self.unused_names = self.all_names - self.used_names
self.logprint("Found the following unused object names: %s" % self.unused_names)
return
def choose_new_name(self):
"""Pick an name arbitrarily from the set of unused names."""
if len(self.unused_names) == 0:
self.logprint("Warning: no more object names available.")
return None
# return new names in numerical order for clarity
new_name = sorted(self.unused_names)[0]
self.unused_names.remove(new_name)
return new_name
def _update_namesets(self, all_names):
all_objects = ghrhinodoc.all_doc_objects(layer_name)
names = [obj.Attributes.Name for obj in all_objects]
self.set_namesets(all_names, names)
return
def _compute_set_bounding_box(self, selection):
if selection is None or len(selection) == 0:
            return None, None  # keep the (box, size) tuple shape expected by callers
# compute bounding box for all objects in a set
boxes = [obj.GetBoundingBox(True) for obj in selection]
# compute union of all boxes
union = boxes[0]
# destructively merge this with the other boxes
for b in boxes[1:]:
union.Union(b)
return union, union.Diagonal.Length
def manage_user_selection(self, setselect, selection, selguids, all_names):
"""Process the user 'Set Selection' input, updating the editor state for any new
objects and names as needed."""
if setselect != self._last_setselect:
# debounce input to just trigger once
self._last_setselect = setselect
if setselect == True:
self.selection = selection
self.docguids = selguids
                self.selection_bb, self.selection_bb_size = self._compute_set_bounding_box(selection)
                if self.selection_bb is not None:
                    self.logprint("Updated selection bounding box to %s, diagonal size %f" % (self.selection_bb, self.selection_bb_size))
# reset the pick and place state
self.attached = False
self.transform = None
self.xforms = []
self.logprint("Selection set with %d objects." % len(selection))
self._update_namesets(all_names)
#================================================================
def read_objects_from_layer(self, layer_name):
"""Read the user-visible names of all objects on a specific RhinoDoc layer.
Returns a tuple (geoms, guids, names) with lists of all geometry
objects, RhinoDoc GUID strings, and name attributes.
"""
layer_index = ghrhinodoc.fetch_or_create_layer_index(layer_name)
# Fetch all objects on the layer and report out individual properties.
all_objects = ghrhinodoc.all_doc_objects(layer_name)
geoms = [obj.Geometry for obj in all_objects]
guids = [str(obj.Id) for obj in all_objects]
names = [obj.Attributes.Name for obj in all_objects]
return geoms, guids, names
#================================================================
def update_tap_create_mode(self, gesture, cursor, update, clear):
"""Update state for the 'tap create' mode in which gestures create individual cards.
gesture - the integer gesture sample index or None
cursor - the list of recent cursor poses
returns newloc, names, add
"""
# default outputs
names, add = None, None
# tap create mode: each gesture creates a new object
if gesture is not None:
self.logprint("Gesture detected with sample offset %d." % gesture)
# gesture is an integer sample where zero is the most recent pose;
# index the current cursor poses from the end to select the correct
# pose
self.add_new_object_pose(cursor[gesture-1])
if clear == True:
self.logprint( "Abandoning editor changes (dropping new object poses).")
self.clear_edits()
# by default, always emit the new poses so they can be visualized
newloc = self.new_object_poses
if update == True:
self.logprint("Writing new objects to RhinoDoc: %s" % self.new_object_names)
names = self.new_object_names
add = True
self.clear_edits() # note: the current list has already been emitted, this just resets the buffer
return newloc, names, add
#================================================================
def update_path_create_mode(self, _gesture, _cursor, _update, _clear, _all_names, _create_rate):
"""Update state for the 'symbol sprayer' mode which places new objects along a
cursor path. Each gesture event toggles the creation events on or off.
returns newloc, names, add
"""
# default outputs
names, add = None, False
# detect the singular gesture events (for now, a flick of the wand)
if _gesture is not None:
if self.attached:
self.logprint("Creation path ended.")
else:
self.logprint("Creation path beginning.")
self.interval_counter = 0
self._update_namesets(_all_names)
# toggle the 'attached' state
self.attached = not self.attached
# while 'attached' to the sprayer, create new objects at regular intervals
if self.attached:
self.interval_counter += 1
if self.interval_counter > _create_rate:
self.interval_counter = 0
self.add_new_object_pose(_cursor[-1])
if _clear == True:
self.logprint( "Abandoning editor changes (dropping new object poses).")
self.clear_edits()
# by default, always emit the new poses so they can be visualized
newloc = self.new_object_poses
if _update == True:
self.logprint("Writing new objects to RhinoDoc: %s" % self.new_object_names)
names = self.new_object_names
add = True
self.clear_edits() # note: the current list has already been emitted, this just resets the buffer
return newloc, names, add
#================================================================
def update_block_move_mode(self, gesture, cursor, poses, update, clear):
"""Update state for the 'block move' mode in which each gesture alternately
attaches or detaches the selection from the cursor.
returns objects, guids, xform, move
"""
# set default outputs
objects = self.selection
guids = self.docguids
move = False
motion = Rhino.Geometry.Transform(1) # motion transform is identity value by default
if clear == True:
self.logprint("Abandoning editor changes (clearing movement).")
self.transform = None
# detect the singular gesture events (for now, a flick of the wand)
if gesture is not None:
# if we are ending a motion segment, save the most recent transformation as the new base transform
if self.attached:
self.transform = self.transform * self.motion
self.logprint("Motion ended, new transform saved.")
else:
self.logprint("Motion beginning.")
# toggle the 'attached' state
self.attached = not self.attached
if self.attached:
if len(poses) > 0 and len(cursor) > 0:
# compute a tranform the from most recent saved pose to the newest cursor position
motion = Rhino.Geometry.Transform.PlaneToPlane(poses[-1], cursor[-1])
# compute an output transformation from the accumulated transform plus any transient movement
if self.transform is None:
self.transform = Rhino.Geometry.Transform(1) # identity
xform = self.transform * motion
self.motion = motion
if update == True:
self.logprint("Updating RhinoDoc selection with new poses.")
move = True
self.clear_edits()
return objects, guids, xform, move
#================================================================
def update_path_move_mode(self, gesture, cursor, poses, update, clear):
"""Update state for the 'path move' mode in which each gesture toggles the
enable, and the cursor velocity affects object positions within a 'brush'
radius.
returns objects, guids, xform, move
"""
# set default outputs
objects = self.selection
guids = self.docguids
move = False
delta = Rhino.Geometry.Transform(1) # motion transform is identity value by default
# FIXME: this is probably moot
if self.transform is None:
self.transform = Rhino.Geometry.Transform(1) # identity
if self.selection is not None and (self.xforms is None or len(self.xforms) != len(self.selection)):
self.xforms = [Rhino.Geometry.Transform(1) for x in self.selection]
if clear == True:
self.logprint("Abandoning editor changes (clearing movement).")
self.transform = Rhino.Geometry.Transform(1)
# detect the singular gesture events (for now, a flick of the wand)
if gesture is not None:
# if we are ending a motion segment
if self.attached:
self.logprint("Motion deactivated.")
else:
self.logprint("Motion activated.")
# toggle the 'attached' state
self.attached = not self.attached
if self.attached:
if len(cursor) > 1 and cursor[-1] is not None and cursor[-2] is not None:
# Compute separate translation and rotation thresholds to
# determine whether the velocity is high enough to be a gesture.
# Find the rotation and translation between the last pair of samples:
rot = Rhino.Geometry.Quaternion.Rotation(cursor[-2], cursor[-1])
delta = cursor[-1].Origin - cursor[-2].Origin
displacement = delta.Length
# Convert the rotation to axis-angle form to find the magnitude. The function uses C# call by reference to return
# the parameters as 'out' values:
angle = clr.Reference[float]()
axis = clr.Reference[Rhino.Geometry.Vector3d]()
rot.GetRotation(angle, axis)
angle = angle.Value # get rid of the StrongBox around the number
axis = axis.Value # get rid of the StrongBox around the vector
# The angle is returned on (0,2*pi); manage the wraparound
if angle > math.pi:
angle -= 2*math.pi
# normalize to a velocity measure: m/sec, radians/sec
speed = displacement / self.mocap_dt
omega = angle / self.mocap_dt
# compute object to cursor distances
boxes = [obj.GetBoundingBox(False) for obj in self.selection]
center = cursor[-1].Origin
distances = [box.Center.DistanceTo(center) for box in boxes]
# Apply thresholds to determine whether the gesture represents intentional motion:
if speed > 1.0 and True:
self.transform = self.transform * Rhino.Geometry.Transform.Translation(delta)
if abs(omega) > 2.0 and True:
# self.logprint("detected motion on speed %f and angular rate %f" % (speed, omega))
# apply the movement to the output tranform
# FIXME: transform should be a list, one per object, selective via a spherical cursor
# choose a specific method from the set of overloaded signatures
Rotation_Factory = Rhino.Geometry.Transform.Rotation.Overloads[float, Rhino.Geometry.Vector3d, Rhino.Geometry.Point3d]
rot_xform = Rotation_Factory(angle, axis, center)
self.transform = self.transform * rot_xform
# Apply a weighted displacement to each object transform. The scaling matches the rolloff of the
# effect to be proportional to the size of the bounding box of the moving objects.
scale = 0.1 * self.selection_bb_size * self.selection_bb_size
weights = [min(1.0, scale/(dist*dist)) if dist > 0.0 else 1.0 for dist in distances]
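                    # Falloff sketch (hypothetical numbers): with a selection bounding-box
                    # diagonal of 10 units, scale = 0.1 * 10 * 10 = 10, so an object 2 units
                    # from the cursor gets weight min(1, 10/4) = 1.0, while one 10 units away
                    # gets only min(1, 10/100) = 0.1 of the wand's rotation.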
# self.logprint("Weights: %s" % (weights,))
rotations = [Rotation_Factory(angle*weight, axis, center) for weight in weights]
self.xforms = [xform*rot for xform,rot in zip(self.xforms, rotations)]
if update == True:
self.logprint("Updating RhinoDoc selection with new poses.")
move = True
self.clear_edits()
return objects, guids, self.xforms, move
################################################################
# create or re-create the editor state as needed
editor = sc.sticky.get(name)
if editor is None or reset:
editor = EditorLogic('Cards', all_names)
sc.sticky[name] = editor
# set default output values
add = False
move = False
names = None
newloc = None
objects = None
guids = None
xform = None
if reset:
print "Interaction logic in reset state."
status = "Reset"
else:
# for all modes, record the set of selected objects when indicated
editor.manage_user_selection(setselect, selection, selguids, all_names)
# handle the state update for each individual mode
if mode == 1:
newloc, names, add = editor.update_tap_create_mode(gesture, cursor, update, clear)
elif mode == 2:
newloc, names, add = editor.update_path_create_mode(gesture, cursor, update, clear, all_names, create_interval)
elif mode == 3:
objects, guids, xform, move = editor.update_block_move_mode(gesture, cursor, poses, update, clear)
elif mode == 4:
objects, guids, xform, move = editor.update_path_move_mode(gesture, cursor, poses, update, clear)
# emit terse status for remote panel
status = "M:%s C:%d P:%d N:%d" % (editor.attached, len(cursor), len(poses), len(editor.new_object_poses))
# re-emit the log output
for str in editor.log: print str
| bsd-3-clause | -4,635,597,503,302,952,000 | 40.170886 | 144 | 0.573615 | false |
marianotepper/nmu_rfit | rnmu/pme/sampling.py | 1 | 2096 | from __future__ import absolute_import
import numpy as np
import collections
try:
from itertools import imap
except ImportError:
imap = map
try:
from functools import reduce
except ImportError:
pass
class SampleSet(collections.MutableSet):
def __init__(self):
self._dict = {}
self._len = 0
def __contains__(self, sample):
try:
return reduce(lambda d, k: d[k], sample, self._dict)
except KeyError:
return False
def __len__(self):
return self._len
def add(self, sample):
d = self._dict
for i, s in enumerate(sample):
if i == len(sample) - 1:
d[s] = True
continue
if s not in d:
d[s] = {}
d = d[s]
self._len += 1
def discard(self, sample):
pass
def __iter__(self):
pass
class UniformSampler(object):
def __init__(self, n_samples=None, seed=None):
self.n_samples = n_samples
self.sample_set = SampleSet()
if seed is not None:
np.random.seed(seed)
def generate(self, x, min_sample_size):
n_elements = len(x)
all_elems = np.arange(n_elements)
for _ in range(self.n_samples):
sample = np.random.choice(all_elems, size=min_sample_size,
replace=False)
if sample not in self.sample_set:
self.sample_set.add(sample)
yield sample
class ModelGenerator(object):
def __init__(self, model_class, sampler):
self._sampler = sampler
self.model_class = model_class
self.elements = None
@property
def n_samples(self):
return self._sampler.n_samples
def __iter__(self):
def generate(s):
ms_set = np.take(self.elements, s, axis=0)
return self.model_class(ms_set)
samples = self._sampler.generate(self.elements,
self.model_class().min_sample_size)
return imap(generate, samples)
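# Minimal usage sketch (LineModel is a hypothetical model class; the ModelGenerator
# contract only requires a class that can be constructed with no arguments, exposes
# `min_sample_size`, and can also be constructed from a subset of the data):
#
#   sampler = UniformSampler(n_samples=500, seed=0)
#   generator = ModelGenerator(LineModel, sampler)
#   generator.elements = points            # e.g. an (n, 2) numpy array
#   models = list(generator)               # one model per accepted random sample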
| bsd-3-clause | -3,478,362,334,029,783,600 | 25.2 | 76 | 0.537214 | false |
wimberosa/samba | source4/scripting/python/samba/netcmd/common.py | 1 | 2466 | #!/usr/bin/env python
#
# common functions for samba-tool python commands
#
# Copyright Andrew Tridgell 2010
# Copyright Giampaolo Lauria 2011 <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import re
from samba.dcerpc import nbt
from samba.net import Net
def _get_user_realm_domain(user):
""" get the realm or the domain and the base user
from user like:
* username
* DOMAIN\username
* username@REALM
"""
baseuser = user
realm = ""
domain = ""
m = re.match(r"(\w+)\\(\w+$)", user)
if m:
domain = m.group(1)
baseuser = m.group(2)
return (baseuser.lower(), domain.upper(), realm)
m = re.match(r"(\w+)@(\w+)", user)
if m:
baseuser = m.group(1)
realm = m.group(2)
        return (baseuser.lower(), domain, realm.upper())
    # fall through: plain 'username' with no domain or realm part
    return (baseuser.lower(), domain, realm)
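# Illustrative results (based on the regexes above; note the realm branch only
# captures `\w+`, so a dotted realm is truncated at the first dot):
#   _get_user_realm_domain(r"SAMDOM\Administrator") -> ("administrator", "SAMDOM", "")
#   _get_user_realm_domain("alice@SAMDOM")          -> ("alice", "", "SAMDOM")
#   _get_user_realm_domain("alice")                 -> ("alice", "", "")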
def netcmd_dnsname(lp):
'''return the full DNS name of our own host. Used as a default
for hostname when running status queries'''
return lp.get('netbios name').lower() + "." + lp.get('realm').lower()
def netcmd_finddc(lp, creds, realm=None):
'''Return domain-name of a writable/ldap-capable DC for the default
domain (parameter "realm" in smb.conf) unless another realm has been
specified as argument'''
net = Net(creds=creds, lp=lp)
if realm is None:
realm = lp.get('realm')
cldap_ret = net.finddc(domain=realm,
flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS | nbt.NBT_SERVER_WRITABLE)
return cldap_ret.pdc_dns_name
def netcmd_get_domain_infos_via_cldap(lp, creds, address=None):
'''Return domain informations (CLDAP record) of the ldap-capable
DC with the specified address'''
net = Net(creds=creds, lp=lp)
cldap_ret = net.finddc(address=address,
flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS)
return cldap_ret
| gpl-3.0 | 3,366,601,980,079,461,000 | 32.780822 | 88 | 0.663017 | false |